if t.Width == -2 {
lno = int(lineno)
lineno = int32(t.Lineno)
- if !(t.Broke != 0) {
+ if t.Broke == 0 {
t.Broke = 1
Yyerror("invalid recursive type %v", Tconv(t, 0))
}
checkwidth(t.Down)
case TFORW: // should have been filled in
- if !(t.Broke != 0) {
+ if t.Broke == 0 {
Yyerror("invalid recursive type %v", Tconv(t, 0))
}
w = 1 // anything will do
// dummy type; should be replaced before use.
case TANY:
- if !(Debug['A'] != 0) {
+ if Debug['A'] == 0 {
Fatal("dowidth any")
}
w = 1 // anything will do
checkwidth(t.Type)
t.Align = uint8(Widthptr)
} else if t.Bound == -100 {
- if !(t.Broke != 0) {
+ if t.Broke == 0 {
Yyerror("use of [...] array outside of array literal")
t.Broke = 1
}
Fatal("checkwidth %v", Tconv(t, 0))
}
- if !(defercalc != 0) {
+ if defercalc == 0 {
dowidth(t)
return
}
func resumecheckwidth() {
var l *TypeList
- if !(defercalc != 0) {
+ if defercalc == 0 {
Fatal("resumecheckwidth")
}
for l = tlq; l != nil; l = tlq {
return c;
}
*/
-func bany(a *Bits) int {
+func bany(a *Bits) bool {
var i int
for i = 0; i < BITS; i++ {
if a.b[i] != 0 {
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
return c
}
-func btest(a *Bits, n uint) int {
- return bool2int(a.b[n/64]&(1<<(n%64)) != 0)
+func btest(a *Bits, n uint) bool {
+ return a.b[n/64]&(1<<(n%64)) != 0
}
func biset(a *Bits, n uint) {
first = 1
- for bany(&bits) != 0 {
+ for bany(&bits) {
i = bnum(bits)
if first != 0 {
first = 0
return int(i)
}
-func bvisempty(bv *Bvec) int {
+func bvisempty(bv *Bvec) bool {
var i int32
for i = 0; i < bv.n; i += WORDBITS {
if bv.b[i>>WORDSHIFT] != 0 {
- return 0
+ return false
}
}
- return 1
+ return true
}
func bvnot(bv *Bvec) {
for l = func_.Cvars; l != nil; l = l.Next {
n = l.N.Closure
- if !(n.Captured != 0) {
+ if n.Captured == 0 {
n.Captured = 1
if n.Decldepth == 0 {
Fatal("typecheckclosure: var %v does not have decldepth assigned", Nconv(n, obj.FmtShort))
v.Outerexpr = nil
// out parameters will be assigned to implicitly upon return.
- if outer.Class != PPARAMOUT && !(v.Closure.Addrtaken != 0) && !(v.Closure.Assigned != 0) && v.Type.Width <= 128 {
+ if outer.Class != PPARAMOUT && v.Closure.Addrtaken == 0 && v.Closure.Assigned == 0 && v.Type.Width <= 128 {
v.Byval = 1
} else {
v.Closure.Addrtaken = 1
cv = Nod(OCLOSUREVAR, nil, nil)
cv.Type = v.Type
- if !(v.Byval != 0) {
+ if v.Byval == 0 {
cv.Type = Ptrto(v.Type)
}
offset = Rnd(offset, int64(cv.Type.Align))
typechecklist(body, Etop)
walkstmtlist(body)
xfunc.Enter = body
- xfunc.Needctxt = uint8(bool2int(nvar > 0))
+ xfunc.Needctxt = nvar > 0
}
lineno = int32(lno)
continue
}
typ1 = typenod(v.Type)
- if !(v.Byval != 0) {
+ if v.Byval == 0 {
typ1 = Nod(OIND, typ1, nil)
}
typ.List = list(typ.List, Nod(ODCLFIELD, newname(v.Sym), typ1))
// Declare and initialize variable holding receiver.
body = nil
- xfunc.Needctxt = 1
+ xfunc.Needctxt = true
cv = Nod(OCLOSUREVAR, nil, nil)
cv.Xoffset = int64(Widthptr)
cv.Type = rcvrtype
ptr.Used = 1
ptr.Curfn = xfunc
xfunc.Dcl = list(xfunc.Dcl, ptr)
- if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) != 0 {
+ if Isptr[rcvrtype.Etype] != 0 || Isinter(rcvrtype) {
ptr.Ntype = typenod(rcvrtype)
body = list(body, Nod(OAS, ptr, cv))
} else {
//
// Like walkclosure above.
- if Isinter(n.Left.Type) != 0 {
+ if Isinter(n.Left.Type) {
// Trigger panic for method on nil interface now.
// Otherwise it happens in the wrapper and is confusing.
n.Left = cheapexpr(n.Left, init)
* implicit conversion.
*/
func Convlit(np **Node, t *Type) {
- convlit1(np, t, 0)
+ convlit1(np, t, false)
}
/*
* return a new node if necessary
* (if n is a named constant, can't edit n->type directly).
*/
-func convlit1(np **Node, t *Type, explicit int) {
+func convlit1(np **Node, t *Type, explicit bool) {
var ct int
var et int
var n *Node
var nn *Node
n = *np
- if n == nil || t == nil || n.Type == nil || isideal(t) != 0 || n.Type == t {
+ if n == nil || t == nil || n.Type == nil || isideal(t) || n.Type == t {
return
}
- if !(explicit != 0) && !(isideal(n.Type) != 0) {
+ if !explicit && !isideal(n.Type) {
return
}
// target is invalid type for a constant? leave alone.
case OLITERAL:
- if !(okforconst[t.Etype] != 0) && n.Type.Etype != TNIL {
+ if okforconst[t.Etype] == 0 && n.Type.Etype != TNIL {
defaultlit(&n, nil)
*np = n
return
case OLSH,
ORSH:
- convlit1(&n.Left, t, bool2int(explicit != 0 && isideal(n.Left.Type) != 0))
+ convlit1(&n.Left, t, explicit && isideal(n.Left.Type))
t = n.Left.Type
if t != nil && t.Etype == TIDEAL && n.Val.Ctype != CTINT {
n.Val = toint(n.Val)
}
- if t != nil && !(Isint[t.Etype] != 0) {
+ if t != nil && Isint[t.Etype] == 0 {
Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
t = nil
}
return
case TARRAY:
- if !(Isslice(t) != 0) {
+ if !Isslice(t) {
goto bad
}
case CTCPLX:
overflow(n.Val, t)
}
- } else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit != 0 {
+ } else if et == TSTRING && (ct == CTINT || ct == CTRUNE) && explicit {
n.Val = tostr(n.Val)
} else {
goto bad
return
bad:
- if !(n.Diag != 0) {
- if !(t.Broke != 0) {
+ if n.Diag == 0 {
+ if t.Broke == 0 {
Yyerror("cannot convert %v to type %v", Nconv(n, 0), Tconv(t, 0))
}
n.Diag = 1
}
- if isideal(n.Type) != 0 {
+ if isideal(n.Type) {
defaultlit(&n, nil)
*np = n
}
return v
}
-func doesoverflow(v Val, t *Type) int {
+func doesoverflow(v Val, t *Type) bool {
switch v.Ctype {
case CTINT,
CTRUNE:
- if !(Isint[t.Etype] != 0) {
+ if Isint[t.Etype] == 0 {
Fatal("overflow: %v integer constant", Tconv(t, 0))
}
if Mpcmpfixfix(v.U.Xval, Minintval[t.Etype]) < 0 || Mpcmpfixfix(v.U.Xval, Maxintval[t.Etype]) > 0 {
- return 1
+ return true
}
case CTFLT:
- if !(Isfloat[t.Etype] != 0) {
+ if Isfloat[t.Etype] == 0 {
Fatal("overflow: %v floating-point constant", Tconv(t, 0))
}
if mpcmpfltflt(v.U.Fval, minfltval[t.Etype]) <= 0 || mpcmpfltflt(v.U.Fval, maxfltval[t.Etype]) >= 0 {
- return 1
+ return true
}
case CTCPLX:
- if !(Iscomplex[t.Etype] != 0) {
+ if Iscomplex[t.Etype] == 0 {
Fatal("overflow: %v complex constant", Tconv(t, 0))
}
if mpcmpfltflt(&v.U.Cval.Real, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Real, maxfltval[t.Etype]) >= 0 || mpcmpfltflt(&v.U.Cval.Imag, minfltval[t.Etype]) <= 0 || mpcmpfltflt(&v.U.Cval.Imag, maxfltval[t.Etype]) >= 0 {
- return 1
+ return true
}
}
- return 0
+ return false
}
func overflow(v Val, t *Type) {
return
}
- if !(doesoverflow(v, t) != 0) {
+ if !doesoverflow(v, t) {
return
}
return int(n.Val.Ctype)
}
-func Isconst(n *Node, ct int) int {
+func Isconst(n *Node, ct int) bool {
var t int
t = consttype(n)
// If the caller is asking for CTINT, allow CTRUNE too.
// Makes life easier for back ends.
- return bool2int(t == ct || (ct == CTINT && t == CTRUNE))
+ return t == ct || (ct == CTINT && t == CTRUNE)
}
func saveorig(n *Node) *Node {
if n.Type == nil {
return
}
- if !(okforconst[n.Type.Etype] != 0) && n.Type.Etype != TNIL {
+ if okforconst[n.Type.Etype] == 0 && n.Type.Etype != TNIL {
return
}
// merge adjacent constants in the argument list.
case OADDSTR:
for l1 = n.List; l1 != nil; l1 = l1.Next {
- if Isconst(l1.N, CTSTR) != 0 && l1.Next != nil && Isconst(l1.Next.N, CTSTR) != 0 {
+ if Isconst(l1.N, CTSTR) && l1.Next != nil && Isconst(l1.Next.N, CTSTR) {
// merge from l1 up to but not including l2
str = new(Strlit)
l2 = l1
- for l2 != nil && Isconst(l2.N, CTSTR) != 0 {
+ for l2 != nil && Isconst(l2.N, CTSTR) {
nr = l2.N
str.S += nr.Val.U.Sval.S
l2 = l2.Next
}
// collapse single-constant list to single constant.
- if count(n.List) == 1 && Isconst(n.List.N, CTSTR) != 0 {
+ if count(n.List) == 1 && Isconst(n.List.N, CTSTR) {
n.Op = OLITERAL
n.Val = n.List.N.Val
}
defaultlit(&nr, Types[TUINT])
n.Right = nr
- if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || !(Isint[nr.Type.Etype] != 0)) {
+ if nr.Type != nil && (Issigned[nr.Type.Etype] != 0 || Isint[nr.Type.Etype] == 0) {
goto illegal
}
if nl.Val.Ctype != CTRUNE {
// The default case above would print 'ideal % ideal',
// which is not quite an ideal error.
case OMOD<<16 | CTFLT:
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
Yyerror("illegal constant expression: floating-point % operation")
n.Diag = 1
}
switch uint32(n.Op)<<16 | uint32(v.Ctype) {
default:
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
Yyerror("illegal constant expression %v %v", Oconv(int(n.Op), 0), Tconv(nl.Type, 0))
n.Diag = 1
}
OCONV<<16 | CTRUNE,
OCONV<<16 | CTFLT,
OCONV<<16 | CTSTR:
- convlit1(&nl, n.Type, 1)
+ convlit1(&nl, n.Type, true)
v = nl.Val
mpnegflt(&v.U.Cval.Imag)
case ONOT<<16 | CTBOOL:
- if !(v.U.Bval != 0) {
+ if v.U.Bval == 0 {
goto settrue
}
goto setfalse
settrue:
norig = saveorig(n)
- *n = *Nodbool(1)
+ *n = *Nodbool(true)
n.Orig = norig
return
setfalse:
norig = saveorig(n)
- *n = *Nodbool(0)
+ *n = *Nodbool(false)
n.Orig = norig
return
illegal:
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
Yyerror("illegal constant expression: %v %v %v", Tconv(nl.Type, 0), Oconv(int(n.Op), 0), Tconv(nr.Type, 0))
n.Diag = 1
}
switch v.Ctype {
default:
Fatal("nodlit ctype %d", v.Ctype)
- fallthrough
case CTSTR:
n.Type = idealstring
var k1 int
var k2 int
- if n == nil || !(isideal(n.Type) != 0) {
+ if n == nil || !isideal(n.Type) {
return CTxxx
}
var t1 *Type
n = *np
- if n == nil || !(isideal(n.Type) != 0) {
+ if n == nil || !isideal(n.Type) {
return
}
if n.Val.Ctype == CTNIL {
lineno = int32(lno)
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
Yyerror("use of untyped nil")
n.Diag = 1
}
if l.Type == nil || r.Type == nil {
return
}
- if !(isideal(l.Type) != 0) {
+ if !isideal(l.Type) {
Convlit(rp, l.Type)
return
}
- if !(isideal(r.Type) != 0) {
+ if !isideal(r.Type) {
Convlit(lp, r.Type)
return
}
- if !(force != 0) {
+ if force == 0 {
return
}
if l.Type.Etype == TBOOL {
return stringsCompare(l.Val.U.Sval.S, r.Val.U.Sval.S)
}
-func Smallintconst(n *Node) int {
- if n.Op == OLITERAL && Isconst(n, CTINT) != 0 && n.Type != nil {
+func Smallintconst(n *Node) bool {
+ if n.Op == OLITERAL && Isconst(n, CTINT) && n.Type != nil {
switch Simtype[n.Type.Etype] {
case TINT8,
TUINT8,
TUINT32,
TBOOL,
TPTR32:
- return 1
+ return true
case TIDEAL,
TINT64,
if Mpcmpfixfix(n.Val.U.Xval, Minintval[TINT32]) < 0 || Mpcmpfixfix(n.Val.U.Xval, Maxintval[TINT32]) > 0 {
break
}
- return 1
+ return true
}
}
- return 0
+ return false
}
func nonnegconst(n *Node) int {
switch val.Ctype {
default:
Fatal("convconst ctype=%d %v", val.Ctype, Tconv(t, obj.FmtLong))
- fallthrough
case CTINT,
CTRUNE:
// may be known at compile time, are not Go language constants.
// Only called for expressions known to evaluated to compile-time
// constants.
-func isgoconst(n *Node) int {
+func isgoconst(n *Node) bool {
var l *Node
var t *Type
OCOMPLEX,
OREAL,
OIMAG:
- if isgoconst(n.Left) != 0 && (n.Right == nil || isgoconst(n.Right) != 0) {
- return 1
+ if isgoconst(n.Left) && (n.Right == nil || isgoconst(n.Right)) {
+ return true
}
case OCONV:
- if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) != 0 {
- return 1
+ if okforconst[n.Type.Etype] != 0 && isgoconst(n.Left) {
+ return true
}
case OLEN,
OCAP:
l = n.Left
- if isgoconst(l) != 0 {
- return 1
+ if isgoconst(l) {
+ return true
}
// Special case: len/cap is constant when applied to array or
if t != nil && Isptr[t.Etype] != 0 {
t = t.Type
}
- if Isfixedarray(t) != 0 && !(hascallchan(l) != 0) {
- return 1
+ if Isfixedarray(t) && !hascallchan(l) {
+ return true
}
case OLITERAL:
if n.Val.Ctype != CTNIL {
- return 1
+ return true
}
case ONAME:
l = n.Sym.Def
if l != nil && l.Op == OLITERAL && n.Val.Ctype != CTNIL {
- return 1
+ return true
}
case ONONAME:
if n.Sym.Def != nil && n.Sym.Def.Op == OIOTA {
- return 1
+ return true
}
// Only constant calls are unsafe.Alignof, Offsetof, and Sizeof.
break
}
if l.Sym.Name == "Alignof" || l.Sym.Name == "Offsetof" || l.Sym.Name == "Sizeof" {
- return 1
+ return true
}
}
//dump("nonconst", n);
- return 0
+ return false
}
-func hascallchan(n *Node) int {
+func hascallchan(n *Node) bool {
var l *NodeList
if n == nil {
- return 0
+ return false
}
switch n.Op {
case OAPPEND,
OREAL,
ORECOVER,
ORECV:
- return 1
+ return true
}
- if hascallchan(n.Left) != 0 || hascallchan(n.Right) != 0 {
- return 1
+ if hascallchan(n.Left) || hascallchan(n.Right) {
+ return true
}
for l = n.List; l != nil; l = l.Next {
- if hascallchan(l.N) != 0 {
- return 1
+ if hascallchan(l.N) {
+ return true
}
}
for l = n.Rlist; l != nil; l = l.Next {
- if hascallchan(l.N) != 0 {
- return 1
+ if hascallchan(l.N) {
+ return true
}
}
- return 0
+ return false
}
return a<<16 | b
}
-func overlap_cplx(f *Node, t *Node) int {
+func overlap_cplx(f *Node, t *Node) bool {
// check whether f and t could be overlapping stack references.
// not exact, because it's hard to check for the stack register
// in portable code. close enough: worst case we will allocate
// an extra temporary and the registerizer will clean it up.
- return bool2int(f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset)
+ return f.Op == OINDREG && t.Op == OINDREG && f.Xoffset+f.Type.Width >= t.Xoffset && t.Xoffset+t.Type.Width >= f.Xoffset
}
func Complexbool(op int, nl *Node, nr *Node, true_ bool, likely int, to *obj.Prog) {
// make both sides addable in ullman order
if nr != nil {
- if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+ if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
}
}
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
var tc int
var t *Type
- if !(nc.Addable != 0) {
+ if nc.Addable == 0 {
Fatal("subnode not addable")
}
n.Val.Ctype = CTFLT
n.Type = t
- if !(Isfloat[t.Etype] != 0) {
+ if Isfloat[t.Etype] == 0 {
Fatal("nodfconst: bad type %v", Tconv(t, 0))
}
}
/*
* cplx.c
*/
-func Complexop(n *Node, res *Node) int {
+func Complexop(n *Node, res *Node) bool {
if n != nil && n.Type != nil {
if Iscomplex[n.Type.Etype] != 0 {
goto maybe
//dump("\ncomplex-no", n);
no:
- return 0
+ return false
//dump("\ncomplex-yes", n);
yes:
- return 1
+ return true
}
func Complexmove(f *Node, t *Node) {
Dump("complexmove-t", t)
}
- if !(t.Addable != 0) {
+ if t.Addable == 0 {
Fatal("complexmove: to not addable")
}
switch uint32(ft)<<16 | uint32(tt) {
default:
Fatal("complexmove: unknown conversion: %v -> %v\n", Tconv(f.Type, 0), Tconv(t.Type, 0))
- fallthrough
// complex to complex move/convert.
// make f addable.
TCOMPLEX64<<16 | TCOMPLEX128,
TCOMPLEX128<<16 | TCOMPLEX64,
TCOMPLEX128<<16 | TCOMPLEX128:
- if !(f.Addable != 0) || overlap_cplx(f, t) != 0 {
+ if f.Addable == 0 || overlap_cplx(f, t) {
Tempname(&tmp, f.Type)
Complexmove(f, &tmp)
f = &tmp
case OREAL,
OIMAG:
nl = n.Left
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
Tempname(&tmp, nl.Type)
Complexgen(nl, &tmp)
nl = &tmp
tr = Simsimtype(n.Type)
tr = cplxsubtype(tr)
if tl != tr {
- if !(n.Addable != 0) {
+ if n.Addable == 0 {
Tempname(&n1, n.Type)
Complexmove(n, &n1)
n = &n1
return
}
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
Thearch.Igen(res, &n1, nil)
Thearch.Cgen(n, &n1)
Thearch.Regfree(&n1)
default:
Dump("complexgen: unknown op", n)
Fatal("complexgen: unknown op %v", Oconv(int(n.Op), 0))
- fallthrough
case ODOT,
ODOTPTR,
// make both sides addable in ullman order
if nr != nil {
- if nl.Ullman > nr.Ullman && !(nl.Addable != 0) {
+ if nl.Ullman > nr.Ullman && nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
Tempname(&tnr, nr.Type)
Thearch.Cgen(nr, &tnr)
nr = &tnr
}
}
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
Tempname(&tnl, nl.Type)
Thearch.Cgen(nl, &tnl)
nl = &tnl
"strings"
)
-func dflag() int {
- if !(Debug['d'] != 0) {
- return 0
+func dflag() bool {
+ if Debug['d'] == 0 {
+ return false
}
if Debug['y'] != 0 {
- return 1
+ return true
}
if incannedimport != 0 {
- return 0
+ return false
}
- return 1
+ return true
}
/*
d = push()
dcopy(d, s)
- if dflag() != 0 {
+ if dflag() {
fmt.Printf("\t%v push %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
}
return d
lno = int(s.Lastlineno)
dcopy(s, d)
d.Lastlineno = int32(lno)
- if dflag() != 0 {
+ if dflag() {
fmt.Printf("\t%v pop %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), s.Def)
}
}
s = n.Sym
// kludgy: typecheckok means we're past parsing. Eg genwrapper may declare out of package names later.
- if importpkg == nil && !(typecheckok != 0) && s.Pkg != localpkg {
+ if importpkg == nil && typecheckok == 0 && s.Pkg != localpkg {
Yyerror("cannot declare name %v", Sconv(s, 0))
}
gen = 0
if ctxt == PEXTERN {
externdcl = list(externdcl, n)
- if dflag() != 0 {
+ if dflag() {
fmt.Printf("\t%v global decl %v %p\n", Ctxt.Line(int(lineno)), Sconv(s, 0), n)
}
} else {
* new_name_list (type | [type] = expr_list)
*/
func variter(vl *NodeList, t *Node, el *NodeList) *NodeList {
- var doexpr int
+ var doexpr bool
var v *Node
var e *Node
var as2 *Node
var init *NodeList
init = nil
- doexpr = bool2int(el != nil)
+ doexpr = el != nil
if count(el) == 1 && count(vl) > 1 {
e = el.N
}
for ; vl != nil; vl = vl.Next {
- if doexpr != 0 {
+ if doexpr {
if el == nil {
Yyerror("missing expression in var declaration")
break
/*
* := declarations
*/
-func colasname(n *Node) int {
+func colasname(n *Node) bool {
switch n.Op {
case ONAME,
ONONAME,
OPACK,
OTYPE,
OLITERAL:
- return bool2int(n.Sym != nil)
+ return n.Sym != nil
}
- return 0
+ return false
}
func colasdefn(left *NodeList, defn *Node) {
if isblank(n) {
continue
}
- if !(colasname(n) != 0) {
+ if !colasname(n) {
yyerrorl(int(defn.Lineno), "non-name %v on left side of :=", Nconv(n, 0))
nerr++
continue
if t.Thistuple != 0 {
for ft = getthisx(t).Type; ft != nil; ft = ft.Down {
- if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname // no need for newname(ft->nname->sym)
if t.Intuple != 0 {
for ft = getinargx(t).Type; ft != nil; ft = ft.Down {
- if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname
if t.Outtuple != 0 {
for ft = getoutargx(t).Type; ft != nil; ft = ft.Down {
- if !(ft.Nname != nil) || !(ft.Nname.Sym != nil) {
+ if ft.Nname == nil || ft.Nname.Sym == nil {
continue
}
n = ft.Nname
tp = &f.Down
}
- for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
uniqgen++
checkdupfields(t.Type, "field")
- if !(t.Broke != 0) {
+ if t.Broke == 0 {
checkwidth(t)
}
tp = &f.Down
}
- for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
}
}
- for f = t.Type; f != nil && !(t.Broke != 0); f = f.Down {
+ for f = t.Type; f != nil && t.Broke == 0; f = f.Down {
if f.Broke != 0 {
t.Broke = 1
}
}
n = Nod(ODCLFIELD, n, t)
if n.Right != nil && n.Right.Op == ODDD {
- if !(input != 0) {
+ if input == 0 {
Yyerror("cannot use ... in output argument list")
} else if l.Next != nil {
Yyerror("can only use ... as final argument in list")
* *struct{} as the receiver.
* (See fakethis above.)
*/
-func isifacemethod(f *Type) int {
+func isifacemethod(f *Type) bool {
var rcvr *Type
var t *Type
rcvr = getthisx(f).Type
if rcvr.Sym != nil {
- return 0
+ return false
}
t = rcvr.Type
- if !(Isptr[t.Etype] != 0) {
- return 0
+ if Isptr[t.Etype] == 0 {
+ return false
}
t = t.Type
if t.Sym != nil || t.Etype != TSTRUCT || t.Type != nil {
- return 0
+ return false
}
- return 1
+ return true
}
/*
}
}
- if local && !(pa.Local != 0) {
+ if local && pa.Local == 0 {
// defining method on non-local type.
Yyerror("cannot define new methods on non-local type %v", Tconv(pa, 0))
}
f = structfield(n)
- f.Nointerface = uint8(bool2int(nointerface))
+ f.Nointerface = nointerface
// during import unexported method names should be in the type's package
if importpkg != nil && f.Sym != nil && !exportname(f.Sym.Name) && f.Sym.Pkg != structpkg {
func visit(n *Node) uint32 {
var min uint32
- var recursive uint32
+ var recursive bool
var l *NodeList
var block *NodeList
// If visitcodelist found its way back to n->walkgen, then this
// block is a set of mutually recursive functions.
// Otherwise it's just a lone function that does not recurse.
- recursive = uint32(bool2int(min == n.Walkgen))
+ recursive = min == n.Walkgen
// Remove connected component from stack.
// Mark walkgen so that future visits return a large number
l.Next = nil
// Run escape analysis on this set of functions.
- analyze(block, int(recursive))
+ analyze(block, recursive)
}
return min
dstcount int
edgecount int
noesc *NodeList
- recursive int
+ recursive bool
}
var tags [16]*Strlit
return EscReturn | em<<EscBits
}
-func analyze(all *NodeList, recursive int) {
+func analyze(all *NodeList, recursive bool) {
var l *NodeList
var es EscState
var e *EscState
}
// in a mutually recursive group we lose track of the return values
- if e.recursive != 0 {
+ if e.recursive {
for ll = Curfn.Dcl; ll != nil; ll = ll.Next {
if ll.N.Op == ONAME && ll.N.Class == PPARAMOUT {
escflows(e, &e.theSink, ll.N)
switch n.Op {
case OLABEL:
- if !(n.Left != nil) || !(n.Left.Sym != nil) {
+ if n.Left == nil || n.Left.Sym == nil {
Fatal("esc:label without label: %v", Nconv(n, obj.FmtSign))
}
n.Left.Sym.Label = &nonlooping
case OGOTO:
- if !(n.Left != nil) || !(n.Left.Sym != nil) {
+ if n.Left == nil || n.Left.Sym == nil {
Fatal("esc:goto without label: %v", Nconv(n, obj.FmtSign))
}
// Everything but fixed array is a dereference.
case ORANGE:
- if Isfixedarray(n.Type) != 0 && n.List != nil && n.List.Next != nil {
+ if Isfixedarray(n.Type) && n.List != nil && n.List.Next != nil {
escassign(e, n.List.Next.N, n.Right)
}
escassign(e, &e.theSink, n.Left)
case OAPPEND:
- if !(n.Isddd != 0) {
+ if n.Isddd == 0 {
for ll = n.List.Next; ll != nil; ll = ll.Next {
escassign(e, &e.theSink, ll.N) // lose track of assign to dereference
}
escassign(e, n, n.Left)
case OARRAYLIT:
- if Isslice(n.Type) != 0 {
+ if Isslice(n.Type) {
n.Esc = EscNone // until proven otherwise
e.noesc = list(e.noesc, n)
n.Escloopdepth = e.loopdepth
continue
}
a = v.Closure
- if !(v.Byval != 0) {
+ if v.Byval == 0 {
a = Nod(OADDR, a, nil)
a.Lineno = v.Lineno
a.Escloopdepth = e.loopdepth
default:
Dump("dst", dst)
Fatal("escassign: unexpected dst")
- fallthrough
case OARRAYLIT,
OCLOSURE,
return
case OINDEX:
- if Isfixedarray(dst.Left.Type) != 0 {
+ if Isfixedarray(dst.Left.Type) {
escassign(e, dst.Left, src)
return
}
// Index of array preserves input value.
case OINDEX:
- if Isfixedarray(src.Left.Type) != 0 {
+ if Isfixedarray(src.Left.Type) {
escassign(e, dst, src.Left)
}
switch n.Op {
default:
Fatal("esccall")
- fallthrough
case OCALLFUNC:
fn = n.Left
for lr = fn.Ntype.List; ll != nil && lr != nil; (func() { ll = ll.Next; lr = lr.Next })() {
src = ll.N
- if lr.N.Isddd != 0 && !(n.Isddd != 0) {
+ if lr.N.Isddd != 0 && n.Isddd == 0 {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
for t = getinargx(fntype).Type; ll != nil; ll = ll.Next {
src = ll.N
- if t.Isddd != 0 && !(n.Isddd != 0) {
+ if t.Isddd != 0 && n.Isddd == 0 {
// Introduce ODDDARG node to represent ... allocation.
src = Nod(ODDDARG, nil, nil)
func escwalk(e *EscState, level int, dst *Node, src *Node) {
var ll *NodeList
- var leaks int
+ var leaks bool
var newlevel int
if src.Walkgen == walkgen && src.Esclevel <= int32(level) {
// The second clause is for values pointed at by an object passed to a call
// that returns something reached via indirect from the object.
// We don't know which result it is or how many indirects, so we treat it as leaking.
- leaks = bool2int(level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type))
+ leaks = level <= 0 && dst.Escloopdepth < src.Escloopdepth || level < 0 && dst == &e.funcParam && haspointers(src.Type)
switch src.Op {
case ONAME:
- if src.Class == PPARAM && (leaks != 0 || dst.Escloopdepth < 0) && src.Esc != EscHeap {
+ if src.Class == PPARAM && (leaks || dst.Escloopdepth < 0) && src.Esc != EscHeap {
src.Esc = EscScope
if Debug['m'] != 0 {
Warnl(int(src.Lineno), "leaking param: %v", Nconv(src, obj.FmtShort))
// Treat a PPARAMREF closure variable as equivalent to the
// original variable.
if src.Class == PPARAMREF {
- if leaks != 0 && Debug['m'] != 0 {
+ if leaks && Debug['m'] != 0 {
Warnl(int(src.Lineno), "leaking closure reference %v", Nconv(src, obj.FmtShort))
}
escwalk(e, level, dst, src.Closure)
case OPTRLIT,
OADDR:
- if leaks != 0 {
+ if leaks {
src.Esc = EscHeap
addrescapes(src.Left)
if Debug['m'] != 0 {
escwalk(e, newlevel, dst, src.Left)
case OARRAYLIT:
- if Isfixedarray(src.Type) != 0 {
+ if Isfixedarray(src.Type) {
break
}
fallthrough
OCLOSURE,
OCALLPART,
ORUNESTR:
- if leaks != 0 {
+ if leaks {
src.Esc = EscHeap
if Debug['m'] != 0 {
Warnl(int(src.Lineno), "%v escapes to heap", Nconv(src, obj.FmtShort))
escwalk(e, level, dst, src.Left)
case OINDEX:
- if Isfixedarray(src.Left.Type) != 0 {
+ if Isfixedarray(src.Left.Type) {
escwalk(e, level, dst, src.Left)
break
}
return unicode.IsUpper(r)
}
-func initname(s string) int {
- return bool2int(s == "init")
+func initname(s string) bool {
+ return s == "init"
}
// exportedsym reports whether a symbol will be visible
// to files that import our package.
-func exportedsym(sym *Sym) int {
+func exportedsym(sym *Sym) bool {
// Builtins are visible everywhere.
if sym.Pkg == builtinpkg || sym.Origpkg == builtinpkg {
- return 1
+ return true
}
- return bool2int(sym.Pkg == localpkg && exportname(sym.Name))
+ return sym.Pkg == localpkg && exportname(sym.Name)
}
func autoexport(n *Node, ctxt int) {
}
// -A is for cmd/gc/mkbuiltin script, so export everything
- if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) != 0 {
+ if Debug['A'] != 0 || exportname(n.Sym.Name) || initname(n.Sym.Name) {
exportsym(n)
}
- if asmhdr != "" && n.Sym.Pkg == localpkg && !(n.Sym.Flags&SymAsm != 0) {
+ if asmhdr != "" && n.Sym.Pkg == localpkg && n.Sym.Flags&SymAsm == 0 {
n.Sym.Flags |= SymAsm
asmlist = list(asmlist, n)
}
}
p.Exported = 1
suffix = ""
- if !(p.Direct != 0) {
+ if p.Direct == 0 {
suffix = " // indirect"
}
fmt.Fprintf(bout, "\timport %s \"%v\"%s\n", p.Name, Zconv(p.Path, 0), suffix)
func reexportdep(n *Node) {
var t *Type
- if !(n != nil) {
+ if n == nil {
return
}
}
// nodes for method calls.
- if !(n.Type != nil) || n.Type.Thistuple > 0 {
+ if n.Type == nil || n.Type.Thistuple > 0 {
break
}
fallthrough
// fallthrough
case PEXTERN:
- if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
+ if n.Sym != nil && !exportedsym(n.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport name %v\n", Sconv(n.Sym, 0))
}
if Isptr[t.Etype] != 0 {
t = t.Type
}
- if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport type %v from declaration\n", Sconv(t.Sym, 0))
}
if Isptr[t.Etype] != 0 {
t = t.Type
}
- if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport literal type %v\n", Sconv(t.Sym, 0))
}
// fallthrough
case OTYPE:
- if n.Sym != nil && !(exportedsym(n.Sym) != 0) {
+ if n.Sym != nil && !exportedsym(n.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport literal/type %v\n", Sconv(n.Sym, 0))
}
OMAKECHAN:
t = n.Type
- if !(t.Sym != nil) && t.Type != nil {
+ if t.Sym == nil && t.Type != nil {
t = t.Type
}
- if t != nil && t.Sym != nil && t.Sym.Def != nil && !(exportedsym(t.Sym) != 0) {
+ if t != nil && t.Sym != nil && t.Sym.Def != nil && !exportedsym(t.Sym) {
if Debug['E'] != 0 {
fmt.Printf("reexport type for expression %v\n", Sconv(t.Sym, 0))
}
t = n.Type // may or may not be specified
dumpexporttype(t)
- if t != nil && !(isideal(t) != 0) {
+ if t != nil && !isideal(t) {
fmt.Fprintf(bout, "\tconst %v %v = %v\n", Sconv(s, obj.FmtSharp), Tconv(t, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
} else {
fmt.Fprintf(bout, "\tconst %v = %v\n", Sconv(s, obj.FmtSharp), Vconv(&n.Val, obj.FmtSharp))
fmt.Fprintf(bout, "\ttype %v %v\n", Sconv(t.Sym, obj.FmtSharp), Tconv(t, obj.FmtSharp|obj.FmtLong))
for i = 0; i < n; i++ {
f = m[i]
- if f.Nointerface != 0 {
+ if f.Nointerface {
fmt.Fprintf(bout, "\t//go:nointerface\n")
}
if f.Type.Nname != nil && f.Type.Nname.Inl != nil { // nname was set by caninl
// mark the symbol so it is not reexported
if s.Def == nil {
- if exportname(s.Name) || initname(s.Name) != 0 {
+ if exportname(s.Name) || initname(s.Name) {
s.Flags |= SymExport
} else {
s.Flags |= SymPackage // package scope
Yyerror("conflicting names %s and %s for package \"%v\"", p.Name, s.Name, Zconv(p.Path, 0))
}
- if !(incannedimport != 0) && myimportpath != "" && z.S == myimportpath {
+ if incannedimport == 0 && myimportpath != "" && z.S == myimportpath {
Yyerror("import \"%v\": package depends on \"%v\" (import cycle)", Zconv(importpkg.Path, 0), Zconv(z, 0))
errorexit()
}
c = flag & obj.FmtShort
- if !(c != 0) && n.Ullman != 0 {
+ if c == 0 && n.Ullman != 0 {
fp += fmt.Sprintf(" u(%d)", n.Ullman)
}
- if !(c != 0) && n.Addable != 0 {
+ if c == 0 && n.Addable != 0 {
fp += fmt.Sprintf(" a(%d)", n.Addable)
}
- if !(c != 0) && n.Vargen != 0 {
+ if c == 0 && n.Vargen != 0 {
fp += fmt.Sprintf(" g(%d)", n.Vargen)
}
fp += fmt.Sprintf(" l(%d)", n.Lineno)
}
- if !(c != 0) && n.Xoffset != BADWIDTH {
+ if c == 0 && n.Xoffset != BADWIDTH {
fp += fmt.Sprintf(" x(%d%+d)", n.Xoffset, n.Stkdelta)
}
fp += fmt.Sprintf(" esc(no)")
case EscNever:
- if !(c != 0) {
+ if c == 0 {
fp += fmt.Sprintf(" esc(N)")
}
fp += fmt.Sprintf(" ld(%d)", n.Escloopdepth)
}
- if !(c != 0) && n.Typecheck != 0 {
+ if c == 0 && n.Typecheck != 0 {
fp += fmt.Sprintf(" tc(%d)", n.Typecheck)
}
- if !(c != 0) && n.Dodata != 0 {
+ if c == 0 && n.Dodata != 0 {
fp += fmt.Sprintf(" dd(%d)", n.Dodata)
}
fp += fmt.Sprintf(" assigned")
}
- if !(c != 0) && n.Used != 0 {
+ if c == 0 && n.Used != 0 {
fp += fmt.Sprintf(" used(%d)", n.Used)
}
return fp
var p string
- if s.Pkg != nil && !(flag&obj.FmtShort != 0 /*untyped*/) {
+ if s.Pkg != nil && flag&obj.FmtShort == 0 /*untyped*/ {
switch fmtmode {
case FErr: // This is for the user
if s.Pkg == localpkg {
}
// Unless the 'l' flag was specified, if the type has a name, just print that name.
- if !(flag&obj.FmtLong != 0 /*untyped*/) && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
+ if flag&obj.FmtLong == 0 /*untyped*/ && t.Sym != nil && t.Etype != TFIELD && t != Types[t.Etype] {
switch fmtmode {
case FTypeId:
if flag&obj.FmtShort != 0 /*untyped*/ {
return fp
case TFIELD:
- if !(flag&obj.FmtShort != 0 /*untyped*/) {
+ if flag&obj.FmtShort == 0 /*untyped*/ {
s = t.Sym
// Take the name from the original, lest we substituted it with ~r%d or ~b%d.
}
}
- if s != nil && !(t.Embedded != 0) {
+ if s != nil && t.Embedded == 0 {
if t.Funarg != 0 {
fp += fmt.Sprintf("%v ", Nconv(t.Nname, 0))
} else if flag&obj.FmtLong != 0 /*untyped*/ {
fp += fmt.Sprintf("%v", Tconv(t.Type, 0))
}
- if !(flag&obj.FmtShort != 0 /*untyped*/) && t.Note != nil {
+ if flag&obj.FmtShort == 0 /*untyped*/ && t.Note != nil {
fp += fmt.Sprintf(" \"%v\"", Zconv(t.Note, 0))
}
return fp
}
// Statements which may be rendered with a simplestmt as init.
-func stmtwithinit(op int) int {
+func stmtwithinit(op int) bool {
switch op {
case OIF,
OFOR,
OSWITCH:
- return 1
+ return true
}
- return 0
+ return false
}
func stmtfmt(n *Node) string {
var f string
- var complexinit int
- var simpleinit int
- var extrablock int
+ var complexinit bool
+ var simpleinit bool
+ var extrablock bool
// some statements allow for an init, but at most one,
// but we may have an arbitrary number added, eg by typecheck
// block starting with the init statements.
// if we can just say "for" n->ninit; ... then do so
- simpleinit = bool2int(n.Ninit != nil && !(n.Ninit.Next != nil) && !(n.Ninit.N.Ninit != nil) && stmtwithinit(int(n.Op)) != 0)
+ simpleinit = n.Ninit != nil && n.Ninit.Next == nil && n.Ninit.N.Ninit == nil && stmtwithinit(int(n.Op))
// otherwise, print the inits as separate statements
- complexinit = bool2int(n.Ninit != nil && !(simpleinit != 0) && (fmtmode != FErr))
+ complexinit = n.Ninit != nil && !simpleinit && (fmtmode != FErr)
// but if it was for if/for/switch, put in an extra surrounding block to limit the scope
- extrablock = bool2int(complexinit != 0 && stmtwithinit(int(n.Op)) != 0)
+ extrablock = complexinit && stmtwithinit(int(n.Op))
- if extrablock != 0 {
+ if extrablock {
f += "{"
}
- if complexinit != 0 {
+ if complexinit {
f += fmt.Sprintf(" %v; ", Hconv(n.Ninit, 0))
}
break
}
- if n.Colas != 0 && !(complexinit != 0) {
+ if n.Colas != 0 && !complexinit {
f += fmt.Sprintf("%v := %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
} else {
f += fmt.Sprintf("%v = %v", Nconv(n.Left, 0), Nconv(n.Right, 0))
f += fmt.Sprintf("%v %v= %v", Nconv(n.Left, 0), Oconv(int(n.Etype), obj.FmtSharp), Nconv(n.Right, 0))
case OAS2:
- if n.Colas != 0 && !(complexinit != 0) {
+ if n.Colas != 0 && !complexinit {
f += fmt.Sprintf("%v := %v", Hconv(n.List, obj.FmtComma), Hconv(n.Rlist, obj.FmtComma))
break
}
f += fmt.Sprintf("defer %v", Nconv(n.Left, 0))
case OIF:
- if simpleinit != 0 {
+ if simpleinit {
f += fmt.Sprintf("if %v; %v { %v }", Nconv(n.Ninit.N, 0), Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
} else {
f += fmt.Sprintf("if %v { %v }", Nconv(n.Ntest, 0), Hconv(n.Nbody, 0))
}
f += "for"
- if simpleinit != 0 {
+ if simpleinit {
f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
} else if n.Nincr != nil {
f += " ;"
if n.Nincr != nil {
f += fmt.Sprintf("; %v", Nconv(n.Nincr, 0))
- } else if simpleinit != 0 {
+ } else if simpleinit {
f += ";"
}
}
f += fmt.Sprintf("%v", Oconv(int(n.Op), obj.FmtSharp))
- if simpleinit != 0 {
+ if simpleinit {
f += fmt.Sprintf(" %v;", Nconv(n.Ninit.N, 0))
}
if n.Ntest != nil {
}
ret:
- if extrablock != 0 {
+ if extrablock {
f += "}"
}
var f string
var nprec int
- var ptrlit int
+ var ptrlit bool
var l *NodeList
for n != nil && n.Implicit != 0 && (n.Op == OIND || n.Op == OADDR) {
return f
case OCOMPLIT:
- ptrlit = bool2int(n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0)
+ ptrlit = n.Right != nil && n.Right.Implicit != 0 && n.Right.Type != nil && Isptr[n.Right.Type.Etype] != 0
if fmtmode == FErr {
- if n.Right != nil && n.Right.Type != nil && !(n.Implicit != 0) {
- if ptrlit != 0 {
+ if n.Right != nil && n.Right.Type != nil && n.Implicit == 0 {
+ if ptrlit {
f += fmt.Sprintf("&%v literal", Tconv(n.Right.Type.Type, 0))
return f
} else {
return f
}
- if fmtmode == FExp && ptrlit != 0 {
+ if fmtmode == FExp && ptrlit {
// typecheck has overwritten OIND by OTYPE with pointer type.
f += fmt.Sprintf("(&%v{ %v })", Tconv(n.Right.Type.Type, 0), Hconv(n.List, obj.FmtComma))
return f
}
}
- if !(n.Implicit != 0) {
+ if n.Implicit == 0 {
f += "})"
return f
}
}
}
- if !(n.Left != nil) && n.Right != nil {
+ if n.Left == nil && n.Right != nil {
f += fmt.Sprintf(":%v", Nconv(n.Right, 0))
return f
}
- if n.Left != nil && !(n.Right != nil) {
+ if n.Left != nil && n.Right == nil {
f += fmt.Sprintf("%v:", Nconv(n.Left, 0))
return f
}
func nodedump(n *Node, flag int) string {
var fp string
- var recur int
+ var recur bool
if n == nil {
return fp
}
- recur = bool2int(!(flag&obj.FmtShort != 0 /*untyped*/))
+ recur = flag&obj.FmtShort == 0 /*untyped*/
- if recur != 0 {
+ if recur {
fp = indent(fp)
if dumpdepth > 10 {
fp += "..."
} else {
fp += fmt.Sprintf("%v%v", Oconv(int(n.Op), 0), Jconv(n, 0))
}
- if recur != 0 && n.Type == nil && n.Ntype != nil {
+ if recur && n.Type == nil && n.Ntype != nil {
fp = indent(fp)
fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
}
case OTYPE:
fp += fmt.Sprintf("%v %v%v type=%v", Oconv(int(n.Op), 0), Sconv(n.Sym, 0), Jconv(n, 0), Tconv(n.Type, 0))
- if recur != 0 && n.Type == nil && n.Ntype != nil {
+ if recur && n.Type == nil && n.Ntype != nil {
fp = indent(fp)
fp += fmt.Sprintf("%v-ntype%v", Oconv(int(n.Op), 0), Nconv(n.Ntype, 0))
}
fp += fmt.Sprintf(" %v", Tconv(n.Type, 0))
}
- if recur != 0 {
+ if recur {
if n.Left != nil {
fp += fmt.Sprintf("%v", Nconv(n.Left, 0))
}
// is always a heap pointer anyway.
case ODOT,
OINDEX:
- if !(Isslice(n.Left.Type) != 0) {
+ if !Isslice(n.Left.Type) {
addrescapes(n.Left)
}
}
switch n.Left.Op {
default:
Fatal("cgen_proc: unknown call %v", Oconv(int(n.Left.Op), 0))
- fallthrough
case OCALLMETH:
Cgen_callmeth(n.Left, proc)
Fatal("cgen_dcl")
}
- if !(n.Class&PHEAP != 0) {
+ if n.Class&PHEAP == 0 {
return
}
if compiling_runtime != 0 {
switch nr.Op {
case ONAME:
- if !(nr.Class&PHEAP != 0) && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
+ if nr.Class&PHEAP == 0 && nr.Class != PEXTERN && nr.Class != PFUNC && nr.Class != PPARAMREF {
gused(nr)
}
tmpcap = tmplen
}
- if isnil(n.Left) != 0 {
+ if isnil(n.Left) {
Tempname(&src, n.Left.Type)
Thearch.Cgen(n.Left, &src)
} else {
}
if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
- if !(Isptr[n.Left.Type.Etype] != 0) {
+ if Isptr[n.Left.Type.Etype] == 0 {
Fatal("slicearr is supposed to work on pointer: %v\n", Nconv(n, obj.FmtSign))
}
Thearch.Cgen(&src, base)
var p2 *obj.Prog
var p3 *obj.Prog
var lab *Label
- var wasregalloc int32
//dump("gen", n);
lno = setlineno(n)
- wasregalloc = int32(Thearch.Anyregalloc())
+ wasregalloc := Thearch.Anyregalloc()
if n == nil {
goto ret
cgen_dcl(n.Left)
case OAS:
- if gen_as_init(n) != 0 {
+ if gen_as_init(n) {
break
}
Cgen_as(n.Left, n.Right)
}
ret:
- if int32(Thearch.Anyregalloc()) != wasregalloc {
+ if Thearch.Anyregalloc() != wasregalloc {
Dump("node", n)
Fatal("registers left allocated")
}
return
}
- if nr == nil || iszero(nr) != 0 {
+ if nr == nil || iszero(nr) {
// heaps should already be clear
if nr == nil && (nl.Class&PHEAP != 0) {
return
if tl == nil {
return
}
- if Isfat(tl) != 0 {
+ if Isfat(tl) {
if nl.Op == ONAME {
Gvardef(nl)
}
continue
}
- if lab.Use == nil && !(lab.Used != 0) {
+ if lab.Use == nil && lab.Used == 0 {
yyerrorl(int(lab.Def.Lineno), "label %v defined and not used", Sconv(lab.Sym, 0))
continue
}
import (
"bytes"
"cmd/internal/obj"
- "encoding/binary"
)
// Copyright 2009 The Go Authors. All rights reserved.
Addable uint8
Trecur uint8
Etype uint8
- Bounded uint8
+ Bounded bool
Class uint8
Method uint8
Embedded uint8
Likely int8
Hasbreak uint8
Needzero uint8
- Needctxt uint8
+ Needctxt bool
Esc uint
Funcdepth int
Type *Type
type Type struct {
Etype uint8
- Nointerface uint8
+ Nointerface bool
Noalg uint8
Chan uint8
Trecur uint8
var Use_sse int
+var hunk string
+
+var nhunk int32
+
var thunk int32
var Funcdepth int
)
type Arch struct {
- ByteOrder binary.ByteOrder
Thechar int
Thestring string
Thelinkarch *obj.LinkArch
REGSP int
REGCTXT int
MAXWIDTH int64
- Anyregalloc func() int
+ Anyregalloc func() bool
Betypeinit func()
Bgen func(*Node, bool, int, *obj.Prog)
Cgen func(*Node, *Node)
Proginfo func(*ProgInfo, *obj.Prog)
Regalloc func(*Node, *Type, *Node)
Regfree func(*Node)
- Regtyp func(*obj.Addr) int
- Sameaddr func(*obj.Addr, *obj.Addr) int
- Smallindir func(*obj.Addr, *obj.Addr) int
- Stackaddr func(*obj.Addr) int
+ Regtyp func(*obj.Addr) bool
+ Sameaddr func(*obj.Addr, *obj.Addr) bool
+ Smallindir func(*obj.Addr, *obj.Addr) bool
+ Stackaddr func(*obj.Addr) bool
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
/*
* Is this node a memory operand?
*/
-func Ismem(n *Node) int {
+func Ismem(n *Node) bool {
switch n.Op {
case OITAB,
OSPTR,
ONAME,
OPARAM,
OCLOSUREVAR:
- return 1
+ return true
case OADDR:
- return bool2int(Thearch.Thechar == '6' || Thearch.Thechar == '9') // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+ return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
}
- return 0
+ return false
}
-func Samereg(a *Node, b *Node) int {
+func Samereg(a *Node, b *Node) bool {
if a == nil || b == nil {
- return 0
+ return false
}
if a.Op != OREGISTER {
- return 0
+ return false
}
if b.Op != OREGISTER {
- return 0
+ return false
}
if a.Val.U.Reg != b.Val.U.Reg {
- return 0
+ return false
}
- return 1
+ return true
}
/*
for lp = &p; ; {
p = *lp
- if !(p != nil) {
+ if p == nil {
break
}
- if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !(((p.From.Node).(*Node)).Used != 0) {
+ if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && ((p.From.Node).(*Node)).Used == 0 {
*lp = p.Link
continue
}
- if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && !(((p.To.Node).(*Node)).Used != 0) {
+ if (p.As == obj.AVARDEF || p.As == obj.AVARKILL) && p.To.Node != nil && ((p.To.Node).(*Node)).Used == 0 {
// Cannot remove VARDEF instruction, because - unlike TYPE handled above -
// VARDEFs are interspersed with other code, and a jump might be using the
// VARDEF as a target. Replace with a no-op instead. A later pass will remove
Thearch.Gins(obj.ANOP, n, nil) // used
}
-func Isfat(t *Type) int {
+func Isfat(t *Type) bool {
if t != nil {
switch t.Etype {
case TSTRUCT,
TARRAY,
TSTRING,
TINTER: // maybe remove later
- return 1
+ return true
}
}
- return 0
+ return false
}
func markautoused(p *obj.Prog) {
func Naddr(n *Node, a *obj.Addr, canemitcode int) {
var s *Sym
- *a = obj.Zprog.From
+ *a = obj.Addr{}
if n == nil {
return
}
a.Node = n.Left.Orig
case OCLOSUREVAR:
- if !(Curfn.Needctxt != 0) {
+ if !Curfn.Needctxt {
Fatal("closurevar without needctxt")
}
a.Type = obj.TYPE_MEM
switch n.Class {
default:
Fatal("naddr: ONAME class %v %d\n", Sconv(n.Sym, 0), n.Class)
- fallthrough
case PEXTERN:
a.Name = obj.NAME_EXTERN
* return (11)
* }
*/
-func anyinit(n *NodeList) int {
+func anyinit(n *NodeList) bool {
var h uint32
var s *Sym
var l *NodeList
break
case OAS:
- if isblank(l.N.Left) && candiscard(l.N.Right) != 0 {
+ if isblank(l.N.Left) && candiscard(l.N.Right) {
break
}
fallthrough
// fall through
default:
- return 1
+ return true
}
}
// is this main
if localpkg.Name == "main" {
- return 1
+ return true
}
// is there an explicit init function
s = Lookup(namebuf)
if s.Def != nil {
- return 1
+ return true
}
// are there any imported init functions
if s.Def == nil {
continue
}
- return 1
+ return true
}
}
// then none
- return 0
+ return false
}
func fninit(n *NodeList) {
}
n = initfix(n)
- if !(anyinit(n) != 0) {
+ if !anyinit(n) {
return
}
if Isptr[rcvr.Etype] != 0 {
rcvr = rcvr.Type
}
- if !(rcvr.Sym != nil) {
+ if rcvr.Sym == nil {
Fatal("receiver with no sym: [%v] %v (%v)", Sconv(fn.Sym, 0), Nconv(fn, obj.FmtLong), Tconv(rcvr, 0))
}
return rcvr.Sym.Pkg
if fn.Op != ODCLFUNC {
Fatal("caninl %v", Nconv(fn, 0))
}
- if !(fn.Nname != nil) {
+ if fn.Nname == nil {
Fatal("caninl no nname %v", Nconv(fn, obj.FmtSign))
}
}
budget = 40 // allowed hairyness
- if ishairylist(fn.Nbody, &budget) != 0 {
+ if ishairylist(fn.Nbody, &budget) {
return
}
}
// Look for anything we want to punt on.
-func ishairylist(ll *NodeList, budget *int) int {
+func ishairylist(ll *NodeList, budget *int) bool {
for ; ll != nil; ll = ll.Next {
- if ishairy(ll.N, budget) != 0 {
- return 1
+ if ishairy(ll.N, budget) {
+ return true
}
}
- return 0
+ return false
}
-func ishairy(n *Node, budget *int) int {
- if !(n != nil) {
- return 0
+func ishairy(n *Node, budget *int) bool {
+ if n == nil {
+ return false
}
// Things that are too hairy, irrespective of the budget
OPANIC,
ORECOVER:
if Debug['l'] < 4 {
- return 1
+ return true
}
case OCLOSURE,
ODCLTYPE, // can't print yet
ODCLCONST, // can't print yet
ORETJMP:
- return 1
+ return true
}
(*budget)--
- return bool2int(*budget < 0 || ishairy(n.Left, budget) != 0 || ishairy(n.Right, budget) != 0 || ishairylist(n.List, budget) != 0 || ishairylist(n.Rlist, budget) != 0 || ishairylist(n.Ninit, budget) != 0 || ishairy(n.Ntest, budget) != 0 || ishairy(n.Nincr, budget) != 0 || ishairylist(n.Nbody, budget) != 0 || ishairylist(n.Nelse, budget) != 0)
+ return *budget < 0 || ishairy(n.Left, budget) || ishairy(n.Right, budget) || ishairylist(n.List, budget) || ishairylist(n.Rlist, budget) || ishairylist(n.Ninit, budget) || ishairy(n.Ntest, budget) || ishairy(n.Nincr, budget) || ishairylist(n.Nbody, budget) || ishairylist(n.Nelse, budget)
}
// Inlcopy and inlcopylist recursively copy the body of a function.
func tinlvar(t *Type) *Node {
if t.Nname != nil && !isblank(t.Nname) {
- if !(t.Nname.Inlvar != nil) {
+ if t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
return t.Nname.Inlvar
// parameters.
func mkinlcall1(np **Node, fn *Node, isddd int) {
var i int
- var chkargcount int
+ var chkargcount bool
var n *Node
var call *Node
var saveinlfn *Node
var ninit *NodeList
var body *NodeList
var t *Type
- var variadic int
+ var variadic bool
var varargcount int
var multiret int
var vararg *Node
// method call with a receiver.
t = getthisx(fn.Type).Type
- if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
- if !(n.Left.Left != nil) {
+ if n.Left.Left == nil {
Fatal("method call without receiver: %v", Nconv(n, obj.FmtSign))
}
if t == nil {
}
// check if inlined function is variadic.
- variadic = 0
+ variadic = false
varargtype = nil
varargcount = 0
for t = fn.Type.Type.Down.Down.Type; t != nil; t = t.Down {
if t.Isddd != 0 {
- variadic = 1
+ variadic = true
varargtype = t.Type
}
}
// but if argument is dotted too forget about variadicity.
- if variadic != 0 && isddd != 0 {
- variadic = 0
+ if variadic && isddd != 0 {
+ variadic = false
}
// check if argument is actually a returned tuple from call.
multiret = 0
- if n.List != nil && !(n.List.Next != nil) {
+ if n.List != nil && n.List.Next == nil {
switch n.List.N.Op {
case OCALL,
OCALLFUNC,
}
}
- if variadic != 0 {
+ if variadic {
varargcount = count(n.List) + multiret
if n.Left.Op != ODOTMETH {
varargcount -= fn.Type.Thistuple
// TODO: if len(nlist) == 1 but multiple args, check that n->list->n is a call?
if fn.Type.Thistuple != 0 && n.Left.Op != ODOTMETH {
// non-method call to method
- if !(n.List != nil) {
+ if n.List == nil {
Fatal("non-method call to method without first arg: %v", Nconv(n, obj.FmtSign))
}
// append receiver inlvar to LHS.
t = getthisx(fn.Type).Type
- if t != nil && t.Nname != nil && !isblank(t.Nname) && !(t.Nname.Inlvar != nil) {
+ if t != nil && t.Nname != nil && !isblank(t.Nname) && t.Nname.Inlvar == nil {
Fatal("missing inlvar for %v\n", Nconv(t.Nname, 0))
}
if t == nil {
}
// append ordinary arguments to LHS.
- chkargcount = bool2int(n.List != nil && n.List.Next != nil)
+ chkargcount = n.List != nil && n.List.Next != nil
vararg = nil // the slice argument to a variadic call
varargs = nil // the list of LHS names to put in vararg.
- if !(chkargcount != 0) {
+ if !chkargcount {
// 0 or 1 expression on RHS.
for t = getinargx(fn.Type).Type; t != nil; t = t.Down {
- if variadic != 0 && t.Isddd != 0 {
+ if variadic && t.Isddd != 0 {
vararg = tinlvar(t)
for i = 0; i < varargcount && ll != nil; i++ {
m = argvar(varargtype, i)
} else {
// match arguments except final variadic (unless the call is dotted itself)
for t = getinargx(fn.Type).Type; t != nil; {
- if !(ll != nil) {
+ if ll == nil {
break
}
- if variadic != 0 && t.Isddd != 0 {
+ if variadic && t.Isddd != 0 {
break
}
as.List = list(as.List, tinlvar(t))
}
// match varargcount arguments with variadic parameters.
- if variadic != 0 && t != nil && t.Isddd != 0 {
+ if variadic && t != nil && t.Isddd != 0 {
vararg = tinlvar(t)
for i = 0; i < varargcount && ll != nil; i++ {
m = argvar(varargtype, i)
}
// turn the variadic args into a slice.
- if variadic != 0 {
+ if variadic {
as = Nod(OAS, vararg, nil)
- if !(varargcount != 0) {
+ if varargcount == 0 {
as.Right = nodnil()
as.Right.Type = varargtype
} else {
}
func setlno(n *Node, lno int) {
- if !(n != nil) {
+ if n == nil {
return
}
return i
}
-func skiptopkgdef(b *obj.Biobuf) int {
+func skiptopkgdef(b *obj.Biobuf) bool {
var p string
var sz int
/* archive header */
p = obj.Brdline(b, '\n')
if p == "" {
- return 0
+ return false
}
if obj.Blinelen(b) != 8 {
- return 0
+ return false
}
if p != "!<arch>\n" {
- return 0
+ return false
}
/* symbol table may be first; skip it */
sz = arsize(b, "__.PKGDEF")
if sz <= 0 {
- return 0
+ return false
}
- return 1
+ return true
}
func addidir(dir string) {
strings.HasPrefix(name.S, "../") || name.S == ".."
}
-func findpkg(name *Strlit) int {
+func findpkg(name *Strlit) bool {
var p *Idir
var q string
var suffix string
if islocalname(name) {
if safemode != 0 || nolocalimports != 0 {
- return 0
+ return false
}
// try .a before .6. important for building libraries:
namebuf = fmt.Sprintf("%v.a", Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
namebuf = fmt.Sprintf("%v.%c", Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
- return 0
+ return false
}
// local imports should be canonicalized already.
_ = q
if path.Clean(name.S) != name.S {
Yyerror("non-canonical import path %v (should be %s)", Zconv(name, 0), q)
- return 0
+ return false
}
for p = idirs; p != nil; p = p.link {
namebuf = fmt.Sprintf("%s/%v.a", p.dir, Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
namebuf = fmt.Sprintf("%s/%v.%c", p.dir, Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
}
namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.a", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0))
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
namebuf = fmt.Sprintf("%s/pkg/%s_%s%s%s/%v.%c", goroot, goos, goarch, suffixsep, suffix, Zconv(name, 0), Thearch.Thechar)
if obj.Access(namebuf, 0) >= 0 {
- return 1
+ return true
}
}
- return 0
+ return false
}
func fakeimport() {
}
}
- if !(findpkg(path_) != 0) {
+ if !findpkg(path_) {
Yyerror("can't find import: \"%v\"", Zconv(f.U.Sval, 0))
errorexit()
}
n = len(namebuf)
if n > 2 && namebuf[n-2] == '.' && namebuf[n-1] == 'a' {
- if !(skiptopkgdef(imp) != 0) {
+ if !skiptopkgdef(imp) {
Yyerror("import %s: not a package file", file)
errorexit()
}
for {
- if escchar('"', &escflag, &v) != 0 {
+ if escchar('"', &escflag, &v) {
break
}
if v < utf8.RuneSelf || escflag != 0 {
/* '.' */
case '\'':
- if escchar('\'', &escflag, &v) != 0 {
+ if escchar('\'', &escflag, &v) {
Yyerror("empty character literal or unescaped ' in character literal")
v = '\''
}
- if !(escchar('\'', &escflag, &v) != 0) {
+ if !escchar('\'', &escflag, &v) {
Yyerror("missing '")
ungetc(int(v))
}
}
if verb == "go:linkname" {
- if !(imported_unsafe != 0) {
+ if imported_unsafe == 0 {
Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
}
f := strings.Fields(cmd)
}
if verb == "go:nowritebarrier" {
- if !(compiling_runtime != 0) {
+ if compiling_runtime == 0 {
Yyerror("//go:nowritebarrier only allowed in runtime")
}
nowritebarrier = true
}
}
-func escchar(e int, escflg *int, val *int64) int {
+func escchar(e int, escflg *int, val *int64) bool {
var i int
var u int
var c int
switch c {
case EOF:
Yyerror("eof in string")
- return 1
+ return true
case '\n':
Yyerror("newline in string")
- return 1
+ return true
case '\\':
break
default:
if c == e {
- return 1
+ return true
}
*val = int64(c)
- return 0
+ return false
}
u = 0
}
*val = int64(c)
- return 0
+ return false
hex:
l = 0
}
*val = l
- return 0
+ return false
oct:
l = int64(c) - '0'
}
*val = l
- return 0
+ return false
}
var syms = []struct {
idealbool = typ(TBOOL)
s = Pkglookup("true", builtinpkg)
- s.Def = Nodbool(1)
+ s.Def = Nodbool(true)
s.Def.Sym = Lookup("true")
s.Def.Type = idealbool
s = Pkglookup("false", builtinpkg)
- s.Def = Nodbool(0)
+ s.Def = Nodbool(false)
s.Def.Sym = Lookup("false")
s.Def.Type = idealbool
s = Lookup("true")
if s.Def == nil {
- s.Def = Nodbool(1)
+ s.Def = Nodbool(true)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
s = Lookup("false")
if s.Def == nil {
- s.Def = Nodbool(0)
+ s.Def = Nodbool(false)
s.Def.Sym = s
s.Origpkg = builtinpkg
}
// leave s->block set to cause redeclaration
// errors if a conflicting top-level name is
// introduced by a different file.
- if !(s.Def.Used != 0) && !(nsyntaxerrors != 0) {
+ if s.Def.Used == 0 && nsyntaxerrors == 0 {
pkgnotused(int(s.Def.Lineno), s.Def.Pkg.Path, s.Name)
}
s.Def = nil
if s.Def.Sym != s {
// throw away top-level name left over
// from previous import . "x"
- if s.Def.Pack != nil && !(s.Def.Pack.Used != 0) && !(nsyntaxerrors != 0) {
+ if s.Def.Pack != nil && s.Def.Pack.Used == 0 && nsyntaxerrors == 0 {
pkgnotused(int(s.Def.Pack.Lineno), s.Def.Pack.Pkg.Path, "")
s.Def.Pack.Used = 1
}
}
a.Ovf = uint8(c)
- if a.Ovf != 0 && !(quiet != 0) {
+ if a.Ovf != 0 && quiet == 0 {
Yyerror("constant shift overflow")
}
}
i = Mpprec - 1
if a.A[i] != 0 {
a.Ovf = 1
- if !(quiet != 0) {
+ if quiet == 0 {
Yyerror("constant shift overflow")
}
}
}
a.Ovf = uint8(c)
- if a.Ovf != 0 && !(quiet != 0) {
+ if a.Ovf != 0 && quiet == 0 {
Yyerror("constant addition overflow")
}
q.Neg = uint8(ns ^ ds)
}
-func mpiszero(a *Mpint) int {
+func mpiszero(a *Mpint) bool {
var i int
for i = Mpprec - 1; i >= 0; i-- {
if a.A[i] != 0 {
- return 0
+ return false
}
}
- return 1
+ return true
}
func mpdivfract(a *Mpint, b *Mpint) {
for j = 0; j < Mpscale; j++ {
x <<= 1
if mpcmp(&d, &n) <= 0 {
- if !(mpiszero(&d) != 0) {
+ if !mpiszero(&d) {
x |= 1
}
mpsubfixfix(&n, &d)
// Ordertemp allocates a new temporary with the given type,
// pushes it onto the temp stack, and returns it.
// If clear is true, ordertemp emits code to zero the temporary.
-func ordertemp(t *Type, order *Order, clear int) *Node {
+func ordertemp(t *Type, order *Order, clear bool) *Node {
var var_ *Node
var a *Node
var l *NodeList
var_ = temp(t)
- if clear != 0 {
+ if clear {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
order.out = list(order.out, a)
var a *Node
var var_ *Node
- var_ = ordertemp(t, order, clear)
+ var_ = ordertemp(t, order, clear != 0)
a = Nod(OAS, var_, n)
typecheck(&a, Etop)
order.out = list(order.out, a)
var a *Node
switch n.Op {
- default:
- Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
- fallthrough
-
case ONAME,
OLITERAL:
return n
case OINDEX,
OINDEXMAP:
- if Isfixedarray(n.Left.Type) != 0 {
+ if Isfixedarray(n.Left.Type) {
l = ordersafeexpr(n.Left, order)
} else {
l = ordercheapexpr(n.Left, order)
typecheck(&a, Erv)
return a
}
+
+ Fatal("ordersafeexpr %v", Oconv(int(n.Op), 0))
+ return nil // not reached
}
// Istemp reports whether n is a temporary variable.
-func istemp(n *Node) int {
+func istemp(n *Node) bool {
if n.Op != ONAME {
- return 0
+ return false
}
- return bool2int(strings.HasPrefix(n.Sym.Name, "autotmp_"))
+ return strings.HasPrefix(n.Sym.Name, "autotmp_")
}
// Isaddrokay reports whether it is okay to pass n's address to runtime routines.
// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
// because we emit explicit VARKILL instructions marking the end of those
// temporaries' lifetimes.
-func isaddrokay(n *Node) int {
- return bool2int(islvalue(n) != 0 && (n.Op != ONAME || n.Class == PEXTERN || istemp(n) != 0))
+func isaddrokay(n *Node) bool {
+ return islvalue(n) && (n.Op != ONAME || n.Class == PEXTERN || istemp(n))
}
// Orderaddrtemp ensures that *np is okay to pass by address to runtime routines.
var n *Node
n = *np
- if isaddrokay(n) != 0 {
+ if isaddrokay(n) {
return
}
*np = ordercopyexpr(n, n.Type, order, 0)
for {
l = order.temp
- if !(l != mark) {
+ if l == mark {
break
}
order.temp = l.Next
// Ismulticall reports whether the list l is f() for a multi-value function.
// Such an f() could appear as the lone argument to a multi-arg function.
-func ismulticall(l *NodeList) int {
+func ismulticall(l *NodeList) bool {
var n *Node
// one arg only
if l == nil || l.Next != nil {
- return 0
+ return false
}
n = l.N
// must be call
switch n.Op {
default:
- return 0
+ return false
case OCALLFUNC,
OCALLMETH,
}
// call must return multiple values
- return bool2int(n.Left.Type.Outtuple > 1)
+ return n.Left.Type.Outtuple > 1
}
// Copyret emits t1, t2, ... = n, where n is a function call,
var l2 *NodeList
var tl Iter
- if n.Type.Etype != TSTRUCT || !(n.Type.Funarg != 0) {
+ if n.Type.Etype != TSTRUCT || n.Type.Funarg == 0 {
Fatal("copyret %v %d", Tconv(n.Type, 0), n.Left.Type.Outtuple)
}
// Ordercallargs orders the list of call arguments *l.
func ordercallargs(l **NodeList, order *Order) {
- if ismulticall(*l) != 0 {
+ if ismulticall(*l) {
// return f() where f() is multiple values.
*l = copyret((*l).N, order)
} else {
switch n.Op {
default:
Fatal("ordermapassign %v", Oconv(int(n.Op), 0))
- fallthrough
case OAS:
order.out = list(order.out, n)
// We call writebarrierfat only for values > 4 pointers long. See walk.c.
- if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) != 0 && n.Left.Type.Width > int64(4*Widthptr))) && !(isaddrokay(n.Right) != 0) {
+ if (n.Left.Op == OINDEXMAP || (needwritebarrier(n.Left, n.Right) && n.Left.Type.Width > int64(4*Widthptr))) && !isaddrokay(n.Right) {
m = n.Left
- n.Left = ordertemp(m.Type, order, 0)
+ n.Left = ordertemp(m.Type, order, false)
a = Nod(OAS, m, n.Left)
typecheck(&a, Etop)
order.out = list(order.out, a)
for l = n.List; l != nil; l = l.Next {
if l.N.Op == OINDEXMAP {
m = l.N
- if !(istemp(m.Left) != 0) {
+ if !istemp(m.Left) {
m.Left = ordercopyexpr(m.Left, m.Left.Type, order, 0)
}
- if !(istemp(m.Right) != 0) {
+ if !istemp(m.Right) {
m.Right = ordercopyexpr(m.Right, m.Right.Type, order, 0)
}
- l.N = ordertemp(m.Type, order, 0)
+ l.N = ordertemp(m.Type, order, false)
a = Nod(OAS, m, l.N)
typecheck(&a, Etop)
post = list(post, a)
switch n.Op {
default:
Fatal("orderstmt %v", Oconv(int(n.Op), 0))
- fallthrough
case OVARKILL:
order.out = list(order.out, n)
order.out = list(order.out, n)
} else {
typ = n.Rlist.N.Type
- tmp1 = ordertemp(typ, order, bool2int(haspointers(typ)))
+ tmp1 = ordertemp(typ, order, haspointers(typ))
order.out = list(order.out, n)
r = Nod(OAS, n.List.N, tmp1)
typecheck(&r, Etop)
orderexprlist(n.List, order)
orderexpr(&n.Rlist.N.Left, order) // arg to recv
ch = n.Rlist.N.Left.Type
- tmp1 = ordertemp(ch.Type, order, bool2int(haspointers(ch.Type)))
+ tmp1 = ordertemp(ch.Type, order, haspointers(ch.Type))
if !isblank(n.List.Next.N) {
- tmp2 = ordertemp(n.List.Next.N.Type, order, 0)
+ tmp2 = ordertemp(n.List.Next.N.Type, order, false)
} else {
- tmp2 = ordertemp(Types[TBOOL], order, 0)
+ tmp2 = ordertemp(Types[TBOOL], order, false)
}
order.out = list(order.out, n)
r = Nod(OAS, n.List.N, tmp1)
t = marktemp(order)
orderexpr(&n.Left, order)
- if !(Isinter(n.Left.Type) != 0) {
+ if !Isinter(n.Left.Type) {
orderaddrtemp(&n.Left, order)
}
order.out = list(order.out, n)
switch n.Type.Etype {
default:
Fatal("orderstmt range %v", Tconv(n.Type, 0))
- fallthrough
// Mark []byte(str) range expression to reuse string backing storage.
// It is safe because the storage cannot be mutated.
n.Right = ordercopyexpr(r, r.Type, order, 0)
// n->alloc is the temp for the iterator.
- n.Alloc = ordertemp(Types[TUINT8], order, 1)
+ n.Alloc = ordertemp(Types[TUINT8], order, true)
}
for l = n.List; l != nil; l = l.Next {
l.N.Ninit = list(l.N.Ninit, tmp2)
}
- r.Left = ordertemp(r.Right.Left.Type.Type, order, bool2int(haspointers(r.Right.Left.Type.Type)))
+ r.Left = ordertemp(r.Right.Left.Type.Type, order, haspointers(r.Right.Left.Type.Type))
tmp2 = Nod(OAS, tmp1, r.Left)
typecheck(&tmp2, Etop)
l.N.Ninit = list(l.N.Ninit, tmp2)
l.N.Ninit = list(l.N.Ninit, tmp2)
}
- r.Ntest = ordertemp(tmp1.Type, order, 0)
+ r.Ntest = ordertemp(tmp1.Type, order, false)
tmp2 = Nod(OAS, tmp1, r.Ntest)
typecheck(&tmp2, Etop)
l.N.Ninit = list(l.N.Ninit, tmp2)
// r->left is c, r->right is x, both are always evaluated.
orderexpr(&r.Left, order)
- if !(istemp(r.Left) != 0) {
+ if !istemp(r.Left) {
r.Left = ordercopyexpr(r.Left, r.Left.Type, order, 0)
}
orderexpr(&r.Right, order)
- if !(istemp(r.Right) != 0) {
+ if !istemp(r.Right) {
r.Right = ordercopyexpr(r.Right, r.Right.Type, order, 0)
}
}
var l *NodeList
var t *Type
var lno int
- var haslit int
- var hasbyte int
+ var haslit bool
+ var hasbyte bool
n = *np
if n == nil {
t = typ(TARRAY)
t.Bound = int64(count(n.List))
t.Type = Types[TSTRING]
- n.Alloc = ordertemp(t, order, 0)
+ n.Alloc = ordertemp(t, order, false)
}
// Mark string(byteSlice) arguments to reuse byteSlice backing
// Otherwise if all other arguments are empty strings,
// concatstrings will return the reference to the temp string
// to the caller.
- hasbyte = 0
+ hasbyte = false
- haslit = 0
+ haslit = false
for l = n.List; l != nil; l = l.Next {
- hasbyte |= bool2int(l.N.Op == OARRAYBYTESTR)
- haslit |= bool2int(l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0)
+ hasbyte = hasbyte || l.N.Op == OARRAYBYTESTR
+ haslit = haslit || l.N.Op == OLITERAL && len(l.N.Val.U.Sval.S) != 0
}
- if haslit != 0 && hasbyte != 0 {
+ if haslit && hasbyte {
for l = n.List; l != nil; l = l.Next {
if l.N.Op == OARRAYBYTESTR {
l.N.Op = OARRAYBYTESTRTMP
case OCONVIFACE:
orderexpr(&n.Left, order)
- if !(Isinter(n.Left.Type) != 0) {
+ if !Isinter(n.Left.Type) {
orderaddrtemp(&n.Left, order)
}
case OCLOSURE:
if n.Noescape && n.Cvars != nil {
- n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+ n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
}
case OARRAYLIT,
orderexprlist(n.List, order)
orderexprlist(n.Rlist, order)
if n.Noescape {
- n.Alloc = ordertemp(Types[TUINT8], order, 0) // walk will fill in correct type
+ n.Alloc = ordertemp(Types[TUINT8], order, false) // walk will fill in correct type
}
case ODDDARG:
// Allocate a temporary that will be cleaned up when this statement
// completes. We could be more aggressive and try to arrange for it
// to be cleaned up when the call completes.
- n.Alloc = ordertemp(n.Type.Type, order, 0)
+ n.Alloc = ordertemp(n.Type.Type, order, false)
}
case ORECV,
orderexpr(&n.Left, order)
orderexpr(&n.Right, order)
t = n.Left.Type
- if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+ if t.Etype == TSTRUCT || Isfixedarray(t) {
// for complex comparisons, we need both args to be
// addressable so we can pass them to the runtime.
orderaddrtemp(&n.Left, order)
ll = Curfn.Dcl
n = ll.N
- if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+ if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
// No locals used at all
Curfn.Dcl = nil
for ll = Curfn.Dcl; ll.Next != nil; ll = ll.Next {
n = ll.Next.N
- if n.Class == PAUTO && n.Op == ONAME && !(n.Used != 0) {
+ if n.Class == PAUTO && n.Op == ONAME && n.Used == 0 {
ll.Next = nil
Curfn.Dcl.End = ll
break
}
// Ideally we wouldn't see any integer types here, but we do.
- if n.Type == nil || (!(Isptr[n.Type.Etype] != 0) && !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TUNSAFEPTR) {
+ if n.Type == nil || (Isptr[n.Type.Etype] == 0 && Isint[n.Type.Etype] == 0 && n.Type.Etype != TUNSAFEPTR) {
Dump("checknil", n)
Fatal("bad checknil")
}
- if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !(n.Addable != 0) || n.Op == OLITERAL {
+ if ((Thearch.Thechar == '5' || Thearch.Thechar == '9') && n.Op != OREGISTER) || n.Addable == 0 || n.Op == OLITERAL {
Thearch.Regalloc(®, Types[Tptr], n)
Thearch.Cgen(n, ®)
Thearch.Gins(obj.ACHECKNIL, ®, nil)
if fn.Wrapper != 0 {
ptxt.From3.Offset |= obj.WRAPPER
}
- if fn.Needctxt != 0 {
+ if fn.Needctxt {
ptxt.From3.Offset |= obj.NEEDCTXT
}
if fn.Nosplit {
Pc.Lineno = lineno
fixjmp(ptxt)
- if !(Debug['N'] != 0) || Debug['R'] != 0 || Debug['P'] != 0 {
+ if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
regopt(ptxt)
nilopt(ptxt)
}
// are two criteria for termination. If the end of the basic block is reached,
// false is returned. If the callback returns true, the iteration is stopped
// and true is returned.
-func blockany(bb *BasicBlock, callback func(*obj.Prog) int) int {
- var p *obj.Prog
- var result int
-
- for p = bb.last; p != nil; p = p.Opt.(*obj.Prog) {
- result = callback(p)
- if result != 0 {
- return result
+func blockany(bb *BasicBlock, f func(*obj.Prog) bool) bool {
+ for p := bb.last; p != nil; p = p.Opt.(*obj.Prog) {
+ if f(p) {
+ return true
}
}
-
- return 0
+ return false
}
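
// The shape of blockany after the rewrite, on a plain slice instead of a
// basic block (standalone sketch): walk until the predicate reports true.

package sketch

func anyInt(xs []int, f func(int) bool) bool {
	for _, x := range xs {
		if f(x) {
			return true
		}
	}
	return false
}
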
// Collects and returns an array of Node*s for function arguments and local
var isselectcommcasecall_names [5]*obj.LSym
-func isselectcommcasecall(prog *obj.Prog) int {
+func isselectcommcasecall(prog *obj.Prog) bool {
var i int32
if isselectcommcasecall_names[0] == nil {
for i = 0; isselectcommcasecall_names[i] != nil; i++ {
if iscall(prog, isselectcommcasecall_names[i]) {
- return 1
+ return true
}
}
- return 0
+ return false
}
// Returns true for call instructions that target runtime·newselect.
var isnewselect_sym *obj.LSym
-func isnewselect(prog *obj.Prog) int {
+func isnewselect(prog *obj.Prog) bool {
if isnewselect_sym == nil {
isnewselect_sym = Linksym(Pkglookup("newselect", Runtimepkg))
}
- return bool2int(iscall(prog, isnewselect_sym))
+ return iscall(prog, isnewselect_sym)
}
// Returns true for call instructions that target runtime·selectgo.
var isselectgocall_sym *obj.LSym
-func isselectgocall(prog *obj.Prog) int {
+func isselectgocall(prog *obj.Prog) bool {
if isselectgocall_sym == nil {
isselectgocall_sym = Linksym(Pkglookup("selectgo", Runtimepkg))
}
- return bool2int(iscall(prog, isselectgocall_sym))
+ return iscall(prog, isselectgocall_sym)
}
var isdeferreturn_sym *obj.LSym
-func isdeferreturn(prog *obj.Prog) int {
+func isdeferreturn(prog *obj.Prog) bool {
if isdeferreturn_sym == nil {
isdeferreturn_sym = Linksym(Pkglookup("deferreturn", Runtimepkg))
}
- return bool2int(iscall(prog, isdeferreturn_sym))
+ return iscall(prog, isdeferreturn_sym)
}
// Walk backwards from a runtime·selectgo call up to its immediately dominating
Fatal("selectgo does not have a newselect")
}
pred = pred.pred[0]
- if blockany(pred, isselectcommcasecall) != 0 {
+ if blockany(pred, isselectcommcasecall) {
// A select comm case block should have exactly one
// successor.
if len(pred.succ) != 1 {
addedge(selectgo, succ)
}
- if blockany(pred, isnewselect) != 0 {
+ if blockany(pred, isnewselect) {
// Reached the matching newselect.
break
}
p.Link.Opt = newblock(p.Link)
cfg = append(cfg, p.Link.Opt.(*BasicBlock))
}
- } else if isselectcommcasecall(p) != 0 || isselectgocall(p) != 0 {
+ } else if isselectcommcasecall(p) || isselectgocall(p) {
// Accommodate implicit selectgo control flow.
if p.Link.Opt == nil {
p.Link.Opt = newblock(p.Link)
}
// Collect basic blocks with selectgo calls.
- if isselectgocall(p) != 0 {
+ if isselectgocall(p) {
selectgo = append(selectgo, bb)
}
}
// non-tail-call return instructions; see note above
// the for loop for details.
case PPARAMOUT:
- if !(node.Addrtaken != 0) && prog.To.Type == obj.TYPE_NONE {
+ if node.Addrtaken == 0 && prog.To.Type == obj.TYPE_NONE {
bvset(uevar, i)
}
}
bvset(uevar, pos)
}
if info.Flags&LeftWrite != 0 {
- if from.Node != nil && !(Isfat(((from.Node).(*Node)).Type) != 0) {
+ if from.Node != nil && !Isfat(((from.Node).(*Node)).Type) {
bvset(varkill, pos)
}
}
bvset(uevar, pos)
}
if info.Flags&RightWrite != 0 {
- if to.Node != nil && (!(Isfat(((to.Node).(*Node)).Type) != 0) || prog.As == obj.AVARDEF) {
+ if to.Node != nil && (!Isfat(((to.Node).(*Node)).Type) || prog.As == obj.AVARDEF) {
bvset(varkill, pos)
}
}
if t.Bound < -1 {
Fatal("twobitwalktype1: invalid bound, %v", Tconv(t, 0))
}
- if Isslice(t) != 0 {
+ if Isslice(t) {
// struct { byte *array; uintgo len; uintgo cap; }
if *xoffset&int64(Widthptr-1) != 0 {
Fatal("twobitwalktype1: invalid TARRAY alignment, %v", Tconv(t, 0))
for i = 0; ; i++ {
i = int32(bvnext(liveout, i))
- if !(i >= 0) {
+ if i < 0 {
break
}
node = vars[i]
// Returns true for instructions that are safe points that must be annotated
// with liveness information.
-func issafepoint(prog *obj.Prog) int {
- return bool2int(prog.As == obj.ATEXT || prog.As == obj.ACALL)
+func issafepoint(prog *obj.Prog) bool {
+ return prog.As == obj.ATEXT || prog.As == obj.ACALL
}
// Initializes the sets for solving the live variables. Visits all the
// This function is slow but it is only used for generating debug prints.
// Check whether n is marked live in args/locals.
-func islive(n *Node, args *Bvec, locals *Bvec) int {
+func islive(n *Node, args *Bvec, locals *Bvec) bool {
var i int
switch n.Class {
PPARAMOUT:
for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
if bvget(args, int32(n.Xoffset/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
- return 1
+ return true
}
}
case PAUTO:
for i = 0; int64(i) < n.Type.Width/int64(Widthptr)*obj.BitsPerPointer; i++ {
if bvget(locals, int32((n.Xoffset+stkptrsize)/int64(Widthptr)*obj.BitsPerPointer+int64(i))) != 0 {
- return 1
+ return true
}
}
}
- return 0
+ return false
}
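
// A standalone sketch of the indexing islive performs: each pointer-sized
// word of a variable owns bitsPerPointer bits in the liveness map, starting
// at the variable's frame offset. The constants here are illustrative, not
// the compiler's real Widthptr/BitsPerPointer values.

package sketch

const (
	widthPtr       = 8 // pointer size on a 64-bit target
	bitsPerPointer = 2
)

func anyLiveBit(bits []byte, offset, size int64) bool {
	for i := int64(0); i < size/widthPtr*bitsPerPointer; i++ {
		idx := offset/widthPtr*bitsPerPointer + i
		if bits[idx/8]&(1<<uint(idx%8)) != 0 {
			return true
		}
	}
	return false
}
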
// Visits all instructions in a basic block and computes a bit vector of live
bvor(any, any, avarinit)
bvor(all, all, avarinit)
- if issafepoint(p) != 0 {
+ if issafepoint(p) {
// Annotate ambiguously live variables so that they can
// be zeroed at function entry.
// livein and liveout are dead here and used as temporaries.
bvresetall(livein)
bvandnot(liveout, any, all)
- if !(bvisempty(liveout) != 0) {
+ if !bvisempty(liveout) {
for pos = 0; pos < liveout.n; pos++ {
- if !(bvget(liveout, pos) != 0) {
+ if bvget(liveout, pos) == 0 {
continue
}
bvset(all, pos) // silence future warnings in this block
n = lv.vars[pos]
- if !(n.Needzero != 0) {
+ if n.Needzero == 0 {
n.Needzero = 1
if debuglive >= 1 {
Warnl(int(p.Lineno), "%v: %v is ambiguously live", Nconv(Curfn.Nname, 0), Nconv(n, obj.FmtLong))
bvcopy(liveout, livein)
bvandnot(livein, liveout, varkill)
bvor(livein, livein, uevar)
- if debuglive >= 3 && issafepoint(p) != 0 {
+ if debuglive >= 3 && issafepoint(p) {
fmt.Printf("%v\n", p)
printvars("uevar", uevar, lv.vars)
printvars("varkill", varkill, lv.vars)
printvars("liveout", liveout, lv.vars)
}
- if issafepoint(p) != 0 {
+ if issafepoint(p) {
// Found an interesting instruction, record the
// corresponding liveness information.
// input parameters.
if p.As == obj.ATEXT {
for j = 0; j < liveout.n; j++ {
- if !(bvget(liveout, j) != 0) {
+ if bvget(liveout, j) == 0 {
continue
}
n = lv.vars[j]
numlive = 0
for j = 0; j < int32(len(lv.vars)); j++ {
n = lv.vars[j]
- if islive(n, args, locals) != 0 {
+ if islive(n, args, locals) {
fmt_ += fmt.Sprintf(" %v", Nconv(n, 0))
numlive++
}
// Only CALL instructions need a PCDATA annotation.
// The TEXT instruction annotation is implicit.
if p.As == obj.ACALL {
- if isdeferreturn(p) != 0 {
+ if isdeferreturn(p) {
// runtime.deferreturn modifies its return address to return
// back to the CALL, not to the subsequent instruction.
// Because the return comes back one instruction early,
started = 0
for i = 0; i < len(vars); i++ {
- if !(bvget(bits, int32(i)) != 0) {
+ if bvget(bits, int32(i)) == 0 {
continue
}
- if !(started != 0) {
- if !(printed != 0) {
+ if started == 0 {
+ if printed == 0 {
fmt.Printf("\t")
} else {
fmt.Printf(" ")
if printed != 0 {
fmt.Printf("\n")
}
- if issafepoint(p) != 0 {
+ if issafepoint(p) {
args = lv.argslivepointers[pcdata]
locals = lv.livepointers[pcdata]
fmt.Printf("\tlive=")
printed = 0
for j = 0; j < len(lv.vars); j++ {
n = lv.vars[j]
- if islive(n, args, locals) != 0 {
+ if islive(n, args, locals) {
tmp9 := printed
printed++
if tmp9 != 0 {
var noreturn_symlist [10]*Sym
-func Noreturn(p *obj.Prog) int {
+func Noreturn(p *obj.Prog) bool {
var s *Sym
var i int
}
if p.To.Node == nil {
- return 0
+ return false
}
s = ((p.To.Node).(*Node)).Sym
if s == nil {
- return 0
+ return false
}
for i = 0; noreturn_symlist[i] != nil; i++ {
if s == noreturn_symlist[i] {
- return 1
+ return true
}
}
- return 0
+ return false
}
// JMP chasing and removal.
// pass 4: elide JMP to next instruction.
// only safe if there are no jumps to JMPs anymore.
- if !(jmploop != 0) {
+ if jmploop == 0 {
last = nil
for p = firstp; p != nil; p = p.Link {
if p.As == obj.AJMP && p.To.Type == obj.TYPE_BRANCH && p.To.U.Branch == p.Link {
for f = start; f != nil; f = f.Link {
p = f.Prog
Thearch.Proginfo(&info, p)
- if !(info.Flags&Break != 0) {
+ if info.Flags&Break == 0 {
f1 = f.Link
f.S1 = f1
f1.P1 = f
r.Rpo = 1
r1 = r.S1
- if r1 != nil && !(r1.Rpo != 0) {
+ if r1 != nil && r1.Rpo == 0 {
n = postorder(r1, rpo2r, n)
}
r1 = r.S2
- if r1 != nil && !(r1.Rpo != 0) {
+ if r1 != nil && r1.Rpo == 0 {
n = postorder(r1, rpo2r, n)
}
rpo2r[n] = r
return rpo1
}
-func doms(idom []int32, r int32, s int32) int {
+func doms(idom []int32, r int32, s int32) bool {
for s > r {
s = idom[s]
}
- return bool2int(s == r)
+ return s == r
}
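// (Why the `s > r` walk works: the rpo indices come from a reverse-postorder
// numbering, in which every dominator has a smaller index than the nodes it
// dominates, so s strictly decreases along the idom chain until it either
// hits r or passes it.)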
-func loophead(idom []int32, r *Flow) int {
+func loophead(idom []int32, r *Flow) bool {
var src int32
src = r.Rpo
- if r.P1 != nil && doms(idom, src, r.P1.Rpo) != 0 {
- return 1
+ if r.P1 != nil && doms(idom, src, r.P1.Rpo) {
+ return true
}
for r = r.P2; r != nil; r = r.P2link {
- if doms(idom, src, r.Rpo) != 0 {
- return 1
+ if doms(idom, src, r.Rpo) {
+ return true
}
}
- return 0
+ return false
}
func loopmark(rpo2r **Flow, head int32, r *Flow) {
for i = 0; i < nr; i++ {
r1 = rpo2r[i]
r1.Loop++
- if r1.P2 != nil && loophead(idom, r1) != 0 {
+ if r1.P2 != nil && loophead(idom, r1) {
loopmark(&rpo2r[0], i, r1)
}
}
}
// Is n available for merging?
-func canmerge(n *Node) int {
- return bool2int(n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp"))
+func canmerge(n *Node) bool {
+ return n.Class == PAUTO && strings.HasPrefix(n.Sym.Name, "autotmp")
}
func mergetemp(firstp *obj.Prog) {
// Build list of all mergeable variables.
nvar = 0
for l = Curfn.Dcl; l != nil; l = l.Next {
- if canmerge(l.N) != 0 {
+ if canmerge(l.N) {
nvar++
}
}
nvar = 0
for l = Curfn.Dcl; l != nil; l = l.Next {
n = l.N
- if canmerge(n) != 0 {
+ if canmerge(n) {
v = &var_[nvar]
nvar++
n.Opt = v
if f != nil && f.Data.(*Flow) == nil {
p = f.Prog
Thearch.Proginfo(&info, p)
- if p.To.Node == v.node && (info.Flags&RightWrite != 0) && !(info.Flags&RightRead != 0) {
+ if p.To.Node == v.node && (info.Flags&RightWrite != 0) && info.Flags&RightRead == 0 {
p.As = obj.ANOP
- p.To = obj.Zprog.To
+ p.To = obj.Addr{}
v.removed = 1
if debugmerge > 0 && Debug['v'] != 0 {
fmt.Printf("drop write-only %v\n", Sconv(v.node.Sym, 0))
const (
SizeAny = SizeB | SizeW | SizeL | SizeQ | SizeF | SizeD
)
- if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && !((info.Flags|info1.Flags)&(LeftAddr|RightAddr) != 0) && info.Flags&SizeAny == info1.Flags&SizeAny {
+ if p.From.Node == v.node && p1.To.Node == v.node && (info.Flags&Move != 0) && (info.Flags|info1.Flags)&(LeftAddr|RightAddr) == 0 && info.Flags&SizeAny == info1.Flags&SizeAny {
p1.From = p.From
Thearch.Excise(f)
v.removed = 1
// Delete merged nodes from declaration list.
for lp = &Curfn.Dcl; ; {
l = *lp
- if !(l != nil) {
+ if l == nil {
break
}
nkill = 0
for f = g.Start; f != nil; f = f.Link {
p = f.Prog
- if p.As != obj.ACHECKNIL || !(Thearch.Regtyp(&p.From) != 0) {
+ if p.As != obj.ACHECKNIL || !Thearch.Regtyp(&p.From) {
continue
}
ncheck++
- if Thearch.Stackaddr(&p.From) != 0 {
+ if Thearch.Stackaddr(&p.From) {
if Debug_checknil != 0 && p.Lineno > 1 {
Warnl(int(p.Lineno), "removed nil check of SP address")
}
for f = fcheck; f != nil; f = Uniqp(f) {
p = f.Prog
Thearch.Proginfo(&info, p)
- if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
// Found initialization of value we're checking for nil.
// without first finding the check, so this one is unchecked.
return
}
- if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) != 0 {
+ if f != fcheck && p.As == obj.ACHECKNIL && Thearch.Sameaddr(&p.From, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
p = f.Prog
Thearch.Proginfo(&info, p)
- if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) != 0 {
+ if (info.Flags&LeftRead != 0) && Thearch.Smallindir(&p.From, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
- if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) != 0 {
+ if (info.Flags&(RightRead|RightWrite) != 0) && Thearch.Smallindir(&p.To, &fcheck.Prog.From) {
fcheck.Data = &killed
return
}
}
// Stop if value is lost.
- if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) != 0 {
+ if (info.Flags&RightWrite != 0) && Thearch.Sameaddr(&p.To, &fcheck.Prog.From) {
return
}
// Stop if memory write.
- if (info.Flags&RightWrite != 0) && !(Thearch.Regtyp(&p.To) != 0) {
+ if (info.Flags&RightWrite != 0) && !Thearch.Regtyp(&p.To) {
return
}
// Memory accesses in the packages are either uninteresting or will cause false positives.
var noinst_pkgs = []string{"sync", "sync/atomic"}
-func ispkgin(pkgs []string) int {
+func ispkgin(pkgs []string) bool {
var i int
if myimportpath != "" {
for i = 0; i < len(pkgs); i++ {
if myimportpath == pkgs[i] {
- return 1
+ return true
}
}
}
- return 0
+ return false
}
-func isforkfunc(fn *Node) int {
+func isforkfunc(fn *Node) bool {
// Special case for syscall.forkAndExecInChild.
// In the child, this function must not acquire any locks, because
// they might have been locked at the time of the fork. This means
// no rescheduling, no malloc calls, and no new stack segments.
// Race instrumentation does all of the above.
- return bool2int(myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild")
+ return myimportpath != "" && myimportpath == "syscall" && fn.Nname.Sym.Name == "forkAndExecInChild"
}
func racewalk(fn *Node) {
var nodpc *Node
var s string
- if ispkgin(omit_pkgs) != 0 || isforkfunc(fn) != 0 {
+ if ispkgin(omit_pkgs) || isforkfunc(fn) {
return
}
- if !(ispkgin(noinst_pkgs) != 0) {
+ if !ispkgin(noinst_pkgs) {
racewalklist(fn.Nbody, nil)
// nothing interesting for race detector in fn->enter
switch n.Op {
default:
Fatal("racewalk: unknown node type %v", Oconv(int(n.Op), 0))
- fallthrough
case OAS,
OAS2FUNC:
OLEN,
OCAP:
racewalknode(&n.Left, init, 0, 0)
- if Istype(n.Left.Type, TMAP) != 0 {
+ if Istype(n.Left.Type, TMAP) {
n1 = Nod(OCONVNOP, n.Left, nil)
n1.Type = Ptrto(Types[TUINT8])
n1 = Nod(OIND, n1, nil)
goto ret
case OINDEX:
- if !(Isfixedarray(n.Left.Type) != 0) {
+ if !Isfixedarray(n.Left.Type) {
racewalknode(&n.Left, init, 0, 0)
- } else if !(islvalue(n.Left) != 0) {
+ } else if !islvalue(n.Left) {
// index of unaddressable array, like Map[k][i].
racewalknode(&n.Left, init, wr, 0)
*np = n
}
-func isartificial(n *Node) int {
+func isartificial(n *Node) bool {
// compiler-emitted artificial things that we do not want to instrument,
// can't possibly participate in a data race.
if n.Op == ONAME && n.Sym != nil && n.Sym.Name != "" {
if n.Sym.Name == "_" {
- return 1
+ return true
}
// autotmp's are always local
if strings.HasPrefix(n.Sym.Name, "autotmp_") {
- return 1
+ return true
}
// statictmp's are read-only
if strings.HasPrefix(n.Sym.Name, "statictmp_") {
- return 1
+ return true
}
// go.itab is accessed only by the compiler and runtime (assume safe)
if n.Sym.Pkg != nil && n.Sym.Pkg.Name != "" && n.Sym.Pkg.Name == "go.itab" {
- return 1
+ return true
}
}
- return 0
+ return false
}
-func callinstr(np **Node, init **NodeList, wr int, skip int) int {
+func callinstr(np **Node, init **NodeList, wr int, skip int) bool {
var name string
var f *Node
var b *Node
// n, n->op, n->type ? n->type->etype : -1, n->class);
if skip != 0 || n.Type == nil || n.Type.Etype >= TIDEAL {
- return 0
+ return false
}
t = n.Type
- if isartificial(n) != 0 {
- return 0
+ if isartificial(n) {
+ return false
}
b = outervalue(n)
// it skips e.g. stores to ... parameter array
- if isartificial(b) != 0 {
- return 0
+ if isartificial(b) {
+ return false
}
class = int(b.Class)
n = treecopy(n)
makeaddable(n)
- if t.Etype == TSTRUCT || Isfixedarray(t) != 0 {
+ if t.Etype == TSTRUCT || Isfixedarray(t) {
name = "racereadrange"
if wr != 0 {
name = "racewriterange"
}
*init = list(*init, f)
- return 1
+ return true
}
- return 0
+ return false
}
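
// A standalone sketch of what a successful callinstr amounts to in user
// terms (raceWrite is a made-up stand-in for the real racewrite /
// racewriterange runtime hooks): each interesting memory access is
// preceded by a runtime call carrying the address.

package sketch

func raceWrite(addr *int) {} // stub standing in for the runtime hook

func instrumentedStore(p *int, v int) {
	raceWrite(p)
	*p = v
}
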
// makeaddable returns a node whose memory location is the
// an addressable value.
switch n.Op {
case OINDEX:
- if Isfixedarray(n.Left.Type) != 0 {
+ if Isfixedarray(n.Left.Type) {
makeaddable(n.Left)
}
var r *Node
r = Nod(OADDR, n, nil)
- r.Bounded = 1
+ r.Bounded = true
r = conv(r, Types[TUNSAFEPTR])
r = conv(r, Types[TUINTPTR])
return r
}
}
- if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
t = t.Type
}
n.Type = t
t2 = t.Type
case TCHAN:
- if !(t.Chan&Crecv != 0) {
+ if t.Chan&Crecv == 0 {
Yyerror("invalid operation: range %v (receive from send-only type %v)", Nconv(n.Right, 0), Tconv(n.Right.Type, 0))
goto out
}
switch t.Etype {
default:
Fatal("walkrange")
- fallthrough
// Lower n into runtime·memclr if possible, for
// fast zeroing of slices and arrays (issue 5373).
//
// in which the evaluation of a is side-effect-free.
case TARRAY:
- if !(Debug['N'] != 0) {
- if !(flag_race != 0) {
+ if Debug['N'] == 0 {
+ if flag_race == 0 {
if v1 != nil {
if v2 == nil {
if n.Nbody != nil {
tmp = n.Nbody.N // first statement of body
if tmp.Op == OAS {
if tmp.Left.Op == OINDEX {
- if samesafeexpr(tmp.Left.Left, a) != 0 {
- if samesafeexpr(tmp.Left.Right, v1) != 0 {
+ if samesafeexpr(tmp.Left.Left, a) {
+ if samesafeexpr(tmp.Left.Right, v1) {
if t.Type.Width > 0 {
- if iszero(tmp.Right) != 0 {
+ if iszero(tmp.Right) {
// Convert to
// if len(a) != 0 {
// hp = &a[0]
hp = temp(Ptrto(Types[TUINT8]))
tmp = Nod(OINDEX, a, Nodintconst(0))
- tmp.Bounded = 1
+ tmp.Bounded = true
tmp = Nod(OADDR, tmp, nil)
tmp = Nod(OCONVNOP, tmp, nil)
tmp.Type = Ptrto(Types[TUINT8])
if v2 != nil {
hp = temp(Ptrto(n.Type.Type))
tmp = Nod(OINDEX, ha, Nodintconst(0))
- tmp.Bounded = 1
+ tmp.Bounded = true
init = list(init, Nod(OAS, hp, Nod(OADDR, tmp, nil)))
}
}
hb = temp(Types[TBOOL])
- n.Ntest = Nod(ONE, hb, Nodbool(0))
+ n.Ntest = Nod(ONE, hb, Nodbool(false))
a = Nod(OAS2RECV, nil, nil)
a.Typecheck = 1
a.List = list(list1(hv1), hb)
// type stored in interface word
it = t
- if !(isdirectiface(it) != 0) {
+ if !isdirectiface(it) {
it = Ptrto(t)
}
if f.Type.Etype != TFUNC || f.Type.Thistuple == 0 {
Fatal("non-method on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
}
- if !(getthisx(f.Type).Type != nil) {
+ if getthisx(f.Type).Type == nil {
Fatal("receiver with no type on %v method %v %v\n", Tconv(mt, 0), Sconv(f.Sym, 0), Tconv(f, 0))
}
- if f.Nointerface != 0 {
+ if f.Nointerface {
continue
}
if Isptr[this.Etype] != 0 && this.Type == t {
continue
}
- if Isptr[this.Etype] != 0 && !(Isptr[t.Etype] != 0) && f.Embedded != 2 && !(isifacemethod(f.Type) != 0) {
+ if Isptr[this.Etype] != 0 && Isptr[t.Etype] == 0 && f.Embedded != 2 && !isifacemethod(f.Type) {
continue
}
a.type_ = methodfunc(f.Type, t)
a.mtype = methodfunc(f.Type, nil)
- if !(a.isym.Flags&SymSiggen != 0) {
+ if a.isym.Flags&SymSiggen == 0 {
a.isym.Flags |= SymSiggen
if !Eqtype(this, it) || this.Width < Types[Tptr].Width {
compiling_wrappers = 1
}
}
- if !(a.tsym.Flags&SymSiggen != 0) {
+ if a.tsym.Flags&SymSiggen == 0 {
a.tsym.Flags |= SymSiggen
if !Eqtype(this, t) {
compiling_wrappers = 1
// code can refer to it.
isym = methodsym(method, t, 0)
- if !(isym.Flags&SymSiggen != 0) {
+ if isym.Flags&SymSiggen == 0 {
isym.Flags |= SymSiggen
genwrapper(t, f, isym, 0)
}
func haspointers(t *Type) bool {
var t1 *Type
- var ret int
+ var ret bool
if t.Haspointers != 0 {
return t.Haspointers-1 != 0
TCOMPLEX64,
TCOMPLEX128,
TBOOL:
- ret = 0
+ ret = false
case TARRAY:
if t.Bound < 0 { // slice
- ret = 1
+ ret = true
break
}
if t.Bound == 0 { // empty array
- ret = 0
+ ret = false
break
}
- ret = bool2int(haspointers(t.Type))
+ ret = haspointers(t.Type)
case TSTRUCT:
- ret = 0
+ ret = false
for t1 = t.Type; t1 != nil; t1 = t1.Down {
if haspointers(t1.Type) {
- ret = 1
+ ret = true
break
}
}
TFUNC:
fallthrough
default:
- ret = 1
+ ret = true
}
- t.Haspointers = uint8(1 + ret)
- return ret != 0
+ t.Haspointers = 1 + uint8(bool2int(ret))
+ return ret
}
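
// A standalone sketch of the tri-state memoization haspointers keeps in
// t.Haspointers: 0 means "not computed", 1 encodes false, 2 encodes true,
// so a zero-initialized field doubles as the cache-miss marker.

package sketch

type cache struct {
	state uint8 // 0 unknown, 1 false, 2 true
}

func (c *cache) get(compute func() bool) bool {
	if c.state != 0 {
		return c.state == 2
	}
	c.state = 1
	if compute() {
		c.state = 2
	}
	return c.state == 2
}
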
/*
var i int
var alg int
var sizeofAlg int
- var gcprog int
+ var gcprog bool
var sptr *Sym
var algsym *Sym
var zero *Sym
algsym = dalgsym(t)
}
- if t.Sym != nil && !(Isptr[t.Etype] != 0) {
+ if t.Sym != nil && Isptr[t.Etype] == 0 {
sptr = dtypesym(Ptrto(t))
} else {
sptr = weaktypesym(Ptrto(t))
if !haspointers(t) {
i |= obj.KindNoPointers
}
- if isdirectiface(t) != 0 {
+ if isdirectiface(t) {
i |= obj.KindDirectIface
}
- if gcprog != 0 {
+ if gcprog {
i |= obj.KindGCProg
}
ot = duint8(s, ot, uint8(i)) // kind
}
// gc
- if gcprog != 0 {
+ if gcprog {
gengcprog(t, &gcprog0, &gcprog1)
if gcprog0 != nil {
ot = dsymptr(s, ot, gcprog0, 0)
var s *Sym
var n *Node
- if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) != 0 {
+ if t == nil || (Isptr[t.Etype] != 0 && t.Type == nil) || isideal(t) {
Fatal("typename %v", Tconv(t, 0))
}
s = typesym(t)
 * Reports whether t has a reflexive equality operator.
* That is, if x==x for all x of type t.
*/
-func isreflexive(t *Type) int {
+func isreflexive(t *Type) bool {
var t1 *Type
switch t.Etype {
case TBOOL,
TUNSAFEPTR,
TSTRING,
TCHAN:
- return 1
+ return true
case TFLOAT32,
TFLOAT64,
TCOMPLEX64,
TCOMPLEX128,
TINTER:
- return 0
+ return false
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
Fatal("slice can't be a map key: %v", Tconv(t, 0))
}
return isreflexive(t.Type)
case TSTRUCT:
for t1 = t.Type; t1 != nil; t1 = t1.Down {
- if !(isreflexive(t1.Type) != 0) {
- return 0
+ if !isreflexive(t1.Type) {
+ return false
}
}
- return 1
+ return true
default:
Fatal("bad type for map key: %v", Tconv(t, 0))
- return 0
+ return false
}
}
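
// The canonical non-reflexive case, as a runnable aside: IEEE NaN compares
// unequal to itself, which is why the float and complex kinds return false
// above.

package main

import "math"

func main() {
	n := math.NaN()
	println(n == n) // false
}
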
t = Types[t.Etype]
}
- if isideal(t) != 0 {
+ if isideal(t) {
Fatal("dtypesym %v", Tconv(t, 0))
}
}
// named types from other files are defined only by those files
- if tbase.Sym != nil && !(tbase.Local != 0) {
+ if tbase.Sym != nil && tbase.Local == 0 {
return s
}
if isforw[tbase.Etype] != 0 {
}
ot = duint16(s, ot, uint16(mapbucket(t).Width))
- ot = duint8(s, ot, uint8(isreflexive(t.Down)))
+ ot = duint8(s, ot, uint8(bool2int(isreflexive(t.Down))))
case TPTR32,
TPTR64:
ot = duintxx(s, ot, uint64(n), Widthint)
for t1 = t.Type; t1 != nil; t1 = t1.Down {
// ../../runtime/type.go:/structField
- if t1.Sym != nil && !(t1.Embedded != 0) {
+ if t1.Sym != nil && t1.Embedded == 0 {
ot = dgostringptr(s, ot, t1.Sym.Name)
if exportname(t1.Sym.Name) {
ot = dgostringptr(s, ot, "")
return s
}
-func usegcprog(t *Type) int {
+func usegcprog(t *Type) bool {
var size int64
var nptr int64
if !haspointers(t) {
- return 0
+ return false
}
if t.Width == BADWIDTH {
dowidth(t)
// Large objects usually contain arrays; and even if they don't,
// the program uses 2 bits per word while the mask uses 4 bits per word,
// so the program is still smaller.
- return bool2int(size > int64(2*Widthptr))
+ return size > int64(2*Widthptr)
}
// Generates sparse GC bitmask (4 bits per word).
var nptr int64
var i int64
var j int64
- var half int
+ var half bool
var bits uint8
var pos []byte
pos = gcmask
nptr = (t.Width + int64(Widthptr) - 1) / int64(Widthptr)
- half = 0
+ half = false
// If number of words is odd, repeat the mask.
// This makes simpler handling of arrays in runtime.
bits = obj.BitsScalar
}
bits <<= 2
- if half != 0 {
+ if half {
bits <<= 4
}
pos[0] |= byte(bits)
- half = bool2int(!(half != 0))
- if !(half != 0) {
+ half = !half
+ if !half {
pos = pos[1:]
}
}
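
// A standalone sketch of the byte layout the half toggle produces: two
// 4-bit entries per byte, the first value in the low nibble, the second
// shifted into the high nibble.

package sketch

func packNibbles(vals []byte) []byte {
	out := make([]byte, (len(vals)+1)/2)
	for i, v := range vals {
		if i%2 == 0 {
			out[i/2] = v & 0x0f // even index: low nibble
		} else {
			out[i/2] |= (v & 0x0f) << 4 // odd index: high nibble
		}
	}
	return out
}
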
*xoffset += t.Width
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
proggendata(g, obj.BitsPointer)
proggendata(g, obj.BitsScalar)
proggendata(g, obj.BitsScalar)
var v *Var
var node *Node
- for bany(&bit) != 0 {
+ for bany(&bit) {
// convert each bit to a variable
i = bnum(bit)
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(rn)
p1.From.Name = obj.NAME_NONE
- if !(f != 0) {
+ if f == 0 {
p1.From = *a
- *a = obj.Zprog.From
+ *a = obj.Addr{}
a.Type = obj.TYPE_REG
a.Reg = int16(rn)
}
Ostats.Nspill++
}
-func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) int {
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
var t1 int64
var t2 int64
t1 = o1 + int64(w1)
t2 = o2 + int64(w2)
- if !(t1 > o2 && t2 > o1) {
- return 0
+ if t1 <= o2 || t2 <= o1 {
+ return false
}
- return 1
+ return true
}
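// (The rewrite states the overlap test positively: half-open ranges
// [o1,o1+w1) and [o2,o2+w2) intersect exactly when each starts before the
// other ends, i.e. t1 > o2 && t2 > o1.)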
func mkvar(f *Flow, a *obj.Addr) Bits {
if int(v.etype) == et {
if int64(v.width) == w {
// TODO(rsc): Remove special case for arm here.
- if !(flag != 0) || Thearch.Thechar != '5' {
+ if flag == 0 || Thearch.Thechar != '5' {
return blsh(uint(i))
}
}
}
// if they overlap, disable both
- if overlap_reg(v.offset, v.width, o, int(w)) != 0 {
+ if overlap_reg(v.offset, v.width, o, int(w)) {
// print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
v.addr = 1
switch f1.Prog.As {
case obj.ACALL:
- if Noreturn(f1.Prog) != 0 {
+ if Noreturn(f1.Prog) {
break
}
// This will set the bits at most twice, keeping the overall loop linear.
v1, _ = v.node.Opt.(*Var)
- if v == v1 || !(btest(&cal, uint(v1.id)) != 0) {
+ if v == v1 || !btest(&cal, uint(v1.id)) {
for ; v1 != nil; v1 = v1.nextinnode {
biset(&cal, uint(v1.id))
}
return
}
for {
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
break
}
r1 = f1.Data.(*Reg)
- if !(r1.refahead.b[z]&bb != 0) {
+ if r1.refahead.b[z]&bb == 0 {
break
}
if r1.act.b[z]&bb != 0 {
}
}
- if !(r.refahead.b[z]&bb != 0) {
+ if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
if r.act.b[z]&bb != 0 {
break
}
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
}
bb = 1 << uint(bn%64)
vreg = regbits
r = f.Data.(*Reg)
- if !(r.act.b[z]&bb != 0) {
+ if r.act.b[z]&bb == 0 {
return vreg
}
for {
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
break
}
r1 = f1.Data.(*Reg)
- if !(r1.refahead.b[z]&bb != 0) {
+ if r1.refahead.b[z]&bb == 0 {
break
}
- if !(r1.act.b[z]&bb != 0) {
+ if r1.act.b[z]&bb == 0 {
break
}
f = f1
}
}
- if !(r.refahead.b[z]&bb != 0) {
+ if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
break
}
r = f.Data.(*Reg)
- if !(r.act.b[z]&bb != 0) {
+ if r.act.b[z]&bb == 0 {
break
}
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
}
return
}
for {
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
f1 = f.P1
break
}
r1 = f1.Data.(*Reg)
- if !(r1.refahead.b[z]&bb != 0) {
+ if r1.refahead.b[z]&bb == 0 {
break
}
if r1.act.b[z]&bb != 0 {
}
}
- if !(r.refahead.b[z]&bb != 0) {
+ if r.refahead.b[z]&bb == 0 {
break
}
f1 = f.S2
if r.act.b[z]&bb != 0 {
break
}
- if !(r.refbehind.b[z]&bb != 0) {
+ if r.refbehind.b[z]&bb == 0 {
break
}
}
for z = 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
}
- if bany(&bit) != 0 {
+ if bany(&bit) {
fmt.Printf("\t")
- if bany(&r.set) != 0 {
+ if bany(&r.set) {
fmt.Printf(" s:%v", Qconv(r.set, 0))
}
- if bany(&r.use1) != 0 {
+ if bany(&r.use1) {
fmt.Printf(" u1:%v", Qconv(r.use1, 0))
}
- if bany(&r.use2) != 0 {
+ if bany(&r.use2) {
fmt.Printf(" u2:%v", Qconv(r.use2, 0))
}
- if bany(&r.refbehind) != 0 {
+ if bany(&r.refbehind) {
fmt.Printf(" rb:%v ", Qconv(r.refbehind, 0))
}
- if bany(&r.refahead) != 0 {
+ if bany(&r.refahead) {
fmt.Printf(" ra:%v ", Qconv(r.refahead, 0))
}
- if bany(&r.calbehind) != 0 {
+ if bany(&r.calbehind) {
fmt.Printf(" cb:%v ", Qconv(r.calbehind, 0))
}
- if bany(&r.calahead) != 0 {
+ if bany(&r.calahead) {
fmt.Printf(" ca:%v ", Qconv(r.calahead, 0))
}
- if bany(&r.regdiff) != 0 {
+ if bany(&r.regdiff) {
fmt.Printf(" d:%v ", Qconv(r.regdiff, 0))
}
- if bany(&r.act) != 0 {
+ if bany(&r.act) {
fmt.Printf(" a:%v ", Qconv(r.act, 0))
}
}
r.set.b[0] |= info.Regset
bit = mkvar(f, &p.From)
- if bany(&bit) != 0 {
+ if bany(&bit) {
if info.Flags&LeftAddr != 0 {
setaddrs(bit)
}
}
bit = mkvar(f, &p.To)
- if bany(&bit) != 0 {
+ if bany(&bit) {
if info.Flags&RightAddr != 0 {
setaddrs(bit)
}
for f = firstf; f != nil; f = f.Link {
p = f.Prog
- if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) != 0 && ((p.To.Node).(*Node)).Opt != nil {
+ if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt != nil {
active++
walkvardef(p.To.Node.(*Node), f, active)
}
for f = firstf; f != nil; f = f1 {
f1 = f.Link
- if f1 != nil && f1.Active != 0 && !(f.Active != 0) {
+ if f1 != nil && f1.Active != 0 && f.Active == 0 {
prop(f, zbits, zbits)
i = 1
}
for z = 0; z < BITS; z++ {
bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
}
- if bany(&bit) != 0 && !(f.Refset != 0) {
+ if bany(&bit) && f.Refset == 0 {
// should never happen - all variables are preset
if Debug['w'] != 0 {
fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), Qconv(bit, 0))
for z = 0; z < BITS; z++ {
bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
}
- if bany(&bit) != 0 && !(f.Refset != 0) {
+ if bany(&bit) && f.Refset == 0 {
if Debug['w'] != 0 {
fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), Qconv(bit, 0))
}
for z = 0; z < BITS; z++ {
bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
}
- for bany(&bit) != 0 {
+ for bany(&bit) {
i = bnum(bit)
change = 0
paint1(f, i)
* pass 7
* peep-hole on basic block
*/
- if !(Debug['R'] != 0) || Debug['P'] != 0 {
+ if Debug['R'] == 0 || Debug['P'] != 0 {
Thearch.Peep(firstp)
}
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
- fallthrough
// ok already
case OSEND:
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
- fallthrough
// if selectnbsend(c, v) { body } else { default body }
case OSEND:
switch n.Op {
default:
Fatal("select %v", Oconv(int(n.Op), 0))
- fallthrough
// selectsend(sel *byte, hchan *chan any, elem *any) (selected bool);
case OSEND:
if n.Defn.Left != n {
goto bad
}
- if isblank(n.Defn.Left) && candiscard(n.Defn.Right) != 0 {
+ if isblank(n.Defn.Left) && candiscard(n.Defn.Right) {
n.Defn.Op = OEMPTY
n.Defn.Left = nil
n.Defn.Right = nil
if Debug['j'] != 0 {
fmt.Printf("%v\n", Sconv(n.Sym, 0))
}
- if isblank(n) || !(staticinit(n, out) != 0) {
+ if isblank(n) || !staticinit(n, out) {
if Debug['%'] != 0 {
Dump("nonstatic", n.Defn)
}
* compilation of top-level (static) assignments
* into DATA statements if at all possible.
*/
-func staticinit(n *Node, out **NodeList) int {
+func staticinit(n *Node, out **NodeList) bool {
var l *Node
var r *Node
// like staticassign but we are copying an already
// initialized value r.
-func staticcopy(l *Node, r *Node, out **NodeList) int {
+func staticcopy(l *Node, r *Node, out **NodeList) bool {
var i int
var e *InitEntry
var p *InitPlan
var n1 Node
if r.Op != ONAME || r.Class != PEXTERN || r.Sym.Pkg != localpkg {
- return 0
+ return false
}
if r.Defn == nil { // probably zeroed but perhaps supplied externally and of unknown value
- return 0
+ return false
}
if r.Defn.Op != OAS {
- return 0
+ return false
}
orig = r
r = r.Defn.Right
switch r.Op {
case ONAME:
- if staticcopy(l, r, out) != 0 {
- return 1
+ if staticcopy(l, r, out) {
+ return true
}
*out = list(*out, Nod(OAS, l, r))
- return 1
+ return true
case OLITERAL:
- if iszero(r) != 0 {
- return 1
+ if iszero(r) {
+ return true
}
gdata(l, r, int(l.Type.Width))
- return 1
+ return true
case OADDR:
switch r.Left.Op {
case ONAME:
gdata(l, r, int(l.Type.Width))
- return 1
+ return true
}
case OPTRLIT:
OMAPLIT:
gdata(l, Nod(OADDR, r.Nname, nil), int(l.Type.Width))
- return 1
+ return true
}
case OARRAYLIT:
- if Isslice(r.Type) != 0 {
+ if Isslice(r.Type) {
// copy slice
a = r.Nname
gdata(&n1, r.Right, Widthint)
n1.Xoffset = l.Xoffset + int64(Array_cap)
gdata(&n1, r.Right, Widthint)
- return 1
+ return true
}
fallthrough
ll = Nod(OXXX, nil, nil)
*ll = n1
ll.Orig = ll // completely separate copy
- if !(staticassign(ll, e.Expr, out) != 0) {
+ if !staticassign(ll, e.Expr, out) {
// Requires computation, but we're
// copying someone else's computation.
rr = Nod(OXXX, nil, nil)
}
}
- return 1
+ return true
}
- return 0
+ return false
}
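// (The payoff of staticcopy: a top-level `var b = a`, where a is itself a
// compile-time-known composite, can be emitted as DATA statements copying
// a's bytes, with no init-time code at all.)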
-func staticassign(l *Node, r *Node, out **NodeList) int {
+func staticassign(l *Node, r *Node, out **NodeList) bool {
var a *Node
var n1 Node
var nam Node
}
case OLITERAL:
- if iszero(r) != 0 {
- return 1
+ if iszero(r) {
+ return true
}
gdata(l, r, int(l.Type.Width))
- return 1
+ return true
case OADDR:
- if stataddr(&nam, r.Left) != 0 {
+ if stataddr(&nam, r.Left) {
n1 = *r
n1.Left = &nam
gdata(l, &n1, int(l.Type.Width))
- return 1
+ return true
}
fallthrough
gdata(l, Nod(OADDR, a, nil), int(l.Type.Width))
// Init underlying literal.
- if !(staticassign(a, r.Left, out) != 0) {
+ if !staticassign(a, r.Left, out) {
*out = list(*out, Nod(OAS, a, r.Left))
}
- return 1
+ return true
}
case OSTRARRAYBYTE:
if l.Class == PEXTERN && r.Left.Op == OLITERAL {
sval = r.Left.Val.U.Sval
slicebytes(l, sval.S, len(sval.S))
- return 1
+ return true
}
case OARRAYLIT:
initplan(r)
- if Isslice(r.Type) != 0 {
+ if Isslice(r.Type) {
// Init slice.
ta = typ(TARRAY)
a = Nod(OXXX, nil, nil)
*a = n1
a.Orig = a // completely separate copy
- if !(staticassign(a, e.Expr, out) != 0) {
+ if !staticassign(a, e.Expr, out) {
*out = list(*out, Nod(OAS, a, e.Expr))
}
}
}
- return 1
+ return true
// TODO: Table-driven map insert.
case OMAPLIT:
break
}
- return 0
+ return false
}
/*
namebuf = fmt.Sprintf("statictmp_%.4d", statuniqgen)
statuniqgen++
n = newname(Lookup(namebuf))
- if !(ctxt != 0) {
+ if ctxt == 0 {
n.Readonly = 1
}
addvar(n, t, PEXTERN)
return n
}
-func isliteral(n *Node) int {
+func isliteral(n *Node) bool {
if n.Op == OLITERAL {
if n.Val.Ctype != CTNIL {
- return 1
+ return true
}
}
- return 0
+ return false
}
-func simplename(n *Node) int {
+func simplename(n *Node) bool {
if n.Op != ONAME {
goto no
}
- if !(n.Addable != 0) {
+ if n.Addable == 0 {
goto no
}
if n.Class&PHEAP != 0 {
if n.Class == PPARAMREF {
goto no
}
- return 1
+ return true
no:
- return 0
+ return false
}
func litas(l *Node, r *Node, init **NodeList) {
mode = 0
switch n.Op {
default:
- if isliteral(n) != 0 {
+ if isliteral(n) {
return MODECONST
}
return MODEDYNAM
case OARRAYLIT:
- if !(top != 0) && n.Type.Bound < 0 {
+ if top == 0 && n.Type.Bound < 0 {
return MODEDYNAM
}
fallthrough
continue
}
- if isliteral(value) != 0 {
+ if isliteral(value) {
if pass == 2 {
continue
}
continue
}
- if isliteral(index) != 0 && isliteral(value) != 0 {
+ if isliteral(index) && isliteral(value) {
if pass == 2 {
continue
}
index = r.Left
value = r.Right
a = Nod(OINDEX, var_, index)
- a.Bounded = 1
+ a.Bounded = true
// TODO need to check bounds?
continue
}
- if isliteral(index) != 0 && isliteral(value) != 0 {
+ if isliteral(index) && isliteral(value) {
continue
}
index = r.Left
value = r.Right
- if isliteral(index) != 0 && isliteral(value) != 0 {
+ if isliteral(index) && isliteral(value) {
b++
}
}
index = r.Left
value = r.Right
- if isliteral(index) != 0 && isliteral(value) != 0 {
+ if isliteral(index) && isliteral(value) {
// build vstat[b].a = key;
a = Nodintconst(b)
index = temp(Types[TINT])
a = Nod(OINDEX, vstat, index)
- a.Bounded = 1
+ a.Bounded = true
a = Nod(ODOT, a, newname(symb))
r = Nod(OINDEX, vstat, index)
- r.Bounded = 1
+ r.Bounded = true
r = Nod(ODOT, r, newname(syma))
r = Nod(OINDEX, var_, r)
index = r.Left
value = r.Right
- if isliteral(index) != 0 && isliteral(value) != 0 {
+ if isliteral(index) && isliteral(value) {
continue
}
switch n.Op {
default:
Fatal("anylit: not lit")
- fallthrough
case OPTRLIT:
- if !(Isptr[t.Etype] != 0) {
+ if Isptr[t.Etype] == 0 {
Fatal("anylit: not ptr")
}
Fatal("anylit: not struct")
}
- if simplename(var_) != 0 && count(n.List) > 4 {
+ if simplename(var_) && count(n.List) > 4 {
if ctxt == 0 {
// lay out static data
vstat = staticname(t, ctxt)
}
// initialize when not completely specified
- if simplename(var_) != 0 || count(n.List) < structcount(t) {
+ if simplename(var_) || count(n.List) < structcount(t) {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
break
}
- if simplename(var_) != 0 && count(n.List) > 4 {
+ if simplename(var_) && count(n.List) > 4 {
if ctxt == 0 {
// lay out static data
vstat = staticname(t, ctxt)
}
// initialize when not completely specified
- if simplename(var_) != 0 || int64(count(n.List)) < t.Bound {
+ if simplename(var_) || int64(count(n.List)) < t.Bound {
a = Nod(OAS, var_, nil)
typecheck(&a, Etop)
walkexpr(&a, init)
}
}
-func oaslit(n *Node, init **NodeList) int {
+func oaslit(n *Node, init **NodeList) bool {
var ctxt int
if n.Left == nil || n.Right == nil {
if n.Left.Type == nil || n.Right.Type == nil {
goto no
}
- if !(simplename(n.Left) != 0) {
+ if !simplename(n.Left) {
goto no
}
if !Eqtype(n.Left.Type, n.Right.Type) {
case OSTRUCTLIT,
OARRAYLIT,
OMAPLIT:
- if vmatch1(n.Left, n.Right) != 0 {
+ if vmatch1(n.Left, n.Right) {
goto no
}
anylit(ctxt, n.Right, n.Left, init)
}
n.Op = OEMPTY
- return 1
+ return true
// not a special composite literal assignment
no:
- return 0
+ return false
}
func getlit(lit *Node) int {
- if Smallintconst(lit) != 0 {
+ if Smallintconst(lit) {
return int(Mpgetfix(lit.Val.U.Xval))
}
return -1
}
-func stataddr(nam *Node, n *Node) int {
+func stataddr(nam *Node, n *Node) bool {
var l int
if n == nil {
switch n.Op {
case ONAME:
*nam = *n
- return int(n.Addable)
+ return n.Addable != 0
case ODOT:
- if !(stataddr(nam, n.Left) != 0) {
+ if !stataddr(nam, n.Left) {
break
}
nam.Xoffset += n.Xoffset
nam.Type = n.Type
- return 1
+ return true
case OINDEX:
if n.Left.Type.Bound < 0 {
break
}
- if !(stataddr(nam, n.Left) != 0) {
+ if !stataddr(nam, n.Left) {
break
}
l = getlit(n.Right)
}
nam.Xoffset += int64(l) * n.Type.Width
nam.Type = n.Type
- return 1
+ return true
}
no:
- return 0
+ return false
}
func initplan(n *Node) {
switch n.Op {
default:
Fatal("initplan")
- fallthrough
case OARRAYLIT:
for l = n.List; l != nil; l = l.Next {
a = l.N
- if a.Op != OKEY || !(Smallintconst(a.Left) != 0) {
+ if a.Op != OKEY || !Smallintconst(a.Left) {
Fatal("initplan arraylit")
}
addvalue(p, n.Type.Type.Width*Mpgetfix(a.Left.Val.U.Xval), nil, a.Right)
var e *InitEntry
// special case: zero can be dropped entirely
- if iszero(n) != 0 {
+ if iszero(n) {
p.Zero += n.Type.Width
return
}
// special case: inline struct and array (not slice) literals
- if isvaluelit(n) != 0 {
+ if isvaluelit(n) {
initplan(n)
q = n.Initplan
for i = 0; i < len(q.E); i++ {
e.Expr = n
}
-func iszero(n *Node) int {
+func iszero(n *Node) bool {
var l *NodeList
switch n.Op {
default:
Dump("unexpected literal", n)
Fatal("iszero")
- fallthrough
case CTNIL:
- return 1
+ return true
case CTSTR:
- return bool2int(n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0)
+ return n.Val.U.Sval == nil || len(n.Val.U.Sval.S) == 0
case CTBOOL:
- return bool2int(n.Val.U.Bval == 0)
+ return n.Val.U.Bval == 0
case CTINT,
CTRUNE:
- return bool2int(mpcmpfixc(n.Val.U.Xval, 0) == 0)
+ return mpcmpfixc(n.Val.U.Xval, 0) == 0
case CTFLT:
- return bool2int(mpcmpfltc(n.Val.U.Fval, 0) == 0)
+ return mpcmpfltc(n.Val.U.Fval, 0) == 0
case CTCPLX:
- return bool2int(mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0)
+ return mpcmpfltc(&n.Val.U.Cval.Real, 0) == 0 && mpcmpfltc(&n.Val.U.Cval.Imag, 0) == 0
}
case OARRAYLIT:
- if Isslice(n.Type) != 0 {
+ if Isslice(n.Type) {
break
}
fallthrough
// fall through
case OSTRUCTLIT:
for l = n.List; l != nil; l = l.Next {
- if !(iszero(l.N.Right) != 0) {
- return 0
+ if !iszero(l.N.Right) {
+ return false
}
}
- return 1
+ return true
}
- return 0
+ return false
}
-func isvaluelit(n *Node) int {
- return bool2int((n.Op == OARRAYLIT && Isfixedarray(n.Type) != 0) || n.Op == OSTRUCTLIT)
+func isvaluelit(n *Node) bool {
+ return (n.Op == OARRAYLIT && Isfixedarray(n.Type)) || n.Op == OSTRUCTLIT
}
func entry(p *InitPlan) *InitEntry {
return &p.E[len(p.E)-1]
}
-func gen_as_init(n *Node) int {
+func gen_as_init(n *Node) bool {
var nr *Node
var nl *Node
var nam Node
nr = n.Right
nl = n.Left
if nr == nil {
- if !(stataddr(&nam, nl) != 0) {
+ if !stataddr(&nam, nl) {
goto no
}
if nam.Class != PEXTERN {
goto no
}
- if !(stataddr(&nam, nl) != 0) {
+ if !stataddr(&nam, nl) {
goto no
}
}
yes:
- return 1
+ return true
slice:
gused(nil) // in case the data is the dest of a goto
Fatal("gen_as_init couldnt make data statement")
}
- return 0
+ return false
}
hcrash()
nerrors++
- if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+ if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
errorexit()
hcrash()
nerrors++
- if nsavederrors+nerrors >= 10 && !(Debug['e'] != 0) {
+ if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
Flusherrors()
fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
errorexit()
fmt.Printf(" at line %v\n", Ctxt.Line(int(lexlineno)))
}
- if off < 0 && file[0] != '/' && !(relative != 0) {
+ if off < 0 && file[0] != '/' && relative == 0 {
file = fmt.Sprintf("%s/%s", Ctxt.Pathname, file)
}
obj.Linklinehist(Ctxt, int(lexlineno), file, int(off))
}
}
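// A note on the added gethunk below: the string(make([]byte, nh)) hunk and
// the "out of memory" branch are faithful carry-overs from the C allocator
// this was translated from; in Go, make either succeeds or panics, so that
// branch is effectively unreachable.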
+func gethunk() {
+ var h string
+ var nh int32
+
+ nh = NHUNK
+ if thunk >= 10*NHUNK {
+ nh = 10 * NHUNK
+ }
+ h = string(make([]byte, nh))
+ if h == "" {
+ Flusherrors()
+ Yyerror("out of memory")
+ errorexit()
+ }
+
+ hunk = h
+ nhunk = nh
+ thunk += nh
+}
+
func Nod(op int, nleft *Node, nright *Node) *Node {
var n *Node
// ispaddedfield reports whether the given field
// is followed by padding. For the case where t is
// the last field, total gives the size of the enclosing struct.
-func ispaddedfield(t *Type, total int64) int {
+func ispaddedfield(t *Type, total int64) bool {
if t.Etype != TFIELD {
Fatal("ispaddedfield called non-field %v", Tconv(t, 0))
}
if t.Down == nil {
- return bool2int(t.Width+t.Type.Width != total)
+ return t.Width+t.Type.Width != total
}
- return bool2int(t.Width+t.Type.Width != t.Down.Width)
+ return t.Width+t.Type.Width != t.Down.Width
}
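
// A standalone sketch of the padding ispaddedfield detects: on a 64-bit
// target this struct has 7 bytes of padding after b, so whole-struct
// memory comparison would read garbage bytes and must be rejected.

package sketch

type padded struct {
	b byte // followed by 7 padding bytes (64-bit alignment of x)
	x int64
}
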
func algtype1(t *Type, bad **Type) int {
return ASTRING
case TINTER:
- if isnilinter(t) != 0 {
+ if isnilinter(t) {
return ANILINTER
}
return AINTER
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
if bad != nil {
*bad = t
}
// Blank fields, padded fields, fields with non-memory
// equality need special compare.
- if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) != 0 {
+ if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
ret = -1
continue
}
a = algtype1(t, nil)
if a == AMEM || a == ANOEQ {
- if Isslice(t) != 0 {
+ if Isslice(t) {
return ASLICE
}
switch t.Width {
for {
tmp11 := i
i--
- if !(tmp11 > 0) {
+ if tmp11 <= 0 {
break
}
a[i].Down = f
return c
}
-func Nodbool(b int) *Node {
+func Nodbool(b bool) *Node {
var c *Node
c = Nodintconst(0)
c.Val.Ctype = CTBOOL
- c.Val.U.Bval = int16(b)
+ c.Val.U.Bval = int16(bool2int(b))
c.Type = idealbool
return c
}
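
// Nodbool still stores its flag in the int16 Bval field, so the conversion
// leans on a small bridge helper at boundaries like this; presumably the
// codebase's bool2int is the obvious one-liner:

package sketch

func bool2int(b bool) int {
	if b {
		return 1
	}
	return 0
}
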
return m
}
-func isnil(n *Node) int {
+func isnil(n *Node) bool {
if n == nil {
- return 0
+ return false
}
if n.Op != OLITERAL {
- return 0
+ return false
}
if n.Val.Ctype != CTNIL {
- return 0
+ return false
}
- return 1
+ return true
}
-func isptrto(t *Type, et int) int {
+func isptrto(t *Type, et int) bool {
if t == nil {
- return 0
+ return false
}
- if !(Isptr[t.Etype] != 0) {
- return 0
+ if Isptr[t.Etype] == 0 {
+ return false
}
t = t.Type
if t == nil {
- return 0
+ return false
}
if int(t.Etype) != et {
- return 0
+ return false
}
- return 1
+ return true
}
-func Istype(t *Type, et int) int {
- return bool2int(t != nil && int(t.Etype) == et)
+func Istype(t *Type, et int) bool {
+ return t != nil && int(t.Etype) == et
}
-func Isfixedarray(t *Type) int {
- return bool2int(t != nil && t.Etype == TARRAY && t.Bound >= 0)
+func Isfixedarray(t *Type) bool {
+ return t != nil && t.Etype == TARRAY && t.Bound >= 0
}
-func Isslice(t *Type) int {
- return bool2int(t != nil && t.Etype == TARRAY && t.Bound < 0)
+func Isslice(t *Type) bool {
+ return t != nil && t.Etype == TARRAY && t.Bound < 0
}
func isblank(n *Node) bool {
return s != nil && s.Name == "_"
}
-func Isinter(t *Type) int {
- return bool2int(t != nil && t.Etype == TINTER)
+func Isinter(t *Type) bool {
+ return t != nil && t.Etype == TINTER
}
-func isnilinter(t *Type) int {
- if !(Isinter(t) != 0) {
- return 0
+func isnilinter(t *Type) bool {
+ if !Isinter(t) {
+ return false
}
if t.Type != nil {
- return 0
+ return false
}
- return 1
+ return true
}
-func isideal(t *Type) int {
+func isideal(t *Type) bool {
if t == nil {
- return 0
+ return false
}
if t == idealstring || t == idealbool {
- return 1
+ return true
}
switch t.Etype {
case TNIL,
TIDEAL:
- return 1
+ return true
}
- return 0
+ return false
}
/*
}
// check types
- if !(issimple[t.Etype] != 0) {
+ if issimple[t.Etype] == 0 {
switch t.Etype {
default:
return nil
next *TypePairList
}
-func onlist(l *TypePairList, t1 *Type, t2 *Type) int {
+func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
for ; l != nil; l = l.next {
if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
- return 1
+ return true
}
}
- return 0
+ return false
}
// Reports whether t1 and t2 are identical, following the spec rules.
// pointer (t1 == t2), so there's no chance of chasing cycles
// ad infinitum, so no need for a depth counter.
func Eqtype(t1 *Type, t2 *Type) bool {
- return eqtype1(t1, t2, nil) != 0
+ return eqtype1(t1, t2, nil)
}
-func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) int {
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
var l TypePairList
if t1 == t2 {
- return 1
+ return true
}
if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
- return 0
+ return false
}
if t1.Sym != nil || t2.Sym != nil {
// Special case: we keep byte and uint8 separate
switch t1.Etype {
case TUINT8:
if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
- return 1
+ return true
}
case TINT,
TINT32:
if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
- return 1
+ return true
}
}
- return 0
+ return false
}
- if onlist(assumed_equal, t1, t2) != 0 {
- return 1
+ if onlist(assumed_equal, t1, t2) {
+ return true
}
l.next = assumed_equal
l.t1 = t1
if t1.Etype != TFIELD || t2.Etype != TFIELD {
Fatal("struct/interface missing field: %v %v", Tconv(t1, 0), Tconv(t2, 0))
}
- if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !(eqtype1(t1.Type, t2.Type, &l) != 0) || !eqnote(t1.Note, t2.Note) {
+ if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
goto no
}
}
if ta.Etype != TFIELD || tb.Etype != TFIELD {
Fatal("func struct missing field: %v %v", Tconv(ta, 0), Tconv(tb, 0))
}
- if ta.Isddd != tb.Isddd || !(eqtype1(ta.Type, tb.Type, &l) != 0) {
+ if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
goto no
}
}
}
}
- if eqtype1(t1.Down, t2.Down, &l) != 0 && eqtype1(t1.Type, t2.Type, &l) != 0 {
+ if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
goto yes
}
goto no
yes:
- return 1
+ return true
no:
- return 0
+ return false
}
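// (The assumed_equal list is what makes eqtype1 terminate on recursive
// types: each pair under comparison is pushed onto a stack-allocated
// TypePairList and, if revisited, reported equal; the standard
// coinductive-equality trick.)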
// Are t1 and t2 equal struct types when field names are ignored?
// For deciding whether the result struct from g can be copied
// directly when compiling f(g()).
-func eqtypenoname(t1 *Type, t2 *Type) int {
+func eqtypenoname(t1 *Type, t2 *Type) bool {
if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
- return 0
+ return false
}
t1 = t1.Type
t2 = t2.Type
for {
if !Eqtype(t1, t2) {
- return 0
+ return false
}
if t1 == nil {
- return 1
+ return true
}
t1 = t1.Down
t2 = t2.Down
// both are empty interface types.
// For assignable but different non-empty interface types,
// we want to recompute the itab.
- if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src) != 0) {
+ if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
return OCONVNOP
}
// 3. dst is an interface type and src implements dst.
if dst.Etype == TINTER && src.Etype != TNIL {
- if implements(src, dst, &missing, &have, &ptr) != 0 {
+ if implements(src, dst, &missing, &have, &ptr) {
return OCONVIFACE
}
}
if why != nil {
- if isptrto(src, TINTER) != 0 {
+ if isptrto(src, TINTER) {
*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(src, 0))
- } else if have != nil && have.Sym == missing.Sym && have.Nointerface != 0 {
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface {
*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0))
} else if have != nil && have.Sym == missing.Sym {
*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(src, 0), Tconv(dst, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
return 0
}
- if isptrto(dst, TINTER) != 0 {
+ if isptrto(dst, TINTER) {
if why != nil {
*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", Tconv(dst, 0))
}
}
if src.Etype == TINTER && dst.Etype != TBLANK {
- if why != nil && implements(dst, src, &missing, &have, &ptr) != 0 {
+ if why != nil && implements(dst, src, &missing, &have, &ptr) {
*why = ": need type assertion"
}
return 0
return ORUNESTR
}
- if Isslice(src) != 0 && dst.Etype == TSTRING {
+ if Isslice(src) && dst.Etype == TSTRING {
if src.Type.Etype == bytetype.Etype {
return OARRAYBYTESTR
}
// 7. src is a string and dst is []byte or []rune.
// String to slice.
- if src.Etype == TSTRING && Isslice(dst) != 0 {
+ if src.Etype == TSTRING && Isslice(dst) {
if dst.Type.Etype == bytetype.Etype {
return OSTRARRAYBYTE
}
return r
}
-func subtype(stp **Type, t *Type, d int) int {
+func subtype(stp **Type, t *Type, d int) bool {
var st *Type
loop:
st = *stp
if st == nil {
- return 0
+ return false
}
d++
if d >= 10 {
- return 0
+ return false
}
switch st.Etype {
default:
- return 0
+ return false
case TPTR32,
TPTR64,
goto loop
case TANY:
- if !(st.Copyany != 0) {
- return 0
+ if st.Copyany == 0 {
+ return false
}
*stp = t
case TMAP:
- if subtype(&st.Down, t, d) != 0 {
+ if subtype(&st.Down, t, d) {
break
}
stp = &st.Type
case TFUNC:
for {
- if subtype(&st.Type, t, d) != 0 {
+ if subtype(&st.Type, t, d) {
break
}
- if subtype(&st.Type.Down.Down, t, d) != 0 {
+ if subtype(&st.Type.Down.Down, t, d) {
break
}
- if subtype(&st.Type.Down, t, d) != 0 {
+ if subtype(&st.Type.Down, t, d) {
break
}
- return 0
+ return false
}
case TSTRUCT:
for st = st.Type; st != nil; st = st.Down {
- if subtype(&st.Type, t, d) != 0 {
- return 1
+ if subtype(&st.Type, t, d) {
+ return true
}
}
- return 0
+ return false
}
- return 1
+ return true
}
/*
* Is this a 64-bit type?
*/
-func Is64(t *Type) int {
+func Is64(t *Type) bool {
if t == nil {
- return 0
+ return false
}
switch Simtype[t.Etype] {
case TINT64,
TUINT64,
TPTR64:
- return 1
+ return true
}
- return 0
+ return false
}
/*
* Is a conversion between t1 and t2 a no-op?
*/
-func Noconv(t1 *Type, t2 *Type) int {
+func Noconv(t1 *Type, t2 *Type) bool {
var e1 int
var e2 int
switch e1 {
case TINT8,
TUINT8:
- return bool2int(e2 == TINT8 || e2 == TUINT8)
+ return e2 == TINT8 || e2 == TUINT8
case TINT16,
TUINT16:
- return bool2int(e2 == TINT16 || e2 == TUINT16)
+ return e2 == TINT16 || e2 == TUINT16
case TINT32,
TUINT32,
TPTR32:
- return bool2int(e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32)
+ return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
case TINT64,
TUINT64,
TPTR64:
- return bool2int(e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64)
+ return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
case TFLOAT32:
- return bool2int(e2 == TFLOAT32)
+ return e2 == TFLOAT32
case TFLOAT64:
- return bool2int(e2 == TFLOAT64)
+ return e2 == TFLOAT64
}
- return 0
+ return false
}
func argtype(on *Node, t *Type) {
dowidth(t)
- if !(subtype(&on.Type, t, 0) != 0) {
+ if !subtype(&on.Type, t, 0) {
Fatal("argtype: failed %v %v\n", Nconv(on, 0), Tconv(t, 0))
}
}
Fatal("syslook: can't find runtime.%s", name)
}
- if !(copy != 0) {
+ if copy == 0 {
return s.Def
}
var fp *Type
fp = structnext(s)
- if fp == nil && !(s.Done != 0) {
+ if fp == nil && s.Done == 0 {
s.Done = 1
fp = Structfirst(s, getinarg(s.Tfunc))
}
}
// make a copy; must not be used as an lvalue
- if islvalue(n) != 0 {
+ if islvalue(n) {
Fatal("missing lvalue case in safeexpr: %v", Nconv(n, 0))
}
return cheapexpr(n, init)
* assignment to it.
*/
func localexpr(n *Node, t *Type, init **NodeList) *Node {
- if n.Op == ONAME && (!(n.Addrtaken != 0) || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
+ if n.Op == ONAME && (n.Addrtaken == 0 || strings.HasPrefix(n.Sym.Name, "autotmp_")) && (n.Class == PAUTO || n.Class == PPARAM || n.Class == PPARAMOUT) && convertop(n.Type, t, nil) == OCONVNOP {
return n
}
d--
for f = u.Type; f != nil; f = f.Down {
- if !(f.Embedded != 0) {
+ if f.Embedded == 0 {
continue
}
if f.Sym == nil {
}
for f = u.Type; f != nil; f = f.Down {
- if !(f.Embedded != 0) {
+ if f.Embedded == 0 {
continue
}
if f.Sym == nil {
dot = adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
// generate call
- if !(flag_race != 0) && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !(isifacemethod(method.Type) != 0) {
+ if flag_race == 0 && Isptr[rcvr.Etype] != 0 && Isptr[methodrcvr.Etype] != 0 && method.Embedded != 0 && !isifacemethod(method.Type) {
// generate tail call: adjust pointer receiver and jump to embedded method.
dot = dot.Left // skip final .M
- if !(Isptr[dotlist[0].field.Type.Etype] != 0) {
+ if Isptr[dotlist[0].field.Type.Etype] == 0 {
dot = Nod(OADDR, dot, nil)
}
as = Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
// Set inl_nonlocal to whether we are calling a method on a
// type defined in a different package. Checked in inlvar.
- if !(methodrcvr.Local != 0) {
+ if methodrcvr.Local == 0 {
inl_nonlocal = 1
}
switch a {
case AMEM:
Fatal("hashfor with AMEM type")
- fallthrough
case AINTER:
sym = Pkglookup("interhash", Runtimepkg)
switch t.Etype {
default:
Fatal("genhash %v", Tconv(t, 0))
- fallthrough
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
Fatal("genhash %v", Tconv(t, 0))
}
call = Nod(OCALL, hashel, nil)
nx = Nod(OINDEX, np, ni)
- nx.Bounded = 1
+ nx.Bounded = true
na = Nod(OADDR, nx, nil)
na.Etype = 1 // no escape to heap
call.List = list(call.List, na)
}
// If it's a memory field but it's padded, stop here.
- if ispaddedfield(t1, t.Width) != 0 {
+ if ispaddedfield(t1, t.Width) {
t1 = t1.Down
} else {
continue
nif = Nod(OIF, nil, nil)
nif.Ntest = Nod(ONE, nx, ny)
r = Nod(ORETURN, nil, nil)
- r.List = list(r.List, Nodbool(0))
+ r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
return nif
}
nif.Ninit = list(nif.Ninit, call)
nif.Ntest = Nod(ONOT, call, nil)
r = Nod(ORETURN, nil, nil)
- r.List = list(r.List, Nodbool(0))
+ r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
return nif
}
switch t.Etype {
default:
Fatal("geneq %v", Tconv(t, 0))
- fallthrough
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
Fatal("geneq %v", Tconv(t, 0))
}
// if p[i] != q[i] { return false }
nx = Nod(OINDEX, np, ni)
- nx.Bounded = 1
+ nx.Bounded = true
ny = Nod(OINDEX, nq, ni)
- ny.Bounded = 1
+ ny.Bounded = true
nif = Nod(OIF, nil, nil)
nif.Ntest = Nod(ONE, nx, ny)
r = Nod(ORETURN, nil, nil)
- r.List = list(r.List, Nodbool(0))
+ r.List = list(r.List, Nodbool(false))
nif.Nbody = list(nif.Nbody, r)
nrange.Nbody = list(nrange.Nbody, nif)
fn.Nbody = list(fn.Nbody, nrange)
}
// If it's a memory field but it's padded, stop here.
- if ispaddedfield(t1, t.Width) != 0 {
+ if ispaddedfield(t1, t.Width) {
t1 = t1.Down
} else {
continue
// return true
r = Nod(ORETURN, nil, nil)
- r.List = list(r.List, Nodbool(1))
+ r.List = list(r.List, Nodbool(true))
fn.Nbody = list(fn.Nbody, r)
if Debug['r'] != 0 {
return nil
}
-func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) int {
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
var t0 *Type
var im *Type
var tm *Type
t0 = t
if t == nil {
- return 0
+ return false
}
// if this is too slow,
*m = im
*samename = tm
*ptr = 0
- return 0
+ return false
}
}
*m = im
*samename = nil
*ptr = 0
- return 0
+ return false
found:
}
- return 1
+ return true
}
t = methtype(t, 0)
for im = iface.Type; im != nil; im = im.Down {
imtype = methodfunc(im.Type, nil)
tm = ifacelookdot(im.Sym, t, &followptr, 0)
- if tm == nil || tm.Nointerface != 0 || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+ if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
if tm == nil {
tm = ifacelookdot(im.Sym, t, &followptr, 1)
}
*m = im
*samename = tm
*ptr = 0
- return 0
+ return false
}
// if pointer receiver in method,
// the method does not exist for value types.
rcvr = getthisx(tm.Type).Type.Type
- if Isptr[rcvr.Etype] != 0 && !(Isptr[t0.Etype] != 0) && !(followptr != 0) && !(isifacemethod(tm.Type) != 0) {
+ if Isptr[rcvr.Etype] != 0 && Isptr[t0.Etype] == 0 && followptr == 0 && !isifacemethod(tm.Type) {
if false && Debug['r'] != 0 {
Yyerror("interface pointer mismatch")
}
*m = im
*samename = nil
*ptr = 1
- return 0
+ return false
}
}
- return 1
+ return true
}
/*
if n == nil || n.Op != OLITERAL || n.Type == nil {
goto no
}
- if !(Isint[n.Type.Etype] != 0) {
+ if Isint[n.Type.Etype] == 0 {
goto no
}
b = b << 1
}
- if !(Issigned[n.Type.Etype] != 0) {
+ if Issigned[n.Type.Etype] == 0 {
goto no
}
func checknil(x *Node, init **NodeList) {
var n *Node
- if Isinter(x.Type) != 0 {
+ if Isinter(x.Type) {
x = Nod(OITAB, x, nil)
typecheck(&x, Erv)
}
* Can this type be stored directly in an interface word?
* Yes, if the representation is a single pointer.
*/
-func isdirectiface(t *Type) int {
+func isdirectiface(t *Type) bool {
switch t.Etype {
case TPTR32,
TPTR64,
TMAP,
TFUNC,
TUNSAFEPTR:
- return 1
+ return true
// Array of 1 direct iface type can be direct.
case TARRAY:
- return bool2int(t.Bound == 1 && isdirectiface(t.Type) != 0)
+ return t.Bound == 1 && isdirectiface(t.Type)
// Struct with 1 field of direct iface type can be direct.
case TSTRUCT:
- return bool2int(t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type) != 0)
+ return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
}
- return 0
+ return false
}
var go_ *Node
var br *Node
var lno int32
- var needvar int32
+ var needvar bool
if sw.List == nil {
return
Fatal("casebody %v", Oconv(int(n.Op), 0))
}
n.Op = OCASE
- needvar = int32(bool2int(count(n.List) != 1 || n.List.N.Op == OLITERAL))
+ needvar = count(n.List) != 1 || n.List.N.Op == OLITERAL
go_ = Nod(OGOTO, newlabel_swt(), nil)
if n.List == nil {
}
stat = list(stat, Nod(OLABEL, go_.Left, nil))
- if typeswvar != nil && needvar != 0 && n.Nname != nil {
+ if typeswvar != nil && needvar && n.Nname != nil {
var l *NodeList
l = list1(Nod(ODCL, n.Nname, nil))
continue
}
- if Istype(n.Left.Type, TINTER) != 0 {
+ if Istype(n.Left.Type, TINTER) {
c.type_ = Ttypevar
continue
}
casebody(sw, nil)
arg = Snorm
- if Isconst(sw.Ntest, CTBOOL) != 0 {
+ if Isconst(sw.Ntest, CTBOOL) {
arg = Strue
if sw.Ntest.Val.U.Bval == 0 {
arg = Sfalse
cas = nil
if arg == Strue || arg == Sfalse {
- exprname = Nodbool(bool2int(arg == Strue))
+ exprname = Nodbool(arg == Strue)
} else if consttype(sw.Ntest) >= 0 {
// leave constants to enable dead code elimination (issue 9608)
exprname = sw.Ntest
}
// deal with the variables one-at-a-time
- if !(okforcmp[t.Etype] != 0) || c0.type_ != Texprconst {
+ if okforcmp[t.Etype] == 0 || c0.type_ != Texprconst {
a = exprbsw(c0, 1, arg)
cas = list(cas, a)
c0 = c0.link
}
walkexpr(&sw.Ntest.Right, &sw.Ninit)
- if !(Istype(sw.Ntest.Right.Type, TINTER) != 0) {
+ if !Istype(sw.Ntest.Right.Type, TINTER) {
Yyerror("type switch must be on an interface")
return
}
typecheck(&hashname, Erv)
t = sw.Ntest.Right.Type
- if isnilinter(t) != 0 {
+ if isnilinter(t) {
a = syslook("efacethash", 1)
} else {
a = syslook("ifacethash", 1)
* both have inserted OBREAK statements
*/
if sw.Ntest == nil {
- sw.Ntest = Nodbool(1)
+ sw.Ntest = Nodbool(true)
typecheck(&sw.Ntest, Erv)
}
t = Types[TBOOL]
}
if t != nil {
- if !(okforeq[t.Etype] != 0) {
+ if okforeq[t.Etype] == 0 {
Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
- } else if t.Etype == TARRAY && !(Isfixedarray(t) != 0) {
+ } else if t.Etype == TARRAY && !Isfixedarray(t) {
nilonly = "slice"
- } else if t.Etype == TARRAY && Isfixedarray(t) != 0 && algtype1(t, nil) == ANOEQ {
+ } else if t.Etype == TARRAY && Isfixedarray(t) && algtype1(t, nil) == ANOEQ {
Yyerror("cannot switch on %v", Nconv(n.Ntest, obj.FmtLong))
} else if t.Etype == TSTRUCT && algtype1(t, &badtype) == ANOEQ {
Yyerror("cannot switch on %v (struct containing %v cannot be compared)", Nconv(n.Ntest, obj.FmtLong), Tconv(badtype, 0))
if ll.N.Op == OTYPE {
Yyerror("type %v is not an expression", Tconv(ll.N.Type, 0))
- } else if ll.N.Type != nil && !(assignop(ll.N.Type, t, nil) != 0) && !(assignop(t, ll.N.Type, nil) != 0) {
+ } else if ll.N.Type != nil && assignop(ll.N.Type, t, nil) == 0 && assignop(t, ll.N.Type, nil) == 0 {
if n.Ntest != nil {
Yyerror("invalid case %v in switch on %v (mismatched types %v and %v)", Nconv(ll.N, 0), Nconv(n.Ntest, 0), Tconv(ll.N.Type, 0), Tconv(t, 0))
} else {
Yyerror("invalid case %v in switch (mismatched types %v and bool)", Nconv(ll.N, 0), Tconv(ll.N.Type, 0))
}
- } else if nilonly != "" && !(Isconst(ll.N, CTNIL) != 0) {
+ } else if nilonly != "" && !Isconst(ll.N, CTNIL) {
Yyerror("invalid case %v in switch (can only compare %s %v to nil)", Nconv(ll.N, 0), nilonly, Nconv(n.Ntest, 0))
}
case Etype: // type switch
- if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) != 0 {
+ if ll.N.Op == OLITERAL && Istype(ll.N.Type, TNIL) {
} else if ll.N.Op != OTYPE && ll.N.Type != nil { // should this be ||?
Yyerror("%v is not a type", Nconv(ll.N, obj.FmtLong))
// reset to original type
ll.N = n.Ntest.Right
- } else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !(implements(ll.N.Type, t, &missing, &have, &ptr) != 0) {
- if have != nil && !(missing.Broke != 0) && !(have.Broke != 0) {
+ } else if ll.N.Type.Etype != TINTER && t.Etype == TINTER && !implements(ll.N.Type, t, &missing, &have, &ptr) {
+ if have != nil && missing.Broke == 0 && have.Broke == 0 {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (wrong type for %v method)\n\thave %v%v\n\twant %v%v", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort))
- } else if !(missing.Broke != 0) {
+ } else if missing.Broke == 0 {
Yyerror("impossible type switch case: %v cannot have dynamic type %v"+" (missing %v method)", Nconv(n.Ntest.Right, obj.FmtLong), Tconv(ll.N.Type, 0), Sconv(missing.Sym, 0))
}
}
ll = ncase.List
nvar = ncase.Nname
if nvar != nil {
- if ll != nil && ll.Next == nil && ll.N.Type != nil && !(Istype(ll.N.Type, TNIL) != 0) {
+ if ll != nil && ll.Next == nil && ll.N.Type != nil && !Istype(ll.N.Type, TNIL) {
// single entry type switch
nvar.Ntype = typenod(ll.N.Type)
} else {
var et int
var s string
- if Isslice(t) != 0 {
+ if Isslice(t) {
return "slice"
}
et = int(t.Etype)
var l *NodeList
// cannot type check until all the source has been parsed
- if !(typecheckok != 0) {
+ if typecheckok == 0 {
Fatal("early typecheck")
}
/*
* does n contain a call or receive operation?
*/
-func callrecv(n *Node) int {
+func callrecv(n *Node) bool {
if n == nil {
- return 0
+ return false
}
switch n.Op {
ONEW,
OAPPEND,
ODELETE:
- return 1
+ return true
}
- return bool2int(callrecv(n.Left) != 0 || callrecv(n.Right) != 0 || callrecv(n.Ntest) != 0 || callrecv(n.Nincr) != 0 || callrecvlist(n.Ninit) != 0 || callrecvlist(n.Nbody) != 0 || callrecvlist(n.Nelse) != 0 || callrecvlist(n.List) != 0 || callrecvlist(n.Rlist) != 0)
+ return callrecv(n.Left) || callrecv(n.Right) || callrecv(n.Ntest) || callrecv(n.Nincr) || callrecvlist(n.Ninit) || callrecvlist(n.Nbody) || callrecvlist(n.Nelse) || callrecvlist(n.List) || callrecvlist(n.Rlist)
}
-func callrecvlist(l *NodeList) int {
+func callrecvlist(l *NodeList) bool {
for ; l != nil; l = l.Next {
- if callrecv(l.N) != 0 {
- return 1
+ if callrecv(l.N) {
+ return true
}
}
- return 0
+ return false
}
// indexlit implements typechecking of untyped values as
var n *Node
n = *np
- if n == nil || !(isideal(n.Type) != 0) {
+ if n == nil || !isideal(n.Type) {
return
}
switch consttype(n) {
n = *np
if n.Sym != nil {
- if n.Op == ONAME && n.Etype != 0 && !(top&Ecall != 0) {
+ if n.Op == ONAME && n.Etype != 0 && top&Ecall == 0 {
Yyerror("use of builtin %v not in function call", Sconv(n.Sym, 0))
goto error
}
Dump("typecheck", n)
Fatal("typecheck %v", Oconv(int(n.Op), 0))
- fallthrough
/*
* names
goto ret
}
- if !(top&Easgn != 0) {
+ if top&Easgn == 0 {
// not a write to the variable
if isblank(n) {
Yyerror("cannot use _ as value")
n.Used = 1
}
- if !(top&Ecall != 0) && isunsafebuiltin(n) != 0 {
+ if top&Ecall == 0 && isunsafebuiltin(n) {
Yyerror("%v is not an expression, must be called", Nconv(n, 0))
goto error
}
t.Bound = -1 // slice
} else if l.Op == ODDD {
t.Bound = -100 // to be filled in
- if !(top&Ecomplit != 0) && !(n.Diag != 0) {
+ if top&Ecomplit == 0 && n.Diag == 0 {
t.Broke = 1
n.Diag = 1
Yyerror("use of [...] array outside of array literal")
}
t.Bound = Mpgetfix(v.U.Xval)
- if doesoverflow(v, Types[TINT]) != 0 {
+ if doesoverflow(v, Types[TINT]) {
Yyerror("array bound is too large")
goto error
} else if t.Bound < 0 {
case OIND:
ntop = Erv | Etype
- if !(top&Eaddr != 0) { // The *x in &*x is not an indirect.
+ if top&Eaddr == 0 {
ntop |= Eindir
}
ntop |= top & Ecomplit
goto ret
}
- if !(Isptr[t.Etype] != 0) {
+ if Isptr[t.Etype] == 0 {
if top&(Erv|Etop) != 0 {
Yyerror("invalid indirect of %v", Nconv(n.Left, obj.FmtLong))
goto error
if t == nil {
goto error
}
- if !(okfor[n.Op][t.Etype] != 0) {
+ if okfor[n.Op][t.Etype] == 0 {
Yyerror("invalid operation: %v %v", Oconv(int(n.Op), 0), Tconv(t, 0))
goto error
}
r = n.Right
if n.Left.Op == OTYPE {
- if !(looktypedot(n, t, 0) != 0) {
- if looktypedot(n, t, 1) != 0 {
+ if !looktypedot(n, t, 0) {
+ if looktypedot(n, t, 1) {
Yyerror("%v undefined (cannot refer to unexported method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
} else {
Yyerror("%v undefined (type %v has no method %v)", Nconv(n, 0), Tconv(t, 0), Sconv(n.Right.Sym, 0))
goto error
}
- if !(lookdot(n, t, 0) != 0) {
- if lookdot(n, t, 1) != 0 {
+ if !lookdot(n, t, 0) {
+ if lookdot(n, t, 1) {
Yyerror("%v undefined (cannot refer to unexported field or method %v)", Nconv(n, 0), Sconv(n.Right.Sym, 0))
} else {
Yyerror("%v undefined (type %v has no field or method %v)", Nconv(n, 0), Tconv(n.Left.Type, 0), Sconv(n.Right.Sym, 0))
if t == nil {
goto error
}
- if !(Isinter(t) != 0) {
+ if !Isinter(t) {
Yyerror("invalid type assertion: %v (non-interface type %v on left)", Nconv(n, 0), Tconv(t, 0))
goto error
}
}
if n.Type != nil && n.Type.Etype != TINTER {
- if !(implements(n.Type, t, &missing, &have, &ptr) != 0) {
+ if !implements(n.Type, t, &missing, &have, &ptr) {
if have != nil && have.Sym == missing.Sym {
Yyerror("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", Tconv(n.Type, 0), Tconv(t, 0), Sconv(missing.Sym, 0), Sconv(have.Sym, 0), Tconv(have.Type, obj.FmtShort|obj.FmtByte), Sconv(missing.Sym, 0), Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
} else if ptr != 0 {
}
why = "string"
if t.Etype == TARRAY {
- if Isfixedarray(t) != 0 {
+ if Isfixedarray(t) {
why = "array"
} else {
why = "slice"
}
}
- if n.Right.Type != nil && !(Isint[n.Right.Type.Etype] != 0) {
+ if n.Right.Type != nil && Isint[n.Right.Type.Etype] == 0 {
Yyerror("non-integer %s index %v", why, Nconv(n.Right, 0))
break
}
- if Isconst(n.Right, CTINT) != 0 {
+ if Isconst(n.Right, CTINT) {
x = Mpgetfix(n.Right.Val.U.Xval)
if x < 0 {
Yyerror("invalid %s index %v (index must be non-negative)", why, Nconv(n.Right, 0))
- } else if Isfixedarray(t) != 0 && t.Bound > 0 && x >= t.Bound {
+ } else if Isfixedarray(t) && t.Bound > 0 && x >= t.Bound {
Yyerror("invalid array index %v (out of bounds for %d-element array)", Nconv(n.Right, 0), t.Bound)
- } else if Isconst(n.Left, CTSTR) != 0 && x >= int64(len(n.Left.Val.U.Sval.S)) {
+ } else if Isconst(n.Left, CTSTR) && x >= int64(len(n.Left.Val.U.Sval.S)) {
Yyerror("invalid string index %v (out of bounds for %d-byte string)", Nconv(n.Right, 0), len(n.Left.Val.U.Sval.S))
} else if Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
Yyerror("invalid %s index %v (index too large)", why, Nconv(n.Right, 0))
goto error
}
- if !(t.Chan&Crecv != 0) {
+ if t.Chan&Crecv == 0 {
Yyerror("invalid operation: %v (receive from send-only type %v)", Nconv(n, 0), Tconv(t, 0))
goto error
}
goto error
}
- if !(t.Chan&Csend != 0) {
+ if t.Chan&Csend == 0 {
Yyerror("invalid operation: %v (send to receive-only type %v)", Nconv(n, 0), Tconv(t, 0))
goto error
}
indexlit(&n.Right.Left)
indexlit(&n.Right.Right)
l = n.Left
- if Isfixedarray(l.Type) != 0 {
- if !(islvalue(n.Left) != 0) {
+ if Isfixedarray(l.Type) {
+ if !islvalue(n.Left) {
Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
goto error
}
goto error
}
tp = nil
- if Istype(t, TSTRING) != 0 {
+ if Istype(t, TSTRING) {
n.Type = t
n.Op = OSLICESTR
- } else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ } else if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
tp = t.Type
n.Type = typ(TARRAY)
n.Type.Type = tp.Type
n.Type.Bound = -1
dowidth(n.Type)
n.Op = OSLICEARR
- } else if Isslice(t) != 0 {
+ } else if Isslice(t) {
n.Type = t
} else {
Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
indexlit(&n.Right.Right.Left)
indexlit(&n.Right.Right.Right)
l = n.Left
- if Isfixedarray(l.Type) != 0 {
- if !(islvalue(n.Left) != 0) {
+ if Isfixedarray(l.Type) {
+ if !islvalue(n.Left) {
Yyerror("invalid operation %v (slice of unaddressable value)", Nconv(n, 0))
goto error
}
goto error
}
tp = nil
- if Istype(t, TSTRING) != 0 {
+ if Istype(t, TSTRING) {
Yyerror("invalid operation %v (3-index slice of string)", Nconv(n, 0))
goto error
}
- if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) != 0 {
+ if Isptr[t.Etype] != 0 && Isfixedarray(t.Type) {
tp = t.Type
n.Type = typ(TARRAY)
n.Type.Type = tp.Type
n.Type.Bound = -1
dowidth(n.Type)
n.Op = OSLICE3ARR
- } else if Isslice(t) != 0 {
+ } else if Isslice(t) {
n.Type = t
} else {
Yyerror("cannot slice %v (type %v)", Nconv(l, 0), Tconv(t, 0))
l = n.Left
if l.Op == OTYPE {
if n.Isddd != 0 || l.Type.Bound == -100 {
- if !(l.Type.Broke != 0) {
+ if l.Type.Broke == 0 {
Yyerror("invalid use of ... in type conversion", l)
}
n.Diag = 1
goto doconv
}
- if count(n.List) == 1 && !(n.Isddd != 0) {
+ if count(n.List) == 1 && n.Isddd == 0 {
typecheck(&n.List.N, Erv|Efnstruct)
} else {
typechecklist(n.List, Erv)
}
// multiple return
- if !(top&(Efnstruct|Etop) != 0) {
+ if top&(Efnstruct|Etop) == 0 {
Yyerror("multiple-value %v() in single-value context", Nconv(l, 0))
goto ret
}
}
switch n.Op {
case OCAP:
- if !(okforcap[t.Etype] != 0) {
+ if okforcap[t.Etype] == 0 {
goto badcall1
}
case OLEN:
- if !(okforlen[t.Etype] != 0) {
+ if okforlen[t.Etype] == 0 {
goto badcall1
}
case OREAL,
OIMAG:
- if !(Iscomplex[t.Etype] != 0) {
+ if Iscomplex[t.Etype] == 0 {
goto badcall1
}
- if Isconst(l, CTCPLX) != 0 {
+ if Isconst(l, CTCPLX) {
r = n
if n.Op == OREAL {
n = nodfltconst(&l.Val.U.Cval.Real)
// might be constant
switch t.Etype {
case TSTRING:
- if Isconst(l, CTSTR) != 0 {
+ if Isconst(l, CTSTR) {
r = Nod(OXXX, nil, nil)
Nodconst(r, Types[TINT], int64(len(l.Val.U.Sval.S)))
r.Orig = n
if t.Bound < 0 { // slice
break
}
- if callrecv(l) != 0 { // has call or receive
+ if callrecv(l) { // has call or receive
break
}
r = Nod(OXXX, nil, nil)
goto error
}
- if !(t.Chan&Csend != 0) {
+ if t.Chan&Csend == 0 {
Yyerror("invalid operation: %v (cannot close receive-only channel)", Nconv(n, 0))
goto error
}
goto error
}
- if count(args) == 1 && !(n.Isddd != 0) {
+ if count(args) == 1 && n.Isddd == 0 {
typecheck(&args.N, Erv|Efnstruct)
} else {
typechecklist(args, Erv)
}
// Unpack multiple-return result before type-checking.
- if Istype(t, TSTRUCT) != 0 && t.Funarg != 0 {
+ if Istype(t, TSTRUCT) && t.Funarg != 0 {
t = t.Type
- if Istype(t, TFIELD) != 0 {
+ if Istype(t, TFIELD) {
t = t.Type
}
}
n.Type = t
- if !(Isslice(t) != 0) {
- if Isconst(args.N, CTNIL) != 0 {
+ if !Isslice(t) {
+ if Isconst(args.N, CTNIL) {
Yyerror("first argument to append must be typed slice; have untyped nil", t)
goto error
}
goto error
}
- if Istype(t.Type, TUINT8) != 0 && Istype(args.Next.N.Type, TSTRING) != 0 {
+ if Istype(t.Type, TUINT8) && Istype(args.Next.N.Type, TSTRING) {
defaultlit(&args.Next.N, Types[TSTRING])
goto ret
}
}
// copy([]byte, string)
- if Isslice(n.Left.Type) != 0 && n.Right.Type.Etype == TSTRING {
+ if Isslice(n.Left.Type) && n.Right.Type.Etype == TSTRING {
if Eqtype(n.Left.Type.Type, bytetype) {
goto ret
}
goto error
}
- if !(Isslice(n.Left.Type) != 0) || !(Isslice(n.Right.Type) != 0) {
- if !(Isslice(n.Left.Type) != 0) && !(Isslice(n.Right.Type) != 0) {
+ if !Isslice(n.Left.Type) || !Isslice(n.Right.Type) {
+ if !Isslice(n.Left.Type) && !Isslice(n.Right.Type) {
Yyerror("arguments to copy must be slices; have %v, %v", Tconv(n.Left.Type, obj.FmtLong), Tconv(n.Right.Type, obj.FmtLong))
- } else if !(Isslice(n.Left.Type) != 0) {
+ } else if !Isslice(n.Left.Type) {
Yyerror("first argument to copy should be slice; have %v", Tconv(n.Left.Type, obj.FmtLong))
} else {
Yyerror("second argument to copy should be slice or string; have %v", Tconv(n.Right.Type, obj.FmtLong))
goto error
case TARRAY:
- if !(Isslice(t) != 0) {
+ if !Isslice(t) {
Yyerror("cannot make type %v", Tconv(t, 0))
goto error
}
if et != 0 {
goto error
}
- if Isconst(l, CTINT) != 0 && r != nil && Isconst(r, CTINT) != 0 && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
+ if Isconst(l, CTINT) && r != nil && Isconst(r, CTINT) && Mpcmpfixfix(l.Val.U.Xval, r.Val.U.Xval) > 0 {
Yyerror("len larger than cap in make(%v)", Tconv(t, 0))
goto error
}
typechecklist(n.List, Erv|Eindir) // Eindir: address does not escape
for args = n.List; args != nil; args = args.Next {
// Special case for print: int constant is int64, not int.
- if Isconst(args.N, CTINT) != 0 {
+ if Isconst(args.N, CTINT) {
defaultlit(&args.N, Types[TINT64])
} else {
defaultlit(&args.N, nil)
if t == nil {
goto error
}
- if !(Isslice(t) != 0) && t.Etype != TSTRING {
+ if !Isslice(t) && t.Etype != TSTRING {
Fatal("OSPTR of %v", Tconv(t, 0))
}
if t.Etype == TSTRING {
case ODEFER:
ok |= Etop
typecheck(&n.Left, Etop|Erv)
- if !(n.Left.Diag != 0) {
+ if n.Left.Diag == 0 {
checkdefergo(n)
}
goto ret
case ODCLTYPE:
ok |= Etop
typecheck(&n.Left, Etype)
- if !(incannedimport != 0) {
+ if incannedimport == 0 {
checkwidth(n.Left.Type)
}
goto ret
if r.Type.Etype != TBLANK {
aop = assignop(l.Type, r.Type, nil)
if aop != 0 {
- if Isinter(r.Type) != 0 && !(Isinter(l.Type) != 0) && algtype1(l.Type, nil) == ANOEQ {
+ if Isinter(r.Type) && !Isinter(l.Type) && algtype1(l.Type, nil) == ANOEQ {
Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(l.Type))
goto error
}
if l.Type.Etype != TBLANK {
aop = assignop(r.Type, l.Type, nil)
if aop != 0 {
- if Isinter(l.Type) != 0 && !(Isinter(r.Type) != 0) && algtype1(r.Type, nil) == ANOEQ {
+ if Isinter(l.Type) && !Isinter(r.Type) && algtype1(r.Type, nil) == ANOEQ {
Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(r.Type))
goto error
}
}
}
- if !(okfor[op][et] != 0) {
+ if okfor[op][et] == 0 {
Yyerror("invalid operation: %v (operator %v not defined on %s)", Nconv(n, 0), Oconv(int(op), 0), typekind(t))
goto error
}
// okfor allows any array == array, map == map, func == func.
// restrict to slice/map/func == nil and nil == slice/map/func.
- if Isfixedarray(l.Type) != 0 && algtype1(l.Type, nil) == ANOEQ {
+ if Isfixedarray(l.Type) && algtype1(l.Type, nil) == ANOEQ {
Yyerror("invalid operation: %v (%v cannot be compared)", Nconv(n, 0), Tconv(l.Type, 0))
goto error
}
- if Isslice(l.Type) != 0 && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ if Isslice(l.Type) && !isnil(l) && !isnil(r) {
Yyerror("invalid operation: %v (slice can only be compared to nil)", Nconv(n, 0))
goto error
}
- if l.Type.Etype == TMAP && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ if l.Type.Etype == TMAP && !isnil(l) && !isnil(r) {
Yyerror("invalid operation: %v (map can only be compared to nil)", Nconv(n, 0))
goto error
}
- if l.Type.Etype == TFUNC && !(isnil(l) != 0) && !(isnil(r) != 0) {
+ if l.Type.Etype == TFUNC && !isnil(l) && !isnil(r) {
Yyerror("invalid operation: %v (func can only be compared to nil)", Nconv(n, 0))
goto error
}
}
}
- if (op == ODIV || op == OMOD) && Isconst(r, CTINT) != 0 {
+ if (op == ODIV || op == OMOD) && Isconst(r, CTINT) {
if mpcmpfixc(r.Val.U.Xval, 0) == 0 {
Yyerror("division by zero")
goto error
defaultlit(&r, Types[TUINT])
n.Right = r
t = r.Type
- if !(Isint[t.Etype] != 0) || Issigned[t.Etype] != 0 {
+ if Isint[t.Etype] == 0 || Issigned[t.Etype] != 0 {
Yyerror("invalid operation: %v (shift count type %v, must be unsigned integer)", Nconv(n, 0), Tconv(r.Type, 0))
goto error
}
t = l.Type
- if t != nil && t.Etype != TIDEAL && !(Isint[t.Etype] != 0) {
+ if t != nil && t.Etype != TIDEAL && Isint[t.Etype] == 0 {
Yyerror("invalid operation: %v (shift of type %v)", Nconv(n, 0), Tconv(t, 0))
goto error
}
ok |= Erv
saveorignode(n)
typecheck(&n.Left, Erv|top&(Eindir|Eiota))
- convlit1(&n.Left, n.Type, 1)
+ convlit1(&n.Left, n.Type, true)
t = n.Left.Type
if t == nil || n.Type == nil {
goto error
}
n.Op = uint8(convertop(t, n.Type, &why))
if (n.Op) == 0 {
- if !(n.Diag != 0) && !(n.Type.Broke != 0) {
+ if n.Diag == 0 && n.Type.Broke == 0 {
Yyerror("cannot convert %v to type %v%s", Nconv(n.Left, obj.FmtLong), Tconv(n.Type, 0), why)
n.Diag = 1
}
ret:
t = n.Type
- if t != nil && !(t.Funarg != 0) && n.Op != OTYPE {
+ if t != nil && t.Funarg == 0 && n.Op != OTYPE {
switch t.Etype {
case TFUNC, // might have TANY; wait until its called
TANY,
}
}
- if safemode != 0 && !(incannedimport != 0) && !(importpkg != nil) && !(compiling_wrappers != 0) && t != nil && t.Etype == TUNSAFEPTR {
+ if safemode != 0 && incannedimport == 0 && importpkg == nil && compiling_wrappers == 0 && t != nil && t.Etype == TUNSAFEPTR {
Yyerror("cannot use unsafe.Pointer")
}
evconst(n)
- if n.Op == OTYPE && !(top&Etype != 0) {
+ if n.Op == OTYPE && top&Etype == 0 {
Yyerror("type %v is not an expression", Tconv(n.Type, 0))
goto error
}
}
// TODO(rsc): simplify
- if (top&(Ecall|Erv|Etype) != 0) && !(top&Etop != 0) && !(ok&(Erv|Etype|Ecall) != 0) {
+ if (top&(Ecall|Erv|Etype) != 0) && top&Etop == 0 && ok&(Erv|Etype|Ecall) == 0 {
Yyerror("%v used as value", Nconv(n, 0))
goto error
}
- if (top&Etop != 0) && !(top&(Ecall|Erv|Etype) != 0) && !(ok&Etop != 0) {
+ if (top&Etop != 0) && top&(Ecall|Erv|Etype) == 0 && ok&Etop == 0 {
if n.Diag == 0 {
Yyerror("%v evaluated but not used", Nconv(n, 0))
n.Diag = 1
if t == nil {
return -1
}
- if !(Isint[t.Etype] != 0) {
+ if Isint[t.Etype] == 0 {
Yyerror("invalid slice index %v (type %v)", Nconv(r, 0), Tconv(t, 0))
return -1
}
} else if tp != nil && tp.Bound > 0 && Mpgetfix(r.Val.U.Xval) > tp.Bound {
Yyerror("invalid slice index %v (out of bounds for %d-element array)", Nconv(r, 0), tp.Bound)
return -1
- } else if Isconst(l, CTSTR) != 0 && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
+ } else if Isconst(l, CTSTR) && Mpgetfix(r.Val.U.Xval) > int64(len(l.Val.U.Sval.S)) {
Yyerror("invalid slice index %v (out of bounds for %d-byte string)", Nconv(r, 0), len(l.Val.U.Sval.S))
return -1
} else if Mpcmpfixfix(r.Val.U.Xval, Maxintval[TINT]) > 0 {
return
}
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
// The syntax made sure it was a call, so this must be
// a conversion.
n.Diag = 1
n = *nn
t = n.Type
- if t == nil || !(Isptr[t.Etype] != 0) {
+ if t == nil || Isptr[t.Etype] == 0 {
return
}
t = t.Type
if t == nil {
return
}
- if !(Isfixedarray(t) != 0) {
+ if !Isfixedarray(t) {
return
}
n = Nod(OIND, n, nil)
return r
}
-func looktypedot(n *Node, t *Type, dostrcmp int) int {
+func looktypedot(n *Node, t *Type, dostrcmp int) bool {
var f1 *Type
var f2 *Type
var s *Sym
if t.Etype == TINTER {
f1 = lookdot1(n, s, t, t.Type, dostrcmp)
if f1 == nil {
- return 0
+ return false
}
n.Right = methodname(n.Right, t)
n.Xoffset = f1.Width
n.Type = f1.Type
n.Op = ODOTINTER
- return 1
+ return true
}
// Find the base type: methtype will fail if t
f2 = methtype(t, 0)
if f2 == nil {
- return 0
+ return false
}
expandmeth(f2)
f2 = lookdot1(n, s, f2, f2.Xmethod, dostrcmp)
if f2 == nil {
- return 0
+ return false
}
// disallow T.m if m requires *T receiver
- if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && !(Isptr[t.Etype] != 0) && f2.Embedded != 2 && !(isifacemethod(f2.Type) != 0) {
+ if Isptr[getthisx(f2.Type).Type.Type.Etype] != 0 && Isptr[t.Etype] == 0 && f2.Embedded != 2 && !isifacemethod(f2.Type) {
Yyerror("invalid method expression %v (needs pointer receiver: (*%v).%v)", Nconv(n, 0), Tconv(t, 0), Sconv(f2.Sym, obj.FmtShort))
- return 0
+ return false
}
n.Right = methodname(n.Right, t)
n.Xoffset = f2.Width
n.Type = f2.Type
n.Op = ODOTMETH
- return 1
+ return true
}
func derefall(t *Type) *Type {
return t
}
-func lookdot(n *Node, t *Type, dostrcmp int) int {
+func lookdot(n *Node, t *Type, dostrcmp int) bool {
var f1 *Type
var f2 *Type
var tt *Type
n.Op = ODOTINTER
}
- return 1
+ return true
}
if f2 != nil {
// print("lookdot found [%p] %T\n", f2->type, f2->type);
n.Op = ODOTMETH
- return 1
+ return true
}
- return 0
+ return false
}
-func nokeys(l *NodeList) int {
+func nokeys(l *NodeList) bool {
for ; l != nil; l = l.Next {
if l.N.Op == OKEY {
- return 0
+ return false
}
}
- return 1
+ return true
}
-func hasddd(t *Type) int {
+func hasddd(t *Type) bool {
var tl *Type
for tl = t.Type; tl != nil; tl = tl.Down {
if tl.Isddd != 0 {
- return 1
+ return true
}
}
- return 0
+ return false
}
func downcount(t *Type) int {
n = nl.N
if n.Type != nil {
if n.Type.Etype == TSTRUCT && n.Type.Funarg != 0 {
- if !(hasddd(tstruct) != 0) {
+ if !hasddd(tstruct) {
n1 = downcount(tstruct)
n2 = downcount(n.Type)
if n2 > n1 {
n1 = downcount(tstruct)
n2 = count(nl)
- if !(hasddd(tstruct) != 0) {
+ if !hasddd(tstruct) {
if n2 > n1 {
goto toomany
}
goto notenough
}
} else {
- if !(isddd != 0) {
+ if isddd == 0 {
if n2 < n1-1 {
goto notenough
}
return
notenough:
- if n == nil || !(n.Diag != 0) {
+ if n == nil || n.Diag == 0 {
if call != nil {
Yyerror("not enough arguments in call to %v", Nconv(call, 0))
} else {
hash[h] = n
}
-func prime(h uint32, sr uint32) int {
+func prime(h uint32, sr uint32) bool {
var n uint32
for n = 3; n <= sr; n += 2 {
if h%n == 0 {
- return 0
+ return false
}
}
- return 1
+ return true
}
func inithash(n *Node, autohash []*Node) []*Node {
}
// check for primeality
- for !(prime(h, sr) != 0) {
+ for !prime(h, sr) {
h += 2
}
return make([]*Node, h)
}
-func iscomptype(t *Type) int {
+func iscomptype(t *Type) bool {
switch t.Etype {
case TARRAY,
TSTRUCT,
TMAP:
- return 1
+ return true
case TPTR32,
TPTR64:
case TARRAY,
TSTRUCT,
TMAP:
- return 1
+ return true
}
}
- return 0
+ return false
}
func pushtype(n *Node, t *Type) {
- if n == nil || n.Op != OCOMPLIT || !(iscomptype(t) != 0) {
+ if n == nil || n.Op != OCOMPLIT || !iscomptype(t) {
return
}
if Isptr[t.Etype] != 0 {
// For better or worse, we don't allow pointers as the composite literal type,
// except when using the &T syntax, which sets implicit on the OIND.
- if !(n.Right.Implicit != 0) {
+ if n.Right.Implicit == 0 {
Yyerror("invalid pointer type %v for composite literal (use &%v instead)", Tconv(t, 0), Tconv(t.Type, 0))
goto error
}
// Also, the underlying type must be a struct, map, slice, or array.
- if !(iscomptype(t) != 0) {
+ if !iscomptype(t) {
Yyerror("invalid pointer type %v for composite literal", Tconv(t, 0))
goto error
}
typecheck(&l.Left, Erv)
evconst(l.Left)
i = nonnegconst(l.Left)
- if i < 0 && !(l.Left.Diag != 0) {
+ if i < 0 && l.Left.Diag == 0 {
Yyerror("array index must be non-negative integer constant")
l.Left.Diag = 1
i = -(1 << 30) // stay negative for a while
case TSTRUCT:
bad = 0
- if n.List != nil && nokeys(n.List) != 0 {
+ if n.List != nil && nokeys(n.List) {
// simple list of variables
f = t.Type
if f == nil {
tmp12 := bad
bad++
- if !(tmp12 != 0) {
+ if tmp12 == 0 {
Yyerror("too many values in struct initializer")
}
continue
if l.Op != OKEY {
tmp13 := bad
bad++
- if !(tmp13 != 0) {
+ if tmp13 == 0 {
Yyerror("mixture of field:value and value initializers")
}
typecheck(&ll.N, Erv)
/*
* lvalue etc
*/
-func islvalue(n *Node) int {
+func islvalue(n *Node) bool {
switch n.Op {
case OINDEX:
- if Isfixedarray(n.Left.Type) != 0 {
+ if Isfixedarray(n.Left.Type) {
return islvalue(n.Left)
}
if n.Left.Type != nil && n.Left.Type.Etype == TSTRING {
- return 0
+ return false
}
fallthrough
ODOTPTR,
OCLOSUREVAR,
OPARAM:
- return 1
+ return true
case ODOT:
return islvalue(n.Left)
case ONAME:
if n.Class == PFUNC {
- return 0
+ return false
}
- return 1
+ return true
}
- return 0
+ return false
}
func checklvalue(n *Node, verb string) {
- if !(islvalue(n) != 0) {
+ if !islvalue(n) {
Yyerror("cannot %s %v", verb, Nconv(n, 0))
}
}
}
}
- if islvalue(n) != 0 {
+ if islvalue(n) {
return
}
if n.Op == OINDEXMAP {
// Check whether l and r are the same side effect-free expression,
// so that it is safe to reuse one instead of computing both.
-func samesafeexpr(l *Node, r *Node) int {
+func samesafeexpr(l *Node, r *Node) bool {
if l.Op != r.Op || !Eqtype(l.Type, r.Type) {
- return 0
+ return false
}
switch l.Op {
case ONAME,
OCLOSUREVAR:
- return bool2int(l == r)
+ return l == r
case ODOT,
ODOTPTR:
- return bool2int(l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left) != 0)
+ return l.Right != nil && r.Right != nil && l.Right.Sym == r.Right.Sym && samesafeexpr(l.Left, r.Left)
case OIND:
return samesafeexpr(l.Left, r.Left)
case OINDEX:
- return bool2int(samesafeexpr(l.Left, r.Left) != 0 && samesafeexpr(l.Right, r.Right) != 0)
+ return samesafeexpr(l.Left, r.Left) && samesafeexpr(l.Right, r.Right)
}
- return 0
+ return false
}
/*
// Recognize slices being updated in place, for better code generation later.
// Don't rewrite if using race detector, to avoid needing to teach race detector
// about this optimization.
- if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && !(flag_race != 0) {
+ if n.Left != nil && n.Left.Op != OINDEXMAP && n.Right != nil && flag_race == 0 {
switch n.Right.Op {
// For x = x[0:y], x can be updated in place, without touching pointer.
// TODO(rsc): Reenable once it is actually updated in place without touching the pointer.
case OSLICE,
OSLICE3,
OSLICESTR:
- if false && samesafeexpr(n.Left, n.Right.Left) != 0 && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left) != 0) {
+ if false && samesafeexpr(n.Left, n.Right.Left) && (n.Right.Right.Left == nil || iszero(n.Right.Right.Left)) {
n.Right.Reslice = 1
}
// can take care of updating the pointer, and only in that case.
// TODO(rsc): Reenable once the emitted code does update the pointer.
case OAPPEND:
- if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) != 0 {
+ if false && n.Right.List != nil && samesafeexpr(n.Left, n.Right.List.N) {
n.Right.Reslice = 1
}
}
if ntypecheckdeftype == 1 {
for {
l = methodqueue
- if !(l != nil) {
+ if l == nil {
break
}
methodqueue = nil
setlineno(n)
if n.Op == ONONAME {
- if !(n.Diag != 0) {
+ if n.Diag == 0 {
n.Diag = 1
if n.Lineno != 0 {
lineno = n.Lineno
switch n.Op {
default:
Fatal("typecheckdef %v", Oconv(int(n.Op), 0))
- fallthrough
// not really syms
case OGOTO,
}
typecheck(&e, Erv|Eiota)
- if Isconst(e, CTNIL) != 0 {
+ if Isconst(e, CTNIL) {
Yyerror("const initializer cannot be nil")
goto ret
}
- if e.Type != nil && e.Op != OLITERAL || !(isgoconst(e) != 0) {
- if !(e.Diag != 0) {
+ if e.Type != nil && e.Op != OLITERAL || !isgoconst(e) {
+ if e.Diag == 0 {
Yyerror("const initializer %v is not a constant", Nconv(e, 0))
e.Diag = 1
}
t = n.Type
if t != nil {
- if !(okforconst[t.Etype] != 0) {
+ if okforconst[t.Etype] == 0 {
Yyerror("invalid constant type %v", Tconv(t, 0))
goto ret
}
- if !(isideal(e.Type) != 0) && !Eqtype(t, e.Type) {
+ if !isideal(e.Type) && !Eqtype(t, e.Type) {
Yyerror("cannot use %v as type %v in const initializer", Nconv(e, obj.FmtLong), Tconv(t, 0))
goto ret
}
}
ret:
- if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) != 0 {
+ if n.Op != OLITERAL && n.Type != nil && isideal(n.Type) {
Fatal("got %v for %v", Tconv(n.Type, 0), Nconv(n, 0))
}
if typecheckdefstack.N != n {
}
}
- if !(Isint[n.Type.Etype] != 0) && n.Type.Etype != TIDEAL {
+ if Isint[n.Type.Etype] == 0 && n.Type.Etype != TIDEAL {
Yyerror("non-integer %s argument in make(%v) - %v", arg, Tconv(t, 0), Tconv(n.Type, 0))
return -1
}
}
}
-func isterminating(l *NodeList, top int) int {
+func isterminating(l *NodeList, top int) bool {
var def int
var n *Node
if l == nil {
- return 0
+ return false
}
if top != 0 {
for l.Next != nil && l.N.Op != OLABEL {
n = l.N
if n == nil {
- return 0
+ return false
}
switch n.Op {
ORETJMP,
OPANIC,
OXFALL:
- return 1
+ return true
case OFOR:
if n.Ntest != nil {
- return 0
+ return false
}
if n.Hasbreak != 0 {
- return 0
+ return false
}
- return 1
+ return true
case OIF:
- return bool2int(isterminating(n.Nbody, 0) != 0 && isterminating(n.Nelse, 0) != 0)
+ return isterminating(n.Nbody, 0) && isterminating(n.Nelse, 0)
case OSWITCH,
OTYPESW,
OSELECT:
if n.Hasbreak != 0 {
- return 0
+ return false
}
def = 0
for l = n.List; l != nil; l = l.Next {
- if !(isterminating(l.N.Nbody, 0) != 0) {
- return 0
+ if !isterminating(l.N.Nbody, 0) {
+ return false
}
if l.N.List == nil { // default
def = 1
}
}
- if n.Op != OSELECT && !(def != 0) {
- return 0
+ if n.Op != OSELECT && def == 0 {
+ return false
}
- return 1
+ return true
}
- return 0
+ return false
}
func checkreturn(fn *Node) {
if fn.Type.Outtuple != 0 && fn.Nbody != nil {
- if !(isterminating(fn.Nbody, 1) != 0) {
+ if !isterminating(fn.Nbody, 1) {
yyerrorl(int(fn.Endlineno), "missing return at end of function")
}
}
return n
}
-func isunsafebuiltin(n *Node) int {
+func isunsafebuiltin(n *Node) bool {
if n == nil || n.Op != ONAME || n.Sym == nil || n.Sym.Pkg != unsafepkg {
- return 0
+ return false
}
if n.Sym.Name == "Sizeof" {
- return 1
+ return true
}
if n.Sym.Name == "Offsetof" {
- return 1
+ return true
}
if n.Sym.Name == "Alignof" {
- return 1
+ return true
}
- return 0
+ return false
}
}
}
-func samelist(a *NodeList, b *NodeList) int {
+func samelist(a *NodeList, b *NodeList) bool {
for ; a != nil && b != nil; (func() { a = a.Next; b = b.Next })() {
if a.N != b.N {
- return 0
+ return false
}
}
- return bool2int(a == b)
+ return a == b
}
func paramoutheap(fn *Node) int {
}
}
- if samelist(rl, n.List) != 0 {
+ if samelist(rl, n.List) {
// special return in disguise
n.List = nil
if Isptr[t.Etype] != 0 {
t = t.Type
}
- if Isfixedarray(t) != 0 {
+ if Isfixedarray(t) {
safeexpr(n.Left, init)
Nodconst(n, n.Type, t.Bound)
n.Typecheck = 1
walkexpr(&n.Left, init)
walkexpr(&n.Right, init)
t = n.Left.Type
- n.Bounded = uint8(bounded(n.Right, 8*t.Width))
- if Debug['m'] != 0 && n.Etype != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ n.Bounded = bounded(n.Right, 8*t.Width)
+ if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
Warn("shift bounds check elided")
}
goto ret
goto ret
case ONAME:
- if !(n.Class&PHEAP != 0) && n.Class != PPARAMREF {
+ if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
n.Addable = 1
}
goto ret
walkexpr(&n.Left, init)
n.Left = safeexpr(n.Left, init)
- if oaslit(n, init) != 0 {
+ if oaslit(n, init) {
goto ret
}
- if n.Right == nil || iszero(n.Right) != 0 && !(flag_race != 0) {
+ if n.Right == nil || iszero(n.Right) && flag_race == 0 {
goto ret
}
from = "I"
to = "T"
- if isnilinter(r.Left.Type) != 0 {
+ if isnilinter(r.Left.Type) {
from = "E"
}
- if isnilinter(r.Type) != 0 {
+ if isnilinter(r.Type) {
to = "E"
- } else if Isinter(r.Type) != 0 {
+ } else if Isinter(r.Type) {
to = "I"
}
from = "I"
to = "T"
- if isnilinter(r.Left.Type) != 0 {
+ if isnilinter(r.Left.Type) {
from = "E"
}
- if isnilinter(r.Type) != 0 {
+ if isnilinter(r.Type) {
to = "E"
- } else if Isinter(r.Type) != 0 {
+ } else if Isinter(r.Type) {
to = "I"
}
buf = fmt.Sprintf("assert%s2%s2", from, to)
case ODOTTYPE,
ODOTTYPE2:
Fatal("walkexpr ODOTTYPE") // should see inside OAS or OAS2 only
- fallthrough
case OCONVIFACE:
walkexpr(&n.Left, init)
// Optimize convT2E as a two-word copy when T is pointer-shaped.
- if isnilinter(n.Type) != 0 && isdirectiface(n.Left.Type) != 0 {
+ if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
l = Nod(OEFACE, typename(n.Left.Type), n.Left)
l.Type = n.Type
l.Typecheck = n.Typecheck
from = "T"
to = "I"
- if isnilinter(n.Left.Type) != 0 {
+ if isnilinter(n.Left.Type) {
from = "E"
- } else if Isinter(n.Left.Type) != 0 {
+ } else if Isinter(n.Left.Type) {
from = "I"
}
- if isnilinter(n.Type) != 0 {
+ if isnilinter(n.Type) {
to = "E"
}
buf = fmt.Sprintf("conv%s2%s", from, to)
fn = syslook(buf, 1)
ll = nil
- if !(Isinter(n.Left.Type) != 0) {
+ if !Isinter(n.Left.Type) {
ll = list(ll, typename(n.Left.Type))
}
- if !(isnilinter(n.Type) != 0) {
+ if !isnilinter(n.Type) {
ll = list(ll, typename(n.Type))
}
- if !(Isinter(n.Left.Type) != 0) && !(isnilinter(n.Type) != 0) {
+ if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
sym = Pkglookup(fmt.Sprintf("%v.%v", Tconv(n.Left.Type, obj.FmtLeft), Tconv(n.Type, obj.FmtLeft)), itabpkg)
if sym.Def == nil {
l = Nod(ONAME, nil, nil)
l.Addable = 1
ll = list(ll, l)
- if isdirectiface(n.Left.Type) != 0 {
+ if isdirectiface(n.Left.Type) {
/* For pointer types, we can make a special form of optimization
*
* These statements are put onto the expression init list:
}
}
- if Isinter(n.Left.Type) != 0 {
+ if Isinter(n.Left.Type) {
ll = list(ll, n.Left)
} else {
// regular types are passed by reference to avoid C vararg calls
// with a non-interface, especially in a switch on interface value
// with non-interface cases, is not visible to orderstmt, so we
// have to fall back on allocating a temp here.
- if islvalue(n.Left) != 0 {
+ if islvalue(n.Left) {
ll = list(ll, Nod(OADDR, n.Left, nil))
} else {
ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
// if range of type cannot exceed static array bound,
// disable bounds check.
- if n.Bounded != 0 {
+ if n.Bounded {
goto ret
}
t = n.Left.Type
if t != nil && Isptr[t.Etype] != 0 {
t = t.Type
}
- if Isfixedarray(t) != 0 {
- n.Bounded = uint8(bounded(r, t.Bound))
- if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ if Isfixedarray(t) {
+ n.Bounded = bounded(r, t.Bound)
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
- if Smallintconst(n.Right) != 0 && !(n.Bounded != 0) {
+ if Smallintconst(n.Right) && !n.Bounded {
Yyerror("index out of bounds")
}
- } else if Isconst(n.Left, CTSTR) != 0 {
- n.Bounded = uint8(bounded(r, int64(len(n.Left.Val.U.Sval.S))))
- if Debug['m'] != 0 && n.Bounded != 0 && !(Isconst(n.Right, CTINT) != 0) {
+ } else if Isconst(n.Left, CTSTR) {
+ n.Bounded = bounded(r, int64(len(n.Left.Val.U.Sval.S)))
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
Warn("index bounds check elided")
}
- if Smallintconst(n.Right) != 0 {
- if !(n.Bounded != 0) {
+ if Smallintconst(n.Right) {
+ if !n.Bounded {
Yyerror("index out of bounds")
} else {
// replace "abc"[1] with 'b'.
}
}
- if Isconst(n.Right, CTINT) != 0 {
+ if Isconst(n.Right, CTINT) {
if Mpcmpfixfix(n.Right.Val.U.Xval, &mpzero) < 0 || Mpcmpfixfix(n.Right.Val.U.Xval, Maxintval[TINT]) > 0 {
Yyerror("index out of bounds")
}
case ORECV:
Fatal("walkexpr ORECV") // should see inside OAS only
- fallthrough
case OSLICE:
if n.Right != nil && n.Right.Left == nil && n.Right.Right == nil { // noop
// comparing the lengths instead will yield the same result
// without the function call.
case OCMPSTR:
- if (Isconst(n.Left, CTSTR) != 0 && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) != 0 && len(n.Right.Val.U.Sval.S) == 0) {
+ if (Isconst(n.Left, CTSTR) && len(n.Left.Val.U.Sval.S) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val.U.Sval.S) == 0) {
r = Nod(int(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
typecheck(&r, Erv)
walkexpr(&r, init)
}
// s + "badgerbadgerbadger" == "badgerbadgerbadger"
- if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) != 0 && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) != 0 && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
+ if (n.Etype == OEQ || n.Etype == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && cmpslit(n.Right, n.Left.List.Next.N) == 0 {
r = Nod(int(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
typecheck(&r, Erv)
walkexpr(&r, init)
l = r
}
t = n.Type
- if n.Esc == EscNone && Smallintconst(l) != 0 && Smallintconst(r) != 0 && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
+ if n.Esc == EscNone && Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val.U.Xval) < (1<<16)/t.Type.Width) {
// var arr [r]T
// n = arr[:l]
t = aindex(r, t.Type) // [r]T
if !Eqtype(n.Left.Type, n.Right.Type) {
Fatal("ifaceeq %v %v %v", Oconv(int(n.Op), 0), Tconv(n.Left.Type, 0), Tconv(n.Right.Type, 0))
}
- if isnilinter(n.Left.Type) != 0 {
+ if isnilinter(n.Left.Type) {
fn = syslook("efaceeq", 1)
} else {
fn = syslook("ifaceeq", 1)
* evaluating the lv or a function call
* in the conversion of the types
*/
-func fncall(l *Node, rt *Type) int {
+func fncall(l *Node, rt *Type) bool {
var r Node
if l.Ullman >= UINF || l.Op == OINDEXMAP {
- return 1
+ return true
}
r = Node{}
- if needwritebarrier(l, &r) != 0 {
- return 1
+ if needwritebarrier(l, &r) {
+ return true
}
if Eqtype(l.Type, rt) {
- return 0
+ return false
}
- return 1
+ return true
}
func ascompatet(op int, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
// any lv that causes a fn call must be
// deferred until all the return arguments
// have been pulled from the output arguments
- if fncall(l, r.Type) != 0 {
+ if fncall(l, r.Type) {
tmp = temp(r.Type)
typecheck(&tmp, Erv)
a = Nod(OAS, l, tmp)
// f(g()) where g has multiple return values
if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg != 0 {
// optimization - can do block copy
- if eqtypenoname(r.Type, *nl) != 0 {
+ if eqtypenoname(r.Type, *nl) {
a = nodarg(*nl, fp)
r = Nod(OCONVNOP, r, nil)
r.Type = a.Type
var all *NodeList
var on *Node
var t *Type
- var notfirst int
+ var notfirst bool
var et int
var op int
var calls *NodeList
op = int(nn.Op)
all = nn.List
calls = nil
- notfirst = 0
+ notfirst = false
// Hoist all the argument evaluation up before the lock.
walkexprlistcheap(all, init)
calls = list(calls, mkcall("printlock", nil, init))
for l = all; l != nil; l = l.Next {
- if notfirst != 0 {
+ if notfirst {
calls = list(calls, mkcall("printsp", nil, init))
}
- notfirst = bool2int(op == OPRINTN)
+ notfirst = op == OPRINTN
n = l.N
if n.Op == OLITERAL {
t = n.Type
et = int(n.Type.Etype)
- if Isinter(n.Type) != 0 {
- if isnilinter(n.Type) != 0 {
+ if Isinter(n.Type) {
+ if isnilinter(n.Type) {
on = syslook("printeface", 1)
} else {
on = syslook("printiface", 1)
} else if Isptr[et] != 0 || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
on = syslook("printpointer", 1)
argtype(on, n.Type) // any-1
- } else if Isslice(n.Type) != 0 {
+ } else if Isslice(n.Type) {
on = syslook("printslice", 1)
argtype(on, n.Type) // any-1
} else if Isint[et] != 0 {
return mkcall1(fn, Ptrto(t), nil, typename(t))
}
-func isstack(n *Node) int {
+func isstack(n *Node) bool {
var defn *Node
n = outervalue(n)
switch n.Op {
// OINDREG only ends up in walk if it's indirect of SP.
case OINDREG:
- return 1
+ return true
case ONAME:
switch n.Class {
case PAUTO,
PPARAM,
PPARAMOUT:
- return 1
+ return true
}
}
- return 0
+ return false
}
-func isglobal(n *Node) int {
+func isglobal(n *Node) bool {
n = outervalue(n)
switch n.Op {
case ONAME:
switch n.Class {
case PEXTERN:
- return 1
+ return true
}
}
- return 0
+ return false
}
// Do we need a write barrier for the assignment l = r?
-func needwritebarrier(l *Node, r *Node) int {
- if !(use_writebarrier != 0) {
- return 0
+func needwritebarrier(l *Node, r *Node) bool {
+ if use_writebarrier == 0 {
+ return false
}
if l == nil || isblank(l) {
- return 0
+ return false
}
// No write barrier for write of non-pointers.
dowidth(l.Type)
if !haspointers(l.Type) {
- return 0
+ return false
}
// No write barrier for write to stack.
- if isstack(l) != 0 {
- return 0
+ if isstack(l) {
+ return false
}
// No write barrier for implicit or explicit zeroing.
- if r == nil || iszero(r) != 0 {
- return 0
+ if r == nil || iszero(r) {
+ return false
}
// No write barrier for initialization to constant.
if r.Op == OLITERAL {
- return 0
+ return false
}
// No write barrier for storing static (read-only) data.
if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
- return 0
+ return false
}
// No write barrier for storing address of stack values,
// which are guaranteed only to be written to the stack.
- if r.Op == OADDR && isstack(r.Left) != 0 {
- return 0
+ if r.Op == OADDR && isstack(r.Left) {
+ return false
}
// No write barrier for storing address of global, which
// is live no matter what.
- if r.Op == OADDR && isglobal(r.Left) != 0 {
- return 0
+ if r.Op == OADDR && isglobal(r.Left) {
+ return false
}
// No write barrier for reslice: x = x[0:y] or x = append(x, ...).
Dump("bad reslice-r", r)
}
- return 0
+ return false
}
// Otherwise, be conservative and use write barrier.
- return 1
+ return true
}
// TODO(rsc): Perhaps componentgen should run before this.
var x int64
var name string
- if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) != 0 {
+ if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
if Curfn != nil && Curfn.Nowritebarrier {
Yyerror("write barrier prohibited")
}
n = mkcall1(writebarrierfn("writebarrierptr", t, n.Right.Type), nil, init, l, n.Right)
} else if t.Etype == TSTRING {
n = mkcall1(writebarrierfn("writebarrierstring", t, n.Right.Type), nil, init, l, n.Right)
- } else if Isslice(t) != 0 {
+ } else if Isslice(t) {
n = mkcall1(writebarrierfn("writebarrierslice", t, n.Right.Type), nil, init, l, n.Right)
- } else if Isinter(t) != 0 {
+ } else if Isinter(t) {
n = mkcall1(writebarrierfn("writebarrieriface", t, n.Right.Type), nil, init, l, n.Right)
} else if t.Width <= int64(4*Widthptr) {
x = 0
switch t.Width / int64(Widthptr) {
default:
Fatal("found writebarrierfat for %d-byte object of type %v", int(t.Width), Tconv(t, 0))
- fallthrough
case 2:
name = fmt.Sprintf("writebarrierfat%d%d", bvget(applywritebarrier_bv, PtrBit), bvget(applywritebarrier_bv, obj.BitsPerPointer+PtrBit))
continue
}
- if l.Op == OINDEX && Isfixedarray(l.Left.Type) != 0 {
+ if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
reorder3save(&l.Right, all, list, &early)
l = l.Left
continue
switch l.Op {
default:
Fatal("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
- fallthrough
case ONAME:
break
var q *Node
n = *np
- if !(aliased(n, all, stop) != 0) {
+ if !aliased(n, all, stop) {
return
}
continue
}
- if n.Op == OINDEX && Isfixedarray(n.Left.Type) != 0 {
+ if n.Op == OINDEX && Isfixedarray(n.Left.Type) {
n = n.Left
continue
}
* Is it possible that the computation of n might be
* affected by writes in as up to but not including stop?
*/
-func aliased(n *Node, all *NodeList, stop *NodeList) int {
+func aliased(n *Node, all *NodeList, stop *NodeList) bool {
var memwrite int
var varwrite int
var a *Node
var l *NodeList
if n == nil {
- return 0
+ return false
}
// Look for obvious aliasing: a variable being assigned
continue
}
- if vmatch2(a, n) != 0 {
+ if vmatch2(a, n) {
// Direct hit.
- return 1
+ return true
}
}
}
// that are being written.
// If no computed addresses are affected by the writes, no aliasing.
- if !(memwrite != 0) && !(varwrite != 0) {
- return 0
+ if memwrite == 0 && varwrite == 0 {
+ return false
}
// If n does not refer to computed addresses
// (that is, if n only refers to variables whose addresses
// have not been taken), no aliasing.
- if varexpr(n) != 0 {
- return 0
+ if varexpr(n) {
+ return false
}
// Otherwise, both the writes and n refer to computed memory addresses.
// Assume that they might conflict.
- return 1
+ return true
}
/*
* whose addresses have not been taken?
* (and no other memory)
*/
-func varexpr(n *Node) int {
+func varexpr(n *Node) bool {
if n == nil {
- return 1
+ return true
}
switch n.Op {
case OLITERAL:
- return 1
+ return true
case ONAME:
switch n.Class {
case PAUTO,
PPARAM,
PPARAMOUT:
- if !(n.Addrtaken != 0) {
- return 1
+ if n.Addrtaken == 0 {
+ return true
}
}
- return 0
+ return false
case OADD,
OSUB,
OCONVNOP,
OCONVIFACE,
ODOTTYPE:
- return bool2int(varexpr(n.Left) != 0 && varexpr(n.Right) != 0)
+ return varexpr(n.Left) && varexpr(n.Right)
}
// Be conservative.
- return 0
+ return false
}
/*
* is the name l mentioned in r?
*/
-func vmatch2(l *Node, r *Node) int {
+func vmatch2(l *Node, r *Node) bool {
var ll *NodeList
if r == nil {
- return 0
+ return false
}
switch r.Op {
// match each right given left
case ONAME:
- return bool2int(l == r)
+ return l == r
case OLITERAL:
- return 0
+ return false
}
- if vmatch2(l, r.Left) != 0 {
- return 1
+ if vmatch2(l, r.Left) {
+ return true
}
- if vmatch2(l, r.Right) != 0 {
- return 1
+ if vmatch2(l, r.Right) {
+ return true
}
for ll = r.List; ll != nil; ll = ll.Next {
- if vmatch2(l, ll.N) != 0 {
- return 1
+ if vmatch2(l, ll.N) {
+ return true
}
}
- return 0
+ return false
}
/*
* is any name mentioned in l also mentioned in r?
* called by sinit.c
*/
-func vmatch1(l *Node, r *Node) int {
+func vmatch1(l *Node, r *Node) bool {
var ll *NodeList
/*
* isolate all left sides
*/
if l == nil || r == nil {
- return 0
+ return false
}
switch l.Op {
case ONAME:
// must be delayed if right has function calls.
default:
if r.Ullman >= UINF {
- return 1
+ return true
}
}
return vmatch2(l, r)
case OLITERAL:
- return 0
+ return false
}
- if vmatch1(l.Left, r) != 0 {
- return 1
+ if vmatch1(l.Left, r) {
+ return true
}
- if vmatch1(l.Right, r) != 0 {
- return 1
+ if vmatch1(l.Right, r) {
+ return true
}
for ll = l.List; ll != nil; ll = ll.Next {
- if vmatch1(ll.N, r) != 0 {
- return 1
+ if vmatch1(ll.N, r) {
+ return true
}
}
- return 0
+ return false
}
/*
nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
}
- if v == nil || !(v.Class&PHEAP != 0) {
+ if v == nil || v.Class&PHEAP == 0 {
continue
}
// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
nptr1 = Nod(OINDEX, s, Nod(OLEN, l1, nil))
- nptr1.Bounded = 1
+ nptr1.Bounded = true
nptr1 = Nod(OADDR, nptr1, nil)
nptr2 = Nod(OSPTR, l2, nil)
nsrc = n.List.N
// Resolve slice type of multi-valued return.
- if Istype(nsrc.Type, TSTRUCT) != 0 {
+ if Istype(nsrc.Type, TSTRUCT) {
nsrc.Type = nsrc.Type.Type.Type
}
argc = count(n.List) - 1
for a = n.List.Next; a != nil; a = a.Next {
nx = Nod(OINDEX, ns, nn) // s[n] ...
- nx.Bounded = 1
+ nx.Bounded = true
l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
if a.Next != nil {
l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
//
func sliceany(n *Node, init **NodeList) *Node {
var bounded int
- var slice3 int
+ var slice3 bool
var src *Node
var lb *Node
var hb *Node
src = n.Left
lb = n.Right.Left
- slice3 = bool2int(n.Op == OSLICE3 || n.Op == OSLICE3ARR)
- if slice3 != 0 {
+ slice3 = n.Op == OSLICE3 || n.Op == OSLICE3ARR
+ if slice3 {
hb = n.Right.Right.Left
cb = n.Right.Right.Right
} else {
// static checks if possible
bv = 1 << 50
- if Isconst(bound, CTINT) != 0 {
- if !(Smallintconst(bound) != 0) {
+ if Isconst(bound, CTINT) {
+ if !Smallintconst(bound) {
Yyerror("array len too large")
} else {
bv = Mpgetfix(bound.Val.U.Xval)
}
}
- if Isconst(cb, CTINT) != 0 {
+ if Isconst(cb, CTINT) {
cbv = Mpgetfix(cb.Val.U.Xval)
if cbv < 0 || cbv > bv {
Yyerror("slice index out of bounds")
}
}
- if Isconst(hb, CTINT) != 0 {
+ if Isconst(hb, CTINT) {
hbv = Mpgetfix(hb.Val.U.Xval)
if hbv < 0 || hbv > bv {
Yyerror("slice index out of bounds")
}
}
- if Isconst(lb, CTINT) != 0 {
+ if Isconst(lb, CTINT) {
lbv = Mpgetfix(lb.Val.U.Xval)
if lbv < 0 || lbv > bv {
Yyerror("slice index out of bounds")
if cb != nil {
cb = cheapexpr(conv(cb, bt), init)
- if !(bounded != 0) {
+ if bounded == 0 {
chk0 = Nod(OLT, bound, cb)
}
- } else if slice3 != 0 {
+ } else if slice3 {
// When we figure out what this means, implement it.
Fatal("slice3 with cb == N") // rejected by parser
}
if hb != nil {
hb = cheapexpr(conv(hb, bt), init)
- if !(bounded != 0) {
+ if bounded == 0 {
if cb != nil {
chk1 = Nod(OLT, cb, hb)
} else {
chk1 = Nod(OLT, bound, hb)
}
}
- } else if slice3 != 0 {
+ } else if slice3 {
// When we figure out what this means, implement it.
Fatal("slice3 with hb == N") // rejected by parser
} else if n.Op == OSLICEARR {
if lb != nil {
lb = cheapexpr(conv(lb, bt), init)
- if !(bounded != 0) {
+ if bounded == 0 {
chk2 = Nod(OLT, hb, lb)
}
}
n.Right = nil
n.List = nil
- if !(slice3 != 0) {
+ if !slice3 {
cb = bound
}
if lb == nil {
l = nil
r = nil
- if Isinter(n.Left.Type) != 0 && !(Isinter(n.Right.Type) != 0) {
+ if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
l = n.Left
r = n.Right
- } else if !(Isinter(n.Left.Type) != 0) && Isinter(n.Right.Type) != 0 {
+ } else if !Isinter(n.Left.Type) && Isinter(n.Right.Type) {
l = n.Right
r = n.Left
}
return
case TARRAY:
- if Isslice(t) != 0 {
+ if Isslice(t) {
return
}
cmpr = cmpr.Left
}
- if !(islvalue(cmpl) != 0) || !(islvalue(cmpr) != 0) {
+ if !islvalue(cmpl) || !islvalue(cmpr) {
Fatal("arguments of comparison must be lvalues - %v %v", Nconv(cmpl, 0), Nconv(cmpr, 0))
}
}
if expr == nil {
- expr = Nodbool(bool2int(n.Op == OEQ))
+ expr = Nodbool(n.Op == OEQ)
}
r = expr
goto ret
}
if expr == nil {
- expr = Nodbool(bool2int(n.Op == OEQ))
+ expr = Nodbool(n.Op == OEQ)
}
r = expr
goto ret
return
}
-func samecheap(a *Node, b *Node) int {
+func samecheap(a *Node, b *Node) bool {
var ar *Node
var br *Node
for a != nil && b != nil && a.Op == b.Op {
switch a.Op {
default:
- return 0
+ return false
case ONAME:
- return bool2int(a == b)
+ return a == b
case ODOT,
ODOTPTR:
ar = a.Right
br = b.Right
if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
- return 0
+ return false
}
case OINDEX:
ar = a.Right
br = b.Right
- if !(Isconst(ar, CTINT) != 0) || !(Isconst(br, CTINT) != 0) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
- return 0
+ if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val.U.Xval, br.Val.U.Xval) != 0 {
+ return false
}
}
b = b.Left
}
- return 0
+ return false
}
func walkrotate(np **Node) {
}
// Want same, side effect-free expression on lhs of both shifts.
- if !(samecheap(l.Left, r.Left) != 0) {
+ if !samecheap(l.Left, r.Left) {
return
}
// Constants adding to width?
w = int(l.Type.Width * 8)
- if Smallintconst(l.Right) != 0 && Smallintconst(r.Right) != 0 {
+ if Smallintconst(l.Right) && Smallintconst(r.Right) {
sl = int(Mpgetfix(l.Right.Val.U.Xval))
if sl >= 0 {
sr = int(Mpgetfix(r.Right.Val.U.Xval))
var w int
n = *np
- if !(Isint[n.Type.Etype] != 0) {
+ if Isint[n.Type.Etype] == 0 {
return
}
}
// return 1 if integer n must be in range [0, max), 0 otherwise
-func bounded(n *Node, max int64) int {
+func bounded(n *Node, max int64) bool {
var v int64
var bits int32
var sign int
- if n.Type == nil || !(Isint[n.Type.Etype] != 0) {
- return 0
+ if n.Type == nil || Isint[n.Type.Etype] == 0 {
+ return false
}
sign = int(Issigned[n.Type.Etype])
bits = int32(8 * n.Type.Width)
- if Smallintconst(n) != 0 {
+ if Smallintconst(n) {
v = Mpgetfix(n.Val.U.Xval)
- return bool2int(0 <= v && v < max)
+ return 0 <= v && v < max
}
switch n.Op {
case OAND:
v = -1
- if Smallintconst(n.Left) != 0 {
+ if Smallintconst(n.Left) {
v = Mpgetfix(n.Left.Val.U.Xval)
- } else if Smallintconst(n.Right) != 0 {
+ } else if Smallintconst(n.Right) {
v = Mpgetfix(n.Right.Val.U.Xval)
}
if 0 <= v && v < max {
- return 1
+ return true
}
case OMOD:
- if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ if sign == 0 && Smallintconst(n.Right) {
v = Mpgetfix(n.Right.Val.U.Xval)
if 0 <= v && v <= max {
- return 1
+ return true
}
}
case ODIV:
- if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ if sign == 0 && Smallintconst(n.Right) {
v = Mpgetfix(n.Right.Val.U.Xval)
for bits > 0 && v >= 2 {
bits--
}
case ORSH:
- if !(sign != 0) && Smallintconst(n.Right) != 0 {
+ if sign == 0 && Smallintconst(n.Right) {
v = Mpgetfix(n.Right.Val.U.Xval)
if v > int64(bits) {
- return 1
+ return true
}
bits -= int32(v)
}
}
- if !(sign != 0) && bits <= 62 && 1<<uint(bits) <= max {
- return 1
+ if sign == 0 && bits <= 62 && 1<<uint(bits) <= max {
+ return true
}
- return 0
+ return false
}
func usefield(n *Node) {
var field *Type
var l *Type
- if !(obj.Fieldtrack_enabled != 0) {
+ if obj.Fieldtrack_enabled == 0 {
return
}
switch n.Op {
default:
Fatal("usefield %v", Oconv(int(n.Op), 0))
- fallthrough
case ODOT,
ODOTPTR:
Curfn.Paramfld = l
}
-func candiscardlist(l *NodeList) int {
+func candiscardlist(l *NodeList) bool {
for ; l != nil; l = l.Next {
- if !(candiscard(l.N) != 0) {
- return 0
+ if !candiscard(l.N) {
+ return false
}
}
- return 1
+ return true
}
-func candiscard(n *Node) int {
+func candiscard(n *Node) bool {
if n == nil {
- return 1
+ return true
}
switch n.Op {
default:
- return 0
+ return false
// Discardable as long as the subpieces are.
case ONAME,
// Discardable as long as we know it's not division by zero.
case ODIV,
OMOD:
- if Isconst(n.Right, CTINT) != 0 && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
+ if Isconst(n.Right, CTINT) && mpcmpfixc(n.Right.Val.U.Xval, 0) != 0 {
break
}
- if Isconst(n.Right, CTFLT) != 0 && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
+ if Isconst(n.Right, CTFLT) && mpcmpfltc(n.Right.Val.U.Fval, 0) != 0 {
break
}
- return 0
+ return false
// Discardable as long as we know it won't fail because of a bad size.
case OMAKECHAN,
OMAKEMAP:
- if Isconst(n.Left, CTINT) != 0 && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
+ if Isconst(n.Left, CTINT) && mpcmpfixc(n.Left.Val.U.Xval, 0) == 0 {
break
}
- return 0
+ return false
// Difficult to tell what sizes are okay.
case OMAKESLICE:
- return 0
+ return false
}
- if !(candiscard(n.Left) != 0) || !(candiscard(n.Right) != 0) || !(candiscard(n.Ntest) != 0) || !(candiscard(n.Nincr) != 0) || !(candiscardlist(n.Ninit) != 0) || !(candiscardlist(n.Nbody) != 0) || !(candiscardlist(n.Nelse) != 0) || !(candiscardlist(n.List) != 0) || !(candiscardlist(n.Rlist) != 0) {
- return 0
+ if !candiscard(n.Left) || !candiscard(n.Right) || !candiscard(n.Ntest) || !candiscard(n.Nincr) || !candiscardlist(n.Ninit) || !candiscardlist(n.Nbody) || !candiscardlist(n.Nelse) || !candiscardlist(n.List) || !candiscardlist(n.Rlist) {
+ return false
}
- return 1
+ return true
}
// rewrite
}
type yyParser interface {
+ Parse(yyLexer) int
Lookahead() int
}
return p.lookahead()
}
+func yyNewParser() yyParser {
+ p := &yyParserImpl{
+ lookahead: func() int { return -1 },
+ }
+ return p
+}
+
const yyFlag = -1000
func yyTokname(c int) string {
}
func yyParse(yylex yyLexer) int {
+ return yyNewParser().Parse(yylex)
+}
+
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
var yyn int
var yylval yySymType
var yyVAL yySymType
yystate := 0
yychar := -1
yytoken := -1 // yychar translated into internal numbering
- if lx, ok := yylex.(interface {
- SetParser(yyParser)
- }); ok {
- p := &yyParserImpl{
- lookahead: func() int { return yychar },
- }
- lx.SetParser(p)
- defer func() {
- // Make sure we report no lookahead when not parsing.
- yychar = -1
- yytoken = -1
- }()
- }
+ yyrcvr.lookahead = func() int { return yychar }
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ yychar = -1
+ yytoken = -1
+ }()
yyp := -1
goto yystack
// THE SOFTWARE.
type ar_hdr struct {
- Name string
- Date string
- Uid string
- Gid string
- Mode string
- Size string
- Fmag string
+ name string
+ date string
+ uid string
+ gid string
+ mode string
+ size string
+ fmag string
}
// split it into two instructions:
// ADD $-100004, R13
// MOVW R14, 0(R13)
- q = new(obj.Prog)
+ q = ctxt.NewProg()
p.Scond &^= C_WBIT
*q = *p
p.From = *a
p.From.Reg = 0
p.From.Type = obj.TYPE_CONST
- p.To = obj.Zprog.To
+ p.To = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R13
// make q into p but load/store from 0(R13)
q.Spadj = 0
- *a2 = obj.Zprog.From
+ *a2 = obj.Addr{}
a2.Type = obj.TYPE_MEM
a2.Reg = REG_R13
a2.Sym = nil
if p.Scond&(C_PBIT|C_WBIT) != 0 {
ctxt.Diag("unsupported instruction (.P/.W): %v", p)
}
- q = new(obj.Prog)
+ q = ctxt.NewProg()
*q = *p
if p.To.Type == obj.TYPE_MEM {
a2 = &q.To
p.From = *a
p.From.Type = obj.TYPE_ADDR
- p.To = obj.Zprog.To
+ p.To = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_R11
// make q into p but load/store from 0(R11)
- *a2 = obj.Zprog.From
+ *a2 = obj.Addr{}
a2.Type = obj.TYPE_MEM
a2.Reg = REG_R11
p = p.Link
for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
if p == nil {
- if checkpool(ctxt, op, 0) != 0 {
+ if checkpool(ctxt, op, 0) {
p = op
continue
}
if p.As == ACASE {
i = int(casesz(ctxt, p))
}
- if checkpool(ctxt, op, i) != 0 {
+ if checkpool(ctxt, op, i) {
p = op
continue
}
}
cursym.Size = int64(c)
- if !(bflag != 0) {
+ if bflag == 0 {
break
}
}
* drop the pool now, and branch round it.
* this happens only in extended basic blocks that exceed 4k.
*/
-func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) int {
+func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) bool {
if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
return flushpool(ctxt, p, 1, 0)
} else if p.Link == nil {
return flushpool(ctxt, p, 2, 0)
}
- return 0
+ return false
}
-func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
+func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) bool {
var q *obj.Prog
if ctxt.Blitrl != nil {
if false && skip == 1 {
fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
}
- q = new(obj.Prog)
+ q = ctxt.NewProg()
q.As = AB
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Link
q.Link = ctxt.Blitrl
q.Lineno = p.Lineno
- q.Ctxt = p.Ctxt
ctxt.Blitrl = q
- } else if !(force != 0) && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
- return 0
+ } else if force == 0 && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
+ return false
}
if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
// if pool is not multiple of 16 bytes, add an alignment marker
- q = new(obj.Prog)
+ q = ctxt.NewProg()
q.As = ADATABUNDLEEND
- q.Ctxt = p.Ctxt
ctxt.Elitrl.Link = q
ctxt.Elitrl = q
}
pool.size = 0
pool.start = 0
pool.extra = 0
- return 1
+ return true
}
- return 0
+ return false
}
func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
c = aclass(ctxt, a)
- t = obj.Zprog
+ t.Ctxt = ctxt
t.As = AWORD
- t.Ctxt = p.Ctxt
switch c {
default:
if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
// start a new data bundle
- q = new(obj.Prog)
-
- *q = obj.Zprog
+ q = ctxt.NewProg()
q.As = ADATABUNDLE
q.Pc = int64(pool.size)
- q.Ctxt = p.Ctxt
pool.size += 4
if ctxt.Blitrl == nil {
ctxt.Blitrl = q
ctxt.Elitrl = q
}
- q = new(obj.Prog)
+ q = ctxt.NewProg()
*q = t
q.Pc = int64(pool.size)
case 13: /* op $lcon, [R], R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = oprrr(ctxt, int(p.As), int(p.Scond))
case 30: /* mov/movb/movbu R,L(R) */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.To.Reg)
case 31: /* mov/movbu L(R),R -> lr[b] */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.From.Reg)
case 34: /* mov $lacon,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
case 52: /* floating point store, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.To.Reg)
case 53: /* floating point load, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.From.Reg)
case 64: /* mov/movb/movbu R,addr */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
case 65: /* mov/movbu addr,R */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
case 68: /* floating point store -> ADDR */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
case 69: /* floating point load <- ADDR */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
case 72: /* movh/movhu R,L(R) -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.To.Reg)
case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
r = int(p.From.Reg)
case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
o1 = omvl(ctxt, p, &p.From, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
case 94: /* movh/movhu R,addr -> strh */
o1 = omvl(ctxt, p, &p.To, REGTMP)
- if !(o1 != 0) {
+ if o1 == 0 {
break
}
o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
ctxt.Diag(".nil on LDR/STR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
- if !(sc&C_PBIT != 0) {
+ if sc&C_PBIT == 0 {
o |= 1 << 24
}
- if !(sc&C_UBIT != 0) {
+ if sc&C_UBIT == 0 {
o |= 1 << 23
}
if sc&C_WBIT != 0 {
ctxt.Diag(".nil on LDRH/STRH instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
- if !(sc&C_PBIT != 0) {
+ if sc&C_PBIT == 0 {
o |= 1 << 24
}
if sc&C_WBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction")
}
o = ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
- if !(sc&C_PBIT != 0) {
+ if sc&C_PBIT == 0 {
o |= 1 << 24
}
if sc&C_WBIT != 0 {
func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var v int32
var o1 uint32
- if !(p.Pcond != nil) {
+ if p.Pcond == nil {
aclass(ctxt, a)
v = immrot(^uint32(ctxt.Instoffset))
if v == 0 {
cursym.Args = p.To.U.Argsize
if ctxt.Debugzerostack != 0 {
- if autoffset != 0 && !(p.From3.Offset&obj.NOSPLIT != 0) {
+ if autoffset != 0 && p.From3.Offset&obj.NOSPLIT == 0 {
// MOVW $4(R13), R1
p = obj.Appendp(ctxt, p)
}
}
- if !(autosize != 0) && !(cursym.Text.Mark&LEAF != 0) {
+ if autosize == 0 && cursym.Text.Mark&LEAF == 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
obj.Bflush(ctxt.Bso)
if cursym.Text.Mark&LEAF != 0 {
cursym.Leaf = 1
- if !(autosize != 0) {
+ if autosize == 0 {
break
}
}
- if !(p.From3.Offset&obj.NOSPLIT != 0) {
- p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
+ if p.From3.Offset&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
}
// MOVW.W R14,$-autosize(SP)
case obj.ARET:
obj.Nocache(p)
if cursym.Text.Mark&LEAF != 0 {
- if !(autosize != 0) {
+ if autosize == 0 {
p.As = AB
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
if p.To.Sym != nil { // retjmp
p.To.Type = obj.TYPE_BRANCH
} else {
}
}
-func isfloatreg(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15)
+func isfloatreg(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && REG_F0 <= a.Reg && a.Reg <= REG_F15
}
func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
for p = cursym.Text; p != nil; p = p.Link {
switch p.As {
case AMOVW:
- if isfloatreg(&p.To) != 0 || isfloatreg(&p.From) != 0 {
+ if isfloatreg(&p.To) || isfloatreg(&p.From) {
goto soft
}
goto notsoft
}
soft:
- if !(wasfloat != 0) || (p.Mark&LABEL != 0) {
- next = new(obj.Prog)
+ if wasfloat == 0 || (p.Mark&LABEL != 0) {
+ next = ctxt.NewProg()
*next = *p
// BL _sfloat(SB)
- *p = obj.Zprog
-
+ *p = obj.Prog{}
+ p.Ctxt = ctxt
p.Link = next
p.As = ABL
p.To.Type = obj.TYPE_BRANCH
}
}
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
// MOVW g_stackguard(g), R1
p = obj.Appendp(ctxt, p)
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
- p.To.Sym = ctxt.Symmorestack[noctxt]
+ p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
// BLS start
ctxt.Cursym = s
- firstp = new(obj.Prog)
+ firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
if q != nil && q.As != obj.ATEXT {
p.Mark |= FOLL
p = q
- if !(p.Mark&FOLL != 0) {
+ if p.Mark&FOLL == 0 {
goto loop
}
}
copy:
for {
- r = new(obj.Prog)
+ r = ctxt.NewProg()
*r = *p
- if !(r.Mark&FOLL != 0) {
+ if r.Mark&FOLL == 0 {
fmt.Printf("can't happen 1\n")
}
r.Mark |= FOLL
}
r.Pcond = p.Link
r.Link = p.Pcond
- if !(r.Link.Mark&FOLL != 0) {
+ if r.Link.Mark&FOLL == 0 {
xfol(ctxt, r.Link, last)
}
- if !(r.Pcond.Mark&FOLL != 0) {
+ if r.Pcond.Mark&FOLL == 0 {
fmt.Printf("can't happen 2\n")
}
return
}
a = AB
- q = new(obj.Prog)
+ q = ctxt.NewProg()
q.As = int16(a)
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q.Pcond = p
- q.Ctxt = p.Ctxt
p = q
}
return off + wid
}
-func Adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
var off int64
off = s.Size
return off
}
-func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
- return Adduintxx(ctxt, s, uint64(v), 1)
+func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+ return adduintxx(ctxt, s, uint64(v), 1)
}
-func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
- return Adduintxx(ctxt, s, uint64(v), 2)
+func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+ return adduintxx(ctxt, s, uint64(v), 2)
}
func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
- return Adduintxx(ctxt, s, uint64(v), 4)
+ return adduintxx(ctxt, s, uint64(v), 4)
}
func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
- return Adduintxx(ctxt, s, v, 8)
+ return adduintxx(ctxt, s, v, 8)
}
-func Setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 1)
}
return Setuintxx(ctxt, s, r, uint64(v), 2)
}
-func Setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
return Setuintxx(ctxt, s, r, uint64(v), 4)
}
return Setuintxx(ctxt, s, r, v, 8)
}
-func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
return i + int64(r.Siz)
}
-func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
- return Addaddrplus(ctxt, s, t, 0)
+func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+ return addaddrplus(ctxt, s, t, 0)
}
-func Setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
var r *Reloc
if s.Type == 0 {
return off + int64(r.Siz)
}
-func Setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
- return Setaddrplus(ctxt, s, off, t, 0)
+func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
+ return setaddrplus(ctxt, s, off, t, 0)
}
-func Addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
var i int64
var r *Reloc
return i + int64(r.Siz)
}
-func Addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
var i int64
var r *Reloc
var Fieldtrack_enabled int
-var Zprog Prog
-
// Toolchain experiments.
// These are controlled by the GOEXPERIMENT environment
// variable recorded when the toolchain is built.
func Nopout(p *Prog) {
p.As = ANOP
- p.Scond = Zprog.Scond
- p.From = Zprog.From
- p.From3 = Zprog.From3
- p.Reg = Zprog.Reg
- p.To = Zprog.To
+ p.Scond = 0
+ p.From = Addr{}
+ p.From3 = Addr{}
+ p.Reg = 0
+ p.To = Addr{}
}
func Nocache(p *Prog) {
ctxt.Diag("span must be looping")
log.Fatalf("bad code")
}
- if !(loop != 0) {
+ if loop == 0 {
break
}
}
case Zlit:
for ; ; z++ {
op = int(o.op[z])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
case Zlitm_r:
for ; ; z++ {
op = int(o.op[z])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
tmp2 := z
z++
op = int(o.op[tmp2])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
"math"
)
-func canuselocaltls(ctxt *obj.Link) int {
+func canuselocaltls(ctxt *obj.Link) bool {
switch ctxt.Headtype {
case obj.Hlinux,
obj.Hnacl,
obj.Hplan9,
obj.Hwindows:
- return 0
+ return false
}
- return 1
+ return true
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
var q *obj.Prog
// See obj6.c for discussion of TLS.
- if canuselocaltls(ctxt) != 0 {
+ if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
// Sequences like
// MOVL TLS, BX
q = nil
- if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
+ if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
}
- if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
- p = stacksplit(ctxt, p, autoffset, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
+ if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autoffset, cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
}
if autoffset != 0 {
p2.Pcond = p
}
- if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
+ if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
// 8l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
- p.To.Sym = ctxt.Symmorestack[noctxt]
+ p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
p = obj.Appendp(ctxt, p)
ctxt.Cursym = s
- firstp = new(obj.Prog)
+ firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
}
-func nofollow(a int) int {
+func nofollow(a int) bool {
switch a {
case obj.AJMP,
obj.ARET,
AIRETL,
AIRETW,
obj.AUNDEF:
- return 1
+ return true
}
- return 0
+ return false
}
-func pushpop(a int) int {
+func pushpop(a int) bool {
switch a {
case APUSHL,
APUSHFL,
APOPFL,
APOPW,
APOPFW:
- return 1
+ return true
}
- return 0
+ return false
}
func relinv(a int) int {
continue
}
- if nofollow(a) != 0 || pushpop(a) != 0 {
+ if nofollow(a) || pushpop(a) {
break // NOTE(rsc): arm does goto copy
}
if q.Pcond == nil || q.Pcond.Mark != 0 {
/* */
}
}
- q = new(obj.Prog)
+ q = ctxt.NewProg()
q.As = obj.AJMP
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
a = int(p.As)
/* continue loop with what comes after p */
- if nofollow(a) != 0 {
+ if nofollow(a) {
return
}
if p.Pcond != nil && a != obj.ACALL {
package obj
-import (
- "fmt"
- "os"
- "path"
- "strings"
-)
-
-func addlib(ctxt *Link, src, obj, pathname string) {
- name := path.Clean(pathname)
-
- // runtime.a -> runtime
- short := strings.TrimSuffix(name, ".a")
-
- // already loaded?
- for i := range ctxt.Library {
- if ctxt.Library[i].Pkg == short {
- return
- }
- }
-
- var pname string
- // runtime -> runtime.a for search
- if (!(ctxt.Windows != 0) && name[0] == '/') || (ctxt.Windows != 0 && name[1] == ':') {
- pname = name
- } else {
- // try dot, -L "libdir", and then goroot.
- for _, dir := range ctxt.Libdir {
- pname = dir + "/" + name
- if _, err := os.Stat(pname); !os.IsNotExist(err) {
- break
- }
- }
- }
- pname = path.Clean(pname)
-
- // runtime.a -> runtime
- pname = strings.TrimSuffix(pname, ".a")
-
- if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
- fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
- }
- Addlibpath(ctxt, src, obj, pname, name)
-}
-
/*
* add library to library list.
* srcref: src file referring to package
* file: object file, e.g., /home/rsc/go/pkg/container/vector.a
* pkg: package import path, e.g. container/vector
*/
-func Addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
- for _, lib := range ctxt.Library {
- if lib.File == file {
- return
- }
- }
-
- if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
- fmt.Fprintf(ctxt.Bso, "%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s\n", Cputime(), srcref, objref, file, pkg)
- }
-
- ctxt.Library = append(ctxt.Library, Library{
- Objref: objref,
- Srcref: srcref,
- File: file,
- Pkg: pkg,
- })
-}
const (
LOG = 5
}
type Pciter struct {
- D Pcdata
- P []byte
- Pc uint32
- Nextpc uint32
- Pcscale uint32
- Value int32
- Start int
- Done int
+ d Pcdata
+ p []byte
+ pc uint32
+ nextpc uint32
+ pcscale uint32
+ value int32
+ start int
+ done int
}
// An Addr is an argument to an instruction.
}
}
- if !(found != 0) {
+ if found == 0 {
p = Appendp(ctxt, s.Text)
p.As = AFUNCDATA
p.From.Type = TYPE_CONST
v |= uint32(p[0]&0x7F) << uint(shift)
tmp7 := p
p = p[1:]
- if !(tmp7[0]&0x80 != 0) {
+ if tmp7[0]&0x80 == 0 {
break
}
}
return v
}
-func Pciternext(it *Pciter) {
+func pciternext(it *Pciter) {
var v uint32
var dv int32
- it.Pc = it.Nextpc
- if it.Done != 0 {
+ it.pc = it.nextpc
+ if it.done != 0 {
return
}
- if -cap(it.P) >= -cap(it.D.P[len(it.D.P):]) {
- it.Done = 1
+ if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
+ it.done = 1
return
}
// value delta
- v = getvarint(&it.P)
+ v = getvarint(&it.p)
- if v == 0 && !(it.Start != 0) {
- it.Done = 1
+ if v == 0 && it.start == 0 {
+ it.done = 1
return
}
- it.Start = 0
+ it.start = 0
dv = int32(v>>1) ^ (int32(v<<31) >> 31)
- it.Value += dv
+ it.value += dv
// pc delta
- v = getvarint(&it.P)
+ v = getvarint(&it.p)
- it.Nextpc = it.Pc + v*it.Pcscale
+ it.nextpc = it.pc + v*it.pcscale
}
-func Pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
- it.D = *d
- it.P = it.D.P
- it.Pc = 0
- it.Nextpc = 0
- it.Value = -1
- it.Start = 1
- it.Done = 0
- it.Pcscale = uint32(ctxt.Arch.Minlc)
- Pciternext(it)
+func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
+ it.d = *d
+ it.p = it.d.P
+ it.pc = 0
+ it.nextpc = 0
+ it.value = -1
+ it.start = 1
+ it.done = 0
+ it.pcscale = uint32(ctxt.Arch.Minlc)
+ pciternext(it)
}
if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
otxt = p.Pcond.Pc - c
if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
- q = new(obj.Prog)
- q.Ctxt = p.Ctxt
+ q = ctxt.NewProg()
q.Link = p.Link
p.Link = q
q.As = ABR
q.To.Type = obj.TYPE_BRANCH
q.Pcond = p.Pcond
p.Pcond = q
- q = new(obj.Prog)
- q.Ctxt = p.Ctxt
+ q = ctxt.NewProg()
q.Link = p.Link
p.Link = q
q.As = ABR
}
}
-func isint32(v int64) int {
- return bool2int(int64(int32(v)) == v)
+func isint32(v int64) bool {
+ return int64(int32(v)) == v
}
-func isuint32(v uint64) int {
- return bool2int(uint64(uint32(v)) == v)
+func isuint32(v uint64) bool {
+ return uint64(uint32(v)) == v
}
func aclass(ctxt *obj.Link, a *obj.Addr) int {
if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
return C_SACON
}
- if isint32(ctxt.Instoffset) != 0 {
+ if isint32(ctxt.Instoffset) {
return C_LACON
}
return C_DACON
if ctxt.Instoffset <= 0xffff {
return C_ANDCON
}
- if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) != 0 { /* && (instoffset & (1<<31)) == 0) */
+ if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
return C_UCON
}
- if isint32(ctxt.Instoffset) != 0 || isuint32(uint64(ctxt.Instoffset)) != 0 {
+ if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
return C_LCON
}
return C_DCON
if ctxt.Instoffset >= -0x8000 {
return C_ADDCON
}
- if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) != 0 {
+ if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
return C_UCON
}
- if isint32(ctxt.Instoffset) != 0 {
+ if isint32(ctxt.Instoffset) {
return C_LCON
}
return C_DCON
/*
* 32-bit masks
*/
-func getmask(m []byte, v uint32) int {
+func getmask(m []byte, v uint32) bool {
var i int
m[1] = 0
m[0] = m[1]
if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
- if getmask(m, ^v) != 0 {
+ if getmask(m, ^v) {
i = int(m[0])
m[0] = m[1] + 1
m[1] = byte(i - 1)
- return 1
+ return true
}
- return 0
+ return false
}
for i = 0; i < 32; i++ {
for {
m[1] = byte(i)
i++
- if !(i < 32 && v&(1<<uint(31-i)) != 0) {
+ if i >= 32 || v&(1<<uint(31-i)) == 0 {
break
}
}
for ; i < 32; i++ {
if v&(1<<uint(31-i)) != 0 {
- return 0
+ return false
}
}
- return 1
+ return true
}
}
- return 0
+ return false
}
func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
- if !(getmask(m, v) != 0) {
+ if !getmask(m, v) {
ctxt.Diag("cannot generate mask #%x\n%v", v, p)
}
}
/*
* 64-bit masks (rldic etc)
*/
-func getmask64(m []byte, v uint64) int {
+func getmask64(m []byte, v uint64) bool {
var i int
m[1] = 0
for {
m[1] = byte(i)
i++
- if !(i < 64 && v&(uint64(1)<<uint(63-i)) != 0) {
+ if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
break
}
}
for ; i < 64; i++ {
if v&(uint64(1)<<uint(63-i)) != 0 {
- return 0
+ return false
}
}
- return 1
+ return true
}
}
- return 0
+ return false
}
func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
- if !(getmask64(m, v) != 0) {
+ if !getmask64(m, v) {
ctxt.Diag("cannot generate mask #%x\n%v", v, p)
}
}
var v int32
v = int32(d >> 16)
- if isuint32(uint64(d)) != 0 {
+ if isuint32(uint64(d)) {
return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
}
return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
log.Fatalf("invalid handling of %v", p)
}
v >>= 16
- if r == REGZERO && isuint32(uint64(d)) != 0 {
+ if r == REGZERO && isuint32(uint64(d)) {
o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
break
}
if r == 0 {
r = int(p.To.Reg)
}
- if p.As == AADD && (!(r0iszero != 0 /*TypeKind(100016)*/) && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
+ if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
ctxt.Diag("literal operation on R0\n%v", p)
}
o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+ALAST)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
p.Pcond = q1
}
- if !(q1.Mark&LEAF != 0) {
+ if q1.Mark&LEAF == 0 {
q1.Mark |= LABEL
}
} else {
}
p.To.Offset = int64(autosize) - 8
- if !(p.From3.Offset&obj.NOSPLIT != 0) {
- p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0))) // emit split check
+ if p.From3.Offset&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autosize, cursym.Text.From3.Offset&obj.NEEDCTXT == 0) // emit split check
}
q = p
if autosize != 0 {
/* use MOVDU to adjust R1 when saving R31, if autosize is small */
- if !(cursym.Text.Mark&LEAF != 0) && autosize >= -BIG && autosize <= BIG {
+ if cursym.Text.Mark&LEAF == 0 && autosize >= -BIG && autosize <= BIG {
mov = AMOVDU
aoffset = int(-autosize)
} else {
q.To.Reg = REGSP
q.Spadj = +autosize
}
- } else if !(cursym.Text.Mark&LEAF != 0) {
+ } else if cursym.Text.Mark&LEAF == 0 {
if ctxt.Debugvlog != 0 {
fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
obj.Bflush(ctxt.Bso)
}
if cursym.Text.Mark&LEAF != 0 {
- if !(autosize != 0) {
+ if autosize == 0 {
p.As = ABR
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_LR
p.Mark |= BRANCH
p.To.Reg = REGSP
p.Spadj = -autosize
- q = p.Ctxt.NewProg()
+ q = ctxt.NewProg()
q.As = ABR
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = REGTMP
- q = p.Ctxt.NewProg()
+ q = ctxt.NewProg()
q.As = AMOVD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_REG
if false {
// Debug bad returns
- q = p.Ctxt.NewProg()
+ q = ctxt.NewProg()
q.As = AMOVD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_MEM
}
if autosize != 0 {
- q = p.Ctxt.NewProg()
+ q = ctxt.NewProg()
q.As = AADD
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_CONST
p.Link = q
}
- q1 = p.Ctxt.NewProg()
+ q1 = ctxt.NewProg()
q1.As = ABR
q1.Lineno = p.Lineno
q1.To.Type = obj.TYPE_REG
q = p;
}
*/
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt bool) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
- p.To.Sym = ctxt.Symmorestack[noctxt]
+ p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
// BR start
ctxt.Cursym = s
- firstp = new(obj.Prog)
+ firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
p = p.Link
xfol(ctxt, p, last)
p = q
- if p != nil && !(p.Mark&FOLL != 0) {
+ if p != nil && p.Mark&FOLL == 0 {
goto loop
}
return
if q != nil {
p.Mark |= FOLL
p = q
- if !(p.Mark&FOLL != 0) {
+ if p.Mark&FOLL == 0 {
goto loop
}
}
if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
goto copy
}
- if !(q.Pcond != nil) || (q.Pcond.Mark&FOLL != 0) {
+ if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
continue
}
b = relinv(a)
- if !(b != 0) {
+ if b == 0 {
continue
}
copy:
for {
- r = new(obj.Prog)
+ r = ctxt.NewProg()
*r = *p
- if !(r.Mark&FOLL != 0) {
+ if r.Mark&FOLL == 0 {
fmt.Printf("cant happen 1\n")
}
r.Mark |= FOLL
r.As = int16(b)
r.Pcond = p.Link
r.Link = p.Pcond
- if !(r.Link.Mark&FOLL != 0) {
+ if r.Link.Mark&FOLL == 0 {
xfol(ctxt, r.Link, last)
}
- if !(r.Pcond.Mark&FOLL != 0) {
+ if r.Pcond.Mark&FOLL == 0 {
fmt.Printf("cant happen 2\n")
}
return
}
a = ABR
- q = p.Ctxt.NewProg()
+ q = ctxt.NewProg()
q.As = int16(a)
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
}{"windowsgui", Hwindows},
}
-func Headtype(name string) int {
+func headtype(name string) int {
var i int
for i = 0; i < len(headers); i++ {
ctxt.Pathname = buf
- ctxt.Headtype = Headtype(Getgoos())
+ ctxt.Headtype = headtype(Getgoos())
if ctxt.Headtype < 0 {
log.Fatalf("unknown goos %s", Getgoos())
}
return ctxt
}
-func Linknewsym(ctxt *Link, symb string, v int) *LSym {
+func linknewsym(ctxt *Link, symb string, v int) *LSym {
var s *LSym
s = new(LSym)
return s
}
}
- if !(creat != 0) {
+ if creat == 0 {
return nil
}
- s = Linknewsym(ctxt, symb, v)
+ s = linknewsym(ctxt, symb, v)
s.Extname = s.Name
s.Hash = ctxt.Hash[h]
ctxt.Hash[h] = s
}
// read-only lookup
-func Linkrlookup(ctxt *Link, name string, v int) *LSym {
+func linkrlookup(ctxt *Link, name string, v int) *LSym {
return _lookup(ctxt, name, v, 0)
}
// It does not seem to be necessary for any other systems. This is probably working
// around a Solaris-specific bug that should be fixed differently, but we don't know
// what that bug is. And this does fix it.
-func isextern(s *obj.LSym) int {
+func isextern(s *obj.LSym) bool {
// All the Solaris dynamic imports from libc.so begin with "libc_".
- return bool2int(strings.HasPrefix(s.Name, "libc_"))
+ return strings.HasPrefix(s.Name, "libc_")
}
// single-instruction no-ops of various lengths.
ctxt.Diag("span must be looping")
log.Fatalf("loop")
}
- if !(loop != 0) {
+ if loop == 0 {
break
}
}
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- if a.Sym != nil && isextern(a.Sym) != 0 {
+ if a.Sym != nil && isextern(a.Sym) {
return Yi32
}
return Yiauto // use pc-relative addressing
log.Fatalf("reloc")
}
- if isextern(s) != 0 {
+ if isextern(s) {
r.Siz = 4
r.Type = obj.R_ADDR
} else {
switch a.Name {
case obj.NAME_EXTERN,
obj.NAME_STATIC:
- if !(isextern(a.Sym) != 0) {
+ if !isextern(a.Sym) {
goto bad
}
base = REG_NONE
ctxt.Rexflag |= regrex[base]&Rxb | rex
if base == REG_NONE || (REG_CS <= base && base <= REG_GS) || base == REG_TLS {
- if (a.Sym == nil || !(isextern(a.Sym) != 0)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
+ if (a.Sym == nil || !isextern(a.Sym)) && base == REG_NONE && (a.Name == obj.NAME_STATIC || a.Name == obj.NAME_EXTERN) || ctxt.Asmode != 64 {
ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
ctxt.Andptr = ctxt.Andptr[1:]
goto putrelv
Movtab{0, 0, 0, 0, [4]uint8{}},
}
-func isax(a *obj.Addr) int {
+func isax(a *obj.Addr) bool {
switch a.Reg {
case REG_AX,
REG_AL,
REG_AH:
- return 1
+ return true
}
if a.Index == REG_AX {
- return 1
+ return true
}
- return 0
+ return false
}
func subreg(p *obj.Prog, from int, to int) {
case Zlit:
for ; ; z++ {
op = int(o.op[z])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
case Zlitm_r:
for ; ; z++ {
op = int(o.op[z])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
tmp1 := z
z++
op = int(o.op[tmp1])
- if !(op != 0) {
+ if op == 0 {
break
}
ctxt.Andptr[0] = byte(op)
z = int(p.From.Reg)
if p.From.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- if isax(&p.To) != 0 || p.To.Type == obj.TYPE_NONE {
+ if isax(&p.To) || p.To.Type == obj.TYPE_NONE {
// We certainly don't want to exchange
// with AX if the op is MUL or DIV.
ctxt.Andptr[0] = 0x87
z = int(p.To.Reg)
if p.To.Type == obj.TYPE_REG && z >= REG_BP && z <= REG_DI {
- if isax(&p.From) != 0 {
+ if isax(&p.From) {
ctxt.Andptr[0] = 0x87
ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
asmando(ctxt, p, &p.To, reg[REG_BX])
"math"
)
-func canuselocaltls(ctxt *obj.Link) int {
+func canuselocaltls(ctxt *obj.Link) bool {
switch ctxt.Headtype {
case obj.Hplan9,
obj.Hwindows:
- return 0
+ return false
}
- return 1
+ return true
}
func progedit(ctxt *obj.Link, p *obj.Prog) {
// access TLS, and they are rewritten appropriately first here in
// liblink and then finally using relocations in the linker.
- if canuselocaltls(ctxt) != 0 {
+ if canuselocaltls(ctxt) {
// Reduce TLS initial exec model to TLS local exec model.
// Sequences like
// MOVQ TLS, BX
cursym.Args = int32(textarg)
cursym.Locals = int32(p.To.Offset)
- if autoffset < obj.StackSmall && !(p.From3.Offset&obj.NOSPLIT != 0) {
+ if autoffset < obj.StackSmall && p.From3.Offset&obj.NOSPLIT == 0 {
for q = p; q != nil; q = q.Link {
if q.As == obj.ACALL {
goto noleaf
}
q = nil
- if !(p.From3.Offset&obj.NOSPLIT != 0) || (p.From3.Offset&obj.WRAPPER != 0) {
+ if p.From3.Offset&obj.NOSPLIT == 0 || (p.From3.Offset&obj.WRAPPER != 0) {
p = obj.Appendp(ctxt, p)
p = load_g_cx(ctxt, p) // load g into CX
}
- if !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
- p = stacksplit(ctxt, p, autoffset, int32(textarg), bool2int(!(cursym.Text.From3.Offset&obj.NEEDCTXT != 0)), &q) // emit split check
+ if cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autoffset, int32(textarg), cursym.Text.From3.Offset&obj.NEEDCTXT == 0, &q) // emit split check
}
if autoffset != 0 {
p2.Pcond = p
}
- if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From3.Offset&obj.NOSPLIT != 0) {
+ if ctxt.Debugzerostack != 0 && autoffset != 0 && cursym.Text.From3.Offset&obj.NOSPLIT == 0 {
// 6l -Z means zero the stack frame on entry.
// This slows down function calls but can help avoid
// false positives in garbage collection.
// Returns last new instruction.
// On return, *jmpok is the instruction that should jump
// to the stack frame allocation if no split is needed.
-func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt bool, jmpok **obj.Prog) *obj.Prog {
var q *obj.Prog
var q1 *obj.Prog
var cmp int
if ctxt.Cursym.Cfunc != 0 {
p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
} else {
- p.To.Sym = ctxt.Symmorestack[noctxt]
+ p.To.Sym = ctxt.Symmorestack[bool2int(noctxt)]
}
p = obj.Appendp(ctxt, p)
ctxt.Cursym = s
- firstp = new(obj.Prog)
+ firstp = ctxt.NewProg()
lastp = firstp
xfol(ctxt, s.Text, &lastp)
lastp.Link = nil
s.Text = firstp.Link
}
-func nofollow(a int) int {
+func nofollow(a int) bool {
switch a {
case obj.AJMP,
obj.ARET,
ARETFQ,
ARETFW,
obj.AUNDEF:
- return 1
+ return true
}
- return 0
+ return false
}
-func pushpop(a int) int {
+func pushpop(a int) bool {
switch a {
case APUSHL,
APUSHFL,
APOPFQ,
APOPW,
APOPFW:
- return 1
+ return true
}
- return 0
+ return false
}
func relinv(a int) int {
continue
}
- if nofollow(a) != 0 || pushpop(a) != 0 {
+ if nofollow(a) || pushpop(a) {
break // NOTE(rsc): arm does goto copy
}
if q.Pcond == nil || q.Pcond.Mark != 0 {
/* */
}
}
- q = new(obj.Prog)
+ q = ctxt.NewProg()
q.As = obj.AJMP
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
a = int(p.As)
/* continue loop with what comes after p */
- if nofollow(a) != 0 {
+ if nofollow(a) {
return
}
if p.Pcond != nil && a != obj.ACALL {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
return
case gc.OEFACE:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
}
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
switch n.Op {
case gc.OSPTR,
gc.OLEN:
- if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
- if gc.Isslice(n.Left.Type) != 0 {
+ if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
// if both are addressable, move
if n.Addable != 0 && res.Addable != 0 {
- if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
+ if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Op == gc.OREGISTER || res.Op == gc.OREGISTER || gc.Iscomplex[n.Type.Etype] != 0 || gc.Iscomplex[res.Type.Etype] != 0 {
gmove(n, res)
} else {
regalloc(&n1, n.Type, nil)
}
// if both are not addressable, use a temporary.
- if !(n.Addable != 0) && !(res.Addable != 0) {
+ if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
gc.Tempname(&n1, n.Type)
// if result is not addressable directly but n is,
// compute its address and then store via the address.
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
return
}
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
return
}
// if n is sudoaddable generate addr and move
- if !(gc.Is64(n.Type) != 0) && !(gc.Is64(res.Type) != 0) && !(gc.Iscomplex[n.Type.Etype] != 0) && !(gc.Iscomplex[res.Type.Etype] != 0) {
+ if !gc.Is64(n.Type) && !gc.Is64(res.Type) && gc.Iscomplex[n.Type.Etype] == 0 && gc.Iscomplex[res.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
- if sudoaddable(a, n, &addr, &w) != 0 {
+ if sudoaddable(a, n, &addr, &w) {
if res.Op != gc.OREGISTER {
regalloc(&n2, res.Type, nil)
p1 = gins(a, nil, &n2)
}
// 64-bit ops are hard on 32-bit machine.
- if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+ if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
switch n.Op {
// math goes to cgen64.
case gc.OMINUS,
p1 = gc.Gbranch(arm.AB, nil, 0)
p2 = gc.Pc
- gmove(gc.Nodbool(1), res)
+ gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(arm.AB, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
- gmove(gc.Nodbool(0), res)
+ gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
case gc.OLROT,
gc.OLSH,
gc.ORSH:
- cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
case gc.OCONV:
- if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
break
}
- if nl.Addable != 0 && !(gc.Is64(nl.Type) != 0) {
+ if nl.Addable != 0 && !gc.Is64(nl.Type) {
regalloc(&n1, nl.Type, res)
gmove(nl, &n1)
} else {
- if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) != 0 || gc.Isfloat[nl.Type.Etype] != 0 {
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(nl.Type) || gc.Isfloat[nl.Type.Etype] != 0 {
gc.Tempname(&n1, nl.Type)
} else {
regalloc(&n1, nl.Type, res)
cgen(nl, &n1)
}
- if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) != 0 || gc.Isfloat[n.Type.Etype] != 0 {
+ if n.Type.Width > int64(gc.Widthptr) || gc.Is64(n.Type) || gc.Isfloat[n.Type.Etype] != 0 {
gc.Tempname(&n2, n.Type)
} else {
regalloc(&n2, n.Type, nil)
// pointer is the first word of string or slice.
case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(arm.AMOVW, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
regfree(&n1)
case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
igen(nl, &n1, res)
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
gc.OAND,
gc.OOR,
gc.OXOR:
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
n2 = *nr
break
}
gc.OAND,
gc.OOR,
gc.OXOR:
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
n2 = *nr
break
}
* n might be any size; res is 32-bit.
* returns Prog* to patch to panic call.
*/
-func cgenindex(n *gc.Node, res *gc.Node, bounded int) *obj.Prog {
+func cgenindex(n *gc.Node, res *gc.Node, bounded bool) *obj.Prog {
var tmp gc.Node
var lo gc.Node
var hi gc.Node
var n1 gc.Node
var n2 gc.Node
- if !(gc.Is64(n.Type) != 0) {
+ if !gc.Is64(n.Type) {
cgen(n, res)
return nil
}
cgen(n, &tmp)
split64(&tmp, &lo, &hi)
gmove(&lo, res)
- if bounded != 0 {
+ if bounded {
splitclean()
return nil
}
n = n.Left
}
- if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
}
// should only get here for heap vars or paramref
- if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
gc.Dump("cgenr-n", n)
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
var p2 *obj.Prog
var w uint32
var v uint64
- var bounded int
+ var bounded bool
if gc.Debug['g'] != 0 {
gc.Dump("agenr-n", n)
case gc.OINDEX:
p2 = nil // to be patched to panicindex.
w = uint32(n.Type.Width)
- bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+ bounded = gc.Debug['B'] != 0 || n.Bounded
if nr.Addable != 0 {
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
}
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
} else {
gc.Tempname(&tmp, gc.Types[gc.TINT32])
p2 = cgenindex(nr, &tmp, bounded)
nr = &tmp
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
// w is width
// constant index
- if gc.Isconst(nr, gc.CTINT) != 0 {
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nr, gc.CTINT) {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
gmove(&n1, &n2)
regfree(&n1)
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&n4, gc.Types[gc.TUINT32], int64(len(nl.Val.U.Sval.S)))
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
gc.Patch(p1, gc.Pc)
}
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(arm.AMOVW, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
}
if n == nil {
- n = gc.Nodbool(1)
+ n = gc.Nodbool(true)
}
if n.Ninit != nil {
// need to ask if it is bool?
case gc.OLITERAL:
- if !true_ == !(n.Val.U.Bval != 0) {
+ if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(arm.AB, nil, 0), to)
}
goto ret
nr = r
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
// only valid to cmp darray to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal array comparison")
break
}
- if gc.Isinter(nl.Type) != 0 {
+ if gc.Isinter(nl.Type) {
// front end shold only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
break
}
- if gc.Is64(nr.Type) != 0 {
- if !(nl.Addable != 0) {
+ if gc.Is64(nr.Type) {
+ if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
}
if nr.Op == gc.OLITERAL {
- if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
+ if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) == 0 {
gencmp0(nl, nl.Type, a, likely, to)
break
}
case gc.OINDEX:
t = n.Left.Type
- if !(gc.Isfixedarray(t) != 0) {
+ if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isconst(n.Right, gc.CTINT) {
return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
}
return 1000
}
// Avoid taking the address for simple enough types.
- if componentgen(n, res) != 0 {
+ if componentgen(n, res) {
return
}
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
- fallthrough
case 1:
op = arm.AMOVB
for {
tmp14 := c
c--
- if !(tmp14 > 0) {
+ if tmp14 <= 0 {
break
}
p = gins(op, &src, &tmp)
regfree(&tmp)
}
-func cadable(n *gc.Node) int {
- if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+ if n.Addable == 0 {
// dont know how it happens,
// but it does
- return 0
+ return false
}
switch n.Op {
case gc.ONAME:
- return 1
+ return true
}
- return 0
+ return false
}
/*
* nr is N when assigning a zero value.
* return 1 if can do, 0 if cant.
*/
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
t = nl.Type
// Slices are ok.
- if gc.Isslice(t) != 0 {
+ if gc.Isslice(t) {
break
}
// Small arrays are ok.
- if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
- if gc.Isfat(t.Type) != 0 {
+ if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
}
nodl = *nl
- if !(cadable(nl) != 0) {
- if nr != nil && !(cadable(nr) != 0) {
+ if !cadable(nl) {
+ if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
if nr != nil {
nodr = *nr
- if !(cadable(nr) != 0) {
+ if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
gc.Gvardef(nl)
}
t = nl.Type
- if !(gc.Isslice(t) != 0) {
+ if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
if freel != 0 {
regfree(&nodl)
}
- return 0
+ return false
yes:
if freer != 0 {
if freel != 0 {
regfree(&nodl)
}
- return 1
+ return true
}
}
l = n.Left
- if !(l.Addable != 0) {
+ if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
- fallthrough
case gc.OMINUS:
split64(res, &lo2, &hi2)
// setup for binary operators
r = n.Right
- if r != nil && !(r.Addable != 0) {
+ if r != nil && r.Addable == 0 {
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
}
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
switch n.Op {
default:
gc.Fatal("cgen64: not implemented: %v\n", gc.Nconv(n, 0))
- fallthrough
// TODO: Constants
case gc.OADD:
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
// shift is >= 1<<32
split64(r, &cl, &ch)
regalloc(&s, gc.Types[gc.TUINT32], nil)
regalloc(&creg, gc.Types[gc.TUINT32], nil)
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
// shift is >= 1<<32
split64(r, &cl, &ch)
regfree(&n1)
}
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
splitclean()
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
- fallthrough
// cmp hi
// bne L
r0 = 0
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
- if !(n.Needzero != 0) {
+ if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
p = gins(arm.ABL, nil, f)
gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) != 0 {
+ if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
reg[r]--
}
- if !(i.Addable != 0) {
+ if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
w = uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
- if componentgen(nil, nl) != 0 {
+ if componentgen(nil, nl) {
return
}
}
}
-func anyregalloc() int {
+func anyregalloc() bool {
var i int
var j int
goto ok
}
}
- return 1
+ return true
ok:
}
- return 0
+ return false
}
var regpc [REGALLOC_FMAX + 1]uint32
gc.Fatal("regalloc: t nil")
}
et = int(gc.Simtype[t.Etype])
- if gc.Is64(t) != 0 {
+ if gc.Is64(t) {
gc.Fatal("regalloc: 64 bit type %v")
}
var n1 gc.Node
var i int64
- if !(gc.Is64(n.Type) != 0) {
+ if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
default:
switch n.Op {
default:
- if !(dotaddable(n, &n1) != 0) {
+ if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
- if !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
ft = gc.Simsimtype(con.Type)
// constants can't move directly to memory
- if gc.Ismem(t) != 0 && !(gc.Is64(t.Type) != 0) {
+ if gc.Ismem(t) && !gc.Is64(t.Type) {
goto hard
}
}
* integer copy and truncate
*/
case gc.TINT8<<16 | gc.TINT8: // same size
- if !(gc.Ismem(f) != 0) {
+ if !gc.Ismem(f) {
a = arm.AMOVB
break
}
a = arm.AMOVBS
case gc.TUINT8<<16 | gc.TUINT8:
- if !(gc.Ismem(f) != 0) {
+ if !gc.Ismem(f) {
a = arm.AMOVB
break
}
goto trunc64
case gc.TINT16<<16 | gc.TINT16: // same size
- if !(gc.Ismem(f) != 0) {
+ if !gc.Ismem(f) {
a = arm.AMOVH
break
}
a = arm.AMOVHS
case gc.TUINT16<<16 | gc.TUINT16:
- if !(gc.Ismem(f) != 0) {
+ if !gc.Ismem(f) {
a = arm.AMOVH
break
}
gc.Fatal("gmove %v -> %v", gc.Nconv(f, 0), gc.Nconv(t, 0))
}
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
- return 0
+ return false
}
switch f.Op {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
- return 1
+ return true
}
- return 0
+ return false
}
/*
cleani -= 2
}
-func dotaddable(n *gc.Node, n1 *gc.Node) int {
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
var o int
var oary [10]int64
var nn *gc.Node
if n.Op != gc.ODOT {
- return 0
+ return false
}
o = gc.Dotoffset(n, oary[:], &nn)
*n1 = *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
- return 1
+ return true
}
- return 0
+ return false
}
/*
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr, w *int) bool {
var o int
var i int
var oary [10]int64
var t *gc.Type
if n.Type == nil {
- return 0
+ return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
- if !(gc.Isconst(n, gc.CTINT) != 0) {
+ if !gc.Isconst(n, gc.CTINT) {
break
}
v = gc.Mpgetfix(n.Val.U.Xval)
goto odot
case gc.OINDEX:
- return 0
+ return false
// disabled: OINDEX case is now covered by agenr
// for a more suitable register allocation pattern.
if n.Left.Type.Etype == gc.TSTRING {
- return 0
+ return false
}
cleani += 2
reg = &clean[cleani-1]
goto oindex
}
- return 0
+ return false
lit:
switch as {
default:
- return 0
+ return false
case arm.AADD,
arm.ASUB,
}
*w = int(n.Type.Width)
- if gc.Isconst(r, gc.CTINT) != 0 {
+ if gc.Isconst(r, gc.CTINT) {
goto oindex_const
}
}
regalloc(reg1, t, nil)
regalloc(&n3, gc.Types[gc.TINT32], reg1)
- p2 = cgenindex(r, &n3, bool2int(gc.Debug['B'] != 0 || n.Bounded != 0))
+ p2 = cgenindex(r, &n3, gc.Debug['B'] != 0 || n.Bounded)
gmove(&n3, reg1)
regfree(&n3)
}
// check bounds
- if !(gc.Debug['B'] != 0) {
+ if gc.Debug['B'] == 0 {
if o&ODynam != 0 {
n2 = *reg
n2.Op = gc.OINDREG
v = gc.Mpgetfix(r.Val.U.Xval)
if o&ODynam != 0 {
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
n1 = *reg
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
goto yes
yes:
- return 1
+ return true
no:
sudoclean()
- return 0
+ return false
}
arm.AMOVW,
arm.AMOVF,
arm.AMOVD:
- if regtyp(&p.From) != 0 {
+ if regtyp(&p.From) {
if p.From.Type == p.To.Type && isfloatreg(&p.From) == isfloatreg(&p.To) {
if p.Scond == arm.C_SCOND_NONE {
- if copyprop(g, r) != 0 {
+ if copyprop(g, r) {
excise(r)
t++
break
}
- if subprop(r) != 0 && copyprop(g, r) != 0 {
+ if subprop(r) && copyprop(g, r) {
excise(r)
t++
break
arm.AMOVBS,
arm.AMOVBU:
if p.From.Type == obj.TYPE_REG {
- if shortprop(r) != 0 {
+ if shortprop(r) {
t++
}
}
* EOR -1,x,y => MVN x,y
*/
case arm.AEOR:
- if isdconst(&p.From) != 0 && p.From.Offset == -1 {
+ if isdconst(&p.From) && p.From.Offset == -1 {
p.As = arm.AMVN
p.From.Type = obj.TYPE_REG
if p.Reg != 0 {
gc.Flowend(g)
}
-func regtyp(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15))
+func regtyp(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && (arm.REG_R0 <= a.Reg && a.Reg <= arm.REG_R15 || arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
}
/*
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
- if !(regtyp(v1) != 0) {
- return 0
+ if !regtyp(v1) {
+ return false
}
v2 = &p.To
- if !(regtyp(v2) != 0) {
- return 0
+ if !regtyp(v2) {
+ return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
- return 0
+ return false
}
if (info.Flags&gc.CanRegRead != 0) && p.To.Type == obj.TYPE_REG {
case arm.AMULLU,
arm.AMULA,
arm.AMVN:
- return 0
+ return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
}
}
- if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
}
}
- return 0
+ return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
- return 1
+ return true
}
/*
* set v1 F=1
* set v2 return success
*/
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
v2 = &p.To
- if copyas(v1, v2) != 0 {
- return 1
+ if copyas(v1, v2) {
+ return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
- return 1
+ return true
}
r.Active = int32(gactive)
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
- if !(f != 0) && gc.Uniqp(r) == nil {
+ if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
if gc.Debug['P'] != 0 {
fmt.Printf("; %vrar; return 0\n", gc.Ctxt.Dconv(v2))
}
- return 0
+ return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %vset; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
- if !(gc.Debug['P'] != 0) {
- return 0
+ if gc.Debug['P'] == 0 {
+ return false
}
if t == 4 {
fmt.Printf("; %vused+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %vused and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
- return 0
+ return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
- return 0
+ return false
}
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; %vused+set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
}
}
- if !(f != 0) {
+ if f == 0 {
t = copyu(p, v1, nil)
- if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %vset and !f; f=%d", gc.Ctxt.Dconv(v1), f)
fmt.Printf("\n")
}
if r.S2 != nil {
- if !(copy1(v1, v2, r.S2, f) != 0) {
- return 0
+ if !copy1(v1, v2, r.S2, f) {
+ return false
}
}
}
- return 1
+ return true
}
// UNUSED
return
}
- if p.As == arm.AMOVW && copyas(&p.From, c1) != 0 {
+ if p.As == arm.AMOVW && copyas(&p.From, c1) {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub%v/%v", gc.Ctxt.Dconv(&p.From), gc.Ctxt.Dconv(v1))
}
*
* MOVBS above can be a MOVBS, MOVBU, MOVHS or MOVHU.
*/
-func shortprop(r *gc.Flow) int {
+func shortprop(r *gc.Flow) bool {
var p *obj.Prog
var p1 *obj.Prog
var r1 *gc.Flow
p = r.Prog
r1 = findpre(r, &p.From)
if r1 == nil {
- return 0
+ return false
}
p1 = r1.Prog
goto gotit
}
- if p1.As == arm.AMOVW && isdconst(&p1.From) != 0 && p1.From.Offset >= 0 && p1.From.Offset < 128 {
+ if p1.As == arm.AMOVW && isdconst(&p1.From) && p1.From.Offset >= 0 && p1.From.Offset < 128 {
// Loaded an immediate.
goto gotit
}
- return 0
+ return false
gotit:
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf(" => %v\n", arm.Aconv(int(p.As)))
}
- return 1
+ return true
}
// UNUSED
* AXXX (x<<y),a,b
* ..
*/
-func shiftprop(r *gc.Flow) int {
+func shiftprop(r *gc.Flow) bool {
var r1 *gc.Flow
var p *obj.Prog
var p1 *obj.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: result not reg; FAILURE\n")
}
- return 0
+ return false
}
n = int(p.To.Reg)
- a = obj.Zprog.From
+ a = obj.Addr{}
if p.Reg != 0 && p.Reg != p.To.Reg {
a.Type = obj.TYPE_REG
a.Reg = p.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("\tbranch; FAILURE\n")
}
- return 0
+ return false
}
if gc.Uniqp(r1) == nil {
if gc.Debug['P'] != 0 {
fmt.Printf("\tmerge; FAILURE\n")
}
- return 0
+ return false
}
p1 = r1.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("\targs modified; FAILURE\n")
}
- return 0
+ return false
}
continue
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: noref; FAILURE\n")
}
- return 0
+ return false
}
}
if gc.Debug['P'] != 0 {
fmt.Printf("\tnon-dpi; FAILURE\n")
}
- return 0
+ return false
case arm.AAND,
arm.AEOR,
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
- return 0
+ return false
}
p1.Reg = p1.From.Reg
if gc.Debug['P'] != 0 {
fmt.Printf("\tcan't swap; FAILURE\n")
}
- return 0
+ return false
}
if p1.Reg == 0 && int(p1.To.Reg) == n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tshift result used twice; FAILURE\n")
}
- return 0
+ return false
}
// case AMVN:
if gc.Debug['P'] != 0 {
fmt.Printf("\tshift result used in shift; FAILURE\n")
}
- return 0
+ return false
}
if p1.From.Type != obj.TYPE_REG || int(p1.From.Reg) != n {
if gc.Debug['P'] != 0 {
fmt.Printf("\tBOTCH: where is it used?; FAILURE\n")
}
- return 0
+ return false
}
}
if gc.Debug['P'] != 0 {
fmt.Printf("\tinconclusive; FAILURE\n")
}
- return 0
+ return false
}
p1 = r1.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("\treused; FAILURE\n")
}
- return 0
+ return false
}
break
o |= 2 << 5
}
- p2.From = obj.Zprog.From
+ p2.From = obj.Addr{}
p2.From.Type = obj.TYPE_SHIFT
p2.From.Offset = int64(o)
if gc.Debug['P'] != 0 {
fmt.Printf("\t=>%v\tSUCCEED\n", p2)
}
- return 1
+ return true
}
/*
p = r1.Prog
if p.As == arm.AADD {
- if isdconst(&p.From) != 0 {
+ if isdconst(&p.From) {
if p.From.Offset > -4096 && p.From.Offset < 4096 {
return r1
}
return nil
}
-func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) int {
+func nochange(r *gc.Flow, r2 *gc.Flow, p *obj.Prog) bool {
var a [3]obj.Addr
var i int
var n int
if r == r2 {
- return 1
+ return true
}
n = 0
if p.Reg != 0 && p.Reg != p.To.Reg {
}
if n == 0 {
- return 1
+ return true
}
for ; r != nil && r != r2; r = gc.Uniqs(r) {
p = r.Prog
for i = 0; i < n; i++ {
if copyu(p, &a[i], nil) > 1 {
- return 0
+ return false
}
}
}
- return 1
+ return true
}
-func findu1(r *gc.Flow, v *obj.Addr) int {
+func findu1(r *gc.Flow, v *obj.Addr) bool {
for ; r != nil; r = r.S1 {
if r.Active != 0 {
- return 0
+ return false
}
r.Active = 1
switch copyu(r.Prog, v, nil) {
case 1, /* used */
2, /* read-alter-rewrite */
4: /* set and used */
- return 1
+ return true
case 3: /* set */
- return 0
+ return false
}
if r.S2 != nil {
- if findu1(r.S2, v) != 0 {
- return 1
+ if findu1(r.S2, v) {
+ return true
}
}
}
- return 0
+ return false
}
-func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) int {
+func finduse(g *gc.Graph, r *gc.Flow, v *obj.Addr) bool {
var r1 *gc.Flow
for r1 = g.Start; r1 != nil; r1 = r1.Link {
* into
* MOVBU R0<<0(R1),R0
*/
-func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) int {
+func xtramodes(g *gc.Graph, r *gc.Flow, a *obj.Addr) bool {
var r1 *gc.Flow
var r2 *gc.Flow
var r3 *gc.Flow
}
if p1.From.Type == obj.TYPE_REG || (p1.From.Type == obj.TYPE_SHIFT && p1.From.Offset&(1<<4) == 0 && ((p.As != arm.AMOVB && p.As != arm.AMOVBS) || (a == &p.From && p1.From.Offset&^0xf == 0))) || ((p1.From.Type == obj.TYPE_ADDR || p1.From.Type == obj.TYPE_CONST) && p1.From.Offset > -4096 && p1.From.Offset < 4096) {
- if nochange(gc.Uniqs(r1), r, p1) != 0 {
+ if nochange(gc.Uniqs(r1), r, p1) {
if a != &p.From || v.Reg != p.To.Reg {
- if finduse(g, r.S1, &v) != 0 {
+ if finduse(g, r.S1, &v) {
if p1.Reg == 0 || p1.Reg == v.Reg {
/* pre-indexing */
p.Scond |= arm.C_WBIT
} else {
- return 0
+ return false
}
}
}
/* register offset */
case obj.TYPE_REG:
if gc.Nacl {
- return 0
+ return false
}
- *a = obj.Zprog.From
+ *a = obj.Addr{}
a.Type = obj.TYPE_SHIFT
a.Offset = int64(p1.From.Reg) & 15
/* scaled register offset */
case obj.TYPE_SHIFT:
if gc.Nacl {
- return 0
+ return false
}
- *a = obj.Zprog.From
+ *a = obj.Addr{}
a.Type = obj.TYPE_SHIFT
fallthrough
a.Reg = p1.Reg
}
excise(r1)
- return 1
+ return true
}
}
a.Reg = p1.To.Reg
a.Offset = p1.From.Offset
p.Scond |= arm.C_PBIT
- if !(finduse(g, r, &r1.Prog.To) != 0) {
+ if !finduse(g, r, &r1.Prog.To) {
excise(r1)
}
excise(r2)
- return 1
+ return true
}
}
}
a.Offset = p1.From.Offset
p.Scond |= arm.C_PBIT
excise(r1)
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
if p.Scond&arm.C_WBIT != 0 {
return 2
}
return 0
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
if p.Scond&arm.C_WBIT != 0 {
return 2
}
if copysub(&p.From, v, s, 1) != 0 {
return 1
}
- if !(copyas(&p.To, v) != 0) {
+ if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
return 0
}
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
if p.Scond != arm.C_SCOND_NONE {
return 2
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
return 3
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
if copysub1(p, v, s, 1) != 0 {
return 1
}
- if !(copyas(&p.To, v) != 0) {
+ if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
return 0
}
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
if p.Scond != arm.C_SCOND_NONE {
return 2
}
if p.Reg == 0 {
p.Reg = p.To.Reg
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
- if copyau1(p, v) != 0 {
+ if copyau1(p, v) {
return 4
}
return 3
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau1(p, v) != 0 {
+ if copyau1(p, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return copysub1(p, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau1(p, v) != 0 {
+ if copyau1(p, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 4
}
return 3
* could be set/use depending on
* semantics
*/
-func copyas(a *obj.Addr, v *obj.Addr) int {
- if regtyp(v) != 0 {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+ if regtyp(v) {
if a.Type == v.Type {
if a.Reg == v.Reg {
- return 1
+ return true
}
}
} else if v.Type == obj.TYPE_CONST { /* for constprop */
if a.Sym == v.Sym {
if a.Reg == v.Reg {
if a.Offset == v.Offset {
- return 1
+ return true
}
}
}
}
}
- return 0
+ return false
}
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type {
- return 0
+ return false
}
- if regtyp(v) != 0 && a.Reg == v.Reg {
- return 1
+ if regtyp(v) && a.Reg == v.Reg {
+ return true
}
// TODO(rsc): Change v->type to v->name and enable.
// if(v->offset == a->offset)
// return 1;
//}
- return 0
+ return false
}
/*
* either direct or indirect
*/
-func copyau(a *obj.Addr, v *obj.Addr) int {
- if copyas(a, v) != 0 {
- return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
}
if v.Type == obj.TYPE_REG {
if a.Type == obj.TYPE_ADDR && a.Reg != 0 {
if a.Reg == v.Reg {
- return 1
+ return true
}
} else if a.Type == obj.TYPE_MEM {
if a.Reg == v.Reg {
- return 1
+ return true
}
} else if a.Type == obj.TYPE_REGREG || a.Type == obj.TYPE_REGREG2 {
if a.Reg == v.Reg {
- return 1
+ return true
}
if a.Offset == int64(v.Reg) {
- return 1
+ return true
}
} else if a.Type == obj.TYPE_SHIFT {
if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
- return 1
+ return true
}
if (a.Offset&(1<<4) != 0) && (a.Offset>>8)&0xf == int64(v.Reg-arm.REG_R0) {
- return 1
+ return true
}
}
}
- return 0
+ return false
}
/*
* compare v to the center
* register in p (p->reg)
*/
-func copyau1(p *obj.Prog, v *obj.Addr) int {
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
if v.Type == obj.TYPE_REG && v.Reg == 0 {
- return 0
+ return false
}
- return bool2int(p.Reg == v.Reg)
+ return p.Reg == v.Reg
}
/*
*/
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
- if copyau(a, v) != 0 {
+ if copyau(a, v) {
if a.Type == obj.TYPE_SHIFT {
if a.Offset&0xf == int64(v.Reg-arm.REG_R0) {
a.Offset = a.Offset&^0xf | int64(s.Reg)&0xf
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
- if copyau1(p1, v) != 0 {
+ if copyau1(p1, v) {
p1.Reg = s.Reg
}
}
Keepbranch
)
-func isbranch(p *obj.Prog) int {
- return bool2int((arm.ABEQ <= p.As) && (p.As <= arm.ABLE))
+func isbranch(p *obj.Prog) bool {
+ return (arm.ABEQ <= p.As) && (p.As <= arm.ABLE)
}
-func predicable(p *obj.Prog) int {
+func predicable(p *obj.Prog) bool {
switch p.As {
case obj.ANOP,
obj.AXXX,
arm.AWORD,
arm.ABCASE,
arm.ACASE:
- return 0
+ return false
}
- if isbranch(p) != 0 {
- return 0
+ if isbranch(p) {
+ return false
}
- return 1
+ return true
}
/*
*
* C_SBIT may also have been set explicitly in p->scond.
*/
-func modifiescpsr(p *obj.Prog) int {
+func modifiescpsr(p *obj.Prog) bool {
switch p.As {
case arm.AMULLU,
arm.AMULA,
arm.AMOD,
arm.AMODU,
arm.ABL:
- return 1
+ return true
}
if p.Scond&arm.C_SBIT != 0 {
- return 1
+ return true
}
- return 0
+ return false
}
/*
if r.Prog.As != obj.ANOP {
j.len++
}
- if !(r.S1 != nil) && !(r.S2 != nil) {
+ if r.S1 == nil && r.S2 == nil {
j.end = r.Link
return End
}
return Branch
}
- if modifiescpsr(r.Prog) != 0 {
+ if modifiescpsr(r.Prog) {
j.end = r.S1
return Setcond
}
r = r.S1
- if !(j.len < 4) {
+ if j.len >= 4 {
break
}
}
r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
}
}
- } else if predicable(r.Prog) != 0 {
+ } else if predicable(r.Prog) {
r.Prog.Scond = uint8(int(r.Prog.Scond&^arm.C_SCOND) | pred)
}
if r.S1 != r.Link {
var j2 Joininfo
for r = g.Start; r != nil; r = r.Link {
- if isbranch(r.Prog) != 0 {
+ if isbranch(r.Prog) {
t1 = joinsplit(r.S1, &j1)
t2 = joinsplit(r.S2, &j2)
if j1.last.Link != j2.start {
}
}
-func isdconst(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_CONST)
+func isdconst(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_CONST
}
-func isfloatreg(a *obj.Addr) int {
- return bool2int(arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15)
+func isfloatreg(a *obj.Addr) bool {
+ return arm.REG_F0 <= a.Reg && a.Reg <= arm.REG_F15
}
-func stackaddr(a *obj.Addr) int {
- return bool2int(regtyp(a) != 0 && a.Reg == arm.REGSP)
+func stackaddr(a *obj.Addr) bool {
+ return regtyp(a) && a.Reg == arm.REGSP
}
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
- return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}
func excise(r *gc.Flow) {
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
goto ret
case gc.OEFACE:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
}
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
goto ret
}
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
if n.Ullman > res.Ullman {
regalloc(&n1, n.Type, res)
cgen(n, &n1)
goto gen
}
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
f = 1 // gen thru register
switch n.Op {
case gc.OLITERAL:
- if gc.Smallintconst(n) != 0 {
+ if gc.Smallintconst(n) {
f = 0
}
f = 0
}
- if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, res.Type)
- if sudoaddable(a, res, &addr) != 0 {
+ if sudoaddable(a, res, &addr) {
if f != 0 {
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
switch n.Op {
case gc.OSPTR,
gc.OLEN:
- if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
- if gc.Isslice(n.Left.Type) != 0 {
+ if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
n.Addable = n.Left.Addable
}
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
}
}
- if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
- if sudoaddable(a, n, &addr) != 0 {
+ if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
p1 = gins(a, nil, res)
p1.From = addr
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
- gmove(gc.Nodbool(1), res)
+ gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
- gmove(gc.Nodbool(0), res)
+ gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
// pointer is the first word of string or slice.
case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(x86.ALEAQ, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
regfree(&n1)
case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
igen(nl, &n1, res)
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OADDR:
- if n.Bounded != 0 { // let race detector avoid nil checks
+ if n.Bounded { // let race detector avoid nil checks
gc.Disable_checknil++
}
agen(nl, res)
- if n.Bounded != 0 {
+ if n.Bounded {
gc.Disable_checknil--
}
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
- if !(gc.Smallintconst(nr) != 0) {
+ if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
} else {
case gc.OLSH,
gc.ORSH,
gc.OLROT:
- cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
* register for the computation.
*/
sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
r = nl
nl = nr
nr = r
}
*
*/
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
n2 = *nr
} else {
regalloc(&n2, nr.Type, nil)
cgen(nr, &n2)
}
} else {
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
n2 = *nr
} else {
regalloc(&n2, nr.Type, res)
gc.Dump("cgenr-n", n)
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
}
if nl.Addable != 0 {
cgenr(nr, &n1, nil)
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
- if gc.Isfixedarray(nl.Type) != 0 {
+ if !gc.Isconst(nl, gc.CTSTR) {
+ if gc.Isfixedarray(nl.Type) {
agenr(nl, &n3, res)
} else {
igen(nl, &nlen, res)
nr = &tmp
irad:
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
- if gc.Isfixedarray(nl.Type) != 0 {
+ if !gc.Isconst(nl, gc.CTSTR) {
+ if gc.Isfixedarray(nl.Type) {
agenr(nl, &n3, res)
} else {
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
// igen will need an addressable node.
gc.Tempname(&tmp2, nl.Type)
}
}
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
cgenr(nr, &n1, nil)
}
// constant index
index:
- if gc.Isconst(nr, gc.CTINT) != 0 {
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nr, gc.CTINT) {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Debug['B'] == 0 && !n.Bounded {
gc.Nodconst(&n2, gc.Types[gc.Simtype[gc.TUINT]], int64(v))
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), &nlen, &n2)
} else {
regalloc(&tmp, gc.Types[gc.Simtype[gc.TUINT]], nil)
gmove(&n1, &n2)
regfree(&n1)
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
t = gc.Types[gc.Simtype[gc.TUINT]]
- if gc.Is64(nr.Type) != 0 {
+ if gc.Is64(nr.Type) {
t = gc.Types[gc.TUINT64]
}
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
- if gc.Is64(nr.Type) != 0 {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Is64(nr.Type) {
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
regfree(&nlen)
}
} else {
gc.Nodconst(&nlen, t, nl.Type.Bound)
- if !(gc.Smallintconst(&nlen) != 0) {
+ if !gc.Smallintconst(&nlen) {
regalloc(&n5, t, nil)
gmove(&nlen, &n5)
nlen = n5
gc.Patch(p1, gc.Pc)
}
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(x86.ALEAQ, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
n = n.Left
}
- if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
}
// should only get here for heap vars or paramref
- if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+ if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
- if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
}
if n == nil {
- n = gc.Nodbool(1)
+ n = gc.Nodbool(true)
}
if n.Ninit != nil {
// need to ask if it is bool?
case gc.OLITERAL:
- if !true_ == !(n.Val.U.Bval != 0) {
+ if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, likely), to)
}
goto ret
nr = r
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
break
}
- if gc.Isinter(nl.Type) != 0 {
+ if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
regalloc(&n1, nl.Type, nil)
cgen(nl, &n1)
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, nr.Type), &n1, nr)
gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
regfree(&n1)
case gc.OINDEX:
t = n.Left.Type
- if !(gc.Isfixedarray(t) != 0) {
+ if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isconst(n.Right, gc.CTINT) {
return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
}
return 1000
}
// Avoid taking the address for simple enough types.
- if componentgen(n, ns) != 0 {
+ if componentgen(n, ns) {
return
}
restx(&cx, &oldcx)
}
-func cadable(n *gc.Node) int {
- if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+ if n.Addable == 0 {
// dont know how it happens,
// but it does
- return 0
+ return false
}
switch n.Op {
case gc.ONAME:
- return 1
+ return true
}
- return 0
+ return false
}
/*
* nr is N when assigning a zero value.
* return 1 if can do, 0 if can't.
*/
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
t = nl.Type
// Slices are ok.
- if gc.Isslice(t) != 0 {
+ if gc.Isslice(t) {
break
}
// Small arrays are ok.
- if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
- if gc.Isfat(t.Type) != 0 {
+ if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
}
nodl = *nl
- if !(cadable(nl) != 0) {
- if nr != nil && !(cadable(nr) != 0) {
+ if !cadable(nl) {
+ if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
if nr != nil {
nodr = *nr
- if !(cadable(nr) != 0) {
+ if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
gc.Gvardef(nl)
}
t = nl.Type
- if !(gc.Isslice(t) != 0) {
+ if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
if freel != 0 {
regfree(&nodl)
}
- return 0
+ return false
yes:
if freer != 0 {
if freel != 0 {
regfree(&nodl)
}
- return 1
+ return true
}
// iterate through declarations - they are sorted in decreasing xoffset order.
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
- if !(n.Needzero != 0) {
+ if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
p = gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) != 0 {
+ if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
- if !(gc.Hasdefer != 0) {
+ if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
i = i.Left // interface
- if !(i.Addable != 0) {
+ if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
- if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+ if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
- } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
}
savex(x86.REG_DX, &dx, &olddx, res, t)
- if !(gc.Issigned[t.Etype] != 0) {
+ if gc.Issigned[t.Etype] == 0 {
gc.Nodconst(&n4, t, 0)
gmove(&n4, &dx)
} else {
*oldx = gc.Node{}
gc.Nodreg(x, t, dr)
- if r > 1 && !(gc.Samereg(x, res) != 0) {
+ if r > 1 && !gc.Samereg(x, res) {
regalloc(oldx, gc.Types[gc.TINT64], nil)
x.Type = gc.Types[gc.TINT64]
gmove(x, oldx)
a = x86.AIMULW
}
- if !(gc.Smallintconst(nr) != 0) {
+ if !gc.Smallintconst(nr) {
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
gc.Nodreg(&cx, gc.Types[gc.TUINT64], x86.REG_CX)
oldcx = gc.Node{}
- if rcx > 0 && !(gc.Samereg(&cx, res) != 0) {
+ if rcx > 0 && !gc.Samereg(&cx, res) {
regalloc(&oldcx, gc.Types[gc.TUINT64], nil)
gmove(&cx, &oldcx)
}
cx.Type = tcount
- if gc.Samereg(&cx, res) != 0 {
+ if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
regfree(&n3)
// test and fix up large shifts
- if !(bounded != 0) {
+ if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
w = nl.Type.Width
// Avoid taking the address for simple enough types.
- if componentgen(nil, nl) != 0 {
+ if componentgen(nil, nl) {
return
}
for {
tmp14 := q
q--
- if !(tmp14 > 0) {
+ if tmp14 <= 0 {
break
}
n1.Type = z.Type
for {
tmp15 := c
c--
- if !(tmp15 > 0) {
+ if tmp15 <= 0 {
break
}
n1.Type = z.Type
p2.From.Type = obj.TYPE_REG
p2.From.Reg = x86.REG_AX
- if regtyp(&p.From) != 0 {
+ if regtyp(&p.From) {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = p.From.Reg
} else {
}
}
-func anyregalloc() int {
+func anyregalloc() bool {
var i int
var j int
goto ok
}
}
- return 1
+ return true
ok:
}
- return 0
+ return false
}
var regpc [x86.REG_R15 + 1 - x86.REG_AX]uint32
fmt.Printf("%d %p\n", i, regpc[i])
}
gc.Fatal("out of fixed registers")
- fallthrough
case gc.TFLOAT32,
gc.TFLOAT64:
}
}
gc.Fatal("out of floating registers")
- fallthrough
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
}
// cannot have two memory operands
- if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
ft = tt // so big switch will choose a simple mov
// some constants can't move directly to memory.
- if gc.Ismem(t) != 0 {
+ if gc.Ismem(t) {
// float constants come from memory.
if gc.Isfloat[tt] != 0 {
goto hard
switch uint32(ft)<<16 | uint32(tt) {
default:
gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
- fallthrough
/*
* integer copy and truncate
return
}
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
- return 0
+ return false
}
switch f.Op {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
- return 1
+ return true
}
- return 0
+ return false
}
/*
func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
var w int32
var p *obj.Prog
+ var af obj.Addr
// Node nod;
- var af obj.Addr
var at obj.Addr
// if(f != N && f->op == OINDEX) {
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
- if f != nil && t != nil && samaddr(f, t) != 0 {
+ if f != nil && t != nil && samaddr(f, t) {
return nil
}
case x86.ALEAQ:
- if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+ if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAQ nil %v", gc.Tconv(f.Type, 0))
}
}
var cleani int = 0
-func xgen(n *gc.Node, a *gc.Node, o int) int {
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
regalloc(a, gc.Types[gc.Tptr], nil)
if o&ODynam != 0 {
if n.Addable != 0 {
if n.Op != gc.OINDREG {
if n.Op != gc.OREGISTER {
- return 1
+ return true
}
}
}
}
agen(n, a)
- return 0
+ return false
}
func sudoclean() {
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
var o int
var i int
var oary [10]int64
var t *gc.Type
if n.Type == nil {
- return 0
+ return false
}
*a = obj.Addr{}
switch n.Op {
case gc.OLITERAL:
- if !(gc.Isconst(n, gc.CTINT) != 0) {
+ if !gc.Isconst(n, gc.CTINT) {
break
}
v = gc.Mpgetfix(n.Val.U.Xval)
goto odot
case gc.OINDEX:
- return 0
+ return false
// disabled: OINDEX case is now covered by agenr
// for a more suitable register allocation pattern.
if n.Left.Type.Etype == gc.TSTRING {
- return 0
+ return false
}
goto oindex
}
- return 0
+ return false
lit:
switch as {
default:
- return 0
+ return false
case x86.AADDB,
x86.AADDW,
l = n.Left
r = n.Right
if l.Ullman >= gc.UINF && r.Ullman >= gc.UINF {
- return 0
+ return false
}
// set o to type of array
}
w = n.Type.Width
- if gc.Isconst(r, gc.CTINT) != 0 {
+ if gc.Isconst(r, gc.CTINT) {
goto oindex_const
}
switch w {
default:
- return 0
+ return false
case 1,
2,
// load the array (reg)
if l.Ullman > r.Ullman {
- if xgen(l, reg, o) != 0 {
+ if xgen(l, reg, o) {
o |= OAddable
}
}
// load the array (reg)
if l.Ullman <= r.Ullman {
- if xgen(l, reg, o) != 0 {
+ if xgen(l, reg, o) {
o |= OAddable
}
}
// check bounds
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
n4.Op = gc.OXXX
n2.Type = gc.Types[gc.Simtype[gc.TUINT]]
}
} else {
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
t = gc.Types[gc.TUINT64]
}
gc.Nodconst(&n2, gc.Types[gc.TUINT64], l.Type.Bound)
oindex_const:
v = gc.Mpgetfix(r.Val.U.Xval)
- if sudoaddable(as, l, a) != 0 {
+ if sudoaddable(as, l, a) {
goto oindex_const_sudo
}
regalloc(reg, gc.Types[gc.Tptr], nil)
agen(l, reg)
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
n1 = *reg
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
}
// slice indexed by a constant
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
a.Offset += int64(gc.Array_nel)
gc.Nodconst(&n2, gc.Types[gc.TUINT64], v)
p1 = gins(optoas(gc.OCMP, gc.Types[gc.Simtype[gc.TUINT]]), nil, &n2)
goto yes
yes:
- return 1
+ return true
no:
sudoclean()
- return 0
+ return false
}
)
// do we need the carry bit
-func needc(p *obj.Prog) int {
+func needc(p *obj.Prog) bool {
var info gc.ProgInfo
for p != nil {
proginfo(&info, p)
if info.Flags&gc.UseCarry != 0 {
- return 1
+ return true
}
if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
- return 0
+ return false
}
p = p.Link
}
- return 0
+ return false
}
func rnops(r *gc.Flow) *gc.Flow {
switch p.As {
case x86.ALEAL,
x86.ALEAQ:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
if p.From.Sym != nil {
if p.From.Index == x86.REG_NONE {
conprop(r)
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
conprop(r)
}
x86.AMOVQ,
x86.AMOVSS,
x86.AMOVSD:
- if regtyp(&p.To) != 0 {
- if regtyp(&p.From) != 0 {
- if copyprop(g, r) != 0 {
+ if regtyp(&p.To) {
+ if regtyp(&p.From) {
+ if copyprop(g, r) {
excise(r)
t++
- } else if subprop(r) != 0 && copyprop(g, r) != 0 {
+ } else if subprop(r) && copyprop(g, r) {
excise(r)
t++
}
x86.AMOVWLZX,
x86.AMOVBLSX,
x86.AMOVWLSX:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
x86.AMOVLQSX,
x86.AMOVLQZX,
x86.AMOVQL:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
case x86.AADDL,
x86.AADDQ,
x86.AADDW:
- if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
} else {
p.As = x86.ADECW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
} else {
p.As = x86.AINCW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
case x86.ASUBL,
x86.ASUBQ,
x86.ASUBW:
- if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
} else {
p.As = x86.AINCW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
} else {
p.As = x86.ADECW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
}
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == x86.AMOVLQZX {
- if regtyp(&p.From) != 0 {
+ if regtyp(&p.From) {
if p.From.Type == p.To.Type && p.From.Reg == p.To.Reg {
- if prevl(r, int(p.From.Reg)) != 0 {
+ if prevl(r, int(p.From.Reg)) {
excise(r)
}
}
}
if p.As == x86.AMOVSD {
- if regtyp(&p.From) != 0 {
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.From) {
+ if regtyp(&p.To) {
p.As = x86.AMOVAPD
}
}
x86.AMOVL,
x86.AMOVQ,
x86.AMOVLQZX:
- if regtyp(&p.To) != 0 && !(regconsttyp(&p.From) != 0) {
+ if regtyp(&p.To) && !regconsttyp(&p.From) {
pushback(r)
}
}
for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
p = r.Prog
if p.As != obj.ANOP {
- if !(regconsttyp(&p.From) != 0) || !(regtyp(&p.To) != 0) {
+ if !regconsttyp(&p.From) || !regtyp(&p.To) {
break
}
if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
gc.Ostats.Ndelmov++
}
-func regtyp(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15))
+func regtyp(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && (x86.REG_AX <= a.Reg && a.Reg <= x86.REG_R15 || x86.REG_X0 <= a.Reg && a.Reg <= x86.REG_X15)
}
// movb elimination.
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
switch p.As {
case x86.AINCB,
x86.AINCW:
p.As = x86.ANOTQ
}
- if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+ if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
// move or artihmetic into partial register.
// from another register or constant can be movl.
// we don't switch to 64-bit arithmetic if it can
case x86.AADDB,
x86.AADDW:
- if !(needc(p.Link) != 0) {
+ if !needc(p.Link) {
p.As = x86.AADDQ
}
case x86.ASUBB,
x86.ASUBW:
- if !(needc(p.Link) != 0) {
+ if !needc(p.Link) {
p.As = x86.ASUBQ
}
}
// is 'a' a register or constant?
-func regconsttyp(a *obj.Addr) int {
- if regtyp(a) != 0 {
- return 1
+func regconsttyp(a *obj.Addr) bool {
+ if regtyp(a) {
+ return true
}
switch a.Type {
case obj.TYPE_CONST,
obj.TYPE_FCONST,
obj.TYPE_SCONST,
obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
- return 1
+ return true
}
- return 0
+ return false
}
// is reg guaranteed to be truncated by a previous L instruction?
-func prevl(r0 *gc.Flow, reg int) int {
+func prevl(r0 *gc.Flow, reg int) bool {
var p *obj.Prog
var r *gc.Flow
var info gc.ProgInfo
proginfo(&info, p)
if info.Flags&gc.RightWrite != 0 {
if info.Flags&gc.SizeL != 0 {
- return 1
+ return true
}
- return 0
+ return false
}
}
}
- return 0
+ return false
}
/*
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var info gc.ProgInfo
var v1 *obj.Addr
}
p = r0.Prog
v1 = &p.From
- if !(regtyp(v1) != 0) {
+ if !regtyp(v1) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v1))
}
- return 0
+ return false
}
v2 = &p.To
- if !(regtyp(v2) != 0) {
+ if !regtyp(v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tnot regtype %v; return 0\n", gc.Ctxt.Dconv(v2))
}
- return 0
+ return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
- return 0
+ return false
}
if info.Reguse|info.Regset != 0 {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tfound %v; return 0\n", p)
}
- return 0
+ return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
}
- if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+ if copyau(&p.From, v2) || copyau(&p.To, v2) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau %v failed\n", gc.Ctxt.Dconv(v2))
}
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tran off end; return 0\n")
}
- return 0
+ return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
- return 1
+ return true
}
/*
* set v1 F=1
* set v2 return success
*/
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
v2 = &p.To
- if copyas(v1, v2) != 0 {
- return 1
+ if copyas(v1, v2) {
+ return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
- return 1
+ return true
}
r.Active = int32(gactive)
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
- if !(f != 0) && gc.Uniqp(r) == nil {
+ if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
- return 0
+ return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
- if !(gc.Debug['P'] != 0) {
- return 0
+ if gc.Debug['P'] == 0 {
+ return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
- return 0
+ return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
- return 0
+ return false
}
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
}
}
- if !(f != 0) {
+ if f == 0 {
t = copyu(p, v1, nil)
- if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
fmt.Printf("\n")
}
if r.S2 != nil {
- if !(copy1(v1, v2, r.S2, f) != 0) {
- return 0
+ if !copy1(v1, v2, r.S2, f) {
+ return false
}
}
}
- return 1
+ return true
}
/*
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 4
}
return 3
}
if info.Flags&gc.LeftAddr != 0 {
- if copyas(&p.From, v) != 0 {
+ if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
return 3
return copysub(&p.To, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
}
* could be set/use depending on
* semantics
*/
-func copyas(a *obj.Addr, v *obj.Addr) int {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
if x86.REG_AL <= a.Reg && a.Reg <= x86.REG_R15B {
gc.Fatal("use of byte register")
}
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
- return 0
+ return false
}
- if regtyp(v) != 0 {
- return 1
+ if regtyp(v) {
+ return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
- return 1
+ return true
}
}
- return 0
+ return false
}
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
- return 0
+ return false
}
- if regtyp(v) != 0 {
- return 1
+ if regtyp(v) {
+ return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
* either direct or indirect
*/
-func copyau(a *obj.Addr, v *obj.Addr) int {
- if copyas(a, v) != 0 {
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: copyas returned 1\n")
}
- return 1
+ return true
}
- if regtyp(v) != 0 {
+ if regtyp(v) {
if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: found indir use - return 1\n")
}
- return 1
+ return true
}
if a.Index == v.Reg {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
fmt.Printf("\tcopyau: found index use - return 1\n")
}
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
var reg int
- if copyas(a, v) != 0 {
+ if copyas(a, v) {
reg = int(s.Reg)
if reg >= x86.REG_AX && reg <= x86.REG_R15 || reg >= x86.REG_X0 && reg <= x86.REG_X0+15 {
if f != 0 {
return 0
}
- if regtyp(v) != 0 {
+ if regtyp(v) {
reg = int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == x86.REG_BP || s.Reg == x86.REG_R13) && a.Index != x86.REG_NONE {
}
}
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
- return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == x86.REG_NONE && 0 <= a.Offset && a.Offset < 4096
}
-func stackaddr(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP)
+func stackaddr(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && a.Reg == x86.REG_SP
}
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
return
case gc.OEFACE:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
}
// structs etc get handled specially
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
switch n.Op {
case gc.OSPTR,
gc.OLEN:
- if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
- if gc.Isslice(n.Left.Type) != 0 {
+ if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
}
// if both are not addressable, use a temporary.
- if !(n.Addable != 0) && !(res.Addable != 0) {
+ if n.Addable == 0 && res.Addable == 0 {
// could use regalloc here sometimes,
// but have to check for ullman >= UINF.
gc.Tempname(&n1, n.Type)
// if result is not addressable directly but n is,
// compute its address and then store via the address.
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
igen(res, &n1, nil)
cgen(n, &n1)
regfree(&n1)
}
// complex types
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
return
}
}
// 64-bit ops are hard on 32-bit machine.
- if gc.Is64(n.Type) != 0 || gc.Is64(res.Type) != 0 || n.Left != nil && gc.Is64(n.Left.Type) != 0 {
+ if gc.Is64(n.Type) || gc.Is64(res.Type) || n.Left != nil && gc.Is64(n.Left.Type) {
switch n.Op {
// math goes to cgen64.
case gc.OMINUS,
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
- gmove(gc.Nodbool(1), res)
+ gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
- gmove(gc.Nodbool(0), res)
+ gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
cgen_hmul(nl, nr, res)
case gc.OCONV:
- if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
break
}
// pointer is the first word of string or slice.
case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(i386.ALEAL, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
regfree(&n1)
case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map has len in the first 32-bit word.
// a zero pointer means zero length
gc.Tempname(&n1, gc.Types[gc.Tptr])
break
}
- if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
igen(nl, &n1, res)
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second 32-bit word.
// a zero pointer means zero length
gc.Tempname(&n1, gc.Types[gc.Tptr])
break
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.TUINT32]
n1.Xoffset += int64(gc.Array_cap)
case gc.OLSH,
gc.ORSH,
gc.OLROT:
- cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
return
}
abop: // asymmetric binary
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
mgen(nl, &n1, res)
regalloc(&n2, nl.Type, &n1)
gmove(&n1, &n2)
var hi gc.Node
var zero gc.Node
- if !(gc.Is64(n.Type) != 0) {
+ if !gc.Is64(n.Type) {
if n.Addable != 0 {
// nothing to do.
*res = *n
var v uint64
var p1 *obj.Prog
var p2 *obj.Prog
- var bounded int
+ var bounded bool
if gc.Debug['g'] != 0 {
gc.Dump("\nagen-res", res)
n = n.Left
}
- if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
switch n.Op {
default:
gc.Fatal("agen %v", gc.Oconv(int(n.Op), 0))
- fallthrough
case gc.OCALLMETH:
gc.Cgen_callmeth(n, 0)
case gc.OINDEX:
p2 = nil // to be patched to panicindex.
w = uint32(n.Type.Width)
- bounded = bool2int(gc.Debug['B'] != 0 || n.Bounded != 0)
+ bounded = gc.Debug['B'] != 0 || n.Bounded
if nr.Addable != 0 {
// Generate &nl first, and move nr into register.
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
- p2 = igenindex(nr, &tmp, bounded)
+ if !gc.Isconst(nr, gc.CTINT) {
+ p2 = igenindex(nr, &tmp, bool2int(bounded))
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
// Generate nr first, and move &nl into register.
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
- p2 = igenindex(nr, &tmp, bounded)
+ if !gc.Isconst(nr, gc.CTINT) {
+ p2 = igenindex(nr, &tmp, bool2int(bounded))
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
} else {
- p2 = igenindex(nr, &tmp, bounded)
+ p2 = igenindex(nr, &tmp, bool2int(bounded))
nr = &tmp
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
igen(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
}
// For fixed array we really want the pointer in n3.
- if gc.Isfixedarray(nl.Type) != 0 {
+ if gc.Isfixedarray(nl.Type) {
regalloc(&n2, gc.Types[gc.Tptr], &n3)
agen(&n3, &n2)
regfree(&n3)
// w is width
// constant index
- if gc.Isconst(nr, gc.CTINT) != 0 {
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nr, gc.CTINT) {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index") // front end should handle
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Debug['B'] == 0 && !n.Bounded {
nlen = n3
nlen.Type = gc.Types[gc.TUINT32]
nlen.Xoffset += int64(gc.Array_nel)
gmove(&n1, &n2)
regfree(&n1)
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
t = gc.Types[gc.TUINT32]
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&nlen, t, int64(len(nl.Val.U.Sval.S)))
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
nlen = n3
nlen.Type = t
nlen.Xoffset += int64(gc.Array_nel)
gc.Patch(p1, gc.Pc)
}
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(i386.ALEAL, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
// Load base pointer in n3.
regalloc(&tmp, gc.Types[gc.Tptr], &n3)
- if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n3.Type = gc.Types[gc.Tptr]
n3.Xoffset += int64(gc.Array_array)
gmove(&n3, &tmp)
}
// should only get here for heap vars or paramref
- if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
case gc.ODOTPTR:
t = nl.Type
- if !(gc.Isptr[t.Etype] != 0) {
+ if gc.Isptr[t.Etype] == 0 {
gc.Fatal("agen: not ptr %v", gc.Nconv(n, 0))
}
cgen(nl, res)
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+ if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
- if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
}
if n == nil {
- n = gc.Nodbool(1)
+ n = gc.Nodbool(true)
}
if n.Ninit != nil {
// need to ask if it is bool?
case gc.OLITERAL:
- if !true_ == !(n.Val.U.Bval != 0) {
+ if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(obj.AJMP, nil, 0), to)
}
return
case gc.ONAME:
- if !(n.Addable != 0) {
+ if n.Addable == 0 {
goto def
}
gc.Nodconst(&n1, n.Type, 0)
nr = r
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
break
}
- if gc.Isinter(nl.Type) != 0 {
+ if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
break
}
- if gc.Is64(nr.Type) != 0 {
- if !(nl.Addable != 0) || gc.Isconst(nl, gc.CTINT) != 0 {
+ if gc.Is64(nr.Type) {
+ if nl.Addable == 0 || gc.Isconst(nl, gc.CTINT) {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
gc.Tempname(&n2, nr.Type)
cgen(nr, &n2)
nr = &n2
}
if nr.Ullman >= gc.UINF {
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
goto cmp
}
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
- if gc.Smallintconst(nr) != 0 {
+ if gc.Smallintconst(nr) {
gins(optoas(gc.OCMP, nr.Type), nl, nr)
gc.Patch(gc.Gbranch(optoas(a, nr.Type), nr.Type, likely), to)
break
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
case gc.OINDEX:
t = n.Left.Type
- if !(gc.Isfixedarray(t) != 0) {
+ if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isconst(n.Right, gc.CTINT) {
return int32(int64(off) + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval))
}
return 1000
}
// Avoid taking the address for simple enough types.
- if componentgen(n, res) != 0 {
+ if componentgen(n, res) {
return
}
gc.Tempname(&tsrc, gc.Types[gc.Tptr])
gc.Tempname(&tdst, gc.Types[gc.Tptr])
- if !(n.Addable != 0) {
+ if n.Addable == 0 {
agen(n, &tsrc)
}
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
agen(res, &tdst)
}
if n.Addable != 0 {
}
}
-func cadable(n *gc.Node) int {
- if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+ if n.Addable == 0 {
// dont know how it happens,
// but it does
- return 0
+ return false
}
switch n.Op {
case gc.ONAME:
- return 1
+ return true
}
- return 0
+ return false
}
/*
* nr is N when assigning a zero value.
* return 1 if can do, 0 if can't.
*/
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
t = nl.Type
// Slices are ok.
- if gc.Isslice(t) != 0 {
+ if gc.Isslice(t) {
break
}
// Small arrays are ok.
- if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
- if gc.Isfat(t.Type) != 0 {
+ if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
}
nodl = *nl
- if !(cadable(nl) != 0) {
- if nr != nil && !(cadable(nr) != 0) {
+ if !cadable(nl) {
+ if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
if nr != nil {
nodr = *nr
- if !(cadable(nr) != 0) {
+ if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
gc.Gvardef(nl)
}
t = nl.Type
- if !(gc.Isslice(t) != 0) {
+ if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
if freel != 0 {
regfree(&nodl)
}
- return 0
+ return false
yes:
if freer != 0 {
if freel != 0 {
regfree(&nodl)
}
- return 1
+ return true
}
switch n.Op {
default:
gc.Fatal("cgen64 %v", gc.Oconv(int(n.Op), 0))
- fallthrough
case gc.OMINUS:
cgen(n.Left, res)
l = n.Left
r = n.Right
- if !(l.Addable != 0) {
+ if l.Addable == 0 {
gc.Tempname(&t1, l.Type)
cgen(l, &t1)
l = &t1
}
- if r != nil && !(r.Addable != 0) {
+ if r != nil && r.Addable == 0 {
gc.Tempname(&t2, r.Type)
cgen(r, &t2)
r = &t2
// Setup for binary operation.
split64(l, &lo1, &hi1)
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
split64(r, &lo2, &hi2)
}
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
splitclean()
}
if v >= 32 {
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
split64(res, &lo2, &hi2)
// if high bits are set, zero value.
p1 = nil
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
if r.Op == gc.OLITERAL {
v = uint64(gc.Mpgetfix(r.Val.U.Xval))
if v >= 64 {
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
splitclean()
}
if v >= 32 {
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
split64(res, &lo2, &hi2)
// if high bits are set, zero value.
p1 = nil
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
gins(i386.ACMPL, &hi2, ncon(0))
p1 = gc.Gbranch(i386.AJNE, nil, +1)
gins(i386.AMOVL, &lo2, &cx)
gins(optoas(int(n.Op), lo1.Type), &hi2, &dx)
}
- if gc.Is64(r.Type) != 0 {
+ if gc.Is64(r.Type) {
splitclean()
}
splitclean()
switch op {
default:
gc.Fatal("cmp64 %v %v", gc.Oconv(int(op), 0), gc.Tconv(t, 0))
- fallthrough
// cmp hi
// jne L
ax = 0
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
- if !(n.Needzero != 0) {
+ if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
w = uint32(nl.Type.Width)
// Avoid taking the address for simple enough types.
- if componentgen(nil, nl) != 0 {
+ if componentgen(nil, nl) {
return
}
for {
tmp14 := q
q--
- if !(tmp14 > 0) {
+ if tmp14 <= 0 {
break
}
n1.Type = z.Type
for {
tmp15 := c
c--
- if !(tmp15 > 0) {
+ if tmp15 <= 0 {
break
}
n1.Type = z.Type
p = gins(obj.ACALL, nil, f)
gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) != 0 {
+ if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
i = i.Left // interface
- if !(i.Addable != 0) {
+ if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
- if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
+ if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -1<<uint64(t.Width*8-1) {
check = 0
- } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
cgen(nr, &t2)
}
- if !(gc.Samereg(ax, res) != 0) && !(gc.Samereg(dx, res) != 0) {
+ if !gc.Samereg(ax, res) && !gc.Samereg(dx, res) {
regalloc(&n1, t, res)
} else {
regalloc(&n1, t, nil)
gc.Patch(p1, gc.Pc)
}
- if !(gc.Issigned[t.Etype] != 0) {
+ if gc.Issigned[t.Etype] == 0 {
gc.Nodconst(&nz, t, 0)
gmove(&nz, dx)
} else {
// and not the destination
*oldx = gc.Node{}
- if r > 0 && !(gc.Samereg(x, res) != 0) {
+ if r > 0 && !gc.Samereg(x, res) {
gc.Tempname(oldx, gc.Types[gc.TINT32])
gmove(x, oldx)
}
var olddx gc.Node
var t *gc.Type
- if gc.Is64(nl.Type) != 0 {
+ if gc.Is64(nl.Type) {
gc.Fatal("cgen_div %v", gc.Tconv(nl.Type, 0))
}
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var nt gc.Node
oldcx = gc.Node{}
gc.Nodreg(&cx, gc.Types[gc.TUINT32], i386.REG_CX)
- if reg[i386.REG_CX] > 1 && !(gc.Samereg(&cx, res) != 0) {
+ if reg[i386.REG_CX] > 1 && !gc.Samereg(&cx, res) {
gc.Tempname(&oldcx, gc.Types[gc.TUINT32])
gmove(&cx, &oldcx)
}
regalloc(&n1, nr.Type, &n1) // to hold the shift type in CX
}
- if gc.Samereg(&cx, res) != 0 {
+ if gc.Samereg(&cx, res) {
regalloc(&n2, nl.Type, nil)
} else {
regalloc(&n2, nl.Type, res)
}
// test and fix up large shifts
- if bounded != 0 {
+ if bounded {
if nr.Type.Width > 4 {
// delayed reg alloc
gc.Nodreg(&n1, gc.Types[gc.TUINT32], i386.REG_CX)
gc.OGE:
p1 = gc.Gbranch(obj.AJMP, nil, 0)
p2 = gc.Pc
- gmove(gc.Nodbool(1), res)
+ gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(obj.AJMP, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
- gmove(gc.Nodbool(0), res)
+ gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
return
return
case gc.OCONV:
- if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) != 0 {
+ if gc.Eqtype(n.Type, nl.Type) || gc.Noconv(n.Type, nl.Type) {
cgen(nl, res)
return
}
nl = n.Left
nr = n.Right
a = int(n.Op)
- if !(true_ != 0) {
+ if true_ == 0 {
// brcom is not valid on floats when NaN is involved.
p1 = gc.Gbranch(obj.AJMP, nil, 0)
goto ret
sse:
- if !(nl.Addable != 0) {
+ if nl.Addable == 0 {
gc.Tempname(&n1, nl.Type)
cgen(nl, &n1)
nl = &n1
}
- if !(nr.Addable != 0) {
+ if nr.Addable == 0 {
gc.Tempname(&tmp, nr.Type)
cgen(nr, &tmp)
nr = &tmp
p2.From.Type = obj.TYPE_REG
p2.From.Reg = i386.REG_AX
- if regtyp(&p.From) != 0 {
+ if regtyp(&p.From) {
p2.To.Type = obj.TYPE_MEM
p2.To.Reg = p.From.Reg
} else {
}
}
-func anyregalloc() int {
+func anyregalloc() bool {
var i int
var j int
goto ok
}
}
- return 1
+ return true
ok:
}
for i = i386.REG_X0; i <= i386.REG_X7; i++ {
if reg[i] != 0 {
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
case gc.TINT64,
gc.TUINT64:
gc.Fatal("regalloc64")
- fallthrough
case gc.TINT8,
gc.TUINT8,
case gc.TFLOAT32,
gc.TFLOAT64:
- if !(gc.Use_sse != 0) {
+ if gc.Use_sse == 0 {
i = i386.REG_F0
goto out
}
var n1 gc.Node
var i int64
- if !(gc.Is64(n.Type) != 0) {
+ if !gc.Is64(n.Type) {
gc.Fatal("split64 %v", gc.Tconv(n.Type, 0))
}
default:
switch n.Op {
default:
- if !(dotaddable(n, &n1) != 0) {
+ if !dotaddable(n, &n1) {
igen(n, &n1, nil)
sclean[nsclean-1] = n1
}
// cannot have two integer memory operands;
// except 64-bit, which always copies via registers anyway.
- if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !(gc.Is64(f.Type) != 0) && !(gc.Is64(t.Type) != 0) && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Isint[ft] != 0 && gc.Isint[tt] != 0 && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
cvt = t.Type
// cannot have two floating point memory operands.
- if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Isfloat[ft] != 0 && gc.Isfloat[tt] != 0 && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
ft = gc.Simsimtype(con.Type)
// some constants can't move directly to memory.
- if gc.Ismem(t) != 0 {
+ if gc.Ismem(t) {
// float constants come from memory.
if gc.Isfloat[tt] != 0 {
goto hard
case gc.TFLOAT32<<16 | gc.TUINT64,
gc.TFLOAT64<<16 | gc.TUINT64:
- if !(gc.Ismem(f) != 0) {
+ if !gc.Ismem(f) {
cvt = f.Type
goto hardmem
}
switch tt {
default:
gc.Fatal("gmove %v", gc.Nconv(t, 0))
- fallthrough
case gc.TINT8:
gins(i386.ACMPL, &t1, ncon(-0x80&(1<<32-1)))
*/
case gc.TFLOAT32<<16 | gc.TFLOAT32,
gc.TFLOAT64<<16 | gc.TFLOAT64:
- if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
if ft == gc.TFLOAT64 {
a = i386.AFMOVD
}
- if gc.Ismem(t) != 0 {
+ if gc.Ismem(t) {
if f.Op != gc.OREGISTER || f.Val.U.Reg != i386.REG_F0 {
gc.Fatal("gmove %v", gc.Nconv(f, 0))
}
}
case gc.TFLOAT32<<16 | gc.TFLOAT64:
- if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
return
case gc.TFLOAT64<<16 | gc.TFLOAT32:
- if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
if f.Op == gc.OREGISTER && t.Op == gc.OREGISTER {
return
}
-func samaddr(f *gc.Node, t *gc.Node) int {
+func samaddr(f *gc.Node, t *gc.Node) bool {
if f.Op != t.Op {
- return 0
+ return false
}
switch f.Op {
if f.Val.U.Reg != t.Val.U.Reg {
break
}
- return 1
+ return true
}
- return 0
+ return false
}
/*
case i386.AMOVB,
i386.AMOVW,
i386.AMOVL:
- if f != nil && t != nil && samaddr(f, t) != 0 {
+ if f != nil && t != nil && samaddr(f, t) {
return nil
}
case i386.ALEAL:
- if f != nil && gc.Isconst(f, gc.CTNIL) != 0 {
+ if f != nil && gc.Isconst(f, gc.CTNIL) {
gc.Fatal("gins LEAL nil %v", gc.Tconv(f.Type, 0))
}
}
return p
}
-func dotaddable(n *gc.Node, n1 *gc.Node) int {
+func dotaddable(n *gc.Node, n1 *gc.Node) bool {
var o int
var oary [10]int64
var nn *gc.Node
if n.Op != gc.ODOT {
- return 0
+ return false
}
o = gc.Dotoffset(n, oary[:], &nn)
*n1 = *nn
n1.Type = n.Type
n1.Xoffset += oary[0]
- return 1
+ return true
}
- return 0
+ return false
}
func sudoclean() {
}
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
*a = obj.Addr{}
- return 0
+ return false
}
var gactive uint32
// do we need the carry bit
-func needc(p *obj.Prog) int {
+func needc(p *obj.Prog) bool {
var info gc.ProgInfo
for p != nil {
proginfo(&info, p)
if info.Flags&gc.UseCarry != 0 {
- return 1
+ return true
}
if info.Flags&(gc.SetCarry|gc.KillCarry) != 0 {
- return 0
+ return false
}
p = p.Link
}
- return 0
+ return false
}
func rnops(r *gc.Flow) *gc.Flow {
p = r.Prog
switch p.As {
case i386.ALEAL:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
if p.From.Sym != nil {
if p.From.Index == i386.REG_NONE {
conprop(r)
i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
conprop(r)
}
case i386.AMOVL,
i386.AMOVSS,
i386.AMOVSD:
- if regtyp(&p.To) != 0 {
- if regtyp(&p.From) != 0 {
- if copyprop(g, r) != 0 {
+ if regtyp(&p.To) {
+ if regtyp(&p.From) {
+ if copyprop(g, r) {
excise(r)
t++
- } else if subprop(r) != 0 && copyprop(g, r) != 0 {
+ } else if subprop(r) && copyprop(g, r) {
excise(r)
t++
}
i386.AMOVWLZX,
i386.AMOVBLSX,
i386.AMOVWLSX:
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
r1 = rnops(gc.Uniqs(r))
if r1 != nil {
p1 = r1.Prog
case i386.AADDL,
i386.AADDW:
- if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
} else {
p.As = i386.ADECW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
} else {
p.As = i386.AINCW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
case i386.ASUBL,
i386.ASUBW:
- if p.From.Type != obj.TYPE_CONST || needc(p.Link) != 0 {
+ if p.From.Type != obj.TYPE_CONST || needc(p.Link) {
break
}
if p.From.Offset == -1 {
} else {
p.As = i386.AINCW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
} else {
p.As = i386.ADECW
}
- p.From = obj.Zprog.From
+ p.From = obj.Addr{}
break
}
}
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
if p.As == i386.AMOVSD {
- if regtyp(&p.From) != 0 {
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.From) {
+ if regtyp(&p.To) {
p.As = i386.AMOVAPD
}
}
gc.Ostats.Ndelmov++
}
-func regtyp(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7))
+func regtyp(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && (i386.REG_AX <= a.Reg && a.Reg <= i386.REG_DI || i386.REG_X0 <= a.Reg && a.Reg <= i386.REG_X7)
}
// movb elimination.
for r = g.Start; r != nil; r = r.Link {
p = r.Prog
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
switch p.As {
case i386.AINCB,
i386.AINCW:
p.As = i386.ANOTL
}
- if regtyp(&p.From) != 0 || p.From.Type == obj.TYPE_CONST {
+ if regtyp(&p.From) || p.From.Type == obj.TYPE_CONST {
// move or artihmetic into partial register.
// from another register or constant can be movl.
// we don't switch to 32-bit arithmetic if it can
case i386.AADDB,
i386.AADDW:
- if !(needc(p.Link) != 0) {
+ if !needc(p.Link) {
p.As = i386.AADDL
}
case i386.ASUBB,
i386.ASUBW:
- if !(needc(p.Link) != 0) {
+ if !needc(p.Link) {
p.As = i386.ASUBL
}
* hopefully, then the former or latter MOV
* will be eliminated by copy propagation.
*/
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
- if !(regtyp(v1) != 0) {
- return 0
+ if !regtyp(v1) {
+ return false
}
v2 = &p.To
- if !(regtyp(v2) != 0) {
- return 0
+ if !regtyp(v2) {
+ return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
- return 0
+ return false
}
if info.Reguse|info.Regset != 0 {
- return 0
+ return false
}
if (info.Flags&gc.Move != 0) && (info.Flags&(gc.SizeL|gc.SizeQ|gc.SizeF|gc.SizeD) != 0) && p.To.Type == v1.Type && p.To.Reg == v1.Reg {
goto gotit
}
- if copyau(&p.From, v2) != 0 || copyau(&p.To, v2) != 0 {
+ if copyau(&p.From, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
}
}
- return 0
+ return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
- return 1
+ return true
}
/*
* set v1 F=1
* set v2 return success
*/
-func copyprop(g *gc.Graph, r0 *gc.Flow) int {
+func copyprop(g *gc.Graph, r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
v2 = &p.To
- if copyas(v1, v2) != 0 {
- return 1
+ if copyas(v1, v2) {
+ return true
}
gactive++
return copy1(v1, v2, r0.S1, 0)
}
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
- return 1
+ return true
}
r.Active = int32(gactive)
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
- if !(f != 0) && gc.Uniqp(r) == nil {
+ if f == 0 && gc.Uniqp(r) == nil {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; merge; f=%d", f)
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
- return 0
+ return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
- if !(gc.Debug['P'] != 0) {
- return 0
+ if gc.Debug['P'] == 0 {
+ return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
- return 0
+ return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
- return 0
+ return false
}
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
}
}
- if !(f != 0) {
+ if f == 0 {
t = copyu(p, v1, nil)
- if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
fmt.Printf("\n")
}
if r.S2 != nil {
- if !(copy1(v1, v2, r.S2, f) != 0) {
- return 0
+ if !copy1(v1, v2, r.S2, f) {
+ return false
}
}
}
- return 1
+ return true
}
/*
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 4
}
return 3
}
if info.Flags&gc.LeftAddr != 0 {
- if copyas(&p.From, v) != 0 {
+ if copyas(&p.From, v) {
return 2
}
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightRead|gc.RightWrite {
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
return 2
}
}
if info.Flags&gc.RightWrite != 0 {
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
if s != nil {
return copysub(&p.From, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
return 3
return copysub(&p.To, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
}
* could be set/use depending on
* semantics
*/
-func copyas(a *obj.Addr, v *obj.Addr) int {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
if i386.REG_AL <= a.Reg && a.Reg <= i386.REG_BL {
gc.Fatal("use of byte register")
}
}
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
- return 0
+ return false
}
- if regtyp(v) != 0 {
- return 1
+ if regtyp(v) {
+ return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
- return 1
+ return true
}
}
- return 0
+ return false
}
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type || a.Name != v.Name || a.Reg != v.Reg {
- return 0
+ return false
}
- if regtyp(v) != 0 {
- return 1
+ if regtyp(v) {
+ return true
}
if v.Type == obj.TYPE_MEM && (v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM) {
if v.Offset == a.Offset {
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
* either direct or indirect
*/
-func copyau(a *obj.Addr, v *obj.Addr) int {
- if copyas(a, v) != 0 {
- return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
}
- if regtyp(v) != 0 {
+ if regtyp(v) {
if a.Type == obj.TYPE_MEM && a.Reg == v.Reg {
- return 1
+ return true
}
if a.Index == v.Reg {
- return 1
+ return true
}
}
- return 0
+ return false
}
/*
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
var reg int
- if copyas(a, v) != 0 {
+ if copyas(a, v) {
reg = int(s.Reg)
if reg >= i386.REG_AX && reg <= i386.REG_DI || reg >= i386.REG_X0 && reg <= i386.REG_X7 {
if f != 0 {
return 0
}
- if regtyp(v) != 0 {
+ if regtyp(v) {
reg = int(v.Reg)
if a.Type == obj.TYPE_MEM && int(a.Reg) == reg {
if (s.Reg == i386.REG_BP) && a.Index != obj.TYPE_NONE {
}
}
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
- return bool2int(regtyp(reg) != 0 && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return regtyp(reg) && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && a.Index == i386.REG_NONE && 0 <= a.Offset && a.Offset < 4096
}
-func stackaddr(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP)
+func stackaddr(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && a.Reg == i386.REG_SP
}
gc.OSLICESTR,
gc.OSLICE3,
gc.OSLICE3ARR:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_slice(n, &n1)
cgen(&n1, res)
goto ret
case gc.OEFACE:
- if res.Op != gc.ONAME || !(res.Addable != 0) {
+ if res.Op != gc.ONAME || res.Addable == 0 {
gc.Tempname(&n1, n.Type)
gc.Cgen_eface(n, &n1)
cgen(&n1, res)
}
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
if n.Type.Width < 0 {
gc.Fatal("forgot to compute width for %v", gc.Tconv(n.Type, 0))
}
goto ret
}
- if !(res.Addable != 0) {
+ if res.Addable == 0 {
if n.Ullman > res.Ullman {
regalloc(&n1, n.Type, res)
cgen(n, &n1)
goto gen
}
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
f = 1 // gen thru register
switch n.Op {
case gc.OLITERAL:
- if gc.Smallintconst(n) != 0 {
+ if gc.Smallintconst(n) {
f = 0
}
f = 0
}
- if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, res.Type)
- if sudoaddable(a, res, &addr) != 0 {
+ if sudoaddable(a, res, &addr) {
if f != 0 {
regalloc(&n2, res.Type, nil)
cgen(n, &n2)
switch n.Op {
case gc.OSPTR,
gc.OLEN:
- if gc.Isslice(n.Left.Type) != 0 || gc.Istype(n.Left.Type, gc.TSTRING) != 0 {
+ if gc.Isslice(n.Left.Type) || gc.Istype(n.Left.Type, gc.TSTRING) {
n.Addable = n.Left.Addable
}
case gc.OCAP:
- if gc.Isslice(n.Left.Type) != 0 {
+ if gc.Isslice(n.Left.Type) {
n.Addable = n.Left.Addable
}
n.Addable = n.Left.Addable
}
- if gc.Complexop(n, res) != 0 {
+ if gc.Complexop(n, res) {
gc.Complexgen(n, res)
goto ret
}
}
}
- if !(gc.Iscomplex[n.Type.Etype] != 0) {
+ if gc.Iscomplex[n.Type.Etype] == 0 {
a = optoas(gc.OAS, n.Type)
- if sudoaddable(a, n, &addr) != 0 {
+ if sudoaddable(a, n, &addr) {
if res.Op == gc.OREGISTER {
p1 = gins(a, nil, res)
p1.From = addr
p1 = gc.Gbranch(ppc64.ABR, nil, 0)
p2 = gc.Pc
- gmove(gc.Nodbool(1), res)
+ gmove(gc.Nodbool(true), res)
p3 = gc.Gbranch(ppc64.ABR, nil, 0)
gc.Patch(p1, gc.Pc)
bgen(n, true, 0, p2)
- gmove(gc.Nodbool(0), res)
+ gmove(gc.Nodbool(false), res)
gc.Patch(p3, gc.Pc)
goto ret
// pointer is the first word of string or slice.
case gc.OSPTR:
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n1, gc.Types[gc.Tptr], res)
p1 = gins(ppc64.AMOVD, nil, &n1)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
regfree(&n1)
case gc.OLEN:
- if gc.Istype(nl.Type, gc.TMAP) != 0 || gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TMAP) || gc.Istype(nl.Type, gc.TCHAN) {
// map and chan have len in the first int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Istype(nl.Type, gc.TSTRING) != 0 || gc.Isslice(nl.Type) != 0 {
+ if gc.Istype(nl.Type, gc.TSTRING) || gc.Isslice(nl.Type) {
// both slice and string have len one pointer into the struct.
// a zero pointer means zero length
igen(nl, &n1, res)
gc.Fatal("cgen: OLEN: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OCAP:
- if gc.Istype(nl.Type, gc.TCHAN) != 0 {
+ if gc.Istype(nl.Type, gc.TCHAN) {
// chan has cap in the second int-sized word.
// a zero pointer means zero length
regalloc(&n1, gc.Types[gc.Tptr], res)
break
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
igen(nl, &n1, res)
n1.Type = gc.Types[gc.Simtype[gc.TUINT]]
n1.Xoffset += int64(gc.Array_cap)
gc.Fatal("cgen: OCAP: unknown type %v", gc.Tconv(nl.Type, obj.FmtLong))
case gc.OADDR:
- if n.Bounded != 0 { // let race detector avoid nil checks
+ if n.Bounded { // let race detector avoid nil checks
gc.Disable_checknil++
}
agen(nl, res)
- if n.Bounded != 0 {
+ if n.Bounded {
gc.Disable_checknil--
}
cgen_div(int(n.Op), &n1, nr, res)
regfree(&n1)
} else {
- if !(gc.Smallintconst(nr) != 0) {
+ if !gc.Smallintconst(nr) {
regalloc(&n2, nr.Type, res)
cgen(nr, &n2)
} else {
case gc.OLSH,
gc.ORSH,
gc.OLROT:
- cgen_shift(int(n.Op), int(n.Bounded), nl, nr, res)
+ cgen_shift(int(n.Op), n.Bounded, nl, nr, res)
}
goto ret
* register for the computation.
*/
sbop: // symmetric binary
- if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) != 0 || (nr.Op == gc.OLITERAL && !(gc.Smallintconst(nr) != 0)))) {
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (gc.Smallintconst(nl) || (nr.Op == gc.OLITERAL && !gc.Smallintconst(nr)))) {
r = nl
nl = nr
nr = r
gc.Dump("cgenr-n", n)
}
- if gc.Isfat(n.Type) != 0 {
+ if gc.Isfat(n.Type) {
gc.Fatal("cgenr on fat node")
}
//bounded = debug['B'] || n->bounded;
if nr.Addable != 0 {
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
}
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
cgen(nr, &tmp)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
} else if nl.Addable != 0 {
- if !(gc.Isconst(nr, gc.CTINT) != 0) {
+ if !gc.Isconst(nr, gc.CTINT) {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
regalloc(&n1, tmp.Type, nil)
gmove(&tmp, &n1)
}
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
} else {
gc.Tempname(&tmp, gc.Types[gc.TINT64])
cgen(nr, &tmp)
nr = &tmp
- if !(gc.Isconst(nl, gc.CTSTR) != 0) {
+ if !gc.Isconst(nl, gc.CTSTR) {
agenr(nl, &n3, res)
}
regalloc(&n1, tmp.Type, nil)
// w is width
// constant index
- if gc.Isconst(nr, gc.CTINT) != 0 {
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nr, gc.CTINT) {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Fatal("constant string constant index")
}
v = uint64(gc.Mpgetfix(nr.Val.U.Xval))
- if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
+ if gc.Debug['B'] == 0 && !n.Bounded {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
gmove(&n1, &n2)
regfree(&n1)
- if !(gc.Debug['B'] != 0) && !(n.Bounded != 0) {
+ if gc.Debug['B'] == 0 && !n.Bounded {
// check bounds
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
gc.Nodconst(&n4, gc.Types[gc.TUINT64], int64(len(nl.Val.U.Sval.S)))
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
gc.Patch(p1, gc.Pc)
}
- if gc.Isconst(nl, gc.CTSTR) != 0 {
+ if gc.Isconst(nl, gc.CTSTR) {
regalloc(&n3, gc.Types[gc.Tptr], res)
p1 = gins(ppc64.AMOVD, nil, &n3)
gc.Datastring(nl.Val.U.Sval.S, &p1.From)
p1.From.Type = obj.TYPE_ADDR
- } else if gc.Isslice(nl.Type) != 0 || nl.Type.Etype == gc.TSTRING {
+ } else if gc.Isslice(nl.Type) || nl.Type.Etype == gc.TSTRING {
n1 = n3
n1.Op = gc.OINDREG
n1.Type = gc.Types[gc.Tptr]
n = n.Left
}
- if gc.Isconst(n, gc.CTNIL) != 0 && n.Type.Width > int64(gc.Widthptr) {
+ if gc.Isconst(n, gc.CTNIL) && n.Type.Width > int64(gc.Widthptr) {
// Use of a nil interface or nil slice.
// Create a temporary we can take the address of and read.
// The generated code is just going to panic, so it need not
}
// should only get here for heap vars or paramref
- if !(n.Class&gc.PHEAP != 0) && n.Class != gc.PPARAMREF {
+ if n.Class&gc.PHEAP == 0 && n.Class != gc.PPARAMREF {
gc.Dump("bad agen", n)
gc.Fatal("agen: bad ONAME class %#x", n.Class)
}
// Could do the same for slice except that we need
// to use the real index for the bounds checking.
case gc.OINDEX:
- if gc.Isfixedarray(n.Left.Type) != 0 || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type) != 0) {
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isfixedarray(n.Left.Type) || (gc.Isptr[n.Left.Type.Etype] != 0 && gc.Isfixedarray(n.Left.Left.Type)) {
+ if gc.Isconst(n.Right, gc.CTINT) {
// Compute &a.
- if !(gc.Isptr[n.Left.Type.Etype] != 0) {
+ if gc.Isptr[n.Left.Type.Etype] == 0 {
igen(n.Left, a, res)
} else {
igen(n.Left, &n1, res)
}
if n == nil {
- n = gc.Nodbool(1)
+ n = gc.Nodbool(true)
}
if n.Ninit != nil {
// need to ask if it is bool?
case gc.OLITERAL:
- if !true_ == !(n.Val.U.Bval != 0) {
+ if !true_ == (n.Val.U.Bval == 0) {
gc.Patch(gc.Gbranch(ppc64.ABR, nil, likely), to)
}
goto ret
nr = r
}
- if gc.Isslice(nl.Type) != 0 {
+ if gc.Isslice(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal slice comparison")
break
}
- if gc.Isinter(nl.Type) != 0 {
+ if gc.Isinter(nl.Type) {
// front end should only leave cmp to literal nil
if (a != gc.OEQ && a != gc.ONE) || nr.Op != gc.OLITERAL {
gc.Yyerror("illegal interface comparison")
case gc.OINDEX:
t = n.Left.Type
- if !(gc.Isfixedarray(t) != 0) {
+ if !gc.Isfixedarray(t) {
break
}
off = stkof(n.Left)
if off == -1000 || off == 1000 {
return off
}
- if gc.Isconst(n.Right, gc.CTINT) != 0 {
+ if gc.Isconst(n.Right, gc.CTINT) {
return off + t.Type.Width*gc.Mpgetfix(n.Right.Val.U.Xval)
}
return 1000
switch align {
default:
gc.Fatal("sgen: invalid alignment %d for %v", align, gc.Tconv(n.Type, 0))
- fallthrough
case 1:
op = ppc64.AMOVBU
for {
tmp14 := c
c--
- if !(tmp14 > 0) {
+ if tmp14 <= 0 {
break
}
regfree(&tmp)
}
-func cadable(n *gc.Node) int {
- if !(n.Addable != 0) {
+func cadable(n *gc.Node) bool {
+ if n.Addable == 0 {
// dont know how it happens,
// but it does
- return 0
+ return false
}
switch n.Op {
case gc.ONAME:
- return 1
+ return true
}
- return 0
+ return false
}
/*
* nr is N when assigning a zero value.
* return 1 if can do, 0 if can't.
*/
-func componentgen(nr *gc.Node, nl *gc.Node) int {
+func componentgen(nr *gc.Node, nl *gc.Node) bool {
var nodl gc.Node
var nodr gc.Node
var tmp gc.Node
t = nl.Type
// Slices are ok.
- if gc.Isslice(t) != 0 {
+ if gc.Isslice(t) {
break
}
// Small arrays are ok.
- if t.Bound > 0 && t.Bound <= 3 && !(gc.Isfat(t.Type) != 0) {
+ if t.Bound > 0 && t.Bound <= 3 && !gc.Isfat(t.Type) {
break
}
fldcount = 0
for t = nl.Type.Type; t != nil; t = t.Down {
- if gc.Isfat(t.Type) != 0 {
+ if gc.Isfat(t.Type) {
goto no
}
if t.Etype != gc.TFIELD {
}
nodl = *nl
- if !(cadable(nl) != 0) {
- if nr != nil && !(cadable(nr) != 0) {
+ if !cadable(nl) {
+ if nr != nil && !cadable(nr) {
goto no
}
igen(nl, &nodl, nil)
if nr != nil {
nodr = *nr
- if !(cadable(nr) != 0) {
+ if !cadable(nr) {
igen(nr, &nodr, nil)
freer = 1
}
gc.Gvardef(nl)
}
t = nl.Type
- if !(gc.Isslice(t) != 0) {
+ if !gc.Isslice(t) {
nodl.Type = t.Type
nodr.Type = nodl.Type
for fldcount = 0; fldcount < t.Bound; fldcount++ {
if freel != 0 {
regfree(&nodl)
}
- return 0
+ return false
yes:
if freer != 0 {
if freel != 0 {
regfree(&nodl)
}
- return 1
+ return true
}
// iterate through declarations - they are sorted in decreasing xoffset order.
for l = gc.Curfn.Dcl; l != nil; l = l.Next {
n = l.N
- if !(n.Needzero != 0) {
+ if n.Needzero == 0 {
continue
}
if n.Class != gc.PAUTO {
p = gins(ppc64.ABL, nil, f)
gc.Afunclit(&p.To, f)
- if proc == -1 || gc.Noreturn(p) != 0 {
+ if proc == -1 || gc.Noreturn(p) {
gins(obj.AUNDEF, nil, nil)
}
break
if proc == 1 {
ginscall(gc.Newproc, 0)
} else {
- if !(gc.Hasdefer != 0) {
+ if gc.Hasdefer == 0 {
gc.Fatal("hasdefer=0 but has defer")
}
ginscall(gc.Deferproc, 0)
i = i.Left // interface
- if !(i.Addable != 0) {
+ if i.Addable == 0 {
gc.Tempname(&tmpi, i.Type)
cgen(i, &tmpi)
i = &tmpi
check = 0
if gc.Issigned[t.Etype] != 0 {
check = 1
- if gc.Isconst(nl, gc.CTINT) != 0 && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
+ if gc.Isconst(nl, gc.CTINT) && gc.Mpgetfix(nl.Val.U.Xval) != -(1<<uint64(t.Width*8-1)) {
check = 0
- } else if gc.Isconst(nr, gc.CTINT) != 0 && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
+ } else if gc.Isconst(nr, gc.CTINT) && gc.Mpgetfix(nr.Val.U.Xval) != -1 {
check = 0
}
}
// use 2-operand 16-bit multiply
// because there is no 2-operand 8-bit multiply
//a = AIMULW;
- if !(gc.Smallintconst(nr) != 0) {
+ if !gc.Smallintconst(nr) {
regalloc(&n3, nl.Type, nil)
cgen(nr, &n3)
gins(a, &n3, &n2)
* res = nl << nr
* res = nl >> nr
*/
-func cgen_shift(op int, bounded int, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+func cgen_shift(op int, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
var n1 gc.Node
var n2 gc.Node
var n3 gc.Node
regfree(&n3)
// test and fix up large shifts
- if !(bounded != 0) {
+ if !bounded {
gc.Nodconst(&n3, tcount, nl.Type.Width*8)
gins(optoas(gc.OCMP, tcount), &n1, &n3)
p1 = gc.Gbranch(optoas(gc.OLT, tcount), nil, +1)
}
}
-func anyregalloc() int {
+func anyregalloc() bool {
var i int
var j int
goto ok
}
}
- return 1
+ return true
ok:
}
- return 0
+ return false
}
/*
fmt.Printf("R%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of fixed registers")
- fallthrough
case gc.TFLOAT32,
gc.TFLOAT64:
fmt.Printf("F%d %p\n", i, regpc[i-ppc64.REG_R0])
}
gc.Fatal("out of floating registers")
- fallthrough
case gc.TCOMPLEX64,
gc.TCOMPLEX128:
switch as {
default:
gc.Fatal("ginscon2")
- fallthrough
case ppc64.ACMP:
if -ppc64.BIG <= c && c <= ppc64.BIG {
}
// cannot have two memory operands
- if gc.Ismem(f) != 0 && gc.Ismem(t) != 0 {
+ if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
ft = tt // so big switch will choose a simple mov
// constants can't move directly to memory.
- if gc.Ismem(t) != 0 {
+ if gc.Ismem(t) {
goto hard
}
}
switch uint32(ft)<<16 | uint32(tt) {
default:
gc.Fatal("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
- fallthrough
/*
* integer copy and truncate
OAddable = 1 << 1
)
-func xgen(n *gc.Node, a *gc.Node, o int) int {
+func xgen(n *gc.Node, a *gc.Node, o int) bool {
// TODO(minux)
- return -1
+ return -1 != 0 /*TypeKind(100016)*/
}
func sudoclean() {
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) int {
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
- return 0
+ return false
}
// breaking moves that do care. This might let us
// simplify or remove the next peep loop, too.
if p.As == ppc64.AMOVD || p.As == ppc64.AFMOVD {
- if regtyp(&p.To) != 0 {
+ if regtyp(&p.To) {
// Try to eliminate reg->reg moves
- if regtyp(&p.From) != 0 {
+ if regtyp(&p.From) {
if p.From.Type == p.To.Type {
- if copyprop(r) != 0 {
+ if copyprop(r) {
excise(r)
t++
- } else if subprop(r) != 0 && copyprop(r) != 0 {
+ } else if subprop(r) && copyprop(r) {
excise(r)
t++
}
if p.To.Type == obj.TYPE_REG {
p.From.Type = obj.TYPE_REG
p.From.Reg = ppc64.REGZERO
- if copyprop(r) != 0 {
+ if copyprop(r) {
excise(r)
t++
- } else if subprop(r) != 0 && copyprop(r) != 0 {
+ } else if subprop(r) && copyprop(r) {
excise(r)
t++
}
switch p.As {
case ppc64.ACMP,
ppc64.ACMPW: /* always safe? */
- if !(regzer(&p.To) != 0) {
+ if regzer(&p.To) == 0 {
continue
}
r1 = r.S1
r1 = r
for {
r1 = gc.Uniqp(r1)
- if !(r1 != nil && r1.Prog.As == obj.ANOP) {
+ if r1 == nil || r1.Prog.As != obj.ANOP {
break
}
}
return 0
}
-func regtyp(a *obj.Addr) int {
+func regtyp(a *obj.Addr) bool {
// TODO(rsc): Floating point register exclusions?
- return bool2int(a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO)
+ return a.Type == obj.TYPE_REG && ppc64.REG_R0 <= a.Reg && a.Reg <= ppc64.REG_F31 && a.Reg != ppc64.REGZERO
}
/*
* r0 (the argument, not the register) is the MOV at the end of the
* above sequences. This returns 1 if it modified any instructions.
*/
-func subprop(r0 *gc.Flow) int {
+func subprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
- if !(regtyp(v1) != 0) {
- return 0
+ if !regtyp(v1) {
+ return false
}
v2 = &p.To
- if !(regtyp(v2) != 0) {
- return 0
+ if !regtyp(v2) {
+ return false
}
for r = gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
if gc.Uniqs(r) == nil {
}
proginfo(&info, p)
if info.Flags&gc.Call != 0 {
- return 0
+ return false
}
if info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
}
}
- if copyau(&p.From, v2) != 0 || copyau1(p, v2) != 0 || copyau(&p.To, v2) != 0 {
+ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
break
}
if copysub(&p.From, v1, v2, 0) != 0 || copysub1(p, v1, v2, 0) != 0 || copysub(&p.To, v1, v2, 0) != 0 {
}
}
- return 0
+ return false
gotit:
copysub(&p.To, v1, v2, 1)
if gc.Debug['P'] != 0 {
fmt.Printf("%v last\n", r.Prog)
}
- return 1
+ return true
}
/*
* set v1 F=1
* set v2 return success (caller can remove v1->v2 move)
*/
-func copyprop(r0 *gc.Flow) int {
+func copyprop(r0 *gc.Flow) bool {
var p *obj.Prog
var v1 *obj.Addr
var v2 *obj.Addr
p = r0.Prog
v1 = &p.From
v2 = &p.To
- if copyas(v1, v2) != 0 {
+ if copyas(v1, v2) {
if gc.Debug['P'] != 0 {
fmt.Printf("eliminating self-move\n", r0.Prog)
}
- return 1
+ return true
}
gactive++
// copy1 replaces uses of v2 with v1 starting at r and returns 1 if
// all uses were rewritten.
-func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) int {
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
var t int
var p *obj.Prog
if gc.Debug['P'] != 0 {
fmt.Printf("act set; return 1\n")
}
- return 1
+ return true
}
r.Active = int32(gactive)
if gc.Debug['P'] != 0 {
fmt.Printf("%v", p)
}
- if !(f != 0) && gc.Uniqp(r) == nil {
+ if f == 0 && gc.Uniqp(r) == nil {
// Multiple predecessors; conservatively
// assume v1 was set on other path
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
}
- return 0
+ return false
case 3: /* set */
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
case 1, /* used, substitute */
4: /* use and set */
if f != 0 {
- if !(gc.Debug['P'] != 0) {
- return 0
+ if gc.Debug['P'] == 0 {
+ return false
}
if t == 4 {
fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
} else {
fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
}
- return 0
+ return false
}
if copyu(p, v2, v1) != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; sub fail; return 0\n")
}
- return 0
+ return false
}
if gc.Debug['P'] != 0 {
if gc.Debug['P'] != 0 {
fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
}
- return 1
+ return true
}
}
- if !(f != 0) {
+ if f == 0 {
t = copyu(p, v1, nil)
- if !(f != 0) && (t == 2 || t == 3 || t == 4) {
+ if f == 0 && (t == 2 || t == 3 || t == 4) {
f = 1
if gc.Debug['P'] != 0 {
fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
fmt.Printf("\n")
}
if r.S2 != nil {
- if !(copy1(v1, v2, r.S2, f) != 0) {
- return 0
+ if !copy1(v1, v2, r.S2, f) {
+ return false
}
}
}
- return 1
+ return true
}
// If s==nil, copyu returns the set/use of v in p; otherwise, it
}
// Update only indirect uses of v in p->to
- if !(copyas(&p.To, v) != 0) {
+ if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
return 0
}
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
// Fix up implicit from
if p.From.Type == obj.TYPE_NONE {
p.From = p.To
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
return 3
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
// p->to only indirectly uses v
return 1
}
ppc64.AMOVWZU,
ppc64.AMOVDU:
if p.From.Type == obj.TYPE_MEM {
- if copyas(&p.From, v) != 0 {
+ if copyas(&p.From, v) {
// No s!=nil check; need to fail
// anyway in that case
return 2
return 0
}
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
return 3
}
} else if p.To.Type == obj.TYPE_MEM {
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
return 2
}
if s != nil {
return 0
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
} else {
case ppc64.ARLWMI, /* read p->from, read p->reg, rar p->to */
ppc64.ARLWMICC:
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
return 2
}
fallthrough
}
// Update only indirect uses of v in p->to
- if !(copyas(&p.To, v) != 0) {
+ if !copyas(&p.To, v) {
if copysub(&p.To, v, s, 1) != 0 {
return 1
}
return 0
}
- if copyas(&p.To, v) != 0 {
+ if copyas(&p.To, v) {
if p.Reg == 0 {
// Fix up implicit reg (e.g., ADD
// R3,R4 -> ADD R3,R4,R4) so we can
p.Reg = p.To.Reg
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 4
}
- if copyau1(p, v) != 0 {
+ if copyau1(p, v) {
return 4
}
return 3
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau1(p, v) != 0 {
+ if copyau1(p, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return copysub(&p.To, v, s, 1)
}
- if copyau(&p.From, v) != 0 {
+ if copyau(&p.From, v) {
return 1
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 1
}
return 0
return 0
}
- if copyau(&p.To, v) != 0 {
+ if copyau(&p.To, v) {
return 4
}
return 3
// If a is the from operand, this means this operation reads the
// register in v. If a is the to operand, this means this operation
// writes the register in v.
-func copyas(a *obj.Addr, v *obj.Addr) int {
- if regtyp(v) != 0 {
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+ if regtyp(v) {
if a.Type == v.Type {
if a.Reg == v.Reg {
- return 1
+ return true
}
}
}
- return 0
+ return false
}
// copyau returns 1 if a either directly or indirectly addresses the
// register in v. If a is the to operand, this means the operation
// either reads or writes the register in v (if !copyas(a, v), then
// the operation reads the register in v).
-func copyau(a *obj.Addr, v *obj.Addr) int {
- if copyas(a, v) != 0 {
- return 1
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+ if copyas(a, v) {
+ return true
}
if v.Type == obj.TYPE_REG {
if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
if v.Reg == a.Reg {
- return 1
+ return true
}
}
}
- return 0
+ return false
}
// copyau1 returns 1 if p->reg references the same register as v and v
// is a direct reference.
-func copyau1(p *obj.Prog, v *obj.Addr) int {
- if regtyp(v) != 0 && v.Reg != 0 {
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+ if regtyp(v) && v.Reg != 0 {
if p.Reg == v.Reg {
- return 1
+ return true
}
}
- return 0
+ return false
}
// copysub replaces v with s in a if f!=0 or indicates it if could if f==0.
// Returns 1 on failure to substitute (it always succeeds on ppc64).
func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
- if copyau(a, v) != 0 {
+ if copyau(a, v) {
a.Reg = s.Reg
}
}
// Returns 1 on failure to substitute (it always succeeds on ppc64).
func copysub1(p1 *obj.Prog, v *obj.Addr, s *obj.Addr, f int) int {
if f != 0 {
- if copyau1(p1, v) != 0 {
+ if copyau1(p1, v) {
p1.Reg = s.Reg
}
}
return 0
}
-func sameaddr(a *obj.Addr, v *obj.Addr) int {
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
if a.Type != v.Type {
- return 0
+ return false
}
- if regtyp(v) != 0 && a.Reg == v.Reg {
- return 1
+ if regtyp(v) && a.Reg == v.Reg {
+ return true
}
if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
if v.Offset == a.Offset {
- return 1
+ return true
}
}
- return 0
+ return false
}
-func smallindir(a *obj.Addr, reg *obj.Addr) int {
- return bool2int(reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096)
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+ return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
}
-func stackaddr(a *obj.Addr) int {
- return bool2int(a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP)
+func stackaddr(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG && a.Reg == ppc64.REGSP
}