}
}
-func bvcmp(bv1 Bvec, bv2 Bvec) int {
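+// bveq reports whether bv1 and bv2 are equal; it Fatals if their lengths differ.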
+func bveq(bv1 Bvec, bv2 Bvec) bool {
 	if bv1.n != bv2.n {
-		Fatalf("bvequal: lengths %d and %d are not equal", bv1.n, bv2.n)
+		Fatalf("bveq: lengths %d and %d are not equal", bv1.n, bv2.n)
 	}
for i, x := range bv1.b {
if x != bv2.b[i] {
- return 1
+ return false
}
}
- return 0
+ return true
}
func bvcopy(dst Bvec, src Bvec) {
bvcopy(bb.avarinitany, bb.avarinit)
}
- change := int32(1)
- for change != 0 {
- change = 0
+ for change := true; change; {
+ change = false
for _, bb := range lv.cfg {
bvresetall(any)
bvresetall(all)
bvandnot(all, all, bb.varkill)
bvor(any, any, bb.avarinit)
bvor(all, all, bb.avarinit)
- if bvcmp(any, bb.avarinitany) != 0 {
- change = 1
+ if !bveq(any, bb.avarinitany) {
+ change = true
bvcopy(bb.avarinitany, any)
}
- if bvcmp(all, bb.avarinitall) != 0 {
- change = 1
+ if !bveq(all, bb.avarinitall) {
+ change = true
bvcopy(bb.avarinitall, all)
}
}
// Iterate through the blocks in reverse round-robin fashion. A work
// queue might be slightly faster. As is, the number of iterations is
// so low that it hardly seems to be worth the complexity.
- change = 1
- for change != 0 {
- change = 0
+ for change := true; change; {
+ change = false
// Walk blocks in the general direction of propagation. This
// improves convergence.
bvor(newliveout, newliveout, succ.livein)
}
- if bvcmp(bb.liveout, newliveout) != 0 {
- change = 1
+ if !bveq(bb.liveout, newliveout) {
+ change = true
bvcopy(bb.liveout, newliveout)
}
}
jlocal := lv.livepointers[j]
jarg := lv.argslivepointers[j]
- if bvcmp(local, jlocal) == 0 && bvcmp(arg, jarg) == 0 {
+ if bveq(local, jlocal) && bveq(arg, jarg) {
remap[i] = j
goto Next
}
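
For context, a minimal standalone sketch (not part of the patch; `bitvec`, `eq`, and the `main` driver are illustrative stand-ins for the compiler's `Bvec` and `bveq`) of the bool-returning equality helper and the `for change := true; change;` fixed-point idiom the diff converts to:

```go
package main

import "fmt"

// bitvec is a simplified stand-in for the compiler's Bvec type.
type bitvec struct {
	n int32    // number of bits
	b []uint32 // bit storage, 32 bits per word
}

// eq mirrors bveq: it reports whether two equal-length vectors hold
// identical bits, and panics on a length mismatch.
func eq(bv1, bv2 bitvec) bool {
	if bv1.n != bv2.n {
		panic(fmt.Sprintf("eq: lengths %d and %d are not equal", bv1.n, bv2.n))
	}
	for i, x := range bv1.b {
		if x != bv2.b[i] {
			return false
		}
	}
	return true
}

func main() {
	out := bitvec{n: 64, b: []uint32{0, 0}}
	in := bitvec{n: 64, b: []uint32{0xff, 0x1}}

	// The bool-driven fixed-point idiom the patch adopts: the flag is
	// scoped to the loop and reset at the top of each pass, rather than
	// an int32 sentinel compared against 0.
	for change := true; change; {
		change = false
		if !eq(out, in) {
			change = true
			copy(out.b, in.b) // propagate until nothing changes
		}
	}
	fmt.Println(eq(out, in)) // true
}
```

A per-loop boolean keeps each convergence loop self-contained; with the old `int32` sentinel the same `change` variable was shared across both dataflow loops.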