Passes toolstash/buildall.
Fixes #14692.
Change-Id: I4352678d8251309f2b8b7793674c550fac948006
Reviewed-on: https://go-review.googlesource.com/20350
Reviewed-by: Dave Cheney <dave@cheney.net>
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
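The hunks below mechanically retype opcode plumbing from untyped int (and int16) to a dedicated opcode type. The type itself is declared elsewhere in this change; the following is a minimal sketch of what the hunks assume, not the verbatim declaration:

package obj

// As is an assembler opcode (obj.ATEXT, x86.AADDQ, arm.ABEQ, ...).
// A defined type lets the compiler catch opcode/integer mix-ups that
// the old int and int16 plumbing silently allowed, with no change to
// the code that gets emitted.
type As int16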
type Arch struct {
*obj.LinkArch
// Map of instruction names to enumeration.
- Instructions map[string]int
+ Instructions map[string]obj.As
// Map of register names to enumeration.
Register map[string]int16
// Table of register prefix names. These are things like R for R(0) and SPR for SPR(268).
return 0, false
}
-var Pseudos = map[string]int{
+var Pseudos = map[string]obj.As{
"DATA": obj.ADATA,
"FUNCDATA": obj.AFUNCDATA,
"GLOBL": obj.AGLOBL,
register["PC"] = RPC
// Register prefix not used on this architecture.
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range x86.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseAMD64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseAMD64
}
}
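Each architecture's instruction map is built the same way: shared obj.A* opcodes keep their small values, and arch-specific names are offset into a disjoint range by a per-arch base. A sketch of the numbering scheme the loops above rely on (the function is illustrative, not part of the change):

// amd64As mirrors the loops above: indices below A_ARCHSPECIFIC are the
// shared pseudo-ops (ATEXT, AGLOBL, ...); everything else lands in the
// AMD64 slice of the opcode space.
func amd64As(i int) obj.As {
	if obj.As(i) < obj.A_ARCHSPECIFIC {
		return obj.As(i)
	}
	return obj.As(i) + obj.ABaseAMD64
}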
// Annoying aliases.
"R": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range arm.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseARM
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseARM
}
}
// Annoying aliases.
"V": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range arm64.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseARM64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseARM64
}
}
// Annoying aliases.
"SPR": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range ppc64.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABasePPC64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABasePPC64
}
}
// Annoying aliases.
"R": true,
}
- instructions := make(map[string]int)
+ instructions := make(map[string]obj.As)
for i, s := range obj.Anames {
- instructions[s] = i
+ instructions[s] = obj.As(i)
}
for i, s := range mips.Anames {
- if i >= obj.A_ARCHSPECIFIC {
- instructions[s] = i + obj.ABaseMIPS64
+ if obj.As(i) >= obj.A_ARCHSPECIFIC {
+ instructions[s] = obj.As(i) + obj.ABaseMIPS64
}
}
// Annoying alias.
// IsARMCMP reports whether the op (as defined by an arm.A* constant) is
// one of the comparison instructions that require special handling.
-func IsARMCMP(op int) bool {
+func IsARMCMP(op obj.As) bool {
switch op {
case arm.ACMN, arm.ACMP, arm.ATEQ, arm.ATST:
return true
// IsARMSTREX reports whether the op (as defined by an arm.A* constant) is
// one of the STREX-like instructions that require special handling.
-func IsARMSTREX(op int) bool {
+func IsARMSTREX(op obj.As) bool {
switch op {
case arm.ASTREX, arm.ASTREXD, arm.ASWPW, arm.ASWPBU:
return true
// IsARMMRC reports whether the op (as defined by an arm.A* constant) is
// MRC or MCR
-func IsARMMRC(op int) bool {
+func IsARMMRC(op obj.As) bool {
switch op {
case arm.AMRC, aMCR: // Note: aMCR is defined in this package.
return true
}
// IsARMFloatCmp reports whether the op is a floating comparison instruction.
-func IsARMFloatCmp(op int) bool {
+func IsARMFloatCmp(op obj.As) bool {
switch op {
case arm.ACMPF, arm.ACMPD:
return true
// The difference between MRC and MCR is represented by a bit high in the word, not
// in the usual way by the opcode itself. Asm must use AMRC for both instructions, so
// we return the opcode for MRC so that asm doesn't need to import obj/arm.
-func ARMMRCOffset(op int, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 int16, ok bool) {
+func ARMMRCOffset(op obj.As, cond string, x0, x1, x2, x3, x4, x5 int64) (offset int64, op0 obj.As, ok bool) {
op1 := int64(0)
if op == arm.AMRC {
op1 = 1
// IsARMMULA reports whether the op (as defined by an arm.A* constant) is
// MULA, MULAWT or MULAWB, the 4-operand instructions.
-func IsARMMULA(op int) bool {
+func IsARMMULA(op obj.As) bool {
switch op {
case arm.AMULA, arm.AMULAWB, arm.AMULAWT:
return true
return false
}
-var bcode = []int{
+var bcode = []obj.As{
arm.ABEQ,
arm.ABNE,
arm.ABCS,
}
/* hack to make B.NE etc. work: turn it into the corresponding conditional */
if prog.As == arm.AB {
- prog.As = int16(bcode[(bits^arm.C_SCOND_XOR)&0xf])
+ prog.As = bcode[(bits^arm.C_SCOND_XOR)&0xf]
bits = (bits &^ 0xf) | arm.C_SCOND_NONE
}
prog.Scond = bits
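The bcode table is indexed by the 4-bit ARM condition code, so a suffixed unconditional branch is rewritten to the dedicated conditional opcode. Illustratively: B.EQ parses as arm.AB with the EQ condition in bits, the lookup bcode[(bits^arm.C_SCOND_XOR)&0xf] yields arm.ABEQ, and the condition field is reset to C_SCOND_NONE so the opcode alone carries the condition.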
// IsARM64CMP reports whether the op (as defined by an arm64.A* constant) is
// one of the comparison instructions that require special handling.
-func IsARM64CMP(op int) bool {
+func IsARM64CMP(op obj.As) bool {
switch op {
case arm64.ACMN, arm64.ACMP, arm64.ATST,
arm64.ACMNW, arm64.ACMPW, arm64.ATSTW:
// IsARM64STLXR reports whether the op (as defined by an arm64.A*
// constant) is one of the STLXR-like instructions that require special
// handling.
-func IsARM64STLXR(op int) bool {
+func IsARM64STLXR(op obj.As) bool {
switch op {
case arm64.ASTLXRB, arm64.ASTLXRH, arm64.ASTLXRW, arm64.ASTLXR:
return true
package arch
-import "cmd/internal/obj/mips"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
func jumpMIPS64(word string) bool {
switch word {
// IsMIPS64CMP reports whether the op (as defined by a mips.A* constant) is
// one of the CMP instructions that require special handling.
-func IsMIPS64CMP(op int) bool {
+func IsMIPS64CMP(op obj.As) bool {
switch op {
case mips.ACMPEQF, mips.ACMPEQD, mips.ACMPGEF, mips.ACMPGED,
mips.ACMPGTF, mips.ACMPGTD:
// IsMIPS64MUL reports whether the op (as defined by a mips.A* constant) is
// one of the MUL/DIV/REM instructions that require special handling.
-func IsMIPS64MUL(op int) bool {
+func IsMIPS64MUL(op obj.As) bool {
switch op {
case mips.AMUL, mips.AMULU, mips.AMULV, mips.AMULVU,
mips.ADIV, mips.ADIVU, mips.ADIVV, mips.ADIVVU,
package arch
-import "cmd/internal/obj/ppc64"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
func jumpPPC64(word string) bool {
switch word {
// IsPPC64RLD reports whether the op (as defined by a ppc64.A* constant) is
// one of the RLD-like instructions that require special handling.
// The FMADD-like instructions behave similarly.
-func IsPPC64RLD(op int) bool {
+func IsPPC64RLD(op obj.As) bool {
switch op {
case ppc64.ARLDC, ppc64.ARLDCCC, ppc64.ARLDCL, ppc64.ARLDCLCC,
ppc64.ARLDCR, ppc64.ARLDCRCC, ppc64.ARLDMI, ppc64.ARLDMICC,
// IsPPC64CMP reports whether the op (as defined by a ppc64.A* constant) is
// one of the CMP instructions that require special handling.
-func IsPPC64CMP(op int) bool {
+func IsPPC64CMP(op obj.As) bool {
switch op {
case ppc64.ACMP, ppc64.ACMPU, ppc64.ACMPW, ppc64.ACMPWU:
return true
// IsPPC64NEG reports whether the op (as defined by a ppc64.A* constant) is
// one of the NEG-like instructions that require special handling.
-func IsPPC64NEG(op int) bool {
+func IsPPC64NEG(op obj.As) bool {
switch op {
case ppc64.AADDMECC, ppc64.AADDMEVCC, ppc64.AADDMEV, ppc64.AADDME,
ppc64.AADDZECC, ppc64.AADDZEVCC, ppc64.AADDZEV, ppc64.AADDZE,
// JMP R1
// JMP exit
// JMP 3(PC)
-func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
+func (p *Parser) asmJump(op obj.As, cond string, a []obj.Addr) {
var target *obj.Addr
prog := &obj.Prog{
Ctxt: p.ctxt,
Lineno: p.histLineNum,
- As: int16(op),
+ As: op,
}
switch len(a) {
case 1:
// asmInstruction assembles an instruction.
// MOVW R9, (R10)
-func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
+func (p *Parser) asmInstruction(op obj.As, cond string, a []obj.Addr) {
// fmt.Printf("%s %+v\n", obj.Aconv(op), a)
prog := &obj.Prog{
Ctxt: p.ctxt,
Lineno: p.histLineNum,
- As: int16(op),
+ As: op,
}
switch len(a) {
case 0:
}
// getConstant checks that addr represents a plain constant and returns its value.
-func (p *Parser) getConstant(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+func (p *Parser) getConstant(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
p.errorf("%s: expected integer constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
}
// getImmediate checks that addr represents an immediate constant and returns its value.
-func (p *Parser) getImmediate(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+func (p *Parser) getImmediate(prog *obj.Prog, op obj.As, addr *obj.Addr) int64 {
if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
p.errorf("%s: expected immediate constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
}
// getRegister checks that addr represents a register and returns its value.
-func (p *Parser) getRegister(prog *obj.Prog, op int, addr *obj.Addr) int16 {
+func (p *Parser) getRegister(prog *obj.Prog, op obj.As, addr *obj.Addr) int16 {
if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
p.errorf("%s: expected register; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
}
return true
}
-func (p *Parser) instruction(op int, word, cond string, operands [][]lex.Token) {
+func (p *Parser) instruction(op obj.As, word, cond string, operands [][]lex.Token) {
p.addr = p.addr[0:0]
p.isJump = p.arch.IsJump(word)
for _, op := range operands {
p.asmInstruction(op, cond, p.addr)
}
-func (p *Parser) pseudo(op int, word string, operands [][]lex.Token) {
+func (p *Parser) pseudo(op obj.As, word string, operands [][]lex.Token) {
switch op {
case obj.ATEXT:
p.asmText(word, operands)
var MAXWIDTH int64 = 1 << 50
var (
- addptr int = x86.AADDQ
- movptr int = x86.AMOVQ
- leaptr int = x86.ALEAQ
- cmpptr int = x86.ACMPQ
+ addptr = x86.AADDQ
+ movptr = x86.AMOVQ
+ leaptr = x86.ALEAQ
+ cmpptr = x86.ACMPQ
)
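With the x86.A* constants now typed obj.As, the explicit int annotations removed above would no longer compile; dropping them lets the variables take the constants' type by inference, so later uses (e.g. the p.As = cmpptr assignment below) need no conversion.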
func betypeinit() {
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
p2.Lineno = p.Lineno
p1.Pc = 9999
p2.Pc = 9999
- p.As = int16(cmpptr)
+ p.As = cmpptr
p.To.Type = obj.TYPE_CONST
p.To.Offset = 0
p1.As = x86.AJNE
* generate
* as $c, reg
*/
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
var nr gc.Node
switch as {
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
switch as {
return gc.Gbranch(optoas(op, t), nil, likely)
}
-func ginsboolval(a int, n *gc.Node) {
+func ginsboolval(a obj.As, n *gc.Node) {
gins(jmptoset(a), nil, n)
}
}
// cannot have two memory operands
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// if(f != N && f->op == OINDEX) {
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
}
// jmptoset returns ASETxx for AJxx.
-func jmptoset(jmp int) int {
+func jmptoset(jmp obj.As) obj.As {
switch jmp {
case x86.AJEQ:
return x86.ASETEQ
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int32, ttype obj.AddrType, treg int, toffset int32) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n *gc.Node) {
+func ginscon(as obj.As, c int64, n *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
var n2 gc.Node
// cannot have two memory operands;
// except 64-bit, which always copies via registers anyway.
- var a int
+ var a obj.As
var r1 gc.Node
if !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// Node nod;
// int32 v;
/* generate a constant shift
* arm encodes a shift by 32 as 0, thus asking for 0 shift is illegal.
*/
-func gshift(as int, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
+func gshift(as obj.As, lhs *gc.Node, stype int32, sval int32, rhs *gc.Node) *obj.Prog {
if sval <= 0 || sval > 32 {
gc.Fatalf("bad shift value: %d", sval)
}
/* generate a register shift
*/
-func gregshift(as int, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
+func gregshift(as obj.As, lhs *gc.Node, stype int32, reg *gc.Node, rhs *gc.Node) *obj.Prog {
p := gins(as, nil, rhs)
p.From.Type = obj.TYPE_SHIFT
p.From.Offset = int64(stype) | (int64(reg.Reg)&15)<<8 | 1<<4 | int64(lhs.Reg)&15
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
if n.Type == nil {
return false
}
}
if gc.Debug['P'] != 0 {
- fmt.Printf(" => %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf(" => %v\n", obj.Aconv(p.As))
}
return true
}
func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case arm.AMOVM:
}
var predinfo = []struct {
- opcode int
- notopcode int
+ opcode obj.As
+ notopcode obj.As
scond int
notscond int
}{
excise(r)
} else {
if cond == Truecond {
- r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].opcode)
+ r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].opcode
} else {
- r.Prog.As = int16(predinfo[rstart.Prog.As-arm.ABEQ].notopcode)
+ r.Prog.As = predinfo[rstart.Prog.As-arm.ABEQ].notopcode
}
}
} else if predicable(r.Prog) {
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
* generate
* as n, $c (CMP)
*/
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
// cannot have two memory operands
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
}
}
-func gcmp(as int, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
+func gcmp(as obj.As, lhs *gc.Node, rhs *gc.Node) *obj.Prog {
if lhs.Op != gc.OREGISTER {
gc.Fatalf("bad operands to gcmp: %v %v", gc.Oconv(lhs.Op, 0), gc.Oconv(rhs.Op, 0))
}
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OSQRT_ = uint32(gc.OSQRT) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
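optoas dispatches on the operation packed into the high 16 bits and the simplified type in the low 16; the trailing-underscore constants above exist so case labels stay single constant expressions. The case labels themselves are elided from these hunks; their assumed shape, with OADD_ declared like OSQRT_ above (hypothetical names for illustration):

// Assumed shape of the elided cases, following the
// OXXX_ = uint32(gc.OXXX) << 16 convention used above.
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
case OADD_ | uint32(gc.TINT64):
	a = arm64.AADD
}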
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
continue
}
if gc.Debug['P'] != 0 {
- fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(int(p1.As)), p, p1)
+ fmt.Printf("encoding $%d directly into %v in:\n%v\n%v\n", p.From.Offset, obj.Aconv(p1.As), p, p1)
}
p1.From.Type = obj.TYPE_CONST
p1.From = p.From
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
}
}
- var a int
+ var a obj.As
switch n.Op {
default:
Dump("cgen", n)
regalloc = func(n *Node, t *Type, reuse *Node) {
Tempname(n, t)
}
- ginscon = func(as int, c int64, n *Node) {
+ ginscon = func(as obj.As, c int64, n *Node) {
var n1 Node
Regalloc(&n1, n.Type, n)
Thearch.Gmove(n, &n1)
Thearch.Gmove(&n1, n)
Regfree(&n1)
}
- gins = func(as int, f, t *Node) *obj.Prog {
+ gins = func(as obj.As, f, t *Node) *obj.Prog {
var n1 Node
Regalloc(&n1, t.Type, t)
Thearch.Gmove(t, &n1)
Excise func(*Flow)
Expandchecks func(*obj.Prog)
Getg func(*Node)
- Gins func(int, *Node, *Node) *obj.Prog
+ Gins func(obj.As, *Node, *Node) *obj.Prog
// Ginscmp generates code comparing n1 to n2 and jumping away if op is satisfied.
// The returned prog should be Patch'ed with the jump target.
// corresponding to the desired value.
// The second argument is the destination.
// If not present, Ginsboolval will be emulated with jumps.
- Ginsboolval func(int, *Node)
+ Ginsboolval func(obj.As, *Node)
- Ginscon func(int, int64, *Node)
+ Ginscon func(obj.As, int64, *Node)
Ginsnop func()
Gmove func(*Node, *Node)
Igenindex func(*Node, *Node, bool) *obj.Prog
Smallindir func(*obj.Addr, *obj.Addr) bool
Stackaddr func(*obj.Addr) bool
Blockcopy func(*Node, *Node, int64, int64, int64)
- Sudoaddable func(int, *Node, *obj.Addr) bool
+ Sudoaddable func(obj.As, *Node, *obj.Addr) bool
Sudoclean func()
Excludedregs func() uint64
RtoB func(int) uint64
FtoB func(int) uint64
BtoR func(uint64) int
BtoF func(uint64) int
- Optoas func(Op, *Type) int
+ Optoas func(Op, *Type) obj.As
Doregbits func(int) uint64
Regnames func(*int) []string
Use387 bool // should 8g use 387 FP instructions instead of sse2.
return true
}
-func Gbranch(as int, t *Type, likely int) *obj.Prog {
+func Gbranch(as obj.As, t *Type, likely int) *obj.Prog {
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
return p
}
-func Prog(as int) *obj.Prog {
+func Prog(as obj.As) *obj.Prog {
var p *obj.Prog
if as == obj.ADATA || as == obj.AGLOBL {
}
}
- p.As = int16(as)
+ p.As = as
p.Lineno = lineno
return p
}
// that its argument is certainly dead, for use when the liveness analysis
// would not otherwise be able to deduce that fact.
-func gvardefx(n *Node, as int) {
+func gvardefx(n *Node, as obj.As) {
if n == nil {
Fatalf("gvardef nil")
}
}
// Construct a disembodied instruction.
-func unlinkedprog(as int) *obj.Prog {
+func unlinkedprog(as obj.As) *obj.Prog {
p := Ctxt.NewProg()
Clearp(p)
- p.As = int16(as)
+ p.As = as
return p
}
else if(a->sym == nil)
a->type = TYPE_CONST;
*/
- p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+ p1.As = Thearch.Optoas(OAS, Types[uint8(v.etype)])
// TODO(rsc): Remove special case here.
if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
- p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ p1.As = Thearch.Optoas(OAS, Types[TUINT8])
}
p1.From.Type = obj.TYPE_REG
p1.From.Reg = int16(rn)
// dest := dest(To) op src(From)
// and also returns the created obj.Prog so it
// may be further adjusted (offset, scale, etc).
-func opregreg(op int, dest, src int16) *obj.Prog {
+func opregreg(op obj.As, dest, src int16) *obj.Prog {
p := Prog(op)
p.From.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Type = obj.TYPE_REG
p.To.Reg = r
default:
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQ:
asm = x86.ALEAQ
a := regnum(v.Args[0])
if r == a {
if v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
// Software optimization manual recommends add $1,reg.
// But inc/dec is 1 byte smaller. ICC always uses inc
p.To.Reg = r
return
} else if v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ADECQ
return
}
}
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64ADDQconst:
asm = x86.ALEAQ
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == -1 {
- var asm int
+ var asm obj.As
// x = x - (-1) is the same as x++
// See OpAMD64ADDQconst comments about inc vs add $1,reg
switch v.Op {
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else if x == r && v.AuxInt2Int64() == 1 {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ADECQ
p.To.Type = obj.TYPE_REG
p.To.Reg = r
} else {
- var asm int
+ var asm obj.As
switch v.Op {
case ssa.OpAMD64SUBQconst:
asm = x86.ALEAQ
}
// movZero generates a register indirect move with a 0 immediate and keeps track of bytes left and next offset
-func movZero(as int, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
+func movZero(as obj.As, width int64, nbytes int64, offset int64, regnum int16) (nleft int64, noff int64) {
p := Prog(as)
// TODO: use zero register on archs that support it.
p.From.Type = obj.TYPE_CONST
}
var blockJump = [...]struct {
- asm, invasm int
+ asm, invasm obj.As
}{
ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
}
type floatingEQNEJump struct {
- jump, index int
+ jump obj.As
+ index int
}
var eqfJumps = [2][2]floatingEQNEJump{
}
// loadByType returns the load instruction of the given type.
-func loadByType(t ssa.Type) int {
+func loadByType(t ssa.Type) obj.As {
// Avoid partial register write
if !t.IsFloat() && t.Size() <= 2 {
if t.Size() == 1 {
}
// storeByType returns the store instruction of the given type.
-func storeByType(t ssa.Type) int {
+func storeByType(t ssa.Type) obj.As {
width := t.Size()
if t.IsFloat() {
switch width {
}
// moveByType returns the reg->reg move instruction of the given type.
-func moveByType(t ssa.Type) int {
+func moveByType(t ssa.Type) obj.As {
if t.IsFloat() {
// Moving the whole sse2 register is faster
// than moving just the correct low portion of it.
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
// generate branch
// n1, n2 are registers
-func ginsbranch(as int, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+func ginsbranch(as obj.As, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
p := gc.Gbranch(as, t, likely)
gc.Naddr(&p.From, n1)
if n2 != nil {
// cannot have two memory operands
var r2 gc.Node
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* as f, r, t
* r must be register, if not nil
*/
-func gins3(as int, f, r, t *gc.Node) *obj.Prog {
+func gins3(as obj.As, f, r, t *gc.Node) *obj.Prog {
p := rawgins(as, f, t)
if r != nil {
p.Reg = r.Reg
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OHMUL_ = uint32(gc.OHMUL) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
// for example moving [4]byte must use 4 MOVB not 1 MOVW.
align := int(n.Type.Align)
- var op int
+ var op obj.As
switch align {
default:
gc.Fatalf("sgen: invalid alignment %d for %v", align, n.Type)
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
* generate
* as n, $c (CMP/CMPU)
*/
-func ginscon2(as int, n2 *gc.Node, c int64) {
+func ginscon2(as obj.As, n2 *gc.Node, c int64) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
// cannot have two memory operands
var r2 gc.Node
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
// gins is called by the front end.
// It synthesizes some multiple-instruction sequences
// so the front end can stay simpler.
-func gins(as int, f, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f, t *gc.Node) *obj.Prog {
if as >= obj.A_ARCHSPECIFIC {
if x, ok := f.IntLiteral(); ok {
ginscon(as, x, t)
* generate one instruction:
* as f, t
*/
-func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func rawgins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
// TODO(austin): Add self-move test like in 6g (but be careful
// of truncation moves)
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
OHMUL_ = uint32(gc.OHMUL) << 16
)
- a := int(obj.AXXX)
+ a := obj.AXXX
switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
default:
gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(op, 0), t)
* after successful sudoaddable,
* to release the register used for a.
*/
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
// TODO(minux)
*a = obj.Addr{}
var p *obj.Prog
var r *gc.Flow
- var t int
+ var t obj.As
loop1:
if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
gc.Dumpit("loop1", g.Start, 0)
ppc64.ASUBZE,
ppc64.ASUBZEV,
ppc64.AXOR:
- t = variant2as(int(p1.As), as2variant(int(p1.As))|V_CC)
+ t = variant2as(p1.As, as2variant(p1.As)|V_CC)
}
if gc.Debug['D'] != 0 {
fmt.Printf("cmp %v; %v -> ", p1, p)
}
- p1.As = int16(t)
+ p1.As = t
if gc.Debug['D'] != 0 {
fmt.Printf("%v\n", p1)
}
switch p.As {
default:
- fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+ fmt.Printf("copyu: can't find %v\n", obj.Aconv(p.As))
return 2
case obj.ANOP, /* read p->from, write p->to */
// Perform one-time expansion of instructions in progtable to
// their CC, V, and VCC variants
- for as := range progtable {
+ for i := range progtable {
+ as := obj.As(i)
if progtable[as].Flags == 0 {
continue
}
// Instruction variants table. Initially this contains entries only
// for the "base" form of each instruction. On the first call to
// as2variant or variant2as, we'll add the variants to the table.
-var varianttable = [ppc64.ALAST][4]int{
+var varianttable = [ppc64.ALAST][4]obj.As{
ppc64.AADD: {ppc64.AADD, ppc64.AADDCC, ppc64.AADDV, ppc64.AADDVCC},
ppc64.AADDC: {ppc64.AADDC, ppc64.AADDCCC, ppc64.AADDCV, ppc64.AADDCVCC},
ppc64.AADDE: {ppc64.AADDE, ppc64.AADDECC, ppc64.AADDEV, ppc64.AADDEVCC},
for i := range varianttable {
if varianttable[i][0] == 0 {
// Instruction has no variants
- varianttable[i][0] = i
+ varianttable[i][0] = obj.As(i)
continue
}
// Copy base form to other variants
- if varianttable[i][0] == i {
+ if varianttable[i][0] == obj.As(i) {
for j := range varianttable[i] {
varianttable[varianttable[i][j]] = varianttable[i]
}
}
// as2variant returns the variant (V_*) flags of instruction as.
-func as2variant(as int) int {
+func as2variant(as obj.As) int {
for i := range varianttable[as] {
if varianttable[as][i] == as {
return i
// variant2as returns the instruction as with the given variant (V_*) flags.
// If no such variant exists, this returns 0.
-func variant2as(as int, flags int) int {
+func variant2as(as obj.As, flags int) obj.As {
return varianttable[as][flags]
}
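Each varianttable row lists the base opcode followed by its CC, V, and VCC forms, so the V_* flag values double as row indices. A worked example under that reading, consistent with the peephole rewrite above:

// as2variant(ppc64.AADDCC) == V_CC: AADDCC sits at index 1 of AADD's row.
// variant2as(ppc64.AADD, V_CC) == ppc64.AADDCC.
// Hence t = variant2as(p1.As, as2variant(p1.As)|V_CC) keeps p1's V bit
// and adds the CC bit, e.g. AADD -> AADDCC and AADDV -> AADDVCC.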
fmt.Fprintln(w)
fmt.Fprintln(w, "package ssa")
- fmt.Fprintln(w, "import \"cmd/internal/obj/x86\"")
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"cmd/internal/obj\"")
+ fmt.Fprintln(w, "\"cmd/internal/obj/x86\"")
+ fmt.Fprintln(w, ")")
// generate Block* declarations
fmt.Fprintln(w, "const (")
}
fmt.Fprintln(w, "}")
- fmt.Fprintln(w, "func (o Op) Asm() int {return opcodeTable[o].asm}")
+ fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
// generate op string method
fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
package ssa
-import "fmt"
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
// An Op encodes the specific operation that a Value performs.
// Opcodes' semantics can be modified by the type and aux fields of the Value.
type opInfo struct {
name string
- asm int
reg regInfo
auxType auxType
argLen int32 // the number of arguments, -1 if variable length
- generic bool // this is a generic (arch-independent) opcode
- rematerializeable bool // this op is rematerializeable
- commutative bool // this operation is commutative (e.g. addition)
+ asm obj.As
+ generic bool // this is a generic (arch-independent) opcode
+ rematerializeable bool // this op is rematerializeable
+ commutative bool // this operation is commutative (e.g. addition)
}
type inputInfo struct {
package ssa
-import "cmd/internal/obj/x86"
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
const (
BlockInvalid BlockKind = iota
},
}
-func (o Op) Asm() int { return opcodeTable[o].asm }
+func (o Op) Asm() obj.As { return opcodeTable[o].asm }
func (o Op) String() string { return opcodeTable[o].name }
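Asm is what the gc code generator feeds to Prog when emitting machine instructions; with the table field typed obj.As, the opcode flows from the SSA op table into Prog.As without conversions. A sketch of the consuming side (call-site shape assumed, not shown in these hunks):

// Assumed consumer in the code generator: look up the machine opcode
// for an SSA op and start an instruction with it.
p := Prog(v.Op.Asm()) // Prog.As is obj.As end to end now
p.From.Type = obj.TYPE_REG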
return p
}
-func appendpp(p *obj.Prog, as int, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
+func appendpp(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int, foffset int64, ttype obj.AddrType, treg int, toffset int64) *obj.Prog {
q := gc.Ctxt.NewProg()
gc.Clearp(q)
- q.As = int16(as)
+ q.As = as
q.Lineno = p.Lineno
q.From.Type = ftype
q.From.Reg = int16(freg)
}
func cgen_floatsse(n *gc.Node, res *gc.Node) {
- var a int
+ var a obj.As
nl := n.Left
nr := n.Right
/*
* return Axxx for Oxxx on type t.
*/
-func optoas(op gc.Op, t *gc.Type) int {
+func optoas(op gc.Op, t *gc.Type) obj.As {
if t == nil {
gc.Fatalf("optoas: t is nil")
}
return a
}
-func foptoas(op gc.Op, t *gc.Type, flg int) int {
+func foptoas(op gc.Op, t *gc.Type, flg int) obj.As {
a := obj.AXXX
et := gc.Simtype[t.Etype]
* generate
* as $c, reg
*/
-func gconreg(as int, c int64, reg int) {
+func gconreg(as obj.As, c int64, reg int) {
var n1 gc.Node
var n2 gc.Node
* generate
* as $c, n
*/
-func ginscon(as int, c int64, n2 *gc.Node) {
+func ginscon(as obj.As, c int64, n2 *gc.Node) {
var n1 gc.Node
gc.Nodconst(&n1, gc.Types[gc.TINT32], c)
gins(as, &n1, n2)
// cannot have two integer memory operands;
// except 64-bit, which always copies via registers anyway.
var r1 gc.Node
- var a int
+ var a obj.As
if gc.Isint[ft] && gc.Isint[tt] && !gc.Is64(f.Type) && !gc.Is64(t.Type) && gc.Ismem(f) && gc.Ismem(t) {
goto hard
}
func floatmove_387(f *gc.Node, t *gc.Node) {
var r1 gc.Node
- var a int
+ var a obj.As
ft := gc.Simsimtype(f.Type)
tt := gc.Simsimtype(t.Type)
func floatmove_sse(f *gc.Node, t *gc.Node) {
var r1 gc.Node
var cvt *gc.Type
- var a int
+ var a obj.As
ft := gc.Simsimtype(f.Type)
tt := gc.Simsimtype(t.Type)
* generate one instruction:
* as f, t
*/
-func gins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+func gins(as obj.As, f *gc.Node, t *gc.Node) *obj.Prog {
if as == x86.AFMOVF && f != nil && f.Op == gc.OREGISTER && t != nil && t.Op == gc.OREGISTER {
gc.Fatalf("gins MOVF reg, reg")
}
func sudoclean() {
}
-func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+func sudoaddable(as obj.As, n *gc.Node, a *obj.Addr) bool {
*a = obj.Addr{}
return false
}
)
type Optab struct {
- as uint16
+ as obj.As
a1 uint8
a2 int8
a3 uint8
}
if false { /*debug['O']*/
- fmt.Printf("oplook %v %v %v %v\n", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+ fmt.Printf("oplook %v %v %v %v\n", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3))
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
return false
}
-func opset(a, b0 uint16) {
+func opset(a, b0 obj.As) {
oprange[a&obj.AMask] = oprange[b0]
}
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case AADD:
}
case 1: /* op R,[R],R */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
case 2: /* movbu $I,[R],R */
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
rt := int(p.To.Reg)
r := int(p.Reg)
o1 |= (uint32(p.To.Reg) & 15) << 12
case 5: /* bra s */
- o1 = opbra(ctxt, p, int(p.As), int(p.Scond))
+ o1 = opbra(ctxt, p, p.As, int(p.Scond))
v := int32(-8)
if p.To.Sym != nil {
case 8: /* sll $c,[R],R -> mov (R<<$c),R */
aclass(ctxt, &p.From)
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
o1 |= (uint32(p.To.Reg) & 15) << 12
case 9: /* sll R,[R],R -> mov (R<<R),R */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
r := int(p.Reg)
if r == 0 {
o1 |= (uint32(p.To.Reg) & 15) << 12
case 10: /* swi [$con] */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
if p.To.Type != obj.TYPE_NONE {
aclass(ctxt, &p.To)
if o1 == 0 {
break
}
- o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 = oprrr(ctxt, p.As, int(p.Scond))
o2 |= REGTMP & 15
r := int(p.Reg)
if p.As == AMOVW || p.As == AMVN {
}
case 15: /* mul r,[r,]r */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o2 = 0
case 17:
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
rt2 := int(p.To.Offset)
if r == 0 {
r = int(o.param)
}
- o1 = osr(ctxt, int(p.As), int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+ o1 = osr(ctxt, p.As, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
case 21: /* mov/movbu O(R),R -> lr */
aclass(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
+ o1 = ofsr(ctxt, p.As, int(p.From.Reg), v, r, int(p.Scond), p)
case 51: /* floating point load */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(o.param)
}
- o1 = ofsr(ctxt, int(p.As), int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
+ o1 = ofsr(ctxt, p.As, int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
case 52: /* floating point store, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.To, REGTMP)
r = int(o.param)
}
o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
- o3 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ o3 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
case 53: /* floating point load, int32 offset UGLY */
o1 = omvl(ctxt, p, &p.From, REGTMP)
r = int(o.param)
}
o2 = oprrr(ctxt, AADD, int(p.Scond)) | (REGTMP&15)<<12 | (REGTMP&15)<<16 | (uint32(r)&15)<<0
- o3 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+ o3 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
case 54: /* floating point arith */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
rf := int(p.From.Reg)
rt := int(p.To.Reg)
if o1 == 0 {
break
}
- o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ o2 = osr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond))
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
if o1 == 0 {
break
}
- o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ o2 = ofsr(ctxt, p.As, int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
if o1 == 0 {
break
}
- o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
+ o2 = ofsr(ctxt, p.As, int(p.To.Reg), 0, (REGTMP&15), int(p.Scond), p) | 1<<20
if o.flag&LPCREL != 0 {
o3 = o2
o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP&15 | (REGPC&15)<<16 | (REGTMP&15)<<12
o1 |= (uint32(v) & 0xf0) << 12
case 82: /* fcmp freg,freg, */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.Reg)&15)<<12 | (uint32(p.From.Reg)&15)<<0
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 83: /* fcmp freg,, */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg)&15)<<12 | 1<<16
o2 = 0x0ef1fa10 // VMRS R15
o2 |= ((uint32(p.Scond) & C_SCOND) ^ C_SCOND_XOR) << 28
case 84: /* movfw freg,freg - truncate float-to-fix */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
case 85: /* movwf freg,freg - fix-to-float */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (uint32(p.To.Reg) & 15) << 12
// macro for movfw freg,FTMP; movw FTMP,reg
case 86: /* movfw freg,reg - truncate float-to-fix */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.From.Reg) & 15) << 0
o1 |= (FREGTMP & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 12
o1 |= (FREGTMP & 15) << 16
- o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 = oprrr(ctxt, p.As, int(p.Scond))
o2 |= (FREGTMP & 15) << 0
o2 |= (uint32(p.To.Reg) & 15) << 12
o1 = 0xf7fabcfd
case 97: /* CLZ Rm, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 0
case 98: /* MULW{T,B} Rs, Rm, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 16
o1 |= (uint32(p.From.Reg) & 15) << 8
o1 |= (uint32(p.Reg) & 15) << 0
case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
- o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 = oprrr(ctxt, p.As, int(p.Scond))
o1 |= (uint32(p.To.Reg) & 15) << 12
o1 |= (uint32(p.From.Reg) & 15) << 8
func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
aclass(ctxt, &p.From)
- o1 := oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 := oprrr(ctxt, p.As, int(p.Scond))
o1 |= uint32(p.From.Offset)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
return o1
}
-func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As, sc int) uint32 {
o := ((uint32(sc) & C_SCOND) ^ C_SCOND_XOR) << 28
if sc&C_SBIT != 0 {
o |= 1 << 20
return 0
}
-func opbra(ctxt *obj.Link, p *obj.Prog, a int, sc int) uint32 {
+func opbra(ctxt *obj.Link, p *obj.Prog, a obj.As, sc int) uint32 {
if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
ctxt.Diag("%v: .nil/.nil/.W on bra instruction", p)
}
return o
}
-func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
+func osr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int) uint32 {
o := olr(ctxt, v, b, r, sc) ^ (1 << 20)
if a != AMOVW {
o |= 1 << 22
return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
}
-func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
+func ofsr(ctxt *obj.Link, a obj.As, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
if sc&C_SBIT != 0 {
ctxt.Diag(".nil on FLDR/FSTR instruction: %v", p)
}
q = p
}
- var o int
var p1 *obj.Prog
var p2 *obj.Prog
var q2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
autosize = int32(p.To.Offset + 4)
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
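relinv maps each conditional branch to its inverse (BEQ to BNE, and so on), which lets xfol below flip a branch when it moves the branch target into the fall-through position.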
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AB {
q = p.Pcond
if q != nil && q.As != obj.ATEXT {
if q == *last || q == nil {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AB
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q = obj.Brchain(ctxt, p.Link)
if a != obj.ATEXT {
if q != nil && (q.Mark&FOLL != 0) {
- p.As = int16(relinv(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
goto loop
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
ASWI: true,
AWORD: true,
}
)
type Optab struct {
- as uint16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
}
if false {
- fmt.Printf("oplook %v %d %d %d\n", obj.Aconv(int(p.As)), a1, a2, a3)
+ fmt.Printf("oplook %v %d %d %d\n", obj.Aconv(p.As), a1, a2, a3)
fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type)
}
func (x ocmp) Less(i, j int) bool {
p1 := &x[i]
p2 := &x[j]
- n := int(p1.as) - int(p2.as)
- if n != 0 {
- return n < 0
+ if p1.as != p2.as {
+ return p1.as < p2.as
}
- n = int(p1.a1) - int(p2.a1)
- if n != 0 {
- return n < 0
+ if p1.a1 != p2.a1 {
+ return p1.a1 < p2.a1
}
- n = int(p1.a2) - int(p2.a2)
- if n != 0 {
- return n < 0
+ if p1.a2 != p2.a2 {
+ return p1.a2 < p2.a2
}
- n = int(p1.a3) - int(p2.a3)
- if n != 0 {
- return n < 0
+ if p1.a3 != p2.a3 {
+ return p1.a3 < p2.a3
}
- n = int(p1.scond) - int(p2.scond)
- if n != 0 {
- return n < 0
+ if p1.scond != p2.scond {
+ return p1.scond < p2.scond
}
return false
}
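The old subtraction-based three-way comparison needed int conversions around every typed field; comparing field by field drops the conversions and reads directly as the lexicographic order it implements.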
for n = 0; optab[n].as != obj.AXXX; n++ {
}
sort.Sort(ocmp(optab[:n]))
- var r int
for i := 0; i < n; i++ {
- r = int(optab[i].as)
+ r := optab[i].as
start := i
- for int(optab[i].as) == r {
+ for optab[i].as == r {
i++
}
t := optab[start:i]
break
case 1: /* op Rm,[Rn],Rd; default Rn=Rd -> op Rm<<0,[Rn,]Rd (shifted register) */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 2: /* add/sub $(uimm12|uimm24)[,R],R; cmp $(uimm12|uimm24),R */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rt := int(p.To.Reg)
if p.To.Type == obj.TYPE_NONE {
o1 = oaddi(ctxt, int32(o1), v, r, rt)
case 3: /* op R<<n[,R],R (shifted register) */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
rt := int(p.To.Reg)
o1 |= (uint32(r&31) << 5) | uint32(rt&31)
case 4: /* mov $addcon, R; mov $recon, R; mov $racon, R */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rt := int(p.To.Reg)
r := int(o.param)
o1 |= ((uint32(v) & 0xFFF) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
case 5: /* b s; bl s */
- o1 = opbra(ctxt, int(p.As))
+ o1 = opbra(ctxt, p.As)
if p.To.Sym == nil {
o1 |= uint32(brdist(ctxt, p, 0, 26, 2))
rel.Type = obj.R_CALLARM64
case 6: /* b ,O(R); bl ,O(R) */
- o1 = opbrr(ctxt, int(p.As))
+ o1 = opbrr(ctxt, p.As)
o1 |= uint32(p.To.Reg&31) << 5
rel := obj.Addrel(ctxt.Cursym)
rel.Type = obj.R_CALLIND
case 7: /* beq s */
- o1 = opbra(ctxt, int(p.As))
+ o1 = opbra(ctxt, p.As)
o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
}
case 9: /* lsl Rm,[Rn],Rd -> lslv Rm, Rn, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
r := int(p.Reg)
if r == 0 {
o1 |= (uint32(p.From.Reg&31) << 16) | (uint32(r&31) << 5) | uint32(p.To.Reg&31)
case 10: /* brk/hvc/.../svc [$con] */
- o1 = opimm(ctxt, int(p.As))
+ o1 = opimm(ctxt, p.As)
if p.To.Type != obj.TYPE_NONE {
o1 |= uint32((p.To.Offset & 0xffff) << 5)
}
case 12: /* movT $vcon, reg */
- o1 = omovlit(ctxt, int(p.As), p, &p.From, int(p.To.Reg))
+ o1 = omovlit(ctxt, p.As, p, &p.From, int(p.To.Reg))
case 13: /* addop $vcon, [R], R (64 bit literal); cmp $lcon,R -> addop $lcon,R, ZR */
o1 = omovlit(ctxt, AMOVD, p, &p.From, REGTMP)
r = rt
}
if p.To.Type != obj.TYPE_NONE && (p.To.Reg == REGSP || r == REGSP) {
- o2 = opxrrr(ctxt, int(p.As))
+ o2 = opxrrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16
o2 |= LSL0_64
} else {
- o2 = oprrr(ctxt, int(p.As))
+ o2 = oprrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16 /* shift is 0 */
}
}
case 15: /* mul/mneg/umulh/umull r,[r,]r; madd/msub Rm,Rn,Ra,Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(ra&31) << 10) | (uint32(r&31) << 5) | uint32(rt&31)
case 16: /* XremY R[,R],R -> XdivY; XmsubY */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o2 |= (uint32(rf&31) << 16) | (uint32(r&31) << 10) | (REGTMP & 31 << 5) | uint32(rt&31)
case 17: /* op Rm,[Rn],Rd; default Rn=ZR */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 18: /* csel cond,Rn,Rm,Rd; cinc/cinv/cneg cond,Rn,Rd; cset cond,Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
cond := int(p.From.Reg)
r := int(p.Reg)
cond := int(p.From.Reg)
var rf int
if p.From3.Type == obj.TYPE_REG {
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf = int(p.From3.Reg) /* Rm */
} else {
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
rf = int(p.From3.Offset & 0x1F)
}
r = int(o.param)
}
if v < 0 { /* unscaled 9-bit signed */
- o1 = olsr9s(ctxt, int32(opstr9(ctxt, int(p.As))), v, r, int(p.From.Reg))
+ o1 = olsr9s(ctxt, int32(opstr9(ctxt, p.As)), v, r, int(p.From.Reg))
} else {
v = int32(offsetshift(ctxt, int64(v), int(o.a3)))
- o1 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), v, r, int(p.From.Reg))
+ o1 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), v, r, int(p.From.Reg))
}
case 21: /* movT O(R),R -> ldrT */
r = int(o.param)
}
if v < 0 { /* unscaled 9-bit signed */
- o1 = olsr9s(ctxt, int32(opldr9(ctxt, int(p.As))), v, r, int(p.To.Reg))
+ o1 = olsr9s(ctxt, int32(opldr9(ctxt, p.As)), v, r, int(p.To.Reg))
} else {
v = int32(offsetshift(ctxt, int64(v), int(o.a1)))
//print("offset=%lld v=%ld a1=%d\n", instoffset, v, o->a1);
- o1 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), v, r, int(p.To.Reg))
+ o1 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), v, r, int(p.To.Reg))
}
case 22: /* movT (R)O!,R; movT O(R)!, R -> ldrT */
if v < -256 || v > 255 {
ctxt.Diag("offset out of range\n%v", p)
}
- o1 = opldrpp(ctxt, int(p.As))
+ o1 = opldrpp(ctxt, p.As)
if o.scond == C_XPOST {
o1 |= 1 << 10
} else {
if v < -256 || v > 255 {
ctxt.Diag("offset out of range\n%v", p)
}
- o1 = LD2STR(opldrpp(ctxt, int(p.As)))
+ o1 = LD2STR(opldrpp(ctxt, p.As))
if o.scond == C_XPOST {
o1 |= 1 << 10
} else {
if s {
ctxt.Diag("illegal SP reference\n%v", p)
}
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
} else if s {
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= (uint32(rf&31) << 5) | uint32(rt&31)
} else {
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
}
case 25: /* negX Rs, Rd -> subX Rs<<0, ZR, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
rf := int(p.From.Reg)
rt := int(p.To.Reg)
o1 |= (uint32(rf&31) << 16) | (REGZERO & 31 << 5) | uint32(rt&31)
case 26: /* negX Rm<<s, Rd -> subX Rm<<s, ZR, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Offset) /* includes reg, op, etc */
rt := int(p.To.Reg)
o1 |= (REGZERO & 31 << 5) | uint32(rt&31)
case 27: /* op Rm<<n[,Rn],Rd (extended register) */
- o1 = opxrrr(ctxt, int(p.As))
+ o1 = opxrrr(ctxt, p.As)
if (p.From.Reg-obj.RBaseARM64)&REG_EXT != 0 {
ctxt.Diag("extended register not implemented\n%v", p)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = oprrr(ctxt, int(p.As))
+ o2 = oprrr(ctxt, p.As)
o2 |= REGTMP & 31 << 16 /* shift is 0 */
o2 |= uint32(r&31) << 5
o2 |= uint32(p.To.Reg & 31)
case 29: /* op Rn, Rd */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
o1 |= uint32(p.From.Reg&31)<<5 | uint32(p.To.Reg&31)
case 30: /* movT R,L(R) -> strT */
- s := movesize(int(o.as))
+ s := movesize(o.as)
if s < 0 {
- ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(int(p.As)), obj.Aconv(int(o.as)), p)
+ ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(p.As), obj.Aconv(o.as), p)
}
v := int32(regoff(ctxt, &p.To))
if v < 0 {
r = int(o.param)
}
o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
- o2 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
+ o2 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.From.Reg))
case 31: /* movT L(R), R -> ldrT */
- s := movesize(int(o.as))
+ s := movesize(o.as)
if s < 0 {
- ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(int(p.As)), obj.Aconv(int(o.as)), p)
+ ctxt.Diag("unexpected long move, op %v tab %v\n%v", obj.Aconv(p.As), obj.Aconv(o.as), p)
}
v := int32(regoff(ctxt, &p.From))
if v < 0 {
r = int(o.param)
}
o1 = oaddi(ctxt, int32(opirr(ctxt, AADD)), hi, r, REGTMP)
- o2 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
+ o2 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), ((v-hi)>>uint(s))&0xFFF, REGTMP, int(p.To.Reg))
case 32: /* mov $con, R -> movz/movn */
r := 32
o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
case 33: /* movk $uimm16 << pos */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
d := p.From.Offset
if (d >> 16) != 0 {
o1 |= uint32(v)
case 38: /* clrex [$imm] */
- o1 = opimm(ctxt, int(p.As))
+ o1 = opimm(ctxt, p.As)
if p.To.Type == obj.TYPE_NONE {
o1 |= 0xF << 8
}
case 39: /* cbz R, rel */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= uint32(p.From.Reg & 31)
o1 |= uint32(brdist(ctxt, p, 0, 19, 2) << 5)
case 40: /* tbz */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
v := int32(p.From.Offset)
if v < 0 || v > 63 {
o1 |= uint32(p.Reg)
case 41: /* eret, nop, others with no operands */
- o1 = op0(ctxt, int(p.As))
+ o1 = op0(ctxt, p.As)
case 42: /* bfm R,r,s,R */
- o1 = opbfm(ctxt, int(p.As), int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg))
+ o1 = opbfm(ctxt, p.As, int(p.From.Offset), int(p.From3.Offset), int(p.Reg), int(p.To.Reg))
case 43: /* bfm aliases */
r := int(p.From.Offset)
}
case 44: /* extr $b, Rn, Rm, Rd */
- o1 = opextr(ctxt, int(p.As), int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg))
+ o1 = opextr(ctxt, p.As, int32(p.From.Offset), int(p.From3.Reg), int(p.Reg), int(p.To.Reg))
case 45: /* sxt/uxt[bhw] R,R; movT R,R -> sxtT R,R */
rf := int(p.From.Reg)
rt := int(p.To.Reg)
- as := int(p.As)
+ as := p.As
if rf == REGZERO {
as = AMOVWU /* clearer in disassembly */
}
}
case 46: /* cls */
- o1 = opbit(ctxt, int(p.As))
+ o1 = opbit(ctxt, p.As)
o1 |= uint32(p.From.Reg&31) << 5
o1 |= uint32(p.To.Reg & 31)
if r == 0 {
r = int(o.param)
}
- o2 = olsxrr(ctxt, int(p.As), REGTMP, r, int(p.From.Reg))
+ o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.From.Reg))
case 48: /* movT V(R), R -> ldrT (huge offset) */
o1 = omovlit(ctxt, AMOVW, p, &p.From, REGTMP)
if r == 0 {
r = int(o.param)
}
- o2 = olsxrr(ctxt, int(p.As), REGTMP, r, int(p.To.Reg))
+ o2 = olsxrr(ctxt, p.As, REGTMP, r, int(p.To.Reg))
case 50: /* sys/sysl */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
if (p.From.Offset &^ int64(SYSARG4(0x7, 0xF, 0xF, 0x7))) != 0 {
ctxt.Diag("illegal SYS argument\n%v", p)
}
case 51: /* dmb */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
if p.From.Type == obj.TYPE_CONST {
o1 |= uint32((p.From.Offset & 0xF) << 8)
}
case 52: /* hint */
- o1 = opirr(ctxt, int(p.As))
+ o1 = opirr(ctxt, p.As)
o1 |= uint32((p.From.Offset & 0x7F) << 5)
ctxt.Diag("bitmask immediate not implemented\n%v", p)
case 54: /* floating point arith */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
var rf int
if p.From.Type == obj.TYPE_CONST {
o1 |= (uint32(rf&31) << 16) | (uint32(r&31) << 5) | uint32(rt&31)
case 56: /* floating point compare */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
var rf int
if p.From.Type == obj.TYPE_CONST {
o1 |= uint32(rf&31)<<16 | uint32(rt&31)<<5
case 57: /* floating point conditional compare */
- o1 = oprrr(ctxt, int(p.As))
+ o1 = oprrr(ctxt, p.As)
cond := int(p.From.Reg)
nzcv := int(p.To.Offset)
o1 |= uint32(rf&31)<<16 | uint32(cond)<<12 | uint32(rt&31)<<5 | uint32(nzcv)
case 58: /* ldar/ldxr/ldaxr */
- o1 = opload(ctxt, int(p.As))
+ o1 = opload(ctxt, p.As)
o1 |= 0x1F << 16
o1 |= uint32(p.From.Reg) << 5
o1 |= uint32(p.To.Reg & 31)
case 59: /* stxr/stlxr */
- o1 = opstore(ctxt, int(p.As))
+ o1 = opstore(ctxt, p.As)
if p.RegTo2 != obj.REG_NONE {
o1 |= uint32(p.RegTo2&31) << 16
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRARM64
- o3 = olsr12u(ctxt, int32(opstr12(ctxt, int(p.As))), 0, REGTMP, int(p.From.Reg))
+ o3 = olsr12u(ctxt, int32(opstr12(ctxt, p.As)), 0, REGTMP, int(p.From.Reg))
case 65: /* movT addr,R -> adrp + add + movT (REGTMP), R */
o1 = ADR(1, 0, REGTMP)
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRARM64
- o3 = olsr12u(ctxt, int32(opldr12(ctxt, int(p.As))), 0, REGTMP, int(p.To.Reg))
+ o3 = olsr12u(ctxt, int32(opldr12(ctxt, p.As)), 0, REGTMP, int(p.To.Reg))
case 66: /* ldp O(R)!, (r1, r2); ldp (R)O!, (r1, r2) */
v := int32(p.From.Offset)
* also op Rn -> Rt
* also Rm*Rn op Ra -> Rd
*/
-func oprrr(ctxt *obj.Link, a int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADC:
return S64 | 0<<30 | 0<<29 | 0xd0<<21 | 0<<10
* imm -> Rd
* imm op Rn -> Rd
*/
-func opirr(ctxt *obj.Link, a int) uint32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
/* op $addcon, Rn, Rd */
case AMOVD, AADD:
return 0
}
-func opbit(ctxt *obj.Link, a int) uint32 {
+func opbit(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ACLS:
return S64 | OPBIT(5)
/*
* add/subtract extended register
*/
-func opxrrr(ctxt *obj.Link, a int) uint32 {
+func opxrrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return S64 | 0<<30 | 0<<29 | 0x0b<<24 | 0<<22 | 1<<21 | LSL0_64
return 0
}
-func opimm(ctxt *obj.Link, a int) uint32 {
+func opimm(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ASVC:
return 0xD4<<24 | 0<<21 | 1 /* imm16<<5 */
/*
* pc-relative branches
*/
-func opbra(ctxt *obj.Link, a int) uint32 {
+func opbra(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ABEQ:
return OPBcc(0x0)
return 0
}
-func opbrr(ctxt *obj.Link, a int) uint32 {
+func opbrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ABL:
return OPBLR(1) /* BLR */
return 0
}
-func op0(ctxt *obj.Link, a int) uint32 {
+func op0(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ADRPS:
return 0x6B<<25 | 5<<21 | 0x1F<<16 | 0x1F<<5
/*
* register offset
*/
-func opload(ctxt *obj.Link, a int) uint32 {
+func opload(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ALDAR:
return LDSTX(3, 1, 1, 0, 1) | 0x1F<<10
return 0
}
-func opstore(ctxt *obj.Link, a int) uint32 {
+func opstore(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case ASTLR:
return LDSTX(3, 1, 0, 0, 1) | 0x1F<<10
return uint32(o)
}
-func opldr12(ctxt *obj.Link, a int) uint32 {
+func opldr12(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return LDSTR12U(3, 0, 1) /* imm12<<10 | Rn<<5 | Rt */
return 0
}
-func opstr12(ctxt *obj.Link, a int) uint32 {
+func opstr12(ctxt *obj.Link, a obj.As) uint32 {
return LD2STR(opldr12(ctxt, a))
}
return uint32(o)
}
-func opldr9(ctxt *obj.Link, a int) uint32 {
+func opldr9(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return LDSTR9S(3, 0, 1) /* simm9<<12 | Rn<<5 | Rt */
return 0
}
-func opstr9(ctxt *obj.Link, a int) uint32 {
+func opstr9(ctxt *obj.Link, a obj.As) uint32 {
return LD2STR(opldr9(ctxt, a))
}
-func opldrpp(ctxt *obj.Link, a int) uint32 {
+func opldrpp(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return 3<<30 | 7<<27 | 0<<26 | 0<<24 | 1<<22 /* simm9<<12 | Rn<<5 | Rt */
/*
* load/store register (extended register)
*/
-func olsxrr(ctxt *obj.Link, as int, rt int, r1 int, r2 int) uint32 {
+func olsxrr(ctxt *obj.Link, as obj.As, rt int, r1 int, r2 int) uint32 {
ctxt.Diag("need load/store extended register\n%v", ctxt.Curp)
return 0xffffffff
}
/*
 * load a literal value into dr
*/
-func omovlit(ctxt *obj.Link, as int, p *obj.Prog, a *obj.Addr, dr int) uint32 {
+func omovlit(ctxt *obj.Link, as obj.As, p *obj.Prog, a *obj.Addr, dr int) uint32 {
var o1 int32
if p.Pcond == nil { /* not in literal pool */
aclass(ctxt, a)
return uint32(o1)
}
-func opbfm(ctxt *obj.Link, a int, r int, s int, rf int, rt int) uint32 {
+func opbfm(ctxt *obj.Link, a obj.As, r int, s int, rf int, rt int) uint32 {
var c uint32
o := opirr(ctxt, a)
if (o & (1 << 31)) == 0 {
return o
}
-func opextr(ctxt *obj.Link, a int, v int32, rn int, rm int, rt int) uint32 {
+func opextr(ctxt *obj.Link, a obj.As, v int32, rn int, rm int, rt int) uint32 {
var c uint32
o := opirr(ctxt, a)
if (o & (1 << 31)) != 0 {
/*
* size in log2(bytes)
*/
-func movesize(a int) int {
+func movesize(a obj.As) int {
switch a {
case AMOVD:
return 3
"math"
)
-var complements = []int16{
+var complements = []obj.As{
AADD: ASUB,
AADDW: ASUBW,
ASUB: AADD,
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
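relinv maps every conditional branch to its logical inverse (ABEQ to ABNE and so on); the layout pass below uses it to rewrite a branch whose target block has already been placed, so the expected path becomes the fall-through. A minimal, self-contained sketch of that idea, with toy opcode values rather than the real arm64 tables:

package main

import "fmt"

type As int16

const (
	ABEQ As = iota + 1
	ABNE
)

// relinv mirrors the pattern above: each condition maps to its inverse.
func relinv(a As) As {
	switch a {
	case ABEQ:
		return ABNE
	case ABNE:
		return ABEQ
	}
	return 0
}

func main() {
	fmt.Println(relinv(ABEQ) == ABNE) // true
}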
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AB {
q = p.Pcond
if q != nil {
if q == *last || q == nil {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AB
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
q = obj.Brchain(ctxt, p.Link)
if a != obj.ATEXT {
if q != nil && (q.Mark&FOLL != 0) {
- p.As = int16(relinv(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
q = p
}
- var o int
var q2 *obj.Prog
var retjmp *obj.LSym
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
cursym.Text = p
p.To.Class = 0
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
AWORD: true,
ADWORD: true,
ABL: true,
Pc int64
Lineno int32
Spadj int32
- As int16
+ As As // Assembler opcode.
Reg int16
RegTo2 int16 // 2nd register output operand
Mark uint16 // bitmask of arch-specific items
Regindex uint64 // registers used by addressing mode
}
-// Prog.as opcodes.
-// These are the portable opcodes, common to all architectures.
-// Each architecture defines many more arch-specific opcodes,
-// with values starting at A_ARCHSPECIFIC.
-// Each architecture adds an offset to this so each machine has
-// distinct space for its instructions. The offset is a power of
-// two so it can be masked to return to origin zero.
-// See the definitions of ABase386 etc.
+// An As denotes an assembler opcode.
+// There are some portable opcodes, declared here in package obj,
+// that are common to all architectures.
+// However, the majority of opcodes are arch-specific
+// and are declared in their respective architecture's subpackage.
+type As int16
+
+// These are the portable opcodes.
const (
- AXXX = 0 + iota
+ AXXX As = iota
ACALL
ACHECKNIL
ADATA
A_ARCHSPECIFIC
)
+// Each architecture is allotted a distinct subspace of opcode values
+// for declaring its arch-specific opcodes.
+// Within this subspace, the first arch-specific opcode should be
+// at offset A_ARCHSPECIFIC.
+//
+// Subspaces are aligned to a power of two so opcodes can be masked
+// with AMask and used as compact array indices.
+const (
+ ABase386 = (1 + iota) << 12
+ ABaseARM
+ ABaseAMD64
+ ABasePPC64
+ ABaseARM64
+ ABaseMIPS64
+
+ AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
+)
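The subspace arithmetic is easy to verify in isolation. A self-contained sketch, using a toy copy of the constants rather than importing cmd/internal/obj (which is not importable outside cmd):

package main

import "fmt"

type As int16

const (
	AXXX As = iota
	ACALL
	A_ARCHSPECIFIC // first opcode index available to each architecture
)

const (
	ABase386 = (1 + iota) << 12
	ABaseARM

	AMask = 1<<12 - 1
)

func main() {
	// How an architecture builds a global opcode from a table index i,
	// mirroring instructions[s] = obj.As(i) + obj.ABaseARM above.
	i := A_ARCHSPECIFIC + 3
	op := As(ABaseARM) + i
	fmt.Println(op&AMask == i)        // true: AMask recovers the per-arch index
	fmt.Println(op >= A_ARCHSPECIFIC) // true for every arch-specific opcode
}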
+
// An LSym is the sort of symbol that is written to an object file.
type LSym struct {
Name string
Assemble func(*Link, *LSym)
Follow func(*Link, *LSym)
Progedit func(*Link, *Prog)
- UnaryDst map[int]bool // Instruction takes one operand, a destination.
+ UnaryDst map[As]bool // Instruction takes one operand, a destination.
Minlc int
Ptrsize int
Regsize int
)
type Optab struct {
- as int16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
}
}
- ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+ ctxt.Diag("illegal combination %v %v %v %v", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3))
prasm(p)
if o == nil {
o = optab
}
return false
}
-func opset(a, b0 int16) {
+func opset(a, b0 obj.As) {
oprange[a&obj.AMask] = oprange[b0]
}
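opset depends on that masking: oprange is a compact table indexed by the opcode with its base stripped, so families of related opcodes can alias a single entry. A hedged sketch of the pattern, with a placeholder Optab standing in for the real operand-class rows (the real opset masks only its first argument; both are masked here to keep the sketch self-contained):

package main

import "fmt"

type As int16

const AMask = 1<<12 - 1

type Optab struct{ size int } // placeholder for the real rows

var oprange [AMask + 1][]Optab

func opset(a, b0 As) {
	oprange[a&AMask] = oprange[b0&AMask]
}

func main() {
	const AADD, AADDW As = 0x1001, 0x1002 // illustrative values
	oprange[AADD&AMask] = []Optab{{size: 4}}
	opset(AADDW, AADD) // AADDW reuses AADD's entries
	fmt.Println(len(oprange[AADDW&AMask])) // 1
}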
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case AABSF:
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 3: /* mov $soreg, r ==> or/add $i,o,r */
v := regoff(ctxt, &p.From)
r = int(p.To.Reg)
}
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 5: /* syscall */
- o1 = uint32(oprrr(ctxt, int(p.As)))
+ o1 = uint32(oprrr(ctxt, p.As))
case 6: /* beq r1,[r2],sbra */
v := int32(0)
if (v<<16)>>16 != v {
ctxt.Diag("short branch too far\n%v", p)
}
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(p.From.Reg), uint32(p.Reg))
// for ABFPT and ABFPF only: always fill delay slot with 0
// see comments in func preprocess for details.
o2 = 0
r = int(o.param)
}
v := regoff(ctxt, &p.To)
- o1 = OP_IRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.From.Reg))
+ o1 = OP_IRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.From.Reg))
case 8: /* mov soreg, r ==> lw o(r) */
r := int(p.From.Reg)
r = int(o.param)
}
v := regoff(ctxt, &p.From)
- o1 = OP_IRR(opirr(ctxt, -int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_IRR(opirr(ctxt, -p.As), uint32(v), uint32(r), uint32(p.To.Reg))
case 9: /* sll r1,[r2],r3 */
r := int(p.Reg)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
case 10: /* add $con,[r1],r2 ==> mov $con, t; add t,[r1],r2 */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 11: /* jmp lbra */
v := int32(0)
} else {
v = int32(p.Pcond.Pc) >> 2
}
- o1 = OP_JMP(opirr(ctxt, int(p.As)), uint32(v))
+ o1 = OP_JMP(opirr(ctxt, p.As), uint32(v))
if p.To.Sym == nil {
p.To.Sym = ctxt.Cursym.Text.From.Sym
p.To.Offset = p.Pcond.Pc
/* OP_SRR will use only the low 5 bits of the shift value */
if v >= 32 && vshift(p.As) {
- o1 = OP_SRR(opirr(ctxt, -int(p.As)), uint32(v-32), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(opirr(ctxt, -p.As), uint32(v-32), uint32(r), uint32(p.To.Reg))
} else {
- o1 = OP_SRR(opirr(ctxt, int(p.As)), uint32(v), uint32(r), uint32(p.To.Reg))
+ o1 = OP_SRR(opirr(ctxt, p.As), uint32(v), uint32(r), uint32(p.To.Reg))
}
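The -p.As spelling above is a MIPS convention inherited from the older assemblers: opirr selects the store-style encoding for an opcode and the load-style encoding for its negation (see case 8, mov soreg, r ==> lw o(r)). The trick stays legal only because As remains a signed integer type. A toy illustration with made-up encodings:

package main

import "fmt"

type As int16

const AMOVW As = 100 // illustrative, not the real mips value

func opirr(a As) uint32 {
	switch a {
	case AMOVW: // store form
		return 0x2B << 26
	case -AMOVW: // load form, requested by negating the opcode
		return 0x23 << 26
	}
	return 0
}

func main() {
	fmt.Printf("store %#x, load %#x\n", opirr(AMOVW), opirr(-AMOVW))
}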
case 18: /* jmp [r1],0(r2) */
if r == 0 {
r = int(o.param)
}
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.To.Reg), uint32(r))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(0), uint32(p.To.Reg), uint32(r))
rel := obj.Addrel(ctxt.Cursym)
rel.Off = int32(ctxt.Pc)
rel.Siz = 0
o1 = OP_RRR(a, uint32(REGZERO), uint32(p.From.Reg), uint32(REGZERO))
case 22: /* mul r1,r2 */
- o1 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
+ o1 = OP_RRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(REGZERO))
case 23: /* add $lcon,r1,r2 ==> lu+or+add */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o3 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o3 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 24: /* mov $ucon,r ==> lu r */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o2 = OP_RRR(oprrr(ctxt, int(p.As)), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
+ o2 = OP_RRR(oprrr(ctxt, p.As), uint32(REGTMP), uint32(r), uint32(p.To.Reg))
case 26: /* mov $lsext/auto/oreg,r ==> lu+or+add */
v := regoff(ctxt, &p.From)
if r == 0 {
r = int(p.To.Reg)
}
- o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+ o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
case 33: /* fabs fr1, fr3 */
- o1 = OP_FRRR(oprrr(ctxt, int(p.As)), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
+ o1 = OP_FRRR(oprrr(ctxt, p.As), uint32(0), uint32(p.From.Reg), uint32(p.To.Reg))
case 34: /* mov $con,fr ==> or/add $i,t; mov t,fr */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o4 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ o4 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
case 36: /* mov lext/auto/oreg,r ==> lw o(r30) */
v := regoff(ctxt, &p.From)
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(v>>16), uint32(REGZERO), uint32(REGTMP))
o2 = OP_IRR(opirr(ctxt, AOR), uint32(v), uint32(REGTMP), uint32(REGTMP))
o3 = OP_RRR(oprrr(ctxt, AADDVU), uint32(r), uint32(REGTMP), uint32(REGTMP))
- o4 = OP_IRR(opirr(ctxt, -int(p.As)), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ o4 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
case 37: /* movw r,mr */
a := SP(2, 0) | (4 << 21) /* mtc0 */
rel.Sym = p.To.Sym
rel.Add = p.To.Offset
rel.Type = obj.R_ADDRMIPS
- o3 = OP_IRR(opirr(ctxt, int(p.As)), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
+ o3 = OP_IRR(opirr(ctxt, p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg))
case 51: /* mov addr,r ==> lu + or + lw (REGTMP) */
o1 = OP_IRR(opirr(ctxt, ALUI), uint32(0), uint32(REGZERO), uint32(REGTMP))
rel.Sym = p.From.Sym
rel.Add = p.From.Offset
rel.Type = obj.R_ADDRMIPS
- o3 = OP_IRR(opirr(ctxt, -int(p.As)), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
+ o3 = OP_IRR(opirr(ctxt, -p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg))
}
out[0] = o1
return int32(vregoff(ctxt, a))
}
-func oprrr(ctxt *obj.Link, a int) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return OP(4, 0)
return 0
}
-func opirr(ctxt *obj.Link, a int) uint32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return SP(1, 0)
return 0
}
-func vshift(a int16) bool {
+func vshift(a obj.As) bool {
switch a {
case ASLLV,
ASRLV,
}
autosize := int32(0)
- var o int
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
autosize = int32(textstksiz + 8)
// instruction scheduling
q = nil // p - 1
q1 = cursym.Text // top of block
- o = 0 // count of instructions
+ o := 0 // count of instructions
for p = cursym.Text; p != nil; p = p1 {
p1 = p.Link
o++
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == AJMP {
q = p.Pcond
if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
if q == *last || (q.Mark&NOSCHED != 0) {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
a = AJMP
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
)
type Optab struct {
- as int16
+ as obj.As
a1 uint8
a2 uint8
a3 uint8
}
}
- ctxt.Diag("illegal combination %v %v %v %v %v", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+ ctxt.Diag("illegal combination %v %v %v %v %v", obj.Aconv(p.As), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
prasm(p)
if ops == nil {
ops = optab
}
return false
}
-func opset(a, b0 int16) {
+func opset(a, b0 obj.As) {
oprange[a&obj.AMask] = oprange[b0]
}
switch r {
default:
- ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(r))
log.Fatalf("bad code")
case ADCBF: /* unary indexed: op (b+a); op (b) */
return int32(vregoff(ctxt, a))
}
-func oprrr(ctxt *obj.Link, a int16) uint32 {
+func oprrr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return OPVCC(31, 266, 0, 0)
return OPVCC(31, 316, 0, 1)
}
- ctxt.Diag("bad r/r opcode %v", obj.Aconv(int(a)))
+ ctxt.Diag("bad r/r opcode %v", obj.Aconv(a))
return 0
}
-func opirr(ctxt *obj.Link, a int16) uint32 {
+func opirr(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AADD:
return OPVCC(14, 0, 0, 0)
return OPVCC(27, 0, 0, 0) /* XORIU */
}
- ctxt.Diag("bad opcode i/r %v", obj.Aconv(int(a)))
+ ctxt.Diag("bad opcode i/r %v", obj.Aconv(a))
return 0
}
/*
* load o(a),d
*/
-func opload(ctxt *obj.Link, a int16) uint32 {
+func opload(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVD:
return OPVCC(58, 0, 0, 0) /* ld */
return OPVCC(46, 0, 0, 0) /* lmw */
}
- ctxt.Diag("bad load opcode %v", obj.Aconv(int(a)))
+ ctxt.Diag("bad load opcode %v", obj.Aconv(a))
return 0
}
/*
* indexed load a(b),d
*/
-func oploadx(ctxt *obj.Link, a int16) uint32 {
+func oploadx(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVWZ:
return OPVCC(31, 23, 0, 0) /* lwzx */
return OPVCC(31, 53, 0, 0) /* ldux */
}
- ctxt.Diag("bad loadx opcode %v", obj.Aconv(int(a)))
+ ctxt.Diag("bad loadx opcode %v", obj.Aconv(a))
return 0
}
/*
* store s,o(d)
*/
-func opstore(ctxt *obj.Link, a int16) uint32 {
+func opstore(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVB, AMOVBZ:
return OPVCC(38, 0, 0, 0) /* stb */
return OPVCC(62, 0, 0, 1) /* stdu */
}
- ctxt.Diag("unknown store opcode %v", obj.Aconv(int(a)))
+ ctxt.Diag("unknown store opcode %v", obj.Aconv(a))
return 0
}
/*
* indexed store s,a(b)
*/
-func opstorex(ctxt *obj.Link, a int16) uint32 {
+func opstorex(ctxt *obj.Link, a obj.As) uint32 {
switch a {
case AMOVB, AMOVBZ:
return OPVCC(31, 215, 0, 0) /* stbx */
return OPVCC(31, 181, 0, 0) /* stdux */
}
- ctxt.Diag("unknown storex opcode %v", obj.Aconv(int(a)))
+ ctxt.Diag("unknown storex opcode %v", obj.Aconv(a))
return 0
}
autosize := int32(0)
var aoffset int
- var mov int
- var o int
+ var mov obj.As
var p1 *obj.Prog
var p2 *obj.Prog
for p := cursym.Text; p != nil; p = p.Link {
- o = int(p.As)
+ o := p.As
switch o {
case obj.ATEXT:
mov = AMOVD
q.To.Reg = REGTMP
q = obj.Appendp(ctxt, q)
- q.As = int16(mov)
+ q.As = mov
q.Lineno = p.Lineno
q.From.Type = obj.TYPE_REG
q.From.Reg = REGTMP
s.Text = firstp.Link
}
-func relinv(a int) int {
+func relinv(a obj.As) obj.As {
switch a {
case ABEQ:
return ABNE
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var r *obj.Prog
- var a int
- var b int
+ var b obj.As
var i int
loop:
if p == nil {
return
}
- a = int(p.As)
+ a := p.As
if a == ABR {
q = p.Pcond
if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
break
}
b = 0 /* set */
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
if a == ABR || a == obj.ARET || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
return
}
- r.As = int16(b)
+ r.As = b
r.Pcond = p.Link
r.Link = p.Pcond
if r.Link.Mark&FOLL == 0 {
a = ABR
q = ctxt.NewProg()
- q.As = int16(a)
+ q.As = a
q.Lineno = p.Lineno
q.To.Type = obj.TYPE_BRANCH
q.To.Offset = p.Pc
var buf bytes.Buffer
- fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), Aconv(int(p.As)), sc)
+ fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), Aconv(p.As), sc)
sep := "\t"
if p.From.Type != TYPE_NONE {
fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From))
return str
}
-/*
- Each architecture defines an instruction (A*) space as a unique
- integer range.
- Global opcodes like CALL start at 0; the architecture-specific ones
- start at a distinct, big-maskable offsets.
- Here is the list of architectures and the base of their opcode spaces.
-*/
-
-const (
- ABase386 = (1 + iota) << 12
- ABaseARM
- ABaseAMD64
- ABasePPC64
- ABaseARM64
- ABaseMIPS64
- AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
-)
-
type opSet struct {
- lo int
+ lo As
names []string
}
// RegisterOpcode binds a list of instruction names
// to a given instruction number range.
-func RegisterOpcode(lo int, Anames []string) {
+func RegisterOpcode(lo As, Anames []string) {
aSpace = append(aSpace, opSet{lo, Anames})
}
-func Aconv(a int) string {
- if 0 <= a && a < len(Anames) {
+func Aconv(a As) string {
+ if 0 <= a && int(a) < len(Anames) {
return Anames[a]
}
for i := range aSpace {
as := &aSpace[i]
- if as.lo <= a && a < as.lo+len(as.names) {
+ if as.lo <= a && int(a-as.lo) < len(as.names) {
return as.names[a-as.lo]
}
}
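Note the int conversions in the rewritten bounds checks: with a typed As, the old form `a < as.lo+len(as.names)` would no longer compile, since it mixes As and int. A compact, self-contained sketch of the registry and lookup, registering a fake architecture:

package main

import "fmt"

type As int16

type opSet struct {
	lo    As
	names []string
}

var aSpace []opSet

// RegisterOpcode binds names to the opcode range starting at lo,
// as each architecture does from an init function.
func RegisterOpcode(lo As, names []string) {
	aSpace = append(aSpace, opSet{lo, names})
}

func Aconv(a As) string {
	for _, s := range aSpace {
		if s.lo <= a && int(a-s.lo) < len(s.names) {
			return s.names[a-s.lo]
		}
	}
	return fmt.Sprintf("A???%d", a)
}

func main() {
	const base As = 1 << 12 // illustrative base, like ABase386
	RegisterOpcode(base, []string{"AND", "EOR", "SUB"})
	fmt.Println(Aconv(base + 2)) // SUB
}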
)
type Optab struct {
- as int16
+ as obj.As
ytab []ytab
prefix uint8
op [23]uint8
}
type Movtab struct {
- as int16
+ as obj.As
ft uint8
f3t uint8
tt uint8
return c + pad
}
-func spadjop(ctxt *obj.Link, p *obj.Prog, l int, q int) int {
+func spadjop(ctxt *obj.Link, p *obj.Prog, l, q obj.As) obj.As {
if p.Mode != 64 || ctxt.Arch.Ptrsize == 4 {
return l
}
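spadjop now returns a typed opcode: the 32-bit form unless the target is in 64-bit mode with 8-byte pointers, and the caller below flips ADD to SUB for negative stack adjustments. A trimmed sketch of that selection, with the mode fields simplified to a bool:

package main

import "fmt"

type As int16

const (
	AADDL As = iota + 1
	AADDQ
	ASUBL
	ASUBQ
)

// spadjop-style selection: 32-bit opcode unless in full 64-bit mode.
func spadjop(mode64 bool, l, q As) As {
	if !mode64 {
		return l
	}
	return q
}

func main() {
	v := int64(-16) // requested SP adjustment
	as := spadjop(true, AADDL, AADDQ)
	if v < 0 {
		as, v = spadjop(true, ASUBL, ASUBQ), -v
	}
	fmt.Println(as == ASUBQ, v) // true 16
}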
p.To.Reg = REG_SP
v = int32(-p.From.Offset)
p.From.Offset = int64(v)
- p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ p.As = spadjop(ctxt, p, AADDL, AADDQ)
if v < 0 {
- p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
v = -v
p.From.Offset = int64(v)
}
p.To.Reg = REG_SP
v = int32(-p.From.Offset)
p.From.Offset = int64(v)
- p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ p.As = spadjop(ctxt, p, AADDL, AADDQ)
if v < 0 {
- p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ p.As = spadjop(ctxt, p, ASUBL, ASUBQ)
v = -v
p.From.Offset = int64(v)
}
}
func instinit() {
- var c int
-
for i := 1; optab[i].as != 0; i++ {
- c = int(optab[i].as)
+ c := optab[i].as
if opindex[c&obj.AMask] != nil {
log.Fatalf("phase error in optab: %d (%v)", i, obj.Aconv(c))
}
// Rewrite p, if necessary, to access global data via the global offset table.
func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
- var add, lea, mov, reg int16
+ var add, lea, mov obj.As
+ var reg int16
if p.Mode == 64 {
add = AADDQ
lea = ALEAQ
// CMPQ SP, stackguard
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SP
indir_cx(ctxt, p, &p.To)
// CMPQ AX, stackguard
p = obj.Appendp(ctxt, p)
- p.As = int16(lea)
+ p.As = lea
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_SP
p.From.Offset = -(int64(framesize) - obj.StackSmall)
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_AX
indir_cx(ctxt, p, &p.To)
p = obj.Appendp(ctxt, p)
- p.As = int16(mov)
+ p.As = mov
indir_cx(ctxt, p, &p.From)
p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
if ctxt.Cursym.Cfunc != 0 {
p.To.Reg = REG_SI
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SI
p.To.Type = obj.TYPE_CONST
q1 = p
p = obj.Appendp(ctxt, p)
- p.As = int16(lea)
+ p.As = lea
p.From.Type = obj.TYPE_MEM
p.From.Reg = REG_SP
p.From.Offset = obj.StackGuard
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(sub)
+ p.As = sub
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_SI
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_AX
p = obj.Appendp(ctxt, p)
- p.As = int16(cmp)
+ p.As = cmp
p.From.Type = obj.TYPE_REG
p.From.Reg = REG_AX
p.To.Type = obj.TYPE_CONST
s.Text = firstp.Link
}
-func nofollow(a int) bool {
+func nofollow(a obj.As) bool {
switch a {
case obj.AJMP,
obj.ARET,
return false
}
-func pushpop(a int) bool {
+func pushpop(a obj.As) bool {
switch a {
case APUSHL,
APUSHFL,
return false
}
-func relinv(a int16) int16 {
+func relinv(a obj.As) obj.As {
switch a {
case AJEQ:
return AJNE
return AJOS
}
- log.Fatalf("unknown relation: %s", obj.Aconv(int(a)))
+ log.Fatalf("unknown relation: %s", obj.Aconv(a))
return 0
}
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
var q *obj.Prog
var i int
- var a int
+ var a obj.As
loop:
if p == nil {
if q == *last {
break
}
- a = int(q.As)
+ a = q.As
if a == obj.ANOP {
i--
continue
q.Mark |= DONE
(*last).Link = q
*last = q
- if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
+ if q.As != a || q.Pcond == nil || q.Pcond.Mark&DONE != 0 {
continue
}
(*last).Link = p
*last = p
- a = int(p.As)
+ a = p.As
/* continue loop with what comes after p */
if nofollow(a) {
* expect conditional jump to be taken.
* rewrite so that's the fall-through case.
*/
- p.As = relinv(int16(a))
+ p.As = relinv(a)
q = p.Link
p.Link = p.Pcond
q = p.Link
if q.Mark&DONE != 0 {
if a != ALOOP {
- p.As = relinv(int16(a))
+ p.As = relinv(a)
p.Link = p.Pcond
p.Pcond = q
}
goto loop
}
-var unaryDst = map[int]bool{
+var unaryDst = map[obj.As]bool{
ABSWAPL: true,
ABSWAPQ: true,
ACMPXCHG8B: true,