"go/doc",
"go/build",
"cmd/internal/obj",
+ "cmd/internal/obj/arm",
+ "cmd/internal/obj/i386",
+ "cmd/internal/obj/ppc64",
"cmd/internal/obj/x86",
"cmd/objwriter",
"cmd/go",
// packages supporting the commands.
var bootstrapDirs = []string{
"internal/obj",
+ "internal/obj/arm",
+ "internal/obj/i386",
+ "internal/obj/ppc64",
"internal/obj/x86",
"objwriter",
}
--- /dev/null
+// Inferno utils/5c/5.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/5.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+// list[5689].c
+
+// obj.c
+
+// objfile.c
+
+// pass.c
+
+// pcln.c
+
+// sym.c
+
+// TODO(ality): remove this workaround.
+// It's here because Pconv in liblink/list?.c references %L.
+
+// Symbol-table sizes and the general-register count.
+// NREG = 16 matches the ARM general-purpose register file R0-R15
+// (REGPC = 15 below).
+const (
+	NSNAME = 8
+	NSYM = 50
+	NREG = 16
+)
+
+/* -1 disables use of REGARG */
+const (
+	REGARG = -1
+)
+
+// Fixed register assignments shared by the compilers and this assembler.
+// REGSP/REGLINK/REGPC follow the ARM convention (R13/R14/R15);
+// REGTMP is the scratch register used by multi-instruction expansions.
+const (
+	REGRET = 0
+	REGEXT = 10
+	REGG = REGEXT - 0
+	REGM = REGEXT - 1
+	REGTMP = 11
+	REGSP = 13
+	REGLINK = 14
+	REGPC = 15
+	NFREG = 16
+	FREGRET = 0
+	FREGEXT = 7
+	FREGTMP = 15
+)
+
+/* compiler allocates register variables F0 up */
+/* compiler allocates external registers F7 down */
+// NOTE(review): the two comments above describe the floating-point
+// registers defined in the previous block (FREGRET up / FREGEXT down);
+// they were carried over from the C header in this position.
+//
+// Operand classes (C_*), used to classify Prog operands for the optab
+// lookup; cnames5 names them for diagnostics and must stay in sync.
+const (
+	C_NONE = iota
+	C_REG
+	C_REGREG
+	C_REGREG2
+	C_SHIFT
+	C_FREG
+	C_PSR
+	C_FCR
+	C_RCON
+	C_NCON
+	C_SCON
+	C_LCON
+	C_LCONADDR
+	C_ZFCON
+	C_SFCON
+	C_LFCON
+	C_RACON
+	C_LACON
+	C_SBRA
+	C_LBRA
+	C_HAUTO
+	C_FAUTO
+	C_HFAUTO
+	C_SAUTO
+	C_LAUTO
+	C_HOREG
+	C_FOREG
+	C_HFOREG
+	C_SOREG
+	C_ROREG
+	C_SROREG
+	C_LOREG
+	C_PC
+	C_SP
+	C_HREG
+	C_ADDR
+	C_GOK
+	C_NCLASS
+)
+
+// Opcode enumeration (A*). anames5 is indexed by these values and must
+// be kept in exact one-to-one correspondence with this list.
+const (
+	AXXX = iota
+	AAND
+	AEOR
+	ASUB
+	ARSB
+	AADD
+	AADC
+	ASBC
+	ARSC
+	ATST
+	ATEQ
+	ACMP
+	ACMN
+	AORR
+	ABIC
+	AMVN
+	AB
+	ABL
+	ABEQ
+	ABNE
+	ABCS
+	ABHS
+	ABCC
+	ABLO
+	ABMI
+	ABPL
+	ABVS
+	ABVC
+	ABHI
+	ABLS
+	ABGE
+	ABLT
+	ABGT
+	ABLE
+	AMOVWD
+	AMOVWF
+	AMOVDW
+	AMOVFW
+	AMOVFD
+	AMOVDF
+	AMOVF
+	AMOVD
+	ACMPF
+	ACMPD
+	AADDF
+	AADDD
+	ASUBF
+	ASUBD
+	AMULF
+	AMULD
+	ADIVF
+	ADIVD
+	ASQRTF
+	ASQRTD
+	AABSF
+	AABSD
+	ASRL
+	ASRA
+	ASLL
+	AMULU
+	ADIVU
+	AMUL
+	ADIV
+	AMOD
+	AMODU
+	AMOVB
+	AMOVBS
+	AMOVBU
+	AMOVH
+	AMOVHS
+	AMOVHU
+	AMOVW
+	AMOVM
+	ASWPBU
+	ASWPW
+	ANOP
+	ARFE
+	ASWI
+	AMULA
+	ADATA
+	AGLOBL
+	AGOK
+	AHISTORY
+	ANAME
+	ARET
+	ATEXT
+	AWORD
+	ADYNT_
+	AINIT_
+	ABCASE
+	ACASE
+	AEND
+	AMULL
+	AMULAL
+	AMULLU
+	AMULALU
+	ABX
+	ABXRET
+	ADWORD
+	ASIGNAME
+	ALDREX
+	ASTREX
+	ALDREXD
+	ASTREXD
+	APLD
+	AUNDEF
+	ACLZ
+	AMULWT
+	AMULWB
+	AMULAWT
+	AMULAWB
+	AUSEFIELD
+	ATYPE
+	AFUNCDATA
+	APCDATA
+	ACHECKNIL
+	AVARDEF
+	AVARKILL
+	ADUFFCOPY
+	ADUFFZERO
+	ADATABUNDLE
+	ADATABUNDLEEND
+	AMRC
+	ALAST
+)
+
+/* scond byte */
+// Low 4 bits hold the ARM condition code (C_SCOND_*); the upper bits are
+// the S/P/W/F/U modifier flags and the shift-kind encoding.
+const (
+	C_SCOND = (1 << 4) - 1
+	C_SBIT = 1 << 4
+	C_PBIT = 1 << 5
+	C_WBIT = 1 << 6
+	C_FBIT = 1 << 7
+	C_UBIT = 1 << 7
+	C_SCOND_EQ = 0
+	C_SCOND_NE = 1
+	C_SCOND_HS = 2
+	C_SCOND_LO = 3
+	C_SCOND_MI = 4
+	C_SCOND_PL = 5
+	C_SCOND_VS = 6
+	C_SCOND_VC = 7
+	C_SCOND_HI = 8
+	C_SCOND_LS = 9
+	C_SCOND_GE = 10
+	C_SCOND_LT = 11
+	C_SCOND_GT = 12
+	C_SCOND_LE = 13
+	C_SCOND_NONE = 14
+	C_SCOND_NV = 15
+	SHIFT_LL = 0 << 5
+	SHIFT_LR = 1 << 5
+	SHIFT_AR = 2 << 5
+	SHIFT_RR = 3 << 5
+)
+
+// Operand/addressing types (D_*) for obj.Addr.Type_/Name.
+// The values are expressed relative to D_NONE and their order (including
+// D_EXTERN..D_PARAM appearing after D_REGREG2) mirrors the original C
+// header — do not renumber.
+const (
+	D_GOK = 0
+	D_NONE = 1
+	D_BRANCH = D_NONE + 1
+	D_OREG = D_NONE + 2
+	D_CONST = D_NONE + 7
+	D_FCONST = D_NONE + 8
+	D_SCONST = D_NONE + 9
+	D_PSR = D_NONE + 10
+	D_REG = D_NONE + 12
+	D_FREG = D_NONE + 13
+	D_FILE = D_NONE + 16
+	D_OCONST = D_NONE + 17
+	D_FILE1 = D_NONE + 18
+	D_SHIFT = D_NONE + 19
+	D_FPCR = D_NONE + 20
+	D_REGREG = D_NONE + 21
+	D_ADDR = D_NONE + 22
+	D_SBIG = D_NONE + 23
+	D_CONST2 = D_NONE + 24
+	D_REGREG2 = D_NONE + 25
+	D_EXTERN = D_NONE + 3
+	D_STATIC = D_NONE + 4
+	D_AUTO = D_NONE + 5
+	D_PARAM = D_NONE + 6
+	D_LAST = D_NONE + 26
+)
+
+/*
+ * this is the ranlib header
+ */
+var SYMDEF string
--- /dev/null
+package arm
+
+// anames5 gives the assembler name for each opcode; it is indexed by the
+// A* constants in 5.out.go and must stay in exact correspondence with
+// that enumeration.
+var anames5 = []string{
+	"XXX",
+	"AND",
+	"EOR",
+	"SUB",
+	"RSB",
+	"ADD",
+	"ADC",
+	"SBC",
+	"RSC",
+	"TST",
+	"TEQ",
+	"CMP",
+	"CMN",
+	"ORR",
+	"BIC",
+	"MVN",
+	"B",
+	"BL",
+	"BEQ",
+	"BNE",
+	"BCS",
+	"BHS",
+	"BCC",
+	"BLO",
+	"BMI",
+	"BPL",
+	"BVS",
+	"BVC",
+	"BHI",
+	"BLS",
+	"BGE",
+	"BLT",
+	"BGT",
+	"BLE",
+	"MOVWD",
+	"MOVWF",
+	"MOVDW",
+	"MOVFW",
+	"MOVFD",
+	"MOVDF",
+	"MOVF",
+	"MOVD",
+	"CMPF",
+	"CMPD",
+	"ADDF",
+	"ADDD",
+	"SUBF",
+	"SUBD",
+	"MULF",
+	"MULD",
+	"DIVF",
+	"DIVD",
+	"SQRTF",
+	"SQRTD",
+	"ABSF",
+	"ABSD",
+	"SRL",
+	"SRA",
+	"SLL",
+	"MULU",
+	"DIVU",
+	"MUL",
+	"DIV",
+	"MOD",
+	"MODU",
+	"MOVB",
+	"MOVBS",
+	"MOVBU",
+	"MOVH",
+	"MOVHS",
+	"MOVHU",
+	"MOVW",
+	"MOVM",
+	"SWPBU",
+	"SWPW",
+	"NOP",
+	"RFE",
+	"SWI",
+	"MULA",
+	"DATA",
+	"GLOBL",
+	"GOK",
+	"HISTORY",
+	"NAME",
+	"RET",
+	"TEXT",
+	"WORD",
+	"DYNT_",
+	"INIT_",
+	"BCASE",
+	"CASE",
+	"END",
+	"MULL",
+	"MULAL",
+	"MULLU",
+	"MULALU",
+	"BX",
+	"BXRET",
+	"DWORD",
+	"SIGNAME",
+	"LDREX",
+	"STREX",
+	"LDREXD",
+	"STREXD",
+	"PLD",
+	"UNDEF",
+	"CLZ",
+	"MULWT",
+	"MULWB",
+	"MULAWT",
+	"MULAWB",
+	"USEFIELD",
+	"TYPE",
+	"FUNCDATA",
+	"PCDATA",
+	"CHECKNIL",
+	"VARDEF",
+	"VARKILL",
+	"DUFFCOPY",
+	"DUFFZERO",
+	"DATABUNDLE",
+	"DATABUNDLEEND",
+	"MRC",
+	"LAST",
+}
+
+// cnames5 names the C_* operand classes for diagnostics; the first
+// C_NCLASS entries are indexed by the C_* constants and must match that
+// enumeration. The entries after "NCLASS" appear to have been carried
+// over mechanically from the C enum source (they reproduce the scond-bit
+// definitions verbatim) — TODO(review): confirm they are never indexed
+// beyond C_NCLASS.
+var cnames5 = []string{
+	"NONE",
+	"REG",
+	"REGREG",
+	"REGREG2",
+	"SHIFT",
+	"FREG",
+	"PSR",
+	"FCR",
+	"RCON",
+	"NCON",
+	"SCON",
+	"LCON",
+	"LCONADDR",
+	"ZFCON",
+	"SFCON",
+	"LFCON",
+	"RACON",
+	"LACON",
+	"SBRA",
+	"LBRA",
+	"HAUTO",
+	"FAUTO",
+	"HFAUTO",
+	"SAUTO",
+	"LAUTO",
+	"HOREG",
+	"FOREG",
+	"HFOREG",
+	"SOREG",
+	"ROREG",
+	"SROREG",
+	"LOREG",
+	"PC",
+	"SP",
+	"HREG",
+	"ADDR",
+	"GOK",
+	"NCLASS",
+	"SCOND = (1<<4)-1",
+	"SBIT = 1<<4",
+	"PBIT = 1<<5",
+	"WBIT = 1<<6",
+	"FBIT = 1<<7",
+	"UBIT = 1<<7",
+	"SCOND_EQ = 0",
+	"SCOND_NE = 1",
+	"SCOND_HS = 2",
+	"SCOND_LO = 3",
+	"SCOND_MI = 4",
+	"SCOND_PL = 5",
+	"SCOND_VS = 6",
+	"SCOND_VC = 7",
+	"SCOND_HI = 8",
+	"SCOND_LS = 9",
+	"SCOND_GE = 10",
+	"SCOND_LT = 11",
+	"SCOND_GT = 12",
+	"SCOND_LE = 13",
+	"SCOND_NONE = 14",
+	"SCOND_NV = 15",
+}
+
+// dnames5 names the D_* operand types; only GOK and NONE are filled in.
+var dnames5 = []string{
+	D_GOK: "GOK",
+	D_NONE: "NONE",
+}
--- /dev/null
+// Inferno utils/5l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "math"
+ "sort"
+)
+
+// Optab describes one accepted instruction shape: an opcode together
+// with the classes of its (up to) three operands, and how a matching
+// Prog is laid out and encoded.
+type Optab struct {
+	as uint8       // opcode (A* constant)
+	a1 uint8       // class of the from operand (C_*)
+	a2 int8        // class of the middle (Prog.Reg) operand; C_NONE if absent
+	a3 uint8       // class of the to operand (C_*)
+	type_ uint8    // encoding case number dispatched on by asmout
+	size int8      // encoded size in bytes
+	param int8     // implicit base register (e.g. REGSP) for auto operands
+	flag int8      // layout flags: LFROM, LTO, LPOOL, LPCREL
+	pcrelsiz uint8 // size in bytes of the PC-relative part when LPCREL is set — TODO(review): confirm exact use in asmout (not visible here)
+}
+
+// Oprang is the slice of optab entries matching a single opcode; the
+// oprange table below is filled with these by buildop (invoked lazily
+// from span5).
+type Oprang struct {
+	start []Optab
+	stop []Optab
+}
+
+type Opcross [32][2][32]uint8
+
+// Layout flags stored in Optab.flag, consulted by span5.
+const (
+	LFROM = 1 << 0  // from operand needs a literal-pool entry (addpool on p.From)
+	LTO = 1 << 1    // to operand needs a literal-pool entry (addpool on p.To)
+	LPOOL = 1 << 2  // flush the literal pool after this instruction (if unconditional)
+	LPCREL = 1 << 3 // instruction contains a PC-relative reference of Optab.pcrelsiz bytes
+)
+
+// optab lists every instruction form accepted by the ARM assembler:
+// opcode plus operand classes, with the asmout case (type_) and byte
+// size used to encode it. oplook matches each Prog against these
+// entries via the oprange index. Entries with size 0 are pseudo-ops
+// (TEXT, FUNCDATA, PCDATA, NOP, USEFIELD).
+var optab = []Optab{
+	/* struct Optab:
+	OPCODE, from, prog->reg, to, type,size,param,flag */
+	Optab{ATEXT, C_ADDR, C_NONE, C_LCON, 0, 0, 0, 0, 0},
+	Optab{ATEXT, C_ADDR, C_REG, C_LCON, 0, 0, 0, 0, 0},
+	Optab{AADD, C_REG, C_REG, C_REG, 1, 4, 0, 0, 0},
+	Optab{AADD, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+	Optab{AMVN, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+	Optab{ACMP, C_REG, C_REG, C_NONE, 1, 4, 0, 0, 0},
+	Optab{AADD, C_RCON, C_REG, C_REG, 2, 4, 0, 0, 0},
+	Optab{AADD, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+	Optab{AMOVW, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+	Optab{AMVN, C_RCON, C_NONE, C_REG, 2, 4, 0, 0, 0},
+	Optab{ACMP, C_RCON, C_REG, C_NONE, 2, 4, 0, 0, 0},
+	Optab{AADD, C_SHIFT, C_REG, C_REG, 3, 4, 0, 0, 0},
+	Optab{AADD, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
+	Optab{AMVN, C_SHIFT, C_NONE, C_REG, 3, 4, 0, 0, 0},
+	Optab{ACMP, C_SHIFT, C_REG, C_NONE, 3, 4, 0, 0, 0},
+	Optab{AMOVW, C_RACON, C_NONE, C_REG, 4, 4, REGSP, 0, 0},
+	Optab{AB, C_NONE, C_NONE, C_SBRA, 5, 4, 0, LPOOL, 0},
+	Optab{ABL, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
+	Optab{ABX, C_NONE, C_NONE, C_SBRA, 74, 20, 0, 0, 0},
+	Optab{ABEQ, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0},
+	Optab{AB, C_NONE, C_NONE, C_ROREG, 6, 4, 0, LPOOL, 0},
+	Optab{ABL, C_NONE, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
+	Optab{ABL, C_REG, C_NONE, C_ROREG, 7, 4, 0, 0, 0},
+	Optab{ABX, C_NONE, C_NONE, C_ROREG, 75, 12, 0, 0, 0},
+	Optab{ABXRET, C_NONE, C_NONE, C_ROREG, 76, 4, 0, 0, 0},
+	Optab{ASLL, C_RCON, C_REG, C_REG, 8, 4, 0, 0, 0},
+	Optab{ASLL, C_RCON, C_NONE, C_REG, 8, 4, 0, 0, 0},
+	Optab{ASLL, C_REG, C_NONE, C_REG, 9, 4, 0, 0, 0},
+	Optab{ASLL, C_REG, C_REG, C_REG, 9, 4, 0, 0, 0},
+	Optab{ASWI, C_NONE, C_NONE, C_NONE, 10, 4, 0, 0, 0},
+	Optab{ASWI, C_NONE, C_NONE, C_LOREG, 10, 4, 0, 0, 0},
+	Optab{ASWI, C_NONE, C_NONE, C_LCON, 10, 4, 0, 0, 0},
+	Optab{AWORD, C_NONE, C_NONE, C_LCON, 11, 4, 0, 0, 0},
+	Optab{AWORD, C_NONE, C_NONE, C_LCONADDR, 11, 4, 0, 0, 0},
+	Optab{AWORD, C_NONE, C_NONE, C_ADDR, 11, 4, 0, 0, 0},
+	Optab{AMOVW, C_NCON, C_NONE, C_REG, 12, 4, 0, 0, 0},
+	Optab{AMOVW, C_LCON, C_NONE, C_REG, 12, 4, 0, LFROM, 0},
+	Optab{AMOVW, C_LCONADDR, C_NONE, C_REG, 12, 4, 0, LFROM | LPCREL, 4},
+	Optab{AADD, C_NCON, C_REG, C_REG, 13, 8, 0, 0, 0},
+	Optab{AADD, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
+	Optab{AMVN, C_NCON, C_NONE, C_REG, 13, 8, 0, 0, 0},
+	Optab{ACMP, C_NCON, C_REG, C_NONE, 13, 8, 0, 0, 0},
+	Optab{AADD, C_LCON, C_REG, C_REG, 13, 8, 0, LFROM, 0},
+	Optab{AADD, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
+	Optab{AMVN, C_LCON, C_NONE, C_REG, 13, 8, 0, LFROM, 0},
+	Optab{ACMP, C_LCON, C_REG, C_NONE, 13, 8, 0, LFROM, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_REG, 58, 4, 0, 0, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_REG, 1, 4, 0, 0, 0},
+	Optab{AMOVHS, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+	Optab{AMOVHU, C_REG, C_NONE, C_REG, 14, 8, 0, 0, 0},
+	Optab{AMUL, C_REG, C_REG, C_REG, 15, 4, 0, 0, 0},
+	Optab{AMUL, C_REG, C_NONE, C_REG, 15, 4, 0, 0, 0},
+	Optab{ADIV, C_REG, C_REG, C_REG, 16, 4, 0, 0, 0},
+	Optab{ADIV, C_REG, C_NONE, C_REG, 16, 4, 0, 0, 0},
+	Optab{AMULL, C_REG, C_REG, C_REGREG, 17, 4, 0, 0, 0},
+	Optab{AMULA, C_REG, C_REG, C_REGREG2, 17, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_SAUTO, 20, 4, REGSP, 0, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_SOREG, 20, 4, 0, 0, 0},
+	Optab{AMOVW, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
+	Optab{AMOVW, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
+	Optab{AMOVBU, C_SAUTO, C_NONE, C_REG, 21, 4, REGSP, 0, 0},
+	Optab{AMOVBU, C_SOREG, C_NONE, C_REG, 21, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVB, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVBS, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVBU, C_REG, C_NONE, C_LAUTO, 30, 8, REGSP, LTO, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_LOREG, 30, 8, 0, LTO, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_ADDR, 64, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVW, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
+	Optab{AMOVW, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
+	Optab{AMOVW, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVBU, C_LAUTO, C_NONE, C_REG, 31, 8, REGSP, LFROM, 0},
+	Optab{AMOVBU, C_LOREG, C_NONE, C_REG, 31, 8, 0, LFROM, 0},
+	Optab{AMOVBU, C_ADDR, C_NONE, C_REG, 65, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVW, C_LACON, C_NONE, C_REG, 34, 8, REGSP, LFROM, 0},
+	Optab{AMOVW, C_PSR, C_NONE, C_REG, 35, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_PSR, 36, 4, 0, 0, 0},
+	Optab{AMOVW, C_RCON, C_NONE, C_PSR, 37, 4, 0, 0, 0},
+	Optab{AMOVM, C_LCON, C_NONE, C_SOREG, 38, 4, 0, 0, 0},
+	Optab{AMOVM, C_SOREG, C_NONE, C_LCON, 39, 4, 0, 0, 0},
+	Optab{ASWPW, C_SOREG, C_REG, C_REG, 40, 4, 0, 0, 0},
+	Optab{ARFE, C_NONE, C_NONE, C_NONE, 41, 4, 0, 0, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_FAUTO, 50, 4, REGSP, 0, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_FOREG, 50, 4, 0, 0, 0},
+	Optab{AMOVF, C_FAUTO, C_NONE, C_FREG, 51, 4, REGSP, 0, 0},
+	Optab{AMOVF, C_FOREG, C_NONE, C_FREG, 51, 4, 0, 0, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_LAUTO, 52, 12, REGSP, LTO, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_LOREG, 52, 12, 0, LTO, 0},
+	Optab{AMOVF, C_LAUTO, C_NONE, C_FREG, 53, 12, REGSP, LFROM, 0},
+	Optab{AMOVF, C_LOREG, C_NONE, C_FREG, 53, 12, 0, LFROM, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_ADDR, 68, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVF, C_ADDR, C_NONE, C_FREG, 69, 8, 0, LFROM | LPCREL, 4},
+	Optab{AADDF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
+	Optab{AADDF, C_FREG, C_REG, C_FREG, 54, 4, 0, 0, 0},
+	Optab{AMOVF, C_FREG, C_NONE, C_FREG, 54, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_FCR, 56, 4, 0, 0, 0},
+	Optab{AMOVW, C_FCR, C_NONE, C_REG, 57, 4, 0, 0, 0},
+	Optab{AMOVW, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
+	Optab{AMOVBU, C_SHIFT, C_NONE, C_REG, 59, 4, 0, 0, 0},
+	Optab{AMOVB, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
+	Optab{AMOVBS, C_SHIFT, C_NONE, C_REG, 60, 4, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+	Optab{AMOVB, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+	Optab{AMOVBS, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+	Optab{AMOVBU, C_REG, C_NONE, C_SHIFT, 61, 4, 0, 0, 0},
+	Optab{ACASE, C_REG, C_NONE, C_NONE, 62, 4, 0, LPCREL, 8},
+	Optab{ABCASE, C_NONE, C_NONE, C_SBRA, 63, 4, 0, LPCREL, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+	Optab{AMOVHS, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+	Optab{AMOVHS, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+	Optab{AMOVHU, C_REG, C_NONE, C_HAUTO, 70, 4, REGSP, 0, 0},
+	Optab{AMOVHU, C_REG, C_NONE, C_HOREG, 70, 4, 0, 0, 0},
+	Optab{AMOVB, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+	Optab{AMOVB, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+	Optab{AMOVBS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+	Optab{AMOVBS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+	Optab{AMOVH, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+	Optab{AMOVH, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+	Optab{AMOVHS, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+	Optab{AMOVHS, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+	Optab{AMOVHU, C_HAUTO, C_NONE, C_REG, 71, 4, REGSP, 0, 0},
+	Optab{AMOVHU, C_HOREG, C_NONE, C_REG, 71, 4, 0, 0, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+	Optab{AMOVH, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVHS, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+	Optab{AMOVHS, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+	Optab{AMOVHS, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVHU, C_REG, C_NONE, C_LAUTO, 72, 8, REGSP, LTO, 0},
+	Optab{AMOVHU, C_REG, C_NONE, C_LOREG, 72, 8, 0, LTO, 0},
+	Optab{AMOVHU, C_REG, C_NONE, C_ADDR, 94, 8, 0, LTO | LPCREL, 4},
+	Optab{AMOVB, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+	Optab{AMOVB, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+	Optab{AMOVB, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVBS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+	Optab{AMOVBS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+	Optab{AMOVBS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVH, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+	Optab{AMOVH, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+	Optab{AMOVH, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVHS, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+	Optab{AMOVHS, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+	Optab{AMOVHS, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+	Optab{AMOVHU, C_LAUTO, C_NONE, C_REG, 73, 8, REGSP, LFROM, 0},
+	Optab{AMOVHU, C_LOREG, C_NONE, C_REG, 73, 8, 0, LFROM, 0},
+	Optab{AMOVHU, C_ADDR, C_NONE, C_REG, 93, 8, 0, LFROM | LPCREL, 4},
+	Optab{ALDREX, C_SOREG, C_NONE, C_REG, 77, 4, 0, 0, 0},
+	Optab{ASTREX, C_SOREG, C_REG, C_REG, 78, 4, 0, 0, 0},
+	Optab{AMOVF, C_ZFCON, C_NONE, C_FREG, 80, 8, 0, 0, 0},
+	Optab{AMOVF, C_SFCON, C_NONE, C_FREG, 81, 4, 0, 0, 0},
+	Optab{ACMPF, C_FREG, C_REG, C_NONE, 82, 8, 0, 0, 0},
+	Optab{ACMPF, C_FREG, C_NONE, C_NONE, 83, 8, 0, 0, 0},
+	Optab{AMOVFW, C_FREG, C_NONE, C_FREG, 84, 4, 0, 0, 0},
+	Optab{AMOVWF, C_FREG, C_NONE, C_FREG, 85, 4, 0, 0, 0},
+	Optab{AMOVFW, C_FREG, C_NONE, C_REG, 86, 8, 0, 0, 0},
+	Optab{AMOVWF, C_REG, C_NONE, C_FREG, 87, 8, 0, 0, 0},
+	Optab{AMOVW, C_REG, C_NONE, C_FREG, 88, 4, 0, 0, 0},
+	Optab{AMOVW, C_FREG, C_NONE, C_REG, 89, 4, 0, 0, 0},
+	Optab{ATST, C_REG, C_NONE, C_NONE, 90, 4, 0, 0, 0},
+	Optab{ALDREXD, C_SOREG, C_NONE, C_REG, 91, 4, 0, 0, 0},
+	Optab{ASTREXD, C_SOREG, C_REG, C_REG, 92, 4, 0, 0, 0},
+	Optab{APLD, C_SOREG, C_NONE, C_NONE, 95, 4, 0, 0, 0},
+	Optab{AUNDEF, C_NONE, C_NONE, C_NONE, 96, 4, 0, 0, 0},
+	Optab{ACLZ, C_REG, C_NONE, C_REG, 97, 4, 0, 0, 0},
+	Optab{AMULWT, C_REG, C_REG, C_REG, 98, 4, 0, 0, 0},
+	Optab{AMULAWT, C_REG, C_REG, C_REGREG2, 99, 4, 0, 0, 0},
+	Optab{AUSEFIELD, C_ADDR, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+	Optab{APCDATA, C_LCON, C_NONE, C_LCON, 0, 0, 0, 0, 0},
+	Optab{AFUNCDATA, C_LCON, C_NONE, C_ADDR, 0, 0, 0, 0, 0},
+	Optab{ANOP, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0, 0},
+	Optab{ADUFFZERO, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
+	Optab{ADUFFCOPY, C_NONE, C_NONE, C_SBRA, 5, 4, 0, 0, 0}, // same as ABL
+
+	Optab{ADATABUNDLE, C_NONE, C_NONE, C_NONE, 100, 4, 0, 0, 0},
+	Optab{ADATABUNDLEEND, C_NONE, C_NONE, C_NONE, 100, 0, 0, 0, 0},
+	Optab{AXXX, C_NONE, C_NONE, C_NONE, 0, 4, 0, 0, 0},
+}
+
+// pool tracks the pending literal pool; start and size are byte offsets
+// used by checkpool to decide when the pool would fall out of range of
+// a 12-bit PC-relative offset.
+var pool struct {
+	start uint32
+	size uint32
+	extra uint32
+}
+
+// oprange, indexed by opcode, holds the optab slice for that opcode.
+// It is initialized lazily by buildop (called from span5 on first use).
+var oprange [ALAST]Oprang
+
+// xcmp is the operand-class compatibility matrix — presumably
+// xcmp[want][have] != 0 means class `have` satisfies `want`; filled in
+// by buildop (not visible here) — TODO(review): confirm orientation.
+var xcmp [C_GOK + 1][C_GOK + 1]uint8
+
+// zprg is the zero-value template Prog used when resetting operands
+// (see the p/q surgery in asmoutnacl).
+var zprg = obj.Prog{
+	As: AGOK,
+	Scond: C_SCOND_NONE,
+	Reg: NREG,
+	From: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+	To: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+}
+
+// deferreturn caches the runtime.deferreturn symbol, looked up on first
+// use in asmoutnacl.
+var deferreturn *obj.LSym
+
+// nocache discards any cached encoding decisions for p, forcing the
+// next oplook to reclassify both operands from scratch.
+func nocache(p *obj.Prog) {
+	p.To.Class = 0
+	p.From.Class = 0
+	p.Optab = 0
+}
+
+// casesz returns the size in bytes of a case statement including its
+// jump table: it sums instruction sizes starting at p (the ACASE) and
+// stops at the first instruction following the run of ABCASE entries,
+// which is not counted.
+func casesz(ctxt *obj.Link, p *obj.Prog) int32 {
+	sawCase := false
+	var size int32
+	for ; p != nil; p = p.Link {
+		if p.As == ABCASE {
+			sawCase = true
+		} else if sawCase {
+			break
+		}
+		size += int32(oplook(ctxt, p).size)
+	}
+	return size
+}
+
+// asmoutnacl assembles the instruction p. It replaces asmout for NaCl.
+// It returns the total number of bytes put in out, and it can change
+// p->pc if extra padding is necessary.
+// In rare cases, asmoutnacl might split p into two instructions.
+// origPC is the PC for this Prog (no padding is taken into account).
+//
+// The NaCl sandbox constraints visible below: code is padded so that
+// certain instructions do not straddle a 16-byte bundle, indirect
+// branch targets are masked with BIC $0xc000000f, and stores through
+// R13 re-mask the stack pointer with BIC $0xc0000000. Writes to R9 are
+// rejected outright.
+func asmoutnacl(ctxt *obj.Link, origPC int32, p *obj.Prog, o *Optab, out []uint32) int {
+
+	var size int
+	var reg int
+	var q *obj.Prog
+	var a *obj.Addr
+	var a2 *obj.Addr
+
+	size = int(o.size)
+
+	// instruction specific
+	switch p.As {
+
+	default:
+		if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+
+	case ADATABUNDLE, // align to 16-byte boundary
+		ADATABUNDLEEND: // zero width instruction, just to align next instruction to 16-byte boundary
+		p.Pc = (p.Pc + 15) &^ 15
+
+		if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+
+	case AUNDEF,
+		APLD:
+		size = 4
+		if out != nil {
+			switch p.As {
+			case AUNDEF:
+				out[0] = 0xe7fedef0 // NACL_INSTR_ARM_ABORT_NOW (UDF #0xEDE0)
+
+			case APLD:
+				out[0] = 0xe1a01001 // (MOVW R1, R1)
+				break
+			}
+		}
+
+	case AB,
+		ABL:
+		if p.To.Type_ != D_OREG {
+			if out != nil {
+				asmout(ctxt, p, o, out)
+			}
+		} else {
+
+			// Indirect branch: mask the target register, then BX/BLX.
+			if p.To.Offset != 0 || size != 4 || p.To.Reg >= 16 || p.To.Reg < 0 {
+				ctxt.Diag("unsupported instruction: %v", p)
+			}
+			if p.Pc&15 == 12 {
+				p.Pc += 4
+			}
+			if out != nil {
+				out[0] = (uint32(p.Scond)&C_SCOND)<<28 | 0x03c0013f | uint32(p.To.Reg)<<12 | uint32(p.To.Reg)<<16 // BIC $0xc000000f, Rx
+				if p.As == AB {
+					out[1] = (uint32(p.Scond)&C_SCOND)<<28 | 0x012fff10 | uint32(p.To.Reg) // BX Rx // ABL
+				} else {
+					out[1] = (uint32(p.Scond)&C_SCOND)<<28 | 0x012fff30 | uint32(p.To.Reg) // BLX Rx
+				}
+			}
+
+			size = 8
+		}
+
+		// align the last instruction (the actual BL) to the last instruction in a bundle
+		if p.As == ABL {
+
+			if deferreturn == nil {
+				deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+			}
+			if p.To.Sym == deferreturn {
+				// Calls to deferreturn land at the start of the NEXT
+				// bundle instead (origPC rounded up, plus one bundle).
+				p.Pc = ((int64(origPC) + 15) &^ 15) + 16 - int64(size)
+			} else {
+
+				p.Pc += (16 - ((p.Pc + int64(size)) & 15)) & 15
+			}
+		}
+
+	case ALDREX,
+		ALDREXD,
+		AMOVB,
+		AMOVBS,
+		AMOVBU,
+		AMOVD,
+		AMOVF,
+		AMOVH,
+		AMOVHS,
+		AMOVHU,
+		AMOVM,
+		AMOVW,
+		ASTREX,
+		ASTREXD:
+		if p.To.Type_ == D_REG && p.To.Reg == 15 && p.From.Reg == 13 { // MOVW.W x(R13), PC
+			if out != nil {
+				asmout(ctxt, p, o, out)
+			}
+			if size == 4 {
+				if out != nil {
+					// Note: 5c and 5g reg.c know that DIV/MOD smashes R12
+					// so that this return instruction expansion is valid.
+					out[0] = out[0] &^ 0x3000 // change PC to R12
+					out[1] = (uint32(p.Scond)&C_SCOND)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
+					out[2] = (uint32(p.Scond)&C_SCOND)<<28 | 0x012fff1c // BX R12
+				}
+
+				size += 8
+				if (p.Pc+int64(size))&15 == 4 {
+					p.Pc += 4
+				}
+				break
+			} else {
+
+				// if the instruction used more than 4 bytes, then it must have used a very large
+				// offset to update R13, so we need to additionally mask R13.
+				if out != nil {
+
+					out[size/4-1] &^= 0x3000 // change PC to R12
+					out[size/4] = (uint32(p.Scond)&C_SCOND)<<28 | 0x03cdd103 // BIC $0xc0000000, R13
+					out[size/4+1] = (uint32(p.Scond)&C_SCOND)<<28 | 0x03ccc13f // BIC $0xc000000f, R12
+					out[size/4+2] = (uint32(p.Scond)&C_SCOND)<<28 | 0x012fff1c // BX R12
+				}
+
+				// p->pc+size is only ok at 4 or 12 mod 16.
+				if (p.Pc+int64(size))%8 == 0 {
+
+					p.Pc += 4
+				}
+				size += 12
+				break
+			}
+		}
+
+		if p.To.Type_ == D_REG && p.To.Reg == 15 {
+			ctxt.Diag("unsupported instruction (move to another register and use indirect jump instead): %v", p)
+		}
+
+		if p.To.Type_ == D_OREG && p.To.Reg == 13 && (p.Scond&C_WBIT != 0) && size > 4 {
+			// function prolog with very large frame size: MOVW.W R14,-100004(R13)
+			// split it into two instructions:
+			// 	ADD $-100004, R13
+			// 	MOVW R14, 0(R13)
+			q = ctxt.Arch.Prg()
+
+			p.Scond &^= C_WBIT
+			*q = *p
+			a = &p.To
+			if p.To.Type_ == D_OREG {
+				a2 = &q.To
+			} else {
+
+				a2 = &q.From
+			}
+			nocache(q)
+			nocache(p)
+
+			// insert q after p
+			q.Link = p.Link
+
+			p.Link = q
+			q.Pcond = nil
+
+			// make p into ADD $X, R13
+			p.As = AADD
+
+			p.From = *a
+			p.From.Reg = NREG
+			p.From.Type_ = D_CONST
+			p.To = zprg.To
+			p.To.Type_ = D_REG
+			p.To.Reg = 13
+
+			// make q into p but load/store from 0(R13)
+			q.Spadj = 0
+
+			*a2 = zprg.From
+			a2.Type_ = D_OREG
+			a2.Reg = 13
+			a2.Sym = nil
+			a2.Offset = 0
+			size = int(oplook(ctxt, p).size)
+			break
+		}
+
+		if (p.To.Type_ == D_OREG && p.To.Reg != 13 && p.To.Reg != 9) || (p.From.Type_ == D_OREG && p.From.Reg != 13 && p.From.Reg != 9) { // MOVW Rx, X(Ry), y != 13 && y != 9 // MOVW X(Rx), Ry, x != 13 && x != 9
+			if p.To.Type_ == D_OREG {
+				a = &p.To
+			} else {
+
+				a = &p.From
+			}
+			reg = int(a.Reg)
+			if size == 4 {
+				// if addr.reg == NREG, then it is probably load from x(FP) with small x, no need to modify.
+				if reg == NREG {
+
+					if out != nil {
+						asmout(ctxt, p, o, out)
+					}
+				} else {
+
+					// Prepend an address mask; the load/store follows it.
+					if out != nil {
+						out[0] = (uint32(p.Scond)&C_SCOND)<<28 | 0x03c00103 | uint32(reg)<<16 | uint32(reg)<<12 // BIC $0xc0000000, Rx
+					}
+					if p.Pc&15 == 12 {
+						p.Pc += 4
+					}
+					size += 4
+					if out != nil {
+						asmout(ctxt, p, o, out[1:])
+					}
+				}
+
+				break
+			} else {
+
+				// if a load/store instruction takes more than 1 word to implement, then
+				// we need to seperate the instruction into two:
+				// 1. explicitly load the address into R11.
+				// 2. load/store from R11.
+				// This won't handle .W/.P, so we should reject such code.
+				if p.Scond&(C_PBIT|C_WBIT) != 0 {
+
+					ctxt.Diag("unsupported instruction (.P/.W): %v", p)
+				}
+				q = ctxt.Arch.Prg()
+				*q = *p
+				if p.To.Type_ == D_OREG {
+					a2 = &q.To
+				} else {
+
+					a2 = &q.From
+				}
+				nocache(q)
+				nocache(p)
+
+				// insert q after p
+				q.Link = p.Link
+
+				p.Link = q
+				q.Pcond = nil
+
+				// make p into MOVW $X(R), R11
+				p.As = AMOVW
+
+				p.From = *a
+				p.From.Type_ = D_CONST
+				p.To = zprg.To
+				p.To.Type_ = D_REG
+				p.To.Reg = 11
+
+				// make q into p but load/store from 0(R11)
+				*a2 = zprg.From
+
+				a2.Type_ = D_OREG
+				a2.Reg = 11
+				a2.Sym = nil
+				a2.Offset = 0
+				size = int(oplook(ctxt, p).size)
+				break
+			}
+		} else if out != nil {
+			asmout(ctxt, p, o, out)
+		}
+		break
+	}
+
+	// destination register specific
+	if p.To.Type_ == D_REG {
+
+		switch p.To.Reg {
+		case 9:
+			ctxt.Diag("invalid instruction, cannot write to R9: %v", p)
+
+		case 13:
+			// Any write to the stack pointer is followed by a re-mask.
+			if out != nil {
+				out[size/4] = 0xe3cdd103 // BIC $0xc0000000, R13
+			}
+			if (p.Pc+int64(size))&15 == 0 {
+				p.Pc += 4
+			}
+			size += 4
+			break
+		}
+	}
+
+	return size
+}
+
+// span5 lays out the function cursym: it assigns a pc to every Prog,
+// places and flushes literal pools, repeats the sizing pass until no pc
+// moves, and finally assembles the machine code into cursym.P (padding
+// gaps with MOVW R0, R0 under NaCl alignment).
+func span5(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var op *obj.Prog
+	var o *Optab
+	var m int
+	var bflag int
+	var i int
+	var v int
+	var times int
+	var c int32
+	var opc int32
+	var out [6 + 3]uint32
+	var bp []byte
+
+	p = cursym.Text
+	if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+		return
+	}
+
+	// Build the opcode dispatch tables on first use.
+	if oprange[AAND].start == nil {
+		buildop(ctxt)
+	}
+
+	ctxt.Cursym = cursym
+
+	ctxt.Autosize = int32(p.To.Offset + 4)
+	c = 0
+
+	// First pass: assign tentative pcs and emit literal pools. op trails
+	// p by one instruction so checkpool can re-scan from just before the
+	// flush point.
+	op = p
+	p = p.Link
+	for ; p != nil || ctxt.Blitrl != nil; (func() { op = p; p = p.Link })() {
+		if p == nil {
+			if checkpool(ctxt, op, 0) != 0 {
+				p = op
+				continue
+			}
+
+			// can't happen: blitrl is not nil, but checkpool didn't flushpool
+			ctxt.Diag("internal inconsistency")
+
+			break
+		}
+
+		ctxt.Curp = p
+		p.Pc = int64(c)
+		o = oplook(ctxt, p)
+		if ctxt.Headtype != obj.Hnacl {
+			m = int(o.size)
+		} else {
+
+			m = asmoutnacl(ctxt, c, p, o, nil)
+			c = int32(p.Pc)    // asmoutnacl might change pc for alignment
+			o = oplook(ctxt, p) // asmoutnacl might change p in rare cases
+		}
+
+		if m%4 != 0 || p.Pc%4 != 0 {
+			ctxt.Diag("!pc invalid: %v size=%d", p, m)
+		}
+
+		// must check literal pool here in case p generates many instructions
+		if ctxt.Blitrl != nil {
+
+			i = m
+			if p.As == ACASE {
+				i = int(casesz(ctxt, p))
+			}
+			if checkpool(ctxt, op, i) != 0 {
+				p = op
+				continue
+			}
+		}
+
+		if m == 0 && (p.As != AFUNCDATA && p.As != APCDATA && p.As != ADATABUNDLEEND && p.As != ANOP) {
+			ctxt.Diag("zero-width instruction\n%v", p)
+			continue
+		}
+
+		switch o.flag & (LFROM | LTO | LPOOL) {
+		case LFROM:
+			addpool(ctxt, p, &p.From)
+
+		case LTO:
+			addpool(ctxt, p, &p.To)
+
+		case LPOOL:
+			if p.Scond&C_SCOND == C_SCOND_NONE {
+				flushpool(ctxt, p, 0, 0)
+			}
+			break
+		}
+
+		// An unconditional jump through PC also ends the basic block:
+		// flush any pending pool behind it.
+		if p.As == AMOVW && p.To.Type_ == D_REG && p.To.Reg == REGPC && p.Scond&C_SCOND == C_SCOND_NONE {
+			flushpool(ctxt, p, 0, 0)
+		}
+		c += int32(m)
+	}
+
+	cursym.Size = int64(c)
+
+	/*
+	 * if any procedure is large enough to
+	 * generate a large SBRA branch, then
+	 * generate extra passes putting branches
+	 * around jmps to fix. this is rare.
+	 */
+	times = 0
+
+	for {
+		if ctxt.Debugvlog != 0 {
+			fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
+		}
+		bflag = 0
+		c = 0
+		times++
+		cursym.Text.Pc = 0 // force re-layout the code.
+		for p = cursym.Text; p != nil; p = p.Link {
+			ctxt.Curp = p
+			o = oplook(ctxt, p)
+			if int64(c) > p.Pc {
+				p.Pc = int64(c)
+			}
+
+			/* very large branches
+			if(o->type == 6 && p->pcond) {
+				otxt = p->pcond->pc - c;
+				if(otxt < 0)
+					otxt = -otxt;
+				if(otxt >= (1L<<17) - 10) {
+					q = ctxt->arch->prg();
+					q->link = p->link;
+					p->link = q;
+					q->as = AB;
+					q->to.type = D_BRANCH;
+					q->pcond = p->pcond;
+					p->pcond = q;
+					q = ctxt->arch->prg();
+					q->link = p->link;
+					p->link = q;
+					q->as = AB;
+					q->to.type = D_BRANCH;
+					q->pcond = q->link->link;
+					bflag = 1;
+				}
+			}
+			*/
+			opc = int32(p.Pc)
+
+			if ctxt.Headtype != obj.Hnacl {
+				m = int(o.size)
+			} else {
+
+				m = asmoutnacl(ctxt, c, p, o, nil)
+			}
+			if p.Pc != int64(opc) {
+				bflag = 1
+			}
+
+			//print("%P pc changed %d to %d in iter. %d\n", p, opc, (int32)p->pc, times);
+			c = int32(p.Pc + int64(m))
+
+			if m%4 != 0 || p.Pc%4 != 0 {
+				ctxt.Diag("pc invalid: %v size=%d", p, m)
+			}
+
+			if m/4 > len(out) {
+				ctxt.Diag("instruction size too large: %d > %d", m/4, len(out))
+			}
+			if m == 0 && (p.As != AFUNCDATA && p.As != APCDATA && p.As != ADATABUNDLEEND && p.As != ANOP) {
+				if p.As == ATEXT {
+					ctxt.Autosize = int32(p.To.Offset + 4)
+					continue
+				}
+
+				ctxt.Diag("zero-width instruction\n%v", p)
+				continue
+			}
+		}
+
+		cursym.Size = int64(c)
+		if !(bflag != 0) {
+			break
+		}
+	}
+
+	if c%4 != 0 {
+		ctxt.Diag("sym->size=%d, invalid", c)
+	}
+
+	/*
+	 * lay out the code.  all the pc-relative code references,
+	 * even cross-function, are resolved now;
+	 * only data references need to be relocated.
+	 * with more work we could leave cross-function
+	 * code references to be relocated too, and then
+	 * perhaps we'd be able to parallelize the span loop above.
+	 */
+	if ctxt.Tlsg == nil {
+
+		ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+	}
+
+	p = cursym.Text
+	ctxt.Autosize = int32(p.To.Offset + 4)
+	obj.Symgrow(ctxt, cursym, cursym.Size)
+
+	bp = cursym.P
+	c = int32(p.Pc) // even p->link might need extra padding
+	for p = p.Link; p != nil; p = p.Link {
+		ctxt.Pc = p.Pc
+		ctxt.Curp = p
+		o = oplook(ctxt, p)
+		opc = int32(p.Pc)
+		if ctxt.Headtype != obj.Hnacl {
+			asmout(ctxt, p, o, out[:])
+			m = int(o.size)
+		} else {
+
+			m = asmoutnacl(ctxt, c, p, o, out[:])
+			if int64(opc) != p.Pc {
+				ctxt.Diag("asmoutnacl broken: pc changed (%d->%d) in last stage: %v", opc, int32(p.Pc), p)
+			}
+		}
+
+		if m%4 != 0 || p.Pc%4 != 0 {
+			ctxt.Diag("final stage: pc invalid: %v size=%d", p, m)
+		}
+
+		// NOTE(review): %#d is not a documented fmt flag/verb combination
+		// for integers — probably %d or %#x was intended; confirm output.
+		if int64(c) > p.Pc {
+			ctxt.Diag("PC padding invalid: want %#d, has %#d: %v", p.Pc, c, p)
+		}
+		for int64(c) != p.Pc {
+			// emit 0xe1a00000 (MOVW R0, R0), little-endian, to pad up to p.Pc
+			bp[0] = 0x00
+			bp = bp[1:]
+
+			bp[0] = 0x00
+			bp = bp[1:]
+			bp[0] = 0xa0
+			bp = bp[1:]
+			bp[0] = 0xe1
+			bp = bp[1:]
+			c += 4
+		}
+
+		// Write the assembled words little-endian into the symbol data.
+		for i = 0; i < m/4; i++ {
+			v = int(out[i])
+			bp[0] = byte(v)
+			bp = bp[1:]
+			bp[0] = byte(v >> 8)
+			bp = bp[1:]
+			bp[0] = byte(v >> 16)
+			bp = bp[1:]
+			bp[0] = byte(v >> 24)
+			bp = bp[1:]
+		}
+
+		c += int32(m)
+	}
+}
+
+/*
+ * when the first reference to the literal pool threatens
+ * to go out of range of a 12-bit PC-relative offset,
+ * drop the pool now, and branch round it.
+ * this happens only in extended basic blocks that exceed 4k.
+ */
+// checkpool decides whether the pending literal pool must be emitted
+// before the instruction following p (whose encoded size is sz bytes).
+// It returns flushpool's result (1 if the pool was emitted, 0 otherwise).
+func checkpool(ctxt *obj.Link, p *obj.Prog, sz int) int {
+
+ // Flush if the pool is nearly full (0xff0 leaves headroom below the
+ // 4KB/12-bit limit) or if the distance from this instruction to the
+ // end of the pool would no longer encode as an immediate address.
+ // NOTE(review): the exact +4/+8/+12 terms appear to account for the
+ // ARM PC-ahead bias and worst-case padding — confirm against flushpool.
+ if pool.size >= 0xff0 || immaddr(int32((p.Pc+int64(sz)+4)+4+int64(12+pool.size)-int64(pool.start+8))) == 0 {
+ return flushpool(ctxt, p, 1, 0)
+ } else if p.Link == nil {
+ // End of instruction list: the pool must be dumped now or never.
+ return flushpool(ctxt, p, 2, 0)
+ }
+ return 0
+}
+
+// flushpool splices the queued literal-pool entries (ctxt.Blitrl..ctxt.Elitrl)
+// into the instruction stream immediately after p, then resets the pool state.
+// skip != 0 means execution can fall into the pool, so a branch (AB) around it
+// is inserted first; force == 0 permits deferring the flush while the pool is
+// still within range. Returns 1 if the pool was emitted, 0 otherwise.
+func flushpool(ctxt *obj.Link, p *obj.Prog, skip int, force int) int {
+ var q *obj.Prog
+
+ if ctxt.Blitrl != nil {
+ if skip != 0 {
+ // Disabled debug trace of pool flushes.
+ if false && skip == 1 {
+ fmt.Printf("note: flush literal pool at %x: len=%d ref=%x\n", uint64(p.Pc+4), pool.size, pool.start)
+ }
+ // Insert an unconditional branch over the pool so control flow
+ // does not execute the literal data.
+ q = ctxt.Arch.Prg()
+ q.As = AB
+ q.To.Type_ = D_BRANCH
+ q.Pcond = p.Link
+ q.Link = ctxt.Blitrl
+ q.Lineno = p.Lineno
+ ctxt.Blitrl = q
+ } else if !(force != 0) && (p.Pc+int64(12+pool.size)-int64(pool.start) < 2048) { // 12 take into account the maximum nacl literal pool alignment padding size
+ // Pool still comfortably in range; defer the flush.
+ return 0
+ }
+ if ctxt.Headtype == obj.Hnacl && pool.size%16 != 0 {
+ // if pool is not multiple of 16 bytes, add an alignment marker
+ q = ctxt.Arch.Prg()
+
+ q.As = ADATABUNDLEEND
+ ctxt.Elitrl.Link = q
+ ctxt.Elitrl = q
+ }
+
+ // Link the pool list in after p.
+ ctxt.Elitrl.Link = p.Link
+ p.Link = ctxt.Blitrl
+
+ // BUG(minux): how to correctly handle line number for constant pool entries?
+ // for now, we set line number to the last instruction preceding them at least
+ // this won't bloat the .debug_line tables
+ for ctxt.Blitrl != nil {
+
+ ctxt.Blitrl.Lineno = p.Lineno
+ ctxt.Blitrl = ctxt.Blitrl.Link
+ }
+
+ // Reset pool bookkeeping for the next accumulation.
+ ctxt.Blitrl = nil /* BUG: should refer back to values until out-of-range */
+ ctxt.Elitrl = nil
+ pool.size = 0
+ pool.start = 0
+ pool.extra = 0
+ return 1
+ }
+
+ return 0
+}
+
+// addpool appends operand a of instruction p to the literal pool as an AWORD
+// entry and points p.Pcond at it. Identical non-pcrel entries are deduplicated
+// by a linear scan of the pending pool. Under NaCl, a DATABUNDLE marker is
+// inserted at each 16-byte pool boundary.
+func addpool(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ var q *obj.Prog
+ var t obj.Prog
+ var c int
+
+ c = aclass(ctxt, a)
+
+ // Build the pool entry as a WORD holding either the address operand
+ // or, for register-offset classes, the resolved constant offset.
+ t = zprg
+ t.As = AWORD
+
+ switch c {
+ default:
+ t.To.Offset = a.Offset
+ t.To.Sym = a.Sym
+ t.To.Type_ = a.Type_
+ t.To.Name = a.Name
+
+ // Shared libraries need a PC-relative relocation for symbol words.
+ if ctxt.Flag_shared != 0 && t.To.Sym != nil {
+ t.Pcrel = p
+ }
+
+ case C_SROREG,
+ C_LOREG,
+ C_ROREG,
+ C_FOREG,
+ C_SOREG,
+ C_HOREG,
+ C_FAUTO,
+ C_SAUTO,
+ C_LAUTO,
+ C_LACON:
+ // aclass left the computed offset in ctxt.Instoffset.
+ t.To.Type_ = D_CONST
+ t.To.Offset = ctxt.Instoffset
+ break
+ }
+
+ // Reuse an existing identical entry (pcrel entries are never shared,
+ // since their final value depends on the referencing instruction).
+ if t.Pcrel == nil {
+ for q = ctxt.Blitrl; q != nil; q = q.Link { /* could hash on t.t0.offset */
+ if q.Pcrel == nil && q.To == t.To {
+ p.Pcond = q
+ return
+ }
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl && pool.size%16 == 0 {
+ // start a new data bundle
+ q = ctxt.Arch.Prg()
+
+ *q = zprg
+ q.As = ADATABUNDLE
+ q.Pc = int64(pool.size)
+ pool.size += 4
+ if ctxt.Blitrl == nil {
+ ctxt.Blitrl = q
+ pool.start = uint32(p.Pc)
+ } else {
+
+ ctxt.Elitrl.Link = q
+ }
+
+ ctxt.Elitrl = q
+ }
+
+ // Append the new entry to the pending pool list.
+ q = ctxt.Arch.Prg()
+ *q = t
+ q.Pc = int64(pool.size)
+
+ if ctxt.Blitrl == nil {
+ // First entry: record where the pool's first reference occurred.
+ ctxt.Blitrl = q
+ pool.start = uint32(p.Pc)
+ } else {
+
+ ctxt.Elitrl.Link = q
+ }
+ ctxt.Elitrl = q
+ pool.size += 4
+
+ p.Pcond = q
+}
+
+// regoff returns the constant offset of operand a, as computed by aclass
+// (which stores its result in ctxt.Instoffset as a side effect).
+func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
+ ctxt.Instoffset = 0
+ aclass(ctxt, a)
+ return int32(ctxt.Instoffset)
+}
+
+// immrot attempts to encode v as an ARM data-processing immediate: an 8-bit
+// value rotated right by an even amount. On success it returns the encoding
+// (rotation count in bits 8-11, value in bits 0-7) with bit 25 set to mark
+// the immediate form; it returns 0 if v cannot be so encoded.
+func immrot(v uint32) int32 {
+ var i int
+
+ for i = 0; i < 16; i++ {
+ // Succeed once all set bits fit in the low 8 bits.
+ if v&^0xff == 0 {
+ return int32(uint32(int32(i)<<8) | v | 1<<25)
+ }
+ // Rotate left by 2 (equivalently, try the next rotate-right amount).
+ v = v<<2 | v>>30
+ }
+
+ return 0
+}
+
+// immaddr attempts to encode v as a 12-bit load/store offset. It returns the
+// encoding with the pre-indexing bit (24) set, plus the up bit (23) for
+// non-negative offsets; it returns 0 if v is out of the ±0xfff range.
+// Note a valid encoding is always non-zero because bit 24 is set.
+func immaddr(v int32) int32 {
+ if v >= 0 && v <= 0xfff {
+ return v&0xfff | 1<<24 | 1<<23 /* pre indexing */ /* pre indexing, up */
+ }
+ if v >= -0xfff && v < 0 {
+ return -v&0xfff | 1<<24 /* pre indexing */
+ }
+ return 0
+}
+
+// immfloat reports whether v can be used as a floating-point load/store
+// offset: bits 0-1 and 10-11 must be clear, i.e. v is a multiple of 4
+// that fits in the scaled 8-bit offset field.
+func immfloat(v int32) bool {
+ return v&0xC03 == 0 /* offset will fit in floating-point load/store */
+}
+
+// immhalf reports whether v fits the 8-bit halfword-transfer offset field,
+// i.e. -0xff <= v <= 0xff.
+// NOTE(review): this is a mechanical translation of C code that returned the
+// encoded offset; the `... != 0` expressions below are always true within the
+// guarded ranges (bit 24 is set unconditionally), so the function reduces to
+// the range test — kept as-is to preserve the translated form.
+func immhalf(v int32) bool {
+ if v >= 0 && v <= 0xff {
+ return v|1<<24|1<<23 != 0 /* pre indexing */ /* pre indexing, up */
+ }
+ if v >= -0xff && v < 0 {
+ return -v&0xff|1<<24 != 0 /* pre indexing */
+ }
+ return false
+}
+
+// aclass classifies operand a into one of the C_* operand classes used to
+// index the optab, returning C_GOK for anything unrecognized. For offset
+// operands it leaves the resolved constant offset in ctxt.Instoffset as a
+// side effect (consumed by regoff and the asmout cases).
+func aclass(ctxt *obj.Link, a *obj.Addr) int {
+ var s *obj.LSym
+ var t int
+
+ switch a.Type_ {
+ case D_NONE:
+ return C_NONE
+
+ case D_REG:
+ return C_REG
+
+ case D_REGREG:
+ return C_REGREG
+
+ case D_REGREG2:
+ return C_REGREG2
+
+ case D_SHIFT:
+ return C_SHIFT
+
+ case D_FREG:
+ return C_FREG
+
+ case D_FPCR:
+ return C_FCR
+
+ case D_OREG:
+ // Memory reference: classify by addressing name, then by how the
+ // offset can be encoded (halfword / float / short / long forms).
+ switch a.Name {
+ case D_EXTERN,
+ D_STATIC:
+ if a.Sym == nil || a.Sym.Name == "" {
+ fmt.Printf("null sym external\n")
+ return C_GOK
+ }
+
+ ctxt.Instoffset = 0 // s.b. unused but just in case
+ return C_ADDR
+
+ case D_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ t = int(immaddr(int32(ctxt.Instoffset)))
+ if t != 0 {
+ if immhalf(int32(ctxt.Instoffset)) {
+ if immfloat(int32(t)) {
+ return C_HFAUTO
+ }
+ return C_HAUTO
+ }
+
+ if immfloat(int32(t)) {
+ return C_FAUTO
+ }
+ return C_SAUTO
+ }
+
+ return C_LAUTO
+
+ case D_PARAM:
+ // Arguments live above the saved LR, hence the extra +4.
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
+ t = int(immaddr(int32(ctxt.Instoffset)))
+ if t != 0 {
+ if immhalf(int32(ctxt.Instoffset)) {
+ if immfloat(int32(t)) {
+ return C_HFAUTO
+ }
+ return C_HAUTO
+ }
+
+ if immfloat(int32(t)) {
+ return C_FAUTO
+ }
+ return C_SAUTO
+ }
+
+ return C_LAUTO
+
+ case D_NONE:
+ ctxt.Instoffset = a.Offset
+ t = int(immaddr(int32(ctxt.Instoffset)))
+ if t != 0 {
+ if immhalf(int32(ctxt.Instoffset)) { /* n.b. that it will also satisfy immrot */
+ if immfloat(int32(t)) {
+ return C_HFOREG
+ }
+ return C_HOREG
+ }
+
+ if immfloat(int32(t)) {
+ return C_FOREG /* n.b. that it will also satisfy immrot */
+ }
+ t = int(immrot(uint32(ctxt.Instoffset)))
+ if t != 0 {
+ return C_SROREG
+ }
+ if immhalf(int32(ctxt.Instoffset)) {
+ return C_HOREG
+ }
+ return C_SOREG
+ }
+
+ t = int(immrot(uint32(ctxt.Instoffset)))
+ if t != 0 {
+ return C_ROREG
+ }
+ return C_LOREG
+ }
+
+ return C_GOK
+
+ case D_PSR:
+ return C_PSR
+
+ case D_OCONST:
+ switch a.Name {
+ case D_EXTERN,
+ D_STATIC:
+ ctxt.Instoffset = 0 // s.b. unused but just in case
+ return C_ADDR
+ }
+
+ return C_GOK
+
+ case D_FCONST:
+ // Float constants that the chip can materialize directly get
+ // cheaper classes than pool-loaded ones (C_LFCON).
+ if chipzero5(ctxt, a.U.Dval) >= 0 {
+ return C_ZFCON
+ }
+ if chipfloat5(ctxt, a.U.Dval) >= 0 {
+ return C_SFCON
+ }
+ return C_LFCON
+
+ case D_CONST,
+ D_CONST2:
+ switch a.Name {
+ case D_NONE:
+ ctxt.Instoffset = a.Offset
+ if a.Reg != NREG {
+ return aconsize(ctxt)
+ }
+
+ // Prefer a rotate-encodable constant, then its complement
+ // (usable via the inverted instruction form), else long.
+ t = int(immrot(uint32(ctxt.Instoffset)))
+ if t != 0 {
+ return C_RCON
+ }
+ t = int(immrot(uint32(^ctxt.Instoffset)))
+ if t != 0 {
+ return C_NCON
+ }
+ return C_LCON
+
+ case D_EXTERN,
+ D_STATIC:
+ s = a.Sym
+ if s == nil {
+ break
+ }
+ ctxt.Instoffset = 0 // s.b. unused but just in case
+ return C_LCONADDR
+
+ case D_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ return aconsize(ctxt)
+
+ case D_PARAM:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 4
+ return aconsize(ctxt)
+ }
+
+ return C_GOK
+
+ case D_BRANCH:
+ return C_SBRA
+ }
+
+ return C_GOK
+}
+
+// aconsize classifies the address constant already stored in ctxt.Instoffset:
+// C_RACON if it fits an immediate-rotate encoding, C_LACON otherwise.
+func aconsize(ctxt *obj.Link) int {
+ var t int
+
+ t = int(immrot(uint32(ctxt.Instoffset)))
+ if t != 0 {
+ return C_RACON
+ }
+ return C_LACON
+}
+
+// prasm prints instruction p to stdout; used when reporting bad encodings.
+func prasm(p *obj.Prog) {
+ fmt.Printf("%v\n", p)
+}
+
+// oplook finds the optab entry matching instruction p's opcode and operand
+// classes. The result is cached in p.Optab (stored 1-based so that zero means
+// "not yet looked up"). Operand classes are cached in the From/To Class
+// fields, also offset by one. On failure it diagnoses the bad combination and
+// returns an entry that will produce an error during encoding.
+func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+ var a1 int
+ var a2 int
+ var a3 int
+ var r int
+ var c1 []byte
+ var c3 []byte
+ var o []Optab
+ var e []Optab
+
+ // Fast path: previously resolved.
+ a1 = int(p.Optab)
+ if a1 != 0 {
+ return &optab[a1-1:][0]
+ }
+ a1 = int(p.From.Class)
+ if a1 == 0 {
+ a1 = aclass(ctxt, &p.From) + 1
+ p.From.Class = int8(a1)
+ }
+
+ a1--
+ a3 = int(p.To.Class)
+ if a3 == 0 {
+ a3 = aclass(ctxt, &p.To) + 1
+ p.To.Class = int8(a3)
+ }
+
+ a3--
+ // The middle operand is only ever a register (or absent).
+ a2 = C_NONE
+ if p.Reg != NREG {
+ a2 = C_REG
+ }
+ r = int(p.As)
+ o = oprange[r].start
+ if o == nil {
+ o = oprange[r].stop /* just generate an error */
+ }
+
+ if false { /*debug['O']*/
+ fmt.Printf("oplook %v %v %v %v\n", Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3))
+ fmt.Printf("\t\t%d %d\n", p.From.Type_, p.To.Type_)
+ }
+
+ // Scan the opcode's optab range; xcmp rows let an entry accept any
+ // class compatible with its required class (see cmp/buildop).
+ // The -cap() comparisons and arithmetic are a c2go translation of
+ // C pointer comparison/subtraction over the optab array.
+ e = oprange[r].stop
+ c1 = xcmp[a1][:]
+ c3 = xcmp[a3][:]
+ for ; -cap(o) < -cap(e); o = o[1:] {
+ if int(o[0].a2) == a2 {
+ if c1[o[0].a1] != 0 {
+ if c3[o[0].a3] != 0 {
+ p.Optab = uint16((-cap(o) + cap(optab)) + 1)
+ return &o[0]
+ }
+ }
+ }
+ }
+
+ ctxt.Diag("illegal combination %v; %v %v %v, %d %d", p, DRconv(a1), DRconv(a2), DRconv(a3), p.From.Type_, p.To.Type_)
+ ctxt.Diag("from %d %d to %d %d\n", p.From.Type_, p.From.Name, p.To.Type_, p.To.Name)
+ prasm(p)
+ if o == nil {
+ o = optab
+ }
+ return &o[0]
+}
+
+// cmp reports whether an operand of class b is acceptable where an optab
+// entry requires class a — i.e. whether b is a (possibly transitive) subclass
+// of a. buildop precomputes this relation into the xcmp table.
+func cmp(a int, b int) bool {
+ if a == b {
+ return true
+ }
+ switch a {
+ case C_LCON:
+ // Any rotate-encodable or negated-rotate constant is also a long constant.
+ if b == C_RCON || b == C_NCON {
+ return true
+ }
+
+ case C_LACON:
+ if b == C_RACON {
+ return true
+ }
+
+ case C_LFCON:
+ if b == C_ZFCON || b == C_SFCON {
+ return true
+ }
+
+ case C_HFAUTO:
+ return b == C_HAUTO || b == C_FAUTO
+
+ case C_FAUTO,
+ C_HAUTO:
+ return b == C_HFAUTO
+
+ case C_SAUTO:
+ return cmp(C_HFAUTO, b)
+
+ case C_LAUTO:
+ return cmp(C_SAUTO, b)
+
+ case C_HFOREG:
+ return b == C_HOREG || b == C_FOREG
+
+ case C_FOREG,
+ C_HOREG:
+ return b == C_HFOREG
+
+ case C_SROREG:
+ return cmp(C_SOREG, b) || cmp(C_ROREG, b)
+
+ case C_SOREG,
+ C_ROREG:
+ return b == C_SROREG || cmp(C_HFOREG, b)
+
+ case C_LOREG:
+ return cmp(C_SROREG, b)
+
+ case C_LBRA:
+ if b == C_SBRA {
+ return true
+ }
+
+ case C_HREG:
+ return cmp(C_SP, b) || cmp(C_PC, b)
+ }
+
+ return false
+}
+
+// ocmp implements sort.Interface for the optab, ordering entries by opcode
+// and then by operand classes so buildop can slice the table into per-opcode
+// ranges.
+type ocmp []Optab
+
+func (x ocmp) Len() int {
+ return len(x)
+}
+
+func (x ocmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+// Less orders by (as, a1, a2, a3), lexicographically.
+func (x ocmp) Less(i, j int) bool {
+ var p1 *Optab
+ var p2 *Optab
+ var n int
+
+ p1 = &x[i]
+ p2 = &x[j]
+ n = int(p1.as) - int(p2.as)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a1) - int(p2.a1)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a2) - int(p2.a2)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a3) - int(p2.a3)
+ if n != 0 {
+ return n < 0
+ }
+ return false
+}
+
+// buildop initializes the encoding tables: it fills xcmp with the operand
+// class compatibility relation (see cmp), adjusts optab entries for the
+// shared-library build mode, sorts optab, and records each opcode's slice of
+// optab in oprange. Opcodes that encode identically to a representative
+// (e.g. AAND like AADD) share the representative's range.
+func buildop(ctxt *obj.Link) {
+ var i int
+ var n int
+ var r int
+
+ // xcmp[i][n] != 0 means class n is acceptable where class i is required.
+ for i = 0; i < C_GOK; i++ {
+ for n = 0; n < C_GOK; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = 1
+ }
+ }
+ }
+ // PC-relative entries either grow by their extra instruction count
+ // (shared mode) or drop the LPCREL flag entirely.
+ for n = 0; optab[n].as != AXXX; n++ {
+ if optab[n].flag&LPCREL != 0 {
+ if ctxt.Flag_shared != 0 {
+ optab[n].size += int8(optab[n].pcrelsiz)
+ } else {
+
+ optab[n].flag &^= LPCREL
+ }
+ }
+ }
+
+ sort.Sort(ocmp(optab[:n]))
+ for i = 0; i < n; i++ {
+ // Find the run of entries sharing this opcode.
+ r = int(optab[i].as)
+ oprange[r].start = optab[i:]
+ for int(optab[i].as) == r {
+ i++
+ }
+ oprange[r].stop = optab[i:]
+ i--
+
+ // Alias opcodes with identical operand patterns to this range.
+ switch r {
+ default:
+ ctxt.Diag("unknown op in build: %v", Aconv(r))
+ log.Fatalf("bad code")
+
+ case AADD:
+ oprange[AAND] = oprange[r]
+ oprange[AEOR] = oprange[r]
+ oprange[ASUB] = oprange[r]
+ oprange[ARSB] = oprange[r]
+ oprange[AADC] = oprange[r]
+ oprange[ASBC] = oprange[r]
+ oprange[ARSC] = oprange[r]
+ oprange[AORR] = oprange[r]
+ oprange[ABIC] = oprange[r]
+
+ case ACMP:
+ oprange[ATEQ] = oprange[r]
+ oprange[ACMN] = oprange[r]
+
+ case AMVN:
+ break
+
+ case ABEQ:
+ oprange[ABNE] = oprange[r]
+ oprange[ABCS] = oprange[r]
+ oprange[ABHS] = oprange[r]
+ oprange[ABCC] = oprange[r]
+ oprange[ABLO] = oprange[r]
+ oprange[ABMI] = oprange[r]
+ oprange[ABPL] = oprange[r]
+ oprange[ABVS] = oprange[r]
+ oprange[ABVC] = oprange[r]
+ oprange[ABHI] = oprange[r]
+ oprange[ABLS] = oprange[r]
+ oprange[ABGE] = oprange[r]
+ oprange[ABLT] = oprange[r]
+ oprange[ABGT] = oprange[r]
+ oprange[ABLE] = oprange[r]
+
+ case ASLL:
+ oprange[ASRL] = oprange[r]
+ oprange[ASRA] = oprange[r]
+
+ case AMUL:
+ oprange[AMULU] = oprange[r]
+
+ case ADIV:
+ oprange[AMOD] = oprange[r]
+ oprange[AMODU] = oprange[r]
+ oprange[ADIVU] = oprange[r]
+
+ case AMOVW,
+ AMOVB,
+ AMOVBS,
+ AMOVBU,
+ AMOVH,
+ AMOVHS,
+ AMOVHU:
+ break
+
+ case ASWPW:
+ oprange[ASWPBU] = oprange[r]
+
+ case AB,
+ ABL,
+ ABX,
+ ABXRET,
+ ADUFFZERO,
+ ADUFFCOPY,
+ ASWI,
+ AWORD,
+ AMOVM,
+ ARFE,
+ ATEXT,
+ AUSEFIELD,
+ ACASE,
+ ABCASE,
+ ATYPE:
+ break
+
+ case AADDF:
+ oprange[AADDD] = oprange[r]
+ oprange[ASUBF] = oprange[r]
+ oprange[ASUBD] = oprange[r]
+ oprange[AMULF] = oprange[r]
+ oprange[AMULD] = oprange[r]
+ oprange[ADIVF] = oprange[r]
+ oprange[ADIVD] = oprange[r]
+ oprange[ASQRTF] = oprange[r]
+ oprange[ASQRTD] = oprange[r]
+ oprange[AMOVFD] = oprange[r]
+ oprange[AMOVDF] = oprange[r]
+ oprange[AABSF] = oprange[r]
+ oprange[AABSD] = oprange[r]
+
+ case ACMPF:
+ oprange[ACMPD] = oprange[r]
+
+ case AMOVF:
+ oprange[AMOVD] = oprange[r]
+
+ case AMOVFW:
+ oprange[AMOVDW] = oprange[r]
+
+ case AMOVWF:
+ oprange[AMOVWD] = oprange[r]
+
+ case AMULL:
+ oprange[AMULAL] = oprange[r]
+ oprange[AMULLU] = oprange[r]
+ oprange[AMULALU] = oprange[r]
+
+ case AMULWT:
+ oprange[AMULWB] = oprange[r]
+
+ case AMULAWT:
+ oprange[AMULAWB] = oprange[r]
+
+ case AMULA,
+ ALDREX,
+ ASTREX,
+ ALDREXD,
+ ASTREXD,
+ ATST,
+ APLD,
+ AUNDEF,
+ ACLZ,
+ AFUNCDATA,
+ APCDATA,
+ ANOP,
+ ADATABUNDLE,
+ ADATABUNDLEEND:
+ break
+ }
+ }
+}
+
+func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
+ var o1 uint32
+ var o2 uint32
+ var o3 uint32
+ var o4 uint32
+ var o5 uint32
+ var o6 uint32
+ var v int32
+ var r int
+ var rf int
+ var rt int
+ var rt2 int
+ var rel *obj.Reloc
+
+ ctxt.Printp = p
+ o1 = 0
+ o2 = 0
+ o3 = 0
+ o4 = 0
+ o5 = 0
+ o6 = 0
+ ctxt.Armsize += int32(o.size)
+ if false { /*debug['P']*/
+ fmt.Printf("%x: %v\ttype %d\n", uint32(p.Pc), p, o.type_)
+ }
+ switch o.type_ {
+ default:
+ ctxt.Diag("unknown asm %d", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ if false { /*debug['G']*/
+ fmt.Printf("%x: %s: arm %d\n", uint32(p.Pc), p.From.Sym.Name, p.From.Sym.Fnptr)
+ }
+
+ case 1: /* op R,[R],R */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if p.To.Type_ == D_NONE {
+ rt = 0
+ }
+ if p.As == AMOVB || p.As == AMOVH || p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == NREG {
+ r = rt
+ }
+ o1 |= uint32(rf) | uint32(r)<<16 | uint32(rt)<<12
+
+ case 2: /* movbu $I,[R],R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if p.To.Type_ == D_NONE {
+ rt = 0
+ }
+ if p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == NREG {
+ r = rt
+ }
+ o1 |= uint32(r)<<16 | uint32(rt)<<12
+
+ case 3: /* add R<<[IR],[R],R */
+ o1 = mov(ctxt, p)
+
+ case 4: /* add $I,[R],R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 |= uint32(r) << 16
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 5: /* bra s */
+ o1 = opbra(ctxt, int(p.As), int(p.Scond))
+
+ v = -8
+ if p.To.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ rel.Add = int64(o1) | (int64(v)>>2)&0xffffff
+ rel.Type_ = obj.R_CALLARM
+ break
+ }
+
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - ctxt.Pc) - 8)
+ }
+ o1 |= (uint32(v) >> 2) & 0xffffff
+
+ case 6: /* b ,O(R) -> add $O,R,PC */
+ aclass(ctxt, &p.To)
+
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= uint32(p.To.Reg) << 16
+ o1 |= REGPC << 12
+
+ case 7: /* bl (R) -> blx R */
+ aclass(ctxt, &p.To)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("%v: doesn't support BL offset(REG) where offset != 0", p)
+ }
+ o1 = oprrr(ctxt, ABL, int(p.Scond))
+ o1 |= uint32(p.To.Reg)
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 0
+ rel.Type_ = obj.R_CALLIND
+
+ case 8: /* sll $c,[R],R -> mov (R<<$c),R */
+ aclass(ctxt, &p.From)
+
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 |= uint32(r)
+ o1 |= uint32((ctxt.Instoffset & 31) << 7)
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 9: /* sll R,[R],R -> mov (R<<R),R */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 |= uint32(r)
+ o1 |= uint32(p.From.Reg)<<8 | 1<<4
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 10: /* swi [$con] */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ if p.To.Type_ != D_NONE {
+ aclass(ctxt, &p.To)
+ o1 |= uint32(ctxt.Instoffset & 0xffffff)
+ }
+
+ case 11: /* word */
+ aclass(ctxt, &p.To)
+
+ o1 = uint32(ctxt.Instoffset)
+ if p.To.Sym != nil {
+ // This case happens with words generated
+ // in the PC stream as part of the literal pool.
+ rel = obj.Addrel(ctxt.Cursym)
+
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+
+ // runtime.tlsg is special.
+ // Its "address" is the offset from the TLS thread pointer
+ // to the thread-local g and m pointers.
+ // Emit a TLS relocation instead of a standard one.
+ if rel.Sym == ctxt.Tlsg {
+
+ rel.Type_ = obj.R_TLS
+ if ctxt.Flag_shared != 0 {
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 8 - int64(rel.Siz)
+ }
+ rel.Xadd = rel.Add
+ rel.Xsym = rel.Sym
+ } else if ctxt.Flag_shared != 0 {
+ rel.Type_ = obj.R_PCREL
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 8
+ } else {
+
+ rel.Type_ = obj.R_ADDR
+ }
+ o1 = 0
+ }
+
+ case 12: /* movw $lcon, reg */
+ o1 = omvl(ctxt, p, &p.From, int(p.To.Reg))
+
+ if o.flag&LPCREL != 0 {
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(p.To.Reg) | REGPC<<16 | uint32(p.To.Reg)<<12
+ }
+
+ case 13: /* op $lcon, [R], R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 |= REGTMP
+ r = int(p.Reg)
+ if p.As == AMOVW || p.As == AMVN {
+ r = 0
+ } else if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o2 |= uint32(r) << 16
+ if p.To.Type_ != D_NONE {
+ o2 |= uint32(p.To.Reg) << 12
+ }
+
+ case 14: /* movb/movbu/movh/movhu R,R */
+ o1 = oprrr(ctxt, ASLL, int(p.Scond))
+
+ if p.As == AMOVBU || p.As == AMOVHU {
+ o2 = oprrr(ctxt, ASRL, int(p.Scond))
+ } else {
+
+ o2 = oprrr(ctxt, ASRA, int(p.Scond))
+ }
+
+ r = int(p.To.Reg)
+ o1 |= uint32(p.From.Reg) | uint32(r)<<12
+ o2 |= uint32(r) | uint32(r)<<12
+ if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+ o1 |= 24 << 7
+ o2 |= 24 << 7
+ } else {
+
+ o1 |= 16 << 7
+ o2 |= 16 << 7
+ }
+
+ case 15: /* mul r,[r,]r */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if r == NREG {
+ r = rt
+ }
+ if rt == r {
+ r = rf
+ rf = rt
+ }
+
+ if false {
+ if rt == r || rf == REGPC || r == REGPC || rt == REGPC {
+ ctxt.Diag("bad registers in MUL")
+ prasm(p)
+ }
+ }
+
+ o1 |= uint32(rf)<<8 | uint32(r) | uint32(rt)<<16
+
+ case 16: /* div r,[r,]r */
+ o1 = 0xf << 28
+
+ o2 = 0
+
+ case 17:
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ rt2 = int(p.To.Offset)
+ r = int(p.Reg)
+ o1 |= uint32(rf)<<8 | uint32(r) | uint32(rt)<<16 | uint32(rt2)<<12
+
+ case 20: /* mov/movb/movbu R,O(R) */
+ aclass(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = osr(ctxt, int(p.As), int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+
+ case 21: /* mov/movbu O(R),R -> lr */
+ aclass(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = olr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
+ if p.As != AMOVW {
+ o1 |= 1 << 22
+ }
+
+ case 30: /* mov/movb/movbu R,L(R) */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = osrr(ctxt, int(p.From.Reg), REGTMP, r, int(p.Scond))
+ if p.As != AMOVW {
+ o2 |= 1 << 22
+ }
+
+ case 31: /* mov/movbu L(R),R -> lr[b] */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = olrr(ctxt, REGTMP, r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+ o2 |= 1 << 22
+ }
+
+ case 34: /* mov $lacon,R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+
+ o2 = oprrr(ctxt, AADD, int(p.Scond))
+ o2 |= REGTMP
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 |= uint32(r) << 16
+ if p.To.Type_ != D_NONE {
+ o2 |= uint32(p.To.Reg) << 12
+ }
+
+ case 35: /* mov PSR,R */
+ o1 = 2<<23 | 0xf<<16 | 0<<0
+
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+ o1 |= (uint32(p.From.Reg) & 1) << 22
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 36: /* mov R,PSR */
+ o1 = 2<<23 | 0x29f<<12 | 0<<4
+
+ if p.Scond&C_FBIT != 0 {
+ o1 ^= 0x010 << 12
+ }
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+ o1 |= (uint32(p.To.Reg) & 1) << 22
+ o1 |= uint32(p.From.Reg) << 0
+
+ case 37: /* mov $con,PSR */
+ aclass(ctxt, &p.From)
+
+ o1 = 2<<23 | 0x29f<<12 | 0<<4
+ if p.Scond&C_FBIT != 0 {
+ o1 ^= 0x010 << 12
+ }
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= (uint32(p.To.Reg) & 1) << 22
+ o1 |= uint32(p.From.Reg) << 0
+
+ case 38,
+ 39:
+ switch o.type_ {
+ case 38: /* movm $con,oreg -> stm */
+ o1 = 0x4 << 25
+
+ o1 |= uint32(p.From.Offset & 0xffff)
+ o1 |= uint32(p.To.Reg) << 16
+ aclass(ctxt, &p.To)
+
+ case 39: /* movm oreg,$con -> ldm */
+ o1 = 0x4<<25 | 1<<20
+
+ o1 |= uint32(p.To.Offset & 0xffff)
+ o1 |= uint32(p.From.Reg) << 16
+ aclass(ctxt, &p.From)
+ break
+ }
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in MOVM; %v", p)
+ }
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+ if p.Scond&C_PBIT != 0 {
+ o1 |= 1 << 24
+ }
+ if p.Scond&C_UBIT != 0 {
+ o1 |= 1 << 23
+ }
+ if p.Scond&C_SBIT != 0 {
+ o1 |= 1 << 22
+ }
+ if p.Scond&C_WBIT != 0 {
+ o1 |= 1 << 21
+ }
+
+ case 40: /* swp oreg,reg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in SWP")
+ }
+ o1 = 0x2<<23 | 0x9<<4
+ if p.As != ASWPW {
+ o1 |= 1 << 22
+ }
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.Reg) << 0
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 41: /* rfe -> movm.s.w.u 0(r13),[r15] */
+ o1 = 0xe8fd8000
+
+ case 50: /* floating point store */
+ v = regoff(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = ofsr(ctxt, int(p.As), int(p.From.Reg), v, r, int(p.Scond), p)
+
+ case 51: /* floating point load */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = ofsr(ctxt, int(p.As), int(p.To.Reg), v, r, int(p.Scond), p) | 1<<20
+
+ case 52: /* floating point store, int32 offset UGLY */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP<<12 | REGTMP<<16 | uint32(r)
+ o3 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+
+ case 53: /* floating point load, int32 offset UGLY */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP<<12 | REGTMP<<16 | uint32(r)
+ o3 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, REGTMP, int(p.Scond), p) | 1<<20
+
+ case 54: /* floating point arith */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ rf = int(p.From.Reg)
+ rt = int(p.To.Reg)
+ r = int(p.Reg)
+ if r == NREG {
+ r = rt
+ if p.As == AMOVF || p.As == AMOVD || p.As == AMOVFD || p.As == AMOVDF || p.As == ASQRTF || p.As == ASQRTD || p.As == AABSF || p.As == AABSD {
+ r = 0
+ }
+ }
+
+ o1 |= uint32(rf) | uint32(r)<<16 | uint32(rt)<<12
+
+ case 56: /* move to FP[CS]R */
+ o1 = (uint32(p.Scond)&C_SCOND)<<28 | 0xe<<24 | 1<<8 | 1<<4
+
+ o1 |= (uint32(p.To.Reg)+1)<<21 | uint32(p.From.Reg)<<12
+
+ case 57: /* move from FP[CS]R */
+ o1 = (uint32(p.Scond)&C_SCOND)<<28 | 0xe<<24 | 1<<8 | 1<<4
+
+ o1 |= (uint32(p.From.Reg)+1)<<21 | uint32(p.To.Reg)<<12 | 1<<20
+
+ case 58: /* movbu R,R */
+ o1 = oprrr(ctxt, AAND, int(p.Scond))
+
+ o1 |= uint32(immrot(0xff))
+ rt = int(p.To.Reg)
+ r = int(p.From.Reg)
+ if p.To.Type_ == D_NONE {
+ rt = 0
+ }
+ if r == NREG {
+ r = rt
+ }
+ o1 |= uint32(r)<<16 | uint32(rt)<<12
+
+ case 59: /* movw/bu R<<I(R),R -> ldr indexed */
+ if p.From.Reg == NREG {
+
+ if p.As != AMOVW {
+ ctxt.Diag("byte MOV from shifter operand")
+ }
+ o1 = mov(ctxt, p)
+ break
+ }
+
+ if p.From.Offset&(1<<4) != 0 {
+ ctxt.Diag("bad shift in LDR")
+ }
+ o1 = olrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU {
+ o1 |= 1 << 22
+ }
+
+ case 60: /* movb R(R),R -> ldrsb indexed */
+ if p.From.Reg == NREG {
+
+ ctxt.Diag("byte MOV from shifter operand")
+ o1 = mov(ctxt, p)
+ break
+ }
+
+ if p.From.Offset&(^0xf) != 0 {
+ ctxt.Diag("bad shift in LDRSB")
+ }
+ o1 = olhrr(ctxt, int(p.From.Offset), int(p.From.Reg), int(p.To.Reg), int(p.Scond))
+ o1 ^= 1<<5 | 1<<6
+
+ case 61: /* movw/b/bu R,R<<[IR](R) -> str indexed */
+ if p.To.Reg == NREG {
+
+ ctxt.Diag("MOV to shifter operand")
+ }
+ o1 = osrr(ctxt, int(p.From.Reg), int(p.To.Offset), int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS || p.As == AMOVBU {
+ o1 |= 1 << 22
+ }
+
+ case 62: /* case R -> movw R<<2(PC),PC */
+ if o.flag&LPCREL != 0 {
+
+ o1 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(immrot(1)) | uint32(p.From.Reg)<<16 | REGTMP<<12
+ o2 = olrr(ctxt, REGTMP, REGPC, REGTMP, int(p.Scond))
+ o2 |= 2 << 7
+ o3 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGPC<<12
+ } else {
+
+ o1 = olrr(ctxt, int(p.From.Reg), REGPC, REGPC, int(p.Scond))
+ o1 |= 2 << 7
+ }
+
+ case 63: /* bcase */
+ if p.Pcond != nil {
+
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ if p.To.Sym != nil && p.To.Sym.Type_ != 0 {
+ rel.Sym = p.To.Sym
+ rel.Add = p.To.Offset
+ } else {
+
+ rel.Sym = ctxt.Cursym
+ rel.Add = p.Pcond.Pc
+ }
+
+ if o.flag&LPCREL != 0 {
+ rel.Type_ = obj.R_PCREL
+ rel.Add += ctxt.Pc - p.Pcrel.Pc - 16 + int64(rel.Siz)
+ } else {
+
+ rel.Type_ = obj.R_ADDR
+ }
+ o1 = 0
+ }
+
+ /* reloc ops */
+ case 64: /* mov/movb/movbu R,addr */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = osr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ case 65: /* mov/movbu addr,R */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = olr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVBU || p.As == AMOVBS || p.As == AMOVB {
+ o2 |= 1 << 22
+ }
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ case 68: /* floating point store -> ADDR */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = ofsr(ctxt, int(p.As), int(p.From.Reg), 0, REGTMP, int(p.Scond), p)
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ case 69: /* floating point load <- ADDR */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = ofsr(ctxt, int(p.As), int(p.To.Reg), 0, REGTMP, int(p.Scond), p) | 1<<20
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ /* ArmV4 ops: */
+ case 70: /* movh/movhu R,O(R) -> strh */
+ aclass(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = oshr(ctxt, int(p.From.Reg), int32(ctxt.Instoffset), r, int(p.Scond))
+
+ case 71: /* movb/movh/movhu O(R),R -> ldrsb/ldrsh/ldrh */
+ aclass(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = olhr(ctxt, int32(ctxt.Instoffset), r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o1 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o1 ^= (1 << 6)
+ }
+
+ case 72: /* movh/movhu R,L(R) -> strh */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = oshrr(ctxt, int(p.From.Reg), REGTMP, r, int(p.Scond))
+
+ case 73: /* movb/movh/movhu L(R),R -> ldrsb/ldrsh/ldrh */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o2 = olhrr(ctxt, REGTMP, r, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o2 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o2 ^= (1 << 6)
+ }
+
+ case 74: /* bx $I */
+ ctxt.Diag("ABX $I")
+
+ case 75: /* bx O(R) */
+ aclass(ctxt, &p.To)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("non-zero offset in ABX")
+ }
+
+ /*
+ o1 = oprrr(ctxt, AADD, p->scond) | immrot(0) | (REGPC<<16) | (REGLINK<<12); // mov PC, LR
+ o2 = ((p->scond&C_SCOND)<<28) | (0x12fff<<8) | (1<<4) | p->to.reg; // BX R
+ */
+ // p->to.reg may be REGLINK
+ o1 = oprrr(ctxt, AADD, int(p.Scond))
+
+ o1 |= uint32(immrot(uint32(ctxt.Instoffset)))
+ o1 |= uint32(p.To.Reg) << 16
+ o1 |= REGTMP << 12
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | uint32(immrot(0)) | REGPC<<16 | REGLINK<<12 // mov PC, LR
+ o3 = (uint32(p.Scond)&C_SCOND)<<28 | 0x12fff<<8 | 1<<4 | REGTMP // BX Rtmp
+
+ case 76: /* bx O(R) when returning from fn*/
+ ctxt.Diag("ABXRET")
+
+ case 77: /* ldrex oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in LDREX")
+ }
+ o1 = 0x19<<20 | 0xf9f
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 78: /* strex reg,oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in STREX")
+ }
+ o1 = 0x18<<20 | 0xf90
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.Reg) << 0
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 80: /* fmov zfcon,freg */
+ if p.As == AMOVD {
+
+ o1 = 0xeeb00b00 // VMOV imm 64
+ o2 = oprrr(ctxt, ASUBD, int(p.Scond))
+ } else {
+
+ o1 = 0x0eb00a00 // VMOV imm 32
+ o2 = oprrr(ctxt, ASUBF, int(p.Scond))
+ }
+
+ v = 0x70 // 1.0
+ r = int(p.To.Reg)
+
+ // movf $1.0, r
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ o1 |= uint32(r) << 12
+ o1 |= (uint32(v) & 0xf) << 0
+ o1 |= (uint32(v) & 0xf0) << 12
+
+ // subf r,r,r
+ o2 |= uint32(r) | uint32(r)<<16 | uint32(r)<<12
+
+ case 81: /* fmov sfcon,freg */
+ o1 = 0x0eb00a00 // VMOV imm 32
+ if p.As == AMOVD {
+ o1 = 0xeeb00b00 // VMOV imm 64
+ }
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+ o1 |= uint32(p.To.Reg) << 12
+ v = int32(chipfloat5(ctxt, p.From.U.Dval))
+ o1 |= (uint32(v) & 0xf) << 0
+ o1 |= (uint32(v) & 0xf0) << 12
+
+ case 82: /* fcmp freg,freg, */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.Reg)<<12 | uint32(p.From.Reg)<<0
+ o2 = 0x0ef1fa10 // VMRS R15
+ o2 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 83: /* fcmp freg,, */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.From.Reg)<<12 | 1<<16
+ o2 = 0x0ef1fa10 // VMRS R15
+ o2 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 84: /* movfw freg,freg - truncate float-to-fix */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 0
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 85: /* movwf freg,freg - fix-to-float */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 0
+ o1 |= uint32(p.To.Reg) << 12
+
+ // macro for movfw freg,FTMP; movw FTMP,reg
+ case 86: /* movfw freg,reg - truncate float-to-fix */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 0
+ o1 |= FREGTMP << 12
+ o2 = oprrr(ctxt, AMOVFW+AEND, int(p.Scond))
+ o2 |= FREGTMP << 16
+ o2 |= uint32(p.To.Reg) << 12
+
+ // macro for movw reg,FTMP; movwf FTMP,freg
+ case 87: /* movwf reg,freg - fix-to-float */
+ o1 = oprrr(ctxt, AMOVWF+AEND, int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 12
+ o1 |= FREGTMP << 16
+ o2 = oprrr(ctxt, int(p.As), int(p.Scond))
+ o2 |= FREGTMP << 0
+ o2 |= uint32(p.To.Reg) << 12
+
+ case 88: /* movw reg,freg */
+ o1 = oprrr(ctxt, AMOVWF+AEND, int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 12
+ o1 |= uint32(p.To.Reg) << 16
+
+ case 89: /* movw freg,reg */
+ o1 = oprrr(ctxt, AMOVFW+AEND, int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.To.Reg) << 12
+
+ case 90: /* tst reg */
+ o1 = oprrr(ctxt, ACMP+AEND, int(p.Scond))
+
+ o1 |= uint32(p.From.Reg) << 16
+
+ case 91: /* ldrexd oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in LDREX")
+ }
+ o1 = 0x1b<<20 | 0xf9f
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 92: /* strexd reg,oreg,reg */
+ aclass(ctxt, &p.From)
+
+ if ctxt.Instoffset != 0 {
+ ctxt.Diag("offset must be zero in STREX")
+ }
+ o1 = 0x1a<<20 | 0xf90
+ o1 |= uint32(p.From.Reg) << 16
+ o1 |= uint32(p.Reg) << 0
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= (uint32(p.Scond) & C_SCOND) << 28
+
+ case 93: /* movb/movh/movhu addr,R -> ldrsb/ldrsh/ldrh */
+ o1 = omvl(ctxt, p, &p.From, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = olhr(ctxt, 0, REGTMP, int(p.To.Reg), int(p.Scond))
+ if p.As == AMOVB || p.As == AMOVBS {
+ o2 ^= 1<<5 | 1<<6
+ } else if p.As == AMOVH || p.As == AMOVHS {
+ o2 ^= (1 << 6)
+ }
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ case 94: /* movh/movhu R,addr -> strh */
+ o1 = omvl(ctxt, p, &p.To, REGTMP)
+
+ if !(o1 != 0) {
+ break
+ }
+ o2 = oshr(ctxt, int(p.From.Reg), 0, REGTMP, int(p.Scond))
+ if o.flag&LPCREL != 0 {
+ o3 = o2
+ o2 = oprrr(ctxt, AADD, int(p.Scond)) | REGTMP | REGPC<<16 | REGTMP<<12
+ }
+
+ case 95: /* PLD off(reg) */
+ o1 = 0xf5d0f000
+
+ o1 |= uint32(p.From.Reg) << 16
+ if p.From.Offset < 0 {
+ o1 &^= (1 << 23)
+ o1 |= uint32((-p.From.Offset) & 0xfff)
+ } else {
+
+ o1 |= uint32(p.From.Offset & 0xfff)
+ }
+
+ // This is supposed to be something that stops execution.
+ // It's not supposed to be reached, ever, but if it is, we'd
+ // like to be able to tell how we got there. Assemble as
+ // 0xf7fabcfd which is guaranteed to raise undefined instruction
+ // exception.
+ case 96: /* UNDEF */
+ o1 = 0xf7fabcfd
+
+ case 97: /* CLZ Rm, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= uint32(p.From.Reg)
+
+ case 98: /* MULW{T,B} Rs, Rm, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.To.Reg) << 16
+ o1 |= uint32(p.From.Reg) << 8
+ o1 |= uint32(p.Reg)
+
+ case 99: /* MULAW{T,B} Rs, Rm, Rn, Rd */
+ o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+
+ o1 |= uint32(p.To.Reg) << 12
+ o1 |= uint32(p.From.Reg) << 8
+ o1 |= uint32(p.Reg)
+ o1 |= uint32(p.To.Offset << 16)
+
+ // DATABUNDLE: BKPT $0x5be0, signify the start of NaCl data bundle;
+ // DATABUNDLEEND: zero width alignment marker
+ case 100:
+ if p.As == ADATABUNDLE {
+
+ o1 = 0xe125be70
+ }
+ break
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+ out[4] = o5
+ out[5] = o6
+ return
+}
+
+// mov assembles a data-processing MOV-class instruction whose source
+// operand is an immediate (already classified; the rotated-immediate
+// encoding is read from p.From.Offset) and returns the machine word.
+func mov(ctxt *obj.Link, p *obj.Prog) uint32 {
+	var o1 uint32
+	var rt int
+	var r int
+
+	aclass(ctxt, &p.From)
+	o1 = oprrr(ctxt, int(p.As), int(p.Scond))
+	o1 |= uint32(p.From.Offset)
+	rt = int(p.To.Reg)
+	r = int(p.Reg)
+	// No destination operand (compare-style forms): Rd field is 0.
+	if p.To.Type_ == D_NONE {
+		rt = 0
+	}
+	// MOVW/MVN have no first source register; for other ops an omitted
+	// middle register defaults to the destination.
+	if p.As == AMOVW || p.As == AMVN {
+		r = 0
+	} else if r == NREG {
+		r = rt
+	}
+	o1 |= uint32(r)<<16 | uint32(rt)<<12
+	return o1
+}
+
+// oprrr returns the base opcode word for register-form instruction a
+// (data-processing, multiply, or VFP) with condition and suffix bits
+// taken from sc.  Register fields (Rd/Rn/Rm/Rs) are ORed in by the
+// caller.  Unknown opcodes diagnose "bad rrr" and return 0.
+func oprrr(ctxt *obj.Link, a int, sc int) uint32 {
+	var o uint32
+
+	// Condition code occupies bits 28-31; .S sets the flag-update bit.
+	o = (uint32(sc) & C_SCOND) << 28
+	if sc&C_SBIT != 0 {
+		o |= 1 << 20
+	}
+	// .P/.W are addressing-mode suffixes; meaningless on dp instructions.
+	if sc&(C_PBIT|C_WBIT) != 0 {
+		ctxt.Diag(".nil/.W on dp instruction")
+	}
+	switch a {
+	case AMULU,
+		AMUL:
+		return o | 0x0<<21 | 0x9<<4
+	case AMULA:
+		return o | 0x1<<21 | 0x9<<4
+	case AMULLU:
+		return o | 0x4<<21 | 0x9<<4
+	case AMULL:
+		return o | 0x6<<21 | 0x9<<4
+	case AMULALU:
+		return o | 0x5<<21 | 0x9<<4
+	case AMULAL:
+		return o | 0x7<<21 | 0x9<<4
+	case AAND:
+		return o | 0x0<<21
+	case AEOR:
+		return o | 0x1<<21
+	case ASUB:
+		return o | 0x2<<21
+	case ARSB:
+		return o | 0x3<<21
+	case AADD:
+		return o | 0x4<<21
+	case AADC:
+		return o | 0x5<<21
+	case ASBC:
+		return o | 0x6<<21
+	case ARSC:
+		return o | 0x7<<21
+	// Compare-class ops always set flags (bit 20).
+	case ATST:
+		return o | 0x8<<21 | 1<<20
+	case ATEQ:
+		return o | 0x9<<21 | 1<<20
+	case ACMP:
+		return o | 0xa<<21 | 1<<20
+	case ACMN:
+		return o | 0xb<<21 | 1<<20
+	case AORR:
+		return o | 0xc<<21
+
+	case AMOVB,
+		AMOVH,
+		AMOVW:
+		return o | 0xd<<21
+	case ABIC:
+		return o | 0xe<<21
+	case AMVN:
+		return o | 0xf<<21
+	// Shifts are MOV with a shift-type field in bits 5-6.
+	case ASLL:
+		return o | 0xd<<21 | 0<<5
+	case ASRL:
+		return o | 0xd<<21 | 1<<5
+	case ASRA:
+		return o | 0xd<<21 | 2<<5
+	case ASWI:
+		return o | 0xf<<24
+
+	// VFP arithmetic: 0xa<<8 selects single precision, 0xb<<8 double.
+	case AADDD:
+		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 0<<4
+	case AADDF:
+		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 0<<4
+	case ASUBD:
+		return o | 0xe<<24 | 0x3<<20 | 0xb<<8 | 4<<4
+	case ASUBF:
+		return o | 0xe<<24 | 0x3<<20 | 0xa<<8 | 4<<4
+	case AMULD:
+		return o | 0xe<<24 | 0x2<<20 | 0xb<<8 | 0<<4
+	case AMULF:
+		return o | 0xe<<24 | 0x2<<20 | 0xa<<8 | 0<<4
+	case ADIVD:
+		return o | 0xe<<24 | 0x8<<20 | 0xb<<8 | 0<<4
+	case ADIVF:
+		return o | 0xe<<24 | 0x8<<20 | 0xa<<8 | 0<<4
+	case ASQRTD:
+		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xb<<8 | 0xc<<4
+	case ASQRTF:
+		return o | 0xe<<24 | 0xb<<20 | 1<<16 | 0xa<<8 | 0xc<<4
+	case AABSD:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 0xc<<4
+	case AABSF:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 0xc<<4
+	case ACMPD:
+		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xb<<8 | 0xc<<4
+	case ACMPF:
+		return o | 0xe<<24 | 0xb<<20 | 4<<16 | 0xa<<8 | 0xc<<4
+
+	case AMOVF:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xa<<8 | 4<<4
+	case AMOVD:
+		return o | 0xe<<24 | 0xb<<20 | 0<<16 | 0xb<<8 | 4<<4
+
+	case AMOVDF:
+		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 1<<8 // dtof
+	case AMOVFD:
+		return o | 0xe<<24 | 0xb<<20 | 7<<16 | 0xa<<8 | 0xc<<4 | 0<<8 // dtof
+
+	// Int<->float conversions: .U selects the unsigned variant.
+	case AMOVWF:
+		if sc&C_UBIT == 0 {
+
+			o |= 1 << 7 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 0<<8 // toint, double
+
+	case AMOVWD:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 7 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 0<<18 | 1<<8 // toint, double
+
+	case AMOVFW:
+		if sc&C_UBIT == 0 {
+
+			o |= 1 << 16 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 0<<8 | 1<<7 // toint, double, trunc
+
+	case AMOVDW:
+		if sc&C_UBIT == 0 {
+			o |= 1 << 16 /* signed */
+		}
+		return o | 0xe<<24 | 0xb<<20 | 8<<16 | 0xa<<8 | 4<<4 | 1<<18 | 1<<8 | 1<<7 // toint, double, trunc
+
+	// Internal pseudo-ops (op+AEND) used by the assembler macros above.
+	case AMOVWF + AEND: // copy WtoF
+		return o | 0xe<<24 | 0x0<<20 | 0xb<<8 | 1<<4
+
+	case AMOVFW + AEND: // copy FtoW
+		return o | 0xe<<24 | 0x1<<20 | 0xb<<8 | 1<<4
+
+	case ACMP + AEND: // cmp imm
+		return o | 0x3<<24 | 0x5<<20
+
+	// CLZ doesn't support .nil
+	case ACLZ:
+		return o&(0xf<<28) | 0x16f<<16 | 0xf1<<4
+
+	case AMULWT:
+		return o&(0xf<<28) | 0x12<<20 | 0xe<<4
+
+	case AMULWB:
+		return o&(0xf<<28) | 0x12<<20 | 0xa<<4
+
+	case AMULAWT:
+		return o&(0xf<<28) | 0x12<<20 | 0xc<<4
+
+	case AMULAWB:
+		return o&(0xf<<28) | 0x12<<20 | 0x8<<4
+
+	case ABL: // BLX REG
+		return o&(0xf<<28) | 0x12fff3<<4
+	}
+
+	ctxt.Diag("bad rrr %d", a)
+	prasm(ctxt.Curp)
+	return 0
+}
+
+// opbra returns the opcode word for branch instruction a with scond sc.
+// BL (and the Duff pseudo-calls) may carry a condition; for the
+// conditional-branch mnemonics the condition field encodes the branch
+// kind itself, so an explicit .COND suffix is diagnosed.
+func opbra(ctxt *obj.Link, a int, sc int) uint32 {
+	if sc&(C_SBIT|C_PBIT|C_WBIT) != 0 {
+		ctxt.Diag(".nil/.nil/.W on bra instruction")
+	}
+	sc &= C_SCOND
+	if a == ABL || a == ADUFFZERO || a == ADUFFCOPY {
+		return uint32(sc)<<28 | 0x5<<25 | 0x1<<24
+	}
+	// 0xe is the AL (always) condition, i.e. no suffix was given.
+	if sc != 0xe {
+		ctxt.Diag(".COND on bcond instruction")
+	}
+	switch a {
+	case ABEQ:
+		return 0x0<<28 | 0x5<<25
+	case ABNE:
+		return 0x1<<28 | 0x5<<25
+	case ABCS:
+		return 0x2<<28 | 0x5<<25
+	case ABHS:
+		return 0x2<<28 | 0x5<<25
+	case ABCC:
+		return 0x3<<28 | 0x5<<25
+	case ABLO:
+		return 0x3<<28 | 0x5<<25
+	case ABMI:
+		return 0x4<<28 | 0x5<<25
+	case ABPL:
+		return 0x5<<28 | 0x5<<25
+	case ABVS:
+		return 0x6<<28 | 0x5<<25
+	case ABVC:
+		return 0x7<<28 | 0x5<<25
+	case ABHI:
+		return 0x8<<28 | 0x5<<25
+	case ABLS:
+		return 0x9<<28 | 0x5<<25
+	case ABGE:
+		return 0xa<<28 | 0x5<<25
+	case ABLT:
+		return 0xb<<28 | 0x5<<25
+	case ABGT:
+		return 0xc<<28 | 0x5<<25
+	case ABLE:
+		return 0xd<<28 | 0x5<<25
+	case AB:
+		return 0xe<<28 | 0x5<<25
+	}
+
+	ctxt.Diag("bad bra %v", Aconv(a))
+	prasm(ctxt.Curp)
+	return 0
+}
+
+// olr returns an LDR (load word) encoding: load r from offset v off
+// base register b, with addressing-mode bits derived from sc.
+func olr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on LDR/STR instruction")
+	}
+	o = (uint32(sc) & C_SCOND) << 28
+	// Default addressing is pre-indexed (P=1), add-offset (U=1);
+	// the .P and .U suffixes invert those defaults.  .W sets writeback.
+	if !(sc&C_PBIT != 0) {
+		o |= 1 << 24
+	}
+	if !(sc&C_UBIT != 0) {
+		o |= 1 << 23
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21
+	}
+	o |= 1<<26 | 1<<20
+	// Negative offsets are encoded by flipping the U bit.
+	if v < 0 {
+		if sc&C_UBIT != 0 {
+			ctxt.Diag(".U on neg offset")
+		}
+		v = -v
+		o ^= 1 << 23
+	}
+
+	// The immediate offset field is 12 bits unsigned.
+	if v >= 1<<12 || v < 0 {
+		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
+	}
+	o |= uint32(v)
+	o |= uint32(b) << 16
+	o |= uint32(r) << 12
+	return o
+}
+
+// olhr returns an LDRH (load halfword) encoding: load r from offset v
+// off base register b.  The halfword form carries an 8-bit immediate
+// split into two nibbles (bits 0-3 and 8-11).
+func olhr(ctxt *obj.Link, v int32, b int, r int, sc int) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on LDRH/STRH instruction")
+	}
+	o = (uint32(sc) & C_SCOND) << 28
+	if !(sc&C_PBIT != 0) {
+		o |= 1 << 24
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21
+	}
+	o |= 1<<23 | 1<<20 | 0xb<<4
+	// Negative offsets flip the U (add/subtract) bit.
+	if v < 0 {
+		v = -v
+		o ^= 1 << 23
+	}
+
+	// Only 8 bits of immediate offset are available.
+	if v >= 1<<8 || v < 0 {
+		ctxt.Diag("literal span too large: %d (R%d)\n%v", v, b, ctxt.Printp)
+	}
+	o |= uint32(v)&0xf | (uint32(v)>>4)<<8 | 1<<22
+	o |= uint32(b) << 16
+	o |= uint32(r) << 12
+	return o
+}
+
+// osr returns a store (STR) encoding: the load form from olr with the
+// L bit (bit 20) cleared; non-word ops additionally set the B (byte)
+// bit.
+func osr(ctxt *obj.Link, a int, r int, v int32, b int, sc int) uint32 {
+	var o uint32
+
+	o = olr(ctxt, v, b, r, sc) ^ (1 << 20)
+	if a != AMOVW {
+		o |= 1 << 22
+	}
+	return o
+}
+
+// oshr returns a halfword store (STRH) encoding: the olhr load form
+// with the L bit (bit 20) cleared.
+func oshr(ctxt *obj.Link, r int, v int32, b int, sc int) uint32 {
+	var o uint32
+
+	o = olhr(ctxt, v, b, r, sc) ^ (1 << 20)
+	return o
+}
+
+// osrr returns a register-offset store encoding: olr with the I bit
+// set (register offset) and the L bit cleared (store).
+func osrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
+	return olr(ctxt, int32(i), b, r, sc) ^ (1<<25 | 1<<20)
+}
+
+// oshrr returns a register-offset halfword store encoding: olhr with
+// bit 22 cleared (register offset form) and the L bit cleared (store).
+func oshrr(ctxt *obj.Link, r int, i int, b int, sc int) uint32 {
+	return olhr(ctxt, int32(i), b, r, sc) ^ (1<<22 | 1<<20)
+}
+
+// olrr returns a register-offset load encoding: olr with the I bit
+// (bit 25) set to select register rather than immediate offset.
+func olrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
+	return olr(ctxt, int32(i), b, r, sc) ^ (1 << 25)
+}
+
+// olhrr returns a register-offset halfword load encoding: olhr with
+// bit 22 cleared to select the register-offset form.
+func olhrr(ctxt *obj.Link, i int, b int, r int, sc int) uint32 {
+	return olhr(ctxt, int32(i), b, r, sc) ^ (1 << 22)
+}
+
+// ofsr returns a VFP load/store (FLDR/FSTR) encoding for register r at
+// offset v off base b.  The offset must be word-aligned and is encoded
+// as an 8-bit word count; AMOVD selects double precision.
+func ofsr(ctxt *obj.Link, a int, r int, v int32, b int, sc int, p *obj.Prog) uint32 {
+	var o uint32
+
+	if sc&C_SBIT != 0 {
+		ctxt.Diag(".nil on FLDR/FSTR instruction")
+	}
+	o = (uint32(sc) & C_SCOND) << 28
+	if !(sc&C_PBIT != 0) {
+		o |= 1 << 24
+	}
+	if sc&C_WBIT != 0 {
+		o |= 1 << 21
+	}
+	o |= 6<<25 | 1<<24 | 1<<23 | 10<<8
+	// Negative offsets flip the U (add/subtract) bit.
+	if v < 0 {
+		v = -v
+		o ^= 1 << 23
+	}
+
+	if v&3 != 0 {
+		ctxt.Diag("odd offset for floating point op: %d\n%v", v, p)
+	} else if v >= 1<<10 || v < 0 {
+		ctxt.Diag("literal span too large: %d\n%v", v, p)
+	}
+	o |= (uint32(v) >> 2) & 0xFF
+	o |= uint32(b) << 16
+	o |= uint32(r) << 12
+
+	// Deliberate fallthrough chain: an unknown op diagnoses, then is
+	// encoded like AMOVD; AMOVD sets the double-precision bit and then
+	// shares the rest of the encoding with AMOVF.
+	switch a {
+	default:
+		ctxt.Diag("bad fst %v", Aconv(a))
+		fallthrough
+
+	case AMOVD:
+		o |= 1 << 8
+		fallthrough
+
+	case AMOVF:
+		break
+	}
+
+	return o
+}
+
+// omvl materializes the literal value of operand a into register dr.
+// If the value has no literal-pool entry (p.Pcond == nil) it must be
+// expressible as MVN of a rotated immediate; otherwise a PC-relative
+// load from the pool entry at p.Pcond is emitted.  Returns 0 on error.
+func omvl(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, dr int) uint32 {
+	var v int32
+	var o1 uint32
+	if !(p.Pcond != nil) {
+		aclass(ctxt, a)
+		v = immrot(uint32(^ctxt.Instoffset))
+		if v == 0 {
+			ctxt.Diag("missing literal")
+			prasm(p)
+			return 0
+		}
+
+		o1 = oprrr(ctxt, AMVN, int(p.Scond)&C_SCOND)
+		o1 |= uint32(v)
+		o1 |= uint32(dr) << 12
+	} else {
+
+		// PC-relative load; -8 accounts for the ARM pipeline offset.
+		v = int32(p.Pcond.Pc - p.Pc - 8)
+		o1 = olr(ctxt, v, REGPC, dr, int(p.Scond)&C_SCOND)
+	}
+
+	return o1
+}
+
+// chipzero5 reports whether floating-point constant e can be loaded as
+// an immediate zero on this chip: 0 if e is zero and GOARM >= 7
+// (VFPv3 vmov-immediate available), -1 otherwise.
+func chipzero5(ctxt *obj.Link, e float64) int {
+	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
+	if ctxt.Goarm < 7 || e != 0 {
+
+		return -1
+	}
+	return 0
+}
+
+// chipfloat5 returns the 8-bit VFPv3 vmov-immediate encoding of e
+// (sign bit a, inverted exponent bit b, and six exponent/mantissa bits
+// cd-efgh), or -1 if e is not representable or GOARM < 7.
+func chipfloat5(ctxt *obj.Link, e float64) int {
+	var n int
+	var h1 uint32
+	var l uint32
+	var h uint32
+	var ei uint64
+
+	// We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions.
+	if ctxt.Goarm < 7 {
+
+		goto no
+	}
+
+	ei = math.Float64bits(e)
+	l = uint32(ei)
+	h = uint32(ei >> 32)
+
+	// Only values whose low 48 mantissa bits are zero are encodable.
+	if l != 0 || h&0xffff != 0 {
+		goto no
+	}
+	// The exponent must be in the narrow range the immediate can express.
+	h1 = h & 0x7fc00000
+	if h1 != 0x40000000 && h1 != 0x3fc00000 {
+		goto no
+	}
+	n = 0
+
+	// sign bit (a)
+	if h&0x80000000 != 0 {
+
+		n |= 1 << 7
+	}
+
+	// exp sign bit (b)
+	if h1 == 0x3fc00000 {
+
+		n |= 1 << 6
+	}
+
+	// rest of exp and mantissa (cd-efgh)
+	n |= int((h >> 16) & 0x3f)
+
+	//print("match %.8lux %.8lux %d\n", l, h, n);
+	return n
+
+no:
+	return -1
+}
--- /dev/null
+// Inferno utils/5c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+	// STRINGSZ is the historical fixed buffer size from the C listing
+	// code; retained for compatibility with the translated sources.
+	STRINGSZ = 1000
+)
+
+// extra holds the printable condition-code suffixes, indexed by the
+// C_SCOND field of Prog.Scond (entry 14, AL/always, prints as "").
+var extra = []string{
+	".EQ",
+	".NE",
+	".CS",
+	".CC",
+	".MI",
+	".PL",
+	".VS",
+	".VC",
+	".HI",
+	".LS",
+	".GE",
+	".LT",
+	".GT",
+	".LE",
+	"",
+	".NV",
+}
+
+// bigP carries over from the C listing code; it is not referenced in
+// this file's visible portion — TODO confirm whether it can be removed.
+var bigP *obj.Prog
+
+// Pconv formats instruction p for listing output: pc, source line,
+// mnemonic with condition/addressing suffixes, and operands.  MOVM,
+// DATA and TEXT get special operand layouts.
+func Pconv(p *obj.Prog) string {
+	var str string
+	var sc string
+	var fp string
+
+	var a int
+	var s int
+
+	a = int(p.As)
+	s = int(p.Scond)
+	// Build the suffix string: condition code plus .S/.P/.W/.U flags.
+	sc = extra[s&C_SCOND]
+	if s&C_SBIT != 0 {
+		sc += ".S"
+	}
+	if s&C_PBIT != 0 {
+		sc += ".P"
+	}
+	if s&C_WBIT != 0 {
+		sc += ".W"
+	}
+	if s&C_UBIT != 0 { /* ambiguous with FBIT */
+		sc += ".U"
+	}
+	if a == AMOVM {
+		// MOVM prints a constant operand as a register list.
+		if p.From.Type_ == D_CONST {
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, RAconv(&p.From), Dconv(p, 0, &p.To))
+		} else if p.To.Type_ == D_CONST {
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), RAconv(&p.To))
+		} else {
+
+			str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		}
+	} else if a == ADATA {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+	} else if p.As == ATEXT {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+	} else if p.Reg == NREG {
+		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+	} else if p.From.Type_ != D_FREG {
+		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,R%d,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+	} else {
+
+		str = fmt.Sprintf("%.5d (%v)\t%v%s\t%v,F%d,%v", p.Pc, p.Line(), Aconv(a), sc, Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+	}
+
+	fp += str
+	return fp
+}
+
+// Aconv returns the mnemonic name of opcode a, or "???" if a is out of
+// range of the anames5 table.
+func Aconv(a int) string {
+	var s string
+	var fp string
+
+	s = "???"
+	if a >= AXXX && a < ALAST {
+		s = anames5[a]
+	}
+	fp += s
+	return fp
+}
+
+// Dconv formats address operand a of instruction p for listing output.
+// The flag parameter is unused here (kept for signature compatibility
+// with the other architectures' Dconv).
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var op string
+	var v int
+
+	switch a.Type_ {
+	default:
+		str = fmt.Sprintf("GOK-type(%d)", a.Type_)
+
+	case D_NONE:
+		str = ""
+		if a.Name != D_NONE || a.Reg != NREG || a.Sym != nil {
+			str = fmt.Sprintf("%v(R%d)(NONE)", Mconv(a), a.Reg)
+		}
+
+	case D_CONST:
+		if a.Reg != NREG {
+			str = fmt.Sprintf("$%v(R%d)", Mconv(a), a.Reg)
+		} else {
+
+			str = fmt.Sprintf("$%v", Mconv(a))
+		}
+
+	case D_CONST2:
+		str = fmt.Sprintf("$%d-%d", a.Offset, a.Offset2)
+
+	case D_SHIFT:
+		// Packed shift operand: bits 0-3 Rm, bit 4 register-vs-immediate
+		// shift count, bits 5-6 shift type, then Rs or the 5-bit amount.
+		// The slice of the operator string selects one of "<<", ">>",
+		// "->" (arithmetic) or "@>" (rotate).
+		v = int(a.Offset)
+		op = string("<<>>->@>"[((v>>5)&3)<<1:])
+		if v&(1<<4) != 0 {
+			str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+		} else {
+
+			str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+		}
+		if a.Reg != NREG {
+			str += fmt.Sprintf("(R%d)", a.Reg)
+		}
+
+	case D_OREG:
+		if a.Reg != NREG {
+			str = fmt.Sprintf("%v(R%d)", Mconv(a), a.Reg)
+		} else {
+
+			str = fmt.Sprintf("%v", Mconv(a))
+		}
+
+	case D_REG:
+		str = fmt.Sprintf("R%d", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(R%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_FREG:
+		str = fmt.Sprintf("F%d", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(R%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_PSR:
+		str = fmt.Sprintf("PSR")
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(PSR)(REG)", Mconv(a))
+		}
+
+	case D_BRANCH:
+		// Prefer symbol, then resolved branch target pc, then raw offset.
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+
+			str = fmt.Sprintf("%d(PC)", a.Offset) /*-pc*/
+		}
+
+	case D_FCONST:
+		str = fmt.Sprintf("$%.17g", a.U.Dval)
+
+	case D_SCONST:
+		str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+		break
+	}
+
+	fp += str
+	return fp
+}
+
+// RAconv formats a register-list operand (as used by MOVM), e.g.
+// "[R0,R1,R4]".  Operands that are not a plain constant register mask
+// are rendered as the placeholder "GOK-reglist".
+func RAconv(a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var i int
+	var v int
+
+	str = fmt.Sprintf("GOK-reglist")
+	switch a.Type_ {
+	case D_CONST,
+		D_CONST2:
+		if a.Reg != NREG {
+			break
+		}
+		if a.Sym != nil {
+			break
+		}
+		v = int(a.Offset)
+		str = ""
+		for i = 0; i < NREG; i++ {
+			if v&(1<<uint(i)) != 0 {
+				// The C original tested str[0] == '\0'; indexing an
+				// empty Go string panics, so test for "" instead.
+				if str == "" {
+					str += "[R"
+				} else {
+
+					str += ",R"
+				}
+				str += fmt.Sprintf("%d", i)
+			}
+		}
+
+		str += "]"
+	}
+
+	fp += str
+	return fp
+}
+
+// Rconv returns the printable name of general register r ("R0"..).
+func Rconv(r int) string {
+	var fp string
+
+	var str string
+
+	str = fmt.Sprintf("R%d", r)
+	fp += str
+	return fp
+}
+
+// DRconv returns the name of operand class a from the cnames5 table,
+// or "C_??" if a is out of range.
+func DRconv(a int) string {
+	var s string
+	var fp string
+
+	s = "C_??"
+	if a >= C_NONE && a <= C_NCLASS {
+		s = cnames5[a]
+	}
+	fp += s
+	return fp
+}
+
+// Mconv formats the symbol+offset part of address a according to its
+// name class: extern "sym+off(SB)", static "sym<>+off(SB)", automatic
+// "sym-off(SP)", parameter "sym+off(FP)", or a bare offset.
+func Mconv(a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var s *obj.LSym
+
+	s = a.Sym
+	if s == nil {
+		str = fmt.Sprintf("%d", int(a.Offset))
+		goto out
+	}
+
+	switch a.Name {
+	default:
+		str = fmt.Sprintf("GOK-name(%d)", a.Name)
+
+	case D_NONE:
+		str = fmt.Sprintf("%d", a.Offset)
+
+	case D_EXTERN:
+		str = fmt.Sprintf("%s+%d(SB)", s.Name, int(a.Offset))
+
+	case D_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", s.Name, int(a.Offset))
+
+	case D_AUTO:
+		str = fmt.Sprintf("%s-%d(SP)", s.Name, int(-a.Offset))
+
+	case D_PARAM:
+		str = fmt.Sprintf("%s+%d(FP)", s.Name, int(a.Offset))
+		break
+	}
+
+out:
+	fp += str
+	return fp
+}
--- /dev/null
+// Derived from Inferno utils/5c/swt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5c/swt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package arm
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+// zprg5 is the template used by prg to initialize new Progs: an AGOK
+// placeholder with no condition and all register fields set to NREG.
+var zprg5 = obj.Prog{
+	As: AGOK,
+	Scond: C_SCOND_NONE,
+	Reg: NREG,
+	From: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+	To: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+}
+
+// symtype returns the name class (D_EXTERN, D_STATIC, ...) of address a.
+func symtype(a *obj.Addr) int {
+	return int(a.Name)
+}
+
+// isdata reports whether p is a data-defining pseudo-op (DATA/GLOBL).
+func isdata(p *obj.Prog) bool {
+	return p.As == ADATA || p.As == AGLOBL
+}
+
+// iscall reports whether p is a call instruction (BL).
+func iscall(p *obj.Prog) bool {
+	return p.As == ABL
+}
+
+// datasize returns the size operand of a DATA pseudo-op, which this
+// architecture stores in the Reg field.
+func datasize(p *obj.Prog) int {
+	return int(p.Reg)
+}
+
+// textflag returns the flags operand of a TEXT pseudo-op, which this
+// architecture stores in the Reg field.
+func textflag(p *obj.Prog) int {
+	return int(p.Reg)
+}
+
+// settextflag stores f as the flags operand of TEXT pseudo-op p.
+func settextflag(p *obj.Prog, f int) {
+	p.Reg = uint8(f)
+}
+
+// progedit rewrites instruction p as it is read in: branches to symbols
+// become D_BRANCH, TLS MRC reads are replaced on pre-ARMv7 chips, float
+// constants move to memory, and shared-build TLS addressing is adjusted.
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+	// NOTE(review): tlsfallback is function-local, so the nil check
+	// below never caches across calls (the C original used a static);
+	// harmless, but the symbol is re-looked-up for every MRC.
+	var tlsfallback *obj.LSym
+
+	p.From.Class = 0
+	p.To.Class = 0
+
+	// Rewrite B/BL to symbol as D_BRANCH.
+	switch p.As {
+
+	case AB,
+		ABL,
+		ADUFFZERO,
+		ADUFFCOPY:
+		if p.To.Type_ == D_OREG && (p.To.Name == D_EXTERN || p.To.Name == D_STATIC) && p.To.Sym != nil {
+			p.To.Type_ = D_BRANCH
+		}
+		break
+	}
+
+	// Replace TLS register fetches on older ARM procesors.
+	switch p.As {
+
+	// Treat MRC 15, 0, <reg>, C13, C0, 3 specially.
+	case AMRC:
+		if p.To.Offset&0xffff0fff == 0xee1d0f70 {
+
+			// Because the instruction might be rewriten to a BL which returns in R0
+			// the register must be zero.
+			if p.To.Offset&0xf000 != 0 {
+
+				ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line())
+			}
+
+			if ctxt.Goarm < 7 {
+				// Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension.
+				if tlsfallback == nil {
+
+					tlsfallback = obj.Linklookup(ctxt, "runtime.read_tls_fallback", 0)
+				}
+
+				// MOVW LR, R11
+				p.As = AMOVW
+
+				p.From.Type_ = D_REG
+				p.From.Reg = REGLINK
+				p.To.Type_ = D_REG
+				p.To.Reg = REGTMP
+
+				// BL runtime.read_tls_fallback(SB)
+				p = obj.Appendp(ctxt, p)
+
+				p.As = ABL
+				p.To.Type_ = D_BRANCH
+				p.To.Sym = tlsfallback
+				p.To.Offset = 0
+
+				// MOVW R11, LR
+				p = obj.Appendp(ctxt, p)
+
+				p.As = AMOVW
+				p.From.Type_ = D_REG
+				p.From.Reg = REGTMP
+				p.To.Type_ = D_REG
+				p.To.Reg = REGLINK
+				break
+			}
+		}
+
+		// Otherwise, MRC/MCR instructions need no further treatment.
+		p.As = AWORD
+
+		break
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+
+	case AMOVF:
+		if p.From.Type_ == D_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			// Pool the constant under a content-derived symbol so equal
+			// constants share one rodata word.
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint32(ctxt, s, i32)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_OREG
+			p.From.Sym = s
+			p.From.Name = D_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AMOVD:
+		if p.From.Type_ == D_FCONST && chipfloat5(ctxt, p.From.U.Dval) < 0 && (chipzero5(ctxt, p.From.U.Dval) < 0 || p.Scond&C_SCOND != C_SCOND_NONE) {
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint64(ctxt, s, i64)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_OREG
+			p.From.Sym = s
+			p.From.Name = D_EXTERN
+			p.From.Offset = 0
+		}
+
+		break
+	}
+
+	if ctxt.Flag_shared != 0 {
+		// Shared libraries use R_ARM_TLS_IE32 instead of
+		// R_ARM_TLS_LE32, replacing the link time constant TLS offset in
+		// runtime.tlsg with an address to a GOT entry containing the
+		// offset. Rewrite $runtime.tlsg(SB) to runtime.tlsg(SB) to
+		// compensate.
+		if ctxt.Tlsg == nil {
+
+			ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+		}
+
+		if p.From.Type_ == D_CONST && p.From.Name == D_EXTERN && p.From.Sym == ctxt.Tlsg {
+			p.From.Type_ = D_OREG
+		}
+		if p.To.Type_ == D_CONST && p.To.Name == D_EXTERN && p.To.Sym == ctxt.Tlsg {
+			p.To.Type_ = D_OREG
+		}
+	}
+}
+
+// prg allocates a new Prog initialized from the zprg5 template.
+func prg() *obj.Prog {
+	var p *obj.Prog
+
+	p = new(obj.Prog)
+	*p = zprg5
+	return p
+}
+
+// Prog.mark bit flags.  LEAF on a TEXT prog marks a function that makes
+// no calls; FOLL and LABEL are used by the instruction-layout passes
+// (not visible in this file) — TODO confirm exact consumers.
+const (
+	FOLL = 1 << 0
+	LABEL = 1 << 1
+	LEAF = 1 << 2
+)
+
+// linkcase scans forward from CASE instruction casep to its run of
+// BCASE table entries and points each entry's Pcrel back at casep,
+// so the case table can be relocated relative to the CASE.
+func linkcase(casep *obj.Prog) {
+	var p *obj.Prog
+
+	for p = casep; p != nil; p = p.Link {
+		if p.As == ABCASE {
+			for ; p != nil && p.As == ABCASE; p = p.Link {
+				p.Pcrel = casep
+			}
+			break
+		}
+	}
+}
+
+// nocache5 clears the cached optab index and operand classes of p so
+// the instruction is re-classified and re-encoded after modification.
+func nocache5(p *obj.Prog) {
+	p.Optab = 0
+	p.From.Class = 0
+	p.To.Class = 0
+}
+
+func addstacksplit(ctxt *obj.Link, cursym *obj.LSym) {
+ var p *obj.Prog
+ var pl *obj.Prog
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var q *obj.Prog
+ var q1 *obj.Prog
+ var q2 *obj.Prog
+ var o int
+ var autosize int32
+ var autoffset int32
+
+ autosize = 0
+
+ if ctxt.Symmorestack[0] == nil {
+ ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+ ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+ }
+
+ q = nil
+
+ ctxt.Cursym = cursym
+
+ if cursym.Text == nil || cursym.Text.Link == nil {
+ return
+ }
+
+ softfloat(ctxt, cursym)
+
+ p = cursym.Text
+ autoffset = int32(p.To.Offset)
+ if autoffset < 0 {
+ autoffset = 0
+ }
+ cursym.Locals = autoffset
+ cursym.Args = p.To.Offset2
+
+ if ctxt.Debugzerostack != 0 {
+ if autoffset != 0 && !(p.Reg&obj.NOSPLIT != 0) {
+ // MOVW $4(R13), R1
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.From.Type_ = D_CONST
+ p.From.Reg = 13
+ p.From.Offset = 4
+ p.To.Type_ = D_REG
+ p.To.Reg = 1
+
+ // MOVW $n(R13), R2
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.From.Type_ = D_CONST
+ p.From.Reg = 13
+ p.From.Offset = 4 + int64(autoffset)
+ p.To.Type_ = D_REG
+ p.To.Reg = 2
+
+ // MOVW $0, R3
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.From.Type_ = D_CONST
+ p.From.Offset = 0
+ p.To.Type_ = D_REG
+ p.To.Reg = 3
+
+ // L:
+ // MOVW.nil R3, 0(R1) +4
+ // CMP R1, R2
+ // BNE L
+ pl = obj.Appendp(ctxt, p)
+ p = pl
+
+ p.As = AMOVW
+ p.From.Type_ = D_REG
+ p.From.Reg = 3
+ p.To.Type_ = D_OREG
+ p.To.Reg = 1
+ p.To.Offset = 4
+ p.Scond |= C_PBIT
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMP
+ p.From.Type_ = D_REG
+ p.From.Reg = 1
+ p.Reg = 2
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ABNE
+ p.To.Type_ = D_BRANCH
+ p.Pcond = pl
+ }
+ }
+
+ /*
+ * find leaf subroutines
+ * strip NOPs
+ * expand RET
+ * expand BECOME pseudo
+ */
+ for p = cursym.Text; p != nil; p = p.Link {
+
+ switch p.As {
+ case ACASE:
+ if ctxt.Flag_shared != 0 {
+ linkcase(p)
+ }
+
+ case ATEXT:
+ p.Mark |= LEAF
+
+ case ARET:
+ break
+
+ case ADIV,
+ ADIVU,
+ AMOD,
+ AMODU:
+ q = p
+ if ctxt.Sym_div == nil {
+ initdiv(ctxt)
+ }
+ cursym.Text.Mark &^= LEAF
+ continue
+
+ case ANOP:
+ q1 = p.Link
+ q.Link = q1 /* q is non-nop */
+ if q1 != nil {
+ q1.Mark |= p.Mark
+ }
+ continue
+
+ case ABL,
+ ABX,
+ ADUFFZERO,
+ ADUFFCOPY:
+ cursym.Text.Mark &^= LEAF
+ fallthrough
+
+ case ABCASE,
+ AB,
+ ABEQ,
+ ABNE,
+ ABCS,
+ ABHS,
+ ABCC,
+ ABLO,
+ ABMI,
+ ABPL,
+ ABVS,
+ ABVC,
+ ABHI,
+ ABLS,
+ ABGE,
+ ABLT,
+ ABGT,
+ ABLE:
+ q1 = p.Pcond
+ if q1 != nil {
+ for q1.As == ANOP {
+ q1 = q1.Link
+ p.Pcond = q1
+ }
+ }
+
+ break
+ }
+
+ q = p
+ }
+
+ for p = cursym.Text; p != nil; p = p.Link {
+ o = int(p.As)
+ switch o {
+ case ATEXT:
+ autosize = int32(p.To.Offset + 4)
+ if autosize <= 4 {
+ if cursym.Text.Mark&LEAF != 0 {
+ p.To.Offset = -4
+ autosize = 0
+ }
+ }
+
+ if !(autosize != 0) && !(cursym.Text.Mark&LEAF != 0) {
+ if ctxt.Debugvlog != 0 {
+ fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
+ obj.Bflush(ctxt.Bso)
+ }
+
+ cursym.Text.Mark |= LEAF
+ }
+
+ if cursym.Text.Mark&LEAF != 0 {
+ cursym.Leaf = 1
+ if !(autosize != 0) {
+ break
+ }
+ }
+
+ if !(p.Reg&obj.NOSPLIT != 0) {
+ p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.Reg&obj.NEEDCTXT != 0))) // emit split check
+ }
+
+ // MOVW.W R14,$-autosize(SP)
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.Scond |= C_WBIT
+ p.From.Type_ = D_REG
+ p.From.Reg = REGLINK
+ p.To.Type_ = D_OREG
+ p.To.Offset = int64(-autosize)
+ p.To.Reg = REGSP
+ p.Spadj = autosize
+
+ if cursym.Text.Reg&obj.WRAPPER != 0 {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVW g_panic(g), R1
+ // CMP $0, R1
+ // B.EQ end
+ // MOVW panic_argp(R1), R2
+ // ADD $(autosize+4), R13, R3
+ // CMP R2, R3
+ // B.NE end
+ // ADD $4, R13, R4
+ // MOVW R4, panic_argp(R1)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an ARM NOP: it encodes to 0 instruction bytes.
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.From.Type_ = D_OREG
+ p.From.Reg = REGG
+ p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+ p.To.Type_ = D_REG
+ p.To.Reg = 1
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMP
+ p.From.Type_ = D_CONST
+ p.From.Offset = 0
+ p.Reg = 1
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ABEQ
+ p.To.Type_ = D_BRANCH
+ p1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVW
+ p.From.Type_ = D_OREG
+ p.From.Reg = 1
+ p.From.Offset = 0 // Panic.argp
+ p.To.Type_ = D_REG
+ p.To.Reg = 2
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AADD
+ p.From.Type_ = D_CONST
+ p.From.Offset = int64(autosize) + 4
+ p.Reg = 13
+ p.To.Type_ = D_REG
+ p.To.Reg = 3
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMP
+ p.From.Type_ = D_REG
+ p.From.Reg = 2
+ p.Reg = 3
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ABNE
+ p.To.Type_ = D_BRANCH
+ p2 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AADD
+ p.From.Type_ = D_CONST
+ p.From.Offset = 4
+ p.Reg = 13
+ p.To.Type_ = D_REG
+ p.To.Reg = 4
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVW
+ p.From.Type_ = D_REG
+ p.From.Reg = 4
+ p.To.Type_ = D_OREG
+ p.To.Reg = 1
+ p.To.Offset = 0 // Panic.argp
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ANOP
+ p1.Pcond = p
+ p2.Pcond = p
+ }
+
+ case ARET:
+ nocache5(p)
+ if cursym.Text.Mark&LEAF != 0 {
+ if !(autosize != 0) {
+ p.As = AB
+ p.From = zprg5.From
+ if p.To.Sym != nil { // retjmp
+ p.To.Type_ = D_BRANCH
+ } else {
+
+ p.To.Type_ = D_OREG
+ p.To.Offset = 0
+ p.To.Reg = REGLINK
+ }
+
+ break
+ }
+ }
+
+ p.As = AMOVW
+ p.Scond |= C_PBIT
+ p.From.Type_ = D_OREG
+ p.From.Offset = int64(autosize)
+ p.From.Reg = REGSP
+ p.To.Type_ = D_REG
+ p.To.Reg = REGPC
+
+ // If there are instructions following
+ // this ARET, they come from a branch
+ // with the same stackframe, so no spadj.
+ if p.To.Sym != nil { // retjmp
+ p.To.Reg = REGLINK
+ q2 = obj.Appendp(ctxt, p)
+ q2.As = AB
+ q2.To.Type_ = D_BRANCH
+ q2.To.Sym = p.To.Sym
+ p.To.Sym = nil
+ p = q2
+ }
+
+ case AADD:
+ if p.From.Type_ == D_CONST && p.From.Reg == NREG && p.To.Type_ == D_REG && p.To.Reg == REGSP {
+ p.Spadj = int32(-p.From.Offset)
+ }
+
+ case ASUB:
+ if p.From.Type_ == D_CONST && p.From.Reg == NREG && p.To.Type_ == D_REG && p.To.Reg == REGSP {
+ p.Spadj = int32(p.From.Offset)
+ }
+
+ case ADIV,
+ ADIVU,
+ AMOD,
+ AMODU:
+ if ctxt.Debugdivmod != 0 {
+ break
+ }
+ if p.From.Type_ != D_REG {
+ break
+ }
+ if p.To.Type_ != D_REG {
+ break
+ }
+ q1 = p
+
+ /* MOV a,4(SP) */
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.Lineno = q1.Lineno
+ p.From.Type_ = D_REG
+ p.From.Reg = q1.From.Reg
+ p.To.Type_ = D_OREG
+ p.To.Reg = REGSP
+ p.To.Offset = 4
+
+ /* MOV b,REGTMP */
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.Lineno = q1.Lineno
+ p.From.Type_ = D_REG
+ p.From.Reg = int8(q1.Reg)
+ if q1.Reg == NREG {
+ p.From.Reg = q1.To.Reg
+ }
+ p.To.Type_ = D_REG
+ p.To.Reg = REGTMP
+ p.To.Offset = 0
+
+ /* CALL appropriate */
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ABL
+ p.Lineno = q1.Lineno
+ p.To.Type_ = D_BRANCH
+ switch o {
+ case ADIV:
+ p.To.Sym = ctxt.Sym_div
+
+ case ADIVU:
+ p.To.Sym = ctxt.Sym_divu
+
+ case AMOD:
+ p.To.Sym = ctxt.Sym_mod
+
+ case AMODU:
+ p.To.Sym = ctxt.Sym_modu
+ break
+ }
+
+ /* MOV REGTMP, b */
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVW
+ p.Lineno = q1.Lineno
+ p.From.Type_ = D_REG
+ p.From.Reg = REGTMP
+ p.From.Offset = 0
+ p.To.Type_ = D_REG
+ p.To.Reg = q1.To.Reg
+
+ /* ADD $8,SP */
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AADD
+ p.Lineno = q1.Lineno
+ p.From.Type_ = D_CONST
+ p.From.Reg = NREG
+ p.From.Offset = 8
+ p.Reg = NREG
+ p.To.Type_ = D_REG
+ p.To.Reg = REGSP
+ p.Spadj = -8
+
+ /* Keep saved LR at 0(SP) after SP change. */
+ /* MOVW 0(SP), REGTMP; MOVW REGTMP, -8!(SP) */
+ /* TODO: Remove SP adjustments; see issue 6699. */
+ q1.As = AMOVW
+
+ q1.From.Type_ = D_OREG
+ q1.From.Reg = REGSP
+ q1.From.Offset = 0
+ q1.Reg = NREG
+ q1.To.Type_ = D_REG
+ q1.To.Reg = REGTMP
+
+ /* SUB $8,SP */
+ q1 = obj.Appendp(ctxt, q1)
+
+ q1.As = AMOVW
+ q1.From.Type_ = D_REG
+ q1.From.Reg = REGTMP
+ q1.Reg = NREG
+ q1.To.Type_ = D_OREG
+ q1.To.Reg = REGSP
+ q1.To.Offset = -8
+ q1.Scond |= C_WBIT
+ q1.Spadj = 8
+
+ case AMOVW:
+ if (p.Scond&C_WBIT != 0) && p.To.Type_ == D_OREG && p.To.Reg == REGSP {
+ p.Spadj = int32(-p.To.Offset)
+ }
+ if (p.Scond&C_PBIT != 0) && p.From.Type_ == D_OREG && p.From.Reg == REGSP && p.To.Reg != REGPC {
+ p.Spadj = int32(-p.From.Offset)
+ }
+ if p.From.Type_ == D_CONST && p.From.Reg == REGSP && p.To.Type_ == D_REG && p.To.Reg == REGSP {
+ p.Spadj = int32(-p.From.Offset)
+ }
+ break
+ }
+ }
+}
+
+// softfloat rewrites the instruction stream of cursym for ARM cores
+// without hardware floating point (ctxt.Goarm <= 5): before each run of
+// floating-point instructions it inserts a BL to the _sfloat software
+// emulator. A new BL is emitted whenever FP code starts fresh or a
+// branch target (LABEL) is reached, since control may arrive there
+// without having passed the previous _sfloat call.
+func softfloat(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var next *obj.Prog
+	var symsfloat *obj.LSym
+	var wasfloat int
+
+	if ctxt.Goarm > 5 {
+		return
+	}
+
+	symsfloat = obj.Linklookup(ctxt, "_sfloat", 0)
+
+	wasfloat = 0
+	// First pass: mark every branch target so the second pass knows
+	// where a fresh _sfloat call is required.
+	for p = cursym.Text; p != nil; p = p.Link {
+		if p.Pcond != nil {
+			p.Pcond.Mark |= LABEL
+		}
+	}
+	// Second pass: classify each instruction as soft (floating point)
+	// or not, and insert BL _sfloat at the start of each soft run.
+	for p = cursym.Text; p != nil; p = p.Link {
+		switch p.As {
+		case AMOVW:
+			// AMOVW is FP-related only when it touches an F register.
+			if p.To.Type_ == D_FREG || p.From.Type_ == D_FREG {
+				goto soft
+			}
+			goto notsoft
+
+		case AMOVWD,
+			AMOVWF,
+			AMOVDW,
+			AMOVFW,
+			AMOVFD,
+			AMOVDF,
+			AMOVF,
+			AMOVD,
+			ACMPF,
+			ACMPD,
+			AADDF,
+			AADDD,
+			ASUBF,
+			ASUBD,
+			AMULF,
+			AMULD,
+			ADIVF,
+			ADIVD,
+			ASQRTF,
+			ASQRTD,
+			AABSF,
+			AABSD:
+			goto soft
+
+		default:
+			goto notsoft
+		}
+
+	soft:
+		if !(wasfloat != 0) || (p.Mark&LABEL != 0) {
+			next = ctxt.Arch.Prg()
+			*next = *p
+
+			// BL _sfloat(SB): overwrite p with the call and re-link
+			// the saved copy of the original instruction after it.
+			*p = zprg5
+
+			p.Link = next
+			p.As = ABL
+			p.To.Type_ = D_BRANCH
+			p.To.Sym = symsfloat
+			p.Lineno = next.Lineno
+
+			p = next
+			wasfloat = 1
+		}
+
+		continue
+
+	notsoft:
+		wasfloat = 0
+	}
+}
+
+// stacksplit appends the stack-overflow prologue check after p for a
+// function with the given frame size and returns the last instruction
+// appended. The emitted code compares SP against g's stack guard
+// (choosing one of three comparison sequences by frame size), and on
+// overflow saves LR into R3 and calls runtime.morestack. The trailing
+// BLS back to the function start restarts the function on the new stack.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+	// MOVW g_stackguard(g), R1
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVW
+	p.From.Type_ = D_OREG
+	p.From.Reg = REGG
+	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+	if ctxt.Cursym.Cfunc != 0 {
+		// C functions check against the C stack guard instead.
+		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+	}
+	p.To.Type_ = D_REG
+	p.To.Reg = 1
+
+	if framesize <= obj.StackSmall {
+		// small stack: SP < stackguard
+		//	CMP	stackguard, SP
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type_ = D_REG
+		p.From.Reg = 1
+		p.Reg = REGSP
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize < stackguard-StackSmall
+		//	MOVW $-framesize(SP), R2
+		//	CMP stackguard, R2
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVW
+		p.From.Type_ = D_CONST
+		p.From.Reg = REGSP
+		p.From.Offset = int64(-framesize)
+		p.To.Type_ = D_REG
+		p.To.Reg = 2
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMP
+		p.From.Type_ = D_REG
+		p.From.Reg = 1
+		p.Reg = 2
+	} else {
+
+		// Such a large stack we need to protect against wraparound
+		// if SP is close to zero.
+		//	SP-stackguard+StackGuard < framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//	CMP $StackPreempt, R1
+		//	MOVW.NE $StackGuard(SP), R2
+		//	SUB.NE R1, R2
+		//	MOVW.NE $(framesize+(StackGuard-StackSmall)), R3
+		//	CMP.NE R3, R2
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type_ = D_CONST
+		// StackPreempt is a sentinel guard value; truncate to 32 bits
+		// for the ARM immediate comparison.
+		p.From.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
+		p.Reg = 1
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVW
+		p.From.Type_ = D_CONST
+		p.From.Reg = REGSP
+		p.From.Offset = obj.StackGuard
+		p.To.Type_ = D_REG
+		p.To.Reg = 2
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASUB
+		p.From.Type_ = D_REG
+		p.From.Reg = 1
+		p.To.Type_ = D_REG
+		p.To.Reg = 2
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVW
+		p.From.Type_ = D_CONST
+		p.From.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+		p.To.Type_ = D_REG
+		p.To.Reg = 3
+		p.Scond = C_SCOND_NE
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMP
+		p.From.Type_ = D_REG
+		p.From.Reg = 3
+		p.Reg = 2
+		p.Scond = C_SCOND_NE
+	}
+
+	// MOVW.LS	R14, R3
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVW
+	p.Scond = C_SCOND_LS
+	p.From.Type_ = D_REG
+	p.From.Reg = REGLINK
+	p.To.Type_ = D_REG
+	p.To.Reg = 3
+
+	// BL.LS		runtime.morestack(SB) // modifies LR, returns with LO still asserted
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABL
+	p.Scond = C_SCOND_LS
+	p.To.Type_ = D_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+
+		p.To.Sym = ctxt.Symmorestack[noctxt]
+	}
+
+	// BLS	start
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABLS
+	p.To.Type_ = D_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	return p
+}
+
+// initdiv lazily resolves the runtime software divide/modulo helper
+// symbols used to expand ADIV/ADIVU/AMOD/AMODU. It is idempotent:
+// a non-nil Sym_div means all four symbols are already set.
+func initdiv(ctxt *obj.Link) {
+	if ctxt.Sym_div != nil {
+		return
+	}
+	ctxt.Sym_div = obj.Linklookup(ctxt, "_div", 0)
+	ctxt.Sym_divu = obj.Linklookup(ctxt, "_divu", 0)
+	ctxt.Sym_mod = obj.Linklookup(ctxt, "_mod", 0)
+	ctxt.Sym_modu = obj.Linklookup(ctxt, "_modu", 0)
+}
+
+// follow reorders s's instruction list into final layout order by
+// chasing branches with xfol, threading the result onto a fresh dummy
+// head instruction and then replacing s.Text with the new chain.
+func follow(ctxt *obj.Link, s *obj.LSym) {
+	var firstp *obj.Prog
+	var lastp *obj.Prog
+
+	ctxt.Cursym = s
+
+	// firstp is a sentinel head; the real list starts at firstp.Link.
+	firstp = ctxt.Arch.Prg()
+	lastp = firstp
+	xfol(ctxt, s.Text, &lastp)
+	lastp.Link = nil
+	s.Text = firstp.Link
+}
+
+// relinv returns the ARM conditional-branch opcode with the inverted
+// condition of a (e.g. BEQ <-> BNE). It aborts on any opcode that is
+// not an invertible conditional branch.
+func relinv(a int) int {
+	switch a {
+	case ABEQ:
+		return ABNE
+	case ABNE:
+		return ABEQ
+	case ABCS:
+		return ABCC
+	case ABHS:
+		return ABLO
+	case ABCC:
+		return ABCS
+	case ABLO:
+		return ABHS
+	case ABMI:
+		return ABPL
+	case ABPL:
+		return ABMI
+	case ABVS:
+		return ABVC
+	case ABVC:
+		return ABVS
+	case ABHI:
+		return ABLS
+	case ABLS:
+		return ABHI
+	case ABGE:
+		return ABLT
+	case ABLT:
+		return ABGE
+	case ABGT:
+		return ABLE
+	case ABLE:
+		return ABGT
+	}
+
+	log.Fatalf("unknown relation: %s", anames5[a])
+	return 0
+}
+
+// xfol lays out the instruction chain starting at p, appending each
+// instruction onto *last in final order. It follows unconditional
+// branches to their targets (eliminating the branch when the target has
+// not been placed yet), duplicates short already-placed tails to avoid
+// extra jumps, and inverts conditional branches (via relinv) so the
+// fall-through path is the one laid out next. FOLL marks instructions
+// already placed.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var r *obj.Prog
+	var a int
+	var i int
+
+loop:
+	if p == nil {
+		return
+	}
+	a = int(p.As)
+	if a == AB {
+		// Follow an unconditional branch directly to its target when
+		// the target has not been laid out yet; the AB disappears.
+		q = p.Pcond
+		if q != nil && q.As != ATEXT {
+			p.Mark |= FOLL
+			p = q
+			if !(p.Mark&FOLL != 0) {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark&FOLL != 0 {
+		// Target already placed: look up to 4 instructions ahead for a
+		// short tail worth duplicating instead of emitting a jump.
+		i = 0
+		q = p
+		for ; i < 4; (func() { i++; q = q.Link })() {
+			if q == *last || q == nil {
+				break
+			}
+			a = int(q.As)
+			if a == ANOP {
+				// NOPs are free; don't count them against the window.
+				i--
+				continue
+			}
+
+			if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
+				goto copy
+			}
+			if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
+				continue
+			}
+			if a != ABEQ && a != ABNE {
+				continue
+			}
+
+		copy:
+			// Duplicate instructions p..q onto the layout chain.
+			for {
+				r = ctxt.Arch.Prg()
+				*r = *p
+				if !(r.Mark&FOLL != 0) {
+					fmt.Printf("can't happen 1\n")
+				}
+				r.Mark |= FOLL
+				if p != q {
+					p = p.Link
+					(*last).Link = r
+					*last = r
+					continue
+				}
+
+				(*last).Link = r
+				*last = r
+				if a == AB || (a == ARET && q.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
+					return
+				}
+				// Final duplicated instruction is BEQ/BNE: invert it so
+				// the old fall-through becomes the branch target.
+				r.As = ABNE
+				if a == ABNE {
+					r.As = ABEQ
+				}
+				r.Pcond = p.Link
+				r.Link = p.Pcond
+				if !(r.Link.Mark&FOLL != 0) {
+					xfol(ctxt, r.Link, last)
+				}
+				if !(r.Pcond.Mark&FOLL != 0) {
+					fmt.Printf("can't happen 2\n")
+				}
+				return
+			}
+		}
+
+		// No tail worth copying: emit an explicit AB to the placed target.
+		a = AB
+		q = ctxt.Arch.Prg()
+		q.As = int16(a)
+		q.Lineno = p.Lineno
+		q.To.Type_ = D_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	p.Mark |= FOLL
+	(*last).Link = p
+	*last = p
+	if a == AB || (a == ARET && p.Scond == C_SCOND_NONE) || a == ARFE || a == AUNDEF {
+		return
+	}
+
+	if p.Pcond != nil {
+		if a != ABL && a != ABX && p.Link != nil {
+			// Conditional branch: if the fall-through is already placed
+			// but the target is not, invert the condition so the target
+			// becomes the fall-through.
+			q = obj.Brchain(ctxt, p.Link)
+			if a != ATEXT && a != ABCASE {
+				if q != nil && (q.Mark&FOLL != 0) {
+					p.As = int16(relinv(a))
+					p.Link = p.Pcond
+					p.Pcond = q
+				}
+			}
+
+			xfol(ctxt, p.Link, last)
+			q = obj.Brchain(ctxt, p.Pcond)
+			if q == nil {
+				q = p.Pcond
+			}
+			if q.Mark&FOLL != 0 {
+				p.Pcond = q
+				return
+			}
+
+			p = q
+			goto loop
+		}
+	}
+
+	p = p.Link
+	goto loop
+}
+
+// Linkarm is the obj.LinkArch description of the ARM architecture
+// (thechar '5'): the per-arch function hooks plus the ARM values of the
+// generic addressing-mode (D_*) and pseudo-instruction (A*) constants
+// that arch-independent obj code refers to.
+var Linkarm = obj.LinkArch{
+	ByteOrder: binary.LittleEndian,
+	Pconv: Pconv,
+	Name: "arm",
+	Thechar: '5',
+	Endian: obj.LittleEndian,
+	Addstacksplit: addstacksplit,
+	Assemble: span5,
+	Datasize: datasize,
+	Follow: follow,
+	Iscall: iscall,
+	Isdata: isdata,
+	Prg: prg,
+	Progedit: progedit,
+	Settextflag: settextflag,
+	Symtype: symtype,
+	Textflag: textflag,
+	Minlc: 4,
+	Ptrsize: 4,
+	Regsize: 4,
+	D_ADDR: D_ADDR,
+	D_AUTO: D_AUTO,
+	D_BRANCH: D_BRANCH,
+	D_CONST: D_CONST,
+	D_EXTERN: D_EXTERN,
+	D_FCONST: D_FCONST,
+	D_NONE: D_NONE,
+	D_PARAM: D_PARAM,
+	D_SCONST: D_SCONST,
+	D_STATIC: D_STATIC,
+	D_OREG: D_OREG,
+	ACALL: ABL,
+	ADATA: ADATA,
+	AEND: AEND,
+	AFUNCDATA: AFUNCDATA,
+	AGLOBL: AGLOBL,
+	AJMP: AB,
+	ANOP: ANOP,
+	APCDATA: APCDATA,
+	ARET: ARET,
+	ATEXT: ATEXT,
+	ATYPE: ATYPE,
+	AUSEFIELD: AUSEFIELD,
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Dummy placeholder for the real obj package.
+package arm
-package obj
-
-var Exported bool
+// bool2int returns 1 if b is true and 0 otherwise.
+func bool2int(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import (
+ "log"
+ "math"
+)
+
+// mangle aborts the process, reporting that input file contained
+// malformed (out-of-range) data directives. It never returns.
+func mangle(file string) {
+
+	log.Fatalf("%s: mangled input file", file)
+}
+
+// Symgrow grows symbol s's data buffer to hold at least lsiz bytes,
+// zero-filling any newly added space and preserving existing contents.
+// It is a no-op if s.P is already long enough.
+func Symgrow(ctxt *Link, s *LSym, lsiz int64) {
+	var siz int
+	siz = int(lsiz)
+	if int64(siz) != lsiz {
+		// lsiz does not fit in int on this platform. Use Fatalf, not
+		// Fatal: the message contains a %d verb that Fatal would print
+		// literally instead of formatting.
+		log.Fatalf("Symgrow size %d too long", lsiz)
+	}
+	if len(s.P) >= siz {
+		return
+	}
+	// Grow via append so capacity expansion copies existing bytes and
+	// the tail is zero-initialized.
+	for cap(s.P) < siz {
+		s.P = append(s.P[:cap(s.P)], 0)
+	}
+	s.P = s.P[:siz]
+}
+
+// savedata records one DATA directive p into symbol s: it validates the
+// offset/size, grows s, and writes the value (float, string, integer,
+// or address relocation) at the given offset using the target
+// architecture's byte order and addressing-mode constants. pn is the
+// input file name, used only for the mangle error report.
+func savedata(ctxt *Link, s *LSym, p *Prog, pn string) {
+	off := int32(p.From.Offset)
+	siz := int32(ctxt.Arch.Datasize(p))
+	if off < 0 || siz < 0 || off >= 1<<30 || siz >= 100 {
+		mangle(pn)
+	}
+	if ctxt.Enforce_data_order != 0 && off < int32(len(s.P)) {
+		ctxt.Diag("data out of order (already have %d)\n%P", len(s.P), p)
+	}
+	Symgrow(ctxt, s, int64(off+siz))
+
+	switch int(p.To.Type_) {
+	default:
+		ctxt.Diag("bad data: %P", p)
+
+	case ctxt.Arch.D_FCONST:
+		switch siz {
+		default:
+			ctxt.Diag("unexpected %d-byte floating point constant", siz)
+
+		case 4:
+			flt := math.Float32bits(float32(p.To.U.Dval))
+			ctxt.Arch.ByteOrder.PutUint32(s.P[off:], flt)
+
+		case 8:
+			flt := math.Float64bits(p.To.U.Dval)
+			ctxt.Arch.ByteOrder.PutUint64(s.P[off:], flt)
+		}
+
+	case ctxt.Arch.D_SCONST:
+		copy(s.P[off:off+siz], p.To.U.Sval)
+
+	case ctxt.Arch.D_CONST, ctxt.Arch.D_ADDR:
+		// A symbol reference (or any D_ADDR) becomes a relocation;
+		// a bare constant is stored directly.
+		if p.To.Sym != nil || int(p.To.Type_) == ctxt.Arch.D_ADDR {
+			r := Addrel(s)
+			r.Off = off
+			r.Siz = uint8(siz)
+			r.Sym = p.To.Sym
+			r.Type_ = R_ADDR
+			r.Add = p.To.Offset
+			break
+		}
+		o := p.To.Offset
+		switch siz {
+		default:
+			ctxt.Diag("unexpected %d-byte integer constant", siz)
+		case 1:
+			s.P[off] = byte(o)
+		case 2:
+			ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(o))
+		case 4:
+			ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(o))
+		case 8:
+			ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(o))
+		}
+	}
+}
+
+// Addrel appends a zero Reloc to s's relocation list and returns a
+// pointer to it for the caller to fill in.
+func Addrel(s *LSym) *Reloc {
+	s.R = append(s.R, Reloc{})
+	return &s.R[len(s.R)-1]
+}
+
+// setuintxx writes the wid-byte (1, 2, 4, or 8) unsigned value v at
+// offset off in s, growing and typing the symbol as needed, and returns
+// the offset just past the value. Note: an unsupported width writes
+// nothing but still returns off+wid.
+func setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	if s.Size < off+wid {
+		s.Size = off + wid
+		Symgrow(ctxt, s, s.Size)
+	}
+
+	switch wid {
+	case 1:
+		s.P[off] = uint8(v)
+	case 2:
+		ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v))
+	case 4:
+		ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v))
+	case 8:
+		ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(v))
+	}
+
+	return off + wid
+}
+
+// adduintxx appends the wid-byte unsigned value v at the end of s and
+// returns the offset at which it was placed.
+func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+	var off int64
+
+	off = s.Size
+	setuintxx(ctxt, s, off, v, int64(wid))
+	return off
+}
+
+// adduintN appends a fixed-width value at the end of s and returns its
+// offset; setuintN writes it at a given offset r. All are thin wrappers
+// over adduintxx/setuintxx.
+
+// adduint8 appends a 1-byte value to s.
+func adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+	return adduintxx(ctxt, s, uint64(v), 1)
+}
+
+// adduint16 appends a 2-byte value to s.
+func adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+	return adduintxx(ctxt, s, uint64(v), 2)
+}
+
+// Adduint32 appends a 4-byte value to s.
+func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
+	return adduintxx(ctxt, s, uint64(v), 4)
+}
+
+// Adduint64 appends an 8-byte value to s.
+func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
+	return adduintxx(ctxt, s, v, 8)
+}
+
+// setuint8 writes a 1-byte value at offset r in s.
+func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+	return setuintxx(ctxt, s, r, uint64(v), 1)
+}
+
+// setuint16 writes a 2-byte value at offset r in s.
+func setuint16(ctxt *Link, s *LSym, r int64, v uint16) int64 {
+	return setuintxx(ctxt, s, r, uint64(v), 2)
+}
+
+// setuint32 writes a 4-byte value at offset r in s.
+func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+	return setuintxx(ctxt, s, r, uint64(v), 4)
+}
+
+// setuint64 writes an 8-byte value at offset r in s.
+func setuint64(ctxt *Link, s *LSym, r int64, v uint64) int64 {
+	return setuintxx(ctxt, s, r, v, 8)
+}
+
+// addaddrplus appends a pointer-sized R_ADDR relocation referring to
+// t+add at the end of s and returns the offset just past it.
+func addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+	var i int64
+	var r *Reloc
+
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	i = s.Size
+	s.Size += int64(ctxt.Arch.Ptrsize)
+	Symgrow(ctxt, s, s.Size)
+	r = Addrel(s)
+	r.Sym = t
+	r.Off = int32(i)
+	r.Siz = uint8(ctxt.Arch.Ptrsize)
+	r.Type_ = R_ADDR
+	r.Add = add
+	return i + int64(r.Siz)
+}
+
+// addpcrelplus appends a 4-byte R_PCREL relocation referring to t+add
+// at the end of s and returns the offset just past it.
+func addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+	var i int64
+	var r *Reloc
+
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	i = s.Size
+	s.Size += 4
+	Symgrow(ctxt, s, s.Size)
+	r = Addrel(s)
+	r.Sym = t
+	r.Off = int32(i)
+	r.Add = add
+	r.Type_ = R_PCREL
+	r.Siz = 4
+	return i + int64(r.Siz)
+}
+
+// addaddr appends a pointer-sized address relocation to t (addend 0).
+func addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+	return addaddrplus(ctxt, s, t, 0)
+}
+
+// setaddrplus places a pointer-sized R_ADDR relocation to t+add at
+// offset off in s (growing s if needed) and returns the offset just
+// past it. A new relocation record is always appended, even if one
+// already exists at off.
+func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+	var r *Reloc
+
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	if off+int64(ctxt.Arch.Ptrsize) > s.Size {
+		s.Size = off + int64(ctxt.Arch.Ptrsize)
+		Symgrow(ctxt, s, s.Size)
+	}
+
+	r = Addrel(s)
+	r.Sym = t
+	r.Off = int32(off)
+	r.Siz = uint8(ctxt.Arch.Ptrsize)
+	r.Type_ = R_ADDR
+	r.Add = add
+	return off + int64(r.Siz)
+}
+
+// addsize appends a pointer-sized R_SIZE relocation (resolved later to
+// t's size) at the end of s and returns the offset just past it.
+func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+	var i int64
+	var r *Reloc
+
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	i = s.Size
+	s.Size += int64(ctxt.Arch.Ptrsize)
+	Symgrow(ctxt, s, s.Size)
+	r = Addrel(s)
+	r.Sym = t
+	r.Off = int32(i)
+	r.Siz = uint8(ctxt.Arch.Ptrsize)
+	r.Type_ = R_SIZE
+	return i + int64(r.Siz)
+}
+
+// addaddrplus4 appends a 4-byte R_ADDR relocation to t+add at the end
+// of s, regardless of the target pointer size, and returns the offset
+// just past it.
+func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+	var i int64
+	var r *Reloc
+
+	if s.Type_ == 0 {
+		s.Type_ = SDATA
+	}
+	s.Reachable = 1
+	i = s.Size
+	s.Size += 4
+	Symgrow(ctxt, s, s.Size)
+	r = Addrel(s)
+	r.Sym = t
+	r.Off = int32(i)
+	r.Siz = 4
+	r.Type_ = R_ADDR
+	r.Add = add
+	return i + int64(r.Siz)
+}
--- /dev/null
+/*
+ * The authors of this software are Rob Pike and Ken Thompson.
+ * Copyright (c) 2002 by Lucent Technologies.
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose without fee is hereby granted, provided that this entire notice
+ * is included in all copies of any software which is or includes a copy
+ * or modification of this software and in all copies of the supporting
+ * documentation for such software.
+ * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
+ * WARRANTY. IN PARTICULAR, NEITHER THE AUTHORS NOR LUCENT TECHNOLOGIES MAKE ANY
+ * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
+ * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
+ */
+
+package obj
+
+// (The comments in this file were copied from the manpage files rune.3,
+// isalpharune.3, and runestrcat.3. Some formatting changes were also made
+// to conform to Google style. /JRM 11/11/05)
+
+// Fmt is the formatting state record carried over from Plan 9's fmt
+// library (see the rune.3/isalpharune.3 manpage comment above). Field
+// meanings follow that library; NOTE(review): several fields appear
+// unused in this Go port — confirm against callers before relying on them.
+type Fmt struct {
+	runes uint8 // output is runes, not bytes (Plan 9 convention)
+	start interface{}
+	to interface{}
+	stop interface{}
+	flush func(*Fmt) int
+	farg interface{}
+	nfmt int
+	args []interface{}
+	r uint // current format rune
+	width int
+	prec int
+	flags uint32 // Fmt* flag bits declared below
+	decimal string
+	thousands string
+	grouping string
+}
+
+// Fmt flag bits, one per formatting option; each flag is the previous
+// one shifted left by one, mirroring Plan 9's fmt flag set.
+const (
+	FmtWidth = 1
+	FmtLeft = FmtWidth << 1
+	FmtPrec = FmtLeft << 1
+	FmtSharp = FmtPrec << 1
+	FmtSpace = FmtSharp << 1
+	FmtSign = FmtSpace << 1
+	FmtApost = FmtSign << 1
+	FmtZero = FmtApost << 1
+	FmtUnsigned = FmtZero << 1
+	FmtShort = FmtUnsigned << 1
+	FmtLong = FmtShort << 1
+	FmtVLong = FmtLong << 1
+	FmtComma = FmtVLong << 1
+	FmtByte = FmtComma << 1
+	FmtLDouble = FmtByte << 1
+	FmtFlag = FmtLDouble << 1
+)
+
+// fmtdoquote, when set, reports whether a rune forces quoting in %q.
+var fmtdoquote func(int) int
+
+/* Edit .+1,/^$/ | cfn $PLAN9/src/lib9/fmt/?*.c | grep -v static |grep -v __ */
--- /dev/null
+// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines the IDs for PCDATA and FUNCDATA instructions
+// in Go binaries. It is included by assembly sources, so it must
+// be written using #defines.
+//
+// The Go compiler also #includes this file, for now.
+//
+// symtab.go also contains a copy of these constants.
+
+// Pseudo-assembly statements.
+
+// GO_ARGS, GO_RESULTS_INITIALIZED, and NO_LOCAL_POINTERS are macros
+// that communicate to the runtime information about the location and liveness
+// of pointers in an assembly function's arguments, results, and stack frame.
+// This communication is only required in assembly functions that make calls
+// to other functions that might be preempted or grow the stack.
+// NOSPLIT functions that make no calls do not need to use these macros.
+
+// GO_ARGS indicates that the Go prototype for this assembly function
+// defines the pointer map for the function's arguments.
+// GO_ARGS should be the first instruction in a function that uses it.
+// It can be omitted if there are no arguments at all.
+// GO_ARGS is inserted implicitly by the linker for any function
+// that also has a Go prototype and therefore is usually not necessary
+// to write explicitly.
+
+// GO_RESULTS_INITIALIZED indicates that the assembly function
+// has initialized the stack space for its results and that those results
+// should be considered live for the remainder of the function.
+
+// NO_LOCAL_POINTERS indicates that the assembly function stores
+// no pointers to heap objects in its local stack variables.
+
+// ArgsSizeUnknown is set in Func.argsize to mark all functions
+// whose argument size is unknown (C vararg functions, and
+// assembly code without an explicit specification).
+// This value is generated by the compiler, assembler, or linker.
+// Keep these values in sync with the runtime's symtab copy mentioned in
+// the comment above.
+const (
+	PCDATA_StackMapIndex = 0
+	FUNCDATA_ArgsPointerMaps = 0
+	FUNCDATA_LocalsPointerMaps = 1
+	FUNCDATA_DeadValueMaps = 2
+	ArgsSizeUnknown = 0x80000000
+)
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "math"
+ "strings"
+)
+
+// go-specific code shared across loaders (5l, 6l, 8l).
+
+// replace all "". with pkg.
+// expandpkg replaces every `"".` package-placeholder prefix in t0 with
+// the actual package name pkg followed by a dot.
+func expandpkg(t0 string, pkg string) string {
+	return strings.Replace(t0, `"".`, pkg+".", -1)
+}
+
+// double2ieee stores the IEEE 754 bit pattern of f into *ieee.
+func double2ieee(ieee *uint64, f float64) {
+	*ieee = math.Float64bits(f)
+}
--- /dev/null
+// Inferno utils/8c/8.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/8.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+const (
+ AXXX = iota
+ AAAA
+ AAAD
+ AAAM
+ AAAS
+ AADCB
+ AADCL
+ AADCW
+ AADDB
+ AADDL
+ AADDW
+ AADJSP
+ AANDB
+ AANDL
+ AANDW
+ AARPL
+ ABOUNDL
+ ABOUNDW
+ ABSFL
+ ABSFW
+ ABSRL
+ ABSRW
+ ABTL
+ ABTW
+ ABTCL
+ ABTCW
+ ABTRL
+ ABTRW
+ ABTSL
+ ABTSW
+ ABYTE
+ ACALL
+ ACLC
+ ACLD
+ ACLI
+ ACLTS
+ ACMC
+ ACMPB
+ ACMPL
+ ACMPW
+ ACMPSB
+ ACMPSL
+ ACMPSW
+ ADAA
+ ADAS
+ ADATA
+ ADECB
+ ADECL
+ ADECW
+ ADIVB
+ ADIVL
+ ADIVW
+ AENTER
+ AGLOBL
+ AGOK
+ AHISTORY
+ AHLT
+ AIDIVB
+ AIDIVL
+ AIDIVW
+ AIMULB
+ AIMULL
+ AIMULW
+ AINB
+ AINL
+ AINW
+ AINCB
+ AINCL
+ AINCW
+ AINSB
+ AINSL
+ AINSW
+ AINT
+ AINTO
+ AIRETL
+ AIRETW
+ AJCC
+ AJCS
+ AJCXZL
+ AJCXZW
+ AJEQ
+ AJGE
+ AJGT
+ AJHI
+ AJLE
+ AJLS
+ AJLT
+ AJMI
+ AJMP
+ AJNE
+ AJOC
+ AJOS
+ AJPC
+ AJPL
+ AJPS
+ ALAHF
+ ALARL
+ ALARW
+ ALEAL
+ ALEAW
+ ALEAVEL
+ ALEAVEW
+ ALOCK
+ ALODSB
+ ALODSL
+ ALODSW
+ ALONG
+ ALOOP
+ ALOOPEQ
+ ALOOPNE
+ ALSLL
+ ALSLW
+ AMOVB
+ AMOVL
+ AMOVW
+ AMOVQ
+ AMOVBLSX
+ AMOVBLZX
+ AMOVBWSX
+ AMOVBWZX
+ AMOVWLSX
+ AMOVWLZX
+ AMOVSB
+ AMOVSL
+ AMOVSW
+ AMULB
+ AMULL
+ AMULW
+ ANAME
+ ANEGB
+ ANEGL
+ ANEGW
+ ANOP
+ ANOTB
+ ANOTL
+ ANOTW
+ AORB
+ AORL
+ AORW
+ AOUTB
+ AOUTL
+ AOUTW
+ AOUTSB
+ AOUTSL
+ AOUTSW
+ APAUSE
+ APOPAL
+ APOPAW
+ APOPFL
+ APOPFW
+ APOPL
+ APOPW
+ APUSHAL
+ APUSHAW
+ APUSHFL
+ APUSHFW
+ APUSHL
+ APUSHW
+ ARCLB
+ ARCLL
+ ARCLW
+ ARCRB
+ ARCRL
+ ARCRW
+ AREP
+ AREPN
+ ARET
+ AROLB
+ AROLL
+ AROLW
+ ARORB
+ ARORL
+ ARORW
+ ASAHF
+ ASALB
+ ASALL
+ ASALW
+ ASARB
+ ASARL
+ ASARW
+ ASBBB
+ ASBBL
+ ASBBW
+ ASCASB
+ ASCASL
+ ASCASW
+ ASETCC
+ ASETCS
+ ASETEQ
+ ASETGE
+ ASETGT
+ ASETHI
+ ASETLE
+ ASETLS
+ ASETLT
+ ASETMI
+ ASETNE
+ ASETOC
+ ASETOS
+ ASETPC
+ ASETPL
+ ASETPS
+ ACDQ
+ ACWD
+ ASHLB
+ ASHLL
+ ASHLW
+ ASHRB
+ ASHRL
+ ASHRW
+ ASTC
+ ASTD
+ ASTI
+ ASTOSB
+ ASTOSL
+ ASTOSW
+ ASUBB
+ ASUBL
+ ASUBW
+ ASYSCALL
+ ATESTB
+ ATESTL
+ ATESTW
+ ATEXT
+ AVERR
+ AVERW
+ AWAIT
+ AWORD
+ AXCHGB
+ AXCHGL
+ AXCHGW
+ AXLAT
+ AXORB
+ AXORL
+ AXORW
+ AFMOVB
+ AFMOVBP
+ AFMOVD
+ AFMOVDP
+ AFMOVF
+ AFMOVFP
+ AFMOVL
+ AFMOVLP
+ AFMOVV
+ AFMOVVP
+ AFMOVW
+ AFMOVWP
+ AFMOVX
+ AFMOVXP
+ AFCOMB
+ AFCOMBP
+ AFCOMD
+ AFCOMDP
+ AFCOMDPP
+ AFCOMF
+ AFCOMFP
+ AFCOMI
+ AFCOMIP
+ AFCOML
+ AFCOMLP
+ AFCOMW
+ AFCOMWP
+ AFUCOM
+ AFUCOMI
+ AFUCOMIP
+ AFUCOMP
+ AFUCOMPP
+ AFADDDP
+ AFADDW
+ AFADDL
+ AFADDF
+ AFADDD
+ AFMULDP
+ AFMULW
+ AFMULL
+ AFMULF
+ AFMULD
+ AFSUBDP
+ AFSUBW
+ AFSUBL
+ AFSUBF
+ AFSUBD
+ AFSUBRDP
+ AFSUBRW
+ AFSUBRL
+ AFSUBRF
+ AFSUBRD
+ AFDIVDP
+ AFDIVW
+ AFDIVL
+ AFDIVF
+ AFDIVD
+ AFDIVRDP
+ AFDIVRW
+ AFDIVRL
+ AFDIVRF
+ AFDIVRD
+ AFXCHD
+ AFFREE
+ AFLDCW
+ AFLDENV
+ AFRSTOR
+ AFSAVE
+ AFSTCW
+ AFSTENV
+ AFSTSW
+ AF2XM1
+ AFABS
+ AFCHS
+ AFCLEX
+ AFCOS
+ AFDECSTP
+ AFINCSTP
+ AFINIT
+ AFLD1
+ AFLDL2E
+ AFLDL2T
+ AFLDLG2
+ AFLDLN2
+ AFLDPI
+ AFLDZ
+ AFNOP
+ AFPATAN
+ AFPREM
+ AFPREM1
+ AFPTAN
+ AFRNDINT
+ AFSCALE
+ AFSIN
+ AFSINCOS
+ AFSQRT
+ AFTST
+ AFXAM
+ AFXTRACT
+ AFYL2X
+ AFYL2XP1
+ AEND
+ ADYNT_
+ AINIT_
+ ASIGNAME
+ ACMPXCHGB
+ ACMPXCHGL
+ ACMPXCHGW
+ ACMPXCHG8B
+ ACPUID
+ ARDTSC
+ AXADDB
+ AXADDL
+ AXADDW
+ ACMOVLCC
+ ACMOVLCS
+ ACMOVLEQ
+ ACMOVLGE
+ ACMOVLGT
+ ACMOVLHI
+ ACMOVLLE
+ ACMOVLLS
+ ACMOVLLT
+ ACMOVLMI
+ ACMOVLNE
+ ACMOVLOC
+ ACMOVLOS
+ ACMOVLPC
+ ACMOVLPL
+ ACMOVLPS
+ ACMOVWCC
+ ACMOVWCS
+ ACMOVWEQ
+ ACMOVWGE
+ ACMOVWGT
+ ACMOVWHI
+ ACMOVWLE
+ ACMOVWLS
+ ACMOVWLT
+ ACMOVWMI
+ ACMOVWNE
+ ACMOVWOC
+ ACMOVWOS
+ ACMOVWPC
+ ACMOVWPL
+ ACMOVWPS
+ AFCMOVCC
+ AFCMOVCS
+ AFCMOVEQ
+ AFCMOVHI
+ AFCMOVLS
+ AFCMOVNE
+ AFCMOVNU
+ AFCMOVUN
+ ALFENCE
+ AMFENCE
+ ASFENCE
+ AEMMS
+ APREFETCHT0
+ APREFETCHT1
+ APREFETCHT2
+ APREFETCHNTA
+ ABSWAPL
+ AUNDEF
+ AADDPD
+ AADDPS
+ AADDSD
+ AADDSS
+ AANDNPD
+ AANDNPS
+ AANDPD
+ AANDPS
+ ACMPPD
+ ACMPPS
+ ACMPSD
+ ACMPSS
+ ACOMISD
+ ACOMISS
+ ACVTPL2PD
+ ACVTPL2PS
+ ACVTPD2PL
+ ACVTPD2PS
+ ACVTPS2PL
+ ACVTPS2PD
+ ACVTSD2SL
+ ACVTSD2SS
+ ACVTSL2SD
+ ACVTSL2SS
+ ACVTSS2SD
+ ACVTSS2SL
+ ACVTTPD2PL
+ ACVTTPS2PL
+ ACVTTSD2SL
+ ACVTTSS2SL
+ ADIVPD
+ ADIVPS
+ ADIVSD
+ ADIVSS
+ AMASKMOVOU
+ AMAXPD
+ AMAXPS
+ AMAXSD
+ AMAXSS
+ AMINPD
+ AMINPS
+ AMINSD
+ AMINSS
+ AMOVAPD
+ AMOVAPS
+ AMOVO
+ AMOVOU
+ AMOVHLPS
+ AMOVHPD
+ AMOVHPS
+ AMOVLHPS
+ AMOVLPD
+ AMOVLPS
+ AMOVMSKPD
+ AMOVMSKPS
+ AMOVNTO
+ AMOVNTPD
+ AMOVNTPS
+ AMOVSD
+ AMOVSS
+ AMOVUPD
+ AMOVUPS
+ AMULPD
+ AMULPS
+ AMULSD
+ AMULSS
+ AORPD
+ AORPS
+ APADDQ
+ APAND
+ APCMPEQB
+ APMAXSW
+ APMAXUB
+ APMINSW
+ APMINUB
+ APMOVMSKB
+ APSADBW
+ APSUBB
+ APSUBL
+ APSUBQ
+ APSUBSB
+ APSUBSW
+ APSUBUSB
+ APSUBUSW
+ APSUBW
+ APUNPCKHQDQ
+ APUNPCKLQDQ
+ APXOR
+ ARCPPS
+ ARCPSS
+ ARSQRTPS
+ ARSQRTSS
+ ASQRTPD
+ ASQRTPS
+ ASQRTSD
+ ASQRTSS
+ ASUBPD
+ ASUBPS
+ ASUBSD
+ ASUBSS
+ AUCOMISD
+ AUCOMISS
+ AUNPCKHPD
+ AUNPCKHPS
+ AUNPCKLPD
+ AUNPCKLPS
+ AXORPD
+ AXORPS
+ APSHUFHW
+ APSHUFL
+ APSHUFLW
+ AAESENC
+ APINSRD
+ APSHUFB
+ AUSEFIELD
+ ATYPE
+ AFUNCDATA
+ APCDATA
+ ACHECKNIL
+ AVARDEF
+ AVARKILL
+ ADUFFCOPY
+ ADUFFZERO
+ ALAST
+)
+
+const (
+ D_AL = 0 + iota
+ D_CL
+ D_DL
+ D_BL
+ D_AH = 4 + iota - 4
+ D_CH
+ D_DH
+ D_BH
+ D_AX = 8 + iota - 8
+ D_CX
+ D_DX
+ D_BX
+ D_SP
+ D_BP
+ D_SI
+ D_DI
+ D_F0 = 16
+ D_F7 = D_F0 + 7
+ D_CS = 24 + iota - 18
+ D_SS
+ D_DS
+ D_ES
+ D_FS
+ D_GS
+ D_GDTR
+ D_IDTR
+ D_LDTR
+ D_MSW
+ D_TASK
+ D_CR = 35
+ D_DR = 43
+ D_TR = 51
+ D_X0 = 59 + iota - 32
+ D_X1
+ D_X2
+ D_X3
+ D_X4
+ D_X5
+ D_X6
+ D_X7
+ D_TLS = 67
+ D_NONE = 68
+ D_BRANCH = 69
+ D_EXTERN = 70
+ D_STATIC = 71
+ D_AUTO = 72
+ D_PARAM = 73
+ D_CONST = 74
+ D_FCONST = 75
+ D_SCONST = 76
+ D_ADDR = 77 + iota - 50
+ D_INDIR
+ D_CONST2 = D_INDIR + D_INDIR + iota - 52
+ D_LAST
+ T_TYPE = 1 << 0
+ T_INDEX = 1 << 1
+ T_OFFSET = 1 << 2
+ T_FCONST = 1 << 3
+ T_SYM = 1 << 4
+ T_SCONST = 1 << 5
+ T_OFFSET2 = 1 << 6
+ T_GOTYPE = 1 << 7
+ REGARG = -1
+ REGRET = D_AX
+ FREGRET = D_F0
+ REGSP = D_SP
+ REGTMP = D_DI
+)
--- /dev/null
+package i386
+
+/*
+ * this is the ranlib header
+ */
+var anames8 = []string{
+ "XXX",
+ "AAA",
+ "AAD",
+ "AAM",
+ "AAS",
+ "ADCB",
+ "ADCL",
+ "ADCW",
+ "ADDB",
+ "ADDL",
+ "ADDW",
+ "ADJSP",
+ "ANDB",
+ "ANDL",
+ "ANDW",
+ "ARPL",
+ "BOUNDL",
+ "BOUNDW",
+ "BSFL",
+ "BSFW",
+ "BSRL",
+ "BSRW",
+ "BTL",
+ "BTW",
+ "BTCL",
+ "BTCW",
+ "BTRL",
+ "BTRW",
+ "BTSL",
+ "BTSW",
+ "BYTE",
+ "CALL",
+ "CLC",
+ "CLD",
+ "CLI",
+ "CLTS",
+ "CMC",
+ "CMPB",
+ "CMPL",
+ "CMPW",
+ "CMPSB",
+ "CMPSL",
+ "CMPSW",
+ "DAA",
+ "DAS",
+ "DATA",
+ "DECB",
+ "DECL",
+ "DECW",
+ "DIVB",
+ "DIVL",
+ "DIVW",
+ "ENTER",
+ "GLOBL",
+ "GOK",
+ "HISTORY",
+ "HLT",
+ "IDIVB",
+ "IDIVL",
+ "IDIVW",
+ "IMULB",
+ "IMULL",
+ "IMULW",
+ "INB",
+ "INL",
+ "INW",
+ "INCB",
+ "INCL",
+ "INCW",
+ "INSB",
+ "INSL",
+ "INSW",
+ "INT",
+ "INTO",
+ "IRETL",
+ "IRETW",
+ "JCC",
+ "JCS",
+ "JCXZL",
+ "JCXZW",
+ "JEQ",
+ "JGE",
+ "JGT",
+ "JHI",
+ "JLE",
+ "JLS",
+ "JLT",
+ "JMI",
+ "JMP",
+ "JNE",
+ "JOC",
+ "JOS",
+ "JPC",
+ "JPL",
+ "JPS",
+ "LAHF",
+ "LARL",
+ "LARW",
+ "LEAL",
+ "LEAW",
+ "LEAVEL",
+ "LEAVEW",
+ "LOCK",
+ "LODSB",
+ "LODSL",
+ "LODSW",
+ "LONG",
+ "LOOP",
+ "LOOPEQ",
+ "LOOPNE",
+ "LSLL",
+ "LSLW",
+ "MOVB",
+ "MOVL",
+ "MOVW",
+ "MOVQ",
+ "MOVBLSX",
+ "MOVBLZX",
+ "MOVBWSX",
+ "MOVBWZX",
+ "MOVWLSX",
+ "MOVWLZX",
+ "MOVSB",
+ "MOVSL",
+ "MOVSW",
+ "MULB",
+ "MULL",
+ "MULW",
+ "NAME",
+ "NEGB",
+ "NEGL",
+ "NEGW",
+ "NOP",
+ "NOTB",
+ "NOTL",
+ "NOTW",
+ "ORB",
+ "ORL",
+ "ORW",
+ "OUTB",
+ "OUTL",
+ "OUTW",
+ "OUTSB",
+ "OUTSL",
+ "OUTSW",
+ "PAUSE",
+ "POPAL",
+ "POPAW",
+ "POPFL",
+ "POPFW",
+ "POPL",
+ "POPW",
+ "PUSHAL",
+ "PUSHAW",
+ "PUSHFL",
+ "PUSHFW",
+ "PUSHL",
+ "PUSHW",
+ "RCLB",
+ "RCLL",
+ "RCLW",
+ "RCRB",
+ "RCRL",
+ "RCRW",
+ "REP",
+ "REPN",
+ "RET",
+ "ROLB",
+ "ROLL",
+ "ROLW",
+ "RORB",
+ "RORL",
+ "RORW",
+ "SAHF",
+ "SALB",
+ "SALL",
+ "SALW",
+ "SARB",
+ "SARL",
+ "SARW",
+ "SBBB",
+ "SBBL",
+ "SBBW",
+ "SCASB",
+ "SCASL",
+ "SCASW",
+ "SETCC",
+ "SETCS",
+ "SETEQ",
+ "SETGE",
+ "SETGT",
+ "SETHI",
+ "SETLE",
+ "SETLS",
+ "SETLT",
+ "SETMI",
+ "SETNE",
+ "SETOC",
+ "SETOS",
+ "SETPC",
+ "SETPL",
+ "SETPS",
+ "CDQ",
+ "CWD",
+ "SHLB",
+ "SHLL",
+ "SHLW",
+ "SHRB",
+ "SHRL",
+ "SHRW",
+ "STC",
+ "STD",
+ "STI",
+ "STOSB",
+ "STOSL",
+ "STOSW",
+ "SUBB",
+ "SUBL",
+ "SUBW",
+ "SYSCALL",
+ "TESTB",
+ "TESTL",
+ "TESTW",
+ "TEXT",
+ "VERR",
+ "VERW",
+ "WAIT",
+ "WORD",
+ "XCHGB",
+ "XCHGL",
+ "XCHGW",
+ "XLAT",
+ "XORB",
+ "XORL",
+ "XORW",
+ "FMOVB",
+ "FMOVBP",
+ "FMOVD",
+ "FMOVDP",
+ "FMOVF",
+ "FMOVFP",
+ "FMOVL",
+ "FMOVLP",
+ "FMOVV",
+ "FMOVVP",
+ "FMOVW",
+ "FMOVWP",
+ "FMOVX",
+ "FMOVXP",
+ "FCOMB",
+ "FCOMBP",
+ "FCOMD",
+ "FCOMDP",
+ "FCOMDPP",
+ "FCOMF",
+ "FCOMFP",
+ "FCOMI",
+ "FCOMIP",
+ "FCOML",
+ "FCOMLP",
+ "FCOMW",
+ "FCOMWP",
+ "FUCOM",
+ "FUCOMI",
+ "FUCOMIP",
+ "FUCOMP",
+ "FUCOMPP",
+ "FADDDP",
+ "FADDW",
+ "FADDL",
+ "FADDF",
+ "FADDD",
+ "FMULDP",
+ "FMULW",
+ "FMULL",
+ "FMULF",
+ "FMULD",
+ "FSUBDP",
+ "FSUBW",
+ "FSUBL",
+ "FSUBF",
+ "FSUBD",
+ "FSUBRDP",
+ "FSUBRW",
+ "FSUBRL",
+ "FSUBRF",
+ "FSUBRD",
+ "FDIVDP",
+ "FDIVW",
+ "FDIVL",
+ "FDIVF",
+ "FDIVD",
+ "FDIVRDP",
+ "FDIVRW",
+ "FDIVRL",
+ "FDIVRF",
+ "FDIVRD",
+ "FXCHD",
+ "FFREE",
+ "FLDCW",
+ "FLDENV",
+ "FRSTOR",
+ "FSAVE",
+ "FSTCW",
+ "FSTENV",
+ "FSTSW",
+ "F2XM1",
+ "FABS",
+ "FCHS",
+ "FCLEX",
+ "FCOS",
+ "FDECSTP",
+ "FINCSTP",
+ "FINIT",
+ "FLD1",
+ "FLDL2E",
+ "FLDL2T",
+ "FLDLG2",
+ "FLDLN2",
+ "FLDPI",
+ "FLDZ",
+ "FNOP",
+ "FPATAN",
+ "FPREM",
+ "FPREM1",
+ "FPTAN",
+ "FRNDINT",
+ "FSCALE",
+ "FSIN",
+ "FSINCOS",
+ "FSQRT",
+ "FTST",
+ "FXAM",
+ "FXTRACT",
+ "FYL2X",
+ "FYL2XP1",
+ "END",
+ "DYNT_",
+ "INIT_",
+ "SIGNAME",
+ "CMPXCHGB",
+ "CMPXCHGL",
+ "CMPXCHGW",
+ "CMPXCHG8B",
+ "CPUID",
+ "RDTSC",
+ "XADDB",
+ "XADDL",
+ "XADDW",
+ "CMOVLCC",
+ "CMOVLCS",
+ "CMOVLEQ",
+ "CMOVLGE",
+ "CMOVLGT",
+ "CMOVLHI",
+ "CMOVLLE",
+ "CMOVLLS",
+ "CMOVLLT",
+ "CMOVLMI",
+ "CMOVLNE",
+ "CMOVLOC",
+ "CMOVLOS",
+ "CMOVLPC",
+ "CMOVLPL",
+ "CMOVLPS",
+ "CMOVWCC",
+ "CMOVWCS",
+ "CMOVWEQ",
+ "CMOVWGE",
+ "CMOVWGT",
+ "CMOVWHI",
+ "CMOVWLE",
+ "CMOVWLS",
+ "CMOVWLT",
+ "CMOVWMI",
+ "CMOVWNE",
+ "CMOVWOC",
+ "CMOVWOS",
+ "CMOVWPC",
+ "CMOVWPL",
+ "CMOVWPS",
+ "FCMOVCC",
+ "FCMOVCS",
+ "FCMOVEQ",
+ "FCMOVHI",
+ "FCMOVLS",
+ "FCMOVNE",
+ "FCMOVNU",
+ "FCMOVUN",
+ "LFENCE",
+ "MFENCE",
+ "SFENCE",
+ "EMMS",
+ "PREFETCHT0",
+ "PREFETCHT1",
+ "PREFETCHT2",
+ "PREFETCHNTA",
+ "BSWAPL",
+ "UNDEF",
+ "ADDPD",
+ "ADDPS",
+ "ADDSD",
+ "ADDSS",
+ "ANDNPD",
+ "ANDNPS",
+ "ANDPD",
+ "ANDPS",
+ "CMPPD",
+ "CMPPS",
+ "CMPSD",
+ "CMPSS",
+ "COMISD",
+ "COMISS",
+ "CVTPL2PD",
+ "CVTPL2PS",
+ "CVTPD2PL",
+ "CVTPD2PS",
+ "CVTPS2PL",
+ "CVTPS2PD",
+ "CVTSD2SL",
+ "CVTSD2SS",
+ "CVTSL2SD",
+ "CVTSL2SS",
+ "CVTSS2SD",
+ "CVTSS2SL",
+ "CVTTPD2PL",
+ "CVTTPS2PL",
+ "CVTTSD2SL",
+ "CVTTSS2SL",
+ "DIVPD",
+ "DIVPS",
+ "DIVSD",
+ "DIVSS",
+ "MASKMOVOU",
+ "MAXPD",
+ "MAXPS",
+ "MAXSD",
+ "MAXSS",
+ "MINPD",
+ "MINPS",
+ "MINSD",
+ "MINSS",
+ "MOVAPD",
+ "MOVAPS",
+ "MOVO",
+ "MOVOU",
+ "MOVHLPS",
+ "MOVHPD",
+ "MOVHPS",
+ "MOVLHPS",
+ "MOVLPD",
+ "MOVLPS",
+ "MOVMSKPD",
+ "MOVMSKPS",
+ "MOVNTO",
+ "MOVNTPD",
+ "MOVNTPS",
+ "MOVSD",
+ "MOVSS",
+ "MOVUPD",
+ "MOVUPS",
+ "MULPD",
+ "MULPS",
+ "MULSD",
+ "MULSS",
+ "ORPD",
+ "ORPS",
+ "PADDQ",
+ "PAND",
+ "PCMPEQB",
+ "PMAXSW",
+ "PMAXUB",
+ "PMINSW",
+ "PMINUB",
+ "PMOVMSKB",
+ "PSADBW",
+ "PSUBB",
+ "PSUBL",
+ "PSUBQ",
+ "PSUBSB",
+ "PSUBSW",
+ "PSUBUSB",
+ "PSUBUSW",
+ "PSUBW",
+ "PUNPCKHQDQ",
+ "PUNPCKLQDQ",
+ "PXOR",
+ "RCPPS",
+ "RCPSS",
+ "RSQRTPS",
+ "RSQRTSS",
+ "SQRTPD",
+ "SQRTPS",
+ "SQRTSD",
+ "SQRTSS",
+ "SUBPD",
+ "SUBPS",
+ "SUBSD",
+ "SUBSS",
+ "UCOMISD",
+ "UCOMISS",
+ "UNPCKHPD",
+ "UNPCKHPS",
+ "UNPCKLPD",
+ "UNPCKLPS",
+ "XORPD",
+ "XORPS",
+ "PSHUFHW",
+ "PSHUFL",
+ "PSHUFLW",
+ "AESENC",
+ "PINSRD",
+ "PSHUFB",
+ "USEFIELD",
+ "TYPE",
+ "FUNCDATA",
+ "PCDATA",
+ "CHECKNIL",
+ "VARDEF",
+ "VARKILL",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "LAST",
+}
+
+var dnames8 = []string{
+ D_AL: "AL",
+ D_CL: "CL",
+ D_DL: "DL",
+ D_BL: "BL",
+ D_AH: "AH",
+ D_CH: "CH",
+ D_DH: "DH",
+ D_BH: "BH",
+ D_AX: "AX",
+ D_CX: "CX",
+ D_DX: "DX",
+ D_BX: "BX",
+ D_SP: "SP",
+ D_BP: "BP",
+ D_SI: "SI",
+ D_DI: "DI",
+ D_F0: "F0",
+ D_CS: "CS",
+ D_SS: "SS",
+ D_DS: "DS",
+ D_ES: "ES",
+ D_FS: "FS",
+ D_GS: "GS",
+ D_GDTR: "GDTR",
+ D_IDTR: "IDTR",
+ D_LDTR: "LDTR",
+ D_MSW: "MSW",
+ D_TASK: "TASK",
+ D_CR: "CR",
+ D_DR: "DR",
+ D_TR: "TR",
+ D_X0: "X0",
+ D_X1: "X1",
+ D_X2: "X2",
+ D_X3: "X3",
+ D_X4: "X4",
+ D_X5: "X5",
+ D_X6: "X6",
+ D_X7: "X7",
+ D_TLS: "TLS",
+ D_NONE: "NONE",
+ D_BRANCH: "BRANCH",
+ D_EXTERN: "EXTERN",
+ D_STATIC: "STATIC",
+ D_AUTO: "AUTO",
+ D_PARAM: "PARAM",
+ D_CONST: "CONST",
+ D_FCONST: "FCONST",
+ D_SCONST: "SCONST",
+ D_ADDR: "ADDR",
+ D_INDIR: "INDIR",
+}
--- /dev/null
+// Inferno utils/8l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+)
+
+// Instruction layout.
+
+const (
+ MaxAlign = 32
+ FuncAlign = 16
+)
+
+type Optab struct {
+ as int16
+ ytab []byte
+ prefix uint8
+ op [13]uint8
+}
+
+const (
+ Yxxx = 0 + iota
+ Ynone
+ Yi0
+ Yi1
+ Yi8
+ Yi32
+ Yiauto
+ Yal
+ Ycl
+ Yax
+ Ycx
+ Yrb
+ Yrl
+ Yrf
+ Yf0
+ Yrx
+ Ymb
+ Yml
+ Ym
+ Ybr
+ Ycol
+ Ytls
+ Ycs
+ Yss
+ Yds
+ Yes
+ Yfs
+ Ygs
+ Ygdtr
+ Yidtr
+ Yldtr
+ Ymsw
+ Ytask
+ Ycr0
+ Ycr1
+ Ycr2
+ Ycr3
+ Ycr4
+ Ycr5
+ Ycr6
+ Ycr7
+ Ydr0
+ Ydr1
+ Ydr2
+ Ydr3
+ Ydr4
+ Ydr5
+ Ydr6
+ Ydr7
+ Ytr0
+ Ytr1
+ Ytr2
+ Ytr3
+ Ytr4
+ Ytr5
+ Ytr6
+ Ytr7
+ Ymr
+ Ymm
+ Yxr
+ Yxm
+ Ymax
+ Zxxx = 0 + iota - 62
+ Zlit
+ Zlitm_r
+ Z_rp
+ Zbr
+ Zcall
+ Zcallcon
+ Zcallind
+ Zcallindreg
+ Zib_
+ Zib_rp
+ Zibo_m
+ Zil_
+ Zil_rp
+ Zilo_m
+ Zjmp
+ Zjmpcon
+ Zloop
+ Zm_o
+ Zm_r
+ Zm2_r
+ Zm_r_xm
+ Zm_r_i_xm
+ Zaut_r
+ Zo_m
+ Zpseudo
+ Zr_m
+ Zr_m_xm
+ Zr_m_i_xm
+ Zrp_
+ Z_ib
+ Z_il
+ Zm_ibo
+ Zm_ilo
+ Zib_rr
+ Zil_rr
+ Zclr
+ Zibm_r
+ Zbyte
+ Zmov
+ Zmax
+ Px = 0
+ Pe = 0x66
+ Pm = 0x0f
+ Pq = 0xff
+ Pb = 0xfe
+ Pf2 = 0xf2
+ Pf3 = 0xf3
+)
+
+var ycover [Ymax * Ymax]uint8
+
+var reg [D_NONE]int
+
+var ynone = []uint8{
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ytext = []uint8{
+ Ymb,
+ Yi32,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var ynop = []uint8{
+ Ynone,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yiauto,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yml,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yrf,
+ Zpseudo,
+ 0,
+ Yiauto,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yxr,
+ Zpseudo,
+ 0,
+ Yml,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yrf,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yxr,
+ Ynone,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var yfuncdata = []uint8{
+ Yi32,
+ Ym,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var ypcdata = []uint8{
+ Yi32,
+ Yi32,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var yxorb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yxorl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yaddl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yincb = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yincl = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ycmpb = []uint8{
+ Yal,
+ Yi32,
+ Z_ib,
+ 1,
+ Ymb,
+ Yi32,
+ Zm_ibo,
+ 2,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ycmpl = []uint8{
+ Yml,
+ Yi8,
+ Zm_ibo,
+ 2,
+ Yax,
+ Yi32,
+ Z_il,
+ 1,
+ Yml,
+ Yi32,
+ Zm_ilo,
+ 2,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yshb = []uint8{
+ Yi1,
+ Ymb,
+ Zo_m,
+ 2,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Ycx,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yshl = []uint8{
+ Yi1,
+ Yml,
+ Zo_m,
+ 2,
+ Yi32,
+ Yml,
+ Zibo_m,
+ 2,
+ Ycl,
+ Yml,
+ Zo_m,
+ 2,
+ Ycx,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ytestb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ytestl = []uint8{
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymovb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yi32,
+ Yrb,
+ Zib_rp,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+var ymovw = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1 + 2,
+ // Yi0, Yml, Zibo_m, 2, // shorter but slower AND $0,dst
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 1,
+ 0,
+}
+
+var ymovl = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1 + 2,
+ // Yi0, Yml, Zibo_m, 2, // shorter but slower AND $0,dst
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // XMM MOVD (32 bit)
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // XMM MOVD (32 bit)
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 1,
+ 0,
+}
+
+var ymovq = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var ym_rl = []uint8{
+ Ym,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrl_m = []uint8{
+ Yrl,
+ Ym,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ymb_rl = []uint8{
+ Ymb,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yml_rl = []uint8{
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrb_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yrl_ml = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yml_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yxchg = []uint8{
+ Yax,
+ Yrl,
+ Z_rp,
+ 1,
+ Yrl,
+ Yax,
+ Zrp_,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ydivl = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ydivb = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yimul = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Yrl,
+ Zib_rr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rr,
+ 1,
+ 0,
+}
+
+var ybyte = []uint8{
+ Yi32,
+ Ynone,
+ Zbyte,
+ 1,
+ 0,
+}
+
+var yin = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var yint = []uint8{
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ 0,
+}
+
+var ypushl = []uint8{
+ Yrl,
+ Ynone,
+ Zrp_,
+ 1,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Ynone,
+ Zib_,
+ 1,
+ Yi32,
+ Ynone,
+ Zil_,
+ 1,
+ 0,
+}
+
+var ypopl = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ybswap = []uint8{
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ 0,
+}
+
+var yscond = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yjcond = []uint8{
+ Ynone,
+ Ybr,
+ Zbr,
+ 0,
+ Yi0,
+ Ybr,
+ Zbr,
+ 0,
+ Yi1,
+ Ybr,
+ Zbr,
+ 1,
+ 0,
+}
+
+var yloop = []uint8{
+ Ynone,
+ Ybr,
+ Zloop,
+ 1,
+ 0,
+}
+
+var ycall = []uint8{
+ Ynone,
+ Yml,
+ Zcallindreg,
+ 0,
+ Yrx,
+ Yrx,
+ Zcallindreg,
+ 2,
+ Ynone,
+ Ycol,
+ Zcallind,
+ 2,
+ Ynone,
+ Ybr,
+ Zcall,
+ 0,
+ Ynone,
+ Yi32,
+ Zcallcon,
+ 1,
+ 0,
+}
+
+var yduff = []uint8{
+ Ynone,
+ Yi32,
+ Zcall,
+ 1,
+ 0,
+}
+
+var yjmp = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ Ynone,
+ Ybr,
+ Zjmp,
+ 0,
+ Ynone,
+ Yi32,
+ Zjmpcon,
+ 1,
+ 0,
+}
+
+var yfmvd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvdp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvf = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvx = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yfmvp = []uint8{
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfcmv = []uint8{
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yfadd = []uint8{
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfaddp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfxch = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ycompp = []uint8{
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2, /* botch is really f0,f1 */
+ 0,
+}
+
+var ystsw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ynone,
+ Yax,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ystcw = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ysvrs = []uint8{
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ymskb = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ Ymr,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxm = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvm1 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yxm,
+ Ymr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxcvm2 = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Ymm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxmq = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxr = []uint8{
+ Yxr,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxr_ml = []uint8{
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcmp = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcmpi = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_i_xm,
+ 2,
+ 0,
+}
+
+var yxmov = []uint8{
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ Yxr,
+ Yxm,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcvfl = []uint8{
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvlf = []uint8{
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+/*
+static uchar yxcvfq[] =
+{
+ Yxm, Yrl, Zm_r_xm, 2,
+ 0
+};
+static uchar yxcvqf[] =
+{
+ Yml, Yxr, Zm_r_xm, 2,
+ 0
+};
+*/
+var yxrrl = []uint8{
+ Yxr,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yprefetch = []uint8{
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yaes = []uint8{
+ Yxm,
+ Yxr,
+ Zlitm_r,
+ 2,
+ 0,
+}
+
+var yinsrd = []uint8{
+ Yml,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var ymshufb = []uint8{
+ Yxm,
+ Yxr,
+ Zm2_r,
+ 2,
+ 0,
+}
+
+var yxshuf = []uint8{
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var optab = /* as, ytab, andproto, opcode */
+[]Optab{
+ Optab{AXXX, nil, 0, [13]uint8{}},
+ Optab{AAAA, ynone, Px, [13]uint8{0x37}},
+ Optab{AAAD, ynone, Px, [13]uint8{0xd5, 0x0a}},
+ Optab{AAAM, ynone, Px, [13]uint8{0xd4, 0x0a}},
+ Optab{AAAS, ynone, Px, [13]uint8{0x3f}},
+ Optab{AADCB, yxorb, Pb, [13]uint8{0x14, 0x80, 02, 0x10, 0x10}},
+ Optab{AADCL, yxorl, Px, [13]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCW, yxorl, Pe, [13]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADDB, yxorb, Px, [13]uint8{0x04, 0x80, 00, 0x00, 0x02}},
+ Optab{AADDL, yaddl, Px, [13]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDW, yaddl, Pe, [13]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADJSP, nil, 0, [13]uint8{}},
+ Optab{AANDB, yxorb, Pb, [13]uint8{0x24, 0x80, 04, 0x20, 0x22}},
+ Optab{AANDL, yxorl, Px, [13]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDW, yxorl, Pe, [13]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AARPL, yrl_ml, Px, [13]uint8{0x63}},
+ Optab{ABOUNDL, yrl_m, Px, [13]uint8{0x62}},
+ Optab{ABOUNDW, yrl_m, Pe, [13]uint8{0x62}},
+ Optab{ABSFL, yml_rl, Pm, [13]uint8{0xbc}},
+ Optab{ABSFW, yml_rl, Pq, [13]uint8{0xbc}},
+ Optab{ABSRL, yml_rl, Pm, [13]uint8{0xbd}},
+ Optab{ABSRW, yml_rl, Pq, [13]uint8{0xbd}},
+ Optab{ABTL, yml_rl, Pm, [13]uint8{0xa3}},
+ Optab{ABTW, yml_rl, Pq, [13]uint8{0xa3}},
+ Optab{ABTCL, yml_rl, Pm, [13]uint8{0xbb}},
+ Optab{ABTCW, yml_rl, Pq, [13]uint8{0xbb}},
+ Optab{ABTRL, yml_rl, Pm, [13]uint8{0xb3}},
+ Optab{ABTRW, yml_rl, Pq, [13]uint8{0xb3}},
+ Optab{ABTSL, yml_rl, Pm, [13]uint8{0xab}},
+ Optab{ABTSW, yml_rl, Pq, [13]uint8{0xab}},
+ Optab{ABYTE, ybyte, Px, [13]uint8{1}},
+ Optab{ACALL, ycall, Px, [13]uint8{0xff, 02, 0xff, 0x15, 0xe8}},
+ Optab{ACLC, ynone, Px, [13]uint8{0xf8}},
+ Optab{ACLD, ynone, Px, [13]uint8{0xfc}},
+ Optab{ACLI, ynone, Px, [13]uint8{0xfa}},
+ Optab{ACLTS, ynone, Pm, [13]uint8{0x06}},
+ Optab{ACMC, ynone, Px, [13]uint8{0xf5}},
+ Optab{ACMPB, ycmpb, Pb, [13]uint8{0x3c, 0x80, 07, 0x38, 0x3a}},
+ Optab{ACMPL, ycmpl, Px, [13]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPW, ycmpl, Pe, [13]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPSB, ynone, Pb, [13]uint8{0xa6}},
+ Optab{ACMPSL, ynone, Px, [13]uint8{0xa7}},
+ Optab{ACMPSW, ynone, Pe, [13]uint8{0xa7}},
+ Optab{ADAA, ynone, Px, [13]uint8{0x27}},
+ Optab{ADAS, ynone, Px, [13]uint8{0x2f}},
+ Optab{ADATA, nil, 0, [13]uint8{}},
+ Optab{ADECB, yincb, Pb, [13]uint8{0xfe, 01}},
+ Optab{ADECL, yincl, Px, [13]uint8{0x48, 0xff, 01}},
+ Optab{ADECW, yincl, Pe, [13]uint8{0x48, 0xff, 01}},
+ Optab{ADIVB, ydivb, Pb, [13]uint8{0xf6, 06}},
+ Optab{ADIVL, ydivl, Px, [13]uint8{0xf7, 06}},
+ Optab{ADIVW, ydivl, Pe, [13]uint8{0xf7, 06}},
+ Optab{AENTER, nil, 0, [13]uint8{}}, /* botch */
+ Optab{AGLOBL, nil, 0, [13]uint8{}},
+ Optab{AGOK, nil, 0, [13]uint8{}},
+ Optab{AHISTORY, nil, 0, [13]uint8{}},
+ Optab{AHLT, ynone, Px, [13]uint8{0xf4}},
+ Optab{AIDIVB, ydivb, Pb, [13]uint8{0xf6, 07}},
+ Optab{AIDIVL, ydivl, Px, [13]uint8{0xf7, 07}},
+ Optab{AIDIVW, ydivl, Pe, [13]uint8{0xf7, 07}},
+ Optab{AIMULB, ydivb, Pb, [13]uint8{0xf6, 05}},
+ Optab{AIMULL, yimul, Px, [13]uint8{0xf7, 05, 0x6b, 0x69}},
+ Optab{AIMULW, yimul, Pe, [13]uint8{0xf7, 05, 0x6b, 0x69}},
+ Optab{AINB, yin, Pb, [13]uint8{0xe4, 0xec}},
+ Optab{AINL, yin, Px, [13]uint8{0xe5, 0xed}},
+ Optab{AINW, yin, Pe, [13]uint8{0xe5, 0xed}},
+ Optab{AINCB, yincb, Pb, [13]uint8{0xfe, 00}},
+ Optab{AINCL, yincl, Px, [13]uint8{0x40, 0xff, 00}},
+ Optab{AINCW, yincl, Pe, [13]uint8{0x40, 0xff, 00}},
+ Optab{AINSB, ynone, Pb, [13]uint8{0x6c}},
+ Optab{AINSL, ynone, Px, [13]uint8{0x6d}},
+ Optab{AINSW, ynone, Pe, [13]uint8{0x6d}},
+ Optab{AINT, yint, Px, [13]uint8{0xcd}},
+ Optab{AINTO, ynone, Px, [13]uint8{0xce}},
+ Optab{AIRETL, ynone, Px, [13]uint8{0xcf}},
+ Optab{AIRETW, ynone, Pe, [13]uint8{0xcf}},
+ Optab{AJCC, yjcond, Px, [13]uint8{0x73, 0x83, 00}},
+ Optab{AJCS, yjcond, Px, [13]uint8{0x72, 0x82}},
+ Optab{AJCXZL, yloop, Px, [13]uint8{0xe3}},
+ Optab{AJCXZW, yloop, Px, [13]uint8{0xe3}},
+ Optab{AJEQ, yjcond, Px, [13]uint8{0x74, 0x84}},
+ Optab{AJGE, yjcond, Px, [13]uint8{0x7d, 0x8d}},
+ Optab{AJGT, yjcond, Px, [13]uint8{0x7f, 0x8f}},
+ Optab{AJHI, yjcond, Px, [13]uint8{0x77, 0x87}},
+ Optab{AJLE, yjcond, Px, [13]uint8{0x7e, 0x8e}},
+ Optab{AJLS, yjcond, Px, [13]uint8{0x76, 0x86}},
+ Optab{AJLT, yjcond, Px, [13]uint8{0x7c, 0x8c}},
+ Optab{AJMI, yjcond, Px, [13]uint8{0x78, 0x88}},
+ Optab{AJMP, yjmp, Px, [13]uint8{0xff, 04, 0xeb, 0xe9}},
+ Optab{AJNE, yjcond, Px, [13]uint8{0x75, 0x85}},
+ Optab{AJOC, yjcond, Px, [13]uint8{0x71, 0x81, 00}},
+ Optab{AJOS, yjcond, Px, [13]uint8{0x70, 0x80, 00}},
+ Optab{AJPC, yjcond, Px, [13]uint8{0x7b, 0x8b}},
+ Optab{AJPL, yjcond, Px, [13]uint8{0x79, 0x89}},
+ Optab{AJPS, yjcond, Px, [13]uint8{0x7a, 0x8a}},
+ Optab{ALAHF, ynone, Px, [13]uint8{0x9f}},
+ Optab{ALARL, yml_rl, Pm, [13]uint8{0x02}},
+ Optab{ALARW, yml_rl, Pq, [13]uint8{0x02}},
+ Optab{ALEAL, ym_rl, Px, [13]uint8{0x8d}},
+ Optab{ALEAW, ym_rl, Pe, [13]uint8{0x8d}},
+ Optab{ALEAVEL, ynone, Px, [13]uint8{0xc9}},
+ Optab{ALEAVEW, ynone, Pe, [13]uint8{0xc9}},
+ Optab{ALOCK, ynone, Px, [13]uint8{0xf0}},
+ Optab{ALODSB, ynone, Pb, [13]uint8{0xac}},
+ Optab{ALODSL, ynone, Px, [13]uint8{0xad}},
+ Optab{ALODSW, ynone, Pe, [13]uint8{0xad}},
+ Optab{ALONG, ybyte, Px, [13]uint8{4}},
+ Optab{ALOOP, yloop, Px, [13]uint8{0xe2}},
+ Optab{ALOOPEQ, yloop, Px, [13]uint8{0xe1}},
+ Optab{ALOOPNE, yloop, Px, [13]uint8{0xe0}},
+ Optab{ALSLL, yml_rl, Pm, [13]uint8{0x03}},
+ Optab{ALSLW, yml_rl, Pq, [13]uint8{0x03}},
+ Optab{AMOVB, ymovb, Pb, [13]uint8{0x88, 0x8a, 0xb0, 0xc6, 00}},
+ Optab{AMOVL, ymovl, Px, [13]uint8{0x89, 0x8b, 0x31, 0x83, 04, 0xb8, 0xc7, 00, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVW, ymovw, Pe, [13]uint8{0x89, 0x8b, 0x31, 0x83, 04, 0xb8, 0xc7, 00, 0}},
+ Optab{AMOVQ, ymovq, Pf3, [13]uint8{0x7e}},
+ Optab{AMOVBLSX, ymb_rl, Pm, [13]uint8{0xbe}},
+ Optab{AMOVBLZX, ymb_rl, Pm, [13]uint8{0xb6}},
+ Optab{AMOVBWSX, ymb_rl, Pq, [13]uint8{0xbe}},
+ Optab{AMOVBWZX, ymb_rl, Pq, [13]uint8{0xb6}},
+ Optab{AMOVWLSX, yml_rl, Pm, [13]uint8{0xbf}},
+ Optab{AMOVWLZX, yml_rl, Pm, [13]uint8{0xb7}},
+ Optab{AMOVSB, ynone, Pb, [13]uint8{0xa4}},
+ Optab{AMOVSL, ynone, Px, [13]uint8{0xa5}},
+ Optab{AMOVSW, ynone, Pe, [13]uint8{0xa5}},
+ Optab{AMULB, ydivb, Pb, [13]uint8{0xf6, 04}},
+ Optab{AMULL, ydivl, Px, [13]uint8{0xf7, 04}},
+ Optab{AMULW, ydivl, Pe, [13]uint8{0xf7, 04}},
+ Optab{ANAME, nil, 0, [13]uint8{}},
+ Optab{ANEGB, yscond, Px, [13]uint8{0xf6, 03}},
+ Optab{ANEGL, yscond, Px, [13]uint8{0xf7, 03}},
+ Optab{ANEGW, yscond, Pe, [13]uint8{0xf7, 03}},
+ Optab{ANOP, ynop, Px, [13]uint8{0, 0}},
+ Optab{ANOTB, yscond, Px, [13]uint8{0xf6, 02}},
+ Optab{ANOTL, yscond, Px, [13]uint8{0xf7, 02}},
+ Optab{ANOTW, yscond, Pe, [13]uint8{0xf7, 02}},
+ Optab{AORB, yxorb, Pb, [13]uint8{0x0c, 0x80, 01, 0x08, 0x0a}},
+ Optab{AORL, yxorl, Px, [13]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORW, yxorl, Pe, [13]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AOUTB, yin, Pb, [13]uint8{0xe6, 0xee}},
+ Optab{AOUTL, yin, Px, [13]uint8{0xe7, 0xef}},
+ Optab{AOUTW, yin, Pe, [13]uint8{0xe7, 0xef}},
+ Optab{AOUTSB, ynone, Pb, [13]uint8{0x6e}},
+ Optab{AOUTSL, ynone, Px, [13]uint8{0x6f}},
+ Optab{AOUTSW, ynone, Pe, [13]uint8{0x6f}},
+ Optab{APAUSE, ynone, Px, [13]uint8{0xf3, 0x90}},
+ Optab{APOPAL, ynone, Px, [13]uint8{0x61}},
+ Optab{APOPAW, ynone, Pe, [13]uint8{0x61}},
+ Optab{APOPFL, ynone, Px, [13]uint8{0x9d}},
+ Optab{APOPFW, ynone, Pe, [13]uint8{0x9d}},
+ Optab{APOPL, ypopl, Px, [13]uint8{0x58, 0x8f, 00}},
+ Optab{APOPW, ypopl, Pe, [13]uint8{0x58, 0x8f, 00}},
+ Optab{APUSHAL, ynone, Px, [13]uint8{0x60}},
+ Optab{APUSHAW, ynone, Pe, [13]uint8{0x60}},
+ Optab{APUSHFL, ynone, Px, [13]uint8{0x9c}},
+ Optab{APUSHFW, ynone, Pe, [13]uint8{0x9c}},
+ Optab{APUSHL, ypushl, Px, [13]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHW, ypushl, Pe, [13]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{ARCLB, yshb, Pb, [13]uint8{0xd0, 02, 0xc0, 02, 0xd2, 02}},
+ Optab{ARCLL, yshl, Px, [13]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLW, yshl, Pe, [13]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCRB, yshb, Pb, [13]uint8{0xd0, 03, 0xc0, 03, 0xd2, 03}},
+ Optab{ARCRL, yshl, Px, [13]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRW, yshl, Pe, [13]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{AREP, ynone, Px, [13]uint8{0xf3}},
+ Optab{AREPN, ynone, Px, [13]uint8{0xf2}},
+ Optab{ARET, ynone, Px, [13]uint8{0xc3}},
+ Optab{AROLB, yshb, Pb, [13]uint8{0xd0, 00, 0xc0, 00, 0xd2, 00}},
+ Optab{AROLL, yshl, Px, [13]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLW, yshl, Pe, [13]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{ARORB, yshb, Pb, [13]uint8{0xd0, 01, 0xc0, 01, 0xd2, 01}},
+ Optab{ARORL, yshl, Px, [13]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORW, yshl, Pe, [13]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ASAHF, ynone, Px, [13]uint8{0x9e}},
+ Optab{ASALB, yshb, Pb, [13]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASALL, yshl, Px, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALW, yshl, Pe, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASARB, yshb, Pb, [13]uint8{0xd0, 07, 0xc0, 07, 0xd2, 07}},
+ Optab{ASARL, yshl, Px, [13]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARW, yshl, Pe, [13]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASBBB, yxorb, Pb, [13]uint8{0x1c, 0x80, 03, 0x18, 0x1a}},
+ Optab{ASBBL, yxorl, Px, [13]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBW, yxorl, Pe, [13]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASCASB, ynone, Pb, [13]uint8{0xae}},
+ Optab{ASCASL, ynone, Px, [13]uint8{0xaf}},
+ Optab{ASCASW, ynone, Pe, [13]uint8{0xaf}},
+ Optab{ASETCC, yscond, Pm, [13]uint8{0x93, 00}},
+ Optab{ASETCS, yscond, Pm, [13]uint8{0x92, 00}},
+ Optab{ASETEQ, yscond, Pm, [13]uint8{0x94, 00}},
+ Optab{ASETGE, yscond, Pm, [13]uint8{0x9d, 00}},
+ Optab{ASETGT, yscond, Pm, [13]uint8{0x9f, 00}},
+ Optab{ASETHI, yscond, Pm, [13]uint8{0x97, 00}},
+ Optab{ASETLE, yscond, Pm, [13]uint8{0x9e, 00}},
+ Optab{ASETLS, yscond, Pm, [13]uint8{0x96, 00}},
+ Optab{ASETLT, yscond, Pm, [13]uint8{0x9c, 00}},
+ Optab{ASETMI, yscond, Pm, [13]uint8{0x98, 00}},
+ Optab{ASETNE, yscond, Pm, [13]uint8{0x95, 00}},
+ Optab{ASETOC, yscond, Pm, [13]uint8{0x91, 00}},
+ Optab{ASETOS, yscond, Pm, [13]uint8{0x90, 00}},
+ Optab{ASETPC, yscond, Pm, [13]uint8{0x9b, 00}},
+ Optab{ASETPL, yscond, Pm, [13]uint8{0x99, 00}},
+ Optab{ASETPS, yscond, Pm, [13]uint8{0x9a, 00}},
+ Optab{ACDQ, ynone, Px, [13]uint8{0x99}},
+ Optab{ACWD, ynone, Pe, [13]uint8{0x99}},
+ Optab{ASHLB, yshb, Pb, [13]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASHLL, yshl, Px, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLW, yshl, Pe, [13]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHRB, yshb, Pb, [13]uint8{0xd0, 05, 0xc0, 05, 0xd2, 05}},
+ Optab{ASHRL, yshl, Px, [13]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRW, yshl, Pe, [13]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASTC, ynone, Px, [13]uint8{0xf9}},
+ Optab{ASTD, ynone, Px, [13]uint8{0xfd}},
+ Optab{ASTI, ynone, Px, [13]uint8{0xfb}},
+ Optab{ASTOSB, ynone, Pb, [13]uint8{0xaa}},
+ Optab{ASTOSL, ynone, Px, [13]uint8{0xab}},
+ Optab{ASTOSW, ynone, Pe, [13]uint8{0xab}},
+ Optab{ASUBB, yxorb, Pb, [13]uint8{0x2c, 0x80, 05, 0x28, 0x2a}},
+ Optab{ASUBL, yaddl, Px, [13]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBW, yaddl, Pe, [13]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASYSCALL, ynone, Px, [13]uint8{0xcd, 100}},
+ Optab{ATESTB, ytestb, Pb, [13]uint8{0xa8, 0xf6, 00, 0x84, 0x84}},
+ Optab{ATESTL, ytestl, Px, [13]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTW, ytestl, Pe, [13]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATEXT, ytext, Px, [13]uint8{}},
+ Optab{AVERR, ydivl, Pm, [13]uint8{0x00, 04}},
+ Optab{AVERW, ydivl, Pm, [13]uint8{0x00, 05}},
+ Optab{AWAIT, ynone, Px, [13]uint8{0x9b}},
+ Optab{AWORD, ybyte, Px, [13]uint8{2}},
+ Optab{AXCHGB, yml_mb, Pb, [13]uint8{0x86, 0x86}},
+ Optab{AXCHGL, yxchg, Px, [13]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGW, yxchg, Pe, [13]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXLAT, ynone, Px, [13]uint8{0xd7}},
+ Optab{AXORB, yxorb, Pb, [13]uint8{0x34, 0x80, 06, 0x30, 0x32}},
+ Optab{AXORL, yxorl, Px, [13]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORW, yxorl, Pe, [13]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AFMOVB, yfmvx, Px, [13]uint8{0xdf, 04}},
+ Optab{AFMOVBP, yfmvp, Px, [13]uint8{0xdf, 06}},
+ Optab{AFMOVD, yfmvd, Px, [13]uint8{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}},
+ Optab{AFMOVDP, yfmvdp, Px, [13]uint8{0xdd, 03, 0xdd, 03}},
+ Optab{AFMOVF, yfmvf, Px, [13]uint8{0xd9, 00, 0xd9, 02}},
+ Optab{AFMOVFP, yfmvp, Px, [13]uint8{0xd9, 03}},
+ Optab{AFMOVL, yfmvf, Px, [13]uint8{0xdb, 00, 0xdb, 02}},
+ Optab{AFMOVLP, yfmvp, Px, [13]uint8{0xdb, 03}},
+ Optab{AFMOVV, yfmvx, Px, [13]uint8{0xdf, 05}},
+ Optab{AFMOVVP, yfmvp, Px, [13]uint8{0xdf, 07}},
+ Optab{AFMOVW, yfmvf, Px, [13]uint8{0xdf, 00, 0xdf, 02}},
+ Optab{AFMOVWP, yfmvp, Px, [13]uint8{0xdf, 03}},
+ Optab{AFMOVX, yfmvx, Px, [13]uint8{0xdb, 05}},
+ Optab{AFMOVXP, yfmvp, Px, [13]uint8{0xdb, 07}},
+ Optab{AFCOMB, nil, 0, [13]uint8{}},
+ Optab{AFCOMBP, nil, 0, [13]uint8{}},
+ Optab{AFCOMD, yfadd, Px, [13]uint8{0xdc, 02, 0xd8, 02, 0xdc, 02}}, /* botch */
+ Optab{AFCOMDP, yfadd, Px, [13]uint8{0xdc, 03, 0xd8, 03, 0xdc, 03}}, /* botch */
+ Optab{AFCOMDPP, ycompp, Px, [13]uint8{0xde, 03}},
+ Optab{AFCOMF, yfmvx, Px, [13]uint8{0xd8, 02}},
+ Optab{AFCOMFP, yfmvx, Px, [13]uint8{0xd8, 03}},
+ Optab{AFCOMI, yfmvx, Px, [13]uint8{0xdb, 06}},
+ Optab{AFCOMIP, yfmvx, Px, [13]uint8{0xdf, 06}},
+ Optab{AFCOML, yfmvx, Px, [13]uint8{0xda, 02}},
+ Optab{AFCOMLP, yfmvx, Px, [13]uint8{0xda, 03}},
+ Optab{AFCOMW, yfmvx, Px, [13]uint8{0xde, 02}},
+ Optab{AFCOMWP, yfmvx, Px, [13]uint8{0xde, 03}},
+ Optab{AFUCOM, ycompp, Px, [13]uint8{0xdd, 04}},
+ Optab{AFUCOMI, ycompp, Px, [13]uint8{0xdb, 05}},
+ Optab{AFUCOMIP, ycompp, Px, [13]uint8{0xdf, 05}},
+ Optab{AFUCOMP, ycompp, Px, [13]uint8{0xdd, 05}},
+ Optab{AFUCOMPP, ycompp, Px, [13]uint8{0xda, 13}},
+ Optab{AFADDDP, yfaddp, Px, [13]uint8{0xde, 00}},
+ Optab{AFADDW, yfmvx, Px, [13]uint8{0xde, 00}},
+ Optab{AFADDL, yfmvx, Px, [13]uint8{0xda, 00}},
+ Optab{AFADDF, yfmvx, Px, [13]uint8{0xd8, 00}},
+ Optab{AFADDD, yfadd, Px, [13]uint8{0xdc, 00, 0xd8, 00, 0xdc, 00}},
+ Optab{AFMULDP, yfaddp, Px, [13]uint8{0xde, 01}},
+ Optab{AFMULW, yfmvx, Px, [13]uint8{0xde, 01}},
+ Optab{AFMULL, yfmvx, Px, [13]uint8{0xda, 01}},
+ Optab{AFMULF, yfmvx, Px, [13]uint8{0xd8, 01}},
+ Optab{AFMULD, yfadd, Px, [13]uint8{0xdc, 01, 0xd8, 01, 0xdc, 01}},
+ Optab{AFSUBDP, yfaddp, Px, [13]uint8{0xde, 05}},
+ Optab{AFSUBW, yfmvx, Px, [13]uint8{0xde, 04}},
+ Optab{AFSUBL, yfmvx, Px, [13]uint8{0xda, 04}},
+ Optab{AFSUBF, yfmvx, Px, [13]uint8{0xd8, 04}},
+ Optab{AFSUBD, yfadd, Px, [13]uint8{0xdc, 04, 0xd8, 04, 0xdc, 05}},
+ Optab{AFSUBRDP, yfaddp, Px, [13]uint8{0xde, 04}},
+ Optab{AFSUBRW, yfmvx, Px, [13]uint8{0xde, 05}},
+ Optab{AFSUBRL, yfmvx, Px, [13]uint8{0xda, 05}},
+ Optab{AFSUBRF, yfmvx, Px, [13]uint8{0xd8, 05}},
+ Optab{AFSUBRD, yfadd, Px, [13]uint8{0xdc, 05, 0xd8, 05, 0xdc, 04}},
+ Optab{AFDIVDP, yfaddp, Px, [13]uint8{0xde, 07}},
+ Optab{AFDIVW, yfmvx, Px, [13]uint8{0xde, 06}},
+ Optab{AFDIVL, yfmvx, Px, [13]uint8{0xda, 06}},
+ Optab{AFDIVF, yfmvx, Px, [13]uint8{0xd8, 06}},
+ Optab{AFDIVD, yfadd, Px, [13]uint8{0xdc, 06, 0xd8, 06, 0xdc, 07}},
+ Optab{AFDIVRDP, yfaddp, Px, [13]uint8{0xde, 06}},
+ Optab{AFDIVRW, yfmvx, Px, [13]uint8{0xde, 07}},
+ Optab{AFDIVRL, yfmvx, Px, [13]uint8{0xda, 07}},
+ Optab{AFDIVRF, yfmvx, Px, [13]uint8{0xd8, 07}},
+ Optab{AFDIVRD, yfadd, Px, [13]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}},
+ Optab{AFXCHD, yfxch, Px, [13]uint8{0xd9, 01, 0xd9, 01}},
+ Optab{AFFREE, nil, 0, [13]uint8{}},
+ Optab{AFLDCW, ystcw, Px, [13]uint8{0xd9, 05, 0xd9, 05}},
+ Optab{AFLDENV, ystcw, Px, [13]uint8{0xd9, 04, 0xd9, 04}},
+ Optab{AFRSTOR, ysvrs, Px, [13]uint8{0xdd, 04, 0xdd, 04}},
+ Optab{AFSAVE, ysvrs, Px, [13]uint8{0xdd, 06, 0xdd, 06}},
+ Optab{AFSTCW, ystcw, Px, [13]uint8{0xd9, 07, 0xd9, 07}},
+ Optab{AFSTENV, ystcw, Px, [13]uint8{0xd9, 06, 0xd9, 06}},
+ Optab{AFSTSW, ystsw, Px, [13]uint8{0xdd, 07, 0xdf, 0xe0}},
+ Optab{AF2XM1, ynone, Px, [13]uint8{0xd9, 0xf0}},
+ Optab{AFABS, ynone, Px, [13]uint8{0xd9, 0xe1}},
+ Optab{AFCHS, ynone, Px, [13]uint8{0xd9, 0xe0}},
+ Optab{AFCLEX, ynone, Px, [13]uint8{0xdb, 0xe2}},
+ Optab{AFCOS, ynone, Px, [13]uint8{0xd9, 0xff}},
+ Optab{AFDECSTP, ynone, Px, [13]uint8{0xd9, 0xf6}},
+ Optab{AFINCSTP, ynone, Px, [13]uint8{0xd9, 0xf7}},
+ Optab{AFINIT, ynone, Px, [13]uint8{0xdb, 0xe3}},
+ Optab{AFLD1, ynone, Px, [13]uint8{0xd9, 0xe8}},
+ Optab{AFLDL2E, ynone, Px, [13]uint8{0xd9, 0xea}},
+ Optab{AFLDL2T, ynone, Px, [13]uint8{0xd9, 0xe9}},
+ Optab{AFLDLG2, ynone, Px, [13]uint8{0xd9, 0xec}},
+ Optab{AFLDLN2, ynone, Px, [13]uint8{0xd9, 0xed}},
+ Optab{AFLDPI, ynone, Px, [13]uint8{0xd9, 0xeb}},
+ Optab{AFLDZ, ynone, Px, [13]uint8{0xd9, 0xee}},
+ Optab{AFNOP, ynone, Px, [13]uint8{0xd9, 0xd0}},
+ Optab{AFPATAN, ynone, Px, [13]uint8{0xd9, 0xf3}},
+ Optab{AFPREM, ynone, Px, [13]uint8{0xd9, 0xf8}},
+ Optab{AFPREM1, ynone, Px, [13]uint8{0xd9, 0xf5}},
+ Optab{AFPTAN, ynone, Px, [13]uint8{0xd9, 0xf2}},
+ Optab{AFRNDINT, ynone, Px, [13]uint8{0xd9, 0xfc}},
+ Optab{AFSCALE, ynone, Px, [13]uint8{0xd9, 0xfd}},
+ Optab{AFSIN, ynone, Px, [13]uint8{0xd9, 0xfe}},
+ Optab{AFSINCOS, ynone, Px, [13]uint8{0xd9, 0xfb}},
+ Optab{AFSQRT, ynone, Px, [13]uint8{0xd9, 0xfa}},
+ Optab{AFTST, ynone, Px, [13]uint8{0xd9, 0xe4}},
+ Optab{AFXAM, ynone, Px, [13]uint8{0xd9, 0xe5}},
+ Optab{AFXTRACT, ynone, Px, [13]uint8{0xd9, 0xf4}},
+ Optab{AFYL2X, ynone, Px, [13]uint8{0xd9, 0xf1}},
+ Optab{AFYL2XP1, ynone, Px, [13]uint8{0xd9, 0xf9}},
+ Optab{AEND, nil, 0, [13]uint8{}},
+ Optab{ADYNT_, nil, 0, [13]uint8{}},
+ Optab{AINIT_, nil, 0, [13]uint8{}},
+ Optab{ASIGNAME, nil, 0, [13]uint8{}},
+ Optab{ACMPXCHGB, yrb_mb, Pm, [13]uint8{0xb0}},
+ Optab{ACMPXCHGL, yrl_ml, Pm, [13]uint8{0xb1}},
+ Optab{ACMPXCHGW, yrl_ml, Pm, [13]uint8{0xb1}},
+ Optab{ACMPXCHG8B, yscond, Pm, [13]uint8{0xc7, 01}},
+ Optab{ACPUID, ynone, Pm, [13]uint8{0xa2}},
+ Optab{ARDTSC, ynone, Pm, [13]uint8{0x31}},
+ Optab{AXADDB, yrb_mb, Pb, [13]uint8{0x0f, 0xc0}},
+ Optab{AXADDL, yrl_ml, Pm, [13]uint8{0xc1}},
+ Optab{AXADDW, yrl_ml, Pe, [13]uint8{0x0f, 0xc1}},
+ Optab{ACMOVLCC, yml_rl, Pm, [13]uint8{0x43}},
+ Optab{ACMOVLCS, yml_rl, Pm, [13]uint8{0x42}},
+ Optab{ACMOVLEQ, yml_rl, Pm, [13]uint8{0x44}},
+ Optab{ACMOVLGE, yml_rl, Pm, [13]uint8{0x4d}},
+ Optab{ACMOVLGT, yml_rl, Pm, [13]uint8{0x4f}},
+ Optab{ACMOVLHI, yml_rl, Pm, [13]uint8{0x47}},
+ Optab{ACMOVLLE, yml_rl, Pm, [13]uint8{0x4e}},
+ Optab{ACMOVLLS, yml_rl, Pm, [13]uint8{0x46}},
+ Optab{ACMOVLLT, yml_rl, Pm, [13]uint8{0x4c}},
+ Optab{ACMOVLMI, yml_rl, Pm, [13]uint8{0x48}},
+ Optab{ACMOVLNE, yml_rl, Pm, [13]uint8{0x45}},
+ Optab{ACMOVLOC, yml_rl, Pm, [13]uint8{0x41}},
+ Optab{ACMOVLOS, yml_rl, Pm, [13]uint8{0x40}},
+ Optab{ACMOVLPC, yml_rl, Pm, [13]uint8{0x4b}},
+ Optab{ACMOVLPL, yml_rl, Pm, [13]uint8{0x49}},
+ Optab{ACMOVLPS, yml_rl, Pm, [13]uint8{0x4a}},
+ Optab{ACMOVWCC, yml_rl, Pq, [13]uint8{0x43}},
+ Optab{ACMOVWCS, yml_rl, Pq, [13]uint8{0x42}},
+ Optab{ACMOVWEQ, yml_rl, Pq, [13]uint8{0x44}},
+ Optab{ACMOVWGE, yml_rl, Pq, [13]uint8{0x4d}},
+ Optab{ACMOVWGT, yml_rl, Pq, [13]uint8{0x4f}},
+ Optab{ACMOVWHI, yml_rl, Pq, [13]uint8{0x47}},
+ Optab{ACMOVWLE, yml_rl, Pq, [13]uint8{0x4e}},
+ Optab{ACMOVWLS, yml_rl, Pq, [13]uint8{0x46}},
+ Optab{ACMOVWLT, yml_rl, Pq, [13]uint8{0x4c}},
+ Optab{ACMOVWMI, yml_rl, Pq, [13]uint8{0x48}},
+ Optab{ACMOVWNE, yml_rl, Pq, [13]uint8{0x45}},
+ Optab{ACMOVWOC, yml_rl, Pq, [13]uint8{0x41}},
+ Optab{ACMOVWOS, yml_rl, Pq, [13]uint8{0x40}},
+ Optab{ACMOVWPC, yml_rl, Pq, [13]uint8{0x4b}},
+ Optab{ACMOVWPL, yml_rl, Pq, [13]uint8{0x49}},
+ Optab{ACMOVWPS, yml_rl, Pq, [13]uint8{0x4a}},
+ Optab{AFCMOVCC, yfcmv, Px, [13]uint8{0xdb, 00}},
+ Optab{AFCMOVCS, yfcmv, Px, [13]uint8{0xda, 00}},
+ Optab{AFCMOVEQ, yfcmv, Px, [13]uint8{0xda, 01}},
+ Optab{AFCMOVHI, yfcmv, Px, [13]uint8{0xdb, 02}},
+ Optab{AFCMOVLS, yfcmv, Px, [13]uint8{0xda, 02}},
+ Optab{AFCMOVNE, yfcmv, Px, [13]uint8{0xdb, 01}},
+ Optab{AFCMOVNU, yfcmv, Px, [13]uint8{0xdb, 03}},
+ Optab{AFCMOVUN, yfcmv, Px, [13]uint8{0xda, 03}},
+ Optab{ALFENCE, ynone, Pm, [13]uint8{0xae, 0xe8}},
+ Optab{AMFENCE, ynone, Pm, [13]uint8{0xae, 0xf0}},
+ Optab{ASFENCE, ynone, Pm, [13]uint8{0xae, 0xf8}},
+ Optab{AEMMS, ynone, Pm, [13]uint8{0x77}},
+ Optab{APREFETCHT0, yprefetch, Pm, [13]uint8{0x18, 01}},
+ Optab{APREFETCHT1, yprefetch, Pm, [13]uint8{0x18, 02}},
+ Optab{APREFETCHT2, yprefetch, Pm, [13]uint8{0x18, 03}},
+ Optab{APREFETCHNTA, yprefetch, Pm, [13]uint8{0x18, 00}},
+ Optab{ABSWAPL, ybswap, Pm, [13]uint8{0xc8}},
+ Optab{AUNDEF, ynone, Px, [13]uint8{0x0f, 0x0b}},
+ Optab{AADDPD, yxm, Pq, [13]uint8{0x58}},
+ Optab{AADDPS, yxm, Pm, [13]uint8{0x58}},
+ Optab{AADDSD, yxm, Pf2, [13]uint8{0x58}},
+ Optab{AADDSS, yxm, Pf3, [13]uint8{0x58}},
+ Optab{AANDNPD, yxm, Pq, [13]uint8{0x55}},
+ Optab{AANDNPS, yxm, Pm, [13]uint8{0x55}},
+ Optab{AANDPD, yxm, Pq, [13]uint8{0x54}},
+ Optab{AANDPS, yxm, Pq, [13]uint8{0x54}},
+ Optab{ACMPPD, yxcmpi, Px, [13]uint8{Pe, 0xc2}},
+ Optab{ACMPPS, yxcmpi, Pm, [13]uint8{0xc2, 0}},
+ Optab{ACMPSD, yxcmpi, Px, [13]uint8{Pf2, 0xc2}},
+ Optab{ACMPSS, yxcmpi, Px, [13]uint8{Pf3, 0xc2}},
+ Optab{ACOMISD, yxcmp, Pe, [13]uint8{0x2f}},
+ Optab{ACOMISS, yxcmp, Pm, [13]uint8{0x2f}},
+ Optab{ACVTPL2PD, yxcvm2, Px, [13]uint8{Pf3, 0xe6, Pe, 0x2a}},
+ Optab{ACVTPL2PS, yxcvm2, Pm, [13]uint8{0x5b, 0, 0x2a, 0}},
+ Optab{ACVTPD2PL, yxcvm1, Px, [13]uint8{Pf2, 0xe6, Pe, 0x2d}},
+ Optab{ACVTPD2PS, yxm, Pe, [13]uint8{0x5a}},
+ Optab{ACVTPS2PL, yxcvm1, Px, [13]uint8{Pe, 0x5b, Pm, 0x2d}},
+ Optab{ACVTPS2PD, yxm, Pm, [13]uint8{0x5a}},
+ Optab{ACVTSD2SL, yxcvfl, Pf2, [13]uint8{0x2d}},
+ Optab{ACVTSD2SS, yxm, Pf2, [13]uint8{0x5a}},
+ Optab{ACVTSL2SD, yxcvlf, Pf2, [13]uint8{0x2a}},
+ Optab{ACVTSL2SS, yxcvlf, Pf3, [13]uint8{0x2a}},
+ Optab{ACVTSS2SD, yxm, Pf3, [13]uint8{0x5a}},
+ Optab{ACVTSS2SL, yxcvfl, Pf3, [13]uint8{0x2d}},
+ Optab{ACVTTPD2PL, yxcvm1, Px, [13]uint8{Pe, 0xe6, Pe, 0x2c}},
+ Optab{ACVTTPS2PL, yxcvm1, Px, [13]uint8{Pf3, 0x5b, Pm, 0x2c}},
+ Optab{ACVTTSD2SL, yxcvfl, Pf2, [13]uint8{0x2c}},
+ Optab{ACVTTSS2SL, yxcvfl, Pf3, [13]uint8{0x2c}},
+ Optab{ADIVPD, yxm, Pe, [13]uint8{0x5e}},
+ Optab{ADIVPS, yxm, Pm, [13]uint8{0x5e}},
+ Optab{ADIVSD, yxm, Pf2, [13]uint8{0x5e}},
+ Optab{ADIVSS, yxm, Pf3, [13]uint8{0x5e}},
+ Optab{AMASKMOVOU, yxr, Pe, [13]uint8{0xf7}},
+ Optab{AMAXPD, yxm, Pe, [13]uint8{0x5f}},
+ Optab{AMAXPS, yxm, Pm, [13]uint8{0x5f}},
+ Optab{AMAXSD, yxm, Pf2, [13]uint8{0x5f}},
+ Optab{AMAXSS, yxm, Pf3, [13]uint8{0x5f}},
+ Optab{AMINPD, yxm, Pe, [13]uint8{0x5d}},
+ Optab{AMINPS, yxm, Pm, [13]uint8{0x5d}},
+ Optab{AMINSD, yxm, Pf2, [13]uint8{0x5d}},
+ Optab{AMINSS, yxm, Pf3, [13]uint8{0x5d}},
+ Optab{AMOVAPD, yxmov, Pe, [13]uint8{0x28, 0x29}},
+ Optab{AMOVAPS, yxmov, Pm, [13]uint8{0x28, 0x29}},
+ Optab{AMOVO, yxmov, Pe, [13]uint8{0x6f, 0x7f}},
+ Optab{AMOVOU, yxmov, Pf3, [13]uint8{0x6f, 0x7f}},
+ Optab{AMOVHLPS, yxr, Pm, [13]uint8{0x12}},
+ Optab{AMOVHPD, yxmov, Pe, [13]uint8{0x16, 0x17}},
+ Optab{AMOVHPS, yxmov, Pm, [13]uint8{0x16, 0x17}},
+ Optab{AMOVLHPS, yxr, Pm, [13]uint8{0x16}},
+ Optab{AMOVLPD, yxmov, Pe, [13]uint8{0x12, 0x13}},
+ Optab{AMOVLPS, yxmov, Pm, [13]uint8{0x12, 0x13}},
+ Optab{AMOVMSKPD, yxrrl, Pq, [13]uint8{0x50}},
+ Optab{AMOVMSKPS, yxrrl, Pm, [13]uint8{0x50}},
+ Optab{AMOVNTO, yxr_ml, Pe, [13]uint8{0xe7}},
+ Optab{AMOVNTPD, yxr_ml, Pe, [13]uint8{0x2b}},
+ Optab{AMOVNTPS, yxr_ml, Pm, [13]uint8{0x2b}},
+ Optab{AMOVSD, yxmov, Pf2, [13]uint8{0x10, 0x11}},
+ Optab{AMOVSS, yxmov, Pf3, [13]uint8{0x10, 0x11}},
+ Optab{AMOVUPD, yxmov, Pe, [13]uint8{0x10, 0x11}},
+ Optab{AMOVUPS, yxmov, Pm, [13]uint8{0x10, 0x11}},
+ Optab{AMULPD, yxm, Pe, [13]uint8{0x59}},
+ Optab{AMULPS, yxm, Ym, [13]uint8{0x59}},
+ Optab{AMULSD, yxm, Pf2, [13]uint8{0x59}},
+ Optab{AMULSS, yxm, Pf3, [13]uint8{0x59}},
+ Optab{AORPD, yxm, Pq, [13]uint8{0x56}},
+ Optab{AORPS, yxm, Pm, [13]uint8{0x56}},
+ Optab{APADDQ, yxm, Pe, [13]uint8{0xd4}},
+ Optab{APAND, yxm, Pe, [13]uint8{0xdb}},
+ Optab{APCMPEQB, yxmq, Pe, [13]uint8{0x74}},
+ Optab{APMAXSW, yxm, Pe, [13]uint8{0xee}},
+ Optab{APMAXUB, yxm, Pe, [13]uint8{0xde}},
+ Optab{APMINSW, yxm, Pe, [13]uint8{0xea}},
+ Optab{APMINUB, yxm, Pe, [13]uint8{0xda}},
+ Optab{APMOVMSKB, ymskb, Px, [13]uint8{Pe, 0xd7, 0xd7}},
+ Optab{APSADBW, yxm, Pq, [13]uint8{0xf6}},
+ Optab{APSUBB, yxm, Pe, [13]uint8{0xf8}},
+ Optab{APSUBL, yxm, Pe, [13]uint8{0xfa}},
+ Optab{APSUBQ, yxm, Pe, [13]uint8{0xfb}},
+ Optab{APSUBSB, yxm, Pe, [13]uint8{0xe8}},
+ Optab{APSUBSW, yxm, Pe, [13]uint8{0xe9}},
+ Optab{APSUBUSB, yxm, Pe, [13]uint8{0xd8}},
+ Optab{APSUBUSW, yxm, Pe, [13]uint8{0xd9}},
+ Optab{APSUBW, yxm, Pe, [13]uint8{0xf9}},
+ Optab{APUNPCKHQDQ, yxm, Pe, [13]uint8{0x6d}},
+ Optab{APUNPCKLQDQ, yxm, Pe, [13]uint8{0x6c}},
+ Optab{APXOR, yxm, Pe, [13]uint8{0xef}},
+ Optab{ARCPPS, yxm, Pm, [13]uint8{0x53}},
+ Optab{ARCPSS, yxm, Pf3, [13]uint8{0x53}},
+ Optab{ARSQRTPS, yxm, Pm, [13]uint8{0x52}},
+ Optab{ARSQRTSS, yxm, Pf3, [13]uint8{0x52}},
+ Optab{ASQRTPD, yxm, Pe, [13]uint8{0x51}},
+ Optab{ASQRTPS, yxm, Pm, [13]uint8{0x51}},
+ Optab{ASQRTSD, yxm, Pf2, [13]uint8{0x51}},
+ Optab{ASQRTSS, yxm, Pf3, [13]uint8{0x51}},
+ Optab{ASUBPD, yxm, Pe, [13]uint8{0x5c}},
+ Optab{ASUBPS, yxm, Pm, [13]uint8{0x5c}},
+ Optab{ASUBSD, yxm, Pf2, [13]uint8{0x5c}},
+ Optab{ASUBSS, yxm, Pf3, [13]uint8{0x5c}},
+ Optab{AUCOMISD, yxcmp, Pe, [13]uint8{0x2e}},
+ Optab{AUCOMISS, yxcmp, Pm, [13]uint8{0x2e}},
+ Optab{AUNPCKHPD, yxm, Pe, [13]uint8{0x15}},
+ Optab{AUNPCKHPS, yxm, Pm, [13]uint8{0x15}},
+ Optab{AUNPCKLPD, yxm, Pe, [13]uint8{0x14}},
+ Optab{AUNPCKLPS, yxm, Pm, [13]uint8{0x14}},
+ Optab{AXORPD, yxm, Pe, [13]uint8{0x57}},
+ Optab{AXORPS, yxm, Pm, [13]uint8{0x57}},
+ Optab{APSHUFHW, yxshuf, Pf3, [13]uint8{0x70, 00}},
+ Optab{APSHUFL, yxshuf, Pq, [13]uint8{0x70, 00}},
+ Optab{APSHUFLW, yxshuf, Pf2, [13]uint8{0x70, 00}},
+ Optab{AAESENC, yaes, Pq, [13]uint8{0x38, 0xdc, 0}},
+ Optab{APINSRD, yinsrd, Pq, [13]uint8{0x3a, 0x22, 00}},
+ Optab{APSHUFB, ymshufb, Pq, [13]uint8{0x38, 0x00}},
+ Optab{AUSEFIELD, ynop, Px, [13]uint8{0, 0}},
+ Optab{ATYPE, nil, 0, [13]uint8{}},
+ Optab{AFUNCDATA, yfuncdata, Px, [13]uint8{0, 0}},
+ Optab{APCDATA, ypcdata, Px, [13]uint8{0, 0}},
+ Optab{ACHECKNIL, nil, 0, [13]uint8{}},
+ Optab{AVARDEF, nil, 0, [13]uint8{}},
+ Optab{AVARKILL, nil, 0, [13]uint8{}},
+ Optab{ADUFFCOPY, yduff, Px, [13]uint8{0xe8}},
+ Optab{ADUFFZERO, yduff, Px, [13]uint8{0xe8}},
+ Optab{0, nil, 0, [13]uint8{}},
+}
+
// single-instruction no-ops of various lengths.
// constructed by hand and disassembled with gdb to verify.
// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
var nop = [][16]uint8{
	{0x90},
	{0x66, 0x90},
	{0x0F, 0x1F, 0x00},
	{0x0F, 0x1F, 0x40, 0x00},
	{0x0F, 0x1F, 0x44, 0x00, 0x00},
	{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
	{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
	{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
	{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
}

// Native Client rejects the repeated 0x66 prefix.
// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},

// fillnop fills the first n bytes of p with no-op instructions,
// greedily emitting the longest available encoding (len(nop) bytes)
// until the remaining count is exhausted.
func fillnop(p []byte, n int) {
	for n > 0 {
		take := n
		if take > len(nop) {
			take = len(nop)
		}
		// nop[take-1] is the encoding that is exactly take bytes long.
		copy(p[:take], nop[take-1][:take])
		p = p[take:]
		n -= take
	}
}
+
+func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
+ obj.Symgrow(ctxt, s, int64(c)+int64(pad))
+ fillnop(s.P[c:], int(pad))
+ return c + pad
+}
+
+func span8(ctxt *obj.Link, s *obj.LSym) {
+ var p *obj.Prog
+ var q *obj.Prog
+ var c int32
+ var v int32
+ var loop int32
+ var bp []byte
+ var n int
+ var m int
+ var i int
+
+ ctxt.Cursym = s
+
+ if s.Text == nil || s.Text.Link == nil {
+ return
+ }
+
+ if ycover[0] == 0 {
+ instinit()
+ }
+
+ for p = s.Text; p != nil; p = p.Link {
+ n = 0
+ if p.To.Type_ == D_BRANCH {
+ if p.Pcond == nil {
+ p.Pcond = p
+ }
+ }
+ q = p.Pcond
+ if q != nil {
+ if q.Back != 2 {
+ n = 1
+ }
+ }
+ p.Back = uint8(n)
+ if p.As == AADJSP {
+ p.To.Type_ = D_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = AADDL
+ if v < 0 {
+ p.As = ASUBL
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = ANOP
+ }
+ }
+ }
+
+ for p = s.Text; p != nil; p = p.Link {
+ p.Back = 2 // use short branches first time through
+ q = p.Pcond
+ if q != nil && (q.Back&2 != 0) {
+ p.Back |= 1 // backward jump
+ }
+
+ if p.As == AADJSP {
+
+ p.To.Type_ = D_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = AADDL
+ if v < 0 {
+ p.As = ASUBL
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = ANOP
+ }
+ }
+ }
+
+ n = 0
+ for {
+ loop = 0
+ for i = 0; i < len(s.R); i++ {
+ s.R[i] = obj.Reloc{}
+ }
+ s.R = s.R[:0]
+ s.P = s.P[:0]
+ c = 0
+ for p = s.Text; p != nil; p = p.Link {
+ if ctxt.Headtype == obj.Hnacl && p.Isize > 0 {
+ var deferreturn *obj.LSym
+
+ if deferreturn == nil {
+ deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+ }
+
+ // pad everything to avoid crossing 32-byte boundary
+ if c>>5 != (c+int32(p.Isize)-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call deferreturn to start at 32-byte boundary
+ // so that subtracting 5 in jmpdefer will jump back
+ // to that boundary and rerun the call.
+ if p.As == ACALL && p.To.Sym == deferreturn {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call to end at 32-byte boundary
+ if p.As == ACALL {
+
+ c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
+ }
+
+ // the linker treats REP and STOSQ as different instructions
+ // but in fact the REP is a prefix on the STOSQ.
+ // make sure REP has room for 2 more bytes, so that
+ // padding will not be inserted before the next instruction.
+ if p.As == AREP && c>>5 != (c+3-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // same for LOCK.
+ // various instructions follow; the longest is 4 bytes.
+ // give ourselves 8 bytes so as to avoid surprises.
+ if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+ }
+
+ p.Pc = int64(c)
+
+ // process forward jumps to p
+ for q = p.Comefrom; q != nil; q = q.Forwd {
+
+ v = int32(p.Pc - (q.Pc + int64(q.Mark)))
+ if q.Back&2 != 0 { // short
+ if v > 127 {
+ loop++
+ q.Back ^= 2
+ }
+
+ if q.As == AJCXZW {
+ s.P[q.Pc+2] = byte(v)
+ } else {
+
+ s.P[q.Pc+1] = byte(v)
+ }
+ } else {
+
+ bp = s.P[q.Pc+int64(q.Mark)-4:]
+ bp[0] = byte(v)
+ bp = bp[1:]
+ bp[0] = byte(v >> 8)
+ bp = bp[1:]
+ bp[0] = byte(v >> 16)
+ bp = bp[1:]
+ bp[0] = byte(v >> 24)
+ }
+ }
+
+ p.Comefrom = nil
+
+ p.Pc = int64(c)
+ asmins(ctxt, p)
+ m = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+ if int(p.Isize) != m {
+ p.Isize = uint8(m)
+ loop++
+ }
+
+ obj.Symgrow(ctxt, s, p.Pc+int64(m))
+ copy(s.P[p.Pc:][:m], ctxt.And[:m])
+ p.Mark = uint16(m)
+ c += int32(m)
+ }
+
+ n++
+ if n > 20 {
+ ctxt.Diag("span must be looping")
+ log.Fatalf("bad code")
+ }
+ if !(loop != 0) {
+ break
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+ c += -c & (FuncAlign - 1)
+ s.Size = int64(c)
+
+ if false { /* debug['a'] > 1 */
+ fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ for i = 0; i < len(s.P); i++ {
+ fmt.Printf(" %.2x", s.P[i])
+ if i%16 == 15 {
+ fmt.Printf("\n %.6x", uint(i+1))
+ }
+ }
+
+ if i%16 != 0 {
+ fmt.Printf("\n")
+ }
+
+ for i = 0; i < len(s.R); i++ {
+ var r *obj.Reloc
+
+ r = &s.R[i]
+ fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
+ }
+ }
+}
+
// instinit performs one-time initialization of the assembler tables:
// it checks that optab is indexed by instruction number, fills in the
// ycover operand-class compatibility matrix, and builds the reg table
// mapping D_* register constants to their 3-bit machine encodings.
func instinit() {
	var i int

	// optab must satisfy optab[i].as == i so lookups can index directly.
	for i = 1; optab[i].as != 0; i++ {
		if i != int(optab[i].as) {
			log.Fatalf("phase error in optab: at %v found %v", Aconv(i), Aconv(int(optab[i].as)))
		}
	}

	// Every operand class is acceptable where itself is required.
	for i = 0; i < Ymax; i++ {
		ycover[i*Ymax+i] = 1
	}

	// ycover[have*Ymax+want] != 0 means an operand of class `have` may
	// be used where class `want` is required.
	// Immediates widen: $0 and $1 fit Yi8; all of those fit Yi32.
	ycover[Yi0*Ymax+Yi8] = 1
	ycover[Yi1*Ymax+Yi8] = 1

	ycover[Yi0*Ymax+Yi32] = 1
	ycover[Yi1*Ymax+Yi32] = 1
	ycover[Yi8*Ymax+Yi32] = 1

	// Specific byte registers satisfy the generic byte-register class.
	ycover[Yal*Ymax+Yrb] = 1
	ycover[Ycl*Ymax+Yrb] = 1
	ycover[Yax*Ymax+Yrb] = 1
	ycover[Ycx*Ymax+Yrb] = 1
	ycover[Yrx*Ymax+Yrb] = 1

	ycover[Yax*Ymax+Yrx] = 1
	ycover[Ycx*Ymax+Yrx] = 1

	// AX/CX/DX/BX satisfy the generic 32-bit register class.
	ycover[Yax*Ymax+Yrl] = 1
	ycover[Ycx*Ymax+Yrl] = 1
	ycover[Yrx*Ymax+Yrl] = 1

	ycover[Yf0*Ymax+Yrf] = 1

	// Registers and plain memory both satisfy the reg-or-mem classes
	// (Ymb byte, Yml long, Ymm MMX, Yxm XMM).
	ycover[Yal*Ymax+Ymb] = 1
	ycover[Ycl*Ymax+Ymb] = 1
	ycover[Yax*Ymax+Ymb] = 1
	ycover[Ycx*Ymax+Ymb] = 1
	ycover[Yrx*Ymax+Ymb] = 1
	ycover[Yrb*Ymax+Ymb] = 1
	ycover[Ym*Ymax+Ymb] = 1

	ycover[Yax*Ymax+Yml] = 1
	ycover[Ycx*Ymax+Yml] = 1
	ycover[Yrx*Ymax+Yml] = 1
	ycover[Yrl*Ymax+Yml] = 1
	ycover[Ym*Ymax+Yml] = 1

	ycover[Yax*Ymax+Ymm] = 1
	ycover[Ycx*Ymax+Ymm] = 1
	ycover[Yrx*Ymax+Ymm] = 1
	ycover[Yrl*Ymax+Ymm] = 1
	ycover[Ym*Ymax+Ymm] = 1
	ycover[Ymr*Ymax+Ymm] = 1

	ycover[Ym*Ymax+Yxm] = 1
	ycover[Yxr*Ymax+Yxm] = 1

	// reg[d] is the 3-bit encoding of register constant d, or -1 for
	// non-register values. Each register family encodes as its index
	// within the family (masked to 3 bits).
	for i = 0; i < D_NONE; i++ {
		reg[i] = -1
		if i >= D_AL && i <= D_BH {
			reg[i] = (i - D_AL) & 7
		}
		if i >= D_AX && i <= D_DI {
			reg[i] = (i - D_AX) & 7
		}
		if i >= D_F0 && i <= D_F0+7 {
			reg[i] = (i - D_F0) & 7
		}
		if i >= D_X0 && i <= D_X0+7 {
			reg[i] = (i - D_X0) & 7
		}
	}
}
+
+func prefixof(ctxt *obj.Link, a *obj.Addr) int {
+ switch a.Type_ {
+ case D_INDIR + D_CS:
+ return 0x2e
+
+ case D_INDIR + D_DS:
+ return 0x3e
+
+ case D_INDIR + D_ES:
+ return 0x26
+
+ case D_INDIR + D_FS:
+ return 0x64
+
+ case D_INDIR + D_GS:
+ return 0x65
+
+ // NOTE: Systems listed here should be only systems that
+ // support direct TLS references like 8(TLS) implemented as
+ // direct references from FS or GS. Systems that require
+ // the initial-exec model, where you load the TLS base into
+ // a register and then index from that register, do not reach
+ // this code and should not be listed.
+ case D_INDIR + D_TLS:
+ switch ctxt.Headtype {
+
+ default:
+ log.Fatalf("unknown TLS base register for %s", obj.Headstr(ctxt.Headtype))
+
+ case obj.Hdarwin,
+ obj.Hdragonfly,
+ obj.Hfreebsd,
+ obj.Hnetbsd,
+ obj.Hopenbsd:
+ return 0x65 // GS
+ }
+ }
+
+ return 0
+}
+
// oclass classifies the operand a into one of the Y* operand classes
// used to index the encoding tables (e.g. Yrl for a general 32-bit
// register, Yi8 for a small immediate, Ym for memory, Ybr for a
// branch target). Yxxx means "no valid class".
func oclass(a *obj.Addr) int {
	var v int32

	// Indirect or indexed operands are memory references of some kind.
	if (a.Type_ >= D_INDIR && a.Type_ < 2*D_INDIR) || a.Index != D_NONE {
		if a.Index != D_NONE && a.Scale == 0 {
			if a.Type_ == D_ADDR {
				switch a.Index {
				case D_EXTERN,
					D_STATIC:
					return Yi32

				case D_AUTO,
					D_PARAM:
					return Yiauto
				}

				return Yxxx
			}

			//if(a->type == D_INDIR+D_ADDR)
			//	print("*Ycol\n");
			return Ycol
		}

		return Ym
	}

	switch a.Type_ {
	// Individual registers that some instructions address specially
	// (AL, AX, CL, CX) get their own classes; the rest fall into the
	// generic byte/word/long register classes.
	case D_AL:
		return Yal

	case D_AX:
		return Yax

	case D_CL,
		D_DL,
		D_BL,
		D_AH,
		D_CH,
		D_DH,
		D_BH:
		return Yrb

	case D_CX:
		return Ycx

	case D_DX,
		D_BX:
		return Yrx

	case D_SP,
		D_BP,
		D_SI,
		D_DI:
		return Yrl

	// x87 stack: F0 is special (implicit operand of most FP ops).
	case D_F0 + 0:
		return Yf0

	case D_F0 + 1,
		D_F0 + 2,
		D_F0 + 3,
		D_F0 + 4,
		D_F0 + 5,
		D_F0 + 6,
		D_F0 + 7:
		return Yrf

	case D_X0 + 0,
		D_X0 + 1,
		D_X0 + 2,
		D_X0 + 3,
		D_X0 + 4,
		D_X0 + 5,
		D_X0 + 6,
		D_X0 + 7:
		return Yxr

	case D_NONE:
		return Ynone

	// Segment registers each have a dedicated class.
	case D_CS:
		return Ycs
	case D_SS:
		return Yss
	case D_DS:
		return Yds
	case D_ES:
		return Yes
	case D_FS:
		return Yfs
	case D_GS:
		return Ygs
	case D_TLS:
		return Ytls

	// System registers used by the privileged MOV forms in ymovtab.
	case D_GDTR:
		return Ygdtr
	case D_IDTR:
		return Yidtr
	case D_LDTR:
		return Yldtr
	case D_MSW:
		return Ymsw
	case D_TASK:
		return Ytask

	case D_CR + 0:
		return Ycr0
	case D_CR + 1:
		return Ycr1
	case D_CR + 2:
		return Ycr2
	case D_CR + 3:
		return Ycr3
	case D_CR + 4:
		return Ycr4
	case D_CR + 5:
		return Ycr5
	case D_CR + 6:
		return Ycr6
	case D_CR + 7:
		return Ycr7

	case D_DR + 0:
		return Ydr0
	case D_DR + 1:
		return Ydr1
	case D_DR + 2:
		return Ydr2
	case D_DR + 3:
		return Ydr3
	case D_DR + 4:
		return Ydr4
	case D_DR + 5:
		return Ydr5
	case D_DR + 6:
		return Ydr6
	case D_DR + 7:
		return Ydr7

	case D_TR + 0:
		return Ytr0
	case D_TR + 1:
		return Ytr1
	case D_TR + 2:
		return Ytr2
	case D_TR + 3:
		return Ytr3
	case D_TR + 4:
		return Ytr4
	case D_TR + 5:
		return Ytr5
	case D_TR + 6:
		return Ytr6
	case D_TR + 7:
		return Ytr7

	case D_EXTERN,
		D_STATIC,
		D_AUTO,
		D_PARAM:
		return Ym

	// Constants get the narrowest immediate class that fits, but only
	// when there is no symbol (a symbolic address needs 32 bits).
	case D_CONST,
		D_CONST2,
		D_ADDR:
		if a.Sym == nil {
			v = int32(a.Offset)
			if v == 0 {
				return Yi0
			}
			if v == 1 {
				return Yi1
			}
			if v >= -128 && v <= 127 {
				return Yi8
			}
		}

		return Yi32

	case D_BRANCH:
		return Ybr
	}

	return Yxxx
}
+
// asmidx emits the x86 SIB byte for a scaled-index memory operand:
// bits 7:6 hold the scale, bits 5:3 the index register, bits 2:0 the
// base register. Invalid combinations are diagnosed and a zero byte
// is emitted so assembly can continue.
func asmidx(ctxt *obj.Link, scale int, index int, base int) {
	var i int

	switch index {
	default:
		goto bad

	case D_NONE:
		// index=4 encodes "no index register"; scale is irrelevant,
		// so skip straight to the base encoding.
		i = 4 << 3
		goto bas

	case D_AX,
		D_CX,
		D_DX,
		D_BX,
		D_BP,
		D_SI,
		D_DI:
		i = reg[index] << 3
		break
	}

	// Scale factor encodes as log2(scale) in bits 7:6.
	switch scale {
	default:
		goto bad

	case 1:
		break

	case 2:
		i |= 1 << 6

	case 4:
		i |= 2 << 6

	case 8:
		i |= 3 << 6
		break
	}

bas:
	switch base {
	default:
		goto bad

	case D_NONE: /* must be mod=00 */
		i |= 5

	case D_AX,
		D_CX,
		D_DX,
		D_BX,
		D_SP,
		D_BP,
		D_SI,
		D_DI:
		i |= reg[base]
		break
	}

	ctxt.Andptr[0] = byte(i)
	ctxt.Andptr = ctxt.Andptr[1:]
	return

bad:
	ctxt.Diag("asmidx: bad address %d,%d,%d", scale, index, base)
	ctxt.Andptr[0] = 0
	ctxt.Andptr = ctxt.Andptr[1:]
	return
}
+
+func put4(ctxt *obj.Link, v int32) {
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr[1] = byte(v >> 8)
+ ctxt.Andptr[2] = byte(v >> 16)
+ ctxt.Andptr[3] = byte(v >> 24)
+ ctxt.Andptr = ctxt.Andptr[4:]
+}
+
+func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ var v int64
+ var rel obj.Reloc
+ var r *obj.Reloc
+
+ v = int64(vaddr(ctxt, p, a, &rel))
+ if rel.Siz != 0 {
+ if rel.Siz != 4 {
+ ctxt.Diag("bad reloc")
+ }
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, int32(v))
+}
+
// vaddr returns the 32-bit value of address operand a. For symbolic
// references (static/extern) and TLS references the final value is not
// known at assembly time: vaddr fills *r with a relocation (R_ADDR or
// R_TLS_LE) carrying the offset as the addend, returns 0, and leaves
// r.Off as -1 for the caller to fill in.
func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int32 {
	var t int
	var v int32
	var s *obj.LSym

	if r != nil {
		*r = obj.Reloc{}
	}

	t = int(a.Type_)
	v = int32(a.Offset)
	// For $sym-style operands the symbol kind lives in the index field.
	if t == D_ADDR {
		t = int(a.Index)
	}
	switch t {
	case D_STATIC,
		D_EXTERN:
		s = a.Sym
		if s != nil {
			if r == nil {
				ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
				log.Fatalf("bad code")
			}

			r.Type_ = obj.R_ADDR
			r.Siz = 4
			r.Off = -1
			r.Sym = s
			r.Add = int64(v)
			v = 0
		}

	case D_INDIR + D_TLS:
		if r == nil {
			ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
			log.Fatalf("bad code")
		}

		r.Type_ = obj.R_TLS_LE
		r.Siz = 4
		r.Off = -1 // caller must fill in
		r.Add = int64(v)
		v = 0
		break
	}

	return v
}
+
// asmand emits the ModR/M byte — plus any SIB byte, displacement, and
// relocation — that selects operand a, with r placed in the ModR/M
// "reg" field. This is the central 386 addressing-mode encoder: the
// top two ModR/M bits (mod) select no/8-bit/32-bit displacement or a
// register-direct operand.
func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int) {
	var v int32
	var t int
	var scale int
	var rel obj.Reloc

	v = int32(a.Offset)
	t = int(a.Type_)
	rel.Siz = 0
	// Scaled-index forms need a SIB byte (rm=4).
	if a.Index != D_NONE && a.Index != D_TLS {
		if t < D_INDIR || t >= 2*D_INDIR {
			switch t {
			default:
				goto bad

			case D_STATIC,
				D_EXTERN:
				// Symbolic base: absolute address via relocation.
				t = D_NONE
				v = vaddr(ctxt, p, a, &rel)

			case D_AUTO,
				D_PARAM:
				// Stack-relative operands address off SP.
				t = D_SP
				break
			}
		} else {
			t -= D_INDIR
		}

		if t == D_NONE {
			// mod=00, rm=4 (SIB), base=none: disp32 follows.
			ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			asmidx(ctxt, int(a.Scale), int(a.Index), t)
			goto putrelv
		}

		if v == 0 && rel.Siz == 0 && t != D_BP {
			// mod=00: no displacement (BP requires one by encoding).
			ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			asmidx(ctxt, int(a.Scale), int(a.Index), t)
			return
		}

		if v >= -128 && v < 128 && rel.Siz == 0 {
			// mod=01: 8-bit displacement.
			ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			asmidx(ctxt, int(a.Scale), int(a.Index), t)
			ctxt.Andptr[0] = byte(v)
			ctxt.Andptr = ctxt.Andptr[1:]
			return
		}

		// mod=10: 32-bit displacement.
		ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
		ctxt.Andptr = ctxt.Andptr[1:]
		asmidx(ctxt, int(a.Scale), int(a.Index), t)
		goto putrelv
	}

	// Register-direct operand: mod=11 with the register encoding in rm.
	if t >= D_AL && t <= D_F7 || t >= D_X0 && t <= D_X7 {
		if v != 0 {
			goto bad
		}
		ctxt.Andptr[0] = byte(3<<6 | reg[t]<<0 | r<<3)
		ctxt.Andptr = ctxt.Andptr[1:]
		return
	}

	// Non-indexed memory operand.
	scale = int(a.Scale)
	if t < D_INDIR || t >= 2*D_INDIR {
		switch a.Type_ {
		default:
			goto bad

		case D_STATIC,
			D_EXTERN:
			t = D_NONE
			v = vaddr(ctxt, p, a, &rel)

		case D_AUTO,
			D_PARAM:
			t = D_SP
			break
		}

		scale = 1
	} else {
		t -= D_INDIR
	}
	if t == D_TLS {
		v = vaddr(ctxt, p, a, &rel)
	}

	if t == D_NONE || (D_CS <= t && t <= D_GS) || t == D_TLS {
		// mod=00, rm=5: absolute 32-bit address (plus any segment
		// prefix emitted elsewhere).
		ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
		ctxt.Andptr = ctxt.Andptr[1:]
		goto putrelv
	}

	if t == D_SP {
		// SP as base always needs a SIB byte (rm=4).
		if v == 0 && rel.Siz == 0 {
			ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			asmidx(ctxt, scale, D_NONE, t)
			return
		}

		if v >= -128 && v < 128 && rel.Siz == 0 {
			ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			asmidx(ctxt, scale, D_NONE, t)
			ctxt.Andptr[0] = byte(v)
			ctxt.Andptr = ctxt.Andptr[1:]
			return
		}

		ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
		ctxt.Andptr = ctxt.Andptr[1:]
		asmidx(ctxt, scale, D_NONE, t)
		goto putrelv
	}

	if t >= D_AX && t <= D_DI {
		if a.Index == D_TLS {
			// Initial-exec TLS: reference goes through a register
			// base with an R_TLS_IE relocation.
			rel = obj.Reloc{}
			rel.Type_ = obj.R_TLS_IE
			rel.Siz = 4
			rel.Sym = nil
			rel.Add = int64(v)
			v = 0
		}

		if v == 0 && rel.Siz == 0 && t != D_BP {
			ctxt.Andptr[0] = byte(0<<6 | reg[t]<<0 | r<<3)
			ctxt.Andptr = ctxt.Andptr[1:]
			return
		}

		if v >= -128 && v < 128 && rel.Siz == 0 {
			ctxt.Andptr[0] = byte(1<<6 | reg[t]<<0 | r<<3)
			ctxt.Andptr[1] = byte(v)
			ctxt.Andptr = ctxt.Andptr[2:]
			return
		}

		ctxt.Andptr[0] = byte(2<<6 | reg[t]<<0 | r<<3)
		ctxt.Andptr = ctxt.Andptr[1:]
		goto putrelv
	}

	goto bad

putrelv:
	// Emit the 32-bit displacement, registering its relocation if any.
	if rel.Siz != 0 {
		var r *obj.Reloc

		if rel.Siz != 4 {
			ctxt.Diag("bad rel")
			goto bad
		}

		r = obj.Addrel(ctxt.Cursym)
		*r = rel
		r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
	}

	put4(ctxt, v)
	return

bad:
	ctxt.Diag("asmand: bad address %v", Dconv(p, 0, a))
	return
}
+
const (
	// E (0xff) is a sentinel used in ymovtab opcode sequences — it
	// appears to mark the end of an entry's opcode bytes; verify
	// against the ymovtab consumer in doasm.
	E = 0xff
)
+
+var ymovtab = []uint8{
+ /* push */
+ APUSHL,
+ Ycs,
+ Ynone,
+ 0,
+ 0x0e,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yss,
+ Ynone,
+ 0,
+ 0x16,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yds,
+ Ynone,
+ 0,
+ 0x1e,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yes,
+ Ynone,
+ 0,
+ 0x06,
+ E,
+ 0,
+ 0,
+ APUSHL,
+ Yfs,
+ Ynone,
+ 0,
+ 0x0f,
+ 0xa0,
+ E,
+ 0,
+ APUSHL,
+ Ygs,
+ Ynone,
+ 0,
+ 0x0f,
+ 0xa8,
+ E,
+ 0,
+ APUSHW,
+ Ycs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0e,
+ E,
+ 0,
+ APUSHW,
+ Yss,
+ Ynone,
+ 0,
+ Pe,
+ 0x16,
+ E,
+ 0,
+ APUSHW,
+ Yds,
+ Ynone,
+ 0,
+ Pe,
+ 0x1e,
+ E,
+ 0,
+ APUSHW,
+ Yes,
+ Ynone,
+ 0,
+ Pe,
+ 0x06,
+ E,
+ 0,
+ APUSHW,
+ Yfs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa0,
+ E,
+ APUSHW,
+ Ygs,
+ Ynone,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa8,
+ E,
+
+ /* pop */
+ APOPL,
+ Ynone,
+ Yds,
+ 0,
+ 0x1f,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yes,
+ 0,
+ 0x07,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yss,
+ 0,
+ 0x17,
+ E,
+ 0,
+ 0,
+ APOPL,
+ Ynone,
+ Yfs,
+ 0,
+ 0x0f,
+ 0xa1,
+ E,
+ 0,
+ APOPL,
+ Ynone,
+ Ygs,
+ 0,
+ 0x0f,
+ 0xa9,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yds,
+ 0,
+ Pe,
+ 0x1f,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yes,
+ 0,
+ Pe,
+ 0x07,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yss,
+ 0,
+ Pe,
+ 0x17,
+ E,
+ 0,
+ APOPW,
+ Ynone,
+ Yfs,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa1,
+ E,
+ APOPW,
+ Ynone,
+ Ygs,
+ 0,
+ Pe,
+ 0x0f,
+ 0xa9,
+ E,
+
+ /* mov seg */
+ AMOVW,
+ Yes,
+ Yml,
+ 1,
+ 0x8c,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Ycs,
+ Yml,
+ 1,
+ 0x8c,
+ 1,
+ 0,
+ 0,
+ AMOVW,
+ Yss,
+ Yml,
+ 1,
+ 0x8c,
+ 2,
+ 0,
+ 0,
+ AMOVW,
+ Yds,
+ Yml,
+ 1,
+ 0x8c,
+ 3,
+ 0,
+ 0,
+ AMOVW,
+ Yfs,
+ Yml,
+ 1,
+ 0x8c,
+ 4,
+ 0,
+ 0,
+ AMOVW,
+ Ygs,
+ Yml,
+ 1,
+ 0x8c,
+ 5,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yes,
+ 2,
+ 0x8e,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ycs,
+ 2,
+ 0x8e,
+ 1,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yss,
+ 2,
+ 0x8e,
+ 2,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yds,
+ 2,
+ 0x8e,
+ 3,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Yfs,
+ 2,
+ 0x8e,
+ 4,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ygs,
+ 2,
+ 0x8e,
+ 5,
+ 0,
+ 0,
+
+ /* mov cr */
+ AMOVL,
+ Ycr0,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 0,
+ 0,
+ AMOVL,
+ Ycr2,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 2,
+ 0,
+ AMOVL,
+ Ycr3,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 3,
+ 0,
+ AMOVL,
+ Ycr4,
+ Yml,
+ 3,
+ 0x0f,
+ 0x20,
+ 4,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr0,
+ 4,
+ 0x0f,
+ 0x22,
+ 0,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr2,
+ 4,
+ 0x0f,
+ 0x22,
+ 2,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr3,
+ 4,
+ 0x0f,
+ 0x22,
+ 3,
+ 0,
+ AMOVL,
+ Yml,
+ Ycr4,
+ 4,
+ 0x0f,
+ 0x22,
+ 4,
+ 0,
+
+ /* mov dr */
+ AMOVL,
+ Ydr0,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 0,
+ 0,
+ AMOVL,
+ Ydr6,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 6,
+ 0,
+ AMOVL,
+ Ydr7,
+ Yml,
+ 3,
+ 0x0f,
+ 0x21,
+ 7,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr0,
+ 4,
+ 0x0f,
+ 0x23,
+ 0,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr6,
+ 4,
+ 0x0f,
+ 0x23,
+ 6,
+ 0,
+ AMOVL,
+ Yml,
+ Ydr7,
+ 4,
+ 0x0f,
+ 0x23,
+ 7,
+ 0,
+
+ /* mov tr */
+ AMOVL,
+ Ytr6,
+ Yml,
+ 3,
+ 0x0f,
+ 0x24,
+ 6,
+ 0,
+ AMOVL,
+ Ytr7,
+ Yml,
+ 3,
+ 0x0f,
+ 0x24,
+ 7,
+ 0,
+ AMOVL,
+ Yml,
+ Ytr6,
+ 4,
+ 0x0f,
+ 0x26,
+ 6,
+ E,
+ AMOVL,
+ Yml,
+ Ytr7,
+ 4,
+ 0x0f,
+ 0x26,
+ 7,
+ E,
+
+ /* lgdt, sgdt, lidt, sidt */
+ AMOVL,
+ Ym,
+ Ygdtr,
+ 4,
+ 0x0f,
+ 0x01,
+ 2,
+ 0,
+ AMOVL,
+ Ygdtr,
+ Ym,
+ 3,
+ 0x0f,
+ 0x01,
+ 0,
+ 0,
+ AMOVL,
+ Ym,
+ Yidtr,
+ 4,
+ 0x0f,
+ 0x01,
+ 3,
+ 0,
+ AMOVL,
+ Yidtr,
+ Ym,
+ 3,
+ 0x0f,
+ 0x01,
+ 1,
+ 0,
+
+ /* lldt, sldt */
+ AMOVW,
+ Yml,
+ Yldtr,
+ 4,
+ 0x0f,
+ 0x00,
+ 2,
+ 0,
+ AMOVW,
+ Yldtr,
+ Yml,
+ 3,
+ 0x0f,
+ 0x00,
+ 0,
+ 0,
+
+ /* lmsw, smsw */
+ AMOVW,
+ Yml,
+ Ymsw,
+ 4,
+ 0x0f,
+ 0x01,
+ 6,
+ 0,
+ AMOVW,
+ Ymsw,
+ Yml,
+ 3,
+ 0x0f,
+ 0x01,
+ 4,
+ 0,
+
+ /* ltr, str */
+ AMOVW,
+ Yml,
+ Ytask,
+ 4,
+ 0x0f,
+ 0x00,
+ 3,
+ 0,
+ AMOVW,
+ Ytask,
+ Yml,
+ 3,
+ 0x0f,
+ 0x00,
+ 1,
+ 0,
+
+ /* load full pointer */
+ AMOVL,
+ Yml,
+ Ycol,
+ 5,
+ 0,
+ 0,
+ 0,
+ 0,
+ AMOVW,
+ Yml,
+ Ycol,
+ 5,
+ Pe,
+ 0,
+ 0,
+ 0,
+
+ /* double shift */
+ ASHLL,
+ Ycol,
+ Yml,
+ 6,
+ 0xa4,
+ 0xa5,
+ 0,
+ 0,
+ ASHRL,
+ Ycol,
+ Yml,
+ 6,
+ 0xac,
+ 0xad,
+ 0,
+ 0,
+
+ /* extra imul */
+ AIMULW,
+ Yml,
+ Yrl,
+ 7,
+ Pq,
+ 0xaf,
+ 0,
+ 0,
+ AIMULL,
+ Yml,
+ Yrl,
+ 7,
+ Pm,
+ 0xaf,
+ 0,
+ 0,
+
+ /* load TLS base pointer */
+ AMOVL,
+ Ytls,
+ Yrl,
+ 8,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+}
+
+// byteswapreg returns a byte-addressable register (AX, BX, CX, DX)
+// which is not referenced in a->type.
+// If a is empty, it returns BX to account for MULB-like instructions
+// that might use DX and AX.
+func byteswapreg(ctxt *obj.Link, a *obj.Addr) int {
+
+ var cana int
+ var canb int
+ var canc int
+ var cand int
+
+ cand = 1
+ canc = cand
+ canb = canc
+ cana = canb
+
+ switch a.Type_ {
+ case D_NONE:
+ cand = 0
+ cana = cand
+
+ case D_AX,
+ D_AL,
+ D_AH,
+ D_INDIR + D_AX:
+ cana = 0
+
+ case D_BX,
+ D_BL,
+ D_BH,
+ D_INDIR + D_BX:
+ canb = 0
+
+ case D_CX,
+ D_CL,
+ D_CH,
+ D_INDIR + D_CX:
+ canc = 0
+
+ case D_DX,
+ D_DL,
+ D_DH,
+ D_INDIR + D_DX:
+ cand = 0
+ break
+ }
+
+ switch a.Index {
+ case D_AX:
+ cana = 0
+
+ case D_BX:
+ canb = 0
+
+ case D_CX:
+ canc = 0
+
+ case D_DX:
+ cand = 0
+ break
+ }
+
+ if cana != 0 {
+ return D_AX
+ }
+ if canb != 0 {
+ return D_BX
+ }
+ if canc != 0 {
+ return D_CX
+ }
+ if cand != 0 {
+ return D_DX
+ }
+
+ ctxt.Diag("impossible byte register")
+ log.Fatalf("bad code")
+ return 0
+}
+
+// subreg renames register from to register to in both operands of p,
+// covering direct references, index registers, and indirect (memory)
+// forms. The cached operand classes Ft/Tt are cleared so they are
+// recomputed on the next assembly pass.
+func subreg(p *obj.Prog, from int, to int) {
+	if false { /* debug['Q'] */
+		fmt.Printf("\n%v\ts/%v/%v/\n", p, Rconv(from), Rconv(to))
+	}
+
+	// Direct register operands.
+	if from == int(p.From.Type_) {
+		p.From.Type_ = int16(to)
+		p.Ft = 0
+	}
+	if from == int(p.To.Type_) {
+		p.To.Type_ = int16(to)
+		p.Tt = 0
+	}
+
+	// Index registers.
+	if from == int(p.From.Index) {
+		p.From.Index = uint8(to)
+		p.Ft = 0
+	}
+	if from == int(p.To.Index) {
+		p.To.Index = uint8(to)
+		p.Tt = 0
+	}
+
+	// Indirect references through the register.
+	indir := from + D_INDIR
+	if indir == int(p.From.Type_) {
+		p.From.Type_ = int16(to + D_INDIR)
+		p.Ft = 0
+	}
+	if indir == int(p.To.Type_) {
+		p.To.Type_ = int16(to + D_INDIR)
+		p.Tt = 0
+	}
+
+	if false { /* debug['Q'] */
+		fmt.Printf("%v\n", p)
+	}
+}
+
+// mediaop emits the escape/prefix bytes for a media (MMX/SSE) opcode.
+// op is the current opcode byte from the optab entry o, osize the
+// operand-size code from the ytab row, and z the index into o.op.
+// It returns the (possibly advanced) index z. Output goes through the
+// ctxt.Andptr cursor into ctxt.And.
+func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
+	switch op {
+	case Pm,
+		Pe,
+		Pf2,
+		Pf3:
+		// Table entry begins with a prefix byte.
+		if osize != 1 {
+			// Emit the prefix (unless it is the plain 0x0f escape
+			// itself), then the 0x0f escape, and advance to the real
+			// opcode byte in the table.
+			if op != Pm {
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+			z++
+			op = int(o.op[z])
+			break
+		}
+		fallthrough
+
+	default:
+		// Ensure exactly one 0x0f escape precedes the opcode: emit it
+		// unless the byte most recently written to ctxt.And is already
+		// Pm. (The -cap arithmetic recovers the write offset from the
+		// Andptr slice cursor.)
+		if -cap(ctxt.Andptr) == -cap(ctxt.And) || ctxt.And[-cap(ctxt.Andptr)+cap(ctxt.And[:])-1] != Pm {
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		break
+	}
+
+	ctxt.Andptr[0] = byte(op)
+	ctxt.Andptr = ctxt.Andptr[1:]
+	return z
+}
+
+// doasm assembles the single instruction p into ctxt.And via the
+// ctxt.Andptr cursor. It selects the optab entry for p.As, scans its
+// ytab rows for one covering the operand classes, and emits the
+// prefix, opcode, modrm and immediate bytes per the row's Z-code.
+// Instructions with no matching row fall through to the ymovtab
+// search at domov (special mov encodings); byte instructions with
+// unaddressable registers are rewritten via xchg at bad and
+// reassembled recursively.
+func doasm(ctxt *obj.Link, p *obj.Prog) {
+	var o *Optab
+	var q *obj.Prog
+	var pp obj.Prog
+	var t []byte
+	var z int
+	var op int
+	var ft int
+	var tt int
+	var breg int
+	var v int32
+	var pre int32
+	var rel obj.Reloc
+	var r *obj.Reloc
+	var a *obj.Addr
+
+	ctxt.Curp = p // TODO
+
+	// Emit operand prefixes (e.g. segment overrides), if any.
+	pre = int32(prefixof(ctxt, &p.From))
+
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+	pre = int32(prefixof(ctxt, &p.To))
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+
+	// Cache the operand classes on the Prog.
+	if p.Ft == 0 {
+		p.Ft = uint8(oclass(&p.From))
+	}
+	if p.Tt == 0 {
+		p.Tt = uint8(oclass(&p.To))
+	}
+
+	ft = int(p.Ft) * Ymax
+	tt = int(p.Tt) * Ymax
+	o = &optab[p.As]
+	t = o.ytab
+	if t == nil {
+		ctxt.Diag("asmins: noproto %v", p)
+		return
+	}
+
+	// Scan the ytab rows (4 bytes each: from-class, to-class, Z-code,
+	// opcode count) for one whose classes cover both operands.
+	for z = 0; t[0] != 0; (func() { z += int(t[3]); t = t[4:] })() {
+		if ycover[ft+int(t[0])] != 0 {
+			if ycover[tt+int(t[1])] != 0 {
+				goto found
+			}
+		}
+	}
+	goto domov
+
+found:
+	switch o.prefix {
+	case Pq: /* 16 bit escape and opcode escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pf2, /* xmm opcode escape */
+		Pf3:
+		ctxt.Andptr[0] = byte(o.prefix)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pm: /* opcode escape */
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pe: /* 16 bit escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pb: /* botch */
+		break
+	}
+
+	// Dispatch on the row's Z-code, which selects the byte layout.
+	op = int(o.op[z])
+	switch t[2] {
+	default:
+		ctxt.Diag("asmins: unknown z %d %v", t[2], p)
+		return
+
+	case Zpseudo:
+		break
+
+	case Zlit:
+		// Literal byte sequence, zero-terminated in the table.
+		for ; ; z++ {
+			op = int(o.op[z])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case Zlitm_r:
+		for ; ; z++ {
+			op = int(o.op[z])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	case Zm_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	case Zm2_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(o.op[z+1])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	case Zm_r_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	case Zm_r_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibm_r:
+		for {
+			tmp2 := z
+			z++
+			op = int(o.op[tmp2])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zaut_r:
+		// LEA of an address constant: temporarily demote D_ADDR to its
+		// underlying index type for the modrm, then restore it.
+		ctxt.Andptr[0] = 0x8d
+		ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+		if p.From.Type_ != D_ADDR {
+			ctxt.Diag("asmins: Zaut sb type ADDR")
+		}
+		p.From.Type_ = int16(p.From.Index)
+		p.From.Index = D_NONE
+		p.Ft = 0
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+		p.From.Index = uint8(p.From.Type_)
+		p.From.Type_ = D_ADDR
+		p.Ft = 0
+
+	case Zm_o:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, int(o.op[z+1]))
+
+	case Zr_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, reg[p.From.Type_])
+
+	case Zr_m_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, reg[p.From.Type_])
+
+	case Zr_m_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, reg[p.From.Type_])
+		ctxt.Andptr[0] = byte(p.From.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zcallindreg:
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc)
+		r.Type_ = obj.R_CALLIND
+		r.Siz = 0
+		fallthrough
+
+	// fallthrough
+	case Zo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmand(ctxt, p, &p.To, int(o.op[z+1]))
+
+	case Zm_ibo:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_ib,
+		Zib_:
+		// 8-bit immediate from whichever operand the Z-code names.
+		if t[2] == Zib_ {
+			a = &p.From
+		} else {
+
+			a = &p.To
+		}
+		v = vaddr(ctxt, p, a, nil)
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(v)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zib_rp:
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zil_rp:
+		// Register-in-opcode form with a 16- or 32-bit immediate,
+		// depending on the operand-size prefix.
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Zib_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, reg[p.To.Type_])
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_il,
+		Zil_:
+		if t[2] == Zil_ {
+			a = &p.From
+		} else {
+
+			a = &p.To
+		}
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, a)
+		}
+
+	case Zm_ilo,
+		Zilo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if t[2] == Zilo_m {
+			a = &p.From
+			asmand(ctxt, p, &p.To, int(o.op[z+1]))
+		} else {
+
+			a = &p.To
+			asmand(ctxt, p, &p.From, int(o.op[z+1]))
+		}
+
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, a)
+		}
+
+	case Zil_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, reg[p.To.Type_])
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Z_rp:
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zrp_:
+		ctxt.Andptr[0] = byte(op + reg[p.From.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zclr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, reg[p.To.Type_])
+
+	case Zcall:
+		// Direct call: emit opcode plus a 4-byte R_CALL relocation.
+		if p.To.Sym == nil {
+			ctxt.Diag("call without target")
+			log.Fatalf("bad code")
+		}
+
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		r.Type_ = obj.R_CALL
+		r.Siz = 4
+		r.Sym = p.To.Sym
+		r.Add = p.To.Offset
+		put4(ctxt, 0)
+
+	case Zbr,
+		Zjmp,
+		Zloop:
+		// Branches: symbol targets become PC-relative relocations;
+		// in-function targets are resolved directly (backward) or
+		// left for a later pass (forward).
+		if p.To.Sym != nil {
+			if t[2] != Zjmp {
+				ctxt.Diag("branch to ATEXT")
+				log.Fatalf("bad code")
+			}
+
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			r = obj.Addrel(ctxt.Cursym)
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			r.Sym = p.To.Sym
+			r.Type_ = obj.R_PCREL
+			r.Siz = 4
+			put4(ctxt, 0)
+			break
+		}
+
+		// Assumes q is in this function.
+		// Fill in backward jump now.
+		q = p.Pcond
+
+		if q == nil {
+			ctxt.Diag("jmp/branch/loop without target")
+			log.Fatalf("bad code")
+		}
+
+		if p.Back&1 != 0 {
+			// Backward jump: displacement is known; use the short
+			// form if it fits in a signed byte.
+			v = int32(q.Pc - (p.Pc + 2))
+			if v >= -128 {
+				if p.As == AJCXZW {
+					ctxt.Andptr[0] = 0x67
+					ctxt.Andptr = ctxt.Andptr[1:]
+				}
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			} else if t[2] == Zloop {
+				ctxt.Diag("loop too far: %v", p)
+			} else {
+
+				// Long form: adjust for the larger encoding.
+				v -= 5 - 2
+				if t[2] == Zbr {
+					ctxt.Andptr[0] = 0x0f
+					ctxt.Andptr = ctxt.Andptr[1:]
+					v--
+				}
+
+				ctxt.Andptr[0] = byte(o.op[z+1])
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 8)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+			break
+		}
+
+		// Annotate target; will fill in later.
+		p.Forwd = q.Comefrom
+
+		q.Comefrom = p
+		if p.Back&2 != 0 { // short
+			if p.As == AJCXZW {
+				ctxt.Andptr[0] = 0x67
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else if t[2] == Zloop {
+			ctxt.Diag("loop too far: %v", p)
+		} else {
+
+			if t[2] == Zbr {
+				ctxt.Andptr[0] = 0x0f
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case Zcallcon,
+		Zjmpcon:
+		if t[2] == Zcallcon {
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		r.Type_ = obj.R_PCREL
+		r.Siz = 4
+		r.Add = p.To.Offset
+		put4(ctxt, 0)
+
+	case Zcallind:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(o.op[z+1])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		r.Type_ = obj.R_ADDR
+		r.Siz = 4
+		r.Add = p.To.Offset
+		r.Sym = p.To.Sym
+		put4(ctxt, 0)
+
+	case Zbyte:
+		// Raw constant of op bytes (1, 2, or 4), little-endian, with
+		// an optional relocation produced by vaddr.
+		v = vaddr(ctxt, p, &p.From, &rel)
+		if rel.Siz != 0 {
+			rel.Siz = uint8(op)
+			r = obj.Addrel(ctxt.Cursym)
+			*r = rel
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		}
+
+		ctxt.Andptr[0] = byte(v)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if op > 1 {
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if op > 2 {
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+		}
+
+	case Zmov:
+		goto domov
+	}
+
+	return
+
+domov:
+	// Search the special-mov table (8-byte rows) for an encoding.
+	for t = []byte(ymovtab); t[0] != 0; t = t[8:] {
+		if p.As == int16(t[0]) {
+			if ycover[ft+int(t[1])] != 0 {
+				if ycover[tt+int(t[2])] != 0 {
+					goto mfound
+				}
+			}
+		}
+	}
+
+	/*
+	 * here, the assembly has failed.
+	 * if its a byte instruction that has
+	 * unaddressable registers, try to
+	 * exchange registers and reissue the
+	 * instruction with the operands renamed.
+	 */
+bad:
+	pp = *p
+
+	z = int(p.From.Type_)
+	if z >= D_BP && z <= D_DI {
+		breg = byteswapreg(ctxt, &p.To)
+		if breg != D_AX {
+			ctxt.Andptr[0] = 0x87
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+			asmand(ctxt, p, &p.From, reg[breg])
+			subreg(&pp, z, breg)
+			doasm(ctxt, &pp)
+			ctxt.Andptr[0] = 0x87
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+			asmand(ctxt, p, &p.From, reg[breg])
+		} else {
+
+			ctxt.Andptr[0] = byte(0x90 + reg[z])
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+			subreg(&pp, z, D_AX)
+			doasm(ctxt, &pp)
+			ctxt.Andptr[0] = byte(0x90 + reg[z])
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+		}
+
+		return
+	}
+
+	z = int(p.To.Type_)
+	if z >= D_BP && z <= D_DI {
+		breg = byteswapreg(ctxt, &p.From)
+		if breg != D_AX {
+			ctxt.Andptr[0] = 0x87
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+			asmand(ctxt, p, &p.To, reg[breg])
+			subreg(&pp, z, breg)
+			doasm(ctxt, &pp)
+			ctxt.Andptr[0] = 0x87
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+			asmand(ctxt, p, &p.To, reg[breg])
+		} else {
+
+			ctxt.Andptr[0] = byte(0x90 + reg[z])
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+			subreg(&pp, z, D_AX)
+			doasm(ctxt, &pp)
+			ctxt.Andptr[0] = byte(0x90 + reg[z])
+			ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+		}
+
+		return
+	}
+
+	ctxt.Diag("doasm: notfound t2=%x from=%x to=%x %v", t[2], uint16(p.From.Type_), uint16(p.To.Type_), p)
+	return
+
+mfound:
+	// Special mov encodings from ymovtab; t[3] selects the layout.
+	switch t[3] {
+	default:
+		ctxt.Diag("asmins: unknown mov %d %v", t[3], p)
+
+	case 0: /* lit */
+		for z = 4; t[z] != E; z++ {
+
+			ctxt.Andptr[0] = t[z]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case 1: /* r,m */
+		ctxt.Andptr[0] = t[4]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmand(ctxt, p, &p.To, int(t[5]))
+
+	case 2: /* m,r */
+		ctxt.Andptr[0] = t[4]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmand(ctxt, p, &p.From, int(t[5]))
+
+	case 3: /* r,m - 2op */
+		ctxt.Andptr[0] = t[4]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[5]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, int(t[6]))
+
+	case 4: /* m,r - 2op */
+		ctxt.Andptr[0] = t[4]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[5]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, int(t[6]))
+
+	case 5: /* load full pointer, trash heap */
+		if t[4] != 0 {
+
+			ctxt.Andptr[0] = t[4]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		switch p.To.Index {
+		default:
+			goto bad
+
+		case D_DS:
+			ctxt.Andptr[0] = 0xc5
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_SS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb2
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_ES:
+			ctxt.Andptr[0] = 0xc4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_FS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_GS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb5
+			ctxt.Andptr = ctxt.Andptr[1:]
+			break
+		}
+
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	case 6: /* double shift */
+		z = int(p.From.Type_)
+
+		switch z {
+		default:
+			goto bad
+
+		case D_CONST:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = t[4]
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &p.To, reg[p.From.Index])
+			ctxt.Andptr[0] = byte(p.From.Offset)
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_CL,
+			D_CX:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = t[5]
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &p.To, reg[p.From.Index])
+			break
+		}
+
+	case 7: /* imul rm,r */
+		if t[4] == Pq {
+
+			ctxt.Andptr[0] = Pe
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			ctxt.Andptr[0] = t[4]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		ctxt.Andptr[0] = t[5]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, reg[p.To.Type_])
+
+	// NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+	// where you load the TLS base register into a register and then index off that
+	// register to access the actual TLS variables. Systems that allow direct TLS access
+	// are handled in prefixof above and should not be listed here.
+	case 8: /* mov tls, r */
+		switch ctxt.Headtype {
+
+		default:
+			log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+		// ELF TLS base is 0(GS).
+		case obj.Hlinux,
+			obj.Hnacl:
+			pp.From = p.From
+
+			pp.From.Type_ = D_INDIR + D_GS
+			pp.From.Offset = 0
+			pp.From.Index = D_NONE
+			pp.From.Scale = 0
+			ctxt.Andptr[0] = 0x65
+			ctxt.Andptr = ctxt.Andptr[1:] // GS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, reg[p.To.Type_])
+
+		case obj.Hplan9:
+			if ctxt.Plan9privates == nil {
+				ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+			}
+			pp.From = obj.Addr{}
+			pp.From.Type_ = D_EXTERN
+			pp.From.Sym = ctxt.Plan9privates
+			pp.From.Offset = 0
+			pp.From.Index = D_NONE
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, reg[p.To.Type_])
+
+		// Windows TLS base is always 0x14(FS).
+		case obj.Hwindows:
+			pp.From = p.From
+
+			pp.From.Type_ = D_INDIR + D_FS
+			pp.From.Offset = 0x14
+			pp.From.Index = D_NONE
+			pp.From.Scale = 0
+			ctxt.Andptr[0] = 0x64
+			ctxt.Andptr = ctxt.Andptr[1:] // FS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, reg[p.To.Type_])
+			break
+		}
+
+		break
+	}
+}
+
+// naclret is the Native Client return sequence: pop the return address
+// into BP, mask it down to a 32-byte bundle boundary, and jump there.
+var naclret = []uint8{
+	0x5d, // POPL BP
+	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
+	0x83,
+	0xe5,
+	0xe0, // ANDL $~31, BP
+	0xff,
+	0xe5, // JMP BP
+}
+
+// asmins assembles the instruction p into ctxt.And, resetting the
+// ctxt.Andptr cursor first. AUSEFIELD produces only an R_USEFIELD
+// relocation and no bytes. Under NaCl, RET is replaced by the masked
+// naclret sequence, indirect CALL/JMP register targets are first
+// masked to a 32-byte bundle boundary (0x83 0xe0|reg 0xe0, i.e.
+// ANDL $~31, reg) before falling through to the normal encoding, and
+// AINT emits a single 0xf4 byte instead.
+func asmins(ctxt *obj.Link, p *obj.Prog) {
+	var r *obj.Reloc
+
+	ctxt.Andptr = ctxt.And[:]
+
+	if p.As == AUSEFIELD {
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = 0
+		r.Sym = p.From.Sym
+		r.Type_ = obj.R_USEFIELD
+		r.Siz = 0
+		return
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		switch p.As {
+		case ARET:
+			copy(ctxt.Andptr, naclret)
+			ctxt.Andptr = ctxt.Andptr[len(naclret):]
+			return
+
+		case ACALL,
+			AJMP:
+			// Mask the target register, then fall through to doasm
+			// below for the actual call/jump encoding.
+			if D_AX <= p.To.Type_ && p.To.Type_ <= D_DI {
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Type_ - D_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+		case AINT:
+			ctxt.Andptr[0] = 0xf4
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+		}
+	}
+
+	doasm(ctxt, p)
+	if -cap(ctxt.Andptr) > -cap(ctxt.And[len(ctxt.And):]) {
+		fmt.Printf("and[] is too short - %d byte instruction\n", -cap(ctxt.Andptr)+cap(ctxt.And[:]))
+		log.Fatalf("bad code")
+	}
+}
--- /dev/null
+// Inferno utils/8c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+	// STRINGSZ is the fixed formatting-buffer size inherited from the
+	// C version of this printer; kept for reference. TODO confirm it
+	// is still referenced after the Go translation.
+	STRINGSZ = 1000
+)
+
+// bigP is package-level scratch state for the listing routines —
+// presumably the Prog currently being printed; verify against callers.
+var bigP *obj.Prog
+
+func Pconv(p *obj.Prog) string {
+ var str string
+ var fp string
+
+ switch p.As {
+ case ADATA:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, 0, &p.To))
+
+ case ATEXT:
+ if p.From.Scale != 0 {
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, fmtLong, &p.To))
+ break
+ }
+
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, fmtLong, &p.To))
+
+ default:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+ break
+ }
+
+ fp += str
+ return fp
+}
+
+func Aconv(i int) string {
+ var fp string
+
+ fp += anames8[i]
+ return fp
+}
+
+// Dconv formats the operand a of instruction p in assembler-listing
+// form. flag&fmtLong selects the long form used for ATEXT's to
+// operand, where a D_CONST2 prints as "$autosize-argsize". Indirect
+// operands print as "off(reg)", and a non-D_NONE index register
+// appends "(reg*scale)". Note: the D_ADDR case temporarily rewrites
+// a's type/index to reuse the indirect formatting, then restores them.
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var s string
+	var fp string
+
+	var i int
+
+	i = int(a.Type_)
+
+	if flag&fmtLong != 0 /*untyped*/ {
+		if i == D_CONST2 {
+			str = fmt.Sprintf("$%d-%d", a.Offset, a.Offset2)
+		} else {
+
+			// ATEXT dst is not constant
+			str = fmt.Sprintf("!!%v", Dconv(p, 0, a))
+		}
+
+		goto brk
+	}
+
+	if i >= D_INDIR {
+		if a.Offset != 0 {
+			str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(i-D_INDIR))
+		} else {
+
+			str = fmt.Sprintf("(%v)", Rconv(i-D_INDIR))
+		}
+		goto brk
+	}
+
+	switch i {
+	default:
+		// Plain register, or "$off,reg" if an offset is present.
+		if a.Offset != 0 {
+			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(i))
+		} else {
+
+			str = fmt.Sprintf("%v", Rconv(i))
+		}
+
+	case D_NONE:
+		str = ""
+
+	case D_BRANCH:
+		// Prefer the symbol name, then the resolved target pc, then
+		// the raw relative offset.
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+
+			str = fmt.Sprintf("%d(PC)", a.Offset)
+		}
+
+	case D_EXTERN:
+		str = fmt.Sprintf("%s+%d(SB)", a.Sym.Name, a.Offset)
+
+	case D_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", a.Sym.Name, a.Offset)
+
+	case D_AUTO:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(SP)", a.Sym.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%d(SP)", a.Offset)
+		}
+
+	case D_PARAM:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(FP)", a.Sym.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%d(FP)", a.Offset)
+		}
+
+	case D_CONST:
+		str = fmt.Sprintf("$%d", a.Offset)
+
+	case D_CONST2:
+		if !(flag&fmtLong != 0 /*untyped*/) {
+			// D_CONST2 outside of ATEXT should not happen
+			str = fmt.Sprintf("!!$%d-%d", a.Offset, a.Offset2)
+		}
+
+	case D_FCONST:
+		str = fmt.Sprintf("$(%.17g)", a.U.Dval)
+
+	case D_SCONST:
+		str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+
+	case D_ADDR:
+		// Format "$inner" by recursing with the index type swapped in;
+		// skip the index-register suffix (goto conv, not brk).
+		a.Type_ = int16(a.Index)
+		a.Index = D_NONE
+		str = fmt.Sprintf("$%v", Dconv(p, 0, a))
+		a.Index = uint8(a.Type_)
+		a.Type_ = D_ADDR
+		goto conv
+	}
+
+brk:
+	if a.Index != D_NONE {
+		s = fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
+		str += s
+	}
+
+conv:
+	fp += str
+	return fp
+}
+
+// regstr holds the printable names of the D_AL..D_NONE register
+// constants, indexed by r-D_AL (see Rconv). The bracketed comments
+// mark the constant that each group starts at.
+var regstr = []string{
+	"AL", /* [D_AL] */
+	"CL",
+	"DL",
+	"BL",
+	"AH",
+	"CH",
+	"DH",
+	"BH",
+	"AX", /* [D_AX] */
+	"CX",
+	"DX",
+	"BX",
+	"SP",
+	"BP",
+	"SI",
+	"DI",
+	"F0", /* [D_F0] */
+	"F1",
+	"F2",
+	"F3",
+	"F4",
+	"F5",
+	"F6",
+	"F7",
+	"CS", /* [D_CS] */
+	"SS",
+	"DS",
+	"ES",
+	"FS",
+	"GS",
+	"GDTR", /* [D_GDTR] */
+	"IDTR", /* [D_IDTR] */
+	"LDTR", /* [D_LDTR] */
+	"MSW",  /* [D_MSW] */
+	"TASK", /* [D_TASK] */
+	"CR0",  /* [D_CR] */
+	"CR1",
+	"CR2",
+	"CR3",
+	"CR4",
+	"CR5",
+	"CR6",
+	"CR7",
+	"DR0", /* [D_DR] */
+	"DR1",
+	"DR2",
+	"DR3",
+	"DR4",
+	"DR5",
+	"DR6",
+	"DR7",
+	"TR0", /* [D_TR] */
+	"TR1",
+	"TR2",
+	"TR3",
+	"TR4",
+	"TR5",
+	"TR6",
+	"TR7",
+	"X0", /* [D_X0] */
+	"X1",
+	"X2",
+	"X3",
+	"X4",
+	"X5",
+	"X6",
+	"X7",
+	"TLS",  /* [D_TLS] */
+	"NONE", /* [D_NONE] */
+}
+
+func Rconv(r int) string {
+ var str string
+ var fp string
+
+ if r >= D_AL && r <= D_NONE {
+ str = fmt.Sprintf("%s", regstr[r-D_AL])
+ } else {
+
+ str = fmt.Sprintf("gok(%d)", r)
+ }
+
+ fp += str
+ return fp
+}
--- /dev/null
+// Inferno utils/8l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package i386
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+// zprg is the zero instruction used to initialize new Progs (see prg):
+// an AGOK placeholder with empty operands, Scale 1, and Back set so
+// branch resolution treats it conservatively.
+var zprg = obj.Prog{
+	Back: 2,
+	As:   AGOK,
+	From: obj.Addr{
+		Type_: D_NONE,
+		Index: D_NONE,
+		Scale: 1,
+	},
+	To: obj.Addr{
+		Type_: D_NONE,
+		Index: D_NONE,
+		Scale: 1,
+	},
+}
+
+func symtype(a *obj.Addr) int {
+ var t int
+
+ t = int(a.Type_)
+ if t == D_ADDR {
+ t = int(a.Index)
+ }
+ return t
+}
+
+// isdata reports whether p is a data-defining pseudo-instruction.
+func isdata(p *obj.Prog) bool {
+	return p.As == ADATA || p.As == AGLOBL
+}
+
+// iscall reports whether p is a call instruction.
+func iscall(p *obj.Prog) bool {
+	return p.As == ACALL
+}
+
+// datasize returns the data size of an ADATA instruction, which is
+// stored in the from operand's scale field.
+func datasize(p *obj.Prog) int {
+	return int(p.From.Scale)
+}
+
+// textflag returns the flags of an ATEXT instruction, also stored in
+// the from operand's scale field.
+func textflag(p *obj.Prog) int {
+	return int(p.From.Scale)
+}
+
+// settextflag sets the flags of an ATEXT instruction.
+func settextflag(p *obj.Prog, f int) {
+	p.From.Scale = int8(f)
+}
+
+// canuselocaltls reports (as 0/1, a c2go-era boolean) whether the
+// target OS supports the TLS "local exec" model, where off(TLS)
+// addresses thread-local storage directly. The listed systems require
+// the "initial exec" model instead (load the TLS base into a register
+// first); see the rewriting in progedit.
+func canuselocaltls(ctxt *obj.Link) int {
+	switch ctxt.Headtype {
+	case obj.Hlinux,
+		obj.Hnacl,
+		obj.Hplan9,
+		obj.Hwindows:
+		return 0
+	}
+
+	return 1
+}
+
+// progedit rewrites the instruction p as it is added to the program:
+// it normalizes TLS access sequences to the model the target OS
+// supports, rewrites CALL/JMP/RET symbol targets to D_BRANCH, and
+// replaces floating-point constants with references to materialized
+// read-only data symbols ($f32.xxxxxxxx / $f64.xxxxxxxxxxxxxxxx).
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+	var q *obj.Prog
+
+	// See obj6.c for discussion of TLS.
+	if canuselocaltls(ctxt) != 0 {
+
+		// Reduce TLS initial exec model to TLS local exec model.
+		// Sequences like
+		//      MOVL TLS, BX
+		//      ... off(BX)(TLS*1) ...
+		// become
+		//      NOP
+		//      ... off(TLS) ...
+		if p.As == AMOVL && p.From.Type_ == D_TLS && D_AX <= p.To.Type_ && p.To.Type_ <= D_DI {
+
+			p.As = ANOP
+			p.From.Type_ = D_NONE
+			p.To.Type_ = D_NONE
+		}
+
+		if p.From.Index == D_TLS && D_INDIR+D_AX <= p.From.Type_ && p.From.Type_ <= D_INDIR+D_DI {
+			p.From.Type_ = D_INDIR + D_TLS
+			p.From.Scale = 0
+			p.From.Index = D_NONE
+		}
+
+		if p.To.Index == D_TLS && D_INDIR+D_AX <= p.To.Type_ && p.To.Type_ <= D_INDIR+D_DI {
+			p.To.Type_ = D_INDIR + D_TLS
+			p.To.Scale = 0
+			p.To.Index = D_NONE
+		}
+	} else {
+
+		// As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
+		// The instruction
+		//      MOVL off(TLS), BX
+		// becomes the sequence
+		//      MOVL TLS, BX
+		//      MOVL off(BX)(TLS*1), BX
+		// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
+		if p.As == AMOVL && p.From.Type_ == D_INDIR+D_TLS && D_AX <= p.To.Type_ && p.To.Type_ <= D_DI {
+
+			q = obj.Appendp(ctxt, p)
+			q.As = p.As
+			q.From = p.From
+			q.From.Type_ = D_INDIR + p.To.Type_
+			q.From.Index = D_TLS
+			q.From.Scale = 2 // TODO: use 1
+			q.To = p.To
+			p.From.Type_ = D_TLS
+			p.From.Index = D_NONE
+			p.From.Offset = 0
+		}
+	}
+
+	// TODO: Remove.
+	if ctxt.Headtype == obj.Hplan9 {
+
+		if p.From.Scale == 1 && p.From.Index == D_TLS {
+			p.From.Scale = 2
+		}
+		if p.To.Scale == 1 && p.To.Index == D_TLS {
+			p.To.Scale = 2
+		}
+	}
+
+	// Rewrite CALL/JMP/RET to symbol as D_BRANCH.
+	switch p.As {
+
+	case ACALL,
+		AJMP,
+		ARET:
+		if (p.To.Type_ == D_EXTERN || p.To.Type_ == D_STATIC) && p.To.Sym != nil {
+			p.To.Type_ = D_BRANCH
+		}
+		break
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+
+	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
+	case AMOVSS:
+		if p.From.Type_ == D_FCONST {
+
+			if p.From.U.Dval == 0 {
+				if p.To.Type_ >= D_X0 {
+					if p.To.Type_ <= D_X7 {
+						p.As = AXORPS
+						p.From.Type_ = p.To.Type_
+						p.From.Index = p.To.Index
+						break
+					}
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	case AFMOVF,
+		AFADDF,
+		AFSUBF,
+		AFSUBRF,
+		AFMULF,
+		AFDIVF,
+		AFDIVRF,
+		AFCOMF,
+		AFCOMFP,
+		AADDSS,
+		ASUBSS,
+		AMULSS,
+		ADIVSS,
+		ACOMISS,
+		AUCOMISS:
+		// Materialize the 32-bit constant as a $f32.%08x rodata symbol
+		// and point the operand at it.
+		if p.From.Type_ == D_FCONST {
+
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint32(ctxt, s, i32)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+
+	// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
+	case AMOVSD:
+		if p.From.Type_ == D_FCONST {
+
+			if p.From.U.Dval == 0 {
+				if p.To.Type_ >= D_X0 {
+					if p.To.Type_ <= D_X7 {
+						p.As = AXORPS
+						p.From.Type_ = p.To.Type_
+						p.From.Index = p.To.Index
+						break
+					}
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	case AFMOVD,
+		AFADDD,
+		AFSUBD,
+		AFSUBRD,
+		AFMULD,
+		AFDIVD,
+		AFDIVRD,
+		AFCOMD,
+		AFCOMDP,
+		AADDSD,
+		ASUBSD,
+		AMULSD,
+		ADIVSD,
+		ACOMISD,
+		AUCOMISD:
+		// Materialize the 64-bit constant as a $f64.%016x rodata symbol.
+		if p.From.Type_ == D_FCONST {
+
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint64(ctxt, s, i64)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+
+		break
+	}
+}
+
+func prg() *obj.Prog {
+ var p *obj.Prog
+
+ p = new(obj.Prog)
+ *p = zprg
+ return p
+}
+
// addstacksplit rewrites the body of cursym into its final runnable
// form: it loads g into CX, inserts the stack-split check (unless the
// function is marked NOSPLIT), allocates the frame with AADJSP,
// inserts the WRAPPER fixup of g->panic->argp and the optional -Z
// frame-zeroing loop, and finally walks every instruction to rebase
// D_AUTO/D_PARAM offsets on the SP delta tracked across PUSH/POP and
// to expand RET into frame teardown + return (or JMP for a retjmp).
func addstacksplit(ctxt *obj.Link, cursym *obj.LSym) {
	var p *obj.Prog
	var q *obj.Prog
	var p1 *obj.Prog
	var p2 *obj.Prog
	var autoffset int32
	var deltasp int32
	var a int

	// Cache the two morestack entry points (with and without context)
	// on first use; stacksplit selects between them via its noctxt flag.
	if ctxt.Symmorestack[0] == nil {
		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
	}

	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
	}

	ctxt.Cursym = cursym

	// Nothing to do for a TEXT with an empty body.
	if cursym.Text == nil || cursym.Text.Link == nil {
		return
	}

	// On the TEXT instruction, To.Offset carries the frame (locals)
	// size and To.Offset2 the argument size.
	p = cursym.Text
	autoffset = int32(p.To.Offset)
	if autoffset < 0 {
		autoffset = 0
	}

	cursym.Locals = autoffset
	cursym.Args = p.To.Offset2

	q = nil

	// g (in CX) is needed both by the split check and by the WRAPPER
	// panic fixup below.
	if !(p.From.Scale&obj.NOSPLIT != 0) || (p.From.Scale&obj.WRAPPER != 0) {
		p = obj.Appendp(ctxt, p)
		p = load_g_cx(ctxt, p) // load g into CX
	}

	if !(cursym.Text.From.Scale&obj.NOSPLIT != 0) {
		p = stacksplit(ctxt, p, autoffset, bool2int(!(cursym.Text.From.Scale&obj.NEEDCTXT != 0)), &q) // emit split check
	}

	if autoffset != 0 {

		p = obj.Appendp(ctxt, p)
		p.As = AADJSP
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset)
		p.Spadj = autoffset
	} else {

		// zero-byte stack adjustment.
		// Insert a fake non-zero adjustment so that stkcheck can
		// recognize the end of the stack-splitting prolog.
		p = obj.Appendp(ctxt, p)

		p.As = ANOP
		p.Spadj = int32(-ctxt.Arch.Ptrsize)
		p = obj.Appendp(ctxt, p)
		p.As = ANOP
		p.Spadj = int32(ctxt.Arch.Ptrsize)
	}

	// Let the split check's "no split needed" branch land after the
	// frame allocation.
	if q != nil {
		q.Pcond = p
	}
	deltasp = autoffset

	if cursym.Text.From.Scale&obj.WRAPPER != 0 {
		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
		//
		//	MOVL g_panic(CX), BX
		//	TESTL BX, BX
		//	JEQ end
		//	LEAL (autoffset+4)(SP), DI
		//	CMPL panic_argp(BX), DI
		//	JNE end
		//	MOVL SP, panic_argp(BX)
		// end:
		//	NOP
		//
		// The NOP is needed to give the jumps somewhere to land.
		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.

		p = obj.Appendp(ctxt, p)

		p.As = AMOVL
		p.From.Type_ = D_INDIR + D_CX
		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
		p.To.Type_ = D_BX

		p = obj.Appendp(ctxt, p)
		p.As = ATESTL
		p.From.Type_ = D_BX
		p.To.Type_ = D_BX

		p = obj.Appendp(ctxt, p)
		p.As = AJEQ
		p.To.Type_ = D_BRANCH
		p1 = p

		p = obj.Appendp(ctxt, p)
		p.As = ALEAL
		p.From.Type_ = D_INDIR + D_SP
		p.From.Offset = int64(autoffset) + 4
		p.To.Type_ = D_DI

		p = obj.Appendp(ctxt, p)
		p.As = ACMPL
		p.From.Type_ = D_INDIR + D_BX
		p.From.Offset = 0 // Panic.argp
		p.To.Type_ = D_DI

		p = obj.Appendp(ctxt, p)
		p.As = AJNE
		p.To.Type_ = D_BRANCH
		p2 = p

		p = obj.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_SP
		p.To.Type_ = D_INDIR + D_BX
		p.To.Offset = 0 // Panic.argp

		p = obj.Appendp(ctxt, p)

		// Both conditional jumps above land on this zero-width NOP.
		p.As = ANOP
		p1.Pcond = p
		p2.Pcond = p
	}

	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From.Scale&obj.NOSPLIT != 0) {
		// 8l -Z means zero the stack frame on entry.
		// This slows down function calls but can help avoid
		// false positives in garbage collection.
		p = obj.Appendp(ctxt, p)

		p.As = AMOVL
		p.From.Type_ = D_SP
		p.To.Type_ = D_DI

		p = obj.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_CONST
		p.From.Offset = int64(autoffset) / 4
		p.To.Type_ = D_CX

		p = obj.Appendp(ctxt, p)
		p.As = AMOVL
		p.From.Type_ = D_CONST
		p.From.Offset = 0
		p.To.Type_ = D_AX

		// REP STOSL: store CX words of AX (zero) starting at DI.
		p = obj.Appendp(ctxt, p)
		p.As = AREP

		p = obj.Appendp(ctxt, p)
		p.As = ASTOSL
	}

	// Rewrite every instruction's frame-relative addressing against the
	// current SP delta, and expand RET.
	for ; p != nil; p = p.Link {
		a = int(p.From.Type_)
		if a == D_AUTO {
			p.From.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			// +4 skips the word pushed by CALL — presumably the
			// saved return PC; confirm against the frame layout.
			p.From.Offset += int64(deltasp) + 4
		}
		a = int(p.To.Type_)
		if a == D_AUTO {
			p.To.Offset += int64(deltasp)
		}
		if a == D_PARAM {
			p.To.Offset += int64(deltasp) + 4
		}

		// Track SP movement from explicit pushes/pops; only ARET
		// falls through to the expansion code below.
		switch p.As {
		default:
			continue

		case APUSHL,
			APUSHFL:
			deltasp += 4
			p.Spadj = 4
			continue

		case APUSHW,
			APUSHFW:
			deltasp += 2
			p.Spadj = 2
			continue

		case APOPL,
			APOPFL:
			deltasp -= 4
			p.Spadj = -4
			continue

		case APOPW,
			APOPFW:
			deltasp -= 2
			p.Spadj = -2
			continue

		case ARET:
			break
		}

		if autoffset != deltasp {
			ctxt.Diag("unbalanced PUSH/POP")
		}

		if autoffset != 0 {
			// Rewrite RET into ADJSP $-autoffset; RET.
			p.As = AADJSP
			p.From.Type_ = D_CONST
			p.From.Offset = int64(-autoffset)
			p.Spadj = -autoffset
			p = obj.Appendp(ctxt, p)
			p.As = ARET

			// If there are instructions following
			// this ARET, they come from a branch
			// with the same stackframe, so undo
			// the cleanup.
			p.Spadj = +autoffset
		}

		if p.To.Sym != nil { // retjmp
			p.As = AJMP
		}
	}
}
+
+// Append code to p to load g into cx.
+// Overwrites p with the first instruction (no first appendp).
+// Overwriting p is unusual but it lets use this in both the
+// prologue (caller must call appendp first) and in the epilogue.
+// Returns last new instruction.
func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {

	var next *obj.Prog

	// Rewrite p in place as MOVL 0(TLS), CX.
	p.As = AMOVL
	p.From.Type_ = D_INDIR + D_TLS
	p.From.Offset = 0
	p.To.Type_ = D_CX

	// progedit may expand the TLS load into several instructions;
	// walk forward to the last one it inserted so the caller can
	// keep appending after it.
	next = p.Link
	progedit(ctxt, p)
	for p.Link != next {
		p = p.Link
	}

	// NOTE(review): Scale == 2 appears to tag the TLS-indexed operand
	// form for the assembler — confirm against progedit/span8.
	if p.From.Index == D_TLS {
		p.From.Scale = 2
	}

	return p
}
+
+// Append code to p to check for stack split.
+// Appends to (does not overwrite) p.
+// Assumes g is in CX.
+// Returns last new instruction.
+// On return, *jmpok is the instruction that should jump
+// to the stack frame allocation if no split is needed.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+
+ var q *obj.Prog
+ var q1 *obj.Prog
+
+ if ctxt.Debugstack != 0 {
+ // 8l -K means check not only for stack
+ // overflow but stack underflow.
+ // On underflow, INT 3 (breakpoint).
+ // Underflow itself is rare but this also
+ // catches out-of-sync stack guard info.
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ACMPL
+ p.From.Type_ = D_INDIR + D_CX
+ p.From.Offset = 4
+ p.To.Type_ = D_SP
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJCC
+ p.To.Type_ = D_BRANCH
+ p.To.Offset = 4
+ q1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AINT
+ p.From.Type_ = D_CONST
+ p.From.Offset = 3
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ANOP
+ q1.Pcond = p
+ }
+
+ q1 = nil
+
+ if framesize <= obj.StackSmall {
+ // small stack: SP <= stackguard
+ // CMPL SP, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ACMPL
+ p.From.Type_ = D_SP
+ p.To.Type_ = D_INDIR + D_CX
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else if framesize <= obj.StackBig {
+ // large stack: SP-framesize <= stackguard-StackSmall
+ // LEAL -(framesize-StackSmall)(SP), AX
+ // CMPL AX, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ALEAL
+ p.From.Type_ = D_INDIR + D_SP
+ p.From.Offset = -(int64(framesize) - obj.StackSmall)
+ p.To.Type_ = D_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMPL
+ p.From.Type_ = D_AX
+ p.To.Type_ = D_INDIR + D_CX
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else {
+
+ // Such a large stack we need to protect against wraparound
+ // if SP is close to zero.
+ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+ // The +StackGuard on both sides is required to keep the left side positive:
+ // SP is allowed to be slightly below stackguard. See stack.h.
+ //
+ // Preemption sets stackguard to StackPreempt, a very large value.
+ // That breaks the math above, so we have to check for that explicitly.
+ // MOVL stackguard, CX
+ // CMPL CX, $StackPreempt
+ // JEQ label-of-call-to-morestack
+ // LEAL StackGuard(SP), AX
+ // SUBL stackguard, AX
+ // CMPL AX, $(framesize+(StackGuard-StackSmall))
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVL
+ p.From.Type_ = D_INDIR + D_CX
+ p.From.Offset = 0
+ p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ p.To.Type_ = D_SI
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMPL
+ p.From.Type_ = D_SI
+ p.To.Type_ = D_CONST
+ p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJEQ
+ p.To.Type_ = D_BRANCH
+ q1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ALEAL
+ p.From.Type_ = D_INDIR + D_SP
+ p.From.Offset = obj.StackGuard
+ p.To.Type_ = D_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ASUBL
+ p.From.Type_ = D_SI
+ p.From.Offset = 0
+ p.To.Type_ = D_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMPL
+ p.From.Type_ = D_AX
+ p.To.Type_ = D_CONST
+ p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+ }
+
+ // common
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AJHI
+ p.To.Type_ = D_BRANCH
+ p.To.Offset = 4
+ q = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACALL
+ p.To.Type_ = D_BRANCH
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+ } else {
+
+ p.To.Sym = ctxt.Symmorestack[noctxt]
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJMP
+ p.To.Type_ = D_BRANCH
+ p.Pcond = ctxt.Cursym.Text.Link
+
+ if q != nil {
+ q.Pcond = p.Link
+ }
+ if q1 != nil {
+ q1.Pcond = q.Link
+ }
+
+ *jmpok = q
+ return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.Arch.Prg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+func nofollow(a int) int {
+ switch a {
+ case AJMP,
+ ARET,
+ AIRETL,
+ AIRETW,
+ AUNDEF:
+ return 1
+ }
+
+ return 0
+}
+
+func pushpop(a int) int {
+ switch a {
+ case APUSHL,
+ APUSHFL,
+ APUSHW,
+ APUSHFW,
+ APOPL,
+ APOPFL,
+ APOPW,
+ APOPFW:
+ return 1
+ }
+
+ return 0
+}
+
+func relinv(a int) int {
+ switch a {
+ case AJEQ:
+ return AJNE
+ case AJNE:
+ return AJEQ
+ case AJLE:
+ return AJGT
+ case AJLS:
+ return AJHI
+ case AJLT:
+ return AJGE
+ case AJMI:
+ return AJPL
+ case AJGE:
+ return AJLT
+ case AJPL:
+ return AJMI
+ case AJGT:
+ return AJLE
+ case AJHI:
+ return AJLS
+ case AJCS:
+ return AJCC
+ case AJCC:
+ return AJCS
+ case AJPS:
+ return AJPC
+ case AJPC:
+ return AJPS
+ case AJOS:
+ return AJOC
+ case AJOC:
+ return AJOS
+ }
+
+ log.Fatalf("unknown relation: %s", anames8[a])
+ return 0
+}
+
// xfol lays out the control flow reachable from p onto the list
// ending at *last, favoring fall-through over jumps: unconditional
// JMPs are chased to their targets, short already-emitted runs are
// duplicated (up to 4 instructions) instead of jumped to, and
// conditional branches may be inverted via relinv so the preferred
// path falls through. Emitted instructions are flagged with Mark.
func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
	var q *obj.Prog
	var i int
	var a int

loop:
	if p == nil {
		return
	}
	if p.As == AJMP {
		q = p.Pcond
		if q != nil && q.As != ATEXT {
			/* mark instruction as done and continue layout at target of jump */
			p.Mark = 1

			p = q
			if p.Mark == 0 {
				goto loop
			}
		}
	}

	if p.Mark != 0 {
		/*
		 * p goes here, but already used it elsewhere.
		 * copy up to 4 instructions or else branch to other copy.
		 */
		i = 0
		q = p
		for ; i < 4; (func() { i++; q = q.Link })() {

			if q == nil {
				break
			}
			if q == *last {
				break
			}
			a = int(q.As)
			if a == ANOP {
				i--
				continue
			}

			if nofollow(a) != 0 || pushpop(a) != 0 {
				break // NOTE(rsc): arm does goto copy
			}
			if q.Pcond == nil || q.Pcond.Mark != 0 {
				continue
			}
			if a == ACALL || a == ALOOP {
				continue
			}
			// Found a conditional branch within the short run:
			// duplicate the instructions up to and including it,
			// inverting the branch so its target falls through.
			for {
				if p.As == ANOP {
					p = p.Link
					continue
				}

				q = obj.Copyp(ctxt, p)
				p = p.Link
				q.Mark = 1
				(*last).Link = q
				*last = q
				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
					continue
				}

				q.As = int16(relinv(int(q.As)))
				p = q.Pcond
				q.Pcond = q.Link
				q.Link = p
				xfol(ctxt, q.Link, last)
				p = q.Link
				if p.Mark != 0 {
					return
				}
				goto loop
				/* */
			}
		}
		// Could not duplicate: emit a JMP to the existing copy.
		q = ctxt.Arch.Prg()
		q.As = AJMP
		q.Lineno = p.Lineno
		q.To.Type_ = D_BRANCH
		q.To.Offset = p.Pc
		q.Pcond = p
		p = q
	}

	/* emit p */
	p.Mark = 1

	(*last).Link = p
	*last = p
	a = int(p.As)

	/* continue loop with what comes after p */
	if nofollow(a) != 0 {

		return
	}
	if p.Pcond != nil && a != ACALL {
		/*
		 * some kind of conditional branch.
		 * recurse to follow one path.
		 * continue loop on the other.
		 */
		q = obj.Brchain(ctxt, p.Pcond)
		if q != nil {

			p.Pcond = q
		}
		q = obj.Brchain(ctxt, p.Link)
		if q != nil {
			p.Link = q
		}
		if p.From.Type_ == D_CONST {
			if p.From.Offset == 1 {
				/*
				 * expect conditional jump to be taken.
				 * rewrite so that's the fall-through case.
				 */
				p.As = int16(relinv(a))

				q = p.Link
				p.Link = p.Pcond
				p.Pcond = q
			}
		} else {

			q = p.Link
			if q.Mark != 0 {
				if a != ALOOP {
					p.As = int16(relinv(a))
					p.Link = p.Pcond
					p.Pcond = q
				}
			}
		}

		xfol(ctxt, p.Link, last)
		if p.Pcond.Mark != 0 {
			return
		}
		p = p.Pcond
		goto loop
	}

	p = p.Link
	goto loop
}
+
// Link386 describes the 386 ('8') backend for the portable obj layer:
// byte order and word sizes, the function table the common code calls
// into, and this architecture's values for the shared D_* operand
// types and A* pseudo-instructions.
var Link386 = obj.LinkArch{
	ByteOrder:     binary.LittleEndian,
	Pconv:         Pconv,
	Name:          "386",
	Thechar:       '8',
	Endian:        obj.LittleEndian,
	Addstacksplit: addstacksplit,
	Assemble:      span8,
	Datasize:      datasize,
	Follow:        follow,
	Iscall:        iscall,
	Isdata:        isdata,
	Prg:           prg,
	Progedit:      progedit,
	Settextflag:   settextflag,
	Symtype:       symtype,
	Textflag:      textflag,
	Minlc:         1,
	Ptrsize:       4,
	Regsize:       4,
	D_ADDR:        D_ADDR,
	D_AUTO:        D_AUTO,
	D_BRANCH:      D_BRANCH,
	D_CONST:       D_CONST,
	D_EXTERN:      D_EXTERN,
	D_FCONST:      D_FCONST,
	D_NONE:        D_NONE,
	D_PARAM:       D_PARAM,
	D_SCONST:      D_SCONST,
	D_STATIC:      D_STATIC,
	ACALL:         ACALL,
	ADATA:         ADATA,
	AEND:          AEND,
	AFUNCDATA:     AFUNCDATA,
	AGLOBL:        AGLOBL,
	AJMP:          AJMP,
	ANOP:          ANOP,
	APCDATA:       APCDATA,
	ARET:          ARET,
	ATEXT:         ATEXT,
	ATYPE:         ATYPE,
	AUSEFIELD:     AUSEFIELD,
}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package i386
+
const (
	// fmtLong is a formatting flag bit; presumably it selects a
	// long/verbose output form — confirm at the call sites.
	fmtLong = 1 << iota
)
+
// bool2int converts a boolean into the C convention: 1 for true,
// 0 for false.
func bool2int(b bool) int {
	ret := 0
	if b {
		ret = 1
	}
	return ret
}
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strings"
+)
+
+func addlib(ctxt *Link, src, obj, pathname string) {
+ name := path.Clean(pathname)
+
+ // runtime.a -> runtime
+ short := strings.TrimSuffix(name, ".a")
+
+ // already loaded?
+ for i := range ctxt.Library {
+ if ctxt.Library[i].Pkg == short {
+ return
+ }
+ }
+
+ var pname string
+ // runtime -> runtime.a for search
+ if (!(ctxt.Windows != 0) && name[0] == '/') || (ctxt.Windows != 0 && name[1] == ':') {
+ pname = name
+ } else {
+ // try dot, -L "libdir", and then goroot.
+ for _, dir := range ctxt.Libdir {
+ pname = dir + "/" + name
+ if _, err := os.Stat(pname); !os.IsNotExist(err) {
+ break
+ }
+ }
+ }
+ pname = path.Clean(pname)
+
+ // runtime.a -> runtime
+ pname = strings.TrimSuffix(pname, ".a")
+
+ if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
+ fmt.Fprintf(ctxt.Bso, "%5.2f addlib: %s %s pulls in %s\n", Cputime(), obj, src, pname)
+ }
+ addlibpath(ctxt, src, obj, pname, name)
+}
+
+/*
+ * add library to library list.
+ * srcref: src file referring to package
+ * objref: object file referring to package
+ * file: object file, e.g., /home/rsc/go/pkg/container/vector.a
+ * pkg: package import path, e.g. container/vector
+ */
+func addlibpath(ctxt *Link, srcref, objref, file, pkg string) {
+ for _, lib := range ctxt.Library {
+ if lib.File == file {
+ return
+ }
+ }
+
+ if ctxt.Debugvlog > 1 && ctxt.Bso != nil {
+ fmt.Fprintf(ctxt.Bso, "%5.2f addlibpath: srcref: %s objref: %s file: %s pkg: %s\n", Cputime(), srcref, objref, file, pkg)
+ }
+
+ ctxt.Library = append(ctxt.Library, Library{
+ Objref: objref,
+ Srcref: srcref,
+ File: file,
+ Pkg: pkg,
+ })
+}
+
const (
	// LOG is the number of levels (and the per-level spacing factor)
	// of the Forwd skip links built by mkfwd.
	LOG = 5
)
+
// mkfwd fills in the Forwd pointers of sym's instruction list,
// building a multi-level chain of forward links: links at level i are
// spaced cnt[i] = LOG^i instructions apart, so later passes can skip
// ahead quickly instead of walking one Prog at a time.
func mkfwd(sym *LSym) {
	var p *Prog
	var i int
	var dwn [LOG]int32
	var cnt [LOG]int32
	var lst [LOG]*Prog

	// cnt[i] is the link spacing for level i; dwn[i] counts down to
	// the next link at that level; lst[i] is the last Prog linked there.
	for i = 0; i < LOG; i++ {
		if i == 0 {
			cnt[i] = 1
		} else {

			cnt[i] = LOG * cnt[i-1]
		}
		dwn[i] = 1
		lst[i] = nil
	}

	// Round-robin through the levels, one instruction per step.
	i = 0
	for p = sym.Text; p != nil && p.Link != nil; p = p.Link {
		i--
		if i < 0 {
			i = LOG - 1
		}
		p.Forwd = nil
		dwn[i]--
		if dwn[i] <= 0 {
			dwn[i] = cnt[i]
			if lst[i] != nil {
				lst[i].Forwd = p
			}
			lst[i] = p
		}
	}
}
+
+func Copyp(ctxt *Link, q *Prog) *Prog {
+ var p *Prog
+
+ p = ctxt.Arch.Prg()
+ *p = *q
+ return p
+}
+
+func Appendp(ctxt *Link, q *Prog) *Prog {
+ var p *Prog
+
+ p = ctxt.Arch.Prg()
+ p.Link = q.Link
+ q.Link = p
+ p.Lineno = q.Lineno
+ p.Mode = q.Mode
+ return p
+}
--- /dev/null
+// Derived from Inferno utils/6l/l.h and related files.
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import "encoding/binary"
+
// Addr is one instruction operand (the From/From3/To fields of a
// Prog). Type_ holds a per-architecture D_* code that determines
// which of the remaining fields are meaningful.
type Addr struct {
	Offset int64
	// U is the constant payload: string constant, float constant,
	// or branch target, depending on Type_.
	U struct {
		Sval   string
		Dval   float64
		Branch *Prog
	}
	Sym     *LSym
	Gotype  *LSym
	Type_   int16
	Index   uint8
	Scale   int8
	Reg     int8
	Name    int8
	Class   int8
	Etype   uint8
	Offset2 int32
	Node    *struct{}
	Width   int64
}
+
// Prog is one machine (or pseudo) instruction in the linked list that
// forms a function body. As is the per-architecture opcode; From,
// From3, and To are its operands; Link is the next instruction and
// Pcond the branch target. The remaining small fields are scratch
// state for the assembler and layout passes.
type Prog struct {
	Ctxt     *Link
	Pc       int64
	Lineno   int32
	Link     *Prog
	As       int16
	Scond    uint8
	From     Addr
	Reg      uint8
	From3    Addr
	To       Addr
	Opt      interface{}
	Forwd    *Prog
	Pcond    *Prog
	Comefrom *Prog
	Pcrel    *Prog
	Spadj    int32
	Mark     uint16
	Optab    uint16
	Back     uint8
	Ft       uint8
	Tt       uint8
	Isize    uint8
	Printed  uint8
	Width    int8
	Mode     int8
	TEXTFLAG uint8
}
+
// LSym is a linker symbol: a function (Text != nil), data (P with
// relocations R), or an external reference. Type_ is one of the S*
// constants below; most of the small integer fields are flags set by
// the passes in this package.
type LSym struct {
	Name        string
	Extname     string
	Type_       int16
	Version     int16
	Dupok       uint8
	Cfunc       uint8
	External    uint8
	Nosplit     uint8
	Reachable   uint8
	Cgoexport   uint8
	Special     uint8
	Stkcheck    uint8
	Hide        uint8
	Leaf        uint8
	Fnptr       uint8
	Localentry  uint8
	Seenglobl   uint8
	Onlist      uint8
	Printed     uint8
	Symid       int16
	Dynid       int32
	Sig         int32
	Plt         int32
	Got         int32
	Align       int32
	Elfsym      int32
	Args        int32
	Locals      int32
	Value       int64
	Size        int64
	Hash        *LSym
	Allsym      *LSym
	Next        *LSym
	Sub         *LSym
	Outer       *LSym
	Gotype      *LSym
	Reachparent *LSym
	Queue       *LSym
	File        string
	Dynimplib   string
	Dynimpvers  string
	Sect        *struct{}
	Autom       *Auto
	Text        *Prog
	Etext       *Prog
	Pcln        *Pcln
	P           []byte
	R           []Reloc
}
+
// Reloc is a relocation to be applied to Siz bytes at offset Off
// within the containing symbol's data; Type_ is one of the R_*
// constants below.
type Reloc struct {
	Off     int32
	Siz     uint8
	Done    uint8
	Type_   int32
	Variant int32
	Add     int64
	Xadd    int64
	Sym     *LSym
	Xsym    *LSym
}
+
// Auto describes an automatic (stack-resident) variable or parameter
// of a function; Type_ is A_AUTO or A_PARAM.
type Auto struct {
	Asym    *LSym
	Link    *Auto
	Aoffset int32
	Type_   int16
	Gotype  *LSym
}
+
// Hist is one entry in the include/#line history list maintained on
// Link (see linklinehist and the line-formatting routines).
type Hist struct {
	Link    *Hist
	Name    string
	Line    int32
	Offset  int32
	Printed uint8
}
+
// Link holds the context for writing object code from a compiler to
// be linker input or for reading that input into the linker.
type Link struct {
	Thechar            int32
	Thestring          string
	Goarm              int32
	Headtype           int
	Arch               *LinkArch
	Ignore             func(string) int32
	Debugasm           int32
	Debugline          int32
	Debughist          int32
	Debugread          int32
	Debugvlog          int32
	Debugstack         int32
	Debugzerostack     int32
	Debugdivmod        int32
	Debugfloat         int32
	Debugpcln          int32
	Flag_shared        int32
	Iself              int32
	Bso                *Biobuf
	Pathname           string
	Windows            int32
	Trimpath           string
	Goroot             string
	Goroot_final       string
	Enforce_data_order int32
	Hash               [LINKHASH]*LSym
	Allsym             *LSym
	Nsymbol            int32
	Hist               *Hist
	Ehist              *Hist
	Plist              *Plist
	Plast              *Plist
	Sym_div            *LSym
	Sym_divu           *LSym
	Sym_mod            *LSym
	Sym_modu           *LSym
	Symmorestack       [2]*LSym
	Tlsg               *LSym
	Plan9privates      *LSym
	Curp               *Prog
	Printp             *Prog
	Blitrl             *Prog
	Elitrl             *Prog
	Rexflag            int
	Rep                int
	Repn               int
	Lock               int
	Asmode             int
	Andptr             []byte
	And                [100]uint8
	Instoffset         int64
	Autosize           int32
	Armsize            int32
	Pc                 int64
	Libdir             []string
	Library            []Library
	Tlsoffset          int
	Diag               func(string, ...interface{})
	Mode               int
	Curauto            *Auto
	Curhist            *Auto
	Cursym             *LSym
	Version            int
	Textp              *LSym
	Etextp             *LSym
	Histdepth          int32
	Nhistfile          int32
	Filesyms           *LSym
}
+
// Plist is one list of Progs starting at Firstpc; linknewplist
// appends a new one to the Link context.
type Plist struct {
	Name    *LSym
	Firstpc *Prog
	Recur   int
	Link    *Plist
}
+
// LinkArch is the definition of a single architecture: byte order and
// word sizes, the function table the portable code calls into, and
// the architecture's values for the shared D_* operand types and A*
// pseudo-instructions.
type LinkArch struct {
	Pconv         func(*Prog) string
	Name          string
	Thechar       int
	Endian        int32
	ByteOrder     binary.ByteOrder
	Addstacksplit func(*Link, *LSym)
	Assemble      func(*Link, *LSym)
	Datasize      func(*Prog) int
	Follow        func(*Link, *LSym)
	Iscall        func(*Prog) bool
	Isdata        func(*Prog) bool
	Prg           func() *Prog
	Progedit      func(*Link, *Prog)
	Settextflag   func(*Prog, int)
	Symtype       func(*Addr) int
	Textflag      func(*Prog) int
	Minlc         int
	Ptrsize       int
	Regsize       int
	D_ADDR        int
	D_AUTO        int
	D_BRANCH      int
	D_CONST       int
	D_EXTERN      int
	D_FCONST      int
	D_NONE        int
	D_PARAM       int
	D_SCONST      int
	D_STATIC      int
	D_OREG        int
	ACALL         int
	ADATA         int
	AEND          int
	AFUNCDATA     int
	AGLOBL        int
	AJMP          int
	ANOP          int
	APCDATA       int
	ARET          int
	ATEXT         int
	ATYPE         int
	AUSEFIELD     int
}
+
// Library records one library/package file to be loaded, together
// with the source and object files that referenced it (see addlibpath).
type Library struct {
	Objref string
	Srcref string
	File   string
	Pkg    string
}
+
// Pcln holds the pc-value tables (pc/sp, pc/file, pc/line, and
// generic pcdata) plus funcdata for a single function.
type Pcln struct {
	Pcsp        Pcdata
	Pcfile      Pcdata
	Pcline      Pcdata
	Pcdata      []Pcdata
	Funcdata    []*LSym
	Funcdataoff []int64
	File        []*LSym
	Lastfile    *LSym
	Lastindex   int
}
+
// Pcdata is one encoded pc-value table.
type Pcdata struct {
	P []byte
}
+
// Pciter is the iterator state for walking a Pcdata table: value
// holds for pcs in [pc, nextpc); done is set when iteration ends.
type Pciter struct {
	d       Pcdata
	p       []byte
	pc      uint32
	nextpc  uint32
	pcscale uint32
	value   int32
	start   int
	done    int
}
+
+// prevent incompatible type signatures between liblink and 8l on Plan 9
+
+// prevent incompatible type signatures between liblink and 8l on Plan 9
+
+// LSym.type
// LSym.type
const (
	Sxxx = iota
	STEXT
	SELFRXSECT
	STYPE
	SSTRING
	SGOSTRING
	SGOFUNC
	SRODATA
	SFUNCTAB
	STYPELINK
	SSYMTAB
	SPCLNTAB
	SELFROSECT
	SMACHOPLT
	SELFSECT
	SMACHO
	SMACHOGOT
	SWINDOWS
	SELFGOT
	SNOPTRDATA
	SINITARR
	SDATA
	SBSS
	SNOPTRBSS
	STLSBSS
	SXREF
	SMACHOSYMSTR
	SMACHOSYMTAB
	SMACHOINDIRECTPLT
	SMACHOINDIRECTGOT
	SFILE
	SFILEPATH
	SCONST
	SDYNIMPORT
	SHOSTOBJ
	// SSUB and SHIDDEN appear to be flag bits OR'd onto the kind
	// above, with SMASK extracting the kind — confirm at use sites.
	SSUB    = 1 << 8
	SMASK   = SSUB - 1
	SHIDDEN = 1 << 9
)
+
+// Reloc.type
// Reloc.type
const (
	// Relocation kinds start at 1 so the zero value is invalid.
	R_ADDR = 1 + iota
	R_ADDRPOWER
	R_SIZE
	R_CALL
	R_CALLARM
	R_CALLIND
	R_CALLPOWER
	R_CONST
	R_PCREL
	R_TLS
	R_TLS_LE
	R_TLS_IE
	R_GOTOFF
	R_PLT0
	R_PLT1
	R_PLT2
	R_USEFIELD
	R_POWER_TOC
)
+
+// Reloc.variant
// Reloc.variant
const (
	RV_NONE = iota
	RV_POWER_LO
	RV_POWER_HI
	RV_POWER_HA
	RV_POWER_DS
	// RV_CHECK_OVERFLOW is a flag bit; RV_TYPE_MASK extracts the
	// variant kind from the low bits.
	RV_CHECK_OVERFLOW = 1 << 8
	RV_TYPE_MASK      = RV_CHECK_OVERFLOW - 1
)
+
// Auto.type
const (
	A_AUTO = 1 + iota
	A_PARAM
)

const (
	// LINKHASH is the size of the Link.Hash symbol table.
	LINKHASH = 100003
)
+
+// Pcdata iterator.
+// for(pciterinit(ctxt, &it, &pcd); !it.done; pciternext(&it)) { it.value holds in [it.pc, it.nextpc) }
+
// symbol version, incremented each time a file is loaded.
// version==1 is reserved for savehist.
const (
	HistVersion = 1
)

// Link holds the context for writing object code from a compiler
// to be linker input or for reading that input into the linker.

// Endianness magic numbers; the byte values spell out the order.
const (
	LittleEndian = 0x04030201
	BigEndian    = 0x01020304
)
+
+// LinkArch is the definition of a single architecture.
+
/* executable header types */
const (
	Hunknown = 0 + iota
	Hdarwin
	Hdragonfly
	Helf
	Hfreebsd
	Hlinux
	Hnacl
	Hnetbsd
	Hopenbsd
	Hplan9
	Hsolaris
	Hwindows
)

// Linking modes.
const (
	LinkAuto = 0 + iota
	LinkInternal
	LinkExternal
)
+
+// asm5.c
+
+// asm6.c
+
+// asm8.c
+
+// asm9.c
+
+// data.c
+
+// go.c
+
+// ld.c
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
const (
	// HISTSZ bounds the include/#line history stack used by the
	// line-number routines in this file.
	HISTSZ = 10
	// NSYM is not used in this file; presumably a symbol-count
	// bound — confirm at use sites.
	NSYM = 50
)
+
// linklinefmt formats line number lno0 as "file:line" (with the
// include chain in brackets) by replaying the history in ctxt.Hist.
// showAll prints the entire include stack rather than just the top;
// showFullPath (or ctxt.Debugline) prefixes ctxt.Pathname.
func linklinefmt(ctxt *Link, lno0 int, showAll, showFullPath bool) string {
	var a [HISTSZ]struct {
		incl *Hist
		idel int32
		line *Hist
		ldel int32
	}
	lno := int32(lno0)
	lno1 := lno
	var d int32
	var i int
	var n int
	var h *Hist
	n = 0
	var fp string
	// Replay the history up to lno, maintaining a stack a[0..n) of
	// open includes; "<no name>" entries pop the stack.
	for h = ctxt.Hist; h != nil; h = h.Link {
		if h.Offset < 0 {
			continue
		}
		if lno < h.Line {
			break
		}
		if h.Name != "<no name>" {
			if h.Offset > 0 {
				// #line directive
				if n > 0 && n < int(HISTSZ) {
					a[n-1].line = h
					a[n-1].ldel = h.Line - h.Offset + 1
				}
			} else {
				// beginning of file
				if n < int(HISTSZ) {
					a[n].incl = h
					a[n].idel = h.Line
					a[n].line = nil
				}
				n++
			}
			continue
		}
		n--
		if n > 0 && n < int(HISTSZ) {
			d = h.Line - a[n].incl.Line
			a[n-1].ldel += d
			a[n-1].idel += d
		}
	}
	if n > int(HISTSZ) {
		n = int(HISTSZ)
	}
	// Print from the innermost include outward.
	for i = n - 1; i >= 0; i-- {
		if i != n-1 {
			if !showAll {
				break
			}
			fp += " "
		}
		if ctxt.Debugline != 0 || showFullPath {
			fp += fmt.Sprintf("%s/", ctxt.Pathname)
		}
		if a[i].line != nil {
			fp += fmt.Sprintf("%s:%d[%s:%d]", a[i].line.Name, lno-a[i].ldel+1, a[i].incl.Name, lno-a[i].idel+1)
		} else {
			fp += fmt.Sprintf("%s:%d", a[i].incl.Name, lno-a[i].idel+1)
		}
		lno = a[i].incl.Line - 1 // now print out start of this file
	}
	if n == 0 {
		fp += fmt.Sprintf("<unknown line number %d %d %d %s>", lno1, ctxt.Hist.Offset, ctxt.Hist.Line, ctxt.Hist.Name)
	}
	return fp
}
+
+// Does s have t as a path prefix?
+// That is, does s == t or does s begin with t followed by a slash?
+// For portability, we allow ASCII case folding, so that haspathprefix("a/b/c", "A/B") is true.
+// Similarly, we allow slash folding, so that haspathprefix("a/b/c", "a\\b") is true.
// haspathprefix reports whether s has t as a path prefix: s == t, or
// s begins with t followed by a slash. For portability the comparison
// folds ASCII case and treats '\\' as '/', so
// haspathprefix("a/b/c", "A\\B") is true.
func haspathprefix(s string, t string) bool {
	if len(t) > len(s) {
		return false
	}
	// fold normalizes one byte: lowercase ASCII, backslash -> slash.
	fold := func(c byte) byte {
		if 'A' <= c && c <= 'Z' {
			c += 'a' - 'A'
		}
		if c == '\\' {
			c = '/'
		}
		return c
	}
	for i := 0; i < len(t); i++ {
		if fold(s[i]) != fold(t[i]) {
			return false
		}
	}
	// Either t covers all of s, or the next byte of s is a separator.
	return len(t) >= len(s) || s[len(t)] == '/' || s[len(t)] == '\\'
}
+
+// This is a simplified copy of linklinefmt above.
+// It doesn't allow printing the full stack, and it returns the file name and line number separately.
+// TODO: Unify with linklinefmt somehow.
func linkgetline(ctxt *Link, line int32, f **LSym, l *int32) {
	var a [HISTSZ]struct {
		incl *Hist
		idel int32
		line *Hist
		ldel int32
	}
	var lno int32
	var d int32
	var dlno int32
	var n int
	var h *Hist
	var buf string
	var buf1 string
	var file string
	lno = int32(line)
	n = 0
	// Replay the history up to lno, maintaining a stack a[0..n) of
	// open includes; "<no name>" entries pop the stack.
	for h = ctxt.Hist; h != nil; h = h.Link {
		if h.Offset < 0 {
			continue
		}
		if lno < h.Line {
			break
		}
		if h.Name != "<no name>" {
			if h.Offset > 0 {
				// #line directive
				if n > 0 && n < HISTSZ {
					a[n-1].line = h
					a[n-1].ldel = h.Line - h.Offset + 1
				}
			} else {
				// beginning of file
				if n < HISTSZ {
					a[n].incl = h
					a[n].idel = h.Line
					a[n].line = nil
				}
				n++
			}
			continue
		}
		n--
		if n > 0 && n < HISTSZ {
			d = h.Line - a[n].incl.Line
			a[n-1].ldel += d
			a[n-1].idel += d
		}
	}
	if n > HISTSZ {
		n = HISTSZ
	}
	// No history covers this line: report an unknown file.
	if n <= 0 {
		*f = Linklookup(ctxt, "??", HistVersion)
		*l = 0
		return
	}
	n--
	if a[n].line != nil {
		file = a[n].line.Name
		dlno = a[n].ldel - 1
	} else {
		file = a[n].incl.Name
		dlno = a[n].idel - 1
	}
	if filepath.IsAbs(file) || strings.HasPrefix(file, "<") {
		buf = fmt.Sprintf("%s", file)
	} else {
		buf = fmt.Sprintf("%s/%s", ctxt.Pathname, file)
	}
	// Remove leading ctxt->trimpath, or else rewrite $GOROOT to $GOROOT_FINAL.
	if ctxt.Trimpath != "" && haspathprefix(buf, ctxt.Trimpath) {
		if len(buf) == len(ctxt.Trimpath) {
			buf = "??"
		} else {
			buf1 = fmt.Sprintf("%s", buf[len(ctxt.Trimpath)+1:])
			// NOTE(review): checking a Go string for a leading NUL
			// looks like a leftover from the C original (empty-string
			// check) — confirm whether this branch can ever trigger.
			if buf1[0] == '\x00' {
				buf1 = "??"
			}
			buf = buf1
		}
	} else if ctxt.Goroot_final != "" && haspathprefix(buf, ctxt.Goroot) {
		buf1 = fmt.Sprintf("%s%s", ctxt.Goroot_final, buf[len(ctxt.Goroot):])
		buf = buf1
	}
	lno -= dlno
	*f = Linklookup(ctxt, buf, HistVersion)
	*l = lno
}
+
+func linklinehist(ctxt *Link, lineno int, f string, offset int) {
+ var h *Hist
+
+ if false { // debug['f']
+ if f != "" {
+ if offset != 0 {
+ fmt.Printf("%4d: %s (#line %d)\n", lineno, f, offset)
+ } else {
+
+ fmt.Printf("%4d: %s\n", lineno, f)
+ }
+ } else {
+
+ fmt.Printf("%4d: <pop>\n", lineno)
+ }
+ }
+
+ h = new(Hist)
+ *h = Hist{}
+ h.Name = f
+ h.Line = int32(lineno)
+ h.Offset = int32(offset)
+ h.Link = nil
+ if ctxt.Ehist == nil {
+ ctxt.Hist = h
+ ctxt.Ehist = h
+ return
+ }
+
+ ctxt.Ehist.Link = h
+ ctxt.Ehist = h
+}
+
// linkprfile prints the "file:line " include stack for source line
// `line` to standard output, replaying ctxt.Hist the same way the
// formatting routines above do.
func linkprfile(ctxt *Link, line int) {
	l := int32(line)
	var i int
	var n int
	var a [HISTSZ]Hist
	var h *Hist
	var d int32
	n = 0
	// Build the stack a[0..n) of history entries covering line l.
	for h = ctxt.Hist; h != nil; h = h.Link {
		if l < h.Line {
			break
		}
		if h.Name != "<no name>" {
			if h.Offset == 0 {
				if n >= 0 && n < HISTSZ {
					a[n] = *h
				}
				n++
				continue
			}
			// #line directive: replaces or stacks on the top entry.
			if n > 0 && n < HISTSZ {
				if a[n-1].Offset == 0 {
					a[n] = *h
					n++
				} else {
					a[n-1] = *h
				}
			}
			continue
		}
		// "<no name>" pops one level; shift remaining lines.
		n--
		if n >= 0 && n < HISTSZ {
			d = h.Line - a[n].Line
			for i = 0; i < n; i++ {
				a[i].Line += d
			}
		}
	}
	if n > HISTSZ {
		n = HISTSZ
	}
	for i = 0; i < n; i++ {
		fmt.Printf("%s:%d ", a[i].Name, int(l-a[i].Line+a[i].Offset+1))
	}
}
+
+/*
+ * start a new Prog list.
+ */
+func linknewplist(ctxt *Link) *Plist {
+
+ var pl *Plist
+
+ pl = new(Plist)
+ *pl = Plist{}
+ if ctxt.Plist == nil {
+ ctxt.Plist = pl
+ } else {
+
+ ctxt.Plast.Link = pl
+ }
+ ctxt.Plast = pl
+
+ return pl
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+ "path/filepath"
+ "strings"
+)
+
+// outfile is the output file name; set by the driver. (NOTE(review): not
+// referenced in this chunk — presumably used by surrounding code.)
+var outfile string
+
+// The Go and C compilers, and the assembler, call writeobj to write
+// out a Go object file. The linker does not call this; the linker
+// does not write out object files.
+//
+// Writeobjdirect scans all Prog lists in ctxt, partitions them into text
+// (STEXT) and data symbols, attaches non-directive instructions to the
+// current TEXT symbol, assembles each function, and then serializes
+// everything to b in the "go13ld" object file format.
+func Writeobjdirect(ctxt *Link, b *Biobuf) {
+
+	var flag int
+	var found int
+	var h *Hist
+	var s *LSym
+	var text *LSym   // head of the STEXT symbol list
+	var etext *LSym  // tail of the STEXT symbol list
+	var curtext *LSym
+	var data *LSym   // head of the data symbol list
+	var edata *LSym  // tail of the data symbol list
+	var pl *Plist
+	var p *Prog
+	var plink *Prog
+	var a *Auto
+
+	// Build list of symbols, and assign instructions to lists.
+	// Ignore ctxt->plist boundaries. There are no guarantees there,
+	// and the C compilers and assemblers just use one big list.
+	text = nil
+
+	curtext = nil
+	data = nil
+	etext = nil
+	edata = nil
+	for pl = ctxt.Plist; pl != nil; pl = pl.Link {
+		for p = pl.Firstpc; p != nil; p = plink {
+			if ctxt.Debugasm != 0 && ctxt.Debugvlog != 0 {
+				fmt.Printf("obj: %p %v\n", p, p)
+			}
+			// Save the link before detaching p from the input list.
+			plink = p.Link
+			p.Link = nil
+
+			if int(p.As) == ctxt.Arch.AEND {
+				continue
+			}
+
+			if int(p.As) == ctxt.Arch.ATYPE {
+				// Assume each TYPE instruction describes
+				// a different local variable or parameter,
+				// so no dedup.
+				// Using only the TYPE instructions means
+				// that we discard location information about local variables
+				// in C and assembly functions; that information is inferred
+				// from ordinary references, because there are no TYPE
+				// instructions there. Without the type information, gdb can't
+				// use the locations, so we don't bother to save them.
+				// If something else could use them, we could arrange to
+				// preserve them.
+				if curtext == nil {
+
+					continue
+				}
+				a = new(Auto)
+				a.Asym = p.From.Sym
+				a.Aoffset = int32(p.From.Offset)
+				a.Type_ = int16(ctxt.Arch.Symtype(&p.From))
+				a.Gotype = p.From.Gotype
+				a.Link = curtext.Autom
+				curtext.Autom = a
+				continue
+			}
+
+			if int(p.As) == ctxt.Arch.AGLOBL {
+				// Declare a data symbol and append it to the data list.
+				s = p.From.Sym
+				tmp6 := s.Seenglobl
+				s.Seenglobl++
+				if tmp6 != 0 {
+					fmt.Printf("duplicate %v\n", p)
+				}
+				if s.Onlist != 0 {
+					log.Fatalf("symbol %s listed multiple times", s.Name)
+				}
+				s.Onlist = 1
+				if data == nil {
+					data = s
+				} else {
+
+					edata.Next = s
+				}
+				s.Next = nil
+				s.Size = p.To.Offset
+				if s.Type_ == 0 || s.Type_ == SXREF {
+					s.Type_ = SBSS
+				}
+				flag = ctxt.Arch.Textflag(p)
+				if flag&DUPOK != 0 {
+					s.Dupok = 1
+				}
+				if flag&RODATA != 0 {
+					s.Type_ = SRODATA
+				} else if flag&NOPTR != 0 {
+					s.Type_ = SNOPTRBSS
+				}
+				edata = s
+				continue
+			}
+
+			if int(p.As) == ctxt.Arch.ADATA {
+				savedata(ctxt, p.From.Sym, p, "<input>")
+				continue
+			}
+
+			if int(p.As) == ctxt.Arch.ATEXT {
+				// Start of a function: append to the text list and make
+				// it the current symbol for subsequent instructions.
+				s = p.From.Sym
+				if s == nil {
+					// func _() { }
+					curtext = nil
+
+					continue
+				}
+
+				if s.Text != nil {
+					log.Fatalf("duplicate TEXT for %s", s.Name)
+				}
+				if s.Onlist != 0 {
+					log.Fatalf("symbol %s listed multiple times", s.Name)
+				}
+				s.Onlist = 1
+				if text == nil {
+					text = s
+				} else {
+
+					etext.Next = s
+				}
+				etext = s
+				flag = ctxt.Arch.Textflag(p)
+				if flag&DUPOK != 0 {
+					s.Dupok = 1
+				}
+				if flag&NOSPLIT != 0 {
+					s.Nosplit = 1
+				}
+				s.Next = nil
+				s.Type_ = STEXT
+				s.Text = p
+				s.Etext = p
+				curtext = s
+				continue
+			}
+
+			if int(p.As) == ctxt.Arch.AFUNCDATA {
+				// Rewrite reference to go_args_stackmap(SB) to the Go-provided declaration information.
+				if curtext == nil { // func _() {}
+					continue
+				}
+				if p.To.Sym.Name == "go_args_stackmap" {
+					if int(p.From.Type_) != ctxt.Arch.D_CONST || p.From.Offset != FUNCDATA_ArgsPointerMaps {
+						ctxt.Diag("FUNCDATA use of go_args_stackmap(SB) without FUNCDATA_ArgsPointerMaps")
+					}
+					p.To.Sym = Linklookup(ctxt, string(fmt.Sprintf("%s.args_stackmap", curtext.Name)), int(curtext.Version))
+				}
+			}
+
+			// Ordinary instruction: append to the current function body.
+			if curtext == nil {
+				continue
+			}
+			s = curtext
+			s.Etext.Link = p
+			s.Etext = p
+		}
+	}
+
+	// Add reference to Go arguments for C or assembly functions without them.
+	// Only symbols in the current package (name prefix `"".`) are considered.
+	for s = text; s != nil; s = s.Next {
+
+		if !strings.HasPrefix(s.Name, "\"\".") {
+			continue
+		}
+		found = 0
+		for p = s.Text; p != nil; p = p.Link {
+			if int(p.As) == ctxt.Arch.AFUNCDATA && int(p.From.Type_) == ctxt.Arch.D_CONST && p.From.Offset == FUNCDATA_ArgsPointerMaps {
+				found = 1
+				break
+			}
+		}
+
+		if !(found != 0) {
+			// Synthesize FUNCDATA $FUNCDATA_ArgsPointerMaps, name.args_stackmap(SB).
+			p = Appendp(ctxt, s.Text)
+			p.As = int16(ctxt.Arch.AFUNCDATA)
+			p.From.Type_ = int16(ctxt.Arch.D_CONST)
+			p.From.Offset = FUNCDATA_ArgsPointerMaps
+			// Thechar '6'/'8' are the amd64/386 toolchains, whose addressing
+			// uses D_EXTERN directly; other arches use D_OREG + D_EXTERN name.
+			if ctxt.Arch.Thechar == '6' || ctxt.Arch.Thechar == '8' {
+				p.To.Type_ = int16(ctxt.Arch.D_EXTERN)
+			} else {
+
+				p.To.Type_ = int16(ctxt.Arch.D_OREG)
+				p.To.Name = int8(ctxt.Arch.D_EXTERN)
+			}
+
+			p.To.Sym = Linklookup(ctxt, string(fmt.Sprintf("%s.args_stackmap", s.Name)), int(s.Version))
+		}
+	}
+
+	// Turn functions into machine code images.
+	for s = text; s != nil; s = s.Next {
+
+		mkfwd(s)
+		linkpatch(ctxt, s)
+		ctxt.Arch.Follow(ctxt, s)
+		ctxt.Arch.Addstacksplit(ctxt, s)
+		ctxt.Arch.Assemble(ctxt, s)
+		linkpcln(ctxt, s)
+	}
+
+	// Emit header.
+	Bputc(b, 0)
+
+	Bputc(b, 0)
+	fmt.Fprintf(b, "go13ld")
+	Bputc(b, 1) // version
+
+	// Emit autolib.
+	// Negative history offsets record imported-library paths (see startmagic
+	// format); each is written as a string, terminated by an empty string.
+	for h = ctxt.Hist; h != nil; h = h.Link {
+
+		if h.Offset < 0 {
+			wrstring(b, h.Name)
+		}
+	}
+	wrstring(b, "")
+
+	// Emit symbols.
+	for s = text; s != nil; s = s.Next {
+
+		writesym(ctxt, b, s)
+	}
+	for s = data; s != nil; s = s.Next {
+		writesym(ctxt, b, s)
+	}
+
+	// Emit footer.
+	Bputc(b, 0xff)
+
+	Bputc(b, 0xff)
+	fmt.Fprintf(b, "go13ld")
+}
+
+// writesym serializes one symbol s to the object file b: an 0xfe record
+// marker, then type/name/version/flags/size/gotype, the raw data, the
+// relocations, and — for STEXT symbols — the autos and pc/ln tables.
+// When ctxt.Debugasm is set it first dumps a human-readable listing of the
+// symbol to ctxt.Bso (assembly, hex dump, relocations).
+func writesym(ctxt *Link, b *Biobuf, s *LSym) {
+	var r *Reloc
+	var i int
+	var j int
+	var c int
+	var n int
+	var pc *Pcln
+	var p *Prog
+	var a *Auto
+	var name string
+
+	if ctxt.Debugasm != 0 {
+		// Debug listing: header line with flags and sizes.
+		fmt.Fprintf(ctxt.Bso, "%s ", s.Name)
+		if s.Version != 0 {
+			fmt.Fprintf(ctxt.Bso, "v=%d ", s.Version)
+		}
+		if s.Type_ != 0 {
+			fmt.Fprintf(ctxt.Bso, "t=%d ", s.Type_)
+		}
+		if s.Dupok != 0 {
+			fmt.Fprintf(ctxt.Bso, "dupok ")
+		}
+		if s.Cfunc != 0 {
+			fmt.Fprintf(ctxt.Bso, "cfunc ")
+		}
+		if s.Nosplit != 0 {
+			fmt.Fprintf(ctxt.Bso, "nosplit ")
+		}
+		fmt.Fprintf(ctxt.Bso, "size=%d value=%d", int64(s.Size), int64(s.Value))
+		if s.Type_ == STEXT {
+			fmt.Fprintf(ctxt.Bso, " args=%#x locals=%#x", uint64(s.Args), uint64(s.Locals))
+			if s.Leaf != 0 {
+				fmt.Fprintf(ctxt.Bso, " leaf")
+			}
+		}
+
+		fmt.Fprintf(ctxt.Bso, "\n")
+		// Instruction listing.
+		for p = s.Text; p != nil; p = p.Link {
+			fmt.Fprintf(ctxt.Bso, "\t%#04x %v\n", uint(int(p.Pc)), p)
+		}
+		// Hex dump of the symbol data, 16 bytes per row, with an
+		// ASCII column (non-printable bytes shown as '.').
+		for i = 0; i < len(s.P); {
+			fmt.Fprintf(ctxt.Bso, "\t%#04x", uint(i))
+			for j = i; j < i+16 && j < len(s.P); j++ {
+				fmt.Fprintf(ctxt.Bso, " %02x", s.P[j])
+			}
+			for ; j < i+16; j++ {
+				fmt.Fprintf(ctxt.Bso, " ")
+			}
+			fmt.Fprintf(ctxt.Bso, " ")
+			for j = i; j < i+16 && j < len(s.P); j++ {
+				c = int(s.P[j])
+				if ' ' <= c && c <= 0x7e {
+					fmt.Fprintf(ctxt.Bso, "%c", c)
+				} else {
+
+					fmt.Fprintf(ctxt.Bso, ".")
+				}
+			}
+
+			fmt.Fprintf(ctxt.Bso, "\n")
+			i += 16
+		}
+
+		// Relocation listing. Thechar '5'/'9' (arm/ppc64 toolchains)
+		// print the addend in hex; others in decimal.
+		for i = 0; i < len(s.R); i++ {
+			r = &s.R[i]
+			name = ""
+			if r.Sym != nil {
+				name = r.Sym.Name
+			}
+			if ctxt.Arch.Thechar == '5' || ctxt.Arch.Thechar == '9' {
+				fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%x\n", int(r.Off), r.Siz, r.Type_, name, uint64(int64(r.Add)))
+			} else {
+
+				fmt.Fprintf(ctxt.Bso, "\trel %d+%d t=%d %s+%d\n", int(r.Off), r.Siz, r.Type_, name, int64(r.Add))
+			}
+		}
+	}
+
+	// 0xfe marks the start of a symbol record in the go13ld format.
+	Bputc(b, 0xfe)
+	wrint(b, int64(s.Type_))
+	wrstring(b, s.Name)
+	wrint(b, int64(s.Version))
+	wrint(b, int64(s.Dupok))
+	wrint(b, s.Size)
+	wrsym(b, s.Gotype)
+	wrdata(b, s.P)
+
+	wrint(b, int64(len(s.R)))
+	for i = 0; i < len(s.R); i++ {
+		r = &s.R[i]
+		wrint(b, int64(r.Off))
+		wrint(b, int64(r.Siz))
+		wrint(b, int64(r.Type_))
+		wrint(b, r.Add)
+		wrint(b, r.Xadd)
+		wrsym(b, r.Sym)
+		wrsym(b, r.Xsym)
+	}
+
+	if s.Type_ == STEXT {
+		// Function-only payload: frame info, flags (leaf in bit 0,
+		// cfunc in bit 1), autos, and the pcln tables.
+		wrint(b, int64(s.Args))
+		wrint(b, int64(s.Locals))
+		wrint(b, int64(s.Nosplit))
+		wrint(b, int64(s.Leaf)|int64(s.Cfunc)<<1)
+		n = 0
+		for a = s.Autom; a != nil; a = a.Link {
+			n++
+		}
+		wrint(b, int64(n))
+		for a = s.Autom; a != nil; a = a.Link {
+			wrsym(b, a.Asym)
+			wrint(b, int64(a.Aoffset))
+			if int(a.Type_) == ctxt.Arch.D_AUTO {
+				wrint(b, A_AUTO)
+			} else if int(a.Type_) == ctxt.Arch.D_PARAM {
+				wrint(b, A_PARAM)
+			} else {
+
+				log.Fatalf("%s: invalid local variable type %d", s.Name, a.Type_)
+			}
+			wrsym(b, a.Gotype)
+		}
+
+		pc = s.Pcln
+		wrdata(b, pc.Pcsp.P)
+		wrdata(b, pc.Pcfile.P)
+		wrdata(b, pc.Pcline.P)
+		wrint(b, int64(len(pc.Pcdata)))
+		for i = 0; i < len(pc.Pcdata); i++ {
+			wrdata(b, pc.Pcdata[i].P)
+		}
+		wrint(b, int64(len(pc.Funcdataoff)))
+		for i = 0; i < len(pc.Funcdataoff); i++ {
+			wrsym(b, pc.Funcdata[i])
+		}
+		for i = 0; i < len(pc.Funcdataoff); i++ {
+			wrint(b, pc.Funcdataoff[i])
+		}
+		wrint(b, int64(len(pc.File)))
+		for i = 0; i < len(pc.File); i++ {
+			wrpathsym(ctxt, b, pc.File[i])
+		}
+	}
+}
+
+// wrint writes sval to b as a zig-zag-encoded, little-endian base-128
+// varint: the sign bit is folded into bit 0, then 7 bits are emitted per
+// byte with the 0x80 bit set on all but the last byte. At most 10 bytes.
+func wrint(b *Biobuf, sval int64) {
+	var uv uint64
+	var v uint64
+	var buf [10]uint8
+	var p []uint8
+	// Zig-zag: (sval << 1) ^ (sval >> 63) maps small negatives to small
+	// unsigned values so they encode compactly.
+	uv = (uint64(sval) << 1) ^ uint64(int64(sval>>63))
+	p = buf[:]
+	for v = uv; v >= 0x80; v >>= 7 {
+		p[0] = uint8(v | 0x80)
+		p = p[1:]
+	}
+	p[0] = uint8(v)
+	p = p[1:]
+	// Write only the bytes actually produced.
+	Bwrite(b, buf[:len(buf)-len(p)])
+}
+
+// wrstring writes s as a varint length followed by the raw bytes.
+func wrstring(b *Biobuf, s string) {
+	wrint(b, int64(len(s)))
+	b.w.WriteString(s)
+}
+
+// wrpath writes a path just like a string, but on windows, it
+// translates '\\' to '/' in the process.
+func wrpath(ctxt *Link, b *Biobuf, p string) {
+	wrstring(b, filepath.ToSlash(p))
+}
+
+// wrdata writes v as a varint length followed by the raw bytes.
+func wrdata(b *Biobuf, v []byte) {
+	wrint(b, int64(len(v)))
+	Bwrite(b, v)
+}
+
+// wrpathsym writes a symbol reference whose name is a file path
+// (slash-normalized via wrpath). A nil symbol is encoded as two zero ints,
+// matching the nil encoding in wrsym.
+func wrpathsym(ctxt *Link, b *Biobuf, s *LSym) {
+	if s == nil {
+		wrint(b, 0)
+		wrint(b, 0)
+		return
+	}
+
+	wrpath(ctxt, b, s.Name)
+	wrint(b, int64(s.Version))
+}
+
+// wrsym writes a symbol reference as name + version. A nil symbol is
+// encoded as two zero ints (empty name, version 0).
+func wrsym(b *Biobuf, s *LSym) {
+	if s == nil {
+		wrint(b, 0)
+		wrint(b, 0)
+		return
+	}
+
+	wrstring(b, s.Name)
+	wrint(b, int64(s.Version))
+}
+
+// Object file magic markers; Writeobjdirect emits these byte sequences as
+// the header and footer of every object file (see the Bputc/Fprintf pairs
+// there). Readers use them to validate the go13ld format.
+var startmagic string = "\x00\x00go13ld"
+
+var endmagic string = "\xff\xffgo13ld"
--- /dev/null
+// Inferno utils/6l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Code and data passes.
+
+// Brchain follows a chain of unconditional jumps (AJMP with a resolved
+// target) starting at p and returns the final destination. It gives up and
+// returns nil after 20 hops to avoid looping on jump cycles.
+func Brchain(ctxt *Link, p *Prog) *Prog {
+
+	var i int
+
+	for i = 0; i < 20; i++ {
+		if p == nil || int(p.As) != ctxt.Arch.AJMP || p.Pcond == nil {
+			return p
+		}
+		p = p.Pcond
+	}
+
+	return nil
+}
+
+// brloop is like Brchain but with a much larger hop budget (5000): it
+// follows jump-to-jump chains from p and returns the ultimate target, or
+// nil if the chain looks like an infinite loop.
+func brloop(ctxt *Link, p *Prog) *Prog {
+	var c int
+	var q *Prog
+
+	c = 0
+	for q = p; q != nil; q = q.Pcond {
+		if int(q.As) != ctxt.Arch.AJMP || q.Pcond == nil {
+			break
+		}
+		c++
+		if c >= 5000 {
+			// Almost certainly a jump cycle.
+			return nil
+		}
+	}
+
+	return q
+}
+
+// linkpatch resolves branch targets within function sym: every D_BRANCH
+// operand whose target is given as a pc offset is converted to a pointer
+// to the destination Prog (p.Pcond / p.To.U.Branch). It also runs the
+// architecture's Progedit hook on every instruction and collapses
+// jump-to-jump chains via brloop.
+func linkpatch(ctxt *Link, sym *LSym) {
+	var c int32
+	var name string
+	var p *Prog
+	var q *Prog
+
+	ctxt.Cursym = sym
+
+	for p = sym.Text; p != nil; p = p.Link {
+		if ctxt.Arch.Progedit != nil {
+			ctxt.Arch.Progedit(ctxt, p)
+		}
+		if int(p.To.Type_) != ctxt.Arch.D_BRANCH {
+			continue
+		}
+		if p.To.U.Branch != nil {
+			// TODO: Remove to.u.branch in favor of p->pcond.
+			p.Pcond = p.To.U.Branch
+
+			continue
+		}
+
+		// Branches to a symbol are resolved later, by the linker.
+		if p.To.Sym != nil {
+			continue
+		}
+		// Numeric target: search for the Prog at pc == c, using the
+		// Forwd skip links (built by mkfwd) to accelerate the scan.
+		c = int32(p.To.Offset)
+		for q = sym.Text; q != nil; {
+			if int64(c) == q.Pc {
+				break
+			}
+			if q.Forwd != nil && int64(c) >= q.Forwd.Pc {
+				q = q.Forwd
+			} else {
+
+				q = q.Link
+			}
+		}
+
+		if q == nil {
+			name = "<nil>"
+			if p.To.Sym != nil {
+				name = p.To.Sym.Name
+			}
+			ctxt.Diag("branch out of range (%#x)\n%v [%s]", uint32(c), p, name)
+			p.To.Type_ = int16(ctxt.Arch.D_NONE)
+		}
+
+		p.To.U.Branch = q
+		p.Pcond = q
+	}
+
+	// Second pass: short-circuit chains of jumps so each branch points
+	// at its final destination, and reset Mark for the follow pass.
+	for p = sym.Text; p != nil; p = p.Link {
+		p.Mark = 0 /* initialization for follow */
+		if p.Pcond != nil {
+			p.Pcond = brloop(ctxt, p.Pcond)
+			if p.Pcond != nil {
+				if int(p.To.Type_) == ctxt.Arch.D_BRANCH {
+					p.To.Offset = p.Pcond.Pc
+				}
+			}
+		}
+	}
+}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+)
+
+// addvarint appends val to the pcdata table d as an unsigned little-endian
+// base-128 varint (7 bits per byte, 0x80 continuation bit).
+func addvarint(ctxt *Link, d *Pcdata, val uint32) {
+	var v uint32
+	for v = val; v >= 0x80; v >>= 7 {
+		d.P = append(d.P, uint8(v|0x80))
+	}
+	d.P = append(d.P, uint8(v))
+}
+
+// funcpctab writes to dst a pc-value table mapping the code in func to the values
+// returned by valfunc parameterized by arg. The invocation of valfunc to update the
+// current value is, for each p,
+//
+//	val = valfunc(func, val, p, 0, arg);
+//	record val as value at p->pc;
+//	val = valfunc(func, val, p, 1, arg);
+//
+// where func is the function, val is the current value, p is the instruction being
+// considered, and arg can be used to further parameterize valfunc.
+func funcpctab(ctxt *Link, dst *Pcdata, func_ *LSym, desc string, valfunc func(*Link, *LSym, int32, *Prog, int32, interface{}) int32, arg interface{}) {
+
+	var dbg int
+	var i int
+	var oldval int32 // last value emitted into the table
+	var val int32    // current value
+	var started int32
+	var delta uint32
+	var pc int64 // pc at which oldval took effect
+	var p *Prog
+
+	// To debug a specific function, uncomment second line and change name.
+	dbg = 0
+
+	//dbg = strcmp(func->name, "main.main") == 0;
+	//dbg = strcmp(desc, "pctofile") == 0;
+
+	ctxt.Debugpcln += int32(dbg)
+
+	// Reuse dst's backing storage.
+	dst.P = dst.P[:0]
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "funcpctab %s [valfunc=%s]\n", func_.Name, desc)
+	}
+
+	val = -1
+	oldval = val
+	if func_.Text == nil {
+		// Body-less function: nothing to tabulate.
+		ctxt.Debugpcln -= int32(dbg)
+		return
+	}
+
+	pc = func_.Text.Pc
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(pc), val, func_.Text)
+	}
+
+	started = 0
+	for p = func_.Text; p != nil; p = p.Link {
+		// Update val. If it's not changing, keep going.
+		val = valfunc(ctxt, func_, val, p, 0, arg)
+
+		if val == oldval && started != 0 {
+			val = valfunc(ctxt, func_, val, p, 1, arg)
+			if ctxt.Debugpcln != 0 {
+				fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p)
+			}
+			continue
+		}
+
+		// If the pc of the next instruction is the same as the
+		// pc of this instruction, this instruction is not a real
+		// instruction. Keep going, so that we only emit a delta
+		// for a true instruction boundary in the program.
+		if p.Link != nil && p.Link.Pc == p.Pc {
+
+			val = valfunc(ctxt, func_, val, p, 1, arg)
+			if ctxt.Debugpcln != 0 {
+				fmt.Fprintf(ctxt.Bso, "%6x %6s %v\n", uint64(int64(p.Pc)), "", p)
+			}
+			continue
+		}
+
+		// The table is a sequence of (value, pc) pairs, where each
+		// pair states that the given value is in effect from the current position
+		// up to the given pc, which becomes the new current position.
+		// To generate the table as we scan over the program instructions,
+		// we emit a "(value" when pc == func->value, and then
+		// each time we observe a change in value we emit ", pc) (value".
+		// When the scan is over, we emit the closing ", pc)".
+		//
+		// The table is delta-encoded. The value deltas are signed and
+		// transmitted in zig-zag form, where a complement bit is placed in bit 0,
+		// and the pc deltas are unsigned. Both kinds of deltas are sent
+		// as variable-length little-endian base-128 integers,
+		// where the 0x80 bit indicates that the integer continues.
+
+		if ctxt.Debugpcln != 0 {
+
+			fmt.Fprintf(ctxt.Bso, "%6x %6d %v\n", uint64(int64(p.Pc)), val, p)
+		}
+
+		if started != 0 {
+			// pc delta, scaled by the architecture's minimum
+			// instruction size.
+			addvarint(ctxt, dst, uint32((p.Pc-pc)/int64(ctxt.Arch.Minlc)))
+			pc = p.Pc
+		}
+
+		// Zig-zag encode the signed value delta.
+		delta = uint32(val) - uint32(oldval)
+		if delta>>31 != 0 {
+			delta = 1 | ^(delta << 1)
+		} else {
+
+			delta <<= 1
+		}
+		addvarint(ctxt, dst, delta)
+		oldval = val
+		started = 1
+		val = valfunc(ctxt, func_, val, p, 1, arg)
+	}
+
+	if started != 0 {
+		if ctxt.Debugpcln != 0 {
+			fmt.Fprintf(ctxt.Bso, "%6x done\n", uint64(int64(func_.Text.Pc)+func_.Size))
+		}
+		addvarint(ctxt, dst, uint32((func_.Value+func_.Size-pc)/int64(ctxt.Arch.Minlc)))
+		addvarint(ctxt, dst, 0) // terminator
+	}
+
+	if ctxt.Debugpcln != 0 {
+		fmt.Fprintf(ctxt.Bso, "wrote %d bytes to %p\n", len(dst.P), dst)
+		for i = 0; i < len(dst.P); i++ {
+			fmt.Fprintf(ctxt.Bso, " %02x", dst.P[i])
+		}
+		fmt.Fprintf(ctxt.Bso, "\n")
+	}
+
+	ctxt.Debugpcln -= int32(dbg)
+}
+
+// pctofileline computes either the file number (arg == 0)
+// or the line number (arg == 1) to use at p.
+// Because p->lineno applies to p, phase == 0 (before p)
+// takes care of the update.
+//
+// When arg is a *Pcln, the return value is an index into pcln.File
+// (appending the file to the table on first sight); when arg is nil it is
+// the line number itself.
+func pctofileline(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+
+	var i int32
+	var l int32
+	var f *LSym
+	var pcln *Pcln
+
+	// Pseudo-instructions and instructions with no line info keep the
+	// previous value.
+	if int(p.As) == ctxt.Arch.ATEXT || int(p.As) == ctxt.Arch.ANOP || int(p.As) == ctxt.Arch.AUSEFIELD || p.Lineno == 0 || phase == 1 {
+		return oldval
+	}
+	linkgetline(ctxt, p.Lineno, &f, &l)
+	if f == nil {
+		// print("getline failed for %s %P\n", ctxt->cursym->name, p);
+		return oldval
+	}
+
+	if arg == nil {
+		return l
+	}
+	pcln = arg.(*Pcln)
+
+	// One-entry cache: most consecutive instructions share a file.
+	if f == pcln.Lastfile {
+		return int32(pcln.Lastindex)
+	}
+
+	for i = 0; i < int32(len(pcln.File)); i++ {
+		file := pcln.File[i]
+		if file == f {
+			pcln.Lastfile = f
+			pcln.Lastindex = int(i)
+			return int32(i)
+		}
+	}
+	// Not seen before: append; i == len(pcln.File)-1 after the append.
+	pcln.File = append(pcln.File, f)
+	pcln.Lastfile = f
+	pcln.Lastindex = int(i)
+	return i
+}
+
+// pctospadj computes the sp adjustment in effect.
+// It is oldval plus any adjustment made by p itself.
+// The adjustment by p takes effect only after p, so we
+// apply the change during phase == 1.
+func pctospadj(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+
+	if oldval == -1 { // starting
+		oldval = 0
+	}
+	if phase == 0 {
+		return oldval
+	}
+	// Sanity bounds; values outside this range indicate corrupted Spadj
+	// bookkeeping somewhere upstream.
+	if oldval+p.Spadj < -10000 || oldval+p.Spadj > 1100000000 {
+		ctxt.Diag("overflow in spadj: %d + %d = %d", oldval, p.Spadj, oldval+p.Spadj)
+		log.Fatalf("bad code")
+	}
+
+	return oldval + p.Spadj
+}
+
+// pctopcdata computes the pcdata value in effect at p.
+// A PCDATA instruction sets the value in effect at future
+// non-PCDATA instructions.
+// Since PCDATA instructions have no width in the final code,
+// it does not matter which phase we use for the update.
+//
+// arg is the pcdata table index (uint32); PCDATA instructions for other
+// indices are ignored.
+func pctopcdata(ctxt *Link, sym *LSym, oldval int32, p *Prog, phase int32, arg interface{}) int32 {
+
+	if phase == 0 || int(p.As) != ctxt.Arch.APCDATA || p.From.Offset != int64(arg.(uint32)) {
+		return oldval
+	}
+	// The table stores int32 values; reject anything that doesn't fit.
+	if int64(int32(p.To.Offset)) != p.To.Offset {
+		ctxt.Diag("overflow in PCDATA instruction: %v", p)
+		log.Fatalf("bad code")
+	}
+
+	return int32(p.To.Offset)
+}
+
+// linkpcln builds all pc/ln tables for the assembled function cursym and
+// stores them in cursym.Pcln: the pc→spadj, pc→file, and pc→line tables,
+// one pcdata table per PCDATA index used, and the funcdata symbol/offset
+// arrays from FUNCDATA instructions.
+func linkpcln(ctxt *Link, cursym *LSym) {
+	var p *Prog
+	var pcln *Pcln
+	var i int
+	var npcdata int
+	var nfuncdata int
+
+	ctxt.Cursym = cursym
+
+	pcln = new(Pcln)
+	cursym.Pcln = pcln
+
+	// Table sizes are one past the largest PCDATA/FUNCDATA index used.
+	npcdata = 0
+	nfuncdata = 0
+	for p = cursym.Text; p != nil; p = p.Link {
+		if int(p.As) == ctxt.Arch.APCDATA && p.From.Offset >= int64(npcdata) {
+			npcdata = int(p.From.Offset + 1)
+		}
+		if int(p.As) == ctxt.Arch.AFUNCDATA && p.From.Offset >= int64(nfuncdata) {
+			nfuncdata = int(p.From.Offset + 1)
+		}
+	}
+
+	pcln.Pcdata = make([]Pcdata, npcdata)
+	pcln.Pcdata = pcln.Pcdata[:npcdata]
+	pcln.Funcdata = make([]*LSym, nfuncdata)
+	pcln.Funcdataoff = make([]int64, nfuncdata)
+	pcln.Funcdataoff = pcln.Funcdataoff[:nfuncdata]
+
+	funcpctab(ctxt, &pcln.Pcsp, cursym, "pctospadj", pctospadj, nil)
+	funcpctab(ctxt, &pcln.Pcfile, cursym, "pctofile", pctofileline, pcln)
+	funcpctab(ctxt, &pcln.Pcline, cursym, "pctoline", pctofileline, nil)
+
+	// tabulate which pc and func data we have.
+	// (Bitmaps: one bit per index, 32 indices per word.)
+	havepc := make([]uint32, (npcdata+31)/32)
+	havefunc := make([]uint32, (nfuncdata+31)/32)
+	for p = cursym.Text; p != nil; p = p.Link {
+		if int(p.As) == ctxt.Arch.AFUNCDATA {
+			if (havefunc[p.From.Offset/32]>>uint64(p.From.Offset%32))&1 != 0 {
+				ctxt.Diag("multiple definitions for FUNCDATA $%d", p.From.Offset)
+			}
+			havefunc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
+		}
+
+		if int(p.As) == ctxt.Arch.APCDATA {
+			havepc[p.From.Offset/32] |= 1 << uint64(p.From.Offset%32)
+		}
+	}
+
+	// pcdata.
+	for i = 0; i < npcdata; i++ {
+
+		if (havepc[i/32]>>uint(i%32))&1 == 0 {
+			continue
+		}
+		funcpctab(ctxt, &pcln.Pcdata[i], cursym, "pctopcdata", pctopcdata, interface{}(uint32(i)))
+	}
+
+	// funcdata
+	if nfuncdata > 0 {
+
+		for p = cursym.Text; p != nil; p = p.Link {
+			if int(p.As) == ctxt.Arch.AFUNCDATA {
+				i = int(p.From.Offset)
+				pcln.Funcdataoff[i] = p.To.Offset
+				if int(p.To.Type_) != ctxt.Arch.D_CONST {
+					// TODO: Dedup.
+					//funcdata_bytes += p->to.sym->size;
+					pcln.Funcdata[i] = p.To.Sym
+				}
+			}
+		}
+	}
+}
+
+// iteration over encoded pcdata tables.
+
+// getvarint decodes one unsigned little-endian base-128 varint from *pp
+// and advances *pp past the consumed bytes.
+func getvarint(pp *[]byte) uint32 {
+
+	var p []byte
+	var shift int
+	var v uint32
+
+	v = 0
+	p = *pp
+	for shift = 0; ; shift += 7 {
+		v |= uint32(p[0]&0x7F) << uint(shift)
+		// tmp7 keeps the byte just consumed so its continuation bit
+		// can be tested after p is advanced (c2go translation of p++).
+		tmp7 := p
+		p = p[1:]
+		if !(tmp7[0]&0x80 != 0) {
+			break
+		}
+	}
+
+	*pp = p
+	return v
+}
+
+// pciternext advances the iterator to the next (value, pc-range) pair of
+// the encoded pcdata table, setting it.done when the terminating 0 delta
+// (or the end of the data) is reached.
+func pciternext(it *Pciter) {
+	var v uint32
+	var dv int32
+
+	it.pc = it.nextpc
+	if it.done != 0 {
+		return
+	}
+	// c2go translation of a C pointer comparison: compares the position
+	// of it.p against the end of it.d.P via slice capacities to detect
+	// that all bytes have been consumed.
+	if -cap(it.p) >= -cap(it.d.P[len(it.d.P):]) {
+		it.done = 1
+		return
+	}
+
+	// value delta
+	v = getvarint(&it.p)
+
+	// A zero value delta anywhere but the first pair terminates the table.
+	if v == 0 && !(it.start != 0) {
+		it.done = 1
+		return
+	}
+
+	it.start = 0
+	// Zig-zag decode the signed value delta.
+	dv = int32(v>>1) ^ (int32(v<<31) >> 31)
+	it.value += dv
+
+	// pc delta
+	v = getvarint(&it.p)
+
+	it.nextpc = it.pc + v*it.pcscale
+}
+
+// pciterinit resets it to iterate over the encoded pcdata table d,
+// scaling pc deltas by the architecture's minimum instruction size, and
+// loads the first pair by calling pciternext.
+func pciterinit(ctxt *Link, it *Pciter, d *Pcdata) {
+	it.d = *d
+	it.p = it.d.P
+	it.pc = 0
+	it.nextpc = 0
+	it.value = -1 // matches funcpctab's initial value
+	it.start = 1
+	it.done = 0
+	it.pcscale = uint32(ctxt.Arch.Minlc)
+	pciternext(it)
+}
--- /dev/null
+// cmd/9c/9.out.h from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+// auto generated by go tool dist
+
+/*
+ * powerpc 64
+ */
+const (
+ NSNAME = 8
+ NSYM = 50
+ NREG = 32
+ NFREG = 32
+)
+
+const (
+ REGZERO = 0
+ REGSP = 1
+ REGSB = 2
+ REGRET = 3
+ REGARG = -1
+ REGRT1 = 3
+ REGRT2 = 4
+ REGMIN = 7
+ REGENV = 11
+ REGTLS = 13
+ REGMAX = 27
+ REGEXT = 30
+ REGG = 30
+ REGTMP = 31
+ FREGRET = 0
+ FREGMIN = 17
+ FREGMAX = 26
+ FREGEXT = 26
+ FREGCVI = 27
+ FREGZERO = 28
+ FREGHALF = 29
+ FREGONE = 30
+ FREGTWO = 31
+)
+
+/*
+ * GENERAL:
+ *
+ * compiler allocates R3 up as temps
+ * compiler allocates register variables R7-R27
+ * compiler allocates external registers R30 down
+ *
+ * compiler allocates register variables F17-F26
+ * compiler allocates external registers F26 down
+ */
+const (
+ BIG = 32768 - 8
+)
+
+const (
+ LABEL = 1 << 0
+ LEAF = 1 << 1
+ FLOAT = 1 << 2
+ BRANCH = 1 << 3
+ LOAD = 1 << 4
+ FCMP = 1 << 5
+ SYNC = 1 << 6
+ LIST = 1 << 7
+ FOLL = 1 << 8
+ NOSCHED = 1 << 9
+)
+
+const (
+ C_NONE = iota
+ C_REG
+ C_FREG
+ C_CREG
+ C_SPR
+ C_ZCON
+ C_SCON
+ C_UCON
+ C_ADDCON
+ C_ANDCON
+ C_LCON
+ C_DCON
+ C_SACON
+ C_SECON
+ C_LACON
+ C_LECON
+ C_DACON
+ C_SBRA
+ C_LBRA
+ C_SAUTO
+ C_LAUTO
+ C_SEXT
+ C_LEXT
+ C_ZOREG
+ C_SOREG
+ C_LOREG
+ C_FPSCR
+ C_MSR
+ C_XER
+ C_LR
+ C_CTR
+ C_ANY
+ C_GOK
+ C_ADDR
+ C_NCLASS
+)
+
+const (
+ AXXX = iota
+ AADD
+ AADDCC
+ AADDV
+ AADDVCC
+ AADDC
+ AADDCCC
+ AADDCV
+ AADDCVCC
+ AADDME
+ AADDMECC
+ AADDMEVCC
+ AADDMEV
+ AADDE
+ AADDECC
+ AADDEVCC
+ AADDEV
+ AADDZE
+ AADDZECC
+ AADDZEVCC
+ AADDZEV
+ AAND
+ AANDCC
+ AANDN
+ AANDNCC
+ ABC
+ ABCL
+ ABEQ
+ ABGE
+ ABGT
+ ABL
+ ABLE
+ ABLT
+ ABNE
+ ABR
+ ABVC
+ ABVS
+ ACMP
+ ACMPU
+ ACNTLZW
+ ACNTLZWCC
+ ACRAND
+ ACRANDN
+ ACREQV
+ ACRNAND
+ ACRNOR
+ ACROR
+ ACRORN
+ ACRXOR
+ ADIVW
+ ADIVWCC
+ ADIVWVCC
+ ADIVWV
+ ADIVWU
+ ADIVWUCC
+ ADIVWUVCC
+ ADIVWUV
+ AEQV
+ AEQVCC
+ AEXTSB
+ AEXTSBCC
+ AEXTSH
+ AEXTSHCC
+ AFABS
+ AFABSCC
+ AFADD
+ AFADDCC
+ AFADDS
+ AFADDSCC
+ AFCMPO
+ AFCMPU
+ AFCTIW
+ AFCTIWCC
+ AFCTIWZ
+ AFCTIWZCC
+ AFDIV
+ AFDIVCC
+ AFDIVS
+ AFDIVSCC
+ AFMADD
+ AFMADDCC
+ AFMADDS
+ AFMADDSCC
+ AFMOVD
+ AFMOVDCC
+ AFMOVDU
+ AFMOVS
+ AFMOVSU
+ AFMSUB
+ AFMSUBCC
+ AFMSUBS
+ AFMSUBSCC
+ AFMUL
+ AFMULCC
+ AFMULS
+ AFMULSCC
+ AFNABS
+ AFNABSCC
+ AFNEG
+ AFNEGCC
+ AFNMADD
+ AFNMADDCC
+ AFNMADDS
+ AFNMADDSCC
+ AFNMSUB
+ AFNMSUBCC
+ AFNMSUBS
+ AFNMSUBSCC
+ AFRSP
+ AFRSPCC
+ AFSUB
+ AFSUBCC
+ AFSUBS
+ AFSUBSCC
+ AMOVMW
+ ALSW
+ ALWAR
+ AMOVWBR
+ AMOVB
+ AMOVBU
+ AMOVBZ
+ AMOVBZU
+ AMOVH
+ AMOVHBR
+ AMOVHU
+ AMOVHZ
+ AMOVHZU
+ AMOVW
+ AMOVWU
+ AMOVFL
+ AMOVCRFS
+ AMTFSB0
+ AMTFSB0CC
+ AMTFSB1
+ AMTFSB1CC
+ AMULHW
+ AMULHWCC
+ AMULHWU
+ AMULHWUCC
+ AMULLW
+ AMULLWCC
+ AMULLWVCC
+ AMULLWV
+ ANAND
+ ANANDCC
+ ANEG
+ ANEGCC
+ ANEGVCC
+ ANEGV
+ ANOR
+ ANORCC
+ AOR
+ AORCC
+ AORN
+ AORNCC
+ AREM
+ AREMCC
+ AREMV
+ AREMVCC
+ AREMU
+ AREMUCC
+ AREMUV
+ AREMUVCC
+ ARFI
+ ARLWMI
+ ARLWMICC
+ ARLWNM
+ ARLWNMCC
+ ASLW
+ ASLWCC
+ ASRW
+ ASRAW
+ ASRAWCC
+ ASRWCC
+ ASTSW
+ ASTWCCC
+ ASUB
+ ASUBCC
+ ASUBVCC
+ ASUBC
+ ASUBCCC
+ ASUBCV
+ ASUBCVCC
+ ASUBME
+ ASUBMECC
+ ASUBMEVCC
+ ASUBMEV
+ ASUBV
+ ASUBE
+ ASUBECC
+ ASUBEV
+ ASUBEVCC
+ ASUBZE
+ ASUBZECC
+ ASUBZEVCC
+ ASUBZEV
+ ASYNC
+ AXOR
+ AXORCC
+ ADCBF
+ ADCBI
+ ADCBST
+ ADCBT
+ ADCBTST
+ ADCBZ
+ AECIWX
+ AECOWX
+ AEIEIO
+ AICBI
+ AISYNC
+ APTESYNC
+ ATLBIE
+ ATLBIEL
+ ATLBSYNC
+ ATW
+ ASYSCALL
+ ADATA
+ AGLOBL
+ AGOK
+ AHISTORY
+ ANAME
+ ANOP
+ ARETURN
+ ATEXT
+ AWORD
+ AEND
+ ADYNT
+ AINIT
+ ASIGNAME
+ ARFCI
+ AFRES
+ AFRESCC
+ AFRSQRTE
+ AFRSQRTECC
+ AFSEL
+ AFSELCC
+ AFSQRT
+ AFSQRTCC
+ AFSQRTS
+ AFSQRTSCC
+ ACNTLZD
+ ACNTLZDCC
+ ACMPW
+ ACMPWU
+ ADIVD
+ ADIVDCC
+ ADIVDVCC
+ ADIVDV
+ ADIVDU
+ ADIVDUCC
+ ADIVDUVCC
+ ADIVDUV
+ AEXTSW
+ AEXTSWCC
+ AFCFID
+ AFCFIDCC
+ AFCTID
+ AFCTIDCC
+ AFCTIDZ
+ AFCTIDZCC
+ ALDAR
+ AMOVD
+ AMOVDU
+ AMOVWZ
+ AMOVWZU
+ AMULHD
+ AMULHDCC
+ AMULHDU
+ AMULHDUCC
+ AMULLD
+ AMULLDCC
+ AMULLDVCC
+ AMULLDV
+ ARFID
+ ARLDMI
+ ARLDMICC
+ ARLDC
+ ARLDCCC
+ ARLDCR
+ ARLDCRCC
+ ARLDCL
+ ARLDCLCC
+ ASLBIA
+ ASLBIE
+ ASLBMFEE
+ ASLBMFEV
+ ASLBMTE
+ ASLD
+ ASLDCC
+ ASRD
+ ASRAD
+ ASRADCC
+ ASRDCC
+ ASTDCCC
+ ATD
+ ADWORD
+ AREMD
+ AREMDCC
+ AREMDV
+ AREMDVCC
+ AREMDU
+ AREMDUCC
+ AREMDUV
+ AREMDUVCC
+ AHRFID
+ AUNDEF
+ AUSEFIELD
+ ATYPE
+ AFUNCDATA
+ APCDATA
+ ACHECKNIL
+ AVARDEF
+ AVARKILL
+ ADUFFCOPY
+ ADUFFZERO
+ ALAST
+)
+
+/* type/name */
+const (
+ D_GOK = 0 + iota
+ D_NONE
+ D_EXTERN
+ D_STATIC
+ D_AUTO
+ D_PARAM
+ D_BRANCH
+ D_OREG
+ D_CONST
+ D_FCONST
+ D_SCONST
+ D_REG
+ D_FPSCR
+ D_MSR
+ D_FREG
+ D_CREG
+ D_SPR
+ D_OPT
+ D_FILE
+ D_FILE1
+ D_DCR
+ D_DCONST
+ D_ADDR
+ D_LAST
+ D_R0 = 0
+ D_F0 = D_R0 + NREG
+ D_XER = 1
+ D_LR = 8
+ D_CTR = 9
+)
--- /dev/null
+package ppc64
+
+/* and many supervisor level registers */
+
+/*
+ * this is the ranlib header
+ */
+var anames9 = []string{
+ "XXX",
+ "ADD",
+ "ADDCC",
+ "ADDV",
+ "ADDVCC",
+ "ADDC",
+ "ADDCCC",
+ "ADDCV",
+ "ADDCVCC",
+ "ADDME",
+ "ADDMECC",
+ "ADDMEVCC",
+ "ADDMEV",
+ "ADDE",
+ "ADDECC",
+ "ADDEVCC",
+ "ADDEV",
+ "ADDZE",
+ "ADDZECC",
+ "ADDZEVCC",
+ "ADDZEV",
+ "AND",
+ "ANDCC",
+ "ANDN",
+ "ANDNCC",
+ "BC",
+ "BCL",
+ "BEQ",
+ "BGE",
+ "BGT",
+ "BL",
+ "BLE",
+ "BLT",
+ "BNE",
+ "BR",
+ "BVC",
+ "BVS",
+ "CMP",
+ "CMPU",
+ "CNTLZW",
+ "CNTLZWCC",
+ "CRAND",
+ "CRANDN",
+ "CREQV",
+ "CRNAND",
+ "CRNOR",
+ "CROR",
+ "CRORN",
+ "CRXOR",
+ "DIVW",
+ "DIVWCC",
+ "DIVWVCC",
+ "DIVWV",
+ "DIVWU",
+ "DIVWUCC",
+ "DIVWUVCC",
+ "DIVWUV",
+ "EQV",
+ "EQVCC",
+ "EXTSB",
+ "EXTSBCC",
+ "EXTSH",
+ "EXTSHCC",
+ "FABS",
+ "FABSCC",
+ "FADD",
+ "FADDCC",
+ "FADDS",
+ "FADDSCC",
+ "FCMPO",
+ "FCMPU",
+ "FCTIW",
+ "FCTIWCC",
+ "FCTIWZ",
+ "FCTIWZCC",
+ "FDIV",
+ "FDIVCC",
+ "FDIVS",
+ "FDIVSCC",
+ "FMADD",
+ "FMADDCC",
+ "FMADDS",
+ "FMADDSCC",
+ "FMOVD",
+ "FMOVDCC",
+ "FMOVDU",
+ "FMOVS",
+ "FMOVSU",
+ "FMSUB",
+ "FMSUBCC",
+ "FMSUBS",
+ "FMSUBSCC",
+ "FMUL",
+ "FMULCC",
+ "FMULS",
+ "FMULSCC",
+ "FNABS",
+ "FNABSCC",
+ "FNEG",
+ "FNEGCC",
+ "FNMADD",
+ "FNMADDCC",
+ "FNMADDS",
+ "FNMADDSCC",
+ "FNMSUB",
+ "FNMSUBCC",
+ "FNMSUBS",
+ "FNMSUBSCC",
+ "FRSP",
+ "FRSPCC",
+ "FSUB",
+ "FSUBCC",
+ "FSUBS",
+ "FSUBSCC",
+ "MOVMW",
+ "LSW",
+ "LWAR",
+ "MOVWBR",
+ "MOVB",
+ "MOVBU",
+ "MOVBZ",
+ "MOVBZU",
+ "MOVH",
+ "MOVHBR",
+ "MOVHU",
+ "MOVHZ",
+ "MOVHZU",
+ "MOVW",
+ "MOVWU",
+ "MOVFL",
+ "MOVCRFS",
+ "MTFSB0",
+ "MTFSB0CC",
+ "MTFSB1",
+ "MTFSB1CC",
+ "MULHW",
+ "MULHWCC",
+ "MULHWU",
+ "MULHWUCC",
+ "MULLW",
+ "MULLWCC",
+ "MULLWVCC",
+ "MULLWV",
+ "NAND",
+ "NANDCC",
+ "NEG",
+ "NEGCC",
+ "NEGVCC",
+ "NEGV",
+ "NOR",
+ "NORCC",
+ "OR",
+ "ORCC",
+ "ORN",
+ "ORNCC",
+ "REM",
+ "REMCC",
+ "REMV",
+ "REMVCC",
+ "REMU",
+ "REMUCC",
+ "REMUV",
+ "REMUVCC",
+ "RFI",
+ "RLWMI",
+ "RLWMICC",
+ "RLWNM",
+ "RLWNMCC",
+ "SLW",
+ "SLWCC",
+ "SRW",
+ "SRAW",
+ "SRAWCC",
+ "SRWCC",
+ "STSW",
+ "STWCCC",
+ "SUB",
+ "SUBCC",
+ "SUBVCC",
+ "SUBC",
+ "SUBCCC",
+ "SUBCV",
+ "SUBCVCC",
+ "SUBME",
+ "SUBMECC",
+ "SUBMEVCC",
+ "SUBMEV",
+ "SUBV",
+ "SUBE",
+ "SUBECC",
+ "SUBEV",
+ "SUBEVCC",
+ "SUBZE",
+ "SUBZECC",
+ "SUBZEVCC",
+ "SUBZEV",
+ "SYNC",
+ "XOR",
+ "XORCC",
+ "DCBF",
+ "DCBI",
+ "DCBST",
+ "DCBT",
+ "DCBTST",
+ "DCBZ",
+ "ECIWX",
+ "ECOWX",
+ "EIEIO",
+ "ICBI",
+ "ISYNC",
+ "PTESYNC",
+ "TLBIE",
+ "TLBIEL",
+ "TLBSYNC",
+ "TW",
+ "SYSCALL",
+ "DATA",
+ "GLOBL",
+ "GOK",
+ "HISTORY",
+ "NAME",
+ "NOP",
+ "RETURN",
+ "TEXT",
+ "WORD",
+ "END",
+ "DYNT",
+ "INIT",
+ "SIGNAME",
+ "RFCI",
+ "FRES",
+ "FRESCC",
+ "FRSQRTE",
+ "FRSQRTECC",
+ "FSEL",
+ "FSELCC",
+ "FSQRT",
+ "FSQRTCC",
+ "FSQRTS",
+ "FSQRTSCC",
+ "CNTLZD",
+ "CNTLZDCC",
+ "CMPW",
+ "CMPWU",
+ "DIVD",
+ "DIVDCC",
+ "DIVDVCC",
+ "DIVDV",
+ "DIVDU",
+ "DIVDUCC",
+ "DIVDUVCC",
+ "DIVDUV",
+ "EXTSW",
+ "EXTSWCC",
+ "FCFID",
+ "FCFIDCC",
+ "FCTID",
+ "FCTIDCC",
+ "FCTIDZ",
+ "FCTIDZCC",
+ "LDAR",
+ "MOVD",
+ "MOVDU",
+ "MOVWZ",
+ "MOVWZU",
+ "MULHD",
+ "MULHDCC",
+ "MULHDU",
+ "MULHDUCC",
+ "MULLD",
+ "MULLDCC",
+ "MULLDVCC",
+ "MULLDV",
+ "RFID",
+ "RLDMI",
+ "RLDMICC",
+ "RLDC",
+ "RLDCCC",
+ "RLDCR",
+ "RLDCRCC",
+ "RLDCL",
+ "RLDCLCC",
+ "SLBIA",
+ "SLBIE",
+ "SLBMFEE",
+ "SLBMFEV",
+ "SLBMTE",
+ "SLD",
+ "SLDCC",
+ "SRD",
+ "SRAD",
+ "SRADCC",
+ "SRDCC",
+ "STDCCC",
+ "TD",
+ "DWORD",
+ "REMD",
+ "REMDCC",
+ "REMDV",
+ "REMDVCC",
+ "REMDU",
+ "REMDUCC",
+ "REMDUV",
+ "REMDUVCC",
+ "HRFID",
+ "UNDEF",
+ "USEFIELD",
+ "TYPE",
+ "FUNCDATA",
+ "PCDATA",
+ "CHECKNIL",
+ "VARDEF",
+ "VARKILL",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "LAST",
+}
+
+// cnames9 lists the printable names of the C_* operand classes, indexed
+// by class constant (C_NONE, C_REG, ...). It is used only for
+// diagnostics, e.g. the "illegal combination" message in oplook.
+var cnames9 = []string{
+ "NONE",
+ "REG",
+ "FREG",
+ "CREG",
+ "SPR",
+ "ZCON",
+ "SCON",
+ "UCON",
+ "ADDCON",
+ "ANDCON",
+ "LCON",
+ "DCON",
+ "SACON",
+ "SECON",
+ "LACON",
+ "LECON",
+ "DACON",
+ "SBRA",
+ "LBRA",
+ "SAUTO",
+ "LAUTO",
+ "SEXT",
+ "LEXT",
+ "ZOREG",
+ "SOREG",
+ "LOREG",
+ "FPSCR",
+ "MSR",
+ "XER",
+ "LR",
+ "CTR",
+ "ANY",
+ "GOK",
+ "ADDR",
+ "NCLASS",
+}
+
+// dnames9 maps the D_* addressing-mode constants to printable names.
+// Entries carrying two labels (e.g. "GOK/R0", "CONST/LR") presumably
+// cover two constants that share one numeric value -- TODO confirm
+// against the D_* declarations in this package.
+var dnames9 = []string{
+ D_GOK: "GOK/R0",
+ D_NONE: "NONE/XER",
+ D_EXTERN: "EXTERN",
+ D_STATIC: "STATIC",
+ D_AUTO: "AUTO",
+ D_PARAM: "PARAM",
+ D_BRANCH: "BRANCH",
+ D_OREG: "OREG",
+ D_CONST: "CONST/LR",
+ D_FCONST: "FCONST/CTR",
+ D_SCONST: "SCONST",
+ D_REG: "REG",
+ D_FPSCR: "FPSCR",
+ D_MSR: "MSR",
+ D_FREG: "FREG",
+ D_CREG: "CREG",
+ D_SPR: "SPR",
+ D_OPT: "OPT",
+ D_FILE: "FILE",
+ D_FILE1: "FILE1",
+ D_DCR: "DCR",
+ D_DCONST: "DCONST",
+ D_ADDR: "ADDR",
+}
--- /dev/null
+// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "sort"
+)
+
+// Instruction layout.
+
+const (
+ // FuncAlign is the byte alignment span9 applies to function sizes.
+ FuncAlign = 8
+)
+
+const (
+ // r0iszero is nonzero when R0 is treated as a constant zero, which
+ // lets a zero constant stand in wherever a register operand is
+ // accepted (see cmp: C_REG accepts C_ZCON only when r0iszero != 0).
+ r0iszero = 1
+)
+
+// An Optab row describes one accepted operand combination for an
+// opcode: as is the opcode, a1-a4 are the required operand classes for
+// the four operand positions, type_ selects the encoding case used by
+// asmout, size is the number of machine-code bytes emitted, and param
+// is the implicit base register (REGSP, REGSB or REGZERO) assumed by
+// the addressing mode.
+type Optab struct {
+ as int16
+ a1 uint8
+ a2 uint8
+ a3 uint8
+ a4 uint8
+ type_ int8
+ size int8
+ param int8
+}
+
+// optab is the operand-combination table consulted by oplook: one row
+// per accepted (opcode, operand classes) combination, giving the
+// asmout encoding case (type_), the encoded size in bytes, and the
+// implicit base register. buildop sorts this table in place and builds
+// the per-opcode oprange index over it; the trailing AXXX row marks
+// the end of the table.
+var optab = []Optab{
+ Optab{ATEXT, C_LEXT, C_NONE, C_NONE, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_LEXT, C_REG, C_NONE, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_LEXT, C_NONE, C_LCON, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_LEXT, C_REG, C_LCON, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_ADDR, C_NONE, C_NONE, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_ADDR, C_REG, C_NONE, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_ADDR, C_NONE, C_LCON, C_LCON, 0, 0, 0},
+ Optab{ATEXT, C_ADDR, C_REG, C_LCON, C_LCON, 0, 0, 0},
+ /* move register */
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 4, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 12, 4, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 13, 4, 0},
+ Optab{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+ Optab{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+ Optab{AADD, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+ Optab{AADD, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+ Optab{AADD, C_UCON, C_REG, C_NONE, C_REG, 20, 4, 0},
+ Optab{AADD, C_UCON, C_NONE, C_NONE, C_REG, 20, 4, 0},
+ Optab{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+ Optab{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+ Optab{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+ Optab{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+ Optab{AADDC, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+ Optab{AADDC, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+ Optab{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+ Optab{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+ Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, no literal */
+ Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{AANDCC, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+ Optab{AANDCC, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{AANDCC, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
+ Optab{AANDCC, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
+ Optab{AANDCC, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
+ Optab{AANDCC, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
+ Optab{AANDCC, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
+ Optab{AANDCC, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
+ Optab{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0},
+ Optab{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+ Optab{AMULLW, C_ADDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+ Optab{AMULLW, C_ADDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+ Optab{AMULLW, C_ANDCON, C_REG, C_NONE, C_REG, 4, 4, 0},
+ Optab{AMULLW, C_ANDCON, C_NONE, C_NONE, C_REG, 4, 4, 0},
+ Optab{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 12, 0},
+ Optab{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 12, 0},
+ Optab{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0},
+ Optab{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
+ Optab{ASUBC, C_REG, C_NONE, C_ADDCON, C_REG, 27, 4, 0},
+ Optab{ASUBC, C_REG, C_NONE, C_LCON, C_REG, 28, 12, 0},
+ Optab{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0}, /* logical, literal not cc (or/xor) */
+ Optab{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{AOR, C_ANDCON, C_NONE, C_NONE, C_REG, 58, 4, 0},
+ Optab{AOR, C_ANDCON, C_REG, C_NONE, C_REG, 58, 4, 0},
+ Optab{AOR, C_UCON, C_NONE, C_NONE, C_REG, 59, 4, 0},
+ Optab{AOR, C_UCON, C_REG, C_NONE, C_REG, 59, 4, 0},
+ Optab{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 12, 0},
+ Optab{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 12, 0},
+ Optab{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 4, 0}, /* op r1[,r2],r3 */
+ Optab{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 4, 0},
+ Optab{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 4, 0}, /* op r2[,r1],r3 */
+ Optab{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 4, 0},
+ Optab{ASLW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASLW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASLD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASLD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASLD, C_SCON, C_REG, C_NONE, C_REG, 25, 4, 0},
+ Optab{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 25, 4, 0},
+ Optab{ASLW, C_SCON, C_REG, C_NONE, C_REG, 57, 4, 0},
+ Optab{ASLW, C_SCON, C_NONE, C_NONE, C_REG, 57, 4, 0},
+ Optab{ASRAW, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASRAW, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASRAW, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
+ Optab{ASRAW, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
+ Optab{ASRAD, C_REG, C_NONE, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASRAD, C_REG, C_REG, C_NONE, C_REG, 6, 4, 0},
+ Optab{ASRAD, C_SCON, C_REG, C_NONE, C_REG, 56, 4, 0},
+ Optab{ASRAD, C_SCON, C_NONE, C_NONE, C_REG, 56, 4, 0},
+ Optab{ARLWMI, C_SCON, C_REG, C_LCON, C_REG, 62, 4, 0},
+ Optab{ARLWMI, C_REG, C_REG, C_LCON, C_REG, 63, 4, 0},
+ Optab{ARLDMI, C_SCON, C_REG, C_LCON, C_REG, 30, 4, 0},
+ Optab{ARLDC, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
+ Optab{ARLDCL, C_SCON, C_REG, C_LCON, C_REG, 29, 4, 0},
+ Optab{ARLDCL, C_REG, C_REG, C_LCON, C_REG, 14, 4, 0},
+ Optab{ARLDCL, C_REG, C_NONE, C_LCON, C_REG, 14, 4, 0},
+ Optab{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 4, 0},
+ Optab{AFADD, C_FREG, C_REG, C_NONE, C_FREG, 2, 4, 0},
+ Optab{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+ Optab{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 33, 4, 0},
+ Optab{AFMADD, C_FREG, C_REG, C_FREG, C_FREG, 34, 4, 0},
+ Optab{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 4, 0},
+ Optab{AFMUL, C_FREG, C_REG, C_NONE, C_FREG, 32, 4, 0},
+
+ /* store, short offset */
+ Optab{AMOVD, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVW, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVWZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVBZ, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVBZU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVB, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVBU, C_REG, C_REG, C_NONE, C_ZOREG, 7, 4, REGZERO},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVBZU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+
+ /* load, short offset */
+ Optab{AMOVD, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVW, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVWZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVBZ, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVBZU, C_ZOREG, C_REG, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVB, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
+ Optab{AMOVBU, C_ZOREG, C_REG, C_NONE, C_REG, 9, 8, REGZERO},
+ Optab{AMOVD, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+ Optab{AMOVW, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+ Optab{AMOVWZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+ Optab{AMOVBZ, C_SEXT, C_NONE, C_NONE, C_REG, 8, 4, REGSB},
+ Optab{AMOVB, C_SEXT, C_NONE, C_NONE, C_REG, 9, 8, REGSB},
+ Optab{AMOVD, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+ Optab{AMOVW, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+ Optab{AMOVWZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+ Optab{AMOVBZ, C_SAUTO, C_NONE, C_NONE, C_REG, 8, 4, REGSP},
+ Optab{AMOVB, C_SAUTO, C_NONE, C_NONE, C_REG, 9, 8, REGSP},
+ Optab{AMOVD, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVW, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVWZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVBZ, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVBZU, C_SOREG, C_NONE, C_NONE, C_REG, 8, 4, REGZERO},
+ Optab{AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
+ Optab{AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, 9, 8, REGZERO},
+
+ /* store, long offset */
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+
+ /* load, long offset */
+ Optab{AMOVD, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+ Optab{AMOVW, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+ Optab{AMOVWZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+ Optab{AMOVBZ, C_LEXT, C_NONE, C_NONE, C_REG, 36, 8, REGSB},
+ Optab{AMOVB, C_LEXT, C_NONE, C_NONE, C_REG, 37, 12, REGSB},
+ Optab{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+ Optab{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+ Optab{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+ Optab{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, 8, REGSP},
+ Optab{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 37, 12, REGSP},
+ Optab{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+ Optab{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+ Optab{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+ Optab{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 8, REGZERO},
+ Optab{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 37, 12, REGZERO},
+ Optab{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+ Optab{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+ Optab{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+ Optab{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 8, 0},
+ Optab{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 76, 12, 0},
+
+ /* load constant */
+ Optab{AMOVD, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB},
+ Optab{AMOVD, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+ Optab{AMOVD, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+ Optab{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+ Optab{AMOVD, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+ Optab{AMOVW, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
+ Optab{AMOVW, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+ Optab{AMOVW, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+ Optab{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+ Optab{AMOVW, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+ Optab{AMOVWZ, C_SECON, C_NONE, C_NONE, C_REG, 3, 4, REGSB}, /* TO DO: check */
+ Optab{AMOVWZ, C_SACON, C_NONE, C_NONE, C_REG, 3, 4, REGSP},
+ Optab{AMOVWZ, C_LECON, C_NONE, C_NONE, C_REG, 26, 8, REGSB},
+ Optab{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, 8, REGSP},
+ Optab{AMOVWZ, C_ADDCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+
+ /* load unsigned/long constants (TO DO: check) */
+ Optab{AMOVD, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+ Optab{AMOVW, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+ Optab{AMOVWZ, C_UCON, C_NONE, C_NONE, C_REG, 3, 4, REGZERO},
+ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_REG, 19, 8, 0},
+ Optab{AMOVHBR, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
+ Optab{AMOVHBR, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+ Optab{AMOVHBR, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
+ Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+ Optab{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 4, 0},
+ Optab{ASYSCALL, C_REG, C_NONE, C_NONE, C_NONE, 77, 12, 0},
+ Optab{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 12, 0},
+ Optab{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
+ Optab{ABEQ, C_CREG, C_NONE, C_NONE, C_SBRA, 16, 4, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 4, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 4, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_LR, 18, 4, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_CTR, 18, 4, 0},
+ Optab{ABR, C_REG, C_NONE, C_NONE, C_CTR, 18, 4, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
+ Optab{ABC, C_NONE, C_REG, C_NONE, C_LR, 18, 4, 0},
+ Optab{ABC, C_NONE, C_REG, C_NONE, C_CTR, 18, 4, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_LR, 18, 4, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_CTR, 18, 4, 0},
+ Optab{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 8, 0},
+ Optab{AFMOVD, C_SEXT, C_NONE, C_NONE, C_FREG, 8, 4, REGSB},
+ Optab{AFMOVD, C_SAUTO, C_NONE, C_NONE, C_FREG, 8, 4, REGSP},
+ Optab{AFMOVD, C_SOREG, C_NONE, C_NONE, C_FREG, 8, 4, REGZERO},
+ Optab{AFMOVD, C_LEXT, C_NONE, C_NONE, C_FREG, 36, 8, REGSB},
+ Optab{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, 8, REGSP},
+ Optab{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 8, REGZERO},
+ Optab{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 8, 0},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SEXT, 7, 4, REGSB},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SAUTO, 7, 4, REGSP},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_SOREG, 7, 4, REGZERO},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LEXT, 35, 8, REGSB},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, 8, REGSP},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 8, REGZERO},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 8, 0},
+ Optab{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
+ Optab{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 4, 0},
+ Optab{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
+ Optab{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 8, 0},
+ Optab{AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
+ Optab{AEXTSB, C_REG, C_NONE, C_NONE, C_REG, 48, 4, 0},
+ Optab{AEXTSB, C_NONE, C_NONE, C_NONE, C_REG, 48, 4, 0},
+ Optab{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 4, 0},
+ Optab{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 4, 0},
+ Optab{AREM, C_REG, C_NONE, C_NONE, C_REG, 50, 12, 0},
+ Optab{AREM, C_REG, C_REG, C_NONE, C_REG, 50, 12, 0},
+ Optab{AREMU, C_REG, C_NONE, C_NONE, C_REG, 50, 16, 0},
+ Optab{AREMU, C_REG, C_REG, C_NONE, C_REG, 50, 16, 0},
+ Optab{AREMD, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
+ Optab{AREMD, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
+ Optab{AREMDU, C_REG, C_NONE, C_NONE, C_REG, 51, 12, 0},
+ Optab{AREMDU, C_REG, C_REG, C_NONE, C_REG, 51, 12, 0},
+ Optab{AMTFSB0, C_SCON, C_NONE, C_NONE, C_NONE, 52, 4, 0},
+ Optab{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_FREG, 53, 4, 0},
+ Optab{AMOVFL, C_FREG, C_NONE, C_NONE, C_FPSCR, 64, 4, 0},
+ Optab{AMOVFL, C_FREG, C_NONE, C_LCON, C_FPSCR, 64, 4, 0},
+ Optab{AMOVFL, C_LCON, C_NONE, C_NONE, C_FPSCR, 65, 4, 0},
+ Optab{AMOVD, C_MSR, C_NONE, C_NONE, C_REG, 54, 4, 0}, /* mfmsr */
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsrd */
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_MSR, 54, 4, 0}, /* mtmsr */
+
+ /* 64-bit special registers */
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LR, 66, 4, 0},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+ Optab{AMOVD, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVD, C_LR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVD, C_CTR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVD, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+
+ /* 32-bit special registers (gloss over sign-extension or not?) */
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+ Optab{AMOVW, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVW, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_SPR, 66, 4, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_CTR, 66, 4, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_XER, 66, 4, 0},
+ Optab{AMOVWZ, C_SPR, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVWZ, C_XER, C_NONE, C_NONE, C_REG, 66, 4, 0},
+ Optab{AMOVFL, C_FPSCR, C_NONE, C_NONE, C_CREG, 73, 4, 0},
+ Optab{AMOVFL, C_CREG, C_NONE, C_NONE, C_CREG, 67, 4, 0},
+ Optab{AMOVW, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
+ Optab{AMOVWZ, C_CREG, C_NONE, C_NONE, C_REG, 68, 4, 0},
+ Optab{AMOVFL, C_REG, C_NONE, C_LCON, C_CREG, 69, 4, 0},
+ Optab{AMOVFL, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_CREG, 69, 4, 0},
+ Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
+ Optab{ACMP, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
+ Optab{ACMP, C_REG, C_NONE, C_NONE, C_ADDCON, 71, 4, 0},
+ Optab{ACMP, C_REG, C_REG, C_NONE, C_ADDCON, 71, 4, 0},
+ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 4, 0},
+ Optab{ACMPU, C_REG, C_REG, C_NONE, C_REG, 70, 4, 0},
+ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_ANDCON, 71, 4, 0},
+ Optab{ACMPU, C_REG, C_REG, C_NONE, C_ANDCON, 71, 4, 0},
+ Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 4, 0},
+ Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 4, 0},
+ Optab{ATW, C_LCON, C_REG, C_NONE, C_REG, 60, 4, 0},
+ Optab{ATW, C_LCON, C_REG, C_NONE, C_ADDCON, 61, 4, 0},
+ Optab{ADCBF, C_ZOREG, C_NONE, C_NONE, C_NONE, 43, 4, 0},
+ Optab{ADCBF, C_ZOREG, C_REG, C_NONE, C_NONE, 43, 4, 0},
+ Optab{AECOWX, C_REG, C_REG, C_NONE, C_ZOREG, 44, 4, 0},
+ Optab{AECIWX, C_ZOREG, C_REG, C_NONE, C_REG, 45, 4, 0},
+ Optab{AECOWX, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+ Optab{AECIWX, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+ Optab{AEIEIO, C_NONE, C_NONE, C_NONE, C_NONE, 46, 4, 0},
+ Optab{ATLBIE, C_REG, C_NONE, C_NONE, C_NONE, 49, 4, 0},
+ Optab{ATLBIE, C_SCON, C_NONE, C_NONE, C_REG, 49, 4, 0},
+ Optab{ASLBMFEE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
+ Optab{ASLBMTE, C_REG, C_NONE, C_NONE, C_REG, 55, 4, 0},
+ Optab{ASTSW, C_REG, C_NONE, C_NONE, C_ZOREG, 44, 4, 0},
+ Optab{ASTSW, C_REG, C_NONE, C_LCON, C_ZOREG, 41, 4, 0},
+ Optab{ALSW, C_ZOREG, C_NONE, C_NONE, C_REG, 45, 4, 0},
+ Optab{ALSW, C_ZOREG, C_NONE, C_LCON, C_REG, 42, 4, 0},
+ Optab{AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 4, 0},
+ Optab{AUSEFIELD, C_ADDR, C_NONE, C_NONE, C_NONE, 0, 0, 0},
+ Optab{APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0, 0},
+ Optab{AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0, 0},
+ Optab{ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0},
+ Optab{ADUFFZERO, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
+ Optab{ADUFFCOPY, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 4, 0}, // same as ABR/ABL
+
+ // AXXX sentinel: marks the end of the table for buildop's scan.
+ Optab{AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 4, 0},
+}
+
+// An Oprang is a window into the sorted optab for one opcode: the
+// candidate rows run from start up to (but excluding) stop. Both are
+// slices of the same backing array, so the span is recovered by
+// comparing capacities (see oplook).
+type Oprang struct {
+ start []Optab
+ stop []Optab
+}
+
+// oprange maps each A* opcode to its span of candidate optab rows;
+// filled in lazily by buildop on first use (see span9).
+var oprange [ALAST]Oprang
+
+// xcmp[a][b] is nonzero when an operand of class b is acceptable where
+// class a is required; built by buildop from cmp.
+var xcmp [C_NCLASS][C_NCLASS]uint8
+
+// span9 is the ppc64 span pass: it assigns a Pc to every Prog in
+// cursym, grows conditional branches whose targets fall outside the
+// signed 16-bit displacement range, rounds the function size up to
+// FuncAlign, and finally assembles each instruction into cursym.P.
+func span9(ctxt *obj.Link, cursym *obj.LSym) {
+ var p *obj.Prog
+ var q *obj.Prog
+ var o *Optab
+ var m int
+ var bflag int
+ var c int64
+ var otxt int64
+ var out [6]uint32
+ var i int32
+ var bp []byte
+
+ p = cursym.Text
+ if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+ return
+ }
+ ctxt.Cursym = cursym
+ // Frame size is carried in the TEXT Prog's To.Offset low word; the
+ // extra 8 matches the offset aclass adds for D_PARAM operands.
+ ctxt.Autosize = int32(p.To.Offset&0xffffffff) + 8
+
+ // Lazily build the sorted opcode tables on first use.
+ if oprange[AANDN].start == nil {
+ buildop(ctxt)
+ }
+
+ // First pass: assign provisional pcs and total size.
+ c = 0
+ p.Pc = c
+
+ for p = p.Link; p != nil; p = p.Link {
+ ctxt.Curp = p
+ p.Pc = c
+ o = oplook(ctxt, p)
+ m = int(o.size)
+ if m == 0 {
+ if p.As != ANOP && p.As != AFUNCDATA && p.As != APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+
+ c += int64(m)
+ }
+
+ cursym.Size = c
+
+ /*
+ * if any procedure is large enough to
+ * generate a large SBRA branch, then
+ * generate extra passes putting branches
+ * around jmps to fix. this is rare.
+ */
+ bflag = 1
+
+ for bflag != 0 {
+ if ctxt.Debugvlog != 0 {
+ fmt.Fprintf(ctxt.Bso, "%5.2f span1\n", obj.Cputime())
+ }
+ bflag = 0
+ c = 0
+ for p = cursym.Text.Link; p != nil; p = p.Link {
+ p.Pc = c
+ o = oplook(ctxt, p)
+
+ // very large conditional branches
+ if (o.type_ == 16 || o.type_ == 17) && p.Pcond != nil {
+
+ // If the target is (nearly) out of 16-bit reach,
+ // retarget the conditional branch at a fresh ABR that
+ // does the long jump, and insert a second ABR so the
+ // fall-through path skips over it. Sizes changed, so
+ // another pass is needed (bflag).
+ otxt = p.Pcond.Pc - c
+ if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
+ q = ctxt.Arch.Prg()
+ q.Link = p.Link
+ p.Link = q
+ q.As = ABR
+ q.To.Type_ = D_BRANCH
+ q.Pcond = p.Pcond
+ p.Pcond = q
+ q = ctxt.Arch.Prg()
+ q.Link = p.Link
+ p.Link = q
+ q.As = ABR
+ q.To.Type_ = D_BRANCH
+ q.Pcond = q.Link.Link
+
+ //addnop(p->link);
+ //addnop(p);
+ bflag = 1
+ }
+ }
+
+ m = int(o.size)
+ if m == 0 {
+ if p.As != ANOP && p.As != AFUNCDATA && p.As != APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+
+ c += int64(m)
+ }
+
+ cursym.Size = c
+ }
+
+ // Round the function size up to FuncAlign bytes.
+ c += -c & (FuncAlign - 1)
+ cursym.Size = c
+
+ /*
+ * lay out the code, emitting code and data relocations.
+ */
+ if ctxt.Tlsg == nil {
+
+ ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+ }
+
+ obj.Symgrow(ctxt, cursym, cursym.Size)
+
+ // Final pass: assemble each instruction into the symbol's byte
+ // buffer, one 32-bit word at a time in the target byte order.
+ bp = cursym.P
+ for p = cursym.Text.Link; p != nil; p = p.Link {
+ ctxt.Pc = p.Pc
+ ctxt.Curp = p
+ o = oplook(ctxt, p)
+ if int(o.size) > 4*len(out) {
+ log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
+ }
+ asmout(ctxt, p, o, out[:])
+ for i = 0; i < int32(o.size/4); i++ {
+ ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+ bp = bp[4:]
+ }
+ }
+}
+
+// isint32 reports, as 0/1, whether v fits in a signed 32-bit integer.
+func isint32(v int64) int {
+ return bool2int(int64(int32(v)) == v)
+}
+
+// isuint32 reports, as 0/1, whether v fits in an unsigned 32-bit integer.
+func isuint32(v uint64) int {
+ return bool2int(uint64(uint32(v)) == v)
+}
+
+// aclass classifies operand a into one of the C_* operand classes used
+// for optab matching. As a side effect it records the operand's
+// displacement or constant value in ctxt.Instoffset, which the
+// encoding routines read back later.
+func aclass(ctxt *obj.Link, a *obj.Addr) int {
+ var s *obj.LSym
+
+ switch a.Type_ {
+ case D_NONE:
+ return C_NONE
+
+ case D_REG:
+ return C_REG
+
+ case D_FREG:
+ return C_FREG
+
+ case D_CREG:
+ return C_CREG
+
+ case D_SPR:
+ // LR, XER and CTR get their own classes; any other special
+ // purpose register is generic C_SPR.
+ if a.Offset == D_LR {
+ return C_LR
+ }
+ if a.Offset == D_XER {
+ return C_XER
+ }
+ if a.Offset == D_CTR {
+ return C_CTR
+ }
+ return C_SPR
+
+ case D_DCR:
+ return C_SPR
+
+ case D_FPSCR:
+ return C_FPSCR
+
+ case D_MSR:
+ return C_MSR
+
+ case D_OREG:
+ // Memory reference: classify by symbol kind and offset size.
+ switch a.Name {
+ case D_EXTERN,
+ D_STATIC:
+ if a.Sym == nil {
+ break
+ }
+ ctxt.Instoffset = a.Offset
+ if a.Sym != nil { // use relocation
+ return C_ADDR
+ }
+ // NOTE(review): unreachable -- a.Sym was checked for nil
+ // above, so the relocation branch always returns first and
+ // C_LEXT can never be produced here.
+ return C_LEXT
+
+ case D_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case D_PARAM:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case D_NONE:
+ ctxt.Instoffset = a.Offset
+ if ctxt.Instoffset == 0 {
+ return C_ZOREG
+ }
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SOREG
+ }
+ return C_LOREG
+ }
+
+ return C_GOK
+
+ case D_OPT:
+ // Option operand: a 5-bit immediate (masked to 0..31).
+ ctxt.Instoffset = a.Offset & 31
+ if a.Name == D_NONE {
+ return C_SCON
+ }
+ return C_GOK
+
+ case D_CONST:
+ switch a.Name {
+ case D_NONE:
+ ctxt.Instoffset = a.Offset
+ if a.Reg != NREG {
+ // $c(Rx): an address formed from register plus offset.
+ if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
+ return C_SACON
+ }
+ if isint32(ctxt.Instoffset) != 0 {
+ return C_LACON
+ }
+ return C_DACON
+ }
+
+ goto consize
+
+ case D_EXTERN,
+ D_STATIC:
+ s = a.Sym
+ if s == nil {
+ break
+ }
+ if s.Type_ == obj.SCONST {
+ ctxt.Instoffset = s.Value + a.Offset
+ goto consize
+ }
+
+ ctxt.Instoffset = s.Value + a.Offset
+
+ /* not sure why this barfs */
+ return C_LCON
+
+ case D_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ case D_PARAM:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + 8
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+ }
+
+ return C_GOK
+
+ // Classify a plain constant by the narrowest immediate form that
+ // can hold it: zero, signed/unsigned 16-bit, shifted 16-bit (UCON),
+ // 32-bit (LCON), otherwise full 64-bit (DCON).
+ consize:
+ if ctxt.Instoffset >= 0 {
+ if ctxt.Instoffset == 0 {
+ return C_ZCON
+ }
+ if ctxt.Instoffset <= 0x7fff {
+ return C_SCON
+ }
+ if ctxt.Instoffset <= 0xffff {
+ return C_ANDCON
+ }
+ if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) != 0 { /* && (instoffset & (1<<31)) == 0) */
+ return C_UCON
+ }
+ if isint32(ctxt.Instoffset) != 0 || isuint32(uint64(ctxt.Instoffset)) != 0 {
+ return C_LCON
+ }
+ return C_DCON
+ }
+
+ if ctxt.Instoffset >= -0x8000 {
+ return C_ADDCON
+ }
+ if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) != 0 {
+ return C_UCON
+ }
+ if isint32(ctxt.Instoffset) != 0 {
+ return C_LCON
+ }
+ return C_DCON
+
+ case D_BRANCH:
+ return C_SBRA
+ }
+
+ return C_GOK
+}
+
+// prasm prints instruction p; used for diagnostics when oplook fails.
+func prasm(p *obj.Prog) {
+ fmt.Printf("%v\n", p)
+}
+
+// oplook finds the optab entry matching p's opcode and operand
+// classes. The result is cached 1-based in p.Optab, and each operand's
+// class is cached 1-based in its Class field so aclass runs at most
+// once per operand. On failure it diagnoses an illegal combination and
+// returns a dummy entry so assembly can continue.
+func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+ var a1 int
+ var a2 int
+ var a3 int
+ var a4 int
+ var r int
+ var c1 []byte
+ var c3 []byte
+ var c4 []byte
+ var o []Optab
+ var e []Optab
+
+ // Fast path: previously matched entry (stored 1-based; 0 = unset).
+ a1 = int(p.Optab)
+ if a1 != 0 {
+ return &optab[a1-1:][0]
+ }
+ a1 = int(p.From.Class)
+ if a1 == 0 {
+ a1 = aclass(ctxt, &p.From) + 1
+ p.From.Class = int8(a1)
+ }
+
+ a1--
+ a3 = int(p.From3.Class)
+ if a3 == 0 {
+ a3 = aclass(ctxt, &p.From3) + 1
+ p.From3.Class = int8(a3)
+ }
+
+ a3--
+ a4 = int(p.To.Class)
+ if a4 == 0 {
+ a4 = aclass(ctxt, &p.To) + 1
+ p.To.Class = int8(a4)
+ }
+
+ a4--
+ a2 = C_NONE
+ if p.Reg != NREG {
+ a2 = C_REG
+ }
+
+ //print("oplook %P %d %d %d %d\n", p, a1, a2, a3, a4);
+ r = int(p.As)
+
+ o = oprange[r].start
+ if o == nil {
+ o = oprange[r].stop /* just generate an error */
+ }
+ e = oprange[r].stop
+ // Scan this opcode's rows; the xcmp rows allow compatible (wider)
+ // classes, not just exact matches. The -cap() comparisons emulate C
+ // pointer ordering within the optab backing array (c2go artifact).
+ c1 = xcmp[a1][:]
+ c3 = xcmp[a3][:]
+ c4 = xcmp[a4][:]
+ for ; -cap(o) < -cap(e); o = o[1:] {
+ if int(o[0].a2) == a2 {
+ if c1[o[0].a1] != 0 {
+ if c3[o[0].a3] != 0 {
+ if c4[o[0].a4] != 0 {
+ p.Optab = uint16((-cap(o) + cap(optab)) + 1)
+ return &o[0]
+ }
+ }
+ }
+ }
+ }
+
+ ctxt.Diag("illegal combination %v %v %v %v %v", Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+ prasm(p)
+ if o == nil {
+ o = optab
+ }
+ return &o[0]
+}
+
+// cmp reports whether an operand of class b is acceptable where an
+// instruction requires class a -- e.g. any smaller constant class
+// satisfies C_LCON, and a zero constant may stand in for a register
+// when r0iszero is set. buildop uses it to fill the xcmp table.
+func cmp(a int, b int) bool {
+ if a == b {
+ return true
+ }
+ switch a {
+ case C_LCON:
+ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
+ return true
+ }
+
+ case C_ADDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_ANDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_SPR:
+ if b == C_LR || b == C_XER || b == C_CTR {
+ return true
+ }
+
+ case C_UCON:
+ if b == C_ZCON {
+ return true
+ }
+
+ case C_SCON:
+ if b == C_ZCON {
+ return true
+ }
+
+ case C_LACON:
+ if b == C_SACON {
+ return true
+ }
+
+ case C_LBRA:
+ if b == C_SBRA {
+ return true
+ }
+
+ case C_LEXT:
+ if b == C_SEXT {
+ return true
+ }
+
+ case C_LAUTO:
+ if b == C_SAUTO {
+ return true
+ }
+
+ case C_REG:
+ if b == C_ZCON {
+ return r0iszero != 0 /*TypeKind(100016)*/
+ }
+
+ case C_LOREG:
+ if b == C_ZOREG || b == C_SOREG {
+ return true
+ }
+
+ case C_SOREG:
+ if b == C_ZOREG {
+ return true
+ }
+
+ case C_ANY:
+ return true
+ }
+
+ return false
+}
+
+// ocmp implements sort.Interface over optab, ordering rows by opcode
+// and then by the four operand classes, so that all rows for one
+// opcode are contiguous (buildop relies on this to slice oprange).
+type ocmp []Optab
+
+// Len returns the number of optab rows.
+func (x ocmp) Len() int {
+ return len(x)
+}
+
+// Swap exchanges rows i and j.
+func (x ocmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+// Less orders rows by as, then a1, a2, a3, a4.
+func (x ocmp) Less(i, j int) bool {
+ var p1 *Optab
+ var p2 *Optab
+ var n int
+
+ p1 = &x[i]
+ p2 = &x[j]
+ n = int(p1.as) - int(p2.as)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a1) - int(p2.a1)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a2) - int(p2.a2)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a3) - int(p2.a3)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a4) - int(p2.a4)
+ if n != 0 {
+ return n < 0
+ }
+ return false
+}
+
+// buildop precomputes the assembler's lookup tables: xcmp, the operand
+// class compatibility matrix (xcmp[a][b] != 0 means an operand of
+// class b is acceptable where an Optab entry asks for class a — see
+// cmp), and oprange, which after sorting optab brackets the contiguous
+// run of Optab entries for each opcode.  Opcodes that share an
+// encoding with a representative instruction (e.g. all conditional
+// branches with ABEQ) inherit the representative's range here.
+func buildop(ctxt *obj.Link) {
+ var i int
+ var n int
+ var r int
+
+ for i = 0; i < C_NCLASS; i++ {
+ for n = 0; n < C_NCLASS; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = 1
+ }
+ }
+ }
+ // Count the optab entries; the table ends with an AXXX sentinel.
+ for n = 0; optab[n].as != AXXX; n++ {
+
+ }
+ sort.Sort(ocmp(optab[:n]))
+ for i = 0; i < n; i++ {
+ r = int(optab[i].as)
+ oprange[r].start = optab[i:]
+ for int(optab[i].as) == r {
+ i++
+ }
+ oprange[r].stop = optab[i:]
+ i--
+
+ switch r {
+ default:
+ ctxt.Diag("unknown op in build: %v", Aconv(r))
+ log.Fatalf("bad code")
+
+ case ADCBF: /* unary indexed: op (b+a); op (b) */
+ oprange[ADCBI] = oprange[r]
+
+ oprange[ADCBST] = oprange[r]
+ oprange[ADCBT] = oprange[r]
+ oprange[ADCBTST] = oprange[r]
+ oprange[ADCBZ] = oprange[r]
+ oprange[AICBI] = oprange[r]
+
+ case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
+ oprange[ASTWCCC] = oprange[r]
+
+ oprange[ASTDCCC] = oprange[r]
+
+ case AREM: /* macro */
+ oprange[AREMCC] = oprange[r]
+
+ oprange[AREMV] = oprange[r]
+ oprange[AREMVCC] = oprange[r]
+
+ case AREMU:
+ oprange[AREMU] = oprange[r]
+ oprange[AREMUCC] = oprange[r]
+ oprange[AREMUV] = oprange[r]
+ oprange[AREMUVCC] = oprange[r]
+
+ case AREMD:
+ oprange[AREMDCC] = oprange[r]
+ oprange[AREMDV] = oprange[r]
+ oprange[AREMDVCC] = oprange[r]
+
+ case AREMDU:
+ oprange[AREMDU] = oprange[r]
+ oprange[AREMDUCC] = oprange[r]
+ oprange[AREMDUV] = oprange[r]
+ oprange[AREMDUVCC] = oprange[r]
+
+ case ADIVW: /* op Rb[,Ra],Rd */
+ oprange[AMULHW] = oprange[r]
+
+ oprange[AMULHWCC] = oprange[r]
+ oprange[AMULHWU] = oprange[r]
+ oprange[AMULHWUCC] = oprange[r]
+ oprange[AMULLWCC] = oprange[r]
+ oprange[AMULLWVCC] = oprange[r]
+ oprange[AMULLWV] = oprange[r]
+ oprange[ADIVWCC] = oprange[r]
+ oprange[ADIVWV] = oprange[r]
+ oprange[ADIVWVCC] = oprange[r]
+ oprange[ADIVWU] = oprange[r]
+ oprange[ADIVWUCC] = oprange[r]
+ oprange[ADIVWUV] = oprange[r]
+ oprange[ADIVWUVCC] = oprange[r]
+ oprange[AADDCC] = oprange[r]
+ oprange[AADDCV] = oprange[r]
+ oprange[AADDCVCC] = oprange[r]
+ oprange[AADDV] = oprange[r]
+ oprange[AADDVCC] = oprange[r]
+ oprange[AADDE] = oprange[r]
+ oprange[AADDECC] = oprange[r]
+ oprange[AADDEV] = oprange[r]
+ oprange[AADDEVCC] = oprange[r]
+ oprange[ACRAND] = oprange[r]
+ oprange[ACRANDN] = oprange[r]
+ oprange[ACREQV] = oprange[r]
+ oprange[ACRNAND] = oprange[r]
+ oprange[ACRNOR] = oprange[r]
+ oprange[ACROR] = oprange[r]
+ oprange[ACRORN] = oprange[r]
+ oprange[ACRXOR] = oprange[r]
+ oprange[AMULHD] = oprange[r]
+ oprange[AMULHDCC] = oprange[r]
+ oprange[AMULHDU] = oprange[r]
+ oprange[AMULHDUCC] = oprange[r]
+ oprange[AMULLD] = oprange[r]
+ oprange[AMULLDCC] = oprange[r]
+ oprange[AMULLDVCC] = oprange[r]
+ oprange[AMULLDV] = oprange[r]
+ oprange[ADIVD] = oprange[r]
+ oprange[ADIVDCC] = oprange[r]
+ oprange[ADIVDVCC] = oprange[r]
+ oprange[ADIVDV] = oprange[r]
+ oprange[ADIVDU] = oprange[r]
+ oprange[ADIVDUCC] = oprange[r]
+ oprange[ADIVDUVCC] = oprange[r]
+ // Was a duplicate ADIVDUCC assignment, which left ADIVDUV
+ // with no oprange entry (compare the AREMDU group above,
+ // which sets both the V and VCC variants).
+ oprange[ADIVDUV] = oprange[r]
+
+ case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
+ oprange[AMOVH] = oprange[r]
+
+ oprange[AMOVHZ] = oprange[r]
+
+ case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
+ oprange[AMOVHU] = oprange[r]
+
+ oprange[AMOVHZU] = oprange[r]
+ oprange[AMOVWU] = oprange[r]
+ oprange[AMOVWZU] = oprange[r]
+ oprange[AMOVDU] = oprange[r]
+ oprange[AMOVMW] = oprange[r]
+
+ case AAND: /* logical op Rb,Rs,Ra; no literal */
+ oprange[AANDN] = oprange[r]
+
+ oprange[AANDNCC] = oprange[r]
+ oprange[AEQV] = oprange[r]
+ oprange[AEQVCC] = oprange[r]
+ oprange[ANAND] = oprange[r]
+ oprange[ANANDCC] = oprange[r]
+ oprange[ANOR] = oprange[r]
+ oprange[ANORCC] = oprange[r]
+ oprange[AORCC] = oprange[r]
+ oprange[AORN] = oprange[r]
+ oprange[AORNCC] = oprange[r]
+ oprange[AXORCC] = oprange[r]
+
+ case AADDME: /* op Ra, Rd */
+ oprange[AADDMECC] = oprange[r]
+
+ oprange[AADDMEV] = oprange[r]
+ oprange[AADDMEVCC] = oprange[r]
+ oprange[AADDZE] = oprange[r]
+ oprange[AADDZECC] = oprange[r]
+ oprange[AADDZEV] = oprange[r]
+ oprange[AADDZEVCC] = oprange[r]
+ oprange[ASUBME] = oprange[r]
+ oprange[ASUBMECC] = oprange[r]
+ oprange[ASUBMEV] = oprange[r]
+ oprange[ASUBMEVCC] = oprange[r]
+ oprange[ASUBZE] = oprange[r]
+ oprange[ASUBZECC] = oprange[r]
+ oprange[ASUBZEV] = oprange[r]
+ oprange[ASUBZEVCC] = oprange[r]
+
+ case AADDC:
+ oprange[AADDCCC] = oprange[r]
+
+ case ABEQ:
+ oprange[ABGE] = oprange[r]
+ oprange[ABGT] = oprange[r]
+ oprange[ABLE] = oprange[r]
+ oprange[ABLT] = oprange[r]
+ oprange[ABNE] = oprange[r]
+ oprange[ABVC] = oprange[r]
+ oprange[ABVS] = oprange[r]
+
+ case ABR:
+ oprange[ABL] = oprange[r]
+
+ case ABC:
+ oprange[ABCL] = oprange[r]
+
+ case AEXTSB: /* op Rs, Ra */
+ oprange[AEXTSBCC] = oprange[r]
+
+ oprange[AEXTSH] = oprange[r]
+ oprange[AEXTSHCC] = oprange[r]
+ oprange[ACNTLZW] = oprange[r]
+ oprange[ACNTLZWCC] = oprange[r]
+ oprange[ACNTLZD] = oprange[r]
+ oprange[AEXTSW] = oprange[r]
+ oprange[AEXTSWCC] = oprange[r]
+ oprange[ACNTLZDCC] = oprange[r]
+
+ case AFABS: /* fop [s,]d */
+ oprange[AFABSCC] = oprange[r]
+
+ oprange[AFNABS] = oprange[r]
+ oprange[AFNABSCC] = oprange[r]
+ oprange[AFNEG] = oprange[r]
+ oprange[AFNEGCC] = oprange[r]
+ oprange[AFRSP] = oprange[r]
+ oprange[AFRSPCC] = oprange[r]
+ oprange[AFCTIW] = oprange[r]
+ oprange[AFCTIWCC] = oprange[r]
+ oprange[AFCTIWZ] = oprange[r]
+ oprange[AFCTIWZCC] = oprange[r]
+ oprange[AFCTID] = oprange[r]
+ oprange[AFCTIDCC] = oprange[r]
+ oprange[AFCTIDZ] = oprange[r]
+ oprange[AFCTIDZCC] = oprange[r]
+ oprange[AFCFID] = oprange[r]
+ oprange[AFCFIDCC] = oprange[r]
+ oprange[AFRES] = oprange[r]
+ oprange[AFRESCC] = oprange[r]
+ oprange[AFRSQRTE] = oprange[r]
+ oprange[AFRSQRTECC] = oprange[r]
+ oprange[AFSQRT] = oprange[r]
+ oprange[AFSQRTCC] = oprange[r]
+ oprange[AFSQRTS] = oprange[r]
+ oprange[AFSQRTSCC] = oprange[r]
+
+ case AFADD:
+ oprange[AFADDS] = oprange[r]
+ oprange[AFADDCC] = oprange[r]
+ oprange[AFADDSCC] = oprange[r]
+ oprange[AFDIV] = oprange[r]
+ oprange[AFDIVS] = oprange[r]
+ oprange[AFDIVCC] = oprange[r]
+ oprange[AFDIVSCC] = oprange[r]
+ oprange[AFSUB] = oprange[r]
+ oprange[AFSUBS] = oprange[r]
+ oprange[AFSUBCC] = oprange[r]
+ oprange[AFSUBSCC] = oprange[r]
+
+ case AFMADD:
+ oprange[AFMADDCC] = oprange[r]
+ oprange[AFMADDS] = oprange[r]
+ oprange[AFMADDSCC] = oprange[r]
+ oprange[AFMSUB] = oprange[r]
+ oprange[AFMSUBCC] = oprange[r]
+ oprange[AFMSUBS] = oprange[r]
+ oprange[AFMSUBSCC] = oprange[r]
+ oprange[AFNMADD] = oprange[r]
+ oprange[AFNMADDCC] = oprange[r]
+ oprange[AFNMADDS] = oprange[r]
+ oprange[AFNMADDSCC] = oprange[r]
+ oprange[AFNMSUB] = oprange[r]
+ oprange[AFNMSUBCC] = oprange[r]
+ oprange[AFNMSUBS] = oprange[r]
+ oprange[AFNMSUBSCC] = oprange[r]
+ oprange[AFSEL] = oprange[r]
+ oprange[AFSELCC] = oprange[r]
+
+ case AFMUL:
+ oprange[AFMULS] = oprange[r]
+ oprange[AFMULCC] = oprange[r]
+ oprange[AFMULSCC] = oprange[r]
+
+ case AFCMPO:
+ oprange[AFCMPU] = oprange[r]
+
+ case AMTFSB0:
+ oprange[AMTFSB0CC] = oprange[r]
+ oprange[AMTFSB1] = oprange[r]
+ oprange[AMTFSB1CC] = oprange[r]
+
+ case ANEG: /* op [Ra,] Rd */
+ oprange[ANEGCC] = oprange[r]
+
+ oprange[ANEGV] = oprange[r]
+ oprange[ANEGVCC] = oprange[r]
+
+ case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */
+ oprange[AXOR] = oprange[r]
+
+ case ASLW:
+ oprange[ASLWCC] = oprange[r]
+ oprange[ASRW] = oprange[r]
+ oprange[ASRWCC] = oprange[r]
+
+ case ASLD:
+ oprange[ASLDCC] = oprange[r]
+ oprange[ASRD] = oprange[r]
+ oprange[ASRDCC] = oprange[r]
+
+ case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
+ oprange[ASRAWCC] = oprange[r]
+
+ case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
+ oprange[ASRADCC] = oprange[r]
+
+ case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
+ oprange[ASUB] = oprange[r]
+
+ oprange[ASUBCC] = oprange[r]
+ oprange[ASUBV] = oprange[r]
+ oprange[ASUBVCC] = oprange[r]
+ oprange[ASUBCCC] = oprange[r]
+ oprange[ASUBCV] = oprange[r]
+ oprange[ASUBCVCC] = oprange[r]
+ oprange[ASUBE] = oprange[r]
+ oprange[ASUBECC] = oprange[r]
+ oprange[ASUBEV] = oprange[r]
+ oprange[ASUBEVCC] = oprange[r]
+
+ case ASYNC:
+ oprange[AISYNC] = oprange[r]
+ oprange[APTESYNC] = oprange[r]
+ oprange[ATLBSYNC] = oprange[r]
+
+ case ARLWMI:
+ oprange[ARLWMICC] = oprange[r]
+ oprange[ARLWNM] = oprange[r]
+ oprange[ARLWNMCC] = oprange[r]
+
+ case ARLDMI:
+ oprange[ARLDMICC] = oprange[r]
+
+ case ARLDC:
+ oprange[ARLDCCC] = oprange[r]
+
+ case ARLDCL:
+ oprange[ARLDCR] = oprange[r]
+ oprange[ARLDCLCC] = oprange[r]
+ oprange[ARLDCRCC] = oprange[r]
+
+ case AFMOVD:
+ oprange[AFMOVDCC] = oprange[r]
+ oprange[AFMOVDU] = oprange[r]
+ oprange[AFMOVS] = oprange[r]
+ oprange[AFMOVSU] = oprange[r]
+
+ case AECIWX:
+ oprange[ALWAR] = oprange[r]
+ oprange[ALDAR] = oprange[r]
+
+ case ASYSCALL: /* just the op; flow of control */
+ oprange[ARFI] = oprange[r]
+
+ oprange[ARFCI] = oprange[r]
+ oprange[ARFID] = oprange[r]
+ oprange[AHRFID] = oprange[r]
+
+ case AMOVHBR:
+ oprange[AMOVWBR] = oprange[r]
+
+ case ASLBMFEE:
+ oprange[ASLBMFEV] = oprange[r]
+
+ case ATW:
+ oprange[ATD] = oprange[r]
+
+ case ATLBIE:
+ oprange[ASLBIE] = oprange[r]
+ oprange[ATLBIEL] = oprange[r]
+
+ case AEIEIO:
+ oprange[ASLBIA] = oprange[r]
+
+ case ACMP:
+ oprange[ACMPW] = oprange[r]
+
+ case ACMPU:
+ oprange[ACMPWU] = oprange[r]
+
+ case AADD,
+ AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra; andis. $uimm,Rs,Ra */
+ ALSW,
+ AMOVW,
+ /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
+ AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
+ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
+ AMOVB, /* macro: move byte with sign extension */
+ AMOVBU, /* macro: move byte with sign extension & update */
+ AMOVFL,
+ AMULLW,
+ /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
+ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
+ ASTSW,
+ ASLBMTE,
+ AWORD,
+ ADWORD,
+ ANOP,
+ ATEXT,
+ AUNDEF,
+ AUSEFIELD,
+ AFUNCDATA,
+ APCDATA,
+ ADUFFZERO,
+ ADUFFCOPY:
+ break
+ }
+ }
+}
+
+// OPVCC assembles a PPC64 instruction template from primary opcode o
+// (bits 0-5), extended opcode xo (the XO field, shifted into bit 1),
+// the OE overflow-enable bit and the Rc record bit.
+func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
+ return o<<26 | xo<<1 | oe<<10 | rc&1
+}
+
+// OPCC is OPVCC with OE clear (no overflow recording).
+func OPCC(o uint32, xo uint32, rc uint32) uint32 {
+ return OPVCC(o, xo, 0, rc)
+}
+
+// OP is OPVCC with both OE and Rc clear.
+func OP(o uint32, xo uint32) uint32 {
+ return OPVCC(o, xo, 0, 0)
+}
+
+/* the order is dest, a/s, b/imm for both arithmetic and logical operations */
+
+// AOP_RRR fills the three 5-bit register fields of an arithmetic
+// register-register instruction: d into bits 21-25, a into 16-20,
+// b into 11-15.
+func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
+
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+// AOP_IRR is AOP_RRR with the low 16 bits holding a signed immediate
+// instead of a third register.
+func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
+}
+
+// LOP_RRR is the logical-op layout: the source register s occupies the
+// high field (bits 21-25) and the destination a bits 16-20.
+func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+// LOP_IRR is LOP_RRR with an unsigned 16-bit immediate in the low bits.
+func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
+}
+
+// OP_BR assembles an I-form branch: 24-bit displacement li (already
+// word-aligned) plus the AA absolute-address bit.
+func OP_BR(op uint32, li uint32, aa uint32) uint32 {
+ return op | li&0x03FFFFFC | aa<<1
+}
+
+// OP_BC assembles a B-form conditional branch from the BO condition
+// operation field, BI condition-bit index, 14-bit displacement bd and
+// the AA bit.
+func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
+ return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
+}
+
+// OP_BCR assembles an XL-form branch-to-register (bclr/bcctr): BO and
+// BI only, no displacement.
+func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
+ return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
+}
+
+// OP_RLW assembles an M-form rotate: source s, destination a, shift
+// amount sh and the MB/ME mask begin/end bit positions.
+func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
+}
+
+// Frequently used instruction encodings, spelled out in the same
+// opcode<<26 | xo<<1 | oe<<10 | rc form that OPVCC produces.
+const (
+ OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
+ OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
+ OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
+ OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
+ OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
+ OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
+ OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
+ OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
+ OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
+ OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
+ OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
+ OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
+ OP_MFMSR = 31<<26 | 83<<1 | 0<<10 | 0
+ OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
+ OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
+ OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
+ OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
+ OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
+ OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
+ OP_MTMSR = 31<<26 | 146<<1 | 0<<10 | 0
+ OP_MTMSRD = 31<<26 | 178<<1 | 0<<10 | 0
+ OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
+ OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
+ OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
+ OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
+ OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
+ OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
+ OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
+ OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
+ OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
+ OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
+ OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
+ OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
+ OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
+)
+
+// oclass returns the 0-based operand class for a.  Class is stored
+// 1-based so that the zero value means "not yet classified";
+// presumably it is filled in during operand classification elsewhere
+// in this file — confirm against the caller that sets a.Class.
+func oclass(a *obj.Addr) int {
+ return int(a.Class) - 1
+}
+
+// add R_ADDRPOWER relocation to symbol s for the two instructions o1 and o2.
+func addaddrreloc(ctxt *obj.Link, s *obj.LSym, o1 *uint32, o2 *uint32) {
+
+ var rel *obj.Reloc
+
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 8
+ rel.Sym = s
+ rel.Add = int64(uint64(*o1)<<32 | uint64(uint32(*o2)))
+ rel.Type_ = obj.R_ADDRPOWER
+}
+
+/*
+ * 32-bit masks
+ */
+// getmask reports whether v is a valid rlwinm-style rotate mask: a
+// single contiguous run of 1 bits, possibly wrapping around from bit 0
+// to bit 31.  On success it returns 1 with m[0] = MB (mask begin) and
+// m[1] = ME (mask end), counted from the most significant bit per the
+// PowerPC convention; otherwise it returns 0.
+func getmask(m []byte, v uint32) int {
+
+ var i int
+
+ m[1] = 0
+ m[0] = m[1]
+ // A wrap-around mask (set at both bit 31 and bit 0 but not
+ // all-ones) is validated by checking that its complement is a
+ // plain contiguous mask, then swapping the boundaries.
+ if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
+ if getmask(m, ^v) != 0 {
+ i = int(m[0])
+ m[0] = m[1] + 1
+ m[1] = byte(i - 1)
+ return 1
+ }
+
+ return 0
+ }
+
+ // Scan from the most significant bit for the first 1, record the
+ // run, then require that no further 1 bits follow.
+ for i = 0; i < 32; i++ {
+ if v&(1<<uint(31-i)) != 0 {
+ m[0] = byte(i)
+ for {
+ m[1] = byte(i)
+ i++
+ if !(i < 32 && v&(1<<uint(31-i)) != 0) {
+ break
+ }
+ }
+
+ for ; i < 32; i++ {
+ if v&(1<<uint(31-i)) != 0 {
+ return 0
+ }
+ }
+ return 1
+ }
+ }
+
+ return 0
+}
+
+// maskgen decomposes the 32-bit rotate mask v into its MB/ME bit
+// positions in m, diagnosing p when v is not a contiguous mask.
+func maskgen(ctxt *obj.Link, p *obj.Prog, m []byte, v uint32) {
+ if getmask(m, v) == 0 {
+ ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+ }
+}
+
+/*
+ * 64-bit masks (rldic etc)
+ */
+// getmask64 is the 64-bit (rldic-family) analogue of getmask: it
+// reports whether v is a single contiguous run of 1 bits, returning 1
+// with m[0] = MB and m[1] = ME (counted from the most significant
+// bit), or 0 otherwise.  Unlike getmask it has no wrap-around case.
+func getmask64(m []byte, v uint64) int {
+
+ var i int
+
+ m[1] = 0
+ m[0] = m[1]
+ // Scan from the most significant bit for the first 1, record the
+ // run, then require that no further 1 bits follow.
+ for i = 0; i < 64; i++ {
+ if v&(uint64(1)<<uint(63-i)) != 0 {
+ m[0] = byte(i)
+ for {
+ m[1] = byte(i)
+ i++
+ if !(i < 64 && v&(uint64(1)<<uint(63-i)) != 0) {
+ break
+ }
+ }
+
+ for ; i < 64; i++ {
+ if v&(uint64(1)<<uint(63-i)) != 0 {
+ return 0
+ }
+ }
+ return 1
+ }
+ }
+
+ return 0
+}
+
+// maskgen64 decomposes the 64-bit rotate mask v into its MB/ME bit
+// positions in m, diagnosing p when v is not a contiguous mask.
+func maskgen64(ctxt *obj.Link, p *obj.Prog, m []byte, v uint64) {
+ if getmask64(m, v) == 0 {
+ ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+ }
+}
+
+func loadu32(r int, d int64) uint32 {
+ var v int32
+
+ v = int32(d >> 16)
+ if isuint32(uint64(d)) != 0 {
+ return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
+ }
+ return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
+}
+
+// high16adjusted returns the high 16 bits of d, adjusted upward by one
+// when bit 15 is set.  That compensates for the sign extension the
+// subsequent addi of the low half performs, so addis+addi together
+// reconstruct d exactly.
+func high16adjusted(d int32) uint16 {
+ high := uint16(d >> 16)
+ if d&0x8000 != 0 {
+ high++
+ }
+ return high
+}
+
+func asmout(ctxt *obj.Link, p *obj.Prog, o *Optab, out []uint32) {
+ var o1 uint32
+ var o2 uint32
+ var o3 uint32
+ var o4 uint32
+ var o5 uint32
+ var v int32
+ var t int32
+ var d int64
+ var r int
+ var a int
+ var mask [2]uint8
+ var rel *obj.Reloc
+
+ o1 = 0
+ o2 = 0
+ o3 = 0
+ o4 = 0
+ o5 = 0
+
+ //print("%P => case %d\n", p, o->type);
+ switch o.type_ {
+
+ default:
+ ctxt.Diag("unknown type %d", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ break
+
+ case 1: /* mov r1,r2 ==> OR Rs,Rs,Ra */
+ if p.To.Reg == REGZERO && p.From.Type_ == D_CONST {
+
+ v = regoff(ctxt, &p.From)
+ if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
+ //nerrors--;
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+
+ o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
+ break
+ }
+
+ o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
+
+ case 2: /* int/cr/fp op Rb,[Ra],Rd */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 3: /* mov $soreg/addcon/ucon, r ==> addis/addi $i,reg',r */
+ d = vregoff(ctxt, &p.From)
+
+ v = int32(d)
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ a = OP_ADDI
+ if o.a1 == C_UCON {
+ if d&0xffff != 0 {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ v >>= 16
+ if r == REGZERO && isuint32(uint64(d)) != 0 {
+ o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
+ break
+ }
+
+ a = OP_ADDIS
+ } else {
+
+ if int64(int16(d)) != d {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ }
+
+ o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 4: /* add/mul $scon,[r1],r2 */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 5: /* syscall */
+ o1 = uint32(oprrr(ctxt, int(p.As)))
+
+ case 6: /* logical op Rb,[Rs,]Ra; no literal */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 7: /* mov r, soreg ==> stw o(r) */
+ r = int(p.To.Reg)
+
+ if r == NREG {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.To)
+ if p.To.Type_ == D_OREG && p.Reg != NREG {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.Reg), uint32(r))
+ } else {
+
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(v))
+ }
+
+ case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
+ r = int(p.From.Reg)
+
+ if r == NREG {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.From)
+ if p.From.Type_ == D_OREG && p.Reg != NREG {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.Reg), uint32(r))
+ } else {
+
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ }
+
+ case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
+ r = int(p.From.Reg)
+
+ if r == NREG {
+ r = int(o.param)
+ }
+ v = regoff(ctxt, &p.From)
+ if p.From.Type_ == D_OREG && p.Reg != NREG {
+ if v != 0 {
+ ctxt.Diag("illegal indexed instruction\n%v", p)
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.Reg), uint32(r))
+ } else {
+
+ o1 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+ }
+ o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
+
+ case 11: /* br/bl lbra */
+ v = 0
+
+ if p.Pcond != nil {
+ v = int32(p.Pcond.Pc - p.Pc)
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<25) || v >= 1<<24 {
+ ctxt.Diag("branch too far\n%v", p)
+ }
+ }
+
+ o1 = OP_BR(uint32(opirr(ctxt, int(p.As))), uint32(v), 0)
+ if p.To.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ rel.Add = int64(v)
+ rel.Type_ = obj.R_CALLPOWER
+ }
+
+ case 12: /* movb r,r (extsb); movw r,r (extsw) */
+ if p.To.Reg == REGZERO && p.From.Type_ == D_CONST {
+
+ v = regoff(ctxt, &p.From)
+ if r0iszero != 0 /*TypeKind(100016)*/ && v != 0 {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+
+ o1 = LOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(v))
+ break
+ }
+
+ if p.As == AMOVW {
+ o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ } else {
+
+ o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ }
+
+ case 13: /* mov[bhw]z r,r; uses rlwinm not andi. to avoid changing CC */
+ if p.As == AMOVBZ {
+
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
+ } else if p.As == AMOVH {
+ o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ } else if p.As == AMOVHZ {
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
+ } else if p.As == AMOVWZ {
+ o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
+ } else {
+
+ ctxt.Diag("internal: bad mov[bhw]z\n%v", p)
+ }
+
+ case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ switch p.As {
+ case ARLDCL,
+ ARLDCLCC:
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
+ }
+
+ case ARLDCR,
+ ARLDCRCC:
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ ctxt.Diag("invalid mask for rotate: %x (start != 0)\n%v", uint64(d), p)
+ }
+
+ default:
+ ctxt.Diag("unexpected op in rldc case\n%v", p)
+ a = 0
+ }
+
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 |= (uint32(a) & 31) << 6
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 17, /* bc bo,bi,lbra (same for now) */
+ 16: /* bc bo,bi,sbra */
+ a = 0
+
+ if p.From.Type_ == D_CONST {
+ a = int(regoff(ctxt, &p.From))
+ }
+ r = int(p.Reg)
+ if r == NREG {
+ r = 0
+ }
+ v = 0
+ if p.Pcond != nil {
+ v = int32(p.Pcond.Pc - p.Pc)
+ }
+ if v&03 != 0 {
+ ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<16) || v >= 1<<15 {
+ ctxt.Diag("branch too far\n%v", p)
+ }
+ o1 = OP_BC(uint32(opirr(ctxt, int(p.As))), uint32(a), uint32(r), uint32(v), 0)
+
+ case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
+ if p.As == ABC || p.As == ABCL {
+
+ v = regoff(ctxt, &p.To) & 31
+ } else {
+
+ v = 20 /* unconditional */
+ }
+ r = int(p.Reg)
+ if r == NREG {
+ r = 0
+ }
+ o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (D_LR&0x1f)<<16 | ((D_LR>>5)&0x1f)<<11
+ o2 = OPVCC(19, 16, 0, 0)
+ if p.As == ABL || p.As == ABCL {
+ o2 |= 1
+ }
+ o2 = OP_BCR(o2, uint32(v), uint32(r))
+
+ case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
+ if p.As == ABC || p.As == ABCL {
+
+ v = regoff(ctxt, &p.From) & 31
+ } else {
+
+ v = 20 /* unconditional */
+ }
+ r = int(p.Reg)
+ if r == NREG {
+ r = 0
+ }
+ switch oclass(&p.To) {
+ case C_CTR:
+ o1 = OPVCC(19, 528, 0, 0)
+
+ case C_LR:
+ o1 = OPVCC(19, 16, 0, 0)
+
+ default:
+ ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
+ v = 0
+ }
+
+ if p.As == ABL || p.As == ABCL {
+ o1 |= 1
+ }
+ o1 = OP_BCR(o1, uint32(v), uint32(r))
+
+ case 19: /* mov $lcon,r ==> cau+or */
+ d = vregoff(ctxt, &p.From)
+
+ if p.From.Sym == nil {
+ o1 = loadu32(int(p.To.Reg), d)
+ o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
+ } else {
+
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(int32(d))))
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(d))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ case 20: /* add $ucon,,r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ if p.As == AADD && (!(r0iszero != 0 /*TypeKind(100016)*/) && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As)+AEND)), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+
+ case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+
+ ctxt.Diag("cant synthesize large constant\n%v", p)
+ }
+ d = vregoff(ctxt, &p.From)
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+
+ ctxt.Diag("cant synthesize large constant\n%v", p)
+ }
+ d = vregoff(ctxt, &p.From)
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o3 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from, p->pc, 0);
+
+ /*24*/
+ case 25:
+ /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
+ v = regoff(ctxt, &p.From)
+
+ if v < 0 {
+ v = 0
+ } else if v > 63 {
+ v = 63
+ }
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ switch p.As {
+ case ASLD,
+ ASLDCC:
+ a = int(63 - v)
+ o1 = OP_RLDICR
+
+ case ASRD,
+ ASRDCC:
+ a = int(v)
+ v = 64 - v
+ o1 = OP_RLDICL
+
+ default:
+ ctxt.Diag("unexpected op in sldi case\n%v", p)
+ a = 0
+ o1 = 0
+ }
+
+ o1 = AOP_RRR(o1, uint32(r), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(a) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+ if p.As == ASLDCC || p.As == ASRDCC {
+ o1 |= 1 /* Rc */
+ }
+
+ case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
+ if p.To.Reg == REGTMP {
+
+ ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ v = regoff(ctxt, &p.From)
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
+
+ case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
+ v = regoff(ctxt, &p.From3)
+
+ r = int(p.From.Reg)
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
+ if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
+
+ ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ v = regoff(ctxt, &p.From3)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
+ o3 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ //if(dlm) reloc(&p->from3, p->pc, 0);
+
+ case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
+ v = regoff(ctxt, &p.From)
+
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ switch p.As {
+ case ARLDC,
+ ARLDCCC:
+ a = int(mask[0]) /* MB */
+ if int32(mask[1]) != (63 - v) {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ case ARLDCL,
+ ARLDCLCC:
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ case ARLDCR,
+ ARLDCRCC:
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+
+ default:
+ ctxt.Diag("unexpected op in rldic case\n%v", p)
+ a = 0
+ }
+
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(a) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 30: /* rldimi $sh,s,$mask,a */
+ v = regoff(ctxt, &p.From)
+
+ d = vregoff(ctxt, &p.From3)
+ maskgen64(ctxt, p, mask[:], uint64(d))
+ if int32(mask[1]) != (63 - v) {
+ ctxt.Diag("invalid mask for shift: %x (shift %d)\n%v", uint64(d), v, p)
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(mask[0]) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if mask[0]&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 31: /* dword */
+ d = vregoff(ctxt, &p.From)
+
+ if ctxt.Arch.Endian == obj.BigEndian {
+ o1 = uint32(d >> 32)
+ o2 = uint32(d)
+ } else {
+
+ o1 = uint32(d)
+ o2 = uint32(d >> 32)
+ }
+
+ if p.From.Sym != nil {
+ rel = obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type_ = obj.R_ADDR
+ o2 = 0
+ o1 = o2
+ }
+
+ case 32: /* fmul frc,fra,frd */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
+
+ case 33: /* fabs [frb,]frd; fmr. frb,frd */
+ r = int(p.From.Reg)
+
+ if oclass(&p.From) == C_NONE {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(r))
+
+ case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.From3.Reg)&31)<<6
+
+ case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
+ v = regoff(ctxt, &p.To)
+
+ r = int(p.To.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
+
+ case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+
+ case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.From.Reg)
+ if r == NREG {
+ r = int(o.param)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 40: /* word */
+ o1 = uint32(regoff(ctxt, &p.From))
+
+ case 41: /* stswi */
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
+
+ case 42: /* lswi */
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(regoff(ctxt, &p.From3))&0x7F)<<11
+
+ case 43: /* unary indexed source: dcbf (b); dcbf (a+b) */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, uint32(r), uint32(p.From.Reg))
+
+ case 44: /* indexed store */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(opstorex(ctxt, int(p.As))), uint32(p.From.Reg), uint32(r), uint32(p.To.Reg))
+
+ case 45: /* indexed load */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = 0
+ }
+ o1 = AOP_RRR(uint32(oploadx(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 46: /* plain op */
+ o1 = uint32(oprrr(ctxt, int(p.As)))
+
+ case 47: /* op Ra, Rd; also op [Ra,] Rd */
+ r = int(p.From.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+
+ case 48: /* op Rs, Ra */
+ r = int(p.From.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), 0)
+
+ case 49: /* op Rb; op $n, Rb */
+ if p.From.Type_ != D_REG { /* tlbie $L, rB */
+ v = regoff(ctxt, &p.From) & 1
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
+ } else {
+
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), 0, 0, uint32(p.From.Reg))
+ }
+
+ case 50: /* rem[u] r1[,r2],r3 */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ v = oprrr(ctxt, int(p.As))
+ t = v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.As == AREMU {
+ o4 = o3
+
+ /* Clear top 32 bits */
+ o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
+ }
+
+ case 51: /* remd[u] r1[,r2],r3 */
+ r = int(p.Reg)
+
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ v = oprrr(ctxt, int(p.As))
+ t = v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(uint32(v)&^uint32(t), REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|uint32(t), uint32(p.To.Reg), REGTMP, uint32(r))
+
+ case 52: /* mtfsbNx cr(n) */
+ v = regoff(ctxt, &p.From) & 31
+
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(v), 0, 0)
+
+ case 53: /* mffsX ,fr1 */
+ o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
+
+ case 54: /* mov msr,r1; mov r1, msr*/
+ if oclass(&p.From) == C_REG {
+
+ if p.As == AMOVD {
+ o1 = AOP_RRR(OP_MTMSRD, uint32(p.From.Reg), 0, 0)
+ } else {
+
+ o1 = AOP_RRR(OP_MTMSR, uint32(p.From.Reg), 0, 0)
+ }
+ } else {
+
+ o1 = AOP_RRR(OP_MFMSR, uint32(p.To.Reg), 0, 0)
+ }
+
+ case 55: /* op Rb, Rd */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.To.Reg), 0, uint32(p.From.Reg))
+
+ case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.To.Reg), uint32(v)&31)
+ if p.As == ASRAD && (v&0x20 != 0) {
+ o1 |= 1 << 1 /* mb[5] */
+ }
+
+ case 57: /* slw $sh,[s,]a -> rlwinm ... */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+
+ /*
+ * Let user (gs) shoot himself in the foot.
+ * qc has already complained.
+ *
+ if(v < 0 || v > 31)
+ ctxt->diag("illegal shift %ld\n%P", v, p);
+ */
+ if v < 0 {
+
+ v = 0
+ } else if v > 32 {
+ v = 32
+ }
+ if p.As == ASRW || p.As == ASRWCC { /* shift right */
+ mask[0] = uint8(v)
+ mask[1] = 31
+ v = 32 - v
+ } else {
+
+ mask[0] = 0
+ mask[1] = uint8(31 - v)
+ }
+
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
+ if p.As == ASLWCC || p.As == ASRWCC {
+ o1 |= 1 /* Rc */
+ }
+
+ case 58: /* logical $andcon,[s],a */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 59: /* or/and $ucon,,r */
+ v = regoff(ctxt, &p.From)
+
+ r = int(p.Reg)
+ if r == NREG {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_IRR(uint32(opirr(ctxt, int(p.As)+AEND)), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis */
+
+ case 60: /* tw to,a,b */
+ r = int(regoff(ctxt, &p.From) & 31)
+
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+
+ case 61: /* tw to,a,$simm */
+ r = int(regoff(ctxt, &p.From) & 31)
+
+ v = regoff(ctxt, &p.To)
+ o1 = AOP_IRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.Reg), uint32(v))
+
+ case 62: /* rlwmi $sh,s,$mask,a */
+ v = regoff(ctxt, &p.From)
+
+ maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+
+ case 63: /* rlwmi b,s,$mask,a */
+ maskgen(ctxt, p, mask[:], uint32(regoff(ctxt, &p.From3)))
+
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+
+ case 64: /* mtfsf fr[, $m] {,fpcsr} */
+ if p.From3.Type_ != D_NONE {
+
+ v = regoff(ctxt, &p.From3) & 255
+ } else {
+
+ v = 255
+ }
+ o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
+
+ case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
+ if p.To.Reg == NREG {
+
+ ctxt.Diag("must specify FPSCR(n)\n%v", p)
+ }
+ o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(regoff(ctxt, &p.From))&31)<<12
+
+ case 66: /* mov spr,r1; mov r1,spr, also dcr */
+ if p.From.Type_ == D_REG {
+
+ r = int(p.From.Reg)
+ v = int32(p.To.Offset)
+ if p.To.Type_ == D_DCR {
+ o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
+ } else {
+
+ o1 = OPVCC(31, 467, 0, 0) /* mtspr */
+ }
+ } else {
+
+ r = int(p.To.Reg)
+ v = int32(p.From.Offset)
+ if p.From.Type_ == D_DCR {
+ o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
+ } else {
+
+ o1 = OPVCC(31, 339, 0, 0) /* mfspr */
+ }
+ }
+
+ o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
+
+ case 67: /* mcrf crfD,crfS */
+ if p.From.Type_ != D_CREG || p.From.Reg == NREG || p.To.Type_ != D_CREG || p.To.Reg == NREG {
+
+ ctxt.Diag("illegal CR field number\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
+
+ case 68: /* mfcr rD; mfocrf CRM,rD */
+ if p.From.Type_ == D_CREG && p.From.Reg != NREG {
+
+ v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
+ } else {
+
+ o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
+ }
+
+ case 69: /* mtcrf CRM,rS */
+ if p.From3.Type_ != D_NONE {
+
+ if p.To.Reg != NREG {
+ ctxt.Diag("can't use both mask and CR(n)\n%v", p)
+ }
+ v = regoff(ctxt, &p.From3) & 0xff
+ } else {
+
+ if p.To.Reg == NREG {
+ v = 0xff /* CR */
+ } else {
+
+ v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
+ }
+ }
+
+ o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
+
+ case 70: /* [f]cmp r,r,cr*/
+ if p.Reg == NREG {
+
+ r = 0
+ } else {
+
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 71: /* cmp[l] r,i,cr*/
+ if p.Reg == NREG {
+
+ r = 0
+ } else {
+
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(uint32(opirr(ctxt, int(p.As))), uint32(r), uint32(p.From.Reg), 0) | uint32(regoff(ctxt, &p.To))&0xffff
+
+ case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
+ o1 = AOP_RRR(uint32(oprrr(ctxt, int(p.As))), uint32(p.From.Reg), 0, uint32(p.To.Reg))
+
+ case 73: /* mcrfs crfD,crfS */
+ if p.From.Type_ != D_FPSCR || p.From.Reg == NREG || p.To.Type_ != D_CREG || p.To.Reg == NREG {
+
+ ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
+
+ case 77: /* syscall $scon, syscall Rx */
+ if p.From.Type_ == D_CONST {
+
+ if p.From.Offset > BIG || p.From.Offset < -BIG {
+ ctxt.Diag("illegal syscall, sysnum too large: %v", p)
+ }
+ o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
+ } else if p.From.Type_ == D_REG {
+ o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
+ } else {
+
+ ctxt.Diag("illegal syscall: %v", p)
+ o1 = 0x7fe00008 // trap always
+ }
+
+ o2 = uint32(oprrr(ctxt, int(p.As)))
+ o3 = AOP_RRR(uint32(oprrr(ctxt, AXOR)), REGZERO, REGZERO, REGZERO) // XOR R0, R0
+
+ case 78: /* undef */
+ o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
+ always to be an illegal instruction." */
+
+ /* relocation operations */
+ case 74:
+ v = regoff(ctxt, &p.To)
+
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opstore(ctxt, int(p.As))), uint32(p.From.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.To.Sym, &o1, &o2)
+
+ //if(dlm) reloc(&p->to, p->pc, 1);
+
+ case 75:
+ v = regoff(ctxt, &p.From)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+
+ //if(dlm) reloc(&p->from, p->pc, 1);
+
+ case 76:
+ v = regoff(ctxt, &p.From)
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(high16adjusted(v)))
+ o2 = AOP_IRR(uint32(opload(ctxt, int(p.As))), uint32(p.To.Reg), REGTMP, uint32(v))
+ addaddrreloc(ctxt, p.From.Sym, &o1, &o2)
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ //if(dlm) reloc(&p->from, p->pc, 1);
+ break
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+ out[4] = o5
+ return
+}
+
+/*
+ * vregoff evaluates the constant/displacement value of operand a.
+ * aclass deposits the value in ctxt.Instoffset as a side effect; the
+ * field is zeroed first so a stale value from a previous
+ * classification cannot leak through when aclass leaves it untouched.
+ */
+func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
+	ctxt.Instoffset = 0
+	aclass(ctxt, a)
+	return ctxt.Instoffset
+}
+
+/*
+ * regoff is vregoff truncated to 32 bits; callers feed the result into
+ * 16- and 32-bit instruction fields.
+ */
+func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
+	return int32(vregoff(ctxt, a))
+}
+
+/*
+ * oprrr returns the 32-bit opcode template for the register-register
+ * form of a.  By the table's naming pattern, a ...CC variant sets the
+ * fourth OPVCC argument (the Rc/record bit) and a ...V variant sets
+ * the third (the OE/overflow bit).  Unknown opcodes produce a
+ * diagnostic and return 0, which encodes as an illegal instruction.
+ */
+func oprrr(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AADD:
+		return int32(OPVCC(31, 266, 0, 0))
+	case AADDCC:
+		return int32(OPVCC(31, 266, 0, 1))
+	case AADDV:
+		return int32(OPVCC(31, 266, 1, 0))
+	case AADDVCC:
+		return int32(OPVCC(31, 266, 1, 1))
+	case AADDC:
+		return int32(OPVCC(31, 10, 0, 0))
+	case AADDCCC:
+		return int32(OPVCC(31, 10, 0, 1))
+	case AADDCV:
+		return int32(OPVCC(31, 10, 1, 0))
+	case AADDCVCC:
+		return int32(OPVCC(31, 10, 1, 1))
+	case AADDE:
+		return int32(OPVCC(31, 138, 0, 0))
+	case AADDECC:
+		return int32(OPVCC(31, 138, 0, 1))
+	case AADDEV:
+		return int32(OPVCC(31, 138, 1, 0))
+	case AADDEVCC:
+		return int32(OPVCC(31, 138, 1, 1))
+	case AADDME:
+		return int32(OPVCC(31, 234, 0, 0))
+	case AADDMECC:
+		return int32(OPVCC(31, 234, 0, 1))
+	case AADDMEV:
+		return int32(OPVCC(31, 234, 1, 0))
+	case AADDMEVCC:
+		return int32(OPVCC(31, 234, 1, 1))
+	case AADDZE:
+		return int32(OPVCC(31, 202, 0, 0))
+	case AADDZECC:
+		return int32(OPVCC(31, 202, 0, 1))
+	case AADDZEV:
+		return int32(OPVCC(31, 202, 1, 0))
+	case AADDZEVCC:
+		return int32(OPVCC(31, 202, 1, 1))
+
+	case AAND:
+		return int32(OPVCC(31, 28, 0, 0))
+	case AANDCC:
+		return int32(OPVCC(31, 28, 0, 1))
+	case AANDN:
+		return int32(OPVCC(31, 60, 0, 0))
+	case AANDNCC:
+		return int32(OPVCC(31, 60, 0, 1))
+
+	/* compares: bit 21 selects 64-bit (L=1) vs 32-bit (L=0) comparison */
+	case ACMP:
+		return int32(OPVCC(31, 0, 0, 0) | 1<<21) /* L=1 */
+	case ACMPU:
+		return int32(OPVCC(31, 32, 0, 0) | 1<<21)
+	case ACMPW:
+		return int32(OPVCC(31, 0, 0, 0)) /* L=0 */
+	case ACMPWU:
+		return int32(OPVCC(31, 32, 0, 0))
+
+	case ACNTLZW:
+		return int32(OPVCC(31, 26, 0, 0))
+	case ACNTLZWCC:
+		return int32(OPVCC(31, 26, 0, 1))
+	case ACNTLZD:
+		return int32(OPVCC(31, 58, 0, 0))
+	case ACNTLZDCC:
+		return int32(OPVCC(31, 58, 0, 1))
+
+	case ACRAND:
+		return int32(OPVCC(19, 257, 0, 0))
+	case ACRANDN:
+		return int32(OPVCC(19, 129, 0, 0))
+	case ACREQV:
+		return int32(OPVCC(19, 289, 0, 0))
+	case ACRNAND:
+		return int32(OPVCC(19, 225, 0, 0))
+	case ACRNOR:
+		return int32(OPVCC(19, 33, 0, 0))
+	case ACROR:
+		return int32(OPVCC(19, 449, 0, 0))
+	case ACRORN:
+		return int32(OPVCC(19, 417, 0, 0))
+	case ACRXOR:
+		return int32(OPVCC(19, 193, 0, 0))
+
+	case ADCBF:
+		return int32(OPVCC(31, 86, 0, 0))
+	case ADCBI:
+		return int32(OPVCC(31, 470, 0, 0))
+	case ADCBST:
+		return int32(OPVCC(31, 54, 0, 0))
+	case ADCBT:
+		return int32(OPVCC(31, 278, 0, 0))
+	case ADCBTST:
+		return int32(OPVCC(31, 246, 0, 0))
+	case ADCBZ:
+		return int32(OPVCC(31, 1014, 0, 0))
+
+	/*
+	 * REM* share templates with the corresponding DIV*: the caller
+	 * (cases 50/51 above) synthesizes remainder as div/mul/subf.
+	 */
+	case AREM,
+		ADIVW:
+		return int32(OPVCC(31, 491, 0, 0))
+
+	case AREMCC,
+		ADIVWCC:
+		return int32(OPVCC(31, 491, 0, 1))
+
+	case AREMV,
+		ADIVWV:
+		return int32(OPVCC(31, 491, 1, 0))
+
+	case AREMVCC,
+		ADIVWVCC:
+		return int32(OPVCC(31, 491, 1, 1))
+
+	case AREMU,
+		ADIVWU:
+		return int32(OPVCC(31, 459, 0, 0))
+
+	case AREMUCC,
+		ADIVWUCC:
+		return int32(OPVCC(31, 459, 0, 1))
+
+	case AREMUV,
+		ADIVWUV:
+		return int32(OPVCC(31, 459, 1, 0))
+
+	case AREMUVCC,
+		ADIVWUVCC:
+		return int32(OPVCC(31, 459, 1, 1))
+
+	case AREMD,
+		ADIVD:
+		return int32(OPVCC(31, 489, 0, 0))
+
+	case AREMDCC,
+		ADIVDCC:
+		return int32(OPVCC(31, 489, 0, 1))
+
+	case AREMDV,
+		ADIVDV:
+		return int32(OPVCC(31, 489, 1, 0))
+
+	case AREMDVCC,
+		ADIVDVCC:
+		return int32(OPVCC(31, 489, 1, 1))
+
+	case AREMDU,
+		ADIVDU:
+		return int32(OPVCC(31, 457, 0, 0))
+
+	case AREMDUCC,
+		ADIVDUCC:
+		return int32(OPVCC(31, 457, 0, 1))
+
+	case AREMDUV,
+		ADIVDUV:
+		return int32(OPVCC(31, 457, 1, 0))
+
+	case AREMDUVCC,
+		ADIVDUVCC:
+		return int32(OPVCC(31, 457, 1, 1))
+
+	case AEIEIO:
+		return int32(OPVCC(31, 854, 0, 0))
+
+	case AEQV:
+		return int32(OPVCC(31, 284, 0, 0))
+	case AEQVCC:
+		return int32(OPVCC(31, 284, 0, 1))
+
+	case AEXTSB:
+		return int32(OPVCC(31, 954, 0, 0))
+	case AEXTSBCC:
+		return int32(OPVCC(31, 954, 0, 1))
+	case AEXTSH:
+		return int32(OPVCC(31, 922, 0, 0))
+	case AEXTSHCC:
+		return int32(OPVCC(31, 922, 0, 1))
+	case AEXTSW:
+		return int32(OPVCC(31, 986, 0, 0))
+	case AEXTSWCC:
+		return int32(OPVCC(31, 986, 0, 1))
+
+	/* floating point: primary opcode 63 is double, 59 is single */
+	case AFABS:
+		return int32(OPVCC(63, 264, 0, 0))
+	case AFABSCC:
+		return int32(OPVCC(63, 264, 0, 1))
+	case AFADD:
+		return int32(OPVCC(63, 21, 0, 0))
+	case AFADDCC:
+		return int32(OPVCC(63, 21, 0, 1))
+	case AFADDS:
+		return int32(OPVCC(59, 21, 0, 0))
+	case AFADDSCC:
+		return int32(OPVCC(59, 21, 0, 1))
+	case AFCMPO:
+		return int32(OPVCC(63, 32, 0, 0))
+	case AFCMPU:
+		return int32(OPVCC(63, 0, 0, 0))
+	case AFCFID:
+		return int32(OPVCC(63, 846, 0, 0))
+	case AFCFIDCC:
+		return int32(OPVCC(63, 846, 0, 1))
+	case AFCTIW:
+		return int32(OPVCC(63, 14, 0, 0))
+	case AFCTIWCC:
+		return int32(OPVCC(63, 14, 0, 1))
+	case AFCTIWZ:
+		return int32(OPVCC(63, 15, 0, 0))
+	case AFCTIWZCC:
+		return int32(OPVCC(63, 15, 0, 1))
+	case AFCTID:
+		return int32(OPVCC(63, 814, 0, 0))
+	case AFCTIDCC:
+		return int32(OPVCC(63, 814, 0, 1))
+	case AFCTIDZ:
+		return int32(OPVCC(63, 815, 0, 0))
+	case AFCTIDZCC:
+		return int32(OPVCC(63, 815, 0, 1))
+	case AFDIV:
+		return int32(OPVCC(63, 18, 0, 0))
+	case AFDIVCC:
+		return int32(OPVCC(63, 18, 0, 1))
+	case AFDIVS:
+		return int32(OPVCC(59, 18, 0, 0))
+	case AFDIVSCC:
+		return int32(OPVCC(59, 18, 0, 1))
+	case AFMADD:
+		return int32(OPVCC(63, 29, 0, 0))
+	case AFMADDCC:
+		return int32(OPVCC(63, 29, 0, 1))
+	case AFMADDS:
+		return int32(OPVCC(59, 29, 0, 0))
+	case AFMADDSCC:
+		return int32(OPVCC(59, 29, 0, 1))
+
+	case AFMOVS,
+		AFMOVD:
+		return int32(OPVCC(63, 72, 0, 0)) /* load */
+	case AFMOVDCC:
+		return int32(OPVCC(63, 72, 0, 1))
+	case AFMSUB:
+		return int32(OPVCC(63, 28, 0, 0))
+	case AFMSUBCC:
+		return int32(OPVCC(63, 28, 0, 1))
+	case AFMSUBS:
+		return int32(OPVCC(59, 28, 0, 0))
+	case AFMSUBSCC:
+		return int32(OPVCC(59, 28, 0, 1))
+	case AFMUL:
+		return int32(OPVCC(63, 25, 0, 0))
+	case AFMULCC:
+		return int32(OPVCC(63, 25, 0, 1))
+	case AFMULS:
+		return int32(OPVCC(59, 25, 0, 0))
+	case AFMULSCC:
+		return int32(OPVCC(59, 25, 0, 1))
+	case AFNABS:
+		return int32(OPVCC(63, 136, 0, 0))
+	case AFNABSCC:
+		return int32(OPVCC(63, 136, 0, 1))
+	case AFNEG:
+		return int32(OPVCC(63, 40, 0, 0))
+	case AFNEGCC:
+		return int32(OPVCC(63, 40, 0, 1))
+	case AFNMADD:
+		return int32(OPVCC(63, 31, 0, 0))
+	case AFNMADDCC:
+		return int32(OPVCC(63, 31, 0, 1))
+	case AFNMADDS:
+		return int32(OPVCC(59, 31, 0, 0))
+	case AFNMADDSCC:
+		return int32(OPVCC(59, 31, 0, 1))
+	case AFNMSUB:
+		return int32(OPVCC(63, 30, 0, 0))
+	case AFNMSUBCC:
+		return int32(OPVCC(63, 30, 0, 1))
+	case AFNMSUBS:
+		return int32(OPVCC(59, 30, 0, 0))
+	case AFNMSUBSCC:
+		return int32(OPVCC(59, 30, 0, 1))
+	case AFRES:
+		return int32(OPVCC(59, 24, 0, 0))
+	case AFRESCC:
+		return int32(OPVCC(59, 24, 0, 1))
+	case AFRSP:
+		return int32(OPVCC(63, 12, 0, 0))
+	case AFRSPCC:
+		return int32(OPVCC(63, 12, 0, 1))
+	case AFRSQRTE:
+		return int32(OPVCC(63, 26, 0, 0))
+	case AFRSQRTECC:
+		return int32(OPVCC(63, 26, 0, 1))
+	case AFSEL:
+		return int32(OPVCC(63, 23, 0, 0))
+	case AFSELCC:
+		return int32(OPVCC(63, 23, 0, 1))
+	case AFSQRT:
+		return int32(OPVCC(63, 22, 0, 0))
+	case AFSQRTCC:
+		return int32(OPVCC(63, 22, 0, 1))
+	case AFSQRTS:
+		return int32(OPVCC(59, 22, 0, 0))
+	case AFSQRTSCC:
+		return int32(OPVCC(59, 22, 0, 1))
+	case AFSUB:
+		return int32(OPVCC(63, 20, 0, 0))
+	case AFSUBCC:
+		return int32(OPVCC(63, 20, 0, 1))
+	case AFSUBS:
+		return int32(OPVCC(59, 20, 0, 0))
+	case AFSUBSCC:
+		return int32(OPVCC(59, 20, 0, 1))
+
+	case AICBI:
+		return int32(OPVCC(31, 982, 0, 0))
+	case AISYNC:
+		return int32(OPVCC(19, 150, 0, 0))
+
+	case AMTFSB0:
+		return int32(OPVCC(63, 70, 0, 0))
+	case AMTFSB0CC:
+		return int32(OPVCC(63, 70, 0, 1))
+	case AMTFSB1:
+		return int32(OPVCC(63, 38, 0, 0))
+	case AMTFSB1CC:
+		return int32(OPVCC(63, 38, 0, 1))
+
+	case AMULHW:
+		return int32(OPVCC(31, 75, 0, 0))
+	case AMULHWCC:
+		return int32(OPVCC(31, 75, 0, 1))
+	case AMULHWU:
+		return int32(OPVCC(31, 11, 0, 0))
+	case AMULHWUCC:
+		return int32(OPVCC(31, 11, 0, 1))
+	case AMULLW:
+		return int32(OPVCC(31, 235, 0, 0))
+	case AMULLWCC:
+		return int32(OPVCC(31, 235, 0, 1))
+	case AMULLWV:
+		return int32(OPVCC(31, 235, 1, 0))
+	case AMULLWVCC:
+		return int32(OPVCC(31, 235, 1, 1))
+
+	case AMULHD:
+		return int32(OPVCC(31, 73, 0, 0))
+	case AMULHDCC:
+		return int32(OPVCC(31, 73, 0, 1))
+	case AMULHDU:
+		return int32(OPVCC(31, 9, 0, 0))
+	case AMULHDUCC:
+		return int32(OPVCC(31, 9, 0, 1))
+	case AMULLD:
+		return int32(OPVCC(31, 233, 0, 0))
+	case AMULLDCC:
+		return int32(OPVCC(31, 233, 0, 1))
+	case AMULLDV:
+		return int32(OPVCC(31, 233, 1, 0))
+	case AMULLDVCC:
+		return int32(OPVCC(31, 233, 1, 1))
+
+	case ANAND:
+		return int32(OPVCC(31, 476, 0, 0))
+	case ANANDCC:
+		return int32(OPVCC(31, 476, 0, 1))
+	case ANEG:
+		return int32(OPVCC(31, 104, 0, 0))
+	case ANEGCC:
+		return int32(OPVCC(31, 104, 0, 1))
+	case ANEGV:
+		return int32(OPVCC(31, 104, 1, 0))
+	case ANEGVCC:
+		return int32(OPVCC(31, 104, 1, 1))
+	case ANOR:
+		return int32(OPVCC(31, 124, 0, 0))
+	case ANORCC:
+		return int32(OPVCC(31, 124, 0, 1))
+	case AOR:
+		return int32(OPVCC(31, 444, 0, 0))
+	case AORCC:
+		return int32(OPVCC(31, 444, 0, 1))
+	case AORN:
+		return int32(OPVCC(31, 412, 0, 0))
+	case AORNCC:
+		return int32(OPVCC(31, 412, 0, 1))
+
+	case ARFI:
+		return int32(OPVCC(19, 50, 0, 0))
+	case ARFCI:
+		return int32(OPVCC(19, 51, 0, 0))
+	case ARFID:
+		return int32(OPVCC(19, 18, 0, 0))
+	case AHRFID:
+		return int32(OPVCC(19, 274, 0, 0))
+
+	case ARLWMI:
+		return int32(OPVCC(20, 0, 0, 0))
+	case ARLWMICC:
+		return int32(OPVCC(20, 0, 0, 1))
+	case ARLWNM:
+		return int32(OPVCC(23, 0, 0, 0))
+	case ARLWNMCC:
+		return int32(OPVCC(23, 0, 0, 1))
+
+	case ARLDCL:
+		return int32(OPVCC(30, 8, 0, 0))
+	case ARLDCR:
+		return int32(OPVCC(30, 9, 0, 0))
+
+	case ASYSCALL:
+		return int32(OPVCC(17, 1, 0, 0))
+
+	case ASLW:
+		return int32(OPVCC(31, 24, 0, 0))
+	case ASLWCC:
+		return int32(OPVCC(31, 24, 0, 1))
+	case ASLD:
+		return int32(OPVCC(31, 27, 0, 0))
+	case ASLDCC:
+		return int32(OPVCC(31, 27, 0, 1))
+
+	case ASRAW:
+		return int32(OPVCC(31, 792, 0, 0))
+	case ASRAWCC:
+		return int32(OPVCC(31, 792, 0, 1))
+	case ASRAD:
+		return int32(OPVCC(31, 794, 0, 0))
+	case ASRADCC:
+		return int32(OPVCC(31, 794, 0, 1))
+
+	case ASRW:
+		return int32(OPVCC(31, 536, 0, 0))
+	case ASRWCC:
+		return int32(OPVCC(31, 536, 0, 1))
+	case ASRD:
+		return int32(OPVCC(31, 539, 0, 0))
+	case ASRDCC:
+		return int32(OPVCC(31, 539, 0, 1))
+
+	case ASUB:
+		return int32(OPVCC(31, 40, 0, 0))
+	case ASUBCC:
+		return int32(OPVCC(31, 40, 0, 1))
+	case ASUBV:
+		return int32(OPVCC(31, 40, 1, 0))
+	case ASUBVCC:
+		return int32(OPVCC(31, 40, 1, 1))
+	case ASUBC:
+		return int32(OPVCC(31, 8, 0, 0))
+	case ASUBCCC:
+		return int32(OPVCC(31, 8, 0, 1))
+	case ASUBCV:
+		return int32(OPVCC(31, 8, 1, 0))
+	case ASUBCVCC:
+		return int32(OPVCC(31, 8, 1, 1))
+	case ASUBE:
+		return int32(OPVCC(31, 136, 0, 0))
+	case ASUBECC:
+		return int32(OPVCC(31, 136, 0, 1))
+	case ASUBEV:
+		return int32(OPVCC(31, 136, 1, 0))
+	case ASUBEVCC:
+		return int32(OPVCC(31, 136, 1, 1))
+	case ASUBME:
+		return int32(OPVCC(31, 232, 0, 0))
+	case ASUBMECC:
+		return int32(OPVCC(31, 232, 0, 1))
+	case ASUBMEV:
+		return int32(OPVCC(31, 232, 1, 0))
+	case ASUBMEVCC:
+		return int32(OPVCC(31, 232, 1, 1))
+	case ASUBZE:
+		return int32(OPVCC(31, 200, 0, 0))
+	case ASUBZECC:
+		return int32(OPVCC(31, 200, 0, 1))
+	case ASUBZEV:
+		return int32(OPVCC(31, 200, 1, 0))
+	case ASUBZEVCC:
+		return int32(OPVCC(31, 200, 1, 1))
+
+	case ASYNC:
+		return int32(OPVCC(31, 598, 0, 0))
+	case APTESYNC:
+		return int32(OPVCC(31, 598, 0, 0) | 2<<21)
+
+	case ATLBIE:
+		return int32(OPVCC(31, 306, 0, 0))
+	case ATLBIEL:
+		return int32(OPVCC(31, 274, 0, 0))
+	case ATLBSYNC:
+		return int32(OPVCC(31, 566, 0, 0))
+	case ASLBIA:
+		return int32(OPVCC(31, 498, 0, 0))
+	case ASLBIE:
+		return int32(OPVCC(31, 434, 0, 0))
+	case ASLBMFEE:
+		return int32(OPVCC(31, 915, 0, 0))
+	case ASLBMFEV:
+		return int32(OPVCC(31, 851, 0, 0))
+	case ASLBMTE:
+		return int32(OPVCC(31, 402, 0, 0))
+
+	case ATW:
+		return int32(OPVCC(31, 4, 0, 0))
+	case ATD:
+		return int32(OPVCC(31, 68, 0, 0))
+
+	case AXOR:
+		return int32(OPVCC(31, 316, 0, 0))
+	case AXORCC:
+		return int32(OPVCC(31, 316, 0, 1))
+	}
+
+	ctxt.Diag("bad r/r opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * opirr returns the opcode template for the immediate-register form
+ * of a.  Callers look up a+AEND to select the shifted-immediate
+ * variant of the base opcode (ADDIS, ORIS, XORIS, ANDIS. — see the
+ * oris/xoris/andis use in the assembly cases above).  Unknown opcodes
+ * produce a diagnostic and return 0.
+ */
+func opirr(ctxt *obj.Link, a int) int32 {
+	switch a {
+	case AADD:
+		return int32(OPVCC(14, 0, 0, 0))
+	case AADDC:
+		return int32(OPVCC(12, 0, 0, 0))
+	case AADDCCC:
+		return int32(OPVCC(13, 0, 0, 0))
+	case AADD + AEND:
+		return int32(OPVCC(15, 0, 0, 0)) /* ADDIS/CAU */
+
+	case AANDCC:
+		return int32(OPVCC(28, 0, 0, 0))
+	case AANDCC + AEND:
+		return int32(OPVCC(29, 0, 0, 0)) /* ANDIS./ANDIU. */
+
+	case ABR:
+		return int32(OPVCC(18, 0, 0, 0))
+	case ABL:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case ADUFFZERO:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case ADUFFCOPY:
+		return int32(OPVCC(18, 0, 0, 0) | 1)
+	case ABC:
+		return int32(OPVCC(16, 0, 0, 0))
+	case ABCL:
+		return int32(OPVCC(16, 0, 0, 0) | 1)
+
+	/* conditional branches: bc with preset BO/BI condition fields */
+	case ABEQ:
+		return int32(AOP_RRR(16<<26, 12, 2, 0))
+	case ABGE:
+		return int32(AOP_RRR(16<<26, 4, 0, 0))
+	case ABGT:
+		return int32(AOP_RRR(16<<26, 12, 1, 0))
+	case ABLE:
+		return int32(AOP_RRR(16<<26, 4, 1, 0))
+	case ABLT:
+		return int32(AOP_RRR(16<<26, 12, 0, 0))
+	case ABNE:
+		return int32(AOP_RRR(16<<26, 4, 2, 0))
+	case ABVC:
+		return int32(AOP_RRR(16<<26, 4, 3, 0))
+	case ABVS:
+		return int32(AOP_RRR(16<<26, 12, 3, 0))
+
+	case ACMP:
+		return int32(OPVCC(11, 0, 0, 0) | 1<<21) /* L=1 */
+	case ACMPU:
+		return int32(OPVCC(10, 0, 0, 0) | 1<<21)
+	case ACMPW:
+		return int32(OPVCC(11, 0, 0, 0)) /* L=0 */
+	case ACMPWU:
+		return int32(OPVCC(10, 0, 0, 0))
+	case ALSW:
+		return int32(OPVCC(31, 597, 0, 0))
+
+	case AMULLW:
+		return int32(OPVCC(7, 0, 0, 0))
+
+	case AOR:
+		return int32(OPVCC(24, 0, 0, 0))
+	case AOR + AEND:
+		return int32(OPVCC(25, 0, 0, 0)) /* ORIS/ORIU */
+
+	case ARLWMI:
+		return int32(OPVCC(20, 0, 0, 0)) /* rlwimi */
+	case ARLWMICC:
+		return int32(OPVCC(20, 0, 0, 1))
+	case ARLDMI:
+		return int32(OPVCC(30, 0, 0, 0) | 3<<2) /* rldimi */
+	case ARLDMICC:
+		return int32(OPVCC(30, 0, 0, 1) | 3<<2)
+
+	case ARLWNM:
+		return int32(OPVCC(21, 0, 0, 0)) /* rlwinm */
+	case ARLWNMCC:
+		return int32(OPVCC(21, 0, 0, 1))
+
+	case ARLDCL:
+		return int32(OPVCC(30, 0, 0, 0)) /* rldicl */
+	case ARLDCLCC:
+		return int32(OPVCC(30, 0, 0, 1))
+	case ARLDCR:
+		return int32(OPVCC(30, 1, 0, 0)) /* rldicr */
+	case ARLDCRCC:
+		return int32(OPVCC(30, 1, 0, 1))
+	case ARLDC:
+		return int32(OPVCC(30, 0, 0, 0) | 2<<2)
+	case ARLDCCC:
+		return int32(OPVCC(30, 0, 0, 1) | 2<<2)
+
+	case ASRAW:
+		return int32(OPVCC(31, 824, 0, 0))
+	case ASRAWCC:
+		return int32(OPVCC(31, 824, 0, 1))
+	case ASRAD:
+		return int32(OPVCC(31, (413 << 1), 0, 0))
+	case ASRADCC:
+		return int32(OPVCC(31, (413 << 1), 0, 1))
+
+	case ASTSW:
+		return int32(OPVCC(31, 725, 0, 0))
+
+	case ASUBC:
+		return int32(OPVCC(8, 0, 0, 0))
+
+	case ATW:
+		return int32(OPVCC(3, 0, 0, 0))
+	case ATD:
+		return int32(OPVCC(2, 0, 0, 0))
+
+	case AXOR:
+		return int32(OPVCC(26, 0, 0, 0)) /* XORIL */
+	case AXOR + AEND:
+		return int32(OPVCC(27, 0, 0, 0)) /* XORIU */
+	}
+
+	ctxt.Diag("bad opcode i/r %v", Aconv(a))
+	return 0
+}
+
+/*
+ * load o(a),d
+ *
+ * opload returns the opcode template for the D-form (register +
+ * 16-bit displacement) load corresponding to move opcode a.
+ * Unknown opcodes produce a diagnostic and return 0.
+ */
+func opload(ctxt *obj.Link, a int) int32 {
+
+	switch a {
+	case AMOVD:
+		return int32(OPVCC(58, 0, 0, 0)) /* ld */
+	case AMOVDU:
+		return int32(OPVCC(58, 0, 0, 1)) /* ldu */
+	case AMOVWZ:
+		return int32(OPVCC(32, 0, 0, 0)) /* lwz */
+	case AMOVWZU:
+		return int32(OPVCC(33, 0, 0, 0)) /* lwzu */
+	case AMOVW:
+		return int32(OPVCC(58, 0, 0, 0) | 1<<1) /* lwa */
+
+	/* no AMOVWU */
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(34, 0, 0, 0))
+	/* load */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(35, 0, 0, 0))
+	case AFMOVD:
+		return int32(OPVCC(50, 0, 0, 0))
+	case AFMOVDU:
+		return int32(OPVCC(51, 0, 0, 0))
+	case AFMOVS:
+		return int32(OPVCC(48, 0, 0, 0))
+	case AFMOVSU:
+		return int32(OPVCC(49, 0, 0, 0))
+	case AMOVH:
+		return int32(OPVCC(42, 0, 0, 0))
+	case AMOVHU:
+		return int32(OPVCC(43, 0, 0, 0))
+	case AMOVHZ:
+		return int32(OPVCC(40, 0, 0, 0))
+	case AMOVHZU:
+		return int32(OPVCC(41, 0, 0, 0))
+	case AMOVMW:
+		return int32(OPVCC(46, 0, 0, 0)) /* lmw */
+	}
+
+	ctxt.Diag("bad load opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * indexed load a(b),d
+ *
+ * oploadx returns the opcode template for the X-form (register +
+ * register index) load corresponding to move opcode a.  Unknown
+ * opcodes produce a diagnostic and return 0.
+ */
+func oploadx(ctxt *obj.Link, a int) int32 {
+
+	switch a {
+	case AMOVWZ:
+		return int32(OPVCC(31, 23, 0, 0)) /* lwzx */
+	case AMOVWZU:
+		return int32(OPVCC(31, 55, 0, 0)) /* lwzux */
+	case AMOVW:
+		return int32(OPVCC(31, 341, 0, 0)) /* lwax */
+	case AMOVWU:
+		return int32(OPVCC(31, 373, 0, 0)) /* lwaux */
+
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(31, 87, 0, 0)) /* lbzx */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(31, 119, 0, 0)) /* lbzux */
+	case AFMOVD:
+		return int32(OPVCC(31, 599, 0, 0)) /* lfdx */
+	case AFMOVDU:
+		return int32(OPVCC(31, 631, 0, 0)) /* lfdux */
+	case AFMOVS:
+		return int32(OPVCC(31, 535, 0, 0)) /* lfsx */
+	case AFMOVSU:
+		return int32(OPVCC(31, 567, 0, 0)) /* lfsux */
+	case AMOVH:
+		return int32(OPVCC(31, 343, 0, 0)) /* lhax */
+	case AMOVHU:
+		return int32(OPVCC(31, 375, 0, 0)) /* lhaux */
+	case AMOVHBR:
+		return int32(OPVCC(31, 790, 0, 0)) /* lhbrx */
+	case AMOVWBR:
+		return int32(OPVCC(31, 534, 0, 0)) /* lwbrx */
+	case AMOVHZ:
+		return int32(OPVCC(31, 279, 0, 0)) /* lhzx */
+	case AMOVHZU:
+		return int32(OPVCC(31, 311, 0, 0)) /* lhzux */
+	case AECIWX:
+		return int32(OPVCC(31, 310, 0, 0)) /* eciwx */
+	case ALWAR:
+		return int32(OPVCC(31, 20, 0, 0)) /* lwarx */
+	case ALDAR:
+		return int32(OPVCC(31, 84, 0, 0)) /* ldarx */
+	case ALSW:
+		return int32(OPVCC(31, 533, 0, 0)) /* lswx */
+	case AMOVD:
+		return int32(OPVCC(31, 21, 0, 0)) /* ldx */
+	case AMOVDU:
+		return int32(OPVCC(31, 53, 0, 0)) /* ldux */
+	}
+
+	ctxt.Diag("bad loadx opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * store s,o(d)
+ *
+ * opstore returns the opcode template for the D-form (register +
+ * 16-bit displacement) store corresponding to move opcode a.
+ * Unknown opcodes produce a diagnostic and return 0.
+ */
+func opstore(ctxt *obj.Link, a int) int32 {
+
+	switch a {
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(38, 0, 0, 0)) /* stb */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(39, 0, 0, 0)) /* stbu */
+	case AFMOVD:
+		return int32(OPVCC(54, 0, 0, 0)) /* stfd */
+	case AFMOVDU:
+		return int32(OPVCC(55, 0, 0, 0)) /* stfdu */
+	case AFMOVS:
+		return int32(OPVCC(52, 0, 0, 0)) /* stfs */
+	case AFMOVSU:
+		return int32(OPVCC(53, 0, 0, 0)) /* stfsu */
+
+	case AMOVHZ,
+		AMOVH:
+		return int32(OPVCC(44, 0, 0, 0)) /* sth */
+
+	case AMOVHZU,
+		AMOVHU:
+		return int32(OPVCC(45, 0, 0, 0)) /* sthu */
+	case AMOVMW:
+		return int32(OPVCC(47, 0, 0, 0)) /* stmw */
+	case ASTSW:
+		return int32(OPVCC(31, 725, 0, 0)) /* stswi */
+
+	case AMOVWZ,
+		AMOVW:
+		return int32(OPVCC(36, 0, 0, 0)) /* stw */
+
+	case AMOVWZU,
+		AMOVWU:
+		return int32(OPVCC(37, 0, 0, 0)) /* stwu */
+	case AMOVD:
+		return int32(OPVCC(62, 0, 0, 0)) /* std */
+	case AMOVDU:
+		return int32(OPVCC(62, 0, 0, 1)) /* stdu */
+	}
+
+	ctxt.Diag("unknown store opcode %v", Aconv(a))
+	return 0
+}
+
+/*
+ * indexed store s,a(b)
+ *
+ * opstorex returns the opcode template for the X-form (register +
+ * register index) store corresponding to move opcode a.  Unknown
+ * opcodes produce a diagnostic and return 0.
+ */
+func opstorex(ctxt *obj.Link, a int) int32 {
+
+	switch a {
+	case AMOVB,
+		AMOVBZ:
+		return int32(OPVCC(31, 215, 0, 0)) /* stbx */
+
+	case AMOVBU,
+		AMOVBZU:
+		return int32(OPVCC(31, 247, 0, 0)) /* stbux */
+	case AFMOVD:
+		return int32(OPVCC(31, 727, 0, 0)) /* stfdx */
+	case AFMOVDU:
+		return int32(OPVCC(31, 759, 0, 0)) /* stfdux */
+	case AFMOVS:
+		return int32(OPVCC(31, 663, 0, 0)) /* stfsx */
+	case AFMOVSU:
+		return int32(OPVCC(31, 695, 0, 0)) /* stfsux */
+
+	case AMOVHZ,
+		AMOVH:
+		return int32(OPVCC(31, 407, 0, 0)) /* sthx */
+	case AMOVHBR:
+		return int32(OPVCC(31, 918, 0, 0)) /* sthbrx */
+
+	case AMOVHZU,
+		AMOVHU:
+		return int32(OPVCC(31, 439, 0, 0)) /* sthux */
+
+	case AMOVWZ,
+		AMOVW:
+		return int32(OPVCC(31, 151, 0, 0)) /* stwx */
+
+	case AMOVWZU,
+		AMOVWU:
+		return int32(OPVCC(31, 183, 0, 0)) /* stwux */
+	case ASTSW:
+		return int32(OPVCC(31, 661, 0, 0)) /* stswx */
+	case AMOVWBR:
+		return int32(OPVCC(31, 662, 0, 0)) /* stwbrx */
+	case ASTWCCC:
+		return int32(OPVCC(31, 150, 0, 1)) /* stwcx. */
+	case ASTDCCC:
+		return int32(OPVCC(31, 214, 0, 1)) /* stdcx. */
+	case AECOWX:
+		return int32(OPVCC(31, 438, 0, 0)) /* ecowx */
+	case AMOVD:
+		return int32(OPVCC(31, 149, 0, 0)) /* stdx */
+	case AMOVDU:
+		return int32(OPVCC(31, 181, 0, 0)) /* stdux */
+	}
+
+	ctxt.Diag("unknown storex opcode %v", Aconv(a))
+	return 0
+}
--- /dev/null
+// cmd/9l/list.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+const (
+	// STRINGSZ is the conventional formatting-buffer size, carried
+	// over from the original C listing code; presumably an upper
+	// bound for formatted output lines — TODO confirm usage.
+	STRINGSZ = 1000
+)
+
+//
+// Format conversions
+// %A int Opcodes (instruction mnemonics)
+//
+// %D Addr* Addresses (instruction operands)
+// Flags: "%lD": separate the high and low words of a constant by "-"
+//
+// %P Prog* Instructions
+//
+// %R int Registers
+//
+// %$ char* String constant addresses (for internal use only)
+// %^ int C_* classes (for liblink internal use)
+
+var bigP *obj.Prog
+
+// Pconv formats instruction p for listing output ("%P").  DATA-like
+// pseudo-ops, TEXT and GLOBL each get a dedicated layout; every other
+// instruction is printed as "pc (line)\top\tfrom[,reg][,from3],to".
+func Pconv(p *obj.Prog) string {
+	var str string
+	var fp string
+
+	var a int
+	var ch int
+
+	a = int(p.As)
+
+	if a == ADATA || a == AINIT || a == ADYNT {
+		str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+	} else if a == ATEXT {
+		// TEXT: the frame size in p.Reg is shown when nonzero; the
+		// destination is printed with the fmtLong flag (splits the
+		// 64-bit constant into its two words).
+		if p.Reg != 0 {
+			str = fmt.Sprintf("%.5d (%v) %v %v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, fmtLong, &p.To))
+		} else {
+
+			str = fmt.Sprintf("%.5d (%v) %v %v,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), Dconv(p, fmtLong, &p.To))
+		}
+	} else if a == AGLOBL {
+		if p.Reg != 0 {
+			str = fmt.Sprintf("%.5d (%v) %v %v,%d,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.Reg, Dconv(p, 0, &p.To))
+		} else {
+
+			str = fmt.Sprintf("%.5d (%v) %v %v,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		}
+	} else {
+
+		// A leading '*' marks instructions excluded from scheduling.
+		if p.Mark&NOSCHED != 0 {
+			str += fmt.Sprintf("*")
+		}
+		if p.Reg == NREG && p.From3.Type_ == D_NONE {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+		} else if a != ATEXT && p.From.Type_ == D_OREG {
+			// register+register indexed source: d(Ra+Rb) form.
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%d(R%d+R%d),%v", p.Pc, p.Line(), Aconv(a), p.From.Offset, p.From.Reg, p.Reg, Dconv(p, 0, &p.To))
+		} else if p.To.Type_ == D_OREG {
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v,%d(R%d+R%d)", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From), p.To.Offset, p.To.Reg, p.Reg)
+		} else {
+
+			str += fmt.Sprintf("%.5d (%v)\t%v\t%v", p.Pc, p.Line(), Aconv(a), Dconv(p, 0, &p.From))
+			if p.Reg != NREG {
+				// Middle operand: an F prefix when the source is a
+				// float register, R otherwise.
+				ch = 'R'
+				if p.From.Type_ == D_FREG {
+					ch = 'F'
+				}
+				str += fmt.Sprintf(",%c%d", ch, p.Reg)
+			}
+
+			if p.From3.Type_ != D_NONE {
+				str += fmt.Sprintf(",%v", Dconv(p, 0, &p.From3))
+			}
+			str += fmt.Sprintf(",%v", Dconv(p, 0, &p.To))
+		}
+
+		// NOTE(review): the "# spadj=" suffix is only produced on this
+		// general branch, never for DATA/TEXT/GLOBL — looks deliberate
+		// but worth confirming against the C original.
+		if p.Spadj != 0 {
+			fp += fmt.Sprintf("%s # spadj=%d", str, p.Spadj)
+			return fp
+		}
+	}
+
+	fp += str
+	return fp
+}
+
+func Aconv(a int) string {
+ var s string
+ var fp string
+
+ s = "???"
+ if a >= AXXX && a < ALAST {
+ s = anames9[a]
+ }
+ fp += s
+ return fp
+}
+
+// Dconv formats the address operand a of instruction p for listing
+// output. flag may include fmtLong, which prints a D_CONST operand as
+// its two 32-bit halves "$lo-hi" (used for the combined stack/argument
+// size word of an ATEXT; see Pconv).
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var v int32
+
+	if flag&fmtLong != 0 /*untyped*/ {
+		if a.Type_ == D_CONST {
+			// Split the 64-bit constant into low and high 32-bit words.
+			str = fmt.Sprintf("$%d-%d", int32(a.Offset), int32(a.Offset>>32))
+		} else {
+
+			// ATEXT dst is not constant
+			str = fmt.Sprintf("!!%v", Dconv(p, 0, a))
+		}
+
+		goto ret
+	}
+
+	switch a.Type_ {
+	default:
+		str = fmt.Sprintf("GOK-type(%d)", a.Type_)
+
+	case D_NONE:
+		str = ""
+		// A D_NONE operand carrying a name, register, or symbol is
+		// suspicious; make that visible in the listing.
+		if a.Name != D_NONE || a.Reg != NREG || a.Sym != nil {
+			str = fmt.Sprintf("%v(R%d)(NONE)", Mconv(a), a.Reg)
+		}
+
+	case D_CONST,
+		D_DCONST:
+		if a.Reg != NREG {
+			str = fmt.Sprintf("$%v(R%d)", Mconv(a), a.Reg)
+		} else {
+
+			str = fmt.Sprintf("$%v", Mconv(a))
+		}
+
+	case D_OREG:
+		if a.Reg != NREG {
+			str = fmt.Sprintf("%v(R%d)", Mconv(a), a.Reg)
+		} else {
+
+			str = fmt.Sprintf("%v", Mconv(a))
+		}
+
+	case D_REG:
+		str = fmt.Sprintf("R%d", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(R%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_FREG:
+		str = fmt.Sprintf("F%d", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(F%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_CREG:
+		if a.Reg == NREG {
+			str = "CR"
+		} else {
+
+			str = fmt.Sprintf("CR%d", a.Reg)
+		}
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(C%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_SPR:
+		// Special-purpose registers are identified by a.Offset, not a.Reg.
+		if a.Name == D_NONE && a.Sym == nil {
+			switch uint32(a.Offset) {
+			case D_XER:
+				str = fmt.Sprintf("XER")
+			case D_LR:
+				str = fmt.Sprintf("LR")
+			case D_CTR:
+				str = fmt.Sprintf("CTR")
+			default:
+				str = fmt.Sprintf("SPR(%d)", a.Offset)
+				break
+			}
+
+			break
+		}
+
+		str = fmt.Sprintf("SPR-GOK(%d)", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(SPR-GOK%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_DCR:
+		if a.Name == D_NONE && a.Sym == nil {
+			str = fmt.Sprintf("DCR(%d)", a.Offset)
+			break
+		}
+
+		str = fmt.Sprintf("DCR-GOK(%d)", a.Reg)
+		if a.Name != D_NONE || a.Sym != nil {
+			str = fmt.Sprintf("%v(DCR-GOK%d)(REG)", Mconv(a), a.Reg)
+		}
+
+	case D_OPT:
+		str = fmt.Sprintf("OPT(%d)", a.Reg)
+
+	case D_FPSCR:
+		if a.Reg == NREG {
+			str = "FPSCR"
+		} else {
+
+			str = fmt.Sprintf("FPSCR(%d)", a.Reg)
+		}
+
+	case D_MSR:
+		str = fmt.Sprintf("MSR")
+
+	case D_BRANCH:
+		// Prefer the resolved target PC when the branch is linked.
+		if p.Pcond != nil {
+			v = int32(p.Pcond.Pc)
+
+			//if(v >= INITTEXT)
+			//	v -= INITTEXT-HEADR;
+			if a.Sym != nil {
+
+				str = fmt.Sprintf("%s+%.5x(BRANCH)", a.Sym.Name, uint32(v))
+			} else {
+
+				str = fmt.Sprintf("%.5x(BRANCH)", uint32(v))
+			}
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(APC)", a.Sym.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%d(APC)", a.Offset)
+		}
+
+	//sprint(str, "$%lux-%lux", a->ieee.h, a->ieee.l);
+	case D_FCONST:
+		str = fmt.Sprintf("$%.17g", a.U.Dval)
+
+	case D_SCONST:
+		str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+		break
+	}
+
+ret:
+	fp += str
+	return fp
+}
+
+// Mconv formats the memory/name portion of operand a (symbol, offset,
+// and pseudo-register SB/SP/FP) according to a.Name.
+//
+// NOTE(review): the D_EXTERN and D_STATIC cases dereference s without a
+// nil check, unlike D_AUTO and D_PARAM — presumably such operands
+// always carry a symbol; confirm against callers.
+func Mconv(a *obj.Addr) string {
+	var str string
+	var fp string
+
+	var s *obj.LSym
+	var l int32
+
+	s = a.Sym
+
+	//if(s == nil) {
+	//	l = a->offset;
+	//	if((vlong)l != a->offset)
+	//		sprint(str, "0x%llux", a->offset);
+	//	else
+	//		sprint(str, "%lld", a->offset);
+	//	goto out;
+	//}
+	switch a.Name {
+
+	default:
+		str = fmt.Sprintf("GOK-name(%d)", a.Name)
+
+	case D_NONE:
+		// Bare offset: print in hex if it does not fit in 32 bits.
+		l = int32(a.Offset)
+		if int64(l) != a.Offset {
+			str = fmt.Sprintf("0x%x", uint64(a.Offset))
+		} else {
+
+			str = fmt.Sprintf("%d", a.Offset)
+		}
+
+	case D_EXTERN:
+		if a.Offset != 0 {
+			str = fmt.Sprintf("%s+%d(SB)", s.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%s(SB)", s.Name)
+		}
+
+	case D_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", s.Name, a.Offset)
+
+	case D_AUTO:
+		if s == nil {
+			str = fmt.Sprintf("%d(SP)", -a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%s-%d(SP)", s.Name, -a.Offset)
+		}
+
+	case D_PARAM:
+		if s == nil {
+			str = fmt.Sprintf("%d(FP)", a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%s+%d(FP)", s.Name, a.Offset)
+		}
+		break
+	}
+
+	//out:
+	fp += str
+	return fp
+}
+
+func Rconv(r int) string {
+ var str string
+ var fp string
+
+ if r < NREG {
+ str = fmt.Sprintf("r%d", r)
+ } else {
+
+ str = fmt.Sprintf("f%d", r-NREG)
+ }
+ fp += str
+ return fp
+}
+
+func DRconv(a int) string {
+ var s string
+ var fp string
+
+ s = "C_??"
+ if a >= C_NONE && a <= C_NCLASS {
+ s = cnames9[a]
+ }
+ fp += s
+ return fp
+}
--- /dev/null
+// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "math"
+)
+
+// zprg is the prototype zero instruction for ppc64: an AGOK opcode with
+// every operand slot set to "no name, no type, no register". New Progs
+// are initialized by copying it (see prg below).
+var zprg = obj.Prog{
+	As: AGOK,
+	Reg: NREG,
+	From: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+	From3: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+	To: obj.Addr{
+		Name: D_NONE,
+		Type_: D_NONE,
+		Reg: NREG,
+	},
+}
+
+// symtype returns the addressing name (D_EXTERN, D_AUTO, ...) of a,
+// used by the generic obj code to classify the operand.
+func symtype(a *obj.Addr) int {
+	return int(a.Name)
+}
+
+// isdata reports whether p is a DATA or GLOBL pseudo-instruction.
+func isdata(p *obj.Prog) bool {
+	return p.As == ADATA || p.As == AGLOBL
+}
+
+// iscall reports whether p is a function call (BL).
+func iscall(p *obj.Prog) bool {
+	return p.As == ABL
+}
+
+// datasize returns p.Reg, where the size operand of a DATA
+// pseudo-instruction is recorded on this architecture.
+func datasize(p *obj.Prog) int {
+	return int(p.Reg)
+}
+
+// textflag returns p.Reg, where the TEXT flag bits (e.g. NOSPLIT,
+// NEEDCTXT, WRAPPER — see addstacksplit) are recorded.
+func textflag(p *obj.Prog) int {
+	return int(p.Reg)
+}
+
+// settextflag stores the TEXT flag bits f into p.Reg.
+func settextflag(p *obj.Prog, f int) {
+	p.Reg = uint8(f)
+}
+
+// progedit is the per-instruction rewrite hook applied as each Prog is
+// fed to the assembler. It canonicalizes symbol branch targets to
+// D_BRANCH, moves float and over-32-bit integer constants into memory
+// (interned "$f32.", "$f64.", "$i64." symbols loaded via D_OREG), and
+// rewrites SUB-of-constant into ADD of the negated constant.
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+
+	p.From.Class = 0
+	p.To.Class = 0
+
+	// Rewrite BR/BL to symbol as D_BRANCH.
+	switch p.As {
+
+	case ABR,
+		ABL,
+		ARETURN,
+		ADUFFZERO,
+		ADUFFCOPY:
+		if p.To.Sym != nil {
+			p.To.Type_ = D_BRANCH
+		}
+		break
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+
+	case AFMOVS:
+		if p.From.Type_ == D_FCONST {
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			// Name the literal by its bit pattern so equal constants
+			// share one symbol.
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 4
+			p.From.Type_ = D_OREG
+			p.From.Sym = s
+			p.From.Name = D_EXTERN
+			p.From.Offset = 0
+		}
+
+	case AFMOVD:
+		if p.From.Type_ == D_FCONST {
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 8
+			p.From.Type_ = D_OREG
+			p.From.Sym = s
+			p.From.Name = D_EXTERN
+			p.From.Offset = 0
+		}
+
+	// Put >32-bit constants in memory and load them
+	case AMOVD:
+		if p.From.Type_ == D_CONST && p.From.Name == D_NONE && p.From.Reg == NREG && int64(int32(p.From.Offset)) != p.From.Offset {
+
+			literal = fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
+			s = obj.Linklookup(ctxt, literal, 0)
+			s.Size = 8
+			p.From.Type_ = D_OREG
+			p.From.Sym = s
+			p.From.Name = D_EXTERN
+			p.From.Offset = 0
+		}
+	}
+
+	// Rewrite SUB constants into ADD.
+	switch p.As {
+
+	case ASUBC:
+		if p.From.Type_ == D_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADDC
+		}
+
+	case ASUBCCC:
+		if p.From.Type_ == D_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADDCCC
+		}
+
+	case ASUB:
+		if p.From.Type_ == D_CONST {
+			p.From.Offset = -p.From.Offset
+			p.As = AADD
+		}
+
+	break
+	}
+}
+
+// parsetextconst unpacks the 64-bit TEXT size word: the low 32 bits are
+// the (sign-extended) local stack size and the high 32 bits are the
+// argument size, rounded up to a multiple of 8. A negative argument
+// size means "unknown" and is mapped to 0.
+func parsetextconst(arg int64, textstksiz *int64, textarg *int64) {
+	stksiz := arg & 0xffffffff
+	if stksiz&0x80000000 != 0 {
+		// Sign-extend the low 32-bit word.
+		stksiz = -(-stksiz & 0xffffffff)
+	}
+	*textstksiz = stksiz
+
+	argsiz := (arg >> 32) & 0xffffffff
+	if argsiz&0x80000000 != 0 {
+		argsiz = 0
+	}
+	*textarg = (argsiz + 7) &^ 7
+}
+
+// addstacksplit is the ppc64 per-function pre-pass. It makes two sweeps
+// over cursym's instruction list: the first classifies instructions
+// (leaf detection, LABEL/SYNC/BRANCH/FLOAT scheduling marks, NOP
+// stripping); the second expands the TEXT prologue (frame allocation,
+// LR save, stack-split check, wrapper panic-argp fixup) and rewrites
+// RETURN into the matching epilogue, tracking SP adjustments in Spadj.
+func addstacksplit(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var q1 *obj.Prog
+	var o int
+	var mov int
+	var aoffset int
+	var textstksiz int64
+	var textarg int64
+	var autosize int32
+
+	if ctxt.Symmorestack[0] == nil {
+		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+	}
+
+	// TODO(minux): add morestack short-cuts with small fixed frame-size.
+	ctxt.Cursym = cursym
+
+	if cursym.Text == nil || cursym.Text.Link == nil {
+		return
+	}
+
+	p = cursym.Text
+	parsetextconst(p.To.Offset, &textstksiz, &textarg)
+
+	cursym.Args = int32(p.To.Offset >> 32)
+	cursym.Locals = int32(textstksiz)
+
+	/*
+	 * find leaf subroutines
+	 * strip NOPs
+	 * expand RET
+	 * expand BECOME pseudo
+	 */
+	if ctxt.Debugvlog != 0 {
+
+		fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
+	}
+	obj.Bflush(ctxt.Bso)
+
+	// Pass 1: mark labels, leaves, and scheduling barriers; drop NOPs.
+	q = nil
+	for p = cursym.Text; p != nil; p = p.Link {
+		switch p.As {
+		/* too hard, just leave alone */
+		case ATEXT:
+			q = p
+
+			p.Mark |= LABEL | LEAF | SYNC
+			if p.Link != nil {
+				p.Link.Mark |= LABEL
+			}
+
+		case ANOR:
+			q = p
+			if p.To.Type_ == D_REG {
+				if p.To.Reg == REGZERO {
+					p.Mark |= LABEL | SYNC
+				}
+			}
+
+		case ALWAR,
+			ASTWCCC,
+			AECIWX,
+			AECOWX,
+			AEIEIO,
+			AICBI,
+			AISYNC,
+			ATLBIE,
+			ATLBIEL,
+			ASLBIA,
+			ASLBIE,
+			ASLBMFEE,
+			ASLBMFEV,
+			ASLBMTE,
+			ADCBF,
+			ADCBI,
+			ADCBST,
+			ADCBT,
+			ADCBTST,
+			ADCBZ,
+			ASYNC,
+			ATLBSYNC,
+			APTESYNC,
+			ATW,
+			AWORD,
+			ARFI,
+			ARFCI,
+			ARFID,
+			AHRFID:
+			q = p
+			p.Mark |= LABEL | SYNC
+			continue
+
+		case AMOVW,
+			AMOVWZ,
+			AMOVD:
+			q = p
+			// Moves to or from system registers are scheduling barriers.
+			switch p.From.Type_ {
+			case D_MSR,
+				D_SPR,
+				D_FPSCR,
+				D_CREG,
+				D_DCR:
+				p.Mark |= LABEL | SYNC
+			}
+
+			switch p.To.Type_ {
+			case D_MSR,
+				D_SPR,
+				D_FPSCR,
+				D_CREG,
+				D_DCR:
+				p.Mark |= LABEL | SYNC
+			}
+
+			continue
+
+		case AFABS,
+			AFABSCC,
+			AFADD,
+			AFADDCC,
+			AFCTIW,
+			AFCTIWCC,
+			AFCTIWZ,
+			AFCTIWZCC,
+			AFDIV,
+			AFDIVCC,
+			AFMADD,
+			AFMADDCC,
+			AFMOVD,
+			AFMOVDU,
+			/* case AFMOVDS: */
+			AFMOVS,
+			AFMOVSU,
+
+			/* case AFMOVSD: */
+			AFMSUB,
+			AFMSUBCC,
+			AFMUL,
+			AFMULCC,
+			AFNABS,
+			AFNABSCC,
+			AFNEG,
+			AFNEGCC,
+			AFNMADD,
+			AFNMADDCC,
+			AFNMSUB,
+			AFNMSUBCC,
+			AFRSP,
+			AFRSPCC,
+			AFSUB,
+			AFSUBCC:
+			q = p
+
+			p.Mark |= FLOAT
+			continue
+
+		case ABL,
+			ABCL,
+			ADUFFZERO,
+			ADUFFCOPY:
+			// A call means this function is not a leaf.
+			cursym.Text.Mark &^= LEAF
+			fallthrough
+
+		case ABC,
+			ABEQ,
+			ABGE,
+			ABGT,
+			ABLE,
+			ABLT,
+			ABNE,
+			ABR,
+			ABVC,
+			ABVS:
+			p.Mark |= BRANCH
+			q = p
+			q1 = p.Pcond
+			if q1 != nil {
+				// Skip over NOP targets, retargeting the branch.
+				for q1.As == ANOP {
+					q1 = q1.Link
+					p.Pcond = q1
+				}
+
+				if !(q1.Mark&LEAF != 0) {
+					q1.Mark |= LABEL
+				}
+			} else {
+
+				p.Mark |= LABEL
+			}
+			q1 = p.Link
+			if q1 != nil {
+				q1.Mark |= LABEL
+			}
+			continue
+
+		case AFCMPO,
+			AFCMPU:
+			q = p
+			p.Mark |= FCMP | FLOAT
+			continue
+
+		case ARETURN:
+			q = p
+			if p.Link != nil {
+				p.Link.Mark |= LABEL
+			}
+			continue
+
+		case ANOP:
+			q1 = p.Link
+			q.Link = q1 /* q is non-nop */
+			q1.Mark |= p.Mark
+			continue
+
+		default:
+			q = p
+			continue
+		}
+	}
+
+	// Pass 2: expand the TEXT prologue and RETURN epilogues.
+	autosize = 0
+	for p = cursym.Text; p != nil; p = p.Link {
+		o = int(p.As)
+		switch o {
+		case ATEXT:
+			mov = AMOVD
+			aoffset = 0
+			// Frame size plus the 8-byte saved LR slot.
+			autosize = int32(textstksiz + 8)
+			if (p.Mark&LEAF != 0) && autosize <= 8 {
+				autosize = 0
+			} else if autosize&4 != 0 {
+				autosize += 4
+			}
+			p.To.Offset = int64(uint64(p.To.Offset)&(0xffffffff<<32) | uint64(uint32(autosize-8)))
+
+			if !(p.Reg&obj.NOSPLIT != 0) {
+				p = stacksplit(ctxt, p, autosize, bool2int(!(cursym.Text.Reg&obj.NEEDCTXT != 0))) // emit split check
+			}
+
+			q = p
+
+			if autosize != 0 {
+				/* use MOVDU to adjust R1 when saving R31, if autosize is small */
+				if !(cursym.Text.Mark&LEAF != 0) && autosize >= -BIG && autosize <= BIG {
+
+					mov = AMOVDU
+					aoffset = int(-autosize)
+				} else {
+
+					q = obj.Appendp(ctxt, p)
+					q.As = AADD
+					q.Lineno = p.Lineno
+					q.From.Type_ = D_CONST
+					q.From.Offset = int64(-autosize)
+					q.To.Type_ = D_REG
+					q.To.Reg = REGSP
+					q.Spadj = +autosize
+				}
+			} else if !(cursym.Text.Mark&LEAF != 0) {
+				if ctxt.Debugvlog != 0 {
+					fmt.Fprintf(ctxt.Bso, "save suppressed in: %s\n", cursym.Name)
+					obj.Bflush(ctxt.Bso)
+				}
+
+				cursym.Text.Mark |= LEAF
+			}
+
+			if cursym.Text.Mark&LEAF != 0 {
+				cursym.Leaf = 1
+				break
+			}
+
+			// Non-leaf: save LR (via REGTMP) into the frame.
+			q = obj.Appendp(ctxt, q)
+			q.As = AMOVD
+			q.Lineno = p.Lineno
+			q.From.Type_ = D_SPR
+			q.From.Offset = D_LR
+			q.To.Type_ = D_REG
+			q.To.Reg = REGTMP
+
+			q = obj.Appendp(ctxt, q)
+			q.As = int16(mov)
+			q.Lineno = p.Lineno
+			q.From.Type_ = D_REG
+			q.From.Reg = REGTMP
+			q.To.Type_ = D_OREG
+			q.To.Offset = int64(aoffset)
+			q.To.Reg = REGSP
+			if q.As == AMOVDU {
+				q.Spadj = int32(-aoffset)
+			}
+
+			if cursym.Text.Reg&obj.WRAPPER != 0 {
+				// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+				//
+				// MOVD g_panic(g), R3
+				// CMP R0, R3
+				// BEQ end
+				// MOVD panic_argp(R3), R4
+				// ADD $(autosize+8), R1, R5
+				// CMP R4, R5
+				// BNE end
+				// ADD $8, R1, R6
+				// MOVD R6, panic_argp(R3)
+				// end:
+				// NOP
+				//
+				// The NOP is needed to give the jumps somewhere to land.
+				// It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.
+
+				q = obj.Appendp(ctxt, q)
+
+				q.As = AMOVD
+				q.From.Type_ = D_OREG
+				q.From.Reg = REGG
+				q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+				q.To.Type_ = D_REG
+				q.To.Reg = 3
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ACMP
+				q.From.Type_ = D_REG
+				q.From.Reg = 0
+				q.To.Type_ = D_REG
+				q.To.Reg = 3
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ABEQ
+				q.To.Type_ = D_BRANCH
+				p1 = q
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AMOVD
+				q.From.Type_ = D_OREG
+				q.From.Reg = 3
+				q.From.Offset = 0 // Panic.argp
+				q.To.Type_ = D_REG
+				q.To.Reg = 4
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AADD
+				q.From.Type_ = D_CONST
+				q.From.Offset = int64(autosize) + 8
+				q.Reg = REGSP
+				q.To.Type_ = D_REG
+				q.To.Reg = 5
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ACMP
+				q.From.Type_ = D_REG
+				q.From.Reg = 4
+				q.To.Type_ = D_REG
+				q.To.Reg = 5
+
+				q = obj.Appendp(ctxt, q)
+				q.As = ABNE
+				q.To.Type_ = D_BRANCH
+				p2 = q
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AADD
+				q.From.Type_ = D_CONST
+				q.From.Offset = 8
+				q.Reg = REGSP
+				q.To.Type_ = D_REG
+				q.To.Reg = 6
+
+				q = obj.Appendp(ctxt, q)
+				q.As = AMOVD
+				q.From.Type_ = D_REG
+				q.From.Reg = 6
+				q.To.Type_ = D_OREG
+				q.To.Reg = 3
+				q.To.Offset = 0 // Panic.argp
+
+				q = obj.Appendp(ctxt, q)
+
+				q.As = ANOP
+				p1.Pcond = q
+				p2.Pcond = q
+			}
+
+		case ARETURN:
+			if p.From.Type_ == D_CONST {
+				ctxt.Diag("using BECOME (%v) is not supported!", p)
+				break
+			}
+
+			if p.To.Sym != nil { // retjmp
+				p.As = ABR
+				p.To.Type_ = D_BRANCH
+				break
+			}
+
+			if cursym.Text.Mark&LEAF != 0 {
+				if !(autosize != 0) {
+					// Frameless leaf: just branch through LR.
+					p.As = ABR
+					p.From = zprg.From
+					p.To.Type_ = D_SPR
+					p.To.Offset = D_LR
+					p.Mark |= BRANCH
+					break
+				}
+
+				// Leaf with frame: pop the frame, then branch through LR.
+				p.As = AADD
+				p.From.Type_ = D_CONST
+				p.From.Offset = int64(autosize)
+				p.To.Type_ = D_REG
+				p.To.Reg = REGSP
+				p.Spadj = -autosize
+
+				q = ctxt.Arch.Prg()
+				q.As = ABR
+				q.Lineno = p.Lineno
+				q.To.Type_ = D_SPR
+				q.To.Offset = D_LR
+				q.Mark |= BRANCH
+				q.Spadj = +autosize
+
+				q.Link = p.Link
+				p.Link = q
+				break
+			}
+
+			// Non-leaf: reload the saved LR, pop the frame, branch via LR.
+			p.As = AMOVD
+			p.From.Type_ = D_OREG
+			p.From.Offset = 0
+			p.From.Reg = REGSP
+			p.To.Type_ = D_REG
+			p.To.Reg = REGTMP
+
+			q = ctxt.Arch.Prg()
+			q.As = AMOVD
+			q.Lineno = p.Lineno
+			q.From.Type_ = D_REG
+			q.From.Reg = REGTMP
+			q.To.Type_ = D_SPR
+			q.To.Offset = D_LR
+
+			q.Link = p.Link
+			p.Link = q
+			p = q
+
+			if false {
+				// Debug bad returns
+				q = ctxt.Arch.Prg()
+
+				q.As = AMOVD
+				q.Lineno = p.Lineno
+				q.From.Type_ = D_OREG
+				q.From.Offset = 0
+				q.From.Reg = REGTMP
+				q.To.Type_ = D_REG
+				q.To.Reg = REGTMP
+
+				q.Link = p.Link
+				p.Link = q
+				p = q
+			}
+
+			if autosize != 0 {
+				q = ctxt.Arch.Prg()
+				q.As = AADD
+				q.Lineno = p.Lineno
+				q.From.Type_ = D_CONST
+				q.From.Offset = int64(autosize)
+				q.To.Type_ = D_REG
+				q.To.Reg = REGSP
+				q.Spadj = -autosize
+
+				q.Link = p.Link
+				p.Link = q
+			}
+
+			q1 = ctxt.Arch.Prg()
+			q1.As = ABR
+			q1.Lineno = p.Lineno
+			q1.To.Type_ = D_SPR
+			q1.To.Offset = D_LR
+			q1.Mark |= BRANCH
+			q1.Spadj = +autosize
+
+			q1.Link = q.Link
+			q.Link = q1
+
+		case AADD:
+			// Track explicit SP adjustments for unwinding.
+			if p.To.Type_ == D_REG && p.To.Reg == REGSP && p.From.Type_ == D_CONST {
+				p.Spadj = int32(-p.From.Offset)
+			}
+			break
+		}
+	}
+}
+
+/*
+// instruction scheduling
+ if(debug['Q'] == 0)
+ return;
+
+ curtext = nil;
+ q = nil; // p - 1
+ q1 = firstp; // top of block
+ o = 0; // count of instructions
+ for(p = firstp; p != nil; p = p1) {
+ p1 = p->link;
+ o++;
+ if(p->mark & NOSCHED){
+ if(q1 != p){
+ sched(q1, q);
+ }
+ for(; p != nil; p = p->link){
+ if(!(p->mark & NOSCHED))
+ break;
+ q = p;
+ }
+ p1 = p;
+ q1 = p;
+ o = 0;
+ continue;
+ }
+ if(p->mark & (LABEL|SYNC)) {
+ if(q1 != p)
+ sched(q1, q);
+ q1 = p;
+ o = 1;
+ }
+ if(p->mark & (BRANCH|SYNC)) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ if(o >= NSCHED) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ q = p;
+ }
+*/
+// stacksplit emits the stack-overflow check after the TEXT instruction
+// p. It loads g's stackguard into R3, compares it against SP using one
+// of three sequences chosen by framesize (small, large, or
+// wraparound-safe huge), and on overflow saves LR in R5 and calls
+// runtime.morestack (the noctxt/cfunc variant as appropriate) before
+// branching back to the start of the function body. It returns the
+// last instruction emitted: a zero-width NOP that the "BLT done"
+// branch targets.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, noctxt int) *obj.Prog {
+
+	var q *obj.Prog
+	var q1 *obj.Prog
+
+	// MOVD g_stackguard(g), R3
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVD
+	p.From.Type_ = D_OREG
+	p.From.Reg = REGG
+	p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+	if ctxt.Cursym.Cfunc != 0 {
+		p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+	}
+	p.To.Type_ = D_REG
+	p.To.Reg = 3
+
+	q = nil
+	if framesize <= obj.StackSmall {
+		// small stack: SP < stackguard
+		//	CMP	stackguard, SP
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMPU
+		p.From.Type_ = D_REG
+		p.From.Reg = 3
+		p.To.Type_ = D_REG
+		p.To.Reg = REGSP
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize < stackguard-StackSmall
+		//	ADD $-framesize, SP, R4
+		//	CMP stackguard, R4
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AADD
+		p.From.Type_ = D_CONST
+		p.From.Offset = int64(-framesize)
+		p.Reg = REGSP
+		p.To.Type_ = D_REG
+		p.To.Reg = 4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPU
+		p.From.Type_ = D_REG
+		p.From.Reg = 3
+		p.To.Type_ = D_REG
+		p.To.Reg = 4
+	} else {
+
+		// Such a large stack we need to protect against wraparound.
+		// If SP is close to zero:
+		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//
+		// Preemption sets stackguard to StackPreempt, a very large value.
+		// That breaks the math above, so we have to check for that explicitly.
+		//	// stackguard is R3
+		//	CMP	R3, $StackPreempt
+		//	BEQ	label-of-call-to-morestack
+		//	ADD	$StackGuard, SP, R4
+		//	SUB	R3, R4
+		//	MOVD	$(framesize+(StackGuard-StackSmall)), R31
+		//	CMPU	R31, R4
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ACMP
+		p.From.Type_ = D_REG
+		p.From.Reg = 3
+		p.To.Type_ = D_CONST
+		p.To.Offset = obj.StackPreempt
+
+		p = obj.Appendp(ctxt, p)
+		q = p
+		p.As = ABEQ
+		p.To.Type_ = D_BRANCH
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AADD
+		p.From.Type_ = D_CONST
+		p.From.Offset = obj.StackGuard
+		p.Reg = REGSP
+		p.To.Type_ = D_REG
+		p.To.Reg = 4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASUB
+		p.From.Type_ = D_REG
+		p.From.Reg = 3
+		p.To.Type_ = D_REG
+		p.To.Reg = 4
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVD
+		p.From.Type_ = D_CONST
+		p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
+		p.To.Type_ = D_REG
+		p.To.Reg = REGTMP
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPU
+		p.From.Type_ = D_REG
+		p.From.Reg = REGTMP
+		p.To.Type_ = D_REG
+		p.To.Reg = 4
+	}
+
+	// q1: BLT	done
+	p = obj.Appendp(ctxt, p)
+	q1 = p
+
+	p.As = ABLT
+	p.To.Type_ = D_BRANCH
+
+	// MOVD	LR, R5
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AMOVD
+	p.From.Type_ = D_SPR
+	p.From.Offset = D_LR
+	p.To.Type_ = D_REG
+	p.To.Reg = 5
+	if q != nil {
+		q.Pcond = p
+	}
+
+	// BL	runtime.morestack(SB)
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABL
+	p.To.Type_ = D_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+
+		p.To.Sym = ctxt.Symmorestack[noctxt]
+	}
+
+	// BR	start
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ABR
+	p.To.Type_ = D_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	// placeholder for q1's jump target
+	p = obj.Appendp(ctxt, p)
+
+	p.As = ANOP // zero-width place holder
+	q1.Pcond = p
+
+	return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.Arch.Prg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+// relinv returns the conditional branch with the opposite sense of a
+// (BEQ<->BNE, BGE<->BLT, BGT<->BLE, BVC<->BVS), or 0 when a has no
+// inverse.
+func relinv(a int) int {
+	inverses := [...][2]int{
+		{ABEQ, ABNE},
+		{ABGE, ABLT},
+		{ABGT, ABLE},
+		{ABVC, ABVS},
+	}
+	for _, pair := range inverses {
+		switch a {
+		case pair[0]:
+			return pair[1]
+		case pair[1]:
+			return pair[0]
+		}
+	}
+	return 0
+}
+
+// xfol appends the instructions reachable from p to *last in
+// fallthrough order. Unconditional branches are followed into their
+// targets where profitable: short already-laid-out runs (up to 4
+// instructions ending in a terminator or invertible branch) are
+// duplicated, with conditional branches flipped via relinv, so that
+// the common path falls through. FOLL marks visited instructions;
+// NOSCHED regions are kept intact.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var r *obj.Prog
+	var a int
+	var b int
+	var i int
+
+loop:
+	if p == nil {
+		return
+	}
+	a = int(p.As)
+	if a == ABR {
+		q = p.Pcond
+		if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
+			p.Mark |= FOLL
+			(*last).Link = p
+			*last = p
+			p = p.Link
+			xfol(ctxt, p, last)
+			p = q
+			if p != nil && !(p.Mark&FOLL != 0) {
+				goto loop
+			}
+			return
+		}
+
+		if q != nil {
+			// Elide the branch and continue at its target.
+			p.Mark |= FOLL
+			p = q
+			if !(p.Mark&FOLL != 0) {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark&FOLL != 0 {
+		// Already placed: look a few instructions ahead for a point
+		// where the run can be duplicated instead of branched to.
+		i = 0
+		q = p
+		for ; i < 4; (func() { i++; q = q.Link })() {
+			if q == *last || (q.Mark&NOSCHED != 0) {
+				break
+			}
+			b = 0 /* set */
+			a = int(q.As)
+			if a == ANOP {
+				i--
+				continue
+			}
+
+			if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+				goto copy
+			}
+			if !(q.Pcond != nil) || (q.Pcond.Mark&FOLL != 0) {
+				continue
+			}
+			b = relinv(a)
+			if !(b != 0) {
+				continue
+			}
+
+		copy:
+			// Duplicate p..q, inverting the final branch (if any).
+			for {
+				r = ctxt.Arch.Prg()
+				*r = *p
+				if !(r.Mark&FOLL != 0) {
+					fmt.Printf("cant happen 1\n")
+				}
+				r.Mark |= FOLL
+				if p != q {
+					p = p.Link
+					(*last).Link = r
+					*last = r
+					continue
+				}
+
+				(*last).Link = r
+				*last = r
+				if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+					return
+				}
+				r.As = int16(b)
+				r.Pcond = p.Link
+				r.Link = p.Pcond
+				if !(r.Link.Mark&FOLL != 0) {
+					xfol(ctxt, r.Link, last)
+				}
+				if !(r.Pcond.Mark&FOLL != 0) {
+					fmt.Printf("cant happen 2\n")
+				}
+				return
+			}
+		}
+
+		// No duplication point found: emit an explicit branch to p.
+		a = ABR
+		q = ctxt.Arch.Prg()
+		q.As = int16(a)
+		q.Lineno = p.Lineno
+		q.To.Type_ = D_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	p.Mark |= FOLL
+	(*last).Link = p
+	*last = p
+	if a == ABR || a == ARETURN || a == ARFI || a == ARFCI || a == ARFID || a == AHRFID {
+		if p.Mark&NOSCHED != 0 {
+			p = p.Link
+			goto loop
+		}
+
+		return
+	}
+
+	if p.Pcond != nil {
+		if a != ABL && p.Link != nil {
+			xfol(ctxt, p.Link, last)
+			p = p.Pcond
+			if p == nil || (p.Mark&FOLL != 0) {
+				return
+			}
+			goto loop
+		}
+	}
+
+	p = p.Link
+	goto loop
+}
+
+func prg() *obj.Prog {
+ var p *obj.Prog
+
+ p = new(obj.Prog)
+ *p = zprg
+ return p
+}
+
+// Linkppc64 describes the big-endian ppc64 target for the obj back end:
+// architecture hooks, sizes, and the mapping from generic pseudo-ops
+// and operand types to their ppc64 values.
+var Linkppc64 = obj.LinkArch{
+	ByteOrder: binary.BigEndian,
+	Pconv: Pconv,
+	Name: "ppc64",
+	Thechar: '9',
+	Endian: obj.BigEndian,
+	Addstacksplit: addstacksplit,
+	Assemble: span9,
+	Datasize: datasize,
+	Follow: follow,
+	Iscall: iscall,
+	Isdata: isdata,
+	Prg: prg,
+	Progedit: progedit,
+	Settextflag: settextflag,
+	Symtype: symtype,
+	Textflag: textflag,
+	Minlc: 4,
+	Ptrsize: 8,
+	Regsize: 8,
+	D_ADDR: D_ADDR,
+	D_AUTO: D_AUTO,
+	D_BRANCH: D_BRANCH,
+	D_CONST: D_CONST,
+	D_EXTERN: D_EXTERN,
+	D_FCONST: D_FCONST,
+	D_NONE: D_NONE,
+	D_PARAM: D_PARAM,
+	D_SCONST: D_SCONST,
+	D_STATIC: D_STATIC,
+	D_OREG: D_OREG,
+	ACALL: ABL,
+	ADATA: ADATA,
+	AEND: AEND,
+	AFUNCDATA: AFUNCDATA,
+	AGLOBL: AGLOBL,
+	AJMP: ABR,
+	ANOP: ANOP,
+	APCDATA: APCDATA,
+	ARET: ARETURN,
+	ATEXT: ATEXT,
+	ATYPE: ATYPE,
+	AUSEFIELD: AUSEFIELD,
+}
+
+// Linkppc64le is identical to Linkppc64 except for its name and
+// little-endian byte order.
+var Linkppc64le = obj.LinkArch{
+	ByteOrder: binary.LittleEndian,
+	Pconv: Pconv,
+	Name: "ppc64le",
+	Thechar: '9',
+	Endian: obj.LittleEndian,
+	Addstacksplit: addstacksplit,
+	Assemble: span9,
+	Datasize: datasize,
+	Follow: follow,
+	Iscall: iscall,
+	Isdata: isdata,
+	Prg: prg,
+	Progedit: progedit,
+	Settextflag: settextflag,
+	Symtype: symtype,
+	Textflag: textflag,
+	Minlc: 4,
+	Ptrsize: 8,
+	Regsize: 8,
+	D_ADDR: D_ADDR,
+	D_AUTO: D_AUTO,
+	D_BRANCH: D_BRANCH,
+	D_CONST: D_CONST,
+	D_EXTERN: D_EXTERN,
+	D_FCONST: D_FCONST,
+	D_NONE: D_NONE,
+	D_PARAM: D_PARAM,
+	D_SCONST: D_SCONST,
+	D_STATIC: D_STATIC,
+	D_OREG: D_OREG,
+	ACALL: ABL,
+	ADATA: ADATA,
+	AEND: AEND,
+	AFUNCDATA: AFUNCDATA,
+	AGLOBL: AGLOBL,
+	AJMP: ABR,
+	ANOP: ANOP,
+	APCDATA: APCDATA,
+	ARET: ARETURN,
+	ATEXT: ATEXT,
+	ATYPE: ATYPE,
+	AUSEFIELD: AUSEFIELD,
+}
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+const (
+	// fmtLong is a Dconv flag requesting the two-word form of a
+	// D_CONST operand ("$lo-hi"), used for the combined stack/argument
+	// size word of an ATEXT.
+	fmtLong = 1 << iota
+)
+
+// bool2int converts a boolean to 1 (true) or 0 (false).
+func bool2int(b bool) int {
+	n := 0
+	if b {
+		n = 1
+	}
+	return n
+}
--- /dev/null
+// Inferno utils/5l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+// Instruction layout.
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// For the linkers. Must match Go definitions.
+// TODO(rsc): Share Go definitions with linkers directly.
+
+const (
+	// StackSystem is extra per-OS reserved stack space; zero for the
+	// targets covered here.
+	StackSystem = 0
+	StackBig = 4096
+	StackGuard = 640 + StackSystem
+	StackSmall = 128
+	StackLimit = StackGuard - StackSystem - StackSmall
+)
+
+const (
+	// StackPreempt is the sentinel stackguard value used to force the
+	// stack-split check to fail so the goroutine enters the runtime
+	// (see stacksplit, which tests for it explicitly).
+	StackPreempt = -1314 // 0xfff...fade
+)
--- /dev/null
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "path/filepath"
+)
+
+// yy_isalpha reports whether c is an ASCII letter (a-z or A-Z).
+func yy_isalpha(c int) bool {
+	switch {
+	case 'a' <= c && c <= 'z':
+		return true
+	case 'A' <= c && c <= 'Z':
+		return true
+	default:
+		return false
+	}
+}
+
+// headers maps target OS names (as used in GOOS and -H flags) to their
+// Hxxx header-type constants. Several names may share one value
+// (android->Hlinux, windowsgui->Hwindows); lookups are by headtype and
+// Headstr below. The element type is elided in each entry, replacing
+// the repeated anonymous struct literals produced by the mechanical
+// C-to-Go conversion.
+var headers = []struct {
+	name string
+	val int
+}{
+	{"android", Hlinux},
+	{"darwin", Hdarwin},
+	{"dragonfly", Hdragonfly},
+	{"elf", Helf},
+	{"freebsd", Hfreebsd},
+	{"linux", Hlinux},
+	{"nacl", Hnacl},
+	{"netbsd", Hnetbsd},
+	{"openbsd", Hopenbsd},
+	{"plan9", Hplan9},
+	{"solaris", Hsolaris},
+	{"windows", Hwindows},
+	{"windowsgui", Hwindows},
+}
+
+func headtype(name string) int {
+ var i int
+
+ for i = 0; i < len(headers); i++ {
+ if name == headers[i].name {
+ return headers[i].val
+ }
+ }
+ return -1
+}
+
+func Headstr(v int) string {
+ var buf string
+ var i int
+
+ for i = 0; i < len(headers); i++ {
+ if v == headers[i].val {
+ return headers[i].name
+ }
+ }
+ buf = fmt.Sprintf("%d", v)
+ return buf
+}
+
+// Linknew allocates and initializes a Link context for arch. It records
+// GOROOT and the working directory, resolves the target OS (GOOS) to a
+// header type, fills in the platform's thread-local storage offset, and
+// on arm records GOARM (defaulting to 6).
+func Linknew(arch *LinkArch) *Link {
+	var ctxt *Link
+	var p string
+	var buf string
+
+	ctxt = new(Link)
+	ctxt.Arch = arch
+	ctxt.Version = HistVersion
+	ctxt.Goroot = Getgoroot()
+	ctxt.Goroot_final = os.Getenv("GOROOT_FINAL")
+
+	buf, _ = os.Getwd()
+	if buf == "" {
+		buf = "/???"
+	}
+	buf = filepath.ToSlash(buf)
+
+	ctxt.Pathname = buf
+
+	ctxt.Headtype = headtype(Getgoos())
+	if ctxt.Headtype < 0 {
+		log.Fatalf("unknown goos %s", Getgoos())
+	}
+
+	// Record thread-local storage offset.
+	// TODO(rsc): Move tlsoffset back into the linker.
+	switch ctxt.Headtype {
+
+	default:
+		log.Fatalf("unknown thread-local storage offset for %s", Headstr(ctxt.Headtype))
+
+	case Hplan9,
+		Hwindows:
+		break
+
+	/*
+	 * ELF uses TLS offset negative from FS.
+	 * Translate 0(FS) and 8(FS) into -16(FS) and -8(FS).
+	 * Known to low-level assembly in package runtime and runtime/cgo.
+	 */
+	case Hlinux,
+		Hfreebsd,
+		Hnetbsd,
+		Hopenbsd,
+		Hdragonfly,
+		Hsolaris:
+		ctxt.Tlsoffset = -2 * ctxt.Arch.Ptrsize
+
+	case Hnacl:
+		switch ctxt.Arch.Thechar {
+		default:
+			log.Fatalf("unknown thread-local storage offset for nacl/%s", ctxt.Arch.Name)
+
+		case '6':
+			ctxt.Tlsoffset = 0
+
+		case '8':
+			ctxt.Tlsoffset = -8
+
+		case '5':
+			ctxt.Tlsoffset = 0
+			break
+		}
+
+	/*
+	 * OS X system constants - offset from 0(GS) to our TLS.
+	 * Explained in ../../runtime/cgo/gcc_darwin_*.c.
+	 */
+	case Hdarwin:
+		switch ctxt.Arch.Thechar {
+
+		default:
+			log.Fatalf("unknown thread-local storage offset for darwin/%s", ctxt.Arch.Name)
+
+		case '6':
+			ctxt.Tlsoffset = 0x8a0
+
+		case '8':
+			ctxt.Tlsoffset = 0x468
+			break
+		}
+
+		break
+	}
+
+	// On arm, record goarm.
+	if ctxt.Arch.Thechar == '5' {
+
+		p = Getgoarm()
+		if p != "" {
+			ctxt.Goarm = int32(Atoi(p))
+		} else {
+
+			ctxt.Goarm = 6
+		}
+	}
+
+	return ctxt
+}
+
+func linknewsym(ctxt *Link, symb string, v int) *LSym {
+ var s *LSym
+
+ s = new(LSym)
+ *s = LSym{}
+
+ s.Dynid = -1
+ s.Plt = -1
+ s.Got = -1
+ s.Name = symb
+ s.Type_ = 0
+ s.Version = int16(v)
+ s.Value = 0
+ s.Sig = 0
+ s.Size = 0
+ ctxt.Nsymbol++
+
+ s.Allsym = ctxt.Allsym
+ ctxt.Allsym = s
+
+ return s
+}
+
+func _lookup(ctxt *Link, symb string, v int, creat int) *LSym {
+ var s *LSym
+ var h uint32
+
+ h = uint32(v)
+ for i := 0; i < len(symb); i++ {
+ c := int(symb[i])
+ h = h + h + h + uint32(c)
+ }
+ h &= 0xffffff
+ h %= LINKHASH
+ for s = ctxt.Hash[h]; s != nil; s = s.Hash {
+ if int(s.Version) == v && s.Name == symb {
+ return s
+ }
+ }
+ if !(creat != 0) {
+ return nil
+ }
+
+ s = linknewsym(ctxt, symb, v)
+ s.Extname = s.Name
+ s.Hash = ctxt.Hash[h]
+ ctxt.Hash[h] = s
+
+ return s
+}
+
// Linklookup returns the symbol with the given name and version,
// creating it if it does not already exist.
func Linklookup(ctxt *Link, name string, v int) *LSym {
	return _lookup(ctxt, name, v, 1)
}
+
// linkrlookup is a read-only lookup: it returns the symbol with the
// given name and version, or nil if no such symbol exists.
func linkrlookup(ctxt *Link, name string, v int) *LSym {

	return _lookup(ctxt, name, v, 0)
}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines flags attached to various functions
+// and data objects. The compilers, assemblers, and linker must
+// all agree on these values.
+
+package obj
+
const (
	// NOPROF: don't profile the marked routine. This flag is deprecated.
	NOPROF = 1 << iota

	// DUPOK: it is ok for the linker to get multiple of these symbols.
	// It will pick one of the duplicates to use.
	DUPOK

	// NOSPLIT: don't insert the stack-check preamble.
	NOSPLIT

	// RODATA: put this data in a read-only section.
	RODATA

	// NOPTR: this data contains no pointers.
	NOPTR

	// WRAPPER: this is a wrapper function and should not count as
	// disabling 'recover'.
	WRAPPER

	// NEEDCTXT: this function uses its incoming context register.
	NEEDCTXT
)
--- /dev/null
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "bufio"
+ "fmt"
+ "go/build"
+ "io"
+ "os"
+ "strconv"
+ "time"
+)
+
// start is set lazily by the first call to Cputime.
var start time.Time

// Cputime reports elapsed wall-clock seconds since its first call.
// The first call starts the clock and returns approximately zero.
// (Despite the name, this is wall time, not CPU time.)
func Cputime() float64 {
	if start.IsZero() {
		start = time.Now()
	}
	elapsed := time.Since(start)
	return elapsed.Seconds()
}
+
+type Biobuf struct {
+ unget int
+ haveUnget bool
+ r *bufio.Reader
+ w *bufio.Writer
+}
+
+func Binitw(w io.Writer) *Biobuf {
+ return &Biobuf{w: bufio.NewWriter(w)}
+}
+
+func (b *Biobuf) Write(p []byte) (int, error) {
+ return b.w.Write(p)
+}
+
+func (b *Biobuf) Flush() error {
+ return b.w.Flush()
+}
+
+func Bwrite(b *Biobuf, p []byte) (int, error) {
+ return b.w.Write(p)
+}
+
+func Bputc(b *Biobuf, c byte) {
+ b.w.WriteByte(c)
+}
+
+func Bgetc(b *Biobuf) int {
+ if b.haveUnget {
+ b.haveUnget = false
+ return int(b.unget)
+ }
+ c, err := b.r.ReadByte()
+ if err != nil {
+ b.unget = -1
+ return -1
+ }
+ b.unget = int(c)
+ return int(c)
+}
+
+func Bungetc(b *Biobuf) {
+ b.haveUnget = true
+}
+
+func Boffset(b *Biobuf) int64 {
+ panic("Boffset")
+}
+
+func Bflush(b *Biobuf) error {
+ return b.w.Flush()
+}
+
// Getgoroot returns the effective GOROOT from the default go/build context.
func Getgoroot() string {
	return build.Default.GOROOT
}

// Getgoarch returns the effective GOARCH from the default go/build context.
func Getgoarch() string {
	return build.Default.GOARCH
}

// Getgoos returns the effective GOOS from the default go/build context.
func Getgoos() string {
	return build.Default.GOOS
}
+
// Atoi converts s to an int, ignoring any parse error (mirroring C's
// forgiving atoi). Note: on a parse error strconv.Atoi still returns a
// best-effort value (0 for syntax errors, a clamped value for range
// errors), and that value is passed through unchanged.
func Atoi(s string) int {
	n, _ := strconv.Atoi(s)
	return n
}
+
+func Getgoarm() string {
+ env := os.Getenv("GOARM")
+ if env != "" {
+ return env
+ }
+ return "5"
+}
+
// Line returns the source-line description for p, formatted through
// the link context's line-history tables via linklinefmt.
func (p *Prog) Line() string {
	return linklinefmt(p.Ctxt, int(p.Lineno), false, false)
}
+
+func (p *Prog) String() string {
+ if p.Ctxt == nil {
+ return fmt.Sprintf("<Prog without ctxt>")
+ }
+ return p.Ctxt.Arch.Pconv(p)
+}
--- /dev/null
+// Inferno utils/6c/6.out.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/6.out.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+/*
+ * amd64
+ */
+const (
+ AXXX = iota
+ AAAA
+ AAAD
+ AAAM
+ AAAS
+ AADCB
+ AADCL
+ AADCW
+ AADDB
+ AADDL
+ AADDW
+ AADJSP
+ AANDB
+ AANDL
+ AANDW
+ AARPL
+ ABOUNDL
+ ABOUNDW
+ ABSFL
+ ABSFW
+ ABSRL
+ ABSRW
+ ABTL
+ ABTW
+ ABTCL
+ ABTCW
+ ABTRL
+ ABTRW
+ ABTSL
+ ABTSW
+ ABYTE
+ ACALL
+ ACLC
+ ACLD
+ ACLI
+ ACLTS
+ ACMC
+ ACMPB
+ ACMPL
+ ACMPW
+ ACMPSB
+ ACMPSL
+ ACMPSW
+ ADAA
+ ADAS
+ ADATA
+ ADECB
+ ADECL
+ ADECQ
+ ADECW
+ ADIVB
+ ADIVL
+ ADIVW
+ AENTER
+ AGLOBL
+ AGOK
+ AHISTORY
+ AHLT
+ AIDIVB
+ AIDIVL
+ AIDIVW
+ AIMULB
+ AIMULL
+ AIMULW
+ AINB
+ AINL
+ AINW
+ AINCB
+ AINCL
+ AINCQ
+ AINCW
+ AINSB
+ AINSL
+ AINSW
+ AINT
+ AINTO
+ AIRETL
+ AIRETW
+ AJCC
+ AJCS
+ AJCXZL
+ AJEQ
+ AJGE
+ AJGT
+ AJHI
+ AJLE
+ AJLS
+ AJLT
+ AJMI
+ AJMP
+ AJNE
+ AJOC
+ AJOS
+ AJPC
+ AJPL
+ AJPS
+ ALAHF
+ ALARL
+ ALARW
+ ALEAL
+ ALEAW
+ ALEAVEL
+ ALEAVEW
+ ALOCK
+ ALODSB
+ ALODSL
+ ALODSW
+ ALONG
+ ALOOP
+ ALOOPEQ
+ ALOOPNE
+ ALSLL
+ ALSLW
+ AMOVB
+ AMOVL
+ AMOVW
+ AMOVBLSX
+ AMOVBLZX
+ AMOVBQSX
+ AMOVBQZX
+ AMOVBWSX
+ AMOVBWZX
+ AMOVWLSX
+ AMOVWLZX
+ AMOVWQSX
+ AMOVWQZX
+ AMOVSB
+ AMOVSL
+ AMOVSW
+ AMULB
+ AMULL
+ AMULW
+ ANAME
+ ANEGB
+ ANEGL
+ ANEGW
+ ANOP
+ ANOTB
+ ANOTL
+ ANOTW
+ AORB
+ AORL
+ AORW
+ AOUTB
+ AOUTL
+ AOUTW
+ AOUTSB
+ AOUTSL
+ AOUTSW
+ APAUSE
+ APOPAL
+ APOPAW
+ APOPFL
+ APOPFW
+ APOPL
+ APOPW
+ APUSHAL
+ APUSHAW
+ APUSHFL
+ APUSHFW
+ APUSHL
+ APUSHW
+ ARCLB
+ ARCLL
+ ARCLW
+ ARCRB
+ ARCRL
+ ARCRW
+ AREP
+ AREPN
+ ARET
+ AROLB
+ AROLL
+ AROLW
+ ARORB
+ ARORL
+ ARORW
+ ASAHF
+ ASALB
+ ASALL
+ ASALW
+ ASARB
+ ASARL
+ ASARW
+ ASBBB
+ ASBBL
+ ASBBW
+ ASCASB
+ ASCASL
+ ASCASW
+ ASETCC
+ ASETCS
+ ASETEQ
+ ASETGE
+ ASETGT
+ ASETHI
+ ASETLE
+ ASETLS
+ ASETLT
+ ASETMI
+ ASETNE
+ ASETOC
+ ASETOS
+ ASETPC
+ ASETPL
+ ASETPS
+ ACDQ
+ ACWD
+ ASHLB
+ ASHLL
+ ASHLW
+ ASHRB
+ ASHRL
+ ASHRW
+ ASTC
+ ASTD
+ ASTI
+ ASTOSB
+ ASTOSL
+ ASTOSW
+ ASUBB
+ ASUBL
+ ASUBW
+ ASYSCALL
+ ATESTB
+ ATESTL
+ ATESTW
+ ATEXT
+ AVERR
+ AVERW
+ AWAIT
+ AWORD
+ AXCHGB
+ AXCHGL
+ AXCHGW
+ AXLAT
+ AXORB
+ AXORL
+ AXORW
+ AFMOVB
+ AFMOVBP
+ AFMOVD
+ AFMOVDP
+ AFMOVF
+ AFMOVFP
+ AFMOVL
+ AFMOVLP
+ AFMOVV
+ AFMOVVP
+ AFMOVW
+ AFMOVWP
+ AFMOVX
+ AFMOVXP
+ AFCOMB
+ AFCOMBP
+ AFCOMD
+ AFCOMDP
+ AFCOMDPP
+ AFCOMF
+ AFCOMFP
+ AFCOML
+ AFCOMLP
+ AFCOMW
+ AFCOMWP
+ AFUCOM
+ AFUCOMP
+ AFUCOMPP
+ AFADDDP
+ AFADDW
+ AFADDL
+ AFADDF
+ AFADDD
+ AFMULDP
+ AFMULW
+ AFMULL
+ AFMULF
+ AFMULD
+ AFSUBDP
+ AFSUBW
+ AFSUBL
+ AFSUBF
+ AFSUBD
+ AFSUBRDP
+ AFSUBRW
+ AFSUBRL
+ AFSUBRF
+ AFSUBRD
+ AFDIVDP
+ AFDIVW
+ AFDIVL
+ AFDIVF
+ AFDIVD
+ AFDIVRDP
+ AFDIVRW
+ AFDIVRL
+ AFDIVRF
+ AFDIVRD
+ AFXCHD
+ AFFREE
+ AFLDCW
+ AFLDENV
+ AFRSTOR
+ AFSAVE
+ AFSTCW
+ AFSTENV
+ AFSTSW
+ AF2XM1
+ AFABS
+ AFCHS
+ AFCLEX
+ AFCOS
+ AFDECSTP
+ AFINCSTP
+ AFINIT
+ AFLD1
+ AFLDL2E
+ AFLDL2T
+ AFLDLG2
+ AFLDLN2
+ AFLDPI
+ AFLDZ
+ AFNOP
+ AFPATAN
+ AFPREM
+ AFPREM1
+ AFPTAN
+ AFRNDINT
+ AFSCALE
+ AFSIN
+ AFSINCOS
+ AFSQRT
+ AFTST
+ AFXAM
+ AFXTRACT
+ AFYL2X
+ AFYL2XP1
+ AEND
+ ADYNT_
+ AINIT_
+ ASIGNAME
+ ACMPXCHGB
+ ACMPXCHGL
+ ACMPXCHGW
+ ACMPXCHG8B
+ ACPUID
+ AINVD
+ AINVLPG
+ ALFENCE
+ AMFENCE
+ AMOVNTIL
+ ARDMSR
+ ARDPMC
+ ARDTSC
+ ARSM
+ ASFENCE
+ ASYSRET
+ AWBINVD
+ AWRMSR
+ AXADDB
+ AXADDL
+ AXADDW
+ ACMOVLCC
+ ACMOVLCS
+ ACMOVLEQ
+ ACMOVLGE
+ ACMOVLGT
+ ACMOVLHI
+ ACMOVLLE
+ ACMOVLLS
+ ACMOVLLT
+ ACMOVLMI
+ ACMOVLNE
+ ACMOVLOC
+ ACMOVLOS
+ ACMOVLPC
+ ACMOVLPL
+ ACMOVLPS
+ ACMOVQCC
+ ACMOVQCS
+ ACMOVQEQ
+ ACMOVQGE
+ ACMOVQGT
+ ACMOVQHI
+ ACMOVQLE
+ ACMOVQLS
+ ACMOVQLT
+ ACMOVQMI
+ ACMOVQNE
+ ACMOVQOC
+ ACMOVQOS
+ ACMOVQPC
+ ACMOVQPL
+ ACMOVQPS
+ ACMOVWCC
+ ACMOVWCS
+ ACMOVWEQ
+ ACMOVWGE
+ ACMOVWGT
+ ACMOVWHI
+ ACMOVWLE
+ ACMOVWLS
+ ACMOVWLT
+ ACMOVWMI
+ ACMOVWNE
+ ACMOVWOC
+ ACMOVWOS
+ ACMOVWPC
+ ACMOVWPL
+ ACMOVWPS
+ AADCQ
+ AADDQ
+ AANDQ
+ ABSFQ
+ ABSRQ
+ ABTCQ
+ ABTQ
+ ABTRQ
+ ABTSQ
+ ACMPQ
+ ACMPSQ
+ ACMPXCHGQ
+ ACQO
+ ADIVQ
+ AIDIVQ
+ AIMULQ
+ AIRETQ
+ AJCXZQ
+ ALEAQ
+ ALEAVEQ
+ ALODSQ
+ AMOVQ
+ AMOVLQSX
+ AMOVLQZX
+ AMOVNTIQ
+ AMOVSQ
+ AMULQ
+ ANEGQ
+ ANOTQ
+ AORQ
+ APOPFQ
+ APOPQ
+ APUSHFQ
+ APUSHQ
+ ARCLQ
+ ARCRQ
+ AROLQ
+ ARORQ
+ AQUAD
+ ASALQ
+ ASARQ
+ ASBBQ
+ ASCASQ
+ ASHLQ
+ ASHRQ
+ ASTOSQ
+ ASUBQ
+ ATESTQ
+ AXADDQ
+ AXCHGQ
+ AXORQ
+ AADDPD
+ AADDPS
+ AADDSD
+ AADDSS
+ AANDNPD
+ AANDNPS
+ AANDPD
+ AANDPS
+ ACMPPD
+ ACMPPS
+ ACMPSD
+ ACMPSS
+ ACOMISD
+ ACOMISS
+ ACVTPD2PL
+ ACVTPD2PS
+ ACVTPL2PD
+ ACVTPL2PS
+ ACVTPS2PD
+ ACVTPS2PL
+ ACVTSD2SL
+ ACVTSD2SQ
+ ACVTSD2SS
+ ACVTSL2SD
+ ACVTSL2SS
+ ACVTSQ2SD
+ ACVTSQ2SS
+ ACVTSS2SD
+ ACVTSS2SL
+ ACVTSS2SQ
+ ACVTTPD2PL
+ ACVTTPS2PL
+ ACVTTSD2SL
+ ACVTTSD2SQ
+ ACVTTSS2SL
+ ACVTTSS2SQ
+ ADIVPD
+ ADIVPS
+ ADIVSD
+ ADIVSS
+ AEMMS
+ AFXRSTOR
+ AFXRSTOR64
+ AFXSAVE
+ AFXSAVE64
+ ALDMXCSR
+ AMASKMOVOU
+ AMASKMOVQ
+ AMAXPD
+ AMAXPS
+ AMAXSD
+ AMAXSS
+ AMINPD
+ AMINPS
+ AMINSD
+ AMINSS
+ AMOVAPD
+ AMOVAPS
+ AMOVOU
+ AMOVHLPS
+ AMOVHPD
+ AMOVHPS
+ AMOVLHPS
+ AMOVLPD
+ AMOVLPS
+ AMOVMSKPD
+ AMOVMSKPS
+ AMOVNTO
+ AMOVNTPD
+ AMOVNTPS
+ AMOVNTQ
+ AMOVO
+ AMOVQOZX
+ AMOVSD
+ AMOVSS
+ AMOVUPD
+ AMOVUPS
+ AMULPD
+ AMULPS
+ AMULSD
+ AMULSS
+ AORPD
+ AORPS
+ APACKSSLW
+ APACKSSWB
+ APACKUSWB
+ APADDB
+ APADDL
+ APADDQ
+ APADDSB
+ APADDSW
+ APADDUSB
+ APADDUSW
+ APADDW
+ APANDB
+ APANDL
+ APANDSB
+ APANDSW
+ APANDUSB
+ APANDUSW
+ APANDW
+ APAND
+ APANDN
+ APAVGB
+ APAVGW
+ APCMPEQB
+ APCMPEQL
+ APCMPEQW
+ APCMPGTB
+ APCMPGTL
+ APCMPGTW
+ APEXTRW
+ APFACC
+ APFADD
+ APFCMPEQ
+ APFCMPGE
+ APFCMPGT
+ APFMAX
+ APFMIN
+ APFMUL
+ APFNACC
+ APFPNACC
+ APFRCP
+ APFRCPIT1
+ APFRCPI2T
+ APFRSQIT1
+ APFRSQRT
+ APFSUB
+ APFSUBR
+ APINSRW
+ APINSRD
+ APINSRQ
+ APMADDWL
+ APMAXSW
+ APMAXUB
+ APMINSW
+ APMINUB
+ APMOVMSKB
+ APMULHRW
+ APMULHUW
+ APMULHW
+ APMULLW
+ APMULULQ
+ APOR
+ APSADBW
+ APSHUFHW
+ APSHUFL
+ APSHUFLW
+ APSHUFW
+ APSHUFB
+ APSLLO
+ APSLLL
+ APSLLQ
+ APSLLW
+ APSRAL
+ APSRAW
+ APSRLO
+ APSRLL
+ APSRLQ
+ APSRLW
+ APSUBB
+ APSUBL
+ APSUBQ
+ APSUBSB
+ APSUBSW
+ APSUBUSB
+ APSUBUSW
+ APSUBW
+ APSWAPL
+ APUNPCKHBW
+ APUNPCKHLQ
+ APUNPCKHQDQ
+ APUNPCKHWL
+ APUNPCKLBW
+ APUNPCKLLQ
+ APUNPCKLQDQ
+ APUNPCKLWL
+ APXOR
+ ARCPPS
+ ARCPSS
+ ARSQRTPS
+ ARSQRTSS
+ ASHUFPD
+ ASHUFPS
+ ASQRTPD
+ ASQRTPS
+ ASQRTSD
+ ASQRTSS
+ ASTMXCSR
+ ASUBPD
+ ASUBPS
+ ASUBSD
+ ASUBSS
+ AUCOMISD
+ AUCOMISS
+ AUNPCKHPD
+ AUNPCKHPS
+ AUNPCKLPD
+ AUNPCKLPS
+ AXORPD
+ AXORPS
+ APF2IW
+ APF2IL
+ API2FW
+ API2FL
+ ARETFW
+ ARETFL
+ ARETFQ
+ ASWAPGS
+ AMODE
+ ACRC32B
+ ACRC32Q
+ AIMUL3Q
+ APREFETCHT0
+ APREFETCHT1
+ APREFETCHT2
+ APREFETCHNTA
+ AMOVQL
+ ABSWAPL
+ ABSWAPQ
+ AUNDEF
+ AAESENC
+ AAESENCLAST
+ AAESDEC
+ AAESDECLAST
+ AAESIMC
+ AAESKEYGENASSIST
+ APSHUFD
+ APCLMULQDQ
+ AUSEFIELD
+ ATYPE
+ AFUNCDATA
+ APCDATA
+ ACHECKNIL
+ AVARDEF
+ AVARKILL
+ ADUFFCOPY
+ ADUFFZERO
+ ALAST
+)
+
+const (
+ D_AL = 0 + iota
+ D_CL
+ D_DL
+ D_BL
+ D_SPB
+ D_BPB
+ D_SIB
+ D_DIB
+ D_R8B
+ D_R9B
+ D_R10B
+ D_R11B
+ D_R12B
+ D_R13B
+ D_R14B
+ D_R15B
+ D_AX = 16 + iota - 16
+ D_CX
+ D_DX
+ D_BX
+ D_SP
+ D_BP
+ D_SI
+ D_DI
+ D_R8
+ D_R9
+ D_R10
+ D_R11
+ D_R12
+ D_R13
+ D_R14
+ D_R15
+ D_AH = 32 + iota - 32
+ D_CH
+ D_DH
+ D_BH
+ D_F0 = 36
+ D_M0 = 44
+ D_X0 = 52 + iota - 38
+ D_X1
+ D_X2
+ D_X3
+ D_X4
+ D_X5
+ D_X6
+ D_X7
+ D_X8
+ D_X9
+ D_X10
+ D_X11
+ D_X12
+ D_X13
+ D_X14
+ D_X15
+ D_CS = 68 + iota - 54
+ D_SS
+ D_DS
+ D_ES
+ D_FS
+ D_GS
+ D_GDTR
+ D_IDTR
+ D_LDTR
+ D_MSW
+ D_TASK
+ D_CR = 79
+ D_DR = 95
+ D_TR = 103
+ D_TLS = 111
+ D_NONE = 112
+ D_BRANCH = 113
+ D_EXTERN = 114
+ D_STATIC = 115
+ D_AUTO = 116
+ D_PARAM = 117
+ D_CONST = 118
+ D_FCONST = 119
+ D_SCONST = 120
+ D_ADDR = 121 + iota - 78
+ D_INDIR
+ D_LAST
+ T_TYPE = 1 << 0
+ T_INDEX = 1 << 1
+ T_OFFSET = 1 << 2
+ T_FCONST = 1 << 3
+ T_SYM = 1 << 4
+ T_SCONST = 1 << 5
+ T_64 = 1 << 6
+ T_GOTYPE = 1 << 7
+ REGARG = -1
+ REGRET = D_AX
+ FREGRET = D_X0
+ REGSP = D_SP
+ REGTMP = D_DI
+ REGEXT = D_R15
+ FREGMIN = D_X0 + 5
+ FREGEXT = D_X0 + 15
+)
--- /dev/null
+package x86
+
/*
 * anames6: instruction mnemonics, indexed by the A* opcode constants.
 */
+var anames6 = []string{
+ "XXX",
+ "AAA",
+ "AAD",
+ "AAM",
+ "AAS",
+ "ADCB",
+ "ADCL",
+ "ADCW",
+ "ADDB",
+ "ADDL",
+ "ADDW",
+ "ADJSP",
+ "ANDB",
+ "ANDL",
+ "ANDW",
+ "ARPL",
+ "BOUNDL",
+ "BOUNDW",
+ "BSFL",
+ "BSFW",
+ "BSRL",
+ "BSRW",
+ "BTL",
+ "BTW",
+ "BTCL",
+ "BTCW",
+ "BTRL",
+ "BTRW",
+ "BTSL",
+ "BTSW",
+ "BYTE",
+ "CALL",
+ "CLC",
+ "CLD",
+ "CLI",
+ "CLTS",
+ "CMC",
+ "CMPB",
+ "CMPL",
+ "CMPW",
+ "CMPSB",
+ "CMPSL",
+ "CMPSW",
+ "DAA",
+ "DAS",
+ "DATA",
+ "DECB",
+ "DECL",
+ "DECQ",
+ "DECW",
+ "DIVB",
+ "DIVL",
+ "DIVW",
+ "ENTER",
+ "GLOBL",
+ "GOK",
+ "HISTORY",
+ "HLT",
+ "IDIVB",
+ "IDIVL",
+ "IDIVW",
+ "IMULB",
+ "IMULL",
+ "IMULW",
+ "INB",
+ "INL",
+ "INW",
+ "INCB",
+ "INCL",
+ "INCQ",
+ "INCW",
+ "INSB",
+ "INSL",
+ "INSW",
+ "INT",
+ "INTO",
+ "IRETL",
+ "IRETW",
+ "JCC",
+ "JCS",
+ "JCXZL",
+ "JEQ",
+ "JGE",
+ "JGT",
+ "JHI",
+ "JLE",
+ "JLS",
+ "JLT",
+ "JMI",
+ "JMP",
+ "JNE",
+ "JOC",
+ "JOS",
+ "JPC",
+ "JPL",
+ "JPS",
+ "LAHF",
+ "LARL",
+ "LARW",
+ "LEAL",
+ "LEAW",
+ "LEAVEL",
+ "LEAVEW",
+ "LOCK",
+ "LODSB",
+ "LODSL",
+ "LODSW",
+ "LONG",
+ "LOOP",
+ "LOOPEQ",
+ "LOOPNE",
+ "LSLL",
+ "LSLW",
+ "MOVB",
+ "MOVL",
+ "MOVW",
+ "MOVBLSX",
+ "MOVBLZX",
+ "MOVBQSX",
+ "MOVBQZX",
+ "MOVBWSX",
+ "MOVBWZX",
+ "MOVWLSX",
+ "MOVWLZX",
+ "MOVWQSX",
+ "MOVWQZX",
+ "MOVSB",
+ "MOVSL",
+ "MOVSW",
+ "MULB",
+ "MULL",
+ "MULW",
+ "NAME",
+ "NEGB",
+ "NEGL",
+ "NEGW",
+ "NOP",
+ "NOTB",
+ "NOTL",
+ "NOTW",
+ "ORB",
+ "ORL",
+ "ORW",
+ "OUTB",
+ "OUTL",
+ "OUTW",
+ "OUTSB",
+ "OUTSL",
+ "OUTSW",
+ "PAUSE",
+ "POPAL",
+ "POPAW",
+ "POPFL",
+ "POPFW",
+ "POPL",
+ "POPW",
+ "PUSHAL",
+ "PUSHAW",
+ "PUSHFL",
+ "PUSHFW",
+ "PUSHL",
+ "PUSHW",
+ "RCLB",
+ "RCLL",
+ "RCLW",
+ "RCRB",
+ "RCRL",
+ "RCRW",
+ "REP",
+ "REPN",
+ "RET",
+ "ROLB",
+ "ROLL",
+ "ROLW",
+ "RORB",
+ "RORL",
+ "RORW",
+ "SAHF",
+ "SALB",
+ "SALL",
+ "SALW",
+ "SARB",
+ "SARL",
+ "SARW",
+ "SBBB",
+ "SBBL",
+ "SBBW",
+ "SCASB",
+ "SCASL",
+ "SCASW",
+ "SETCC",
+ "SETCS",
+ "SETEQ",
+ "SETGE",
+ "SETGT",
+ "SETHI",
+ "SETLE",
+ "SETLS",
+ "SETLT",
+ "SETMI",
+ "SETNE",
+ "SETOC",
+ "SETOS",
+ "SETPC",
+ "SETPL",
+ "SETPS",
+ "CDQ",
+ "CWD",
+ "SHLB",
+ "SHLL",
+ "SHLW",
+ "SHRB",
+ "SHRL",
+ "SHRW",
+ "STC",
+ "STD",
+ "STI",
+ "STOSB",
+ "STOSL",
+ "STOSW",
+ "SUBB",
+ "SUBL",
+ "SUBW",
+ "SYSCALL",
+ "TESTB",
+ "TESTL",
+ "TESTW",
+ "TEXT",
+ "VERR",
+ "VERW",
+ "WAIT",
+ "WORD",
+ "XCHGB",
+ "XCHGL",
+ "XCHGW",
+ "XLAT",
+ "XORB",
+ "XORL",
+ "XORW",
+ "FMOVB",
+ "FMOVBP",
+ "FMOVD",
+ "FMOVDP",
+ "FMOVF",
+ "FMOVFP",
+ "FMOVL",
+ "FMOVLP",
+ "FMOVV",
+ "FMOVVP",
+ "FMOVW",
+ "FMOVWP",
+ "FMOVX",
+ "FMOVXP",
+ "FCOMB",
+ "FCOMBP",
+ "FCOMD",
+ "FCOMDP",
+ "FCOMDPP",
+ "FCOMF",
+ "FCOMFP",
+ "FCOML",
+ "FCOMLP",
+ "FCOMW",
+ "FCOMWP",
+ "FUCOM",
+ "FUCOMP",
+ "FUCOMPP",
+ "FADDDP",
+ "FADDW",
+ "FADDL",
+ "FADDF",
+ "FADDD",
+ "FMULDP",
+ "FMULW",
+ "FMULL",
+ "FMULF",
+ "FMULD",
+ "FSUBDP",
+ "FSUBW",
+ "FSUBL",
+ "FSUBF",
+ "FSUBD",
+ "FSUBRDP",
+ "FSUBRW",
+ "FSUBRL",
+ "FSUBRF",
+ "FSUBRD",
+ "FDIVDP",
+ "FDIVW",
+ "FDIVL",
+ "FDIVF",
+ "FDIVD",
+ "FDIVRDP",
+ "FDIVRW",
+ "FDIVRL",
+ "FDIVRF",
+ "FDIVRD",
+ "FXCHD",
+ "FFREE",
+ "FLDCW",
+ "FLDENV",
+ "FRSTOR",
+ "FSAVE",
+ "FSTCW",
+ "FSTENV",
+ "FSTSW",
+ "F2XM1",
+ "FABS",
+ "FCHS",
+ "FCLEX",
+ "FCOS",
+ "FDECSTP",
+ "FINCSTP",
+ "FINIT",
+ "FLD1",
+ "FLDL2E",
+ "FLDL2T",
+ "FLDLG2",
+ "FLDLN2",
+ "FLDPI",
+ "FLDZ",
+ "FNOP",
+ "FPATAN",
+ "FPREM",
+ "FPREM1",
+ "FPTAN",
+ "FRNDINT",
+ "FSCALE",
+ "FSIN",
+ "FSINCOS",
+ "FSQRT",
+ "FTST",
+ "FXAM",
+ "FXTRACT",
+ "FYL2X",
+ "FYL2XP1",
+ "END",
+ "DYNT_",
+ "INIT_",
+ "SIGNAME",
+ "CMPXCHGB",
+ "CMPXCHGL",
+ "CMPXCHGW",
+ "CMPXCHG8B",
+ "CPUID",
+ "INVD",
+ "INVLPG",
+ "LFENCE",
+ "MFENCE",
+ "MOVNTIL",
+ "RDMSR",
+ "RDPMC",
+ "RDTSC",
+ "RSM",
+ "SFENCE",
+ "SYSRET",
+ "WBINVD",
+ "WRMSR",
+ "XADDB",
+ "XADDL",
+ "XADDW",
+ "CMOVLCC",
+ "CMOVLCS",
+ "CMOVLEQ",
+ "CMOVLGE",
+ "CMOVLGT",
+ "CMOVLHI",
+ "CMOVLLE",
+ "CMOVLLS",
+ "CMOVLLT",
+ "CMOVLMI",
+ "CMOVLNE",
+ "CMOVLOC",
+ "CMOVLOS",
+ "CMOVLPC",
+ "CMOVLPL",
+ "CMOVLPS",
+ "CMOVQCC",
+ "CMOVQCS",
+ "CMOVQEQ",
+ "CMOVQGE",
+ "CMOVQGT",
+ "CMOVQHI",
+ "CMOVQLE",
+ "CMOVQLS",
+ "CMOVQLT",
+ "CMOVQMI",
+ "CMOVQNE",
+ "CMOVQOC",
+ "CMOVQOS",
+ "CMOVQPC",
+ "CMOVQPL",
+ "CMOVQPS",
+ "CMOVWCC",
+ "CMOVWCS",
+ "CMOVWEQ",
+ "CMOVWGE",
+ "CMOVWGT",
+ "CMOVWHI",
+ "CMOVWLE",
+ "CMOVWLS",
+ "CMOVWLT",
+ "CMOVWMI",
+ "CMOVWNE",
+ "CMOVWOC",
+ "CMOVWOS",
+ "CMOVWPC",
+ "CMOVWPL",
+ "CMOVWPS",
+ "ADCQ",
+ "ADDQ",
+ "ANDQ",
+ "BSFQ",
+ "BSRQ",
+ "BTCQ",
+ "BTQ",
+ "BTRQ",
+ "BTSQ",
+ "CMPQ",
+ "CMPSQ",
+ "CMPXCHGQ",
+ "CQO",
+ "DIVQ",
+ "IDIVQ",
+ "IMULQ",
+ "IRETQ",
+ "JCXZQ",
+ "LEAQ",
+ "LEAVEQ",
+ "LODSQ",
+ "MOVQ",
+ "MOVLQSX",
+ "MOVLQZX",
+ "MOVNTIQ",
+ "MOVSQ",
+ "MULQ",
+ "NEGQ",
+ "NOTQ",
+ "ORQ",
+ "POPFQ",
+ "POPQ",
+ "PUSHFQ",
+ "PUSHQ",
+ "RCLQ",
+ "RCRQ",
+ "ROLQ",
+ "RORQ",
+ "QUAD",
+ "SALQ",
+ "SARQ",
+ "SBBQ",
+ "SCASQ",
+ "SHLQ",
+ "SHRQ",
+ "STOSQ",
+ "SUBQ",
+ "TESTQ",
+ "XADDQ",
+ "XCHGQ",
+ "XORQ",
+ "ADDPD",
+ "ADDPS",
+ "ADDSD",
+ "ADDSS",
+ "ANDNPD",
+ "ANDNPS",
+ "ANDPD",
+ "ANDPS",
+ "CMPPD",
+ "CMPPS",
+ "CMPSD",
+ "CMPSS",
+ "COMISD",
+ "COMISS",
+ "CVTPD2PL",
+ "CVTPD2PS",
+ "CVTPL2PD",
+ "CVTPL2PS",
+ "CVTPS2PD",
+ "CVTPS2PL",
+ "CVTSD2SL",
+ "CVTSD2SQ",
+ "CVTSD2SS",
+ "CVTSL2SD",
+ "CVTSL2SS",
+ "CVTSQ2SD",
+ "CVTSQ2SS",
+ "CVTSS2SD",
+ "CVTSS2SL",
+ "CVTSS2SQ",
+ "CVTTPD2PL",
+ "CVTTPS2PL",
+ "CVTTSD2SL",
+ "CVTTSD2SQ",
+ "CVTTSS2SL",
+ "CVTTSS2SQ",
+ "DIVPD",
+ "DIVPS",
+ "DIVSD",
+ "DIVSS",
+ "EMMS",
+ "FXRSTOR",
+ "FXRSTOR64",
+ "FXSAVE",
+ "FXSAVE64",
+ "LDMXCSR",
+ "MASKMOVOU",
+ "MASKMOVQ",
+ "MAXPD",
+ "MAXPS",
+ "MAXSD",
+ "MAXSS",
+ "MINPD",
+ "MINPS",
+ "MINSD",
+ "MINSS",
+ "MOVAPD",
+ "MOVAPS",
+ "MOVOU",
+ "MOVHLPS",
+ "MOVHPD",
+ "MOVHPS",
+ "MOVLHPS",
+ "MOVLPD",
+ "MOVLPS",
+ "MOVMSKPD",
+ "MOVMSKPS",
+ "MOVNTO",
+ "MOVNTPD",
+ "MOVNTPS",
+ "MOVNTQ",
+ "MOVO",
+ "MOVQOZX",
+ "MOVSD",
+ "MOVSS",
+ "MOVUPD",
+ "MOVUPS",
+ "MULPD",
+ "MULPS",
+ "MULSD",
+ "MULSS",
+ "ORPD",
+ "ORPS",
+ "PACKSSLW",
+ "PACKSSWB",
+ "PACKUSWB",
+ "PADDB",
+ "PADDL",
+ "PADDQ",
+ "PADDSB",
+ "PADDSW",
+ "PADDUSB",
+ "PADDUSW",
+ "PADDW",
+ "PANDB",
+ "PANDL",
+ "PANDSB",
+ "PANDSW",
+ "PANDUSB",
+ "PANDUSW",
+ "PANDW",
+ "PAND",
+ "PANDN",
+ "PAVGB",
+ "PAVGW",
+ "PCMPEQB",
+ "PCMPEQL",
+ "PCMPEQW",
+ "PCMPGTB",
+ "PCMPGTL",
+ "PCMPGTW",
+ "PEXTRW",
+ "PFACC",
+ "PFADD",
+ "PFCMPEQ",
+ "PFCMPGE",
+ "PFCMPGT",
+ "PFMAX",
+ "PFMIN",
+ "PFMUL",
+ "PFNACC",
+ "PFPNACC",
+ "PFRCP",
+ "PFRCPIT1",
+ "PFRCPI2T",
+ "PFRSQIT1",
+ "PFRSQRT",
+ "PFSUB",
+ "PFSUBR",
+ "PINSRW",
+ "PINSRD",
+ "PINSRQ",
+ "PMADDWL",
+ "PMAXSW",
+ "PMAXUB",
+ "PMINSW",
+ "PMINUB",
+ "PMOVMSKB",
+ "PMULHRW",
+ "PMULHUW",
+ "PMULHW",
+ "PMULLW",
+ "PMULULQ",
+ "POR",
+ "PSADBW",
+ "PSHUFHW",
+ "PSHUFL",
+ "PSHUFLW",
+ "PSHUFW",
+ "PSHUFB",
+ "PSLLO",
+ "PSLLL",
+ "PSLLQ",
+ "PSLLW",
+ "PSRAL",
+ "PSRAW",
+ "PSRLO",
+ "PSRLL",
+ "PSRLQ",
+ "PSRLW",
+ "PSUBB",
+ "PSUBL",
+ "PSUBQ",
+ "PSUBSB",
+ "PSUBSW",
+ "PSUBUSB",
+ "PSUBUSW",
+ "PSUBW",
+ "PSWAPL",
+ "PUNPCKHBW",
+ "PUNPCKHLQ",
+ "PUNPCKHQDQ",
+ "PUNPCKHWL",
+ "PUNPCKLBW",
+ "PUNPCKLLQ",
+ "PUNPCKLQDQ",
+ "PUNPCKLWL",
+ "PXOR",
+ "RCPPS",
+ "RCPSS",
+ "RSQRTPS",
+ "RSQRTSS",
+ "SHUFPD",
+ "SHUFPS",
+ "SQRTPD",
+ "SQRTPS",
+ "SQRTSD",
+ "SQRTSS",
+ "STMXCSR",
+ "SUBPD",
+ "SUBPS",
+ "SUBSD",
+ "SUBSS",
+ "UCOMISD",
+ "UCOMISS",
+ "UNPCKHPD",
+ "UNPCKHPS",
+ "UNPCKLPD",
+ "UNPCKLPS",
+ "XORPD",
+ "XORPS",
+ "PF2IW",
+ "PF2IL",
+ "PI2FW",
+ "PI2FL",
+ "RETFW",
+ "RETFL",
+ "RETFQ",
+ "SWAPGS",
+ "MODE",
+ "CRC32B",
+ "CRC32Q",
+ "IMUL3Q",
+ "PREFETCHT0",
+ "PREFETCHT1",
+ "PREFETCHT2",
+ "PREFETCHNTA",
+ "MOVQL",
+ "BSWAPL",
+ "BSWAPQ",
+ "UNDEF",
+ "AESENC",
+ "AESENCLAST",
+ "AESDEC",
+ "AESDECLAST",
+ "AESIMC",
+ "AESKEYGENASSIST",
+ "PSHUFD",
+ "PCLMULQDQ",
+ "USEFIELD",
+ "TYPE",
+ "FUNCDATA",
+ "PCDATA",
+ "CHECKNIL",
+ "VARDEF",
+ "VARKILL",
+ "DUFFCOPY",
+ "DUFFZERO",
+ "LAST",
+}
+
+var dnames6 = []string{
+ D_AL: "AL",
+ D_CL: "CL",
+ D_DL: "DL",
+ D_BL: "BL",
+ D_SPB: "SPB",
+ D_BPB: "BPB",
+ D_SIB: "SIB",
+ D_DIB: "DIB",
+ D_R8B: "R8B",
+ D_R9B: "R9B",
+ D_R10B: "R10B",
+ D_R11B: "R11B",
+ D_R12B: "R12B",
+ D_R13B: "R13B",
+ D_R14B: "R14B",
+ D_R15B: "R15B",
+ D_AX: "AX",
+ D_CX: "CX",
+ D_DX: "DX",
+ D_BX: "BX",
+ D_SP: "SP",
+ D_BP: "BP",
+ D_SI: "SI",
+ D_DI: "DI",
+ D_R8: "R8",
+ D_R9: "R9",
+ D_R10: "R10",
+ D_R11: "R11",
+ D_R12: "R12",
+ D_R13: "R13",
+ D_R14: "R14",
+ D_R15: "R15",
+ D_AH: "AH",
+ D_CH: "CH",
+ D_DH: "DH",
+ D_BH: "BH",
+ D_F0: "F0",
+ D_M0: "M0",
+ D_X0: "X0",
+ D_X1: "X1",
+ D_X2: "X2",
+ D_X3: "X3",
+ D_X4: "X4",
+ D_X5: "X5",
+ D_X6: "X6",
+ D_X7: "X7",
+ D_X8: "X8",
+ D_X9: "X9",
+ D_X10: "X10",
+ D_X11: "X11",
+ D_X12: "X12",
+ D_X13: "X13",
+ D_X14: "X14",
+ D_X15: "X15",
+ D_CS: "CS",
+ D_SS: "SS",
+ D_DS: "DS",
+ D_ES: "ES",
+ D_FS: "FS",
+ D_GS: "GS",
+ D_GDTR: "GDTR",
+ D_IDTR: "IDTR",
+ D_LDTR: "LDTR",
+ D_MSW: "MSW",
+ D_TASK: "TASK",
+ D_CR: "CR",
+ D_DR: "DR",
+ D_TR: "TR",
+ D_TLS: "TLS",
+ D_NONE: "NONE",
+ D_BRANCH: "BRANCH",
+ D_EXTERN: "EXTERN",
+ D_STATIC: "STATIC",
+ D_AUTO: "AUTO",
+ D_PARAM: "PARAM",
+ D_CONST: "CONST",
+ D_FCONST: "FCONST",
+ D_SCONST: "SCONST",
+ D_ADDR: "ADDR",
+ D_INDIR: "INDIR",
+}
--- /dev/null
+// Inferno utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "strings"
+)
+
+// Instruction layout.
+
+const (
+ MaxAlign = 32
+ LoopAlign = 16
+ MaxLoopPad = 0
+ FuncAlign = 16
+)
+
// Optab is one entry in the instruction-encoding table.
type Optab struct {
	// as is the instruction opcode (an A* constant).
	as int16
	// ytab lists operand-class/encoding entries tried in order
	// (see the y* tables below, which come in 4-byte groups).
	ytab []byte
	// prefix selects the encoding prefix group (Px, Pe, Pm, ...).
	prefix uint8
	// op holds the opcode byte sequences consumed by the encoder.
	op [23]uint8
}
+
// Movtab describes a special-case encoding for a move-style
// instruction, keyed by opcode and operand types.
type Movtab struct {
	// as is the instruction opcode (an A* constant).
	as int16
	// ft and tt are the from/to operand type codes.
	// NOTE(review): presumably Y* operand classes — confirm against
	// the encoder that consumes this table (outside this chunk).
	ft uint8
	tt uint8
	// code selects the encoding strategy; op holds its opcode bytes.
	code uint8
	op   [4]uint8
}
+
+const (
+ Yxxx = 0 + iota
+ Ynone
+ Yi0
+ Yi1
+ Yi8
+ Ys32
+ Yi32
+ Yi64
+ Yiauto
+ Yal
+ Ycl
+ Yax
+ Ycx
+ Yrb
+ Yrl
+ Yrf
+ Yf0
+ Yrx
+ Ymb
+ Yml
+ Ym
+ Ybr
+ Ycol
+ Ycs
+ Yss
+ Yds
+ Yes
+ Yfs
+ Ygs
+ Ygdtr
+ Yidtr
+ Yldtr
+ Ymsw
+ Ytask
+ Ycr0
+ Ycr1
+ Ycr2
+ Ycr3
+ Ycr4
+ Ycr5
+ Ycr6
+ Ycr7
+ Ycr8
+ Ydr0
+ Ydr1
+ Ydr2
+ Ydr3
+ Ydr4
+ Ydr5
+ Ydr6
+ Ydr7
+ Ytr0
+ Ytr1
+ Ytr2
+ Ytr3
+ Ytr4
+ Ytr5
+ Ytr6
+ Ytr7
+ Yrl32
+ Yrl64
+ Ymr
+ Ymm
+ Yxr
+ Yxm
+ Ytls
+ Ymax
+ Zxxx = 0 + iota - 67
+ Zlit
+ Zlitm_r
+ Z_rp
+ Zbr
+ Zcall
+ Zcallindreg
+ Zib_
+ Zib_rp
+ Zibo_m
+ Zibo_m_xm
+ Zil_
+ Zil_rp
+ Ziq_rp
+ Zilo_m
+ Ziqo_m
+ Zjmp
+ Zloop
+ Zo_iw
+ Zm_o
+ Zm_r
+ Zm2_r
+ Zm_r_xm
+ Zm_r_i_xm
+ Zm_r_3d
+ Zm_r_xm_nr
+ Zr_m_xm_nr
+ Zibm_r
+ Zmb_r
+ Zaut_r
+ Zo_m
+ Zo_m64
+ Zpseudo
+ Zr_m
+ Zr_m_xm
+ Zr_m_i_xm
+ Zrp_
+ Z_ib
+ Z_il
+ Zm_ibo
+ Zm_ilo
+ Zib_rr
+ Zil_rr
+ Zclr
+ Zbyte
+ Zmax
+ Px = 0
+ P32 = 0x32
+ Pe = 0x66
+ Pm = 0x0f
+ Pq = 0xff
+ Pb = 0xfe
+ Pf2 = 0xf2
+ Pf3 = 0xf3
+ Pq3 = 0x67
+ Pw = 0x48
+ Py = 0x80
+ Rxf = 1 << 9
+ Rxt = 1 << 8
+ Rxw = 1 << 3
+ Rxr = 1 << 2
+ Rxx = 1 << 1
+ Rxb = 1 << 0
+ Maxand = 10
+)
+
// ycover is the operand-class compatibility matrix.
// NOTE(review): populated at init time outside this chunk; the
// [Ymax*Ymax] size implies a (from, to) pair index — confirm the
// exact indexing against the initializer.
var ycover [Ymax * Ymax]uint8

// reg maps a D_* register number to its machine encoding.
// NOTE(review): filled in elsewhere — verify against the init code.
var reg [D_NONE]int

// regrex records per-register REX-extension information for D_*
// registers. NOTE(review): filled in elsewhere — verify semantics.
var regrex [D_NONE + 1]int
+
+var ynone = []uint8{
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ytext = []uint8{
+ Ymb,
+ Yi64,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var ynop = []uint8{
+ Ynone,
+ Ynone,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yiauto,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yml,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yrf,
+ Zpseudo,
+ 0,
+ Ynone,
+ Yxr,
+ Zpseudo,
+ 0,
+ Yiauto,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yml,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yrf,
+ Ynone,
+ Zpseudo,
+ 0,
+ Yxr,
+ Ynone,
+ Zpseudo,
+ 1,
+ 0,
+}
+
+var yfuncdata = []uint8{
+ Yi32,
+ Ym,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var ypcdata = []uint8{
+ Yi32,
+ Yi32,
+ Zpseudo,
+ 0,
+ 0,
+}
+
+var yxorb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yxorl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yaddl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yincb = []uint8{
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yincw = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yincl = []uint8{
+ Ynone,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ycmpb = []uint8{
+ Yal,
+ Yi32,
+ Z_ib,
+ 1,
+ Ymb,
+ Yi32,
+ Zm_ibo,
+ 2,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ycmpl = []uint8{
+ Yml,
+ Yi8,
+ Zm_ibo,
+ 2,
+ Yax,
+ Yi32,
+ Z_il,
+ 1,
+ Yml,
+ Yi32,
+ Zm_ilo,
+ 2,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yshb = []uint8{
+ Yi1,
+ Ymb,
+ Zo_m,
+ 2,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Ycx,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yshl = []uint8{
+ Yi1,
+ Yml,
+ Zo_m,
+ 2,
+ Yi32,
+ Yml,
+ Zibo_m,
+ 2,
+ Ycl,
+ Yml,
+ Zo_m,
+ 2,
+ Ycx,
+ Yml,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ytestb = []uint8{
+ Yi32,
+ Yal,
+ Zib_,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ytestl = []uint8{
+ Yi32,
+ Yax,
+ Zil_,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymovb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ Yi32,
+ Yrb,
+ Zib_rp,
+ 1,
+ Yi32,
+ Ymb,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+var ymbs = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ybtl = []uint8{
+ Yi8,
+ Yml,
+ Zibo_m,
+ 2,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ymovw = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2,
+ 0,
+}
+
+var ymovl = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ Yi0,
+ Yrl,
+ Zclr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rp,
+ 1,
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2,
+ Yml,
+ Ymr,
+ Zm_r_xm,
+ 1, // MMX MOVD
+ Ymr,
+ Yml,
+ Zr_m_xm,
+ 1, // MMX MOVD
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // XMM MOVD (32 bit)
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // XMM MOVD (32 bit)
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2,
+ 0,
+}
+
+var yret = []uint8{
+ Ynone,
+ Ynone,
+ Zo_iw,
+ 1,
+ Yi32,
+ Ynone,
+ Zo_iw,
+ 1,
+ 0,
+}
+
+var ymovq = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1, // 0x89
+ Yml,
+ Yrl,
+ Zm_r,
+ 1, // 0x8b
+ Yi0,
+ Yrl,
+ Zclr,
+ 1, // 0x31
+ Ys32,
+ Yrl,
+ Zilo_m,
+ 2, // 32 bit signed 0xc7,(0)
+ Yi64,
+ Yrl,
+ Ziq_rp,
+ 1, // 0xb8 -- 32/64 bit immediate
+ Yi32,
+ Yml,
+ Zilo_m,
+ 2, // 0xc7,(0)
+ Ym,
+ Ymr,
+ Zm_r_xm_nr,
+ 1, // MMX MOVQ (shorter encoding)
+ Ymr,
+ Ym,
+ Zr_m_xm_nr,
+ 1, // MMX MOVQ
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1, // MMX MOVD
+ Ymr,
+ Ymm,
+ Zr_m_xm,
+ 1, // MMX MOVD
+ Yxr,
+ Ymr,
+ Zm_r_xm_nr,
+ 2, // MOVDQ2Q
+ Yxm,
+ Yxr,
+ Zm_r_xm_nr,
+ 2, // MOVQ xmm1/m64 -> xmm2
+ Yxr,
+ Yxm,
+ Zr_m_xm_nr,
+ 2, // MOVQ xmm1 -> xmm2/m64
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2, // MOVD xmm load
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 2, // MOVD xmm store
+ Yiauto,
+ Yrl,
+ Zaut_r,
+ 2, // built-in LEAQ
+ 0,
+}
+
+var ym_rl = []uint8{
+ Ym,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrl_m = []uint8{
+ Yrl,
+ Ym,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var ymb_rl = []uint8{
+ Ymb,
+ Yrl,
+ Zmb_r,
+ 1,
+ 0,
+}
+
+var yml_rl = []uint8{
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrl_ml = []uint8{
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yml_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ Ymb,
+ Yrb,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var yrb_mb = []uint8{
+ Yrb,
+ Ymb,
+ Zr_m,
+ 1,
+ 0,
+}
+
+var yxchg = []uint8{
+ Yax,
+ Yrl,
+ Z_rp,
+ 1,
+ Yrl,
+ Yax,
+ Zrp_,
+ 1,
+ Yrl,
+ Yml,
+ Zr_m,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ydivl = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ydivb = []uint8{
+ Ymb,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yimul = []uint8{
+ Yml,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Yrl,
+ Zib_rr,
+ 1,
+ Yi32,
+ Yrl,
+ Zil_rr,
+ 1,
+ Yml,
+ Yrl,
+ Zm_r,
+ 2,
+ 0,
+}
+
+var yimul3 = []uint8{
+ Yml,
+ Yrl,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var ybyte = []uint8{
+ Yi64,
+ Ynone,
+ Zbyte,
+ 1,
+ 0,
+}
+
+var yin = []uint8{ // IN/OUT port I/O: immediate port number, or implicit DX form (Zlit)
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ Ynone,
+ Ynone,
+ Zlit,
+ 1,
+ 0,
+}
+
+var yint = []uint8{ // INT n: single immediate byte operand
+ Yi32,
+ Ynone,
+ Zib_,
+ 1,
+ 0,
+}
+
+var ypushl = []uint8{ // PUSH: short reg form, mem form, imm8, imm32
+ Yrl,
+ Ynone,
+ Zrp_,
+ 1,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ Yi8,
+ Ynone,
+ Zib_,
+ 1,
+ Yi32,
+ Ynone,
+ Zil_,
+ 1,
+ 0,
+}
+
+var ypopl = []uint8{ // POP: short reg form, then mem form
+ Ynone,
+ Yrl,
+ Z_rp,
+ 1,
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var ybswap = []uint8{ // BSWAP: register-only, opcode byte carries the register number
+ Ynone,
+ Yrl,
+ Z_rp,
+ 2,
+ 0,
+}
+
+var yscond = []uint8{ // single byte mem/reg operand with opcode extension; used by SETcc/NEG/NOT in optab
+ Ynone,
+ Ymb,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yjcond = []uint8{ // conditional branch; Yi0/Yi1 variants carry a branch-likely/unlikely hint operand
+ Ynone,
+ Ybr,
+ Zbr,
+ 0,
+ Yi0,
+ Ybr,
+ Zbr,
+ 0,
+ Yi1,
+ Ybr,
+ Zbr,
+ 1,
+ 0,
+}
+
+var yloop = []uint8{ // short-branch-only ops; used by LOOP/LOOPEQ/LOOPNE/JCXZ in optab
+ Ynone,
+ Ybr,
+ Zloop,
+ 1,
+ 0,
+}
+
+var ycall = []uint8{ // CALL: indirect through mem/reg, indirect through register pair, or direct branch
+ Ynone,
+ Yml,
+ Zcallindreg,
+ 0,
+ Yrx,
+ Yrx,
+ Zcallindreg,
+ 2,
+ Ynone,
+ Ybr,
+ Zcall,
+ 1,
+ 0,
+}
+
+var yduff = []uint8{ // direct call to an immediate offset; presumably for DUFFZERO/DUFFCOPY — the optab rows are outside this view
+ Ynone,
+ Yi32,
+ Zcall,
+ 1,
+ 0,
+}
+
+var yjmp = []uint8{ // JMP: indirect through mem/reg, or direct branch (short/long chosen by Zjmp)
+ Ynone,
+ Yml,
+ Zo_m64,
+ 2,
+ Ynone,
+ Ybr,
+ Zjmp,
+ 1,
+ 0,
+}
+
+var yfmvd = []uint8{ // x87 double move: mem<->F0 and F-register<->F0 forms
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvdp = []uint8{ // x87 move-and-pop: F0 -> mem or F0 -> F-register only
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvf = []uint8{ // x87 single-precision move: mem -> F0 and F0 -> mem
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfmvx = []uint8{ // x87 load-only form: mem -> F0
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yfmvp = []uint8{ // x87 store-only form: F0 -> mem
+ Yf0,
+ Ym,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfadd = []uint8{ // x87 arithmetic: mem op F0, Freg op F0, F0 op Freg
+ Ym,
+ Yf0,
+ Zm_o,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfaddp = []uint8{ // x87 arithmetic-and-pop: F0 op Freg only
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ 0,
+}
+
+var yfxch = []uint8{ // x87 exchange: F0 with Freg, either operand order
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2,
+ Yrf,
+ Yf0,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ycompp = []uint8{ // x87 compare-and-pop
+ Yf0,
+ Yrf,
+ Zo_m,
+ 2, /* botch is really f0,f1 */
+ 0,
+}
+
+var ystsw = []uint8{ // store status word: to memory, or to AX via the literal encoding (Zlit)
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ynone,
+ Yax,
+ Zlit,
+ 1,
+ 0,
+}
+
+var ystcw = []uint8{ // control word: store to memory or load from memory
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ysvrs = []uint8{ // save/restore state to/from memory; used by FXSAVE/FXRSTOR and LDMXCSR/STMXCSR in optab
+ Ynone,
+ Ym,
+ Zo_m,
+ 2,
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var ymm = []uint8{ // packed-integer ops with both an MMX form and an SSE (xmm) form; used by PADDB/PAND/etc.
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxm = []uint8{ // generic SSE xmm/mem -> xmm form; used by ADDPD/ADDSS/etc.
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvm1 = []uint8{ // conversions from xmm source to xmm or MMX destination; used by CVTPD2PL/CVTPS2PL
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yxm,
+ Ymr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxcvm2 = []uint8{ // conversions from xmm or MMX source to xmm destination; used by CVTPL2PD/CVTPL2PS
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Ymm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+/*
+static uchar yxmq[] =
+{
+ Yxm, Yxr, Zm_r_xm, 2,
+ 0
+};
+*/
+var yxr = []uint8{ // xmm register -> xmm register only; used by MASKMOVOU/MOVHLPS/MOVLHPS in optab
+ Yxr,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxr_ml = []uint8{ // xmm register -> mem/reg; used by the MOVNT* store forms in optab
+ Yxr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var ymr = []uint8{ // MMX register -> MMX register; used by MASKMOVQ in optab
+ Ymr,
+ Ymr,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymr_ml = []uint8{ // MMX register -> mem/reg; used by MOVNTQ in optab
+ Ymr,
+ Yml,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcmp = []uint8{ // SSE compare, no immediate; used by COMISD/COMISS in optab
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcmpi = []uint8{ // SSE compare with trailing immediate byte; used by CMPPD/CMPPS/CMPSD/CMPSS
+ Yxm,
+ Yxr,
+ Zm_r_i_xm,
+ 2,
+ 0,
+}
+
+var yxmov = []uint8{ // SSE move, both directions; used by MOVAPD/MOVUPS/MOVSD/etc.
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ Yxr,
+ Yxm,
+ Zr_m_xm,
+ 1,
+ 0,
+}
+
+var yxcvfl = []uint8{ // float (xmm/mem) -> 32-bit integer register; used by CVTSD2SL/CVTSS2SL
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvlf = []uint8{ // 32-bit integer (mem/reg) -> xmm float; used by CVTSL2SD/CVTSL2SS
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var yxcvfq = []uint8{ // float (xmm/mem) -> 64-bit integer register; used by CVTSD2SQ/CVTSS2SQ
+ Yxm,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yxcvqf = []uint8{ // 64-bit integer (mem/reg) -> xmm float; used by CVTSQ2SD/CVTSQ2SS
+ Yml,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ 0,
+}
+
+var yps = []uint8{ // packed shifts: MMX reg form, MMX imm8 form, then the SSE (xmm) equivalents; used by PSLL*/PSRL*/PSRA*
+ Ymm,
+ Ymr,
+ Zm_r_xm,
+ 1,
+ Yi8,
+ Ymr,
+ Zibo_m_xm,
+ 2,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 2,
+ Yi8,
+ Yxr,
+ Zibo_m_xm,
+ 3,
+ 0,
+}
+
+var yxrrl = []uint8{ // xmm register -> integer register; used by MOVMSKPD/MOVMSKPS in optab
+ Yxr,
+ Yrl,
+ Zm_r,
+ 1,
+ 0,
+}
+
+var ymfp = []uint8{ // MMX src -> MMX dst with 3-byte 0x0f,0x0f,suffix encoding (Zm_r_3d); used by the PF*/PI2F* ops in optab
+ Ymm,
+ Ymr,
+ Zm_r_3d,
+ 1,
+ 0,
+}
+
+var ymrxr = []uint8{ // MMX reg or xmm/mem -> xmm register; used by MOVQOZX in optab
+ Ymr,
+ Yxr,
+ Zm_r,
+ 1,
+ Yxm,
+ Yxr,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var ymshuf = []uint8{ // MMX shuffle with imm8; used by PSHUFW in optab
+ Ymm,
+ Ymr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var ymshufb = []uint8{ // two-byte-opcode xmm form; used by PSHUFB in optab
+ Yxm,
+ Yxr,
+ Zm2_r,
+ 2,
+ 0,
+}
+
+var yxshuf = []uint8{ // xmm shuffle with imm8; used by SHUFPD/SHUFPS/PSHUFL/etc.
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yextrw = []uint8{ // extract word: xmm -> integer register with imm8 index; used by PEXTRW
+ Yxr,
+ Yrl,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yinsrw = []uint8{ // insert word: mem/reg -> xmm with imm8 index; used by PINSRW
+ Yml,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+var yinsr = []uint8{ // insert dword/qword: MMX-class src -> xmm with imm8 index; used by PINSRD/PINSRQ
+ Ymm,
+ Yxr,
+ Zibm_r,
+ 3,
+ 0,
+}
+
+var ypsdq = []uint8{ // whole-xmm shift by imm8; used by PSLLO/PSRLO in optab
+ Yi8,
+ Yxr,
+ Zibo_m,
+ 2,
+ 0,
+}
+
+var ymskb = []uint8{ // move byte mask to integer register: xmm form then MMX form; used by PMOVMSKB
+ Yxr,
+ Yrl,
+ Zm_r_xm,
+ 2,
+ Ymr,
+ Yrl,
+ Zm_r_xm,
+ 1,
+ 0,
+}
+
+var ycrc32l = []uint8{Yml, Yrl, Zlitm_r, 0} // CRC32: literal opcode bytes then modrm; single entry, no separate 0 terminator needed since it always matches
+
+var yprefetch = []uint8{ // memory operand with opcode extension; presumably the PREFETCH* ops — optab rows are outside this view
+ Ym,
+ Ynone,
+ Zm_o,
+ 2,
+ 0,
+}
+
+var yaes = []uint8{ // literal-opcode xmm form; presumably the AESENC/AESDEC family — optab rows are outside this view
+ Yxm,
+ Yxr,
+ Zlitm_r,
+ 2,
+ 0,
+}
+
+var yaes2 = []uint8{ // xmm form with imm8; presumably AESKEYGENASSIST — optab rows are outside this view
+ Yxm,
+ Yxr,
+ Zibm_r,
+ 2,
+ 0,
+}
+
+/*
+ * You are doasm, holding in your hand a Prog* with p->as set to, say, ACRC32,
+ * and p->from and p->to as operands (Addr*). The linker scans optab to find
+ * the entry with the given p->as and then looks through the ytable for that
+ * instruction (the second field in the optab struct) for a line whose first
+ * two values match the Ytypes of the p->from and p->to operands. The function
+ * oclass in span.c computes the specific Ytype of an operand and then the set
+ * of more general Ytypes that it satisfies is implied by the ycover table, set
+ * up in instinit. For example, oclass distinguishes the constants 0 and 1
+ * from the more general 8-bit constants, but instinit says
+ *
+ * ycover[Yi0*Ymax + Ys32] = 1;
+ * ycover[Yi1*Ymax + Ys32] = 1;
+ * ycover[Yi8*Ymax + Ys32] = 1;
+ *
+ * which means that Yi0, Yi1, and Yi8 all count as Ys32 (signed 32)
+ * if that's what an instruction can handle.
+ *
+ * In parallel with the scan through the ytable for the appropriate line, there
+ * is a z pointer that starts out pointing at the strange magic byte list in
+ * the Optab struct. With each step past a non-matching ytable line, z
+ * advances by the 4th entry in the line. When a matching line is found, that
+ * z pointer has the extra data to use in laying down the instruction bytes.
+ * The actual bytes laid down are a function of the 3rd entry in the line (that
+ * is, the Ztype) and the z bytes.
+ *
+ * For example, let's look at AADDL. The optab line says:
+ * { AADDL, yaddl, Px, 0x83,(00),0x05,0x81,(00),0x01,0x03 },
+ *
+ * and yaddl says
+ * uchar yaddl[] =
+ * {
+ * Yi8, Yml, Zibo_m, 2,
+ * Yi32, Yax, Zil_, 1,
+ * Yi32, Yml, Zilo_m, 2,
+ * Yrl, Yml, Zr_m, 1,
+ * Yml, Yrl, Zm_r, 1,
+ * 0
+ * };
+ *
+ * so there are 5 possible types of ADDL instruction that can be laid down, and
+ * possible states used to lay them down (Ztype and z pointer, assuming z
+ * points at {0x83,(00),0x05,0x81,(00),0x01,0x03}) are:
+ *
+ * Yi8, Yml -> Zibo_m, z (0x83, 00)
+ * Yi32, Yax -> Zil_, z+2 (0x05)
+ * Yi32, Yml -> Zilo_m, z+2+1 (0x81, 0x00)
+ * Yrl, Yml -> Zr_m, z+2+1+2 (0x01)
+ * Yml, Yrl -> Zm_r, z+2+1+2+1 (0x03)
+ *
+ * The Pconstant in the optab line controls the prefix bytes to emit. That's
+ * relatively straightforward as this program goes.
+ *
+ * The switch on t[2] in doasm implements the various Z cases. Zibo_m, for
+ * example, is an opcode byte (z[0]) then an asmando (which is some kind of
+ * encoded addressing mode for the Yml arg), and then a single immediate byte.
+ * Zilo_m is the same but a long (32-bit) immediate.
+ */
+var optab =
+/* as, ytab, andproto, opcode */
+[]Optab{
+ Optab{AXXX, nil, 0, [23]uint8{}},
+ Optab{AAAA, ynone, P32, [23]uint8{0x37}},
+ Optab{AAAD, ynone, P32, [23]uint8{0xd5, 0x0a}},
+ Optab{AAAM, ynone, P32, [23]uint8{0xd4, 0x0a}},
+ Optab{AAAS, ynone, P32, [23]uint8{0x3f}},
+ Optab{AADCB, yxorb, Pb, [23]uint8{0x14, 0x80, 02, 0x10, 0x10}},
+ Optab{AADCL, yxorl, Px, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCQ, yxorl, Pw, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADCW, yxorl, Pe, [23]uint8{0x83, 02, 0x15, 0x81, 02, 0x11, 0x13}},
+ Optab{AADDB, yxorb, Pb, [23]uint8{0x04, 0x80, 00, 0x00, 0x02}},
+ Optab{AADDL, yaddl, Px, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDPD, yxm, Pq, [23]uint8{0x58}},
+ Optab{AADDPS, yxm, Pm, [23]uint8{0x58}},
+ Optab{AADDQ, yaddl, Pw, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADDSD, yxm, Pf2, [23]uint8{0x58}},
+ Optab{AADDSS, yxm, Pf3, [23]uint8{0x58}},
+ Optab{AADDW, yaddl, Pe, [23]uint8{0x83, 00, 0x05, 0x81, 00, 0x01, 0x03}},
+ Optab{AADJSP, nil, 0, [23]uint8{}},
+ Optab{AANDB, yxorb, Pb, [23]uint8{0x24, 0x80, 04, 0x20, 0x22}},
+ Optab{AANDL, yxorl, Px, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDNPD, yxm, Pq, [23]uint8{0x55}},
+ Optab{AANDNPS, yxm, Pm, [23]uint8{0x55}},
+ Optab{AANDPD, yxm, Pq, [23]uint8{0x54}},
+ Optab{AANDPS, yxm, Pq, [23]uint8{0x54}},
+ Optab{AANDQ, yxorl, Pw, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AANDW, yxorl, Pe, [23]uint8{0x83, 04, 0x25, 0x81, 04, 0x21, 0x23}},
+ Optab{AARPL, yrl_ml, P32, [23]uint8{0x63}},
+ Optab{ABOUNDL, yrl_m, P32, [23]uint8{0x62}},
+ Optab{ABOUNDW, yrl_m, Pe, [23]uint8{0x62}},
+ Optab{ABSFL, yml_rl, Pm, [23]uint8{0xbc}},
+ Optab{ABSFQ, yml_rl, Pw, [23]uint8{0x0f, 0xbc}},
+ Optab{ABSFW, yml_rl, Pq, [23]uint8{0xbc}},
+ Optab{ABSRL, yml_rl, Pm, [23]uint8{0xbd}},
+ Optab{ABSRQ, yml_rl, Pw, [23]uint8{0x0f, 0xbd}},
+ Optab{ABSRW, yml_rl, Pq, [23]uint8{0xbd}},
+ Optab{ABSWAPL, ybswap, Px, [23]uint8{0x0f, 0xc8}},
+ Optab{ABSWAPQ, ybswap, Pw, [23]uint8{0x0f, 0xc8}},
+ Optab{ABTCL, ybtl, Pm, [23]uint8{0xba, 07, 0xbb}},
+ Optab{ABTCQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 07, 0x0f, 0xbb}},
+ Optab{ABTCW, ybtl, Pq, [23]uint8{0xba, 07, 0xbb}},
+ Optab{ABTL, ybtl, Pm, [23]uint8{0xba, 04, 0xa3}},
+ Optab{ABTQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 04, 0x0f, 0xa3}},
+ Optab{ABTRL, ybtl, Pm, [23]uint8{0xba, 06, 0xb3}},
+ Optab{ABTRQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 06, 0x0f, 0xb3}},
+ Optab{ABTRW, ybtl, Pq, [23]uint8{0xba, 06, 0xb3}},
+ Optab{ABTSL, ybtl, Pm, [23]uint8{0xba, 05, 0xab}},
+ Optab{ABTSQ, ybtl, Pw, [23]uint8{0x0f, 0xba, 05, 0x0f, 0xab}},
+ Optab{ABTSW, ybtl, Pq, [23]uint8{0xba, 05, 0xab}},
+ Optab{ABTW, ybtl, Pq, [23]uint8{0xba, 04, 0xa3}},
+ Optab{ABYTE, ybyte, Px, [23]uint8{1}},
+ Optab{ACALL, ycall, Px, [23]uint8{0xff, 02, 0xe8}},
+ Optab{ACDQ, ynone, Px, [23]uint8{0x99}},
+ Optab{ACLC, ynone, Px, [23]uint8{0xf8}},
+ Optab{ACLD, ynone, Px, [23]uint8{0xfc}},
+ Optab{ACLI, ynone, Px, [23]uint8{0xfa}},
+ Optab{ACLTS, ynone, Pm, [23]uint8{0x06}},
+ Optab{ACMC, ynone, Px, [23]uint8{0xf5}},
+ Optab{ACMOVLCC, yml_rl, Pm, [23]uint8{0x43}},
+ Optab{ACMOVLCS, yml_rl, Pm, [23]uint8{0x42}},
+ Optab{ACMOVLEQ, yml_rl, Pm, [23]uint8{0x44}},
+ Optab{ACMOVLGE, yml_rl, Pm, [23]uint8{0x4d}},
+ Optab{ACMOVLGT, yml_rl, Pm, [23]uint8{0x4f}},
+ Optab{ACMOVLHI, yml_rl, Pm, [23]uint8{0x47}},
+ Optab{ACMOVLLE, yml_rl, Pm, [23]uint8{0x4e}},
+ Optab{ACMOVLLS, yml_rl, Pm, [23]uint8{0x46}},
+ Optab{ACMOVLLT, yml_rl, Pm, [23]uint8{0x4c}},
+ Optab{ACMOVLMI, yml_rl, Pm, [23]uint8{0x48}},
+ Optab{ACMOVLNE, yml_rl, Pm, [23]uint8{0x45}},
+ Optab{ACMOVLOC, yml_rl, Pm, [23]uint8{0x41}},
+ Optab{ACMOVLOS, yml_rl, Pm, [23]uint8{0x40}},
+ Optab{ACMOVLPC, yml_rl, Pm, [23]uint8{0x4b}},
+ Optab{ACMOVLPL, yml_rl, Pm, [23]uint8{0x49}},
+ Optab{ACMOVLPS, yml_rl, Pm, [23]uint8{0x4a}},
+ Optab{ACMOVQCC, yml_rl, Pw, [23]uint8{0x0f, 0x43}},
+ Optab{ACMOVQCS, yml_rl, Pw, [23]uint8{0x0f, 0x42}},
+ Optab{ACMOVQEQ, yml_rl, Pw, [23]uint8{0x0f, 0x44}},
+ Optab{ACMOVQGE, yml_rl, Pw, [23]uint8{0x0f, 0x4d}},
+ Optab{ACMOVQGT, yml_rl, Pw, [23]uint8{0x0f, 0x4f}},
+ Optab{ACMOVQHI, yml_rl, Pw, [23]uint8{0x0f, 0x47}},
+ Optab{ACMOVQLE, yml_rl, Pw, [23]uint8{0x0f, 0x4e}},
+ Optab{ACMOVQLS, yml_rl, Pw, [23]uint8{0x0f, 0x46}},
+ Optab{ACMOVQLT, yml_rl, Pw, [23]uint8{0x0f, 0x4c}},
+ Optab{ACMOVQMI, yml_rl, Pw, [23]uint8{0x0f, 0x48}},
+ Optab{ACMOVQNE, yml_rl, Pw, [23]uint8{0x0f, 0x45}},
+ Optab{ACMOVQOC, yml_rl, Pw, [23]uint8{0x0f, 0x41}},
+ Optab{ACMOVQOS, yml_rl, Pw, [23]uint8{0x0f, 0x40}},
+ Optab{ACMOVQPC, yml_rl, Pw, [23]uint8{0x0f, 0x4b}},
+ Optab{ACMOVQPL, yml_rl, Pw, [23]uint8{0x0f, 0x49}},
+ Optab{ACMOVQPS, yml_rl, Pw, [23]uint8{0x0f, 0x4a}},
+ Optab{ACMOVWCC, yml_rl, Pq, [23]uint8{0x43}},
+ Optab{ACMOVWCS, yml_rl, Pq, [23]uint8{0x42}},
+ Optab{ACMOVWEQ, yml_rl, Pq, [23]uint8{0x44}},
+ Optab{ACMOVWGE, yml_rl, Pq, [23]uint8{0x4d}},
+ Optab{ACMOVWGT, yml_rl, Pq, [23]uint8{0x4f}},
+ Optab{ACMOVWHI, yml_rl, Pq, [23]uint8{0x47}},
+ Optab{ACMOVWLE, yml_rl, Pq, [23]uint8{0x4e}},
+ Optab{ACMOVWLS, yml_rl, Pq, [23]uint8{0x46}},
+ Optab{ACMOVWLT, yml_rl, Pq, [23]uint8{0x4c}},
+ Optab{ACMOVWMI, yml_rl, Pq, [23]uint8{0x48}},
+ Optab{ACMOVWNE, yml_rl, Pq, [23]uint8{0x45}},
+ Optab{ACMOVWOC, yml_rl, Pq, [23]uint8{0x41}},
+ Optab{ACMOVWOS, yml_rl, Pq, [23]uint8{0x40}},
+ Optab{ACMOVWPC, yml_rl, Pq, [23]uint8{0x4b}},
+ Optab{ACMOVWPL, yml_rl, Pq, [23]uint8{0x49}},
+ Optab{ACMOVWPS, yml_rl, Pq, [23]uint8{0x4a}},
+ Optab{ACMPB, ycmpb, Pb, [23]uint8{0x3c, 0x80, 07, 0x38, 0x3a}},
+ Optab{ACMPL, ycmpl, Px, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPPD, yxcmpi, Px, [23]uint8{Pe, 0xc2}},
+ Optab{ACMPPS, yxcmpi, Pm, [23]uint8{0xc2, 0}},
+ Optab{ACMPQ, ycmpl, Pw, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACMPSB, ynone, Pb, [23]uint8{0xa6}},
+ Optab{ACMPSD, yxcmpi, Px, [23]uint8{Pf2, 0xc2}},
+ Optab{ACMPSL, ynone, Px, [23]uint8{0xa7}},
+ Optab{ACMPSQ, ynone, Pw, [23]uint8{0xa7}},
+ Optab{ACMPSS, yxcmpi, Px, [23]uint8{Pf3, 0xc2}},
+ Optab{ACMPSW, ynone, Pe, [23]uint8{0xa7}},
+ Optab{ACMPW, ycmpl, Pe, [23]uint8{0x83, 07, 0x3d, 0x81, 07, 0x39, 0x3b}},
+ Optab{ACOMISD, yxcmp, Pe, [23]uint8{0x2f}},
+ Optab{ACOMISS, yxcmp, Pm, [23]uint8{0x2f}},
+ Optab{ACPUID, ynone, Pm, [23]uint8{0xa2}},
+ Optab{ACVTPL2PD, yxcvm2, Px, [23]uint8{Pf3, 0xe6, Pe, 0x2a}},
+ Optab{ACVTPL2PS, yxcvm2, Pm, [23]uint8{0x5b, 0, 0x2a, 0}},
+ Optab{ACVTPD2PL, yxcvm1, Px, [23]uint8{Pf2, 0xe6, Pe, 0x2d}},
+ Optab{ACVTPD2PS, yxm, Pe, [23]uint8{0x5a}},
+ Optab{ACVTPS2PL, yxcvm1, Px, [23]uint8{Pe, 0x5b, Pm, 0x2d}},
+ Optab{ACVTPS2PD, yxm, Pm, [23]uint8{0x5a}},
+ Optab{API2FW, ymfp, Px, [23]uint8{0x0c}},
+ Optab{ACVTSD2SL, yxcvfl, Pf2, [23]uint8{0x2d}},
+ Optab{ACVTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2d}},
+ Optab{ACVTSD2SS, yxm, Pf2, [23]uint8{0x5a}},
+ Optab{ACVTSL2SD, yxcvlf, Pf2, [23]uint8{0x2a}},
+ Optab{ACVTSQ2SD, yxcvqf, Pw, [23]uint8{Pf2, 0x2a}},
+ Optab{ACVTSL2SS, yxcvlf, Pf3, [23]uint8{0x2a}},
+ Optab{ACVTSQ2SS, yxcvqf, Pw, [23]uint8{Pf3, 0x2a}},
+ Optab{ACVTSS2SD, yxm, Pf3, [23]uint8{0x5a}},
+ Optab{ACVTSS2SL, yxcvfl, Pf3, [23]uint8{0x2d}},
+ Optab{ACVTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2d}},
+ Optab{ACVTTPD2PL, yxcvm1, Px, [23]uint8{Pe, 0xe6, Pe, 0x2c}},
+ Optab{ACVTTPS2PL, yxcvm1, Px, [23]uint8{Pf3, 0x5b, Pm, 0x2c}},
+ Optab{ACVTTSD2SL, yxcvfl, Pf2, [23]uint8{0x2c}},
+ Optab{ACVTTSD2SQ, yxcvfq, Pw, [23]uint8{Pf2, 0x2c}},
+ Optab{ACVTTSS2SL, yxcvfl, Pf3, [23]uint8{0x2c}},
+ Optab{ACVTTSS2SQ, yxcvfq, Pw, [23]uint8{Pf3, 0x2c}},
+ Optab{ACWD, ynone, Pe, [23]uint8{0x99}},
+ Optab{ACQO, ynone, Pw, [23]uint8{0x99}},
+ Optab{ADAA, ynone, P32, [23]uint8{0x27}},
+ Optab{ADAS, ynone, P32, [23]uint8{0x2f}},
+ Optab{ADATA, nil, 0, [23]uint8{}},
+ Optab{ADECB, yincb, Pb, [23]uint8{0xfe, 01}},
+ Optab{ADECL, yincl, Px, [23]uint8{0xff, 01}},
+ Optab{ADECQ, yincl, Pw, [23]uint8{0xff, 01}},
+ Optab{ADECW, yincw, Pe, [23]uint8{0xff, 01}},
+ Optab{ADIVB, ydivb, Pb, [23]uint8{0xf6, 06}},
+ Optab{ADIVL, ydivl, Px, [23]uint8{0xf7, 06}},
+ Optab{ADIVPD, yxm, Pe, [23]uint8{0x5e}},
+ Optab{ADIVPS, yxm, Pm, [23]uint8{0x5e}},
+ Optab{ADIVQ, ydivl, Pw, [23]uint8{0xf7, 06}},
+ Optab{ADIVSD, yxm, Pf2, [23]uint8{0x5e}},
+ Optab{ADIVSS, yxm, Pf3, [23]uint8{0x5e}},
+ Optab{ADIVW, ydivl, Pe, [23]uint8{0xf7, 06}},
+ Optab{AEMMS, ynone, Pm, [23]uint8{0x77}},
+ Optab{AENTER, nil, 0, [23]uint8{}}, /* botch */
+ Optab{AFXRSTOR, ysvrs, Pm, [23]uint8{0xae, 01, 0xae, 01}},
+ Optab{AFXSAVE, ysvrs, Pm, [23]uint8{0xae, 00, 0xae, 00}},
+ Optab{AFXRSTOR64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 01, 0x0f, 0xae, 01}},
+ Optab{AFXSAVE64, ysvrs, Pw, [23]uint8{0x0f, 0xae, 00, 0x0f, 0xae, 00}},
+ Optab{AGLOBL, nil, 0, [23]uint8{}},
+ Optab{AGOK, nil, 0, [23]uint8{}},
+ Optab{AHISTORY, nil, 0, [23]uint8{}},
+ Optab{AHLT, ynone, Px, [23]uint8{0xf4}},
+ Optab{AIDIVB, ydivb, Pb, [23]uint8{0xf6, 07}},
+ Optab{AIDIVL, ydivl, Px, [23]uint8{0xf7, 07}},
+ Optab{AIDIVQ, ydivl, Pw, [23]uint8{0xf7, 07}},
+ Optab{AIDIVW, ydivl, Pe, [23]uint8{0xf7, 07}},
+ Optab{AIMULB, ydivb, Pb, [23]uint8{0xf6, 05}},
+ Optab{AIMULL, yimul, Px, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMULQ, yimul, Pw, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMULW, yimul, Pe, [23]uint8{0xf7, 05, 0x6b, 0x69, Pm, 0xaf}},
+ Optab{AIMUL3Q, yimul3, Pw, [23]uint8{0x6b, 00}},
+ Optab{AINB, yin, Pb, [23]uint8{0xe4, 0xec}},
+ Optab{AINCB, yincb, Pb, [23]uint8{0xfe, 00}},
+ Optab{AINCL, yincl, Px, [23]uint8{0xff, 00}},
+ Optab{AINCQ, yincl, Pw, [23]uint8{0xff, 00}},
+ Optab{AINCW, yincw, Pe, [23]uint8{0xff, 00}},
+ Optab{AINL, yin, Px, [23]uint8{0xe5, 0xed}},
+ Optab{AINSB, ynone, Pb, [23]uint8{0x6c}},
+ Optab{AINSL, ynone, Px, [23]uint8{0x6d}},
+ Optab{AINSW, ynone, Pe, [23]uint8{0x6d}},
+ Optab{AINT, yint, Px, [23]uint8{0xcd}},
+ Optab{AINTO, ynone, P32, [23]uint8{0xce}},
+ Optab{AINW, yin, Pe, [23]uint8{0xe5, 0xed}},
+ Optab{AIRETL, ynone, Px, [23]uint8{0xcf}},
+ Optab{AIRETQ, ynone, Pw, [23]uint8{0xcf}},
+ Optab{AIRETW, ynone, Pe, [23]uint8{0xcf}},
+ Optab{AJCC, yjcond, Px, [23]uint8{0x73, 0x83, 00}},
+ Optab{AJCS, yjcond, Px, [23]uint8{0x72, 0x82}},
+ Optab{AJCXZL, yloop, Px, [23]uint8{0xe3}},
+ Optab{AJCXZQ, yloop, Px, [23]uint8{0xe3}},
+ Optab{AJEQ, yjcond, Px, [23]uint8{0x74, 0x84}},
+ Optab{AJGE, yjcond, Px, [23]uint8{0x7d, 0x8d}},
+ Optab{AJGT, yjcond, Px, [23]uint8{0x7f, 0x8f}},
+ Optab{AJHI, yjcond, Px, [23]uint8{0x77, 0x87}},
+ Optab{AJLE, yjcond, Px, [23]uint8{0x7e, 0x8e}},
+ Optab{AJLS, yjcond, Px, [23]uint8{0x76, 0x86}},
+ Optab{AJLT, yjcond, Px, [23]uint8{0x7c, 0x8c}},
+ Optab{AJMI, yjcond, Px, [23]uint8{0x78, 0x88}},
+ Optab{AJMP, yjmp, Px, [23]uint8{0xff, 04, 0xeb, 0xe9}},
+ Optab{AJNE, yjcond, Px, [23]uint8{0x75, 0x85}},
+ Optab{AJOC, yjcond, Px, [23]uint8{0x71, 0x81, 00}},
+ Optab{AJOS, yjcond, Px, [23]uint8{0x70, 0x80, 00}},
+ Optab{AJPC, yjcond, Px, [23]uint8{0x7b, 0x8b}},
+ Optab{AJPL, yjcond, Px, [23]uint8{0x79, 0x89}},
+ Optab{AJPS, yjcond, Px, [23]uint8{0x7a, 0x8a}},
+ Optab{ALAHF, ynone, Px, [23]uint8{0x9f}},
+ Optab{ALARL, yml_rl, Pm, [23]uint8{0x02}},
+ Optab{ALARW, yml_rl, Pq, [23]uint8{0x02}},
+ Optab{ALDMXCSR, ysvrs, Pm, [23]uint8{0xae, 02, 0xae, 02}},
+ Optab{ALEAL, ym_rl, Px, [23]uint8{0x8d}},
+ Optab{ALEAQ, ym_rl, Pw, [23]uint8{0x8d}},
+ Optab{ALEAVEL, ynone, P32, [23]uint8{0xc9}},
+ Optab{ALEAVEQ, ynone, Py, [23]uint8{0xc9}},
+ Optab{ALEAVEW, ynone, Pe, [23]uint8{0xc9}},
+ Optab{ALEAW, ym_rl, Pe, [23]uint8{0x8d}},
+ Optab{ALOCK, ynone, Px, [23]uint8{0xf0}},
+ Optab{ALODSB, ynone, Pb, [23]uint8{0xac}},
+ Optab{ALODSL, ynone, Px, [23]uint8{0xad}},
+ Optab{ALODSQ, ynone, Pw, [23]uint8{0xad}},
+ Optab{ALODSW, ynone, Pe, [23]uint8{0xad}},
+ Optab{ALONG, ybyte, Px, [23]uint8{4}},
+ Optab{ALOOP, yloop, Px, [23]uint8{0xe2}},
+ Optab{ALOOPEQ, yloop, Px, [23]uint8{0xe1}},
+ Optab{ALOOPNE, yloop, Px, [23]uint8{0xe0}},
+ Optab{ALSLL, yml_rl, Pm, [23]uint8{0x03}},
+ Optab{ALSLW, yml_rl, Pq, [23]uint8{0x03}},
+ Optab{AMASKMOVOU, yxr, Pe, [23]uint8{0xf7}},
+ Optab{AMASKMOVQ, ymr, Pm, [23]uint8{0xf7}},
+ Optab{AMAXPD, yxm, Pe, [23]uint8{0x5f}},
+ Optab{AMAXPS, yxm, Pm, [23]uint8{0x5f}},
+ Optab{AMAXSD, yxm, Pf2, [23]uint8{0x5f}},
+ Optab{AMAXSS, yxm, Pf3, [23]uint8{0x5f}},
+ Optab{AMINPD, yxm, Pe, [23]uint8{0x5d}},
+ Optab{AMINPS, yxm, Pm, [23]uint8{0x5d}},
+ Optab{AMINSD, yxm, Pf2, [23]uint8{0x5d}},
+ Optab{AMINSS, yxm, Pf3, [23]uint8{0x5d}},
+ Optab{AMOVAPD, yxmov, Pe, [23]uint8{0x28, 0x29}},
+ Optab{AMOVAPS, yxmov, Pm, [23]uint8{0x28, 0x29}},
+ Optab{AMOVB, ymovb, Pb, [23]uint8{0x88, 0x8a, 0xb0, 0xc6, 00}},
+ Optab{AMOVBLSX, ymb_rl, Pm, [23]uint8{0xbe}},
+ Optab{AMOVBLZX, ymb_rl, Pm, [23]uint8{0xb6}},
+ Optab{AMOVBQSX, ymb_rl, Pw, [23]uint8{0x0f, 0xbe}},
+ Optab{AMOVBQZX, ymb_rl, Pm, [23]uint8{0xb6}},
+ Optab{AMOVBWSX, ymb_rl, Pq, [23]uint8{0xbe}},
+ Optab{AMOVBWZX, ymb_rl, Pq, [23]uint8{0xb6}},
+ Optab{AMOVO, yxmov, Pe, [23]uint8{0x6f, 0x7f}},
+ Optab{AMOVOU, yxmov, Pf3, [23]uint8{0x6f, 0x7f}},
+ Optab{AMOVHLPS, yxr, Pm, [23]uint8{0x12}},
+ Optab{AMOVHPD, yxmov, Pe, [23]uint8{0x16, 0x17}},
+ Optab{AMOVHPS, yxmov, Pm, [23]uint8{0x16, 0x17}},
+ Optab{AMOVL, ymovl, Px, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0x6e, 0x7e, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVLHPS, yxr, Pm, [23]uint8{0x16}},
+ Optab{AMOVLPD, yxmov, Pe, [23]uint8{0x12, 0x13}},
+ Optab{AMOVLPS, yxmov, Pm, [23]uint8{0x12, 0x13}},
+ Optab{AMOVLQSX, yml_rl, Pw, [23]uint8{0x63}},
+ Optab{AMOVLQZX, yml_rl, Px, [23]uint8{0x8b}},
+ Optab{AMOVMSKPD, yxrrl, Pq, [23]uint8{0x50}},
+ Optab{AMOVMSKPS, yxrrl, Pm, [23]uint8{0x50}},
+ Optab{AMOVNTO, yxr_ml, Pe, [23]uint8{0xe7}},
+ Optab{AMOVNTPD, yxr_ml, Pe, [23]uint8{0x2b}},
+ Optab{AMOVNTPS, yxr_ml, Pm, [23]uint8{0x2b}},
+ Optab{AMOVNTQ, ymr_ml, Pm, [23]uint8{0xe7}},
+ Optab{AMOVQ, ymovq, Pw, [23]uint8{0x89, 0x8b, 0x31, 0xc7, 00, 0xb8, 0xc7, 00, 0x6f, 0x7f, 0x6e, 0x7e, Pf2, 0xd6, Pf3, 0x7e, Pe, 0xd6, Pe, 0x6e, Pe, 0x7e, 0}},
+ Optab{AMOVQOZX, ymrxr, Pf3, [23]uint8{0xd6, 0x7e}},
+ Optab{AMOVSB, ynone, Pb, [23]uint8{0xa4}},
+ Optab{AMOVSD, yxmov, Pf2, [23]uint8{0x10, 0x11}},
+ Optab{AMOVSL, ynone, Px, [23]uint8{0xa5}},
+ Optab{AMOVSQ, ynone, Pw, [23]uint8{0xa5}},
+ Optab{AMOVSS, yxmov, Pf3, [23]uint8{0x10, 0x11}},
+ Optab{AMOVSW, ynone, Pe, [23]uint8{0xa5}},
+ Optab{AMOVUPD, yxmov, Pe, [23]uint8{0x10, 0x11}},
+ Optab{AMOVUPS, yxmov, Pm, [23]uint8{0x10, 0x11}},
+ Optab{AMOVW, ymovw, Pe, [23]uint8{0x89, 0x8b, 0x31, 0xb8, 0xc7, 00, 0}},
+ Optab{AMOVWLSX, yml_rl, Pm, [23]uint8{0xbf}},
+ Optab{AMOVWLZX, yml_rl, Pm, [23]uint8{0xb7}},
+ Optab{AMOVWQSX, yml_rl, Pw, [23]uint8{0x0f, 0xbf}},
+ Optab{AMOVWQZX, yml_rl, Pw, [23]uint8{0x0f, 0xb7}},
+ Optab{AMULB, ydivb, Pb, [23]uint8{0xf6, 04}},
+ Optab{AMULL, ydivl, Px, [23]uint8{0xf7, 04}},
+ Optab{AMULPD, yxm, Pe, [23]uint8{0x59}},
+ Optab{AMULPS, yxm, Ym, [23]uint8{0x59}},
+ Optab{AMULQ, ydivl, Pw, [23]uint8{0xf7, 04}},
+ Optab{AMULSD, yxm, Pf2, [23]uint8{0x59}},
+ Optab{AMULSS, yxm, Pf3, [23]uint8{0x59}},
+ Optab{AMULW, ydivl, Pe, [23]uint8{0xf7, 04}},
+ Optab{ANAME, nil, 0, [23]uint8{}},
+ Optab{ANEGB, yscond, Pb, [23]uint8{0xf6, 03}},
+ Optab{ANEGL, yscond, Px, [23]uint8{0xf7, 03}},
+ Optab{ANEGQ, yscond, Pw, [23]uint8{0xf7, 03}},
+ Optab{ANEGW, yscond, Pe, [23]uint8{0xf7, 03}},
+ Optab{ANOP, ynop, Px, [23]uint8{0, 0}},
+ Optab{ANOTB, yscond, Pb, [23]uint8{0xf6, 02}},
+ Optab{ANOTL, yscond, Px, [23]uint8{0xf7, 02}},
+ Optab{ANOTQ, yscond, Pw, [23]uint8{0xf7, 02}},
+ Optab{ANOTW, yscond, Pe, [23]uint8{0xf7, 02}},
+ Optab{AORB, yxorb, Pb, [23]uint8{0x0c, 0x80, 01, 0x08, 0x0a}},
+ Optab{AORL, yxorl, Px, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORPD, yxm, Pq, [23]uint8{0x56}},
+ Optab{AORPS, yxm, Pm, [23]uint8{0x56}},
+ Optab{AORQ, yxorl, Pw, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AORW, yxorl, Pe, [23]uint8{0x83, 01, 0x0d, 0x81, 01, 0x09, 0x0b}},
+ Optab{AOUTB, yin, Pb, [23]uint8{0xe6, 0xee}},
+ Optab{AOUTL, yin, Px, [23]uint8{0xe7, 0xef}},
+ Optab{AOUTSB, ynone, Pb, [23]uint8{0x6e}},
+ Optab{AOUTSL, ynone, Px, [23]uint8{0x6f}},
+ Optab{AOUTSW, ynone, Pe, [23]uint8{0x6f}},
+ Optab{AOUTW, yin, Pe, [23]uint8{0xe7, 0xef}},
+ Optab{APACKSSLW, ymm, Py, [23]uint8{0x6b, Pe, 0x6b}},
+ Optab{APACKSSWB, ymm, Py, [23]uint8{0x63, Pe, 0x63}},
+ Optab{APACKUSWB, ymm, Py, [23]uint8{0x67, Pe, 0x67}},
+ Optab{APADDB, ymm, Py, [23]uint8{0xfc, Pe, 0xfc}},
+ Optab{APADDL, ymm, Py, [23]uint8{0xfe, Pe, 0xfe}},
+ Optab{APADDQ, yxm, Pe, [23]uint8{0xd4}},
+ Optab{APADDSB, ymm, Py, [23]uint8{0xec, Pe, 0xec}},
+ Optab{APADDSW, ymm, Py, [23]uint8{0xed, Pe, 0xed}},
+ Optab{APADDUSB, ymm, Py, [23]uint8{0xdc, Pe, 0xdc}},
+ Optab{APADDUSW, ymm, Py, [23]uint8{0xdd, Pe, 0xdd}},
+ Optab{APADDW, ymm, Py, [23]uint8{0xfd, Pe, 0xfd}},
+ Optab{APAND, ymm, Py, [23]uint8{0xdb, Pe, 0xdb}},
+ Optab{APANDN, ymm, Py, [23]uint8{0xdf, Pe, 0xdf}},
+ Optab{APAUSE, ynone, Px, [23]uint8{0xf3, 0x90}},
+ Optab{APAVGB, ymm, Py, [23]uint8{0xe0, Pe, 0xe0}},
+ Optab{APAVGW, ymm, Py, [23]uint8{0xe3, Pe, 0xe3}},
+ Optab{APCMPEQB, ymm, Py, [23]uint8{0x74, Pe, 0x74}},
+ Optab{APCMPEQL, ymm, Py, [23]uint8{0x76, Pe, 0x76}},
+ Optab{APCMPEQW, ymm, Py, [23]uint8{0x75, Pe, 0x75}},
+ Optab{APCMPGTB, ymm, Py, [23]uint8{0x64, Pe, 0x64}},
+ Optab{APCMPGTL, ymm, Py, [23]uint8{0x66, Pe, 0x66}},
+ Optab{APCMPGTW, ymm, Py, [23]uint8{0x65, Pe, 0x65}},
+ Optab{APEXTRW, yextrw, Pq, [23]uint8{0xc5, 00}},
+ Optab{APF2IL, ymfp, Px, [23]uint8{0x1d}},
+ Optab{APF2IW, ymfp, Px, [23]uint8{0x1c}},
+ Optab{API2FL, ymfp, Px, [23]uint8{0x0d}},
+ Optab{APFACC, ymfp, Px, [23]uint8{0xae}},
+ Optab{APFADD, ymfp, Px, [23]uint8{0x9e}},
+ Optab{APFCMPEQ, ymfp, Px, [23]uint8{0xb0}},
+ Optab{APFCMPGE, ymfp, Px, [23]uint8{0x90}},
+ Optab{APFCMPGT, ymfp, Px, [23]uint8{0xa0}},
+ Optab{APFMAX, ymfp, Px, [23]uint8{0xa4}},
+ Optab{APFMIN, ymfp, Px, [23]uint8{0x94}},
+ Optab{APFMUL, ymfp, Px, [23]uint8{0xb4}},
+ Optab{APFNACC, ymfp, Px, [23]uint8{0x8a}},
+ Optab{APFPNACC, ymfp, Px, [23]uint8{0x8e}},
+ Optab{APFRCP, ymfp, Px, [23]uint8{0x96}},
+ Optab{APFRCPIT1, ymfp, Px, [23]uint8{0xa6}},
+ Optab{APFRCPI2T, ymfp, Px, [23]uint8{0xb6}},
+ Optab{APFRSQIT1, ymfp, Px, [23]uint8{0xa7}},
+ Optab{APFRSQRT, ymfp, Px, [23]uint8{0x97}},
+ Optab{APFSUB, ymfp, Px, [23]uint8{0x9a}},
+ Optab{APFSUBR, ymfp, Px, [23]uint8{0xaa}},
+ Optab{APINSRW, yinsrw, Pq, [23]uint8{0xc4, 00}},
+ Optab{APINSRD, yinsr, Pq, [23]uint8{0x3a, 0x22, 00}},
+ Optab{APINSRQ, yinsr, Pq3, [23]uint8{0x3a, 0x22, 00}},
+ Optab{APMADDWL, ymm, Py, [23]uint8{0xf5, Pe, 0xf5}},
+ Optab{APMAXSW, yxm, Pe, [23]uint8{0xee}},
+ Optab{APMAXUB, yxm, Pe, [23]uint8{0xde}},
+ Optab{APMINSW, yxm, Pe, [23]uint8{0xea}},
+ Optab{APMINUB, yxm, Pe, [23]uint8{0xda}},
+ Optab{APMOVMSKB, ymskb, Px, [23]uint8{Pe, 0xd7, 0xd7}},
+ Optab{APMULHRW, ymfp, Px, [23]uint8{0xb7}},
+ Optab{APMULHUW, ymm, Py, [23]uint8{0xe4, Pe, 0xe4}},
+ Optab{APMULHW, ymm, Py, [23]uint8{0xe5, Pe, 0xe5}},
+ Optab{APMULLW, ymm, Py, [23]uint8{0xd5, Pe, 0xd5}},
+ Optab{APMULULQ, ymm, Py, [23]uint8{0xf4, Pe, 0xf4}},
+ Optab{APOPAL, ynone, P32, [23]uint8{0x61}},
+ Optab{APOPAW, ynone, Pe, [23]uint8{0x61}},
+ Optab{APOPFL, ynone, P32, [23]uint8{0x9d}},
+ Optab{APOPFQ, ynone, Py, [23]uint8{0x9d}},
+ Optab{APOPFW, ynone, Pe, [23]uint8{0x9d}},
+ Optab{APOPL, ypopl, P32, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOPQ, ypopl, Py, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOPW, ypopl, Pe, [23]uint8{0x58, 0x8f, 00}},
+ Optab{APOR, ymm, Py, [23]uint8{0xeb, Pe, 0xeb}},
+ Optab{APSADBW, yxm, Pq, [23]uint8{0xf6}},
+ Optab{APSHUFHW, yxshuf, Pf3, [23]uint8{0x70, 00}},
+ Optab{APSHUFL, yxshuf, Pq, [23]uint8{0x70, 00}},
+ Optab{APSHUFLW, yxshuf, Pf2, [23]uint8{0x70, 00}},
+ Optab{APSHUFW, ymshuf, Pm, [23]uint8{0x70, 00}},
+ Optab{APSHUFB, ymshufb, Pq, [23]uint8{0x38, 0x00}},
+ Optab{APSLLO, ypsdq, Pq, [23]uint8{0x73, 07}},
+ Optab{APSLLL, yps, Py, [23]uint8{0xf2, 0x72, 06, Pe, 0xf2, Pe, 0x72, 06}},
+ Optab{APSLLQ, yps, Py, [23]uint8{0xf3, 0x73, 06, Pe, 0xf3, Pe, 0x73, 06}},
+ Optab{APSLLW, yps, Py, [23]uint8{0xf1, 0x71, 06, Pe, 0xf1, Pe, 0x71, 06}},
+ Optab{APSRAL, yps, Py, [23]uint8{0xe2, 0x72, 04, Pe, 0xe2, Pe, 0x72, 04}},
+ Optab{APSRAW, yps, Py, [23]uint8{0xe1, 0x71, 04, Pe, 0xe1, Pe, 0x71, 04}},
+ Optab{APSRLO, ypsdq, Pq, [23]uint8{0x73, 03}},
+ Optab{APSRLL, yps, Py, [23]uint8{0xd2, 0x72, 02, Pe, 0xd2, Pe, 0x72, 02}},
+ Optab{APSRLQ, yps, Py, [23]uint8{0xd3, 0x73, 02, Pe, 0xd3, Pe, 0x73, 02}},
+ Optab{APSRLW, yps, Py, [23]uint8{0xd1, 0x71, 02, Pe, 0xe1, Pe, 0x71, 02}},
+ Optab{APSUBB, yxm, Pe, [23]uint8{0xf8}},
+ Optab{APSUBL, yxm, Pe, [23]uint8{0xfa}},
+ Optab{APSUBQ, yxm, Pe, [23]uint8{0xfb}},
+ Optab{APSUBSB, yxm, Pe, [23]uint8{0xe8}},
+ Optab{APSUBSW, yxm, Pe, [23]uint8{0xe9}},
+ Optab{APSUBUSB, yxm, Pe, [23]uint8{0xd8}},
+ Optab{APSUBUSW, yxm, Pe, [23]uint8{0xd9}},
+ Optab{APSUBW, yxm, Pe, [23]uint8{0xf9}},
+ Optab{APSWAPL, ymfp, Px, [23]uint8{0xbb}},
+ Optab{APUNPCKHBW, ymm, Py, [23]uint8{0x68, Pe, 0x68}},
+ Optab{APUNPCKHLQ, ymm, Py, [23]uint8{0x6a, Pe, 0x6a}},
+ Optab{APUNPCKHQDQ, yxm, Pe, [23]uint8{0x6d}},
+ Optab{APUNPCKHWL, ymm, Py, [23]uint8{0x69, Pe, 0x69}},
+ Optab{APUNPCKLBW, ymm, Py, [23]uint8{0x60, Pe, 0x60}},
+ Optab{APUNPCKLLQ, ymm, Py, [23]uint8{0x62, Pe, 0x62}},
+ Optab{APUNPCKLQDQ, yxm, Pe, [23]uint8{0x6c}},
+ Optab{APUNPCKLWL, ymm, Py, [23]uint8{0x61, Pe, 0x61}},
+ Optab{APUSHAL, ynone, P32, [23]uint8{0x60}},
+ Optab{APUSHAW, ynone, Pe, [23]uint8{0x60}},
+ Optab{APUSHFL, ynone, P32, [23]uint8{0x9c}},
+ Optab{APUSHFQ, ynone, Py, [23]uint8{0x9c}},
+ Optab{APUSHFW, ynone, Pe, [23]uint8{0x9c}},
+ Optab{APUSHL, ypushl, P32, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHQ, ypushl, Py, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APUSHW, ypushl, Pe, [23]uint8{0x50, 0xff, 06, 0x6a, 0x68}},
+ Optab{APXOR, ymm, Py, [23]uint8{0xef, Pe, 0xef}},
+ Optab{AQUAD, ybyte, Px, [23]uint8{8}},
+ Optab{ARCLB, yshb, Pb, [23]uint8{0xd0, 02, 0xc0, 02, 0xd2, 02}},
+ Optab{ARCLL, yshl, Px, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLQ, yshl, Pw, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCLW, yshl, Pe, [23]uint8{0xd1, 02, 0xc1, 02, 0xd3, 02, 0xd3, 02}},
+ Optab{ARCPPS, yxm, Pm, [23]uint8{0x53}},
+ Optab{ARCPSS, yxm, Pf3, [23]uint8{0x53}},
+ Optab{ARCRB, yshb, Pb, [23]uint8{0xd0, 03, 0xc0, 03, 0xd2, 03}},
+ Optab{ARCRL, yshl, Px, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRQ, yshl, Pw, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{ARCRW, yshl, Pe, [23]uint8{0xd1, 03, 0xc1, 03, 0xd3, 03, 0xd3, 03}},
+ Optab{AREP, ynone, Px, [23]uint8{0xf3}},
+ Optab{AREPN, ynone, Px, [23]uint8{0xf2}},
+ Optab{ARET, ynone, Px, [23]uint8{0xc3}},
+ Optab{ARETFW, yret, Pe, [23]uint8{0xcb, 0xca}},
+ Optab{ARETFL, yret, Px, [23]uint8{0xcb, 0xca}},
+ Optab{ARETFQ, yret, Pw, [23]uint8{0xcb, 0xca}},
+ Optab{AROLB, yshb, Pb, [23]uint8{0xd0, 00, 0xc0, 00, 0xd2, 00}},
+ Optab{AROLL, yshl, Px, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLQ, yshl, Pw, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{AROLW, yshl, Pe, [23]uint8{0xd1, 00, 0xc1, 00, 0xd3, 00, 0xd3, 00}},
+ Optab{ARORB, yshb, Pb, [23]uint8{0xd0, 01, 0xc0, 01, 0xd2, 01}},
+ Optab{ARORL, yshl, Px, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORQ, yshl, Pw, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARORW, yshl, Pe, [23]uint8{0xd1, 01, 0xc1, 01, 0xd3, 01, 0xd3, 01}},
+ Optab{ARSQRTPS, yxm, Pm, [23]uint8{0x52}},
+ Optab{ARSQRTSS, yxm, Pf3, [23]uint8{0x52}},
+ Optab{ASAHF, ynone, Px, [23]uint8{0x86, 0xe0, 0x50, 0x9d}}, /* XCHGB AH,AL; PUSH AX; POPFL */
+ Optab{ASALB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASALL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASALW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASARB, yshb, Pb, [23]uint8{0xd0, 07, 0xc0, 07, 0xd2, 07}},
+ Optab{ASARL, yshl, Px, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARQ, yshl, Pw, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASARW, yshl, Pe, [23]uint8{0xd1, 07, 0xc1, 07, 0xd3, 07, 0xd3, 07}},
+ Optab{ASBBB, yxorb, Pb, [23]uint8{0x1c, 0x80, 03, 0x18, 0x1a}},
+ Optab{ASBBL, yxorl, Px, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBQ, yxorl, Pw, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASBBW, yxorl, Pe, [23]uint8{0x83, 03, 0x1d, 0x81, 03, 0x19, 0x1b}},
+ Optab{ASCASB, ynone, Pb, [23]uint8{0xae}},
+ Optab{ASCASL, ynone, Px, [23]uint8{0xaf}},
+ Optab{ASCASQ, ynone, Pw, [23]uint8{0xaf}},
+ Optab{ASCASW, ynone, Pe, [23]uint8{0xaf}},
+ Optab{ASETCC, yscond, Pb, [23]uint8{0x0f, 0x93, 00}},
+ Optab{ASETCS, yscond, Pb, [23]uint8{0x0f, 0x92, 00}},
+ Optab{ASETEQ, yscond, Pb, [23]uint8{0x0f, 0x94, 00}},
+ Optab{ASETGE, yscond, Pb, [23]uint8{0x0f, 0x9d, 00}},
+ Optab{ASETGT, yscond, Pb, [23]uint8{0x0f, 0x9f, 00}},
+ Optab{ASETHI, yscond, Pb, [23]uint8{0x0f, 0x97, 00}},
+ Optab{ASETLE, yscond, Pb, [23]uint8{0x0f, 0x9e, 00}},
+ Optab{ASETLS, yscond, Pb, [23]uint8{0x0f, 0x96, 00}},
+ Optab{ASETLT, yscond, Pb, [23]uint8{0x0f, 0x9c, 00}},
+ Optab{ASETMI, yscond, Pb, [23]uint8{0x0f, 0x98, 00}},
+ Optab{ASETNE, yscond, Pb, [23]uint8{0x0f, 0x95, 00}},
+ Optab{ASETOC, yscond, Pb, [23]uint8{0x0f, 0x91, 00}},
+ Optab{ASETOS, yscond, Pb, [23]uint8{0x0f, 0x90, 00}},
+ Optab{ASETPC, yscond, Pb, [23]uint8{0x0f, 0x9b, 00}},
+ Optab{ASETPL, yscond, Pb, [23]uint8{0x0f, 0x99, 00}},
+ Optab{ASETPS, yscond, Pb, [23]uint8{0x0f, 0x9a, 00}},
+ Optab{ASHLB, yshb, Pb, [23]uint8{0xd0, 04, 0xc0, 04, 0xd2, 04}},
+ Optab{ASHLL, yshl, Px, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLQ, yshl, Pw, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHLW, yshl, Pe, [23]uint8{0xd1, 04, 0xc1, 04, 0xd3, 04, 0xd3, 04}},
+ Optab{ASHRB, yshb, Pb, [23]uint8{0xd0, 05, 0xc0, 05, 0xd2, 05}},
+ Optab{ASHRL, yshl, Px, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRQ, yshl, Pw, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHRW, yshl, Pe, [23]uint8{0xd1, 05, 0xc1, 05, 0xd3, 05, 0xd3, 05}},
+ Optab{ASHUFPD, yxshuf, Pq, [23]uint8{0xc6, 00}},
+ Optab{ASHUFPS, yxshuf, Pm, [23]uint8{0xc6, 00}},
+ Optab{ASQRTPD, yxm, Pe, [23]uint8{0x51}},
+ Optab{ASQRTPS, yxm, Pm, [23]uint8{0x51}},
+ Optab{ASQRTSD, yxm, Pf2, [23]uint8{0x51}},
+ Optab{ASQRTSS, yxm, Pf3, [23]uint8{0x51}},
+ Optab{ASTC, ynone, Px, [23]uint8{0xf9}},
+ Optab{ASTD, ynone, Px, [23]uint8{0xfd}},
+ Optab{ASTI, ynone, Px, [23]uint8{0xfb}},
+ Optab{ASTMXCSR, ysvrs, Pm, [23]uint8{0xae, 03, 0xae, 03}},
+ Optab{ASTOSB, ynone, Pb, [23]uint8{0xaa}},
+ Optab{ASTOSL, ynone, Px, [23]uint8{0xab}},
+ Optab{ASTOSQ, ynone, Pw, [23]uint8{0xab}},
+ Optab{ASTOSW, ynone, Pe, [23]uint8{0xab}},
+ Optab{ASUBB, yxorb, Pb, [23]uint8{0x2c, 0x80, 05, 0x28, 0x2a}},
+ Optab{ASUBL, yaddl, Px, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBPD, yxm, Pe, [23]uint8{0x5c}},
+ Optab{ASUBPS, yxm, Pm, [23]uint8{0x5c}},
+ Optab{ASUBQ, yaddl, Pw, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASUBSD, yxm, Pf2, [23]uint8{0x5c}},
+ Optab{ASUBSS, yxm, Pf3, [23]uint8{0x5c}},
+ Optab{ASUBW, yaddl, Pe, [23]uint8{0x83, 05, 0x2d, 0x81, 05, 0x29, 0x2b}},
+ Optab{ASWAPGS, ynone, Pm, [23]uint8{0x01, 0xf8}},
+ Optab{ASYSCALL, ynone, Px, [23]uint8{0x0f, 0x05}}, /* fast syscall */
+ Optab{ATESTB, ytestb, Pb, [23]uint8{0xa8, 0xf6, 00, 0x84, 0x84}},
+ Optab{ATESTL, ytestl, Px, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTQ, ytestl, Pw, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATESTW, ytestl, Pe, [23]uint8{0xa9, 0xf7, 00, 0x85, 0x85}},
+ Optab{ATEXT, ytext, Px, [23]uint8{}},
+ Optab{AUCOMISD, yxcmp, Pe, [23]uint8{0x2e}},
+ Optab{AUCOMISS, yxcmp, Pm, [23]uint8{0x2e}},
+ Optab{AUNPCKHPD, yxm, Pe, [23]uint8{0x15}},
+ Optab{AUNPCKHPS, yxm, Pm, [23]uint8{0x15}},
+ Optab{AUNPCKLPD, yxm, Pe, [23]uint8{0x14}},
+ Optab{AUNPCKLPS, yxm, Pm, [23]uint8{0x14}},
+ Optab{AVERR, ydivl, Pm, [23]uint8{0x00, 04}},
+ Optab{AVERW, ydivl, Pm, [23]uint8{0x00, 05}},
+ Optab{AWAIT, ynone, Px, [23]uint8{0x9b}},
+ Optab{AWORD, ybyte, Px, [23]uint8{2}},
+ Optab{AXCHGB, yml_mb, Pb, [23]uint8{0x86, 0x86}},
+ Optab{AXCHGL, yxchg, Px, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGQ, yxchg, Pw, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXCHGW, yxchg, Pe, [23]uint8{0x90, 0x90, 0x87, 0x87}},
+ Optab{AXLAT, ynone, Px, [23]uint8{0xd7}},
+ Optab{AXORB, yxorb, Pb, [23]uint8{0x34, 0x80, 06, 0x30, 0x32}},
+ Optab{AXORL, yxorl, Px, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORPD, yxm, Pe, [23]uint8{0x57}},
+ Optab{AXORPS, yxm, Pm, [23]uint8{0x57}},
+ Optab{AXORQ, yxorl, Pw, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AXORW, yxorl, Pe, [23]uint8{0x83, 06, 0x35, 0x81, 06, 0x31, 0x33}},
+ Optab{AFMOVB, yfmvx, Px, [23]uint8{0xdf, 04}},
+ Optab{AFMOVBP, yfmvp, Px, [23]uint8{0xdf, 06}},
+ Optab{AFMOVD, yfmvd, Px, [23]uint8{0xdd, 00, 0xdd, 02, 0xd9, 00, 0xdd, 02}},
+ Optab{AFMOVDP, yfmvdp, Px, [23]uint8{0xdd, 03, 0xdd, 03}},
+ Optab{AFMOVF, yfmvf, Px, [23]uint8{0xd9, 00, 0xd9, 02}},
+ Optab{AFMOVFP, yfmvp, Px, [23]uint8{0xd9, 03}},
+ Optab{AFMOVL, yfmvf, Px, [23]uint8{0xdb, 00, 0xdb, 02}},
+ Optab{AFMOVLP, yfmvp, Px, [23]uint8{0xdb, 03}},
+ Optab{AFMOVV, yfmvx, Px, [23]uint8{0xdf, 05}},
+ Optab{AFMOVVP, yfmvp, Px, [23]uint8{0xdf, 07}},
+ Optab{AFMOVW, yfmvf, Px, [23]uint8{0xdf, 00, 0xdf, 02}},
+ Optab{AFMOVWP, yfmvp, Px, [23]uint8{0xdf, 03}},
+ Optab{AFMOVX, yfmvx, Px, [23]uint8{0xdb, 05}},
+ Optab{AFMOVXP, yfmvp, Px, [23]uint8{0xdb, 07}},
+ Optab{AFCOMB, nil, 0, [23]uint8{}},
+ Optab{AFCOMBP, nil, 0, [23]uint8{}},
+ Optab{AFCOMD, yfadd, Px, [23]uint8{0xdc, 02, 0xd8, 02, 0xdc, 02}}, /* botch */
+ Optab{AFCOMDP, yfadd, Px, [23]uint8{0xdc, 03, 0xd8, 03, 0xdc, 03}}, /* botch */
+ Optab{AFCOMDPP, ycompp, Px, [23]uint8{0xde, 03}},
+ Optab{AFCOMF, yfmvx, Px, [23]uint8{0xd8, 02}},
+ Optab{AFCOMFP, yfmvx, Px, [23]uint8{0xd8, 03}},
+ Optab{AFCOML, yfmvx, Px, [23]uint8{0xda, 02}},
+ Optab{AFCOMLP, yfmvx, Px, [23]uint8{0xda, 03}},
+ Optab{AFCOMW, yfmvx, Px, [23]uint8{0xde, 02}},
+ Optab{AFCOMWP, yfmvx, Px, [23]uint8{0xde, 03}},
+ Optab{AFUCOM, ycompp, Px, [23]uint8{0xdd, 04}},
+ Optab{AFUCOMP, ycompp, Px, [23]uint8{0xdd, 05}},
+ Optab{AFUCOMPP, ycompp, Px, [23]uint8{0xda, 13}},
+ Optab{AFADDDP, yfaddp, Px, [23]uint8{0xde, 00}},
+ Optab{AFADDW, yfmvx, Px, [23]uint8{0xde, 00}},
+ Optab{AFADDL, yfmvx, Px, [23]uint8{0xda, 00}},
+ Optab{AFADDF, yfmvx, Px, [23]uint8{0xd8, 00}},
+ Optab{AFADDD, yfadd, Px, [23]uint8{0xdc, 00, 0xd8, 00, 0xdc, 00}},
+ Optab{AFMULDP, yfaddp, Px, [23]uint8{0xde, 01}},
+ Optab{AFMULW, yfmvx, Px, [23]uint8{0xde, 01}},
+ Optab{AFMULL, yfmvx, Px, [23]uint8{0xda, 01}},
+ Optab{AFMULF, yfmvx, Px, [23]uint8{0xd8, 01}},
+ Optab{AFMULD, yfadd, Px, [23]uint8{0xdc, 01, 0xd8, 01, 0xdc, 01}},
+ Optab{AFSUBDP, yfaddp, Px, [23]uint8{0xde, 05}},
+ Optab{AFSUBW, yfmvx, Px, [23]uint8{0xde, 04}},
+ Optab{AFSUBL, yfmvx, Px, [23]uint8{0xda, 04}},
+ Optab{AFSUBF, yfmvx, Px, [23]uint8{0xd8, 04}},
+ Optab{AFSUBD, yfadd, Px, [23]uint8{0xdc, 04, 0xd8, 04, 0xdc, 05}},
+ Optab{AFSUBRDP, yfaddp, Px, [23]uint8{0xde, 04}},
+ Optab{AFSUBRW, yfmvx, Px, [23]uint8{0xde, 05}},
+ Optab{AFSUBRL, yfmvx, Px, [23]uint8{0xda, 05}},
+ Optab{AFSUBRF, yfmvx, Px, [23]uint8{0xd8, 05}},
+ Optab{AFSUBRD, yfadd, Px, [23]uint8{0xdc, 05, 0xd8, 05, 0xdc, 04}},
+ Optab{AFDIVDP, yfaddp, Px, [23]uint8{0xde, 07}},
+ Optab{AFDIVW, yfmvx, Px, [23]uint8{0xde, 06}},
+ Optab{AFDIVL, yfmvx, Px, [23]uint8{0xda, 06}},
+ Optab{AFDIVF, yfmvx, Px, [23]uint8{0xd8, 06}},
+ Optab{AFDIVD, yfadd, Px, [23]uint8{0xdc, 06, 0xd8, 06, 0xdc, 07}},
+ Optab{AFDIVRDP, yfaddp, Px, [23]uint8{0xde, 06}},
+ Optab{AFDIVRW, yfmvx, Px, [23]uint8{0xde, 07}},
+ Optab{AFDIVRL, yfmvx, Px, [23]uint8{0xda, 07}},
+ Optab{AFDIVRF, yfmvx, Px, [23]uint8{0xd8, 07}},
+ Optab{AFDIVRD, yfadd, Px, [23]uint8{0xdc, 07, 0xd8, 07, 0xdc, 06}},
+ Optab{AFXCHD, yfxch, Px, [23]uint8{0xd9, 01, 0xd9, 01}},
+ Optab{AFFREE, nil, 0, [23]uint8{}},
+ Optab{AFLDCW, ystcw, Px, [23]uint8{0xd9, 05, 0xd9, 05}},
+ Optab{AFLDENV, ystcw, Px, [23]uint8{0xd9, 04, 0xd9, 04}},
+ Optab{AFRSTOR, ysvrs, Px, [23]uint8{0xdd, 04, 0xdd, 04}},
+ Optab{AFSAVE, ysvrs, Px, [23]uint8{0xdd, 06, 0xdd, 06}},
+ Optab{AFSTCW, ystcw, Px, [23]uint8{0xd9, 07, 0xd9, 07}},
+ Optab{AFSTENV, ystcw, Px, [23]uint8{0xd9, 06, 0xd9, 06}},
+ Optab{AFSTSW, ystsw, Px, [23]uint8{0xdd, 07, 0xdf, 0xe0}},
+ Optab{AF2XM1, ynone, Px, [23]uint8{0xd9, 0xf0}},
+ Optab{AFABS, ynone, Px, [23]uint8{0xd9, 0xe1}},
+ Optab{AFCHS, ynone, Px, [23]uint8{0xd9, 0xe0}},
+ Optab{AFCLEX, ynone, Px, [23]uint8{0xdb, 0xe2}},
+ Optab{AFCOS, ynone, Px, [23]uint8{0xd9, 0xff}},
+ Optab{AFDECSTP, ynone, Px, [23]uint8{0xd9, 0xf6}},
+ Optab{AFINCSTP, ynone, Px, [23]uint8{0xd9, 0xf7}},
+ Optab{AFINIT, ynone, Px, [23]uint8{0xdb, 0xe3}},
+ Optab{AFLD1, ynone, Px, [23]uint8{0xd9, 0xe8}},
+ Optab{AFLDL2E, ynone, Px, [23]uint8{0xd9, 0xea}},
+ Optab{AFLDL2T, ynone, Px, [23]uint8{0xd9, 0xe9}},
+ Optab{AFLDLG2, ynone, Px, [23]uint8{0xd9, 0xec}},
+ Optab{AFLDLN2, ynone, Px, [23]uint8{0xd9, 0xed}},
+ Optab{AFLDPI, ynone, Px, [23]uint8{0xd9, 0xeb}},
+ Optab{AFLDZ, ynone, Px, [23]uint8{0xd9, 0xee}},
+ Optab{AFNOP, ynone, Px, [23]uint8{0xd9, 0xd0}},
+ Optab{AFPATAN, ynone, Px, [23]uint8{0xd9, 0xf3}},
+ Optab{AFPREM, ynone, Px, [23]uint8{0xd9, 0xf8}},
+ Optab{AFPREM1, ynone, Px, [23]uint8{0xd9, 0xf5}},
+ Optab{AFPTAN, ynone, Px, [23]uint8{0xd9, 0xf2}},
+ Optab{AFRNDINT, ynone, Px, [23]uint8{0xd9, 0xfc}},
+ Optab{AFSCALE, ynone, Px, [23]uint8{0xd9, 0xfd}},
+ Optab{AFSIN, ynone, Px, [23]uint8{0xd9, 0xfe}},
+ Optab{AFSINCOS, ynone, Px, [23]uint8{0xd9, 0xfb}},
+ Optab{AFSQRT, ynone, Px, [23]uint8{0xd9, 0xfa}},
+ Optab{AFTST, ynone, Px, [23]uint8{0xd9, 0xe4}},
+ Optab{AFXAM, ynone, Px, [23]uint8{0xd9, 0xe5}},
+ Optab{AFXTRACT, ynone, Px, [23]uint8{0xd9, 0xf4}},
+ Optab{AFYL2X, ynone, Px, [23]uint8{0xd9, 0xf1}},
+ Optab{AFYL2XP1, ynone, Px, [23]uint8{0xd9, 0xf9}},
+ Optab{ACMPXCHGB, yrb_mb, Pb, [23]uint8{0x0f, 0xb0}},
+ Optab{ACMPXCHGL, yrl_ml, Px, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHGW, yrl_ml, Pe, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHGQ, yrl_ml, Pw, [23]uint8{0x0f, 0xb1}},
+ Optab{ACMPXCHG8B, yscond, Pm, [23]uint8{0xc7, 01}},
+ Optab{AINVD, ynone, Pm, [23]uint8{0x08}},
+ Optab{AINVLPG, ymbs, Pm, [23]uint8{0x01, 07}},
+ Optab{ALFENCE, ynone, Pm, [23]uint8{0xae, 0xe8}},
+ Optab{AMFENCE, ynone, Pm, [23]uint8{0xae, 0xf0}},
+ Optab{AMOVNTIL, yrl_ml, Pm, [23]uint8{0xc3}},
+ Optab{AMOVNTIQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc3}},
+ Optab{ARDMSR, ynone, Pm, [23]uint8{0x32}},
+ Optab{ARDPMC, ynone, Pm, [23]uint8{0x33}},
+ Optab{ARDTSC, ynone, Pm, [23]uint8{0x31}},
+ Optab{ARSM, ynone, Pm, [23]uint8{0xaa}},
+ Optab{ASFENCE, ynone, Pm, [23]uint8{0xae, 0xf8}},
+ Optab{ASYSRET, ynone, Pm, [23]uint8{0x07}},
+ Optab{AWBINVD, ynone, Pm, [23]uint8{0x09}},
+ Optab{AWRMSR, ynone, Pm, [23]uint8{0x30}},
+ Optab{AXADDB, yrb_mb, Pb, [23]uint8{0x0f, 0xc0}},
+ Optab{AXADDL, yrl_ml, Px, [23]uint8{0x0f, 0xc1}},
+ Optab{AXADDQ, yrl_ml, Pw, [23]uint8{0x0f, 0xc1}},
+ Optab{AXADDW, yrl_ml, Pe, [23]uint8{0x0f, 0xc1}},
+ Optab{ACRC32B, ycrc32l, Px, [23]uint8{0xf2, 0x0f, 0x38, 0xf0, 0}},
+ Optab{ACRC32Q, ycrc32l, Pw, [23]uint8{0xf2, 0x0f, 0x38, 0xf1, 0}},
+ Optab{APREFETCHT0, yprefetch, Pm, [23]uint8{0x18, 01}},
+ Optab{APREFETCHT1, yprefetch, Pm, [23]uint8{0x18, 02}},
+ Optab{APREFETCHT2, yprefetch, Pm, [23]uint8{0x18, 03}},
+ Optab{APREFETCHNTA, yprefetch, Pm, [23]uint8{0x18, 00}},
+ Optab{AMOVQL, yrl_ml, Px, [23]uint8{0x89}},
+ Optab{AUNDEF, ynone, Px, [23]uint8{0x0f, 0x0b}},
+ Optab{AAESENC, yaes, Pq, [23]uint8{0x38, 0xdc, 0}},
+ Optab{AAESENCLAST, yaes, Pq, [23]uint8{0x38, 0xdd, 0}},
+ Optab{AAESDEC, yaes, Pq, [23]uint8{0x38, 0xde, 0}},
+ Optab{AAESDECLAST, yaes, Pq, [23]uint8{0x38, 0xdf, 0}},
+ Optab{AAESIMC, yaes, Pq, [23]uint8{0x38, 0xdb, 0}},
+ Optab{AAESKEYGENASSIST, yaes2, Pq, [23]uint8{0x3a, 0xdf, 0}},
+ Optab{APSHUFD, yaes2, Pq, [23]uint8{0x70, 0}},
+ Optab{APCLMULQDQ, yxshuf, Pq, [23]uint8{0x3a, 0x44, 0}},
+ Optab{AUSEFIELD, ynop, Px, [23]uint8{0, 0}},
+ Optab{ATYPE, nil, 0, [23]uint8{}},
+ Optab{AFUNCDATA, yfuncdata, Px, [23]uint8{0, 0}},
+ Optab{APCDATA, ypcdata, Px, [23]uint8{0, 0}},
+ Optab{ACHECKNIL, nil, 0, [23]uint8{}},
+ Optab{AVARDEF, nil, 0, [23]uint8{}},
+ Optab{AVARKILL, nil, 0, [23]uint8{}},
+ Optab{ADUFFCOPY, yduff, Px, [23]uint8{0xe8}},
+ Optab{ADUFFZERO, yduff, Px, [23]uint8{0xe8}},
+ Optab{AEND, nil, 0, [23]uint8{}},
+ Optab{0, nil, 0, [23]uint8{}},
+}
+
+// opindex maps an instruction code (A* constant) to its optab row; filled in by instinit.
+var opindex [ALAST + 1]*Optab
+
+// isextern reports whether s describes an external symbol that must avoid pc-relative addressing.
+// This happens on systems like Solaris that call .so functions instead of system calls.
+// It does not seem to be necessary for any other systems. This is probably working
+// around a Solaris-specific bug that should be fixed differently, but we don't know
+// what that bug is. And this does fix it.
+// Returns a C-style boolean (1 for true, 0 for false) via bool2int.
+func isextern(s *obj.LSym) int {
+
+ // All the Solaris dynamic imports from libc.so begin with "libc_".
+ return bool2int(strings.HasPrefix(s.Name, "libc_"))
+}
+
+// single-instruction no-ops of various lengths.
+// constructed by hand and disassembled with gdb to verify.
+// see http://www.agner.org/optimize/optimizing_assembly.pdf for discussion.
+// nop[i] holds the (i+1)-byte no-op; callers index by desired length minus one.
+var nop = [][16]uint8{
+ [16]uint8{0x90},
+ [16]uint8{0x66, 0x90},
+ [16]uint8{0x0F, 0x1F, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x40, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x44, 0x00, 0x00},
+ [16]uint8{0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},
+ [16]uint8{0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+ [16]uint8{0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+}
+
+// Native Client rejects the repeated 0x66 prefix.
+// {0x66, 0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
+func fillnop(p []byte, n int) {
+
+ var m int
+
+ for n > 0 {
+ m = n
+ if m > len(nop) {
+ m = len(nop)
+ }
+ copy(p[:m], nop[m-1][:m])
+ p = p[m:]
+ n -= m
+ }
+}
+
+// naclpad grows symbol s and appends pad bytes of no-ops starting at
+// offset c, returning the new code offset.
+func naclpad(ctxt *obj.Link, s *obj.LSym, c int32, pad int32) int32 {
+ end := int64(c) + int64(pad)
+ obj.Symgrow(ctxt, s, end)
+ fillnop(s.P[c:], int(pad))
+ return c + pad
+}
+
+// spadjop picks the 64-bit opcode q when assembling true 64-bit code
+// (64-bit mode with 8-byte pointers), and the 32-bit opcode l otherwise.
+func spadjop(ctxt *obj.Link, p *obj.Prog, l int, q int) int {
+ if p.Mode == 64 && ctxt.Arch.Ptrsize != 4 {
+ return q
+ }
+ return l
+}
+
+// span6 assembles the single function symbol s: it sizes each Prog,
+// iteratively resolves short vs. long branch encodings, applies NaCl
+// 32-byte bundle padding where required, and writes the final machine
+// code into s.P.
+func span6(ctxt *obj.Link, s *obj.LSym) {
+ var p *obj.Prog
+ var q *obj.Prog
+ var c int32
+ var v int32
+ var loop int32
+ var bp []byte
+ var n int
+ var m int
+ var i int
+
+ ctxt.Cursym = s
+
+ // Already assembled; nothing to do.
+ if s.P != nil {
+ return
+ }
+
+ // Lazily build the lookup tables on first use.
+ if ycover[0] == 0 {
+ instinit()
+ }
+
+ // Pass 1: resolve dangling branches (point them at themselves) and
+ // rewrite AADJSP into an explicit ADD/SUB $v, SP.
+ for p = ctxt.Cursym.Text; p != nil; p = p.Link {
+ n = 0
+ if p.To.Type_ == D_BRANCH {
+ if p.Pcond == nil {
+ p.Pcond = p
+ }
+ }
+ q = p.Pcond
+ if q != nil {
+ if q.Back != 2 {
+ n = 1
+ }
+ }
+ p.Back = uint8(n)
+ if p.As == AADJSP {
+ p.To.Type_ = D_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ if v < 0 {
+ p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = ANOP
+ }
+ }
+ }
+
+ // Pass 2: start every branch in short form; mark backward jumps and
+ // loop heads so loop-alignment padding can be applied below.
+ // NOTE(review): the AADJSP rewrite here duplicates pass 1 (it can no
+ // longer fire since pass 1 already replaced AADJSP), and the Back
+ // value computed in pass 1 is overwritten here — this looks like a
+ // C-to-Go translation artifact; confirm against the original asm6.
+ for p = s.Text; p != nil; p = p.Link {
+ p.Back = 2 // use short branches first time through
+ q = p.Pcond
+ if q != nil && (q.Back&2 != 0) {
+ p.Back |= 1 // backward jump
+ q.Back |= 4 // loop head
+ }
+
+ if p.As == AADJSP {
+ p.To.Type_ = D_SP
+ v = int32(-p.From.Offset)
+ p.From.Offset = int64(v)
+ p.As = int16(spadjop(ctxt, p, AADDL, AADDQ))
+ if v < 0 {
+ p.As = int16(spadjop(ctxt, p, ASUBL, ASUBQ))
+ v = -v
+ p.From.Offset = int64(v)
+ }
+
+ if v == 0 {
+ p.As = ANOP
+ }
+ }
+ }
+
+ // Assemble repeatedly until no branch flips from short to long form
+ // (each flip can change instruction sizes and thus other branches).
+ n = 0
+ for {
+ loop = 0
+ for i = 0; i < len(s.R); i++ {
+ s.R[i] = obj.Reloc{}
+ }
+ s.R = s.R[:0]
+ s.P = s.P[:0]
+ c = 0
+ for p = s.Text; p != nil; p = p.Link {
+ if ctxt.Headtype == obj.Hnacl && p.Isize > 0 {
+ var deferreturn *obj.LSym
+
+ // NOTE(review): deferreturn is declared inside the loop, so
+ // it is nil on every iteration and Linklookup runs once per
+ // instruction; hoisting it out would avoid repeated lookups.
+ if deferreturn == nil {
+ deferreturn = obj.Linklookup(ctxt, "runtime.deferreturn", 0)
+ }
+
+ // pad everything to avoid crossing 32-byte boundary
+ if c>>5 != (c+int32(p.Isize)-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call deferreturn to start at 32-byte boundary
+ // so that subtracting 5 in jmpdefer will jump back
+ // to that boundary and rerun the call.
+ if p.As == ACALL && p.To.Sym == deferreturn {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // pad call to end at 32-byte boundary
+ if p.As == ACALL {
+
+ c = naclpad(ctxt, s, c, -(c+int32(p.Isize))&31)
+ }
+
+ // the linker treats REP and STOSQ as different instructions
+ // but in fact the REP is a prefix on the STOSQ.
+ // make sure REP has room for 2 more bytes, so that
+ // padding will not be inserted before the next instruction.
+ if (p.As == AREP || p.As == AREPN) && c>>5 != (c+3-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ // same for LOCK.
+ // various instructions follow; the longest is 4 bytes.
+ // give ourselves 8 bytes so as to avoid surprises.
+ if p.As == ALOCK && c>>5 != (c+8-1)>>5 {
+
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+ }
+
+ // Align loop heads to LoopAlign, but only if the padding cost
+ // stays within MaxLoopPad.
+ if (p.Back&4 != 0) && c&(LoopAlign-1) != 0 {
+ // pad with NOPs
+ v = -c & (LoopAlign - 1)
+
+ if v <= MaxLoopPad {
+ obj.Symgrow(ctxt, s, int64(c)+int64(v))
+ fillnop(s.P[c:], int(v))
+ c += v
+ }
+ }
+
+ p.Pc = int64(c)
+
+ // process forward jumps to p
+ for q = p.Comefrom; q != nil; q = q.Forwd {
+
+ v = int32(p.Pc - (q.Pc + int64(q.Mark)))
+ if q.Back&2 != 0 { // short
+ if v > 127 {
+ loop++
+ q.Back ^= 2
+ }
+
+ if q.As == AJCXZL {
+ s.P[q.Pc+2] = byte(v)
+ } else {
+
+ s.P[q.Pc+1] = byte(v)
+ }
+ } else {
+
+ // Long form: patch the 4-byte little-endian displacement
+ // at the end of the already-emitted jump.
+ bp = s.P[q.Pc+int64(q.Mark)-4:]
+ bp[0] = byte(v)
+ bp = bp[1:]
+ bp[0] = byte(v >> 8)
+ bp = bp[1:]
+ bp[0] = byte(v >> 16)
+ bp = bp[1:]
+ bp[0] = byte(v >> 24)
+ }
+ }
+
+ p.Comefrom = nil
+
+ // Assemble this instruction into ctxt.And, then copy it into s.P.
+ p.Pc = int64(c)
+ asmins(ctxt, p)
+ m = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+ if int(p.Isize) != m {
+ p.Isize = uint8(m)
+ loop++
+ }
+
+ obj.Symgrow(ctxt, s, p.Pc+int64(m))
+ copy(s.P[p.Pc:][:m], ctxt.And[:m])
+ p.Mark = uint16(m)
+ c += int32(m)
+ }
+
+ // Guard against a non-converging short/long oscillation.
+ n++
+ if n > 20 {
+ ctxt.Diag("span must be looping")
+ log.Fatalf("loop")
+ }
+ if !(loop != 0) {
+ break
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl {
+ c = naclpad(ctxt, s, c, -c&31)
+ }
+
+ c += -c & (FuncAlign - 1)
+ s.Size = int64(c)
+
+ if false { /* debug['a'] > 1 */
+ fmt.Printf("span1 %s %d (%d tries)\n %.6x", s.Name, s.Size, n, 0)
+ for i = 0; i < len(s.P); i++ {
+ fmt.Printf(" %.2x", s.P[i])
+ if i%16 == 15 {
+ fmt.Printf("\n %.6x", uint(i+1))
+ }
+ }
+
+ if i%16 != 0 {
+ fmt.Printf("\n")
+ }
+
+ for i = 0; i < len(s.R); i++ {
+ var r *obj.Reloc
+
+ r = &s.R[i]
+ fmt.Printf(" rel %#.4x/%d %s%+d\n", uint32(r.Off), r.Siz, r.Sym.Name, r.Add)
+ }
+ }
+}
+
+// instinit builds the assembler lookup tables: opindex (instruction ->
+// optab row), the ycover operand-class compatibility matrix, and the
+// reg/regrex register-encoding tables. Called once, lazily, from span6.
+func instinit() {
+ var c int
+ var i int
+
+ // Index optab by instruction code; a duplicate entry is a fatal
+ // table-construction error.
+ for i = 1; optab[i].as != 0; i++ {
+ c = int(optab[i].as)
+ if opindex[c] != nil {
+ log.Fatalf("phase error in optab: %d (%v)", i, Aconv(c))
+ }
+ opindex[c] = &optab[i]
+ }
+
+ // ycover[from*Ymax+to] != 0 means an operand of class 'from' is
+ // acceptable where class 'to' is required. Every class covers itself.
+ for i = 0; i < Ymax; i++ {
+ ycover[i*Ymax+i] = 1
+ }
+
+ ycover[Yi0*Ymax+Yi8] = 1
+ ycover[Yi1*Ymax+Yi8] = 1
+
+ ycover[Yi0*Ymax+Ys32] = 1
+ ycover[Yi1*Ymax+Ys32] = 1
+ ycover[Yi8*Ymax+Ys32] = 1
+
+ ycover[Yi0*Ymax+Yi32] = 1
+ ycover[Yi1*Ymax+Yi32] = 1
+ ycover[Yi8*Ymax+Yi32] = 1
+ ycover[Ys32*Ymax+Yi32] = 1
+
+ ycover[Yi0*Ymax+Yi64] = 1
+ ycover[Yi1*Ymax+Yi64] = 1
+ ycover[Yi8*Ymax+Yi64] = 1
+ ycover[Ys32*Ymax+Yi64] = 1
+ ycover[Yi32*Ymax+Yi64] = 1
+
+ ycover[Yal*Ymax+Yrb] = 1
+ ycover[Ycl*Ymax+Yrb] = 1
+ ycover[Yax*Ymax+Yrb] = 1
+ ycover[Ycx*Ymax+Yrb] = 1
+ ycover[Yrx*Ymax+Yrb] = 1
+ ycover[Yrl*Ymax+Yrb] = 1
+
+ ycover[Ycl*Ymax+Ycx] = 1
+
+ ycover[Yax*Ymax+Yrx] = 1
+ ycover[Ycx*Ymax+Yrx] = 1
+
+ ycover[Yax*Ymax+Yrl] = 1
+ ycover[Ycx*Ymax+Yrl] = 1
+ ycover[Yrx*Ymax+Yrl] = 1
+
+ ycover[Yf0*Ymax+Yrf] = 1
+
+ ycover[Yal*Ymax+Ymb] = 1
+ ycover[Ycl*Ymax+Ymb] = 1
+ ycover[Yax*Ymax+Ymb] = 1
+ ycover[Ycx*Ymax+Ymb] = 1
+ ycover[Yrx*Ymax+Ymb] = 1
+ ycover[Yrb*Ymax+Ymb] = 1
+ ycover[Yrl*Ymax+Ymb] = 1
+ ycover[Ym*Ymax+Ymb] = 1
+
+ ycover[Yax*Ymax+Yml] = 1
+ ycover[Ycx*Ymax+Yml] = 1
+ ycover[Yrx*Ymax+Yml] = 1
+ ycover[Yrl*Ymax+Yml] = 1
+ ycover[Ym*Ymax+Yml] = 1
+
+ ycover[Yax*Ymax+Ymm] = 1
+ ycover[Ycx*Ymax+Ymm] = 1
+ ycover[Yrx*Ymax+Ymm] = 1
+ ycover[Yrl*Ymax+Ymm] = 1
+ ycover[Ym*Ymax+Ymm] = 1
+ ycover[Ymr*Ymax+Ymm] = 1
+
+ ycover[Ym*Ymax+Yxm] = 1
+ ycover[Yxr*Ymax+Yxm] = 1
+
+ // reg[i] is the low 3 bits of register i's machine encoding;
+ // regrex[i] carries the REX bits needed for registers 8-15, and
+ // 0x40 for the byte registers SPB..DIB.
+ for i = 0; i < D_NONE; i++ {
+ reg[i] = -1
+ if i >= D_AL && i <= D_R15B {
+ reg[i] = (i - D_AL) & 7
+ if i >= D_SPB && i <= D_DIB {
+ regrex[i] = 0x40
+ }
+ if i >= D_R8B && i <= D_R15B {
+ regrex[i] = Rxr | Rxx | Rxb
+ }
+ }
+
+ if i >= D_AH && i <= D_BH {
+ reg[i] = 4 + ((i - D_AH) & 7)
+ }
+ if i >= D_AX && i <= D_R15 {
+ reg[i] = (i - D_AX) & 7
+ if i >= D_R8 {
+ regrex[i] = Rxr | Rxx | Rxb
+ }
+ }
+
+ if i >= D_F0 && i <= D_F0+7 {
+ reg[i] = (i - D_F0) & 7
+ }
+ if i >= D_M0 && i <= D_M0+7 {
+ reg[i] = (i - D_M0) & 7
+ }
+ if i >= D_X0 && i <= D_X0+15 {
+ reg[i] = (i - D_X0) & 7
+ if i >= D_X0+8 {
+ regrex[i] = Rxr | Rxx | Rxb
+ }
+ }
+
+ if i >= D_CR+8 && i <= D_CR+15 {
+ regrex[i] = Rxr
+ }
+ }
+}
+
+// prefixof returns the x86 segment-override prefix byte required by
+// address a (checking both the base type and the index register), or
+// 0 when no prefix is needed. TLS references map to FS or GS
+// depending on the target OS.
+func prefixof(ctxt *obj.Link, a *obj.Addr) int {
+ // 0x26/0x2e/0x3e/0x64/0x65 are the ES/CS/DS/FS/GS override prefixes.
+ switch a.Type_ {
+ case D_INDIR + D_CS:
+ return 0x2e
+
+ case D_INDIR + D_DS:
+ return 0x3e
+
+ case D_INDIR + D_ES:
+ return 0x26
+
+ case D_INDIR + D_FS:
+ return 0x64
+
+ case D_INDIR + D_GS:
+ return 0x65
+
+ // NOTE: Systems listed here should be only systems that
+ // support direct TLS references like 8(TLS) implemented as
+ // direct references from FS or GS. Systems that require
+ // the initial-exec model, where you load the TLS base into
+ // a register and then index from that register, do not reach
+ // this code and should not be listed.
+ case D_INDIR + D_TLS:
+ switch ctxt.Headtype {
+
+ default:
+ log.Fatalf("unknown TLS base register for %s", obj.Headstr(ctxt.Headtype))
+
+ case obj.Hdragonfly,
+ obj.Hfreebsd,
+ obj.Hlinux,
+ obj.Hnetbsd,
+ obj.Hopenbsd,
+ obj.Hsolaris:
+ return 0x64 // FS
+
+ case obj.Hdarwin:
+ return 0x65 // GS
+ }
+ }
+
+ // A segment register used as the index also forces an override.
+ switch a.Index {
+ case D_CS:
+ return 0x2e
+
+ case D_DS:
+ return 0x3e
+
+ case D_ES:
+ return 0x26
+
+ case D_FS:
+ return 0x64
+
+ case D_GS:
+ return 0x65
+ }
+
+ return 0
+}
+
+// oclass classifies the addressing mode a into one of the Y* operand
+// classes that optab rows and the ycover matrix are matched against.
+// Yxxx means "no valid class".
+func oclass(ctxt *obj.Link, a *obj.Addr) int {
+ var v int64
+ var l int32
+
+ // Indirect or indexed forms.
+ if a.Type_ >= D_INDIR || a.Index != D_NONE {
+ if a.Index != D_NONE && a.Scale == 0 {
+ if a.Type_ == D_ADDR {
+ switch a.Index {
+ case D_EXTERN,
+ D_STATIC:
+ if a.Sym != nil && isextern(a.Sym) != 0 {
+ return Yi32
+ }
+ return Yiauto // use pc-relative addressing
+
+ case D_AUTO,
+ D_PARAM:
+ return Yiauto
+ }
+
+ return Yxxx
+ }
+
+ return Ycol
+ }
+
+ return Ym
+ }
+
+ switch a.Type_ {
+ case D_AL:
+ return Yal
+
+ case D_AX:
+ return Yax
+
+ /*
+ case D_SPB:
+ */
+ // Byte registers only reachable with a REX prefix: 64-bit mode only.
+ case D_BPB,
+ D_SIB,
+ D_DIB,
+ D_R8B,
+ D_R9B,
+ D_R10B,
+ D_R11B,
+ D_R12B,
+ D_R13B,
+ D_R14B,
+ D_R15B:
+ if ctxt.Asmode != 64 {
+
+ return Yxxx
+ }
+ fallthrough
+
+ case D_DL,
+ D_BL,
+ D_AH,
+ D_CH,
+ D_DH,
+ D_BH:
+ return Yrb
+
+ case D_CL:
+ return Ycl
+
+ case D_CX:
+ return Ycx
+
+ case D_DX,
+ D_BX:
+ return Yrx
+
+ case D_R8, /* not really Yrl */
+ D_R9,
+ D_R10,
+ D_R11,
+ D_R12,
+ D_R13,
+ D_R14,
+ D_R15:
+ if ctxt.Asmode != 64 {
+
+ return Yxxx
+ }
+ fallthrough
+
+ case D_SP,
+ D_BP,
+ D_SI,
+ D_DI:
+ return Yrl
+
+ case D_F0 + 0:
+ return Yf0
+
+ case D_F0 + 1,
+ D_F0 + 2,
+ D_F0 + 3,
+ D_F0 + 4,
+ D_F0 + 5,
+ D_F0 + 6,
+ D_F0 + 7:
+ return Yrf
+
+ case D_M0 + 0,
+ D_M0 + 1,
+ D_M0 + 2,
+ D_M0 + 3,
+ D_M0 + 4,
+ D_M0 + 5,
+ D_M0 + 6,
+ D_M0 + 7:
+ return Ymr
+
+ case D_X0 + 0,
+ D_X0 + 1,
+ D_X0 + 2,
+ D_X0 + 3,
+ D_X0 + 4,
+ D_X0 + 5,
+ D_X0 + 6,
+ D_X0 + 7,
+ D_X0 + 8,
+ D_X0 + 9,
+ D_X0 + 10,
+ D_X0 + 11,
+ D_X0 + 12,
+ D_X0 + 13,
+ D_X0 + 14,
+ D_X0 + 15:
+ return Yxr
+
+ case D_NONE:
+ return Ynone
+
+ case D_CS:
+ return Ycs
+ case D_SS:
+ return Yss
+ case D_DS:
+ return Yds
+ case D_ES:
+ return Yes
+ case D_FS:
+ return Yfs
+ case D_GS:
+ return Ygs
+ case D_TLS:
+ return Ytls
+
+ case D_GDTR:
+ return Ygdtr
+ case D_IDTR:
+ return Yidtr
+ case D_LDTR:
+ return Yldtr
+ case D_MSW:
+ return Ymsw
+ case D_TASK:
+ return Ytask
+
+ case D_CR + 0:
+ return Ycr0
+ case D_CR + 1:
+ return Ycr1
+ case D_CR + 2:
+ return Ycr2
+ case D_CR + 3:
+ return Ycr3
+ case D_CR + 4:
+ return Ycr4
+ case D_CR + 5:
+ return Ycr5
+ case D_CR + 6:
+ return Ycr6
+ case D_CR + 7:
+ return Ycr7
+ case D_CR + 8:
+ return Ycr8
+
+ case D_DR + 0:
+ return Ydr0
+ case D_DR + 1:
+ return Ydr1
+ case D_DR + 2:
+ return Ydr2
+ case D_DR + 3:
+ return Ydr3
+ case D_DR + 4:
+ return Ydr4
+ case D_DR + 5:
+ return Ydr5
+ case D_DR + 6:
+ return Ydr6
+ case D_DR + 7:
+ return Ydr7
+
+ case D_TR + 0:
+ return Ytr0
+ case D_TR + 1:
+ return Ytr1
+ case D_TR + 2:
+ return Ytr2
+ case D_TR + 3:
+ return Ytr3
+ case D_TR + 4:
+ return Ytr4
+ case D_TR + 5:
+ return Ytr5
+ case D_TR + 6:
+ return Ytr6
+ case D_TR + 7:
+ return Ytr7
+
+ case D_EXTERN,
+ D_STATIC,
+ D_AUTO,
+ D_PARAM:
+ return Ym
+
+ // Pick the narrowest immediate class the constant fits in.
+ case D_CONST,
+ D_ADDR:
+ if a.Sym == nil {
+ v = a.Offset
+ if v == 0 {
+ return Yi0
+ }
+ if v == 1 {
+ return Yi1
+ }
+ if v >= -128 && v <= 127 {
+ return Yi8
+ }
+ l = int32(v)
+ if int64(l) == v {
+ return Ys32 /* can sign extend */
+ }
+ if v>>32 == 0 {
+ return Yi32 /* unsigned */
+ }
+ return Yi64
+ }
+
+ return Yi32
+
+ case D_BRANCH:
+ return Ybr
+ }
+
+ return Yxxx
+}
+
+// asmidx emits the SIB byte for the given scale/index/base triple
+// (scale in bits 6-7, index in bits 3-5, base in bits 0-2). On an
+// invalid combination it diagnoses and emits a 0 byte instead.
+func asmidx(ctxt *obj.Link, scale int, index int, base int) {
+ var i int
+
+ switch index {
+ default:
+ goto bad
+
+ case D_NONE:
+ // index 4 means "no index register" in SIB encoding.
+ i = 4 << 3
+ goto bas
+
+ case D_R8,
+ D_R9,
+ D_R10,
+ D_R11,
+ D_R12,
+ D_R13,
+ D_R14,
+ D_R15:
+ if ctxt.Asmode != 64 {
+ goto bad
+ }
+ fallthrough
+
+ case D_AX,
+ D_CX,
+ D_DX,
+ D_BX,
+ D_BP,
+ D_SI,
+ D_DI:
+ i = reg[index] << 3
+ break
+ }
+
+ switch scale {
+ default:
+ goto bad
+
+ case 1:
+ break
+
+ case 2:
+ i |= 1 << 6
+
+ case 4:
+ i |= 2 << 6
+
+ case 8:
+ i |= 3 << 6
+ break
+ }
+
+bas:
+ switch base {
+ default:
+ goto bad
+
+ case D_NONE: /* must be mod=00 */
+ i |= 5
+
+ case D_R8,
+ D_R9,
+ D_R10,
+ D_R11,
+ D_R12,
+ D_R13,
+ D_R14,
+ D_R15:
+ if ctxt.Asmode != 64 {
+ goto bad
+ }
+ fallthrough
+
+ case D_AX,
+ D_CX,
+ D_DX,
+ D_BX,
+ D_SP,
+ D_BP,
+ D_SI,
+ D_DI:
+ i |= reg[base]
+ break
+ }
+
+ ctxt.Andptr[0] = byte(i)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+
+bad:
+ ctxt.Diag("asmidx: bad address %d/%d/%d", scale, index, base)
+ ctxt.Andptr[0] = 0
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+}
+
+// put4 appends v to the instruction buffer in little-endian byte order.
+func put4(ctxt *obj.Link, v int32) {
+ for i := uint(0); i < 4; i++ {
+ ctxt.Andptr[i] = byte(v >> (8 * i))
+ }
+ ctxt.Andptr = ctxt.Andptr[4:]
+}
+
+// relput4 emits the 4-byte value of address a, recording a relocation
+// on the current symbol when vaddr reports one is required.
+func relput4(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ var rel obj.Reloc
+
+ v := vaddr(ctxt, p, a, &rel)
+ if rel.Siz != 0 {
+ if rel.Siz != 4 {
+ ctxt.Diag("bad reloc")
+ }
+ r := obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, int32(v))
+}
+
+// put8 appends v to the instruction buffer in little-endian byte order.
+func put8(ctxt *obj.Link, v int64) {
+ for i := uint(0); i < 8; i++ {
+ ctxt.Andptr[i] = byte(v >> (8 * i))
+ }
+ ctxt.Andptr = ctxt.Andptr[8:]
+}
+
+/*
+static void
+relput8(Prog *p, Addr *a)
+{
+ vlong v;
+ Reloc rel, *r;
+
+ v = vaddr(ctxt, p, a, &rel);
+ if(rel.siz != 0) {
+ r = addrel(ctxt->cursym);
+ *r = rel;
+ r->siz = 8;
+ r->off = p->pc + ctxt->andptr - ctxt->and;
+ }
+ put8(ctxt, v);
+}
+*/
+// vaddr returns the immediate/offset value of address a. For symbolic
+// references (extern/static) and TLS it instead fills in *r with the
+// required relocation (leaving r.Off for the caller to set) and
+// returns 0. A nil r for such an address is a fatal error.
+func vaddr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r *obj.Reloc) int64 {
+
+ var t int
+ var v int64
+ var s *obj.LSym
+
+ if r != nil {
+ *r = obj.Reloc{}
+ }
+
+ t = int(a.Type_)
+ v = a.Offset
+ if t == D_ADDR {
+ t = int(a.Index)
+ }
+ switch t {
+ case D_STATIC,
+ D_EXTERN:
+ s = a.Sym
+ if r == nil {
+ ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+ log.Fatalf("reloc")
+ }
+
+ // External symbols get absolute relocations (see isextern);
+ // everything else is pc-relative.
+ if isextern(s) != 0 {
+ r.Siz = 4
+ r.Type_ = obj.R_ADDR
+ } else {
+
+ r.Siz = 4
+ r.Type_ = obj.R_PCREL
+ }
+
+ r.Off = -1 // caller must fill in
+ r.Sym = s
+ r.Add = v
+ v = 0
+ if s.Type_ == obj.STLSBSS {
+ r.Xadd = r.Add - int64(r.Siz)
+ r.Type_ = obj.R_TLS
+ r.Xsym = s
+ }
+
+ case D_INDIR + D_TLS:
+ if r == nil {
+ ctxt.Diag("need reloc for %v", Dconv(p, 0, a))
+ log.Fatalf("reloc")
+ }
+
+ r.Type_ = obj.R_TLS_LE
+ r.Siz = 4
+ r.Off = -1 // caller must fill in
+ r.Add = v
+ v = 0
+ break
+ }
+
+ return v
+}
+
+// asmandsz assembles the ModRM byte (plus optional SIB byte and
+// displacement) for operand a, with r supplying the ModRM reg field
+// and rex any extra REX bits. Relocations for symbolic addresses are
+// recorded via the putrelv path. The m64 parameter is currently unused.
+func asmandsz(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, r int, rex int, m64 int) {
+ var v int32
+ var t int
+ var scale int
+ var rel obj.Reloc
+
+ rex &= 0x40 | Rxr
+ v = int32(a.Offset)
+ t = int(a.Type_)
+ rel.Siz = 0
+ // Indexed addressing: always needs a SIB byte.
+ if a.Index != D_NONE && a.Index != D_TLS {
+ if t < D_INDIR {
+ switch t {
+ default:
+ goto bad
+
+ case D_EXTERN,
+ D_STATIC:
+ if !(isextern(a.Sym) != 0) {
+ goto bad
+ }
+ t = D_NONE
+ v = int32(vaddr(ctxt, p, a, &rel))
+
+ case D_AUTO,
+ D_PARAM:
+ t = D_SP
+ break
+ }
+ } else {
+
+ t -= D_INDIR
+ }
+ ctxt.Rexflag |= regrex[int(a.Index)]&Rxx | regrex[t]&Rxb | rex
+ if t == D_NONE {
+ // mod=00 with no base: disp32 follows the SIB byte.
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), t)
+ goto putrelv
+ }
+
+ // BP/R13 as base cannot use the mod=00 no-displacement form.
+ if v == 0 && rel.Siz == 0 && t != D_BP && t != D_R13 {
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), t)
+ return
+ }
+
+ if v >= -128 && v < 128 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(1<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), t)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, int(a.Scale), int(a.Index), t)
+ goto putrelv
+ }
+
+ // Direct register operand: mod=11.
+ if t >= D_AL && t <= D_X0+15 {
+ if v != 0 {
+ goto bad
+ }
+ ctxt.Andptr[0] = byte(3<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ ctxt.Rexflag |= regrex[t]&(0x40|Rxb) | rex
+ return
+ }
+
+ scale = int(a.Scale)
+ if t < D_INDIR {
+ switch a.Type_ {
+ default:
+ goto bad
+
+ case D_STATIC,
+ D_EXTERN:
+ t = D_NONE
+ v = int32(vaddr(ctxt, p, a, &rel))
+
+ case D_AUTO,
+ D_PARAM:
+ t = D_SP
+ break
+ }
+
+ scale = 1
+ } else {
+
+ t -= D_INDIR
+ }
+ if t == D_TLS {
+ v = int32(vaddr(ctxt, p, a, &rel))
+ }
+
+ ctxt.Rexflag |= regrex[t]&Rxb | rex
+ if t == D_NONE || (D_CS <= t && t <= D_GS) || t == D_TLS {
+ if (a.Sym == nil || !(isextern(a.Sym) != 0)) && t == D_NONE && (a.Type_ == D_STATIC || a.Type_ == D_EXTERN) || ctxt.Asmode != 64 {
+ ctxt.Andptr[0] = byte(0<<6 | 5<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ goto putrelv
+ }
+
+ /* temporary */
+ ctxt.Andptr[0] = byte(0<<6 | 4<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:] /* sib present */
+ ctxt.Andptr[0] = 0<<6 | 4<<3 | 5<<0
+ ctxt.Andptr = ctxt.Andptr[1:] /* DS:d32 */
+ goto putrelv
+ }
+
+ // SP/R12 as base always require a SIB byte.
+ if t == D_SP || t == D_R12 {
+ if v == 0 {
+ ctxt.Andptr[0] = byte(0<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, scale, D_NONE, t)
+ return
+ }
+
+ if v >= -128 && v < 128 {
+ ctxt.Andptr[0] = byte(1<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, scale, D_NONE, t)
+ ctxt.Andptr[0] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ asmidx(ctxt, scale, D_NONE, t)
+ goto putrelv
+ }
+
+ if t >= D_AX && t <= D_R15 {
+ if a.Index == D_TLS {
+ rel = obj.Reloc{}
+ rel.Type_ = obj.R_TLS_IE
+ rel.Siz = 4
+ rel.Sym = nil
+ rel.Add = int64(v)
+ v = 0
+ }
+
+ if v == 0 && rel.Siz == 0 && t != D_BP && t != D_R13 {
+ ctxt.Andptr[0] = byte(0<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ return
+ }
+
+ if v >= -128 && v < 128 && rel.Siz == 0 {
+ ctxt.Andptr[0] = byte(1<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr[1] = byte(v)
+ ctxt.Andptr = ctxt.Andptr[2:]
+ return
+ }
+
+ ctxt.Andptr[0] = byte(2<<6 | reg[t]<<0 | r<<3)
+ ctxt.Andptr = ctxt.Andptr[1:]
+ goto putrelv
+ }
+
+ goto bad
+
+putrelv:
+ // Record the pending relocation (if any) and emit the disp32.
+ if rel.Siz != 0 {
+ var r *obj.Reloc
+
+ if rel.Siz != 4 {
+ ctxt.Diag("bad rel")
+ goto bad
+ }
+
+ r = obj.Addrel(ctxt.Cursym)
+ *r = rel
+ r.Off = int32(ctxt.Curp.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+ }
+
+ put4(ctxt, v)
+ return
+
+bad:
+ ctxt.Diag("asmand: bad address %v", Dconv(p, 0, a))
+ return
+}
+
+// asmand assembles the ModRM/SIB/displacement bytes for operand a,
+// taking the ModRM reg field and REX extension bit from the register
+// operand ra.
+func asmand(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, ra *obj.Addr) {
+ rfield := reg[ra.Type_]
+ rrex := regrex[ra.Type_]
+ asmandsz(ctxt, p, a, rfield, rrex, 0)
+}
+
+// asmando assembles the ModRM/SIB/displacement bytes for operand a
+// with the fixed opcode-extension value o in the ModRM reg field.
+func asmando(ctxt *obj.Link, p *obj.Prog, a *obj.Addr, o int) {
+ asmandsz(ctxt, p, a, o, 0, 0)
+}
+
+// bytereg rewrites a word/long register operand into the matching
+// byte register (AX -> AL, ...) and clears *t, provided the operand
+// carries no index register.
+func bytereg(a *obj.Addr, t *uint8) {
+ if a.Type_ >= D_AX && a.Type_ <= D_R15 && a.Index == D_NONE {
+ a.Type_ = D_AL + (a.Type_ - D_AX)
+ *t = 0
+ }
+}
+
+const (
+ // E appears to mark the end of the opcode byte sequence in ymovtab
+ // entries below — TODO(review) confirm against the Movtab consumer.
+ E = 0xff
+)
+
+// ymovtab lists special-case encodings that the regular optab cannot
+// express: segment-register push/pop and moves, control/debug/test
+// register moves, descriptor-table loads/stores, full-pointer loads,
+// double shifts, and the TLS-base load. Each entry's code field selects
+// a handler in doasm's mfound switch, and op holds up to four bytes
+// (terminated by E where the length varies). The zero entry ends the table.
+var ymovtab = []Movtab{
+	/* push */
+	Movtab{APUSHL, Ycs, Ynone, 0, [4]uint8{0x0e, E, 0, 0}},
+	Movtab{APUSHL, Yss, Ynone, 0, [4]uint8{0x16, E, 0, 0}},
+	Movtab{APUSHL, Yds, Ynone, 0, [4]uint8{0x1e, E, 0, 0}},
+	Movtab{APUSHL, Yes, Ynone, 0, [4]uint8{0x06, E, 0, 0}},
+	Movtab{APUSHL, Yfs, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
+	Movtab{APUSHL, Ygs, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
+	Movtab{APUSHQ, Yfs, Ynone, 0, [4]uint8{0x0f, 0xa0, E, 0}},
+	Movtab{APUSHQ, Ygs, Ynone, 0, [4]uint8{0x0f, 0xa8, E, 0}},
+	Movtab{APUSHW, Ycs, Ynone, 0, [4]uint8{Pe, 0x0e, E, 0}},
+	Movtab{APUSHW, Yss, Ynone, 0, [4]uint8{Pe, 0x16, E, 0}},
+	Movtab{APUSHW, Yds, Ynone, 0, [4]uint8{Pe, 0x1e, E, 0}},
+	Movtab{APUSHW, Yes, Ynone, 0, [4]uint8{Pe, 0x06, E, 0}},
+	Movtab{APUSHW, Yfs, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa0, E}},
+	Movtab{APUSHW, Ygs, Ynone, 0, [4]uint8{Pe, 0x0f, 0xa8, E}},
+
+	/* pop */
+	Movtab{APOPL, Ynone, Yds, 0, [4]uint8{0x1f, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yes, 0, [4]uint8{0x07, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yss, 0, [4]uint8{0x17, E, 0, 0}},
+	Movtab{APOPL, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
+	Movtab{APOPL, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
+	Movtab{APOPQ, Ynone, Yfs, 0, [4]uint8{0x0f, 0xa1, E, 0}},
+	Movtab{APOPQ, Ynone, Ygs, 0, [4]uint8{0x0f, 0xa9, E, 0}},
+	Movtab{APOPW, Ynone, Yds, 0, [4]uint8{Pe, 0x1f, E, 0}},
+	Movtab{APOPW, Ynone, Yes, 0, [4]uint8{Pe, 0x07, E, 0}},
+	Movtab{APOPW, Ynone, Yss, 0, [4]uint8{Pe, 0x17, E, 0}},
+	Movtab{APOPW, Ynone, Yfs, 0, [4]uint8{Pe, 0x0f, 0xa1, E}},
+	Movtab{APOPW, Ynone, Ygs, 0, [4]uint8{Pe, 0x0f, 0xa9, E}},
+
+	/* mov seg */
+	Movtab{AMOVW, Yes, Yml, 1, [4]uint8{0x8c, 0, 0, 0}},
+	Movtab{AMOVW, Ycs, Yml, 1, [4]uint8{0x8c, 1, 0, 0}},
+	Movtab{AMOVW, Yss, Yml, 1, [4]uint8{0x8c, 2, 0, 0}},
+	Movtab{AMOVW, Yds, Yml, 1, [4]uint8{0x8c, 3, 0, 0}},
+	Movtab{AMOVW, Yfs, Yml, 1, [4]uint8{0x8c, 4, 0, 0}},
+	Movtab{AMOVW, Ygs, Yml, 1, [4]uint8{0x8c, 5, 0, 0}},
+	Movtab{AMOVW, Yml, Yes, 2, [4]uint8{0x8e, 0, 0, 0}},
+	Movtab{AMOVW, Yml, Ycs, 2, [4]uint8{0x8e, 1, 0, 0}},
+	Movtab{AMOVW, Yml, Yss, 2, [4]uint8{0x8e, 2, 0, 0}},
+	Movtab{AMOVW, Yml, Yds, 2, [4]uint8{0x8e, 3, 0, 0}},
+	Movtab{AMOVW, Yml, Yfs, 2, [4]uint8{0x8e, 4, 0, 0}},
+	Movtab{AMOVW, Yml, Ygs, 2, [4]uint8{0x8e, 5, 0, 0}},
+
+	/* mov cr */
+	Movtab{AMOVL, Ycr0, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
+	Movtab{AMOVL, Ycr2, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
+	Movtab{AMOVL, Ycr3, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
+	Movtab{AMOVL, Ycr4, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
+	Movtab{AMOVL, Ycr8, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
+	Movtab{AMOVQ, Ycr0, Yml, 3, [4]uint8{0x0f, 0x20, 0, 0}},
+	Movtab{AMOVQ, Ycr2, Yml, 3, [4]uint8{0x0f, 0x20, 2, 0}},
+	Movtab{AMOVQ, Ycr3, Yml, 3, [4]uint8{0x0f, 0x20, 3, 0}},
+	Movtab{AMOVQ, Ycr4, Yml, 3, [4]uint8{0x0f, 0x20, 4, 0}},
+	Movtab{AMOVQ, Ycr8, Yml, 3, [4]uint8{0x0f, 0x20, 8, 0}},
+	Movtab{AMOVL, Yml, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
+	Movtab{AMOVL, Yml, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
+	Movtab{AMOVL, Yml, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
+	Movtab{AMOVL, Yml, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
+	Movtab{AMOVL, Yml, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
+	Movtab{AMOVQ, Yml, Ycr0, 4, [4]uint8{0x0f, 0x22, 0, 0}},
+	Movtab{AMOVQ, Yml, Ycr2, 4, [4]uint8{0x0f, 0x22, 2, 0}},
+	Movtab{AMOVQ, Yml, Ycr3, 4, [4]uint8{0x0f, 0x22, 3, 0}},
+	Movtab{AMOVQ, Yml, Ycr4, 4, [4]uint8{0x0f, 0x22, 4, 0}},
+	Movtab{AMOVQ, Yml, Ycr8, 4, [4]uint8{0x0f, 0x22, 8, 0}},
+
+	/* mov dr */
+	Movtab{AMOVL, Ydr0, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
+	Movtab{AMOVL, Ydr6, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
+	Movtab{AMOVL, Ydr7, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
+	Movtab{AMOVQ, Ydr0, Yml, 3, [4]uint8{0x0f, 0x21, 0, 0}},
+	Movtab{AMOVQ, Ydr6, Yml, 3, [4]uint8{0x0f, 0x21, 6, 0}},
+	Movtab{AMOVQ, Ydr7, Yml, 3, [4]uint8{0x0f, 0x21, 7, 0}},
+	Movtab{AMOVL, Yml, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
+	Movtab{AMOVL, Yml, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
+	Movtab{AMOVL, Yml, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
+	Movtab{AMOVQ, Yml, Ydr0, 4, [4]uint8{0x0f, 0x23, 0, 0}},
+	Movtab{AMOVQ, Yml, Ydr6, 4, [4]uint8{0x0f, 0x23, 6, 0}},
+	Movtab{AMOVQ, Yml, Ydr7, 4, [4]uint8{0x0f, 0x23, 7, 0}},
+
+	/* mov tr */
+	Movtab{AMOVL, Ytr6, Yml, 3, [4]uint8{0x0f, 0x24, 6, 0}},
+	Movtab{AMOVL, Ytr7, Yml, 3, [4]uint8{0x0f, 0x24, 7, 0}},
+	Movtab{AMOVL, Yml, Ytr6, 4, [4]uint8{0x0f, 0x26, 6, E}},
+	Movtab{AMOVL, Yml, Ytr7, 4, [4]uint8{0x0f, 0x26, 7, E}},
+
+	/* lgdt, sgdt, lidt, sidt */
+	Movtab{AMOVL, Ym, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
+	Movtab{AMOVL, Ygdtr, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
+	Movtab{AMOVL, Ym, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
+	Movtab{AMOVL, Yidtr, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
+	Movtab{AMOVQ, Ym, Ygdtr, 4, [4]uint8{0x0f, 0x01, 2, 0}},
+	Movtab{AMOVQ, Ygdtr, Ym, 3, [4]uint8{0x0f, 0x01, 0, 0}},
+	Movtab{AMOVQ, Ym, Yidtr, 4, [4]uint8{0x0f, 0x01, 3, 0}},
+	Movtab{AMOVQ, Yidtr, Ym, 3, [4]uint8{0x0f, 0x01, 1, 0}},
+
+	/* lldt, sldt */
+	Movtab{AMOVW, Yml, Yldtr, 4, [4]uint8{0x0f, 0x00, 2, 0}},
+	Movtab{AMOVW, Yldtr, Yml, 3, [4]uint8{0x0f, 0x00, 0, 0}},
+
+	/* lmsw, smsw */
+	Movtab{AMOVW, Yml, Ymsw, 4, [4]uint8{0x0f, 0x01, 6, 0}},
+	Movtab{AMOVW, Ymsw, Yml, 3, [4]uint8{0x0f, 0x01, 4, 0}},
+
+	/* ltr, str */
+	Movtab{AMOVW, Yml, Ytask, 4, [4]uint8{0x0f, 0x00, 3, 0}},
+	Movtab{AMOVW, Ytask, Yml, 3, [4]uint8{0x0f, 0x00, 1, 0}},
+
+	/* load full pointer */
+	Movtab{AMOVL, Yml, Ycol, 5, [4]uint8{0, 0, 0, 0}},
+	Movtab{AMOVW, Yml, Ycol, 5, [4]uint8{Pe, 0, 0, 0}},
+
+	/* double shift */
+	Movtab{ASHLL, Ycol, Yml, 6, [4]uint8{0xa4, 0xa5, 0, 0}},
+	Movtab{ASHRL, Ycol, Yml, 6, [4]uint8{0xac, 0xad, 0, 0}},
+	Movtab{ASHLQ, Ycol, Yml, 6, [4]uint8{Pw, 0xa4, 0xa5, 0}},
+	Movtab{ASHRQ, Ycol, Yml, 6, [4]uint8{Pw, 0xac, 0xad, 0}},
+	Movtab{ASHLW, Ycol, Yml, 6, [4]uint8{Pe, 0xa4, 0xa5, 0}},
+	Movtab{ASHRW, Ycol, Yml, 6, [4]uint8{Pe, 0xac, 0xad, 0}},
+
+	/* load TLS base */
+	Movtab{AMOVQ, Ytls, Yrl, 7, [4]uint8{0, 0, 0, 0}},
+	Movtab{0, 0, 0, 0, [4]uint8{}},
+}
+
+// isax reports (as 1/0, C-style) whether operand a involves the AX
+// register in any form: AX/AL/AH directly, memory through AX, or AX as
+// an index register. Used by the register-exchange fixup in doasm to
+// avoid clobbering AX when it is already in use.
+func isax(a *obj.Addr) int {
+	switch a.Type_ {
+	case D_AX,
+		D_AL,
+		D_AH,
+		D_INDIR + D_AX:
+		return 1
+	}
+
+	if a.Index == D_AX {
+		return 1
+	}
+	return 0
+}
+
+// subreg rewrites every use of register `from` in p to register `to`:
+// both operand types, both index registers, and the D_INDIR
+// (memory-through-register) operand forms. It is used by doasm's bad:
+// fixup path, which exchanges registers to encode otherwise
+// unaddressable byte operations, so the substitution must be total.
+// The debug prints are compiled out (the old debug['Q'] flag).
+func subreg(p *obj.Prog, from int, to int) {
+	if false { /*debug['Q']*/
+		fmt.Printf("\n%v\ts/%v/%v/\n", p, Rconv(from), Rconv(to))
+	}
+
+	if int(p.From.Type_) == from {
+		p.From.Type_ = int16(to)
+	}
+	if int(p.To.Type_) == from {
+		p.To.Type_ = int16(to)
+	}
+
+	if int(p.From.Index) == from {
+		p.From.Index = uint8(to)
+	}
+	if int(p.To.Index) == from {
+		p.To.Index = uint8(to)
+	}
+
+	// Also rewrite the indirect (memory-through-register) forms.
+	from += D_INDIR
+	if int(p.From.Type_) == from {
+		p.From.Type_ = int16(to + D_INDIR)
+	}
+	if int(p.To.Type_) == from {
+		p.To.Type_ = int16(to + D_INDIR)
+	}
+
+	if false { /*debug['Q']*/
+		fmt.Printf("%v\n", p)
+	}
+}
+
+// mediaop emits the prefix and opcode-escape bytes for an SSE/MMX-style
+// ("media") opcode. For the recognized prefix bytes (Pm, Pe, Pf2, Pf3)
+// on multi-byte opcodes it writes the prefix (unless it is Pm itself),
+// then the 0x0f escape, and advances z to the real opcode byte.
+// Otherwise it ensures a 0x0f escape was emitted immediately before op
+// (checking the last byte already written to ctxt.And). It then emits
+// op and returns the possibly advanced table index z.
+func mediaop(ctxt *obj.Link, o *Optab, op int, osize int, z int) int {
+	switch op {
+	case Pm,
+		Pe,
+		Pf2,
+		Pf3:
+		if osize != 1 {
+			if op != Pm {
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+			z++
+			op = int(o.op[z])
+			break
+		}
+		fallthrough
+
+	default:
+		// -cap arithmetic computes the current write offset into ctxt.And.
+		if -cap(ctxt.Andptr) == -cap(ctxt.And) || ctxt.And[-cap(ctxt.Andptr)+cap(ctxt.And[:])-1] != Pm {
+			ctxt.Andptr[0] = Pm
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		break
+	}
+
+	ctxt.Andptr[0] = byte(op)
+	ctxt.Andptr = ctxt.Andptr[1:]
+	return z
+}
+
+// doasm encodes the single instruction p into ctxt.Andptr.
+// It looks up p's Optab entry, classifies both operands (cached in
+// p.Ft/p.Tt), and scans the ytab rows for a row covering both operand
+// classes. On a match it emits any instruction prefix (o.prefix switch),
+// then dispatches on the row's z-code (t[2]) to emit opcode, ModR/M,
+// immediate and relocation bytes. If no row matches, it falls through to
+// domov (the ymovtab special cases) and finally to bad:, which in 32-bit
+// mode retries unaddressable byte operations by exchanging registers
+// (via xchg + subreg) and re-invoking doasm on the rewritten copy.
+func doasm(ctxt *obj.Link, p *obj.Prog) {
+	var o *Optab
+	var q *obj.Prog
+	var pp obj.Prog
+	var t []byte
+	var mo []Movtab
+	var z int
+	var op int
+	var ft int
+	var tt int
+	var xo int
+	var l int
+	var pre int
+	var v int64
+	var rel obj.Reloc
+	var r *obj.Reloc
+	var a *obj.Addr
+
+	ctxt.Curp = p // TODO
+
+	o = opindex[p.As]
+
+	if o == nil {
+		ctxt.Diag("asmins: missing op %v", p)
+		return
+	}
+
+	// Segment/address-size prefixes for either operand come first.
+	pre = prefixof(ctxt, &p.From)
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+	pre = prefixof(ctxt, &p.To)
+	if pre != 0 {
+		ctxt.Andptr[0] = byte(pre)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+
+	// Operand classes are computed lazily and cached on the Prog.
+	if p.Ft == 0 {
+		p.Ft = uint8(oclass(ctxt, &p.From))
+	}
+	if p.Tt == 0 {
+		p.Tt = uint8(oclass(ctxt, &p.To))
+	}
+
+	ft = int(p.Ft) * Ymax
+	tt = int(p.Tt) * Ymax
+
+	t = o.ytab
+	if t == nil {
+		ctxt.Diag("asmins: noproto %v", p)
+		return
+	}
+
+	// Scan ytab rows (4 bytes each); z tracks the opcode-byte index.
+	xo = bool2int(o.op[0] == 0x0f)
+	for z = 0; t[0] != 0; (func() { z += int(t[3]) + xo; t = t[4:] })() {
+		if ycover[ft+int(t[0])] != 0 {
+			if ycover[tt+int(t[1])] != 0 {
+				goto found
+			}
+		}
+	}
+	goto domov
+
+found:
+	switch o.prefix {
+	case Pq: /* 16 bit escape and opcode escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pq3: /* 16 bit escape, Rex.w, and opcode escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pw
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pf2, /* xmm opcode escape */
+		Pf3:
+		ctxt.Andptr[0] = byte(o.prefix)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pm: /* opcode escape */
+		ctxt.Andptr[0] = Pm
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pe: /* 16 bit escape */
+		ctxt.Andptr[0] = Pe
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Pw: /* 64-bit escape */
+		if p.Mode != 64 {
+
+			ctxt.Diag("asmins: illegal 64: %v", p)
+		}
+		ctxt.Rexflag |= Pw
+
+	case Pb: /* botch */
+		bytereg(&p.From, &p.Ft)
+
+		bytereg(&p.To, &p.Tt)
+
+	case P32: /* 32 bit but illegal if 64-bit mode */
+		if p.Mode == 64 {
+
+			ctxt.Diag("asmins: illegal in 64-bit mode: %v", p)
+		}
+
+	case Py: /* 64-bit only, no prefix */
+		if p.Mode != 64 {
+
+			ctxt.Diag("asmins: illegal in %d-bit mode: %v", p.Mode, p)
+		}
+		break
+	}
+
+	if z >= len(o.op) {
+		log.Fatalf("asmins bad table %v", p)
+	}
+	op = int(o.op[z])
+	if op == 0x0f {
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		z++
+		op = int(o.op[z])
+	}
+
+	// Dispatch on the z-code: each case describes one operand layout.
+	switch t[2] {
+	default:
+		ctxt.Diag("asmins: unknown z %d %v", t[2], p)
+		return
+
+	case Zpseudo:
+		break
+
+	case Zlit:
+		for ; ; z++ {
+			op = int(o.op[z])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case Zlitm_r:
+		for ; ; z++ {
+			op = int(o.op[z])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zmb_r:
+		bytereg(&p.From, &p.Ft)
+		fallthrough
+
+	/* fall through */
+	case Zm_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm2_r:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(o.op[z+1])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_xm_nr:
+		ctxt.Rexflag = 0
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case Zm_r_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zm_r_3d:
+		// 3DNow!: opcode byte follows the ModR/M bytes.
+		ctxt.Andptr[0] = 0x0f
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = 0x0f
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibm_r:
+		for {
+			tmp1 := z
+			z++
+			op = int(o.op[tmp1])
+			if !(op != 0) {
+				break
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		asmand(ctxt, p, &p.From, &p.To)
+		ctxt.Andptr[0] = byte(p.To.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zaut_r:
+		ctxt.Andptr[0] = 0x8d
+		ctxt.Andptr = ctxt.Andptr[1:] /* leal */
+		if p.From.Type_ != D_ADDR {
+			ctxt.Diag("asmins: Zaut sb type ADDR")
+		}
+		// Temporarily treat the address's index register as the base
+		// so asmand encodes LEA of the automatic; then restore.
+		p.From.Type_ = int16(p.From.Index)
+		p.From.Index = D_NONE
+		asmand(ctxt, p, &p.From, &p.To)
+		p.From.Index = uint8(p.From.Type_)
+		p.From.Type_ = D_ADDR
+
+	case Zm_o:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(o.op[z+1]))
+
+	case Zr_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_xm_nr:
+		ctxt.Rexflag = 0
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+
+	case Zr_m_i_xm:
+		mediaop(ctxt, o, op, int(t[3]), z)
+		asmand(ctxt, p, &p.To, &p.From)
+		ctxt.Andptr[0] = byte(p.From.Offset)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+
+	case Zcallindreg:
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc)
+		r.Type_ = obj.R_CALLIND
+		r.Siz = 0
+		fallthrough
+
+	// fallthrough
+	case Zo_m64:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmandsz(ctxt, p, &p.To, int(o.op[z+1]), 0, 1)
+
+	case Zm_ibo:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.To, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zibo_m_xm:
+		z = mediaop(ctxt, o, op, int(t[3]), z)
+		asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_ib,
+		Zib_:
+		if t[2] == Zib_ {
+			a = &p.From
+		} else {
+
+			a = &p.To
+		}
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, a, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zib_rp:
+		ctxt.Rexflag |= regrex[p.To.Type_] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zil_rp:
+		ctxt.Rexflag |= regrex[p.To.Type_] & Rxb
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Zo_iw:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if p.From.Type_ != D_NONE {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case Ziq_rp:
+		// 64-bit immediate move: shrink to 32-bit forms when possible.
+		v = vaddr(ctxt, p, &p.From, &rel)
+		l = int(v >> 32)
+		if l == 0 && rel.Siz != 8 {
+			//p->mark |= 0100;
+			//print("zero: %llux %P\n", v, p);
+			ctxt.Rexflag &^= (0x40 | Rxw)
+
+			ctxt.Rexflag |= regrex[p.To.Type_] & Rxb
+			ctxt.Andptr[0] = byte(0xb8 + reg[p.To.Type_])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if rel.Type_ != 0 {
+				r = obj.Addrel(ctxt.Cursym)
+				*r = rel
+				r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			}
+
+			put4(ctxt, int32(v))
+		} else if l == -1 && uint64(v)&(uint64(1)<<31) != 0 { /* sign extend */
+
+			//p->mark |= 0100;
+			//print("sign: %llux %P\n", v, p);
+			ctxt.Andptr[0] = 0xc7
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+			asmando(ctxt, p, &p.To, 0)
+			put4(ctxt, int32(v)) /* need all 8 */
+		} else {
+
+			//print("all: %llux %P\n", v, p);
+			ctxt.Rexflag |= regrex[p.To.Type_] & Rxb
+
+			ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if rel.Type_ != 0 {
+				r = obj.Addrel(ctxt.Cursym)
+				*r = rel
+				r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			}
+
+			put8(ctxt, v)
+		}
+
+	case Zib_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+		ctxt.Andptr[0] = byte(vaddr(ctxt, p, &p.From, nil))
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Z_il,
+		Zil_:
+		if t[2] == Zil_ {
+			a = &p.From
+		} else {
+
+			a = &p.To
+		}
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, a)
+		}
+
+	case Zm_ilo,
+		Zilo_m:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if t[2] == Zilo_m {
+			a = &p.From
+			asmando(ctxt, p, &p.To, int(o.op[z+1]))
+		} else {
+
+			a = &p.To
+			asmando(ctxt, p, &p.From, int(o.op[z+1]))
+		}
+
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, a, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, a)
+		}
+
+	case Zil_rr:
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+		if o.prefix == Pe {
+			v = vaddr(ctxt, p, &p.From, nil)
+			ctxt.Andptr[0] = byte(v)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else {
+
+			relput4(ctxt, p, &p.From)
+		}
+
+	case Z_rp:
+		ctxt.Rexflag |= regrex[p.To.Type_] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.To.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zrp_:
+		ctxt.Rexflag |= regrex[p.From.Type_] & (Rxb | 0x40)
+		ctxt.Andptr[0] = byte(op + reg[p.From.Type_])
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+	case Zclr:
+		ctxt.Rexflag &^= Pw
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmand(ctxt, p, &p.To, &p.To)
+
+	case Zcall:
+		if p.To.Sym == nil {
+			ctxt.Diag("call without target")
+			log.Fatalf("bad code")
+		}
+
+		ctxt.Andptr[0] = byte(op)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		r.Sym = p.To.Sym
+		r.Add = p.To.Offset
+		r.Type_ = obj.R_CALL
+		r.Siz = 4
+		put4(ctxt, 0)
+
+	// TODO: jump across functions needs reloc
+	case Zbr,
+		Zjmp,
+		Zloop:
+		if p.To.Sym != nil {
+
+			if t[2] != Zjmp {
+				ctxt.Diag("branch to ATEXT")
+				log.Fatalf("bad code")
+			}
+
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			r = obj.Addrel(ctxt.Cursym)
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+			r.Sym = p.To.Sym
+			r.Type_ = obj.R_PCREL
+			r.Siz = 4
+			put4(ctxt, 0)
+			break
+		}
+
+		// Assumes q is in this function.
+		// TODO: Check in input, preserve in brchain.
+
+		// Fill in backward jump now.
+		q = p.Pcond
+
+		if q == nil {
+			ctxt.Diag("jmp/branch/loop without target")
+			log.Fatalf("bad code")
+		}
+
+		if p.Back&1 != 0 {
+			v = q.Pc - (p.Pc + 2)
+			if v >= -128 {
+				if p.As == AJCXZL {
+					ctxt.Andptr[0] = 0x67
+					ctxt.Andptr = ctxt.Andptr[1:]
+				}
+				ctxt.Andptr[0] = byte(op)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			} else if t[2] == Zloop {
+				ctxt.Diag("loop too far: %v", p)
+			} else {
+
+				v -= 5 - 2
+				if t[2] == Zbr {
+					ctxt.Andptr[0] = 0x0f
+					ctxt.Andptr = ctxt.Andptr[1:]
+					v--
+				}
+
+				ctxt.Andptr[0] = byte(o.op[z+1])
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 8)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+			break
+		}
+
+		// Annotate target; will fill in later.
+		p.Forwd = q.Comefrom
+
+		q.Comefrom = p
+		if p.Back&2 != 0 { // short
+			if p.As == AJCXZL {
+				ctxt.Andptr[0] = 0x67
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(op)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		} else if t[2] == Zloop {
+			ctxt.Diag("loop too far: %v", p)
+		} else {
+
+			if t[2] == Zbr {
+				ctxt.Andptr[0] = 0x0f
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+			ctxt.Andptr[0] = byte(o.op[z+1])
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+		break
+
+	/*
+		v = q->pc - p->pc - 2;
+		if((v >= -128 && v <= 127) || p->pc == -1 || q->pc == -1) {
+			*ctxt->andptr++ = op;
+			*ctxt->andptr++ = v;
+		} else {
+			v -= 5-2;
+			if(t[2] == Zbr) {
+				*ctxt->andptr++ = 0x0f;
+				v--;
+			}
+			*ctxt->andptr++ = o->op[z+1];
+			*ctxt->andptr++ = v;
+			*ctxt->andptr++ = v>>8;
+			*ctxt->andptr++ = v>>16;
+			*ctxt->andptr++ = v>>24;
+		}
+	*/
+
+	case Zbyte:
+		v = vaddr(ctxt, p, &p.From, &rel)
+		if rel.Siz != 0 {
+			rel.Siz = uint8(op)
+			r = obj.Addrel(ctxt.Cursym)
+			*r = rel
+			r.Off = int32(p.Pc + int64(-cap(ctxt.Andptr)+cap(ctxt.And[:])))
+		}
+
+		// op doubles as the byte count (1, 2, 4 or 8).
+		ctxt.Andptr[0] = byte(v)
+		ctxt.Andptr = ctxt.Andptr[1:]
+		if op > 1 {
+			ctxt.Andptr[0] = byte(v >> 8)
+			ctxt.Andptr = ctxt.Andptr[1:]
+			if op > 2 {
+				ctxt.Andptr[0] = byte(v >> 16)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(v >> 24)
+				ctxt.Andptr = ctxt.Andptr[1:]
+				if op > 4 {
+					ctxt.Andptr[0] = byte(v >> 32)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 40)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 48)
+					ctxt.Andptr = ctxt.Andptr[1:]
+					ctxt.Andptr[0] = byte(v >> 56)
+					ctxt.Andptr = ctxt.Andptr[1:]
+				}
+			}
+		}
+
+		break
+	}
+
+	return
+
+domov:
+	// No optab row matched; try the ymovtab special cases.
+	for mo = ymovtab; mo[0].as != 0; mo = mo[1:] {
+		if p.As == mo[0].as {
+			if ycover[ft+int(mo[0].ft)] != 0 {
+				if ycover[tt+int(mo[0].tt)] != 0 {
+					t = mo[0].op[:]
+					goto mfound
+				}
+			}
+		}
+	}
+
+bad:
+	if p.Mode != 64 {
+		/*
+		 * here, the assembly has failed.
+		 * if its a byte instruction that has
+		 * unaddressable registers, try to
+		 * exchange registers and reissue the
+		 * instruction with the operands renamed.
+		 */
+		pp = *p
+
+		z = int(p.From.Type_)
+		if z >= D_BP && z <= D_DI {
+			if isax(&p.To) != 0 || p.To.Type_ == D_NONE {
+				// We certainly don't want to exchange
+				// with AX if the op is MUL or DIV.
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+				asmando(ctxt, p, &p.From, reg[D_BX])
+				subreg(&pp, z, D_BX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lhs,bx */
+				asmando(ctxt, p, &p.From, reg[D_BX])
+			} else {
+
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+				subreg(&pp, z, D_AX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg lsh,ax */
+			}
+
+			return
+		}
+
+		z = int(p.To.Type_)
+		if z >= D_BP && z <= D_DI {
+			if isax(&p.From) != 0 {
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+				asmando(ctxt, p, &p.To, reg[D_BX])
+				subreg(&pp, z, D_BX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = 0x87
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rhs,bx */
+				asmando(ctxt, p, &p.To, reg[D_BX])
+			} else {
+
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+				subreg(&pp, z, D_AX)
+				doasm(ctxt, &pp)
+				ctxt.Andptr[0] = byte(0x90 + reg[z])
+				ctxt.Andptr = ctxt.Andptr[1:] /* xchg rsh,ax */
+			}
+
+			return
+		}
+	}
+
+	ctxt.Diag("doasm: notfound from=%x to=%x %v", uint16(p.From.Type_), uint16(p.To.Type_), p)
+	return
+
+mfound:
+	// ymovtab hit: mo[0].code selects the encoding scheme below.
+	switch mo[0].code {
+	default:
+		ctxt.Diag("asmins: unknown mov %d %v", mo[0].code, p)
+
+	case 0: /* lit */
+		for z = 0; t[z] != E; z++ {
+
+			ctxt.Andptr[0] = t[z]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+
+	case 1: /* r,m */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmando(ctxt, p, &p.To, int(t[1]))
+
+	case 2: /* m,r */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		asmando(ctxt, p, &p.From, int(t[1]))
+
+	case 3: /* r,m - 2op */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[1]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.To, int(t[2]))
+		ctxt.Rexflag |= regrex[p.From.Type_] & (Rxr | 0x40)
+
+	case 4: /* m,r - 2op */
+		ctxt.Andptr[0] = t[0]
+		ctxt.Andptr = ctxt.Andptr[1:]
+
+		ctxt.Andptr[0] = t[1]
+		ctxt.Andptr = ctxt.Andptr[1:]
+		asmando(ctxt, p, &p.From, int(t[2]))
+		ctxt.Rexflag |= regrex[p.To.Type_] & (Rxr | 0x40)
+
+	case 5: /* load full pointer, trash heap */
+		if t[0] != 0 {
+
+			ctxt.Andptr[0] = t[0]
+			ctxt.Andptr = ctxt.Andptr[1:]
+		}
+		switch p.To.Index {
+		default:
+			goto bad
+
+		case D_DS:
+			ctxt.Andptr[0] = 0xc5
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_SS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb2
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_ES:
+			ctxt.Andptr[0] = 0xc4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_FS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb4
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_GS:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = 0xb5
+			ctxt.Andptr = ctxt.Andptr[1:]
+			break
+		}
+
+		asmand(ctxt, p, &p.From, &p.To)
+
+	case 6: /* double shift */
+		if t[0] == Pw {
+
+			if p.Mode != 64 {
+				ctxt.Diag("asmins: illegal 64: %v", p)
+			}
+			ctxt.Rexflag |= Pw
+			t = t[1:]
+		} else if t[0] == Pe {
+			ctxt.Andptr[0] = Pe
+			ctxt.Andptr = ctxt.Andptr[1:]
+			t = t[1:]
+		}
+
+		z = int(p.From.Type_)
+		switch z {
+		default:
+			goto bad
+
+		case D_CONST:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = t[0]
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+			ctxt.Andptr[0] = byte(p.From.Offset)
+			ctxt.Andptr = ctxt.Andptr[1:]
+
+		case D_CL,
+			D_CX:
+			ctxt.Andptr[0] = 0x0f
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Andptr[0] = t[1]
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmandsz(ctxt, p, &p.To, reg[int(p.From.Index)], regrex[int(p.From.Index)], 0)
+			break
+		}
+
+	// NOTE: The systems listed here are the ones that use the "TLS initial exec" model,
+	// where you load the TLS base register into a register and then index off that
+	// register to access the actual TLS variables. Systems that allow direct TLS access
+	// are handled in prefixof above and should not be listed here.
+	case 7: /* mov tls, r */
+		switch ctxt.Headtype {
+
+		default:
+			log.Fatalf("unknown TLS base location for %s", obj.Headstr(ctxt.Headtype))
+
+		case obj.Hplan9:
+			if ctxt.Plan9privates == nil {
+				ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+			}
+			pp.From = obj.Addr{}
+			pp.From.Type_ = D_EXTERN
+			pp.From.Sym = ctxt.Plan9privates
+			pp.From.Offset = 0
+			pp.From.Index = D_NONE
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+
+		// TLS base is 0(FS).
+		case obj.Hsolaris: // TODO(rsc): Delete Hsolaris from list. Should not use this code. See progedit in obj6.c.
+			pp.From = p.From
+
+			pp.From.Type_ = D_INDIR + D_NONE
+			pp.From.Offset = 0
+			pp.From.Index = D_NONE
+			pp.From.Scale = 0
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x64
+			ctxt.Andptr = ctxt.Andptr[1:] // FS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+
+		// Windows TLS base is always 0x28(GS).
+		case obj.Hwindows:
+			pp.From = p.From
+
+			pp.From.Type_ = D_INDIR + D_GS
+			pp.From.Offset = 0x28
+			pp.From.Index = D_NONE
+			pp.From.Scale = 0
+			ctxt.Rexflag |= Pw
+			ctxt.Andptr[0] = 0x65
+			ctxt.Andptr = ctxt.Andptr[1:] // GS
+			ctxt.Andptr[0] = 0x8B
+			ctxt.Andptr = ctxt.Andptr[1:]
+			asmand(ctxt, p, &pp.From, &p.To)
+			break
+		}
+
+		break
+	}
+}
+
+// Machine-code stubs used by asmins to sandbox control flow and
+// addressing when targeting Native Client (Hnacl): an indirect-jump
+// return sequence and R15-relative fixups for SP/BP/SI/DI.
+var naclret = []uint8{
+	0x5e, // POPL SI
+	// 0x8b, 0x7d, 0x00, // MOVL (BP), DI - catch return to invalid address, for debugging
+	0x83,
+	0xe6,
+	0xe0, // ANDL $~31, SI
+	0x4c,
+	0x01,
+	0xfe, // ADDQ R15, SI
+	0xff,
+	0xe6, // JMP SI
+}
+
+var naclspfix = []uint8{0x4c, 0x01, 0xfc} // ADDQ R15, SP
+
+var naclbpfix = []uint8{0x4c, 0x01, 0xfd} // ADDQ R15, BP
+
+var naclmovs = []uint8{
+	0x89,
+	0xf6, // MOVL SI, SI
+	0x49,
+	0x8d,
+	0x34,
+	0x37, // LEAQ (R15)(SI*1), SI
+	0x89,
+	0xff, // MOVL DI, DI
+	0x49,
+	0x8d,
+	0x3c,
+	0x3f, // LEAQ (R15)(DI*1), DI
+}
+
+var naclstos = []uint8{
+	0x89,
+	0xff, // MOVL DI, DI
+	0x49,
+	0x8d,
+	0x3c,
+	0x3f, // LEAQ (R15)(DI*1), DI
+}
+
+// nacltrunc emits MOVL reg, reg (0x89 with mod=3, reg==rm) to truncate
+// reg to its low 32 bits for NaCl address sandboxing, prefixing 0x45
+// (REX.RB) when reg is one of R8-R15.
+func nacltrunc(ctxt *obj.Link, reg int) {
+	if reg >= D_R8 {
+		ctxt.Andptr[0] = 0x45
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+	reg = (reg - D_AX) & 7
+	ctxt.Andptr[0] = 0x89
+	ctxt.Andptr = ctxt.Andptr[1:]
+	ctxt.Andptr[0] = byte(3<<6 | reg<<3 | reg)
+	ctxt.Andptr = ctxt.Andptr[1:]
+}
+
+// asmins assembles the single instruction p into ctxt.And, resetting
+// ctxt.Andptr first. For Hnacl targets it first applies the sandboxing
+// rewrites (index-register truncation, masked indirect CALL/JMP, the
+// RET/string-op stubs, and pending REP/REPN/LOCK prefixes). It then
+// calls doasm and, if a REX prefix was requested via ctxt.Rexflag,
+// inserts the REX byte after any legacy prefixes but before the first
+// opcode byte, shifting relocation offsets for this instruction
+// accordingly. Finally, for NaCl, it re-extends SP/BP after
+// instructions that wrote them.
+func asmins(ctxt *obj.Link, p *obj.Prog) {
+	var i int
+	var n int
+	var np int
+	var c int
+	var and0 []byte
+	var r *obj.Reloc
+
+	ctxt.Andptr = ctxt.And[:]
+	ctxt.Asmode = int(p.Mode)
+
+	if p.As == AUSEFIELD {
+		// Pseudo-instruction: record a field-use relocation, emit no bytes.
+		r = obj.Addrel(ctxt.Cursym)
+		r.Off = 0
+		r.Siz = 0
+		r.Sym = p.From.Sym
+		r.Type_ = obj.R_USEFIELD
+		return
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		// Defer REP/REPN/LOCK so they land directly before the guarded
+		// instruction, after any sandboxing bytes emitted below.
+		if p.As == AREP {
+			ctxt.Rep++
+			return
+		}
+
+		if p.As == AREPN {
+			ctxt.Repn++
+			return
+		}
+
+		if p.As == ALOCK {
+			ctxt.Lock++
+			return
+		}
+
+		if p.As != ALEAQ && p.As != ALEAL {
+			if p.From.Index != D_NONE && p.From.Scale > 0 {
+				nacltrunc(ctxt, int(p.From.Index))
+			}
+			if p.To.Index != D_NONE && p.To.Scale > 0 {
+				nacltrunc(ctxt, int(p.To.Index))
+			}
+		}
+
+		switch p.As {
+		case ARET:
+			copy(ctxt.Andptr, naclret)
+			ctxt.Andptr = ctxt.Andptr[len(naclret):]
+			return
+
+		case ACALL,
+			AJMP:
+			if D_AX <= p.To.Type_ && p.To.Type_ <= D_DI {
+				// ANDL $~31, reg
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Type_ - D_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				// ADDQ R15, reg
+				ctxt.Andptr[0] = 0x4c
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x01
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xf8 | (p.To.Type_ - D_AX))
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+			if D_R8 <= p.To.Type_ && p.To.Type_ <= D_R15 {
+				// ANDL $~31, reg
+				ctxt.Andptr[0] = 0x41
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x83
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xe0 | (p.To.Type_ - D_R8))
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = 0xe0
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				// ADDQ R15, reg
+				ctxt.Andptr[0] = 0x4d
+				ctxt.Andptr = ctxt.Andptr[1:]
+
+				ctxt.Andptr[0] = 0x01
+				ctxt.Andptr = ctxt.Andptr[1:]
+				ctxt.Andptr[0] = byte(0xf8 | (p.To.Type_ - D_R8))
+				ctxt.Andptr = ctxt.Andptr[1:]
+			}
+
+		case AINT:
+			// NaCl forbids INT; emit HLT instead.
+			ctxt.Andptr[0] = 0xf4
+			ctxt.Andptr = ctxt.Andptr[1:]
+			return
+
+		case ASCASB,
+			ASCASW,
+			ASCASL,
+			ASCASQ,
+			ASTOSB,
+			ASTOSW,
+			ASTOSL,
+			ASTOSQ:
+			copy(ctxt.Andptr, naclstos)
+			ctxt.Andptr = ctxt.Andptr[len(naclstos):]
+
+		case AMOVSB,
+			AMOVSW,
+			AMOVSL,
+			AMOVSQ:
+			copy(ctxt.Andptr, naclmovs)
+			ctxt.Andptr = ctxt.Andptr[len(naclmovs):]
+			break
+		}
+
+		if ctxt.Rep != 0 {
+			ctxt.Andptr[0] = 0xf3
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Rep = 0
+		}
+
+		if ctxt.Repn != 0 {
+			ctxt.Andptr[0] = 0xf2
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Repn = 0
+		}
+
+		if ctxt.Lock != 0 {
+			ctxt.Andptr[0] = 0xf0
+			ctxt.Andptr = ctxt.Andptr[1:]
+			ctxt.Lock = 0
+		}
+	}
+
+	ctxt.Rexflag = 0
+	and0 = ctxt.Andptr
+	ctxt.Asmode = int(p.Mode)
+	doasm(ctxt, p)
+	if ctxt.Rexflag != 0 {
+		/*
+		 * as befits the whole approach of the architecture,
+		 * the rex prefix must appear before the first opcode byte
+		 * (and thus after any 66/67/f2/f3/26/2e/3e prefix bytes, but
+		 * before the 0f opcode escape!), or it might be ignored.
+		 * note that the handbook often misleadingly shows 66/f2/f3 in `opcode'.
+		 */
+		if p.Mode != 64 {
+
+			ctxt.Diag("asmins: illegal in mode %d: %v", p.Mode, p)
+		}
+		// Skip over legacy prefix bytes, then shift the encoded bytes
+		// up by one and splice in the REX byte.
+		n = -cap(ctxt.Andptr) + cap(and0)
+		for np = 0; np < n; np++ {
+			c = int(and0[np])
+			if c != 0xf2 && c != 0xf3 && (c < 0x64 || c > 0x67) && c != 0x2e && c != 0x3e && c != 0x26 {
+				break
+			}
+		}
+
+		copy(and0[np+1:], and0[np:n])
+		and0[np] = byte(0x40 | ctxt.Rexflag)
+		ctxt.Andptr = ctxt.Andptr[1:]
+	}
+
+	// Fix up relocations recorded within this instruction: the inserted
+	// REX byte shifts their offsets, and PC-relative relocs become
+	// relative to the instruction end.
+	n = -cap(ctxt.Andptr) + cap(ctxt.And[:])
+	for i = len(ctxt.Cursym.R) - 1; i >= 0; i-- {
+		r = &ctxt.Cursym.R[i:][0]
+		if int64(r.Off) < p.Pc {
+			break
+		}
+		if ctxt.Rexflag != 0 {
+			r.Off++
+		}
+		if r.Type_ == obj.R_PCREL || r.Type_ == obj.R_CALL {
+			r.Add -= p.Pc + int64(n) - (int64(r.Off) + int64(r.Siz))
+		}
+	}
+
+	if ctxt.Headtype == obj.Hnacl && p.As != ACMPL && p.As != ACMPQ {
+		switch p.To.Type_ {
+		case D_SP:
+			copy(ctxt.Andptr, naclspfix)
+			ctxt.Andptr = ctxt.Andptr[len(naclspfix):]
+
+		case D_BP:
+			copy(ctxt.Andptr, naclbpfix)
+			ctxt.Andptr = ctxt.Andptr[len(naclbpfix):]
+			break
+		}
+	}
+}
--- /dev/null
+// Inferno utils/6c/list.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/list.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+//
+// Format conversions
+// %A int Opcodes (instruction mnemonics)
+//
+// %D Addr* Addresses (instruction operands)
+// Flags: "%lD": separate the high and low words of a constant by "-"
+//
+// %P Prog* Instructions
+//
+// %R int Registers
+//
+// %$ char* String constant addresses (for internal use only)
+
+const (
+	// STRINGSZ is the formatting buffer bound inherited from the C
+	// implementation (utils/6c/list.c).
+	STRINGSZ = 1000
+)
+
+// bigP is package-level state carried over from the C list.c sources.
+var bigP *obj.Prog
+
+func Pconv(p *obj.Prog) string {
+ var str string
+ var fp string
+
+ switch p.As {
+ case ADATA:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v/%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, 0, &p.To))
+
+ case ATEXT:
+ if p.From.Scale != 0 {
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%d,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), p.From.Scale, Dconv(p, fmtLong, &p.To))
+ break
+ }
+
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, fmtLong, &p.To))
+
+ default:
+ str = fmt.Sprintf("%.5d (%v)\t%v\t%v,%v", p.Pc, p.Line(), Aconv(int(p.As)), Dconv(p, 0, &p.From), Dconv(p, 0, &p.To))
+ break
+ }
+
+ fp += str
+ return fp
+}
+
+func Aconv(i int) string {
+ var fp string
+
+ fp += anames6[i]
+ return fp
+}
+
+// Dconv formats the operand a as assembler text.
+// The fmtLong flag selects the %lD form, which prints the low and
+// high 32-bit halves of a 64-bit constant separated by "-" (used
+// for the TEXT frame/argument size word).
+func Dconv(p *obj.Prog, flag int, a *obj.Addr) string {
+	var str string
+	var s string
+	var fp string
+
+	var i int
+
+	i = int(a.Type_)
+
+	if flag&fmtLong != 0 /*untyped*/ {
+		if i == D_CONST {
+			str = fmt.Sprintf("$%d-%d", a.Offset&0xffffffff, a.Offset>>32)
+		} else {
+
+			// ATEXT dst is not constant
+			str = fmt.Sprintf("!!%v", Dconv(p, 0, a))
+		}
+
+		goto brk
+	}
+
+	if i >= D_INDIR {
+		if a.Offset != 0 {
+			str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(i-D_INDIR))
+		} else {
+
+			str = fmt.Sprintf("(%v)", Rconv(i-D_INDIR))
+		}
+		goto brk
+	}
+
+	switch i {
+	// Plain register (or unknown type): print any offset as $off,reg.
+	default:
+		if a.Offset != 0 {
+			str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(i))
+		} else {
+
+			str = fmt.Sprintf("%v", Rconv(i))
+		}
+
+	case D_NONE:
+		str = ""
+
+	// Branch target: prefer the symbol, then the resolved Prog's pc,
+	// falling back to a raw PC-relative offset.
+	case D_BRANCH:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+		} else if p != nil && p.Pcond != nil {
+			str = fmt.Sprintf("%d", p.Pcond.Pc)
+		} else if a.U.Branch != nil {
+			str = fmt.Sprintf("%d", a.U.Branch.Pc)
+		} else {
+
+			str = fmt.Sprintf("%d(PC)", a.Offset)
+		}
+
+	case D_EXTERN:
+		str = fmt.Sprintf("%s+%d(SB)", a.Sym.Name, a.Offset)
+
+	case D_STATIC:
+		str = fmt.Sprintf("%s<>+%d(SB)", a.Sym.Name, a.Offset)
+
+	case D_AUTO:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(SP)", a.Sym.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%d(SP)", a.Offset)
+		}
+
+	case D_PARAM:
+		if a.Sym != nil {
+			str = fmt.Sprintf("%s+%d(FP)", a.Sym.Name, a.Offset)
+		} else {
+
+			str = fmt.Sprintf("%d(FP)", a.Offset)
+		}
+
+	case D_CONST:
+		str = fmt.Sprintf("$%d", a.Offset)
+
+	case D_FCONST:
+		str = fmt.Sprintf("$(%.17g)", a.U.Dval)
+
+	case D_SCONST:
+		str = fmt.Sprintf("$\"%q\"", a.U.Sval)
+
+	// Address constant $x: temporarily rewrite the operand as its
+	// underlying (index) form, format that, then restore the fields.
+	// Jumps to conv so the index suffix is not appended again.
+	case D_ADDR:
+		a.Type_ = int16(a.Index)
+		a.Index = D_NONE
+		str = fmt.Sprintf("$%v", Dconv(p, 0, a))
+		a.Index = uint8(a.Type_)
+		a.Type_ = D_ADDR
+		goto conv
+	}
+
+brk:
+	// Append the scaled-index suffix (reg*scale), if any.
+	if a.Index != D_NONE {
+		s = fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
+		str += s
+	}
+
+conv:
+	fp += str
+	return fp
+}
+
+// regstr maps register numbers (offset by D_AL) to their assembler
+// names; Rconv indexes it with r-D_AL. The order must match the D_*
+// register constant declarations.
+var regstr = []string{
+	"AL", /* [D_AL] */
+	"CL",
+	"DL",
+	"BL",
+	"SPB",
+	"BPB",
+	"SIB",
+	"DIB",
+	"R8B",
+	"R9B",
+	"R10B",
+	"R11B",
+	"R12B",
+	"R13B",
+	"R14B",
+	"R15B",
+	"AX", /* [D_AX] */
+	"CX",
+	"DX",
+	"BX",
+	"SP",
+	"BP",
+	"SI",
+	"DI",
+	"R8",
+	"R9",
+	"R10",
+	"R11",
+	"R12",
+	"R13",
+	"R14",
+	"R15",
+	"AH",
+	"CH",
+	"DH",
+	"BH",
+	"F0", /* [D_F0] */
+	"F1",
+	"F2",
+	"F3",
+	"F4",
+	"F5",
+	"F6",
+	"F7",
+	"M0",
+	"M1",
+	"M2",
+	"M3",
+	"M4",
+	"M5",
+	"M6",
+	"M7",
+	"X0",
+	"X1",
+	"X2",
+	"X3",
+	"X4",
+	"X5",
+	"X6",
+	"X7",
+	"X8",
+	"X9",
+	"X10",
+	"X11",
+	"X12",
+	"X13",
+	"X14",
+	"X15",
+	"CS", /* [D_CS] */
+	"SS",
+	"DS",
+	"ES",
+	"FS",
+	"GS",
+	"GDTR", /* [D_GDTR] */
+	"IDTR", /* [D_IDTR] */
+	"LDTR", /* [D_LDTR] */
+	"MSW",  /* [D_MSW] */
+	"TASK", /* [D_TASK] */
+	"CR0",  /* [D_CR] */
+	"CR1",
+	"CR2",
+	"CR3",
+	"CR4",
+	"CR5",
+	"CR6",
+	"CR7",
+	"CR8",
+	"CR9",
+	"CR10",
+	"CR11",
+	"CR12",
+	"CR13",
+	"CR14",
+	"CR15",
+	"DR0", /* [D_DR] */
+	"DR1",
+	"DR2",
+	"DR3",
+	"DR4",
+	"DR5",
+	"DR6",
+	"DR7",
+	"TR0", /* [D_TR] */
+	"TR1",
+	"TR2",
+	"TR3",
+	"TR4",
+	"TR5",
+	"TR6",
+	"TR7",
+	"TLS",  /* [D_TLS] */
+	"NONE", /* [D_NONE] */
+}
+
+func Rconv(r int) string {
+ var str string
+ var fp string
+
+ if r >= D_AL && r <= D_NONE {
+ str = fmt.Sprintf("%s", regstr[r-D_AL])
+ } else {
+
+ str = fmt.Sprintf("gok(%d)", r)
+ }
+
+ fp += str
+ return fp
+}
--- /dev/null
+// Inferno utils/6l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+// zprg is the prototype Prog that prg copies into every newly
+// allocated instruction: an AGOK placeholder with empty operands.
+// NOTE(review): Back=2 appears to mark the branch-resolution state
+// as "not yet scanned" — confirm against the span/branch pass.
+var zprg = obj.Prog{
+	Back: 2,
+	As:   AGOK,
+	From: obj.Addr{
+		Type_: D_NONE,
+		Index: D_NONE,
+	},
+	To: obj.Addr{
+		Type_: D_NONE,
+		Index: D_NONE,
+	},
+}
+
+// nopout rewrites p in place as a NOP with no operands.
+func nopout(p *obj.Prog) {
+	p.From.Type_ = D_NONE
+	p.To.Type_ = D_NONE
+	p.As = ANOP
+}
+
+func symtype(a *obj.Addr) int {
+ var t int
+
+ t = int(a.Type_)
+ if t == D_ADDR {
+ t = int(a.Index)
+ }
+ return t
+}
+
+// isdata reports whether p is a data-defining pseudo-instruction.
+func isdata(p *obj.Prog) bool {
+	switch p.As {
+	case ADATA, AGLOBL:
+		return true
+	}
+	return false
+}
+
+// iscall reports whether p is a call instruction.
+func iscall(p *obj.Prog) bool {
+	return p.As == ACALL
+}
+
+// datasize returns the size operand of a DATA pseudo-instruction,
+// which is stored in the From scale field.
+func datasize(p *obj.Prog) int {
+	return int(p.From.Scale)
+}
+
+// textflag returns the flags operand of a TEXT pseudo-instruction
+// (NOSPLIT, WRAPPER, ...), stored in the From scale field.
+func textflag(p *obj.Prog) int {
+	return int(p.From.Scale)
+}
+
+// settextflag overwrites the flags operand of a TEXT
+// pseudo-instruction (see textflag).
+func settextflag(p *obj.Prog, f int) {
+	p.From.Scale = int8(f)
+}
+
+// canuselocaltls reports (as 0/1) whether the target operating
+// system supports the 1-instruction TLS local exec form; Plan 9 and
+// Windows require the 2-instruction initial exec sequence.
+func canuselocaltls(ctxt *obj.Link) int {
+	if ctxt.Headtype == obj.Hplan9 || ctxt.Headtype == obj.Hwindows {
+		return 0
+	}
+	return 1
+}
+
+// progedit is the per-instruction rewrite pass applied as programs
+// are read in: it canonicalizes TLS accesses for the target OS,
+// applies NaCl address sandboxing, maintains the 32/64-bit code
+// generation mode, rewrites CALL/JMP/RET-to-symbol as branches, and
+// moves floating-point constants into read-only data symbols.
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+	var literal string
+	var s *obj.LSym
+	var q *obj.Prog
+
+	// Thread-local storage references use the TLS pseudo-register.
+	// As a register, TLS refers to the thread-local storage base, and it
+	// can only be loaded into another register:
+	//
+	//         MOVQ TLS, AX
+	//
+	// An offset from the thread-local storage base is written off(reg)(TLS*1).
+	// Semantically it is off(reg), but the (TLS*1) annotation marks this as
+	// indexing from the loaded TLS base. This emits a relocation so that
+	// if the linker needs to adjust the offset, it can. For example:
+	//
+	//         MOVQ TLS, AX
+	//         MOVQ 8(AX)(TLS*1), CX // load m into CX
+	//
+	// On systems that support direct access to the TLS memory, this
+	// pair of instructions can be reduced to a direct TLS memory reference:
+	//
+	//         MOVQ 8(TLS), CX // load m into CX
+	//
+	// The 2-instruction and 1-instruction forms correspond roughly to
+	// ELF TLS initial exec mode and ELF TLS local exec mode, respectively.
+	//
+	// We apply this rewrite on systems that support the 1-instruction form.
+	// The decision is made using only the operating system (and probably
+	// the -shared flag, eventually), not the link mode. If some link modes
+	// on a particular operating system require the 2-instruction form,
+	// then all builds for that operating system will use the 2-instruction
+	// form, so that the link mode decision can be delayed to link time.
+	//
+	// In this way, all supported systems use identical instructions to
+	// access TLS, and they are rewritten appropriately first here in
+	// liblink and then finally using relocations in the linker.
+
+	if canuselocaltls(ctxt) != 0 {
+
+		// Reduce TLS initial exec model to TLS local exec model.
+		// Sequences like
+		//      MOVQ TLS, BX
+		//      ... off(BX)(TLS*1) ...
+		// become
+		//      NOP
+		//      ... off(TLS) ...
+		//
+		// TODO(rsc): Remove the Hsolaris special case. It exists only to
+		// guarantee we are producing byte-identical binaries as before this code.
+		// But it should be unnecessary.
+		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type_ == D_TLS && D_AX <= p.To.Type_ && p.To.Type_ <= D_R15 && ctxt.Headtype != obj.Hsolaris {
+
+			nopout(p)
+		}
+		if p.From.Index == D_TLS && D_INDIR+D_AX <= p.From.Type_ && p.From.Type_ <= D_INDIR+D_R15 {
+			p.From.Type_ = D_INDIR + D_TLS
+			p.From.Scale = 0
+			p.From.Index = D_NONE
+		}
+
+		if p.To.Index == D_TLS && D_INDIR+D_AX <= p.To.Type_ && p.To.Type_ <= D_INDIR+D_R15 {
+			p.To.Type_ = D_INDIR + D_TLS
+			p.To.Scale = 0
+			p.To.Index = D_NONE
+		}
+	} else {
+
+		// As a courtesy to the C compilers, rewrite TLS local exec load as TLS initial exec load.
+		// The instruction
+		//      MOVQ off(TLS), BX
+		// becomes the sequence
+		//      MOVQ TLS, BX
+		//      MOVQ off(BX)(TLS*1), BX
+		// This allows the C compilers to emit references to m and g using the direct off(TLS) form.
+		if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type_ == D_INDIR+D_TLS && D_AX <= p.To.Type_ && p.To.Type_ <= D_R15 {
+
+			q = obj.Appendp(ctxt, p)
+			q.As = p.As
+			q.From = p.From
+			q.From.Type_ = D_INDIR + p.To.Type_
+			q.From.Index = D_TLS
+			q.From.Scale = 2 // TODO: use 1
+			q.To = p.To
+			p.From.Type_ = D_TLS
+			p.From.Index = D_NONE
+			p.From.Offset = 0
+		}
+	}
+
+	// TODO: Remove.
+	if ctxt.Headtype == obj.Hwindows || ctxt.Headtype == obj.Hplan9 {
+
+		if p.From.Scale == 1 && p.From.Index == D_TLS {
+			p.From.Scale = 2
+		}
+		if p.To.Scale == 1 && p.To.Index == D_TLS {
+			p.To.Scale = 2
+		}
+	}
+
+	if ctxt.Headtype == obj.Hnacl {
+		nacladdr(ctxt, p, &p.From)
+		nacladdr(ctxt, p, &p.To)
+	}
+
+	// Maintain information about code generation mode.
+	if ctxt.Mode == 0 {
+
+		ctxt.Mode = 64
+	}
+	p.Mode = int8(ctxt.Mode)
+
+	// The MODE pseudo-instruction switches the assembler's code
+	// generation mode (16/32/64 bit) and is then removed.
+	switch p.As {
+	case AMODE:
+		if p.From.Type_ == D_CONST || p.From.Type_ == D_INDIR+D_NONE {
+			switch int(p.From.Offset) {
+			case 16,
+				32,
+				64:
+				ctxt.Mode = int(p.From.Offset)
+				break
+			}
+		}
+
+		nopout(p)
+		break
+	}
+
+	// Rewrite CALL/JMP/RET to symbol as D_BRANCH.
+	switch p.As {
+
+	case ACALL,
+		AJMP,
+		ARET:
+		if (p.To.Type_ == D_EXTERN || p.To.Type_ == D_STATIC) && p.To.Sym != nil {
+			p.To.Type_ = D_BRANCH
+		}
+		break
+	}
+
+	// Rewrite float constants to values stored in memory.
+	switch p.As {
+
+	// Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
+	case AMOVSS:
+		if p.From.Type_ == D_FCONST {
+
+			if p.From.U.Dval == 0 {
+				if p.To.Type_ >= D_X0 {
+					if p.To.Type_ <= D_X15 {
+						p.As = AXORPS
+						p.From.Type_ = p.To.Type_
+						p.From.Index = p.To.Index
+						break
+					}
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	// Other float32 constants become $f32.xxxxxxxx read-only data
+	// symbols, deduplicated by the hex pattern of their bits.
+	case AFMOVF,
+		AFADDF,
+		AFSUBF,
+		AFSUBRF,
+		AFMULF,
+		AFDIVF,
+		AFDIVRF,
+		AFCOMF,
+		AFCOMFP,
+		AADDSS,
+		ASUBSS,
+		AMULSS,
+		ADIVSS,
+		ACOMISS,
+		AUCOMISS:
+		if p.From.Type_ == D_FCONST {
+
+			var i32 uint32
+			var f32 float32
+			f32 = float32(p.From.U.Dval)
+			i32 = math.Float32bits(f32)
+			literal = fmt.Sprintf("$f32.%08x", i32)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint32(ctxt, s, i32)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+
+	// Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
+	case AMOVSD:
+		if p.From.Type_ == D_FCONST {
+
+			if p.From.U.Dval == 0 {
+				if p.To.Type_ >= D_X0 {
+					if p.To.Type_ <= D_X15 {
+						p.As = AXORPS
+						p.From.Type_ = p.To.Type_
+						p.From.Index = p.To.Index
+						break
+					}
+				}
+			}
+		}
+		fallthrough
+
+	// fallthrough
+
+	// Other float64 constants become $f64.xxxxxxxxxxxxxxxx symbols,
+	// analogous to the float32 case above.
+	case AFMOVD,
+		AFADDD,
+		AFSUBD,
+		AFSUBRD,
+		AFMULD,
+		AFDIVD,
+		AFDIVRD,
+		AFCOMD,
+		AFCOMDP,
+		AADDSD,
+		ASUBSD,
+		AMULSD,
+		ADIVSD,
+		ACOMISD,
+		AUCOMISD:
+		if p.From.Type_ == D_FCONST {
+
+			var i64 uint64
+			i64 = math.Float64bits(p.From.U.Dval)
+			literal = fmt.Sprintf("$f64.%016x", i64)
+			s = obj.Linklookup(ctxt, literal, 0)
+			if s.Type_ == 0 {
+				s.Type_ = obj.SRODATA
+				obj.Adduint64(ctxt, s, i64)
+				s.Reachable = 0
+			}
+
+			p.From.Type_ = D_EXTERN
+			p.From.Sym = s
+			p.From.Offset = 0
+		}
+
+		break
+	}
+}
+
+// nacladdr rewrites the address a of instruction p for Native Client
+// sandboxing: TLS references are moved to BP, and all other indirect
+// addresses (except those already based on BP, SP, or R15) are
+// rebased as index+R15. LEA instructions are exempt because they do
+// not access memory.
+func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+	if p.As == ALEAL || p.As == ALEAQ {
+		return
+	}
+
+	// BP is reserved for TLS under NaCl; direct use is an error.
+	if a.Type_ == D_BP || a.Type_ == D_INDIR+D_BP {
+		ctxt.Diag("invalid address: %v", p)
+		return
+	}
+
+	if a.Type_ == D_INDIR+D_TLS {
+		a.Type_ = D_INDIR + D_BP
+	} else if a.Type_ == D_TLS {
+		a.Type_ = D_BP
+	}
+	if D_INDIR <= a.Type_ && a.Type_ <= D_INDIR+D_INDIR {
+		switch a.Type_ {
+		// all ok
+		case D_INDIR + D_BP,
+			D_INDIR + D_SP,
+			D_INDIR + D_R15:
+			break
+
+		// Rebase off(reg) as off(R15)(reg*1).
+		default:
+			if a.Index != D_NONE {
+				ctxt.Diag("invalid address %v", p)
+			}
+			a.Index = uint8(a.Type_ - D_INDIR)
+			if a.Index != D_NONE {
+				a.Scale = 1
+			}
+			a.Type_ = D_INDIR + D_R15
+			break
+		}
+	}
+}
+
+// parsetextconst unpacks the TEXT directive's combined size word:
+// the low 32 bits are the stack frame size (sign-extended, so -N
+// frames survive the packing) and the high 32 bits are the argument
+// size. An argument size with the sign bit set means "unknown" and
+// is reported as 0; the result is rounded up to a multiple of 8.
+func parsetextconst(arg int64, textstksiz *int64, textarg *int64) {
+	*textstksiz = arg & 0xffffffff
+	if *textstksiz&0x80000000 != 0 {
+		// Sign-extend a negative 32-bit frame size.
+		*textstksiz = -(-*textstksiz & 0xffffffff)
+	}
+
+	*textarg = (arg >> 32) & 0xffffffff
+	if *textarg&0x80000000 != 0 {
+		*textarg = 0
+	}
+	*textarg = (*textarg + 7) &^ 7
+}
+
+// addstacksplit is the amd64 preprocess pass for one function: it
+// records frame/argument sizes, marks small leaf functions NOSPLIT,
+// loads g into CX, inserts the stack-split check and the frame
+// ADJSP, emits the WRAPPER panic-argp fixup, optionally zeroes the
+// frame (-Z), and rewrites AUTO/PARAM offsets and RET epilogues to
+// account for the adjusted SP.
+func addstacksplit(ctxt *obj.Link, cursym *obj.LSym) {
+	var p *obj.Prog
+	var q *obj.Prog
+	var p1 *obj.Prog
+	var p2 *obj.Prog
+	var autoffset int32
+	var deltasp int32
+	var a int
+	var pcsize int
+	var textstksiz int64
+	var textarg int64
+
+	if ctxt.Tlsg == nil {
+		ctxt.Tlsg = obj.Linklookup(ctxt, "runtime.tlsg", 0)
+	}
+	if ctxt.Symmorestack[0] == nil {
+		ctxt.Symmorestack[0] = obj.Linklookup(ctxt, "runtime.morestack", 0)
+		ctxt.Symmorestack[1] = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+	}
+
+	if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
+		ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+	}
+
+	ctxt.Cursym = cursym
+
+	if cursym.Text == nil || cursym.Text.Link == nil {
+		return
+	}
+
+	p = cursym.Text
+	parsetextconst(p.To.Offset, &textstksiz, &textarg)
+	autoffset = int32(textstksiz)
+	if autoffset < 0 {
+		autoffset = 0
+	}
+
+	cursym.Args = int32(p.To.Offset >> 32)
+	cursym.Locals = int32(textstksiz)
+
+	// A small-frame function with no CALLs (and no Duff's-device
+	// calls that could outgrow StackSmall) is a leaf: mark it
+	// NOSPLIT so no stack check is emitted.
+	if autoffset < obj.StackSmall && !(p.From.Scale&obj.NOSPLIT != 0) {
+		for q = p; q != nil; q = q.Link {
+			if q.As == ACALL {
+				goto noleaf
+			}
+			if (q.As == ADUFFCOPY || q.As == ADUFFZERO) && autoffset >= obj.StackSmall-8 {
+				goto noleaf
+			}
+		}
+
+		p.From.Scale |= obj.NOSPLIT
+	noleaf:
+	}
+
+	q = nil
+	if !(p.From.Scale&obj.NOSPLIT != 0) || (p.From.Scale&obj.WRAPPER != 0) {
+		p = obj.Appendp(ctxt, p)
+		p = load_g_cx(ctxt, p) // load g into CX
+	}
+
+	if !(cursym.Text.From.Scale&obj.NOSPLIT != 0) {
+		p = stacksplit(ctxt, p, autoffset, int32(textarg), bool2int(!(cursym.Text.From.Scale&obj.NEEDCTXT != 0)), &q) // emit split check
+	}
+
+	if autoffset != 0 {
+
+		if autoffset%int32(ctxt.Arch.Regsize) != 0 {
+			ctxt.Diag("unaligned stack size %d", autoffset)
+		}
+		p = obj.Appendp(ctxt, p)
+		p.As = AADJSP
+		p.From.Type_ = D_CONST
+		p.From.Offset = int64(autoffset)
+		p.Spadj = autoffset
+	} else {
+
+		// zero-byte stack adjustment.
+		// Insert a fake non-zero adjustment so that stkcheck can
+		// recognize the end of the stack-splitting prolog.
+		p = obj.Appendp(ctxt, p)
+
+		p.As = ANOP
+		p.Spadj = int32(-ctxt.Arch.Ptrsize)
+		p = obj.Appendp(ctxt, p)
+		p.As = ANOP
+		p.Spadj = int32(ctxt.Arch.Ptrsize)
+	}
+
+	// The split-check's "no split needed" jump lands after the
+	// frame adjustment.
+	if q != nil {
+		q.Pcond = p
+	}
+	deltasp = autoffset
+
+	if cursym.Text.From.Scale&obj.WRAPPER != 0 {
+		// if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+		//
+		//	MOVQ g_panic(CX), BX
+		//	TESTQ BX, BX
+		//	JEQ end
+		//	LEAQ (autoffset+8)(SP), DI
+		//	CMPQ panic_argp(BX), DI
+		//	JNE end
+		//	MOVQ SP, panic_argp(BX)
+		// end:
+		//	NOP
+		//
+		// The NOP is needed to give the jumps somewhere to land.
+		// It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
+
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVQ
+		p.From.Type_ = D_INDIR + D_CX
+		p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+		p.To.Type_ = D_BX
+		if ctxt.Headtype == obj.Hnacl {
+			p.As = AMOVL
+			p.From.Type_ = D_INDIR + D_R15
+			p.From.Scale = 1
+			p.From.Index = D_CX
+		}
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ATESTQ
+		p.From.Type_ = D_BX
+		p.To.Type_ = D_BX
+		if ctxt.Headtype == obj.Hnacl {
+			p.As = ATESTL
+		}
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJEQ
+		p.To.Type_ = D_BRANCH
+		p1 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ALEAQ
+		p.From.Type_ = D_INDIR + D_SP
+		p.From.Offset = int64(autoffset) + 8
+		p.To.Type_ = D_DI
+		if ctxt.Headtype == obj.Hnacl {
+			p.As = ALEAL
+		}
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ACMPQ
+		p.From.Type_ = D_INDIR + D_BX
+		p.From.Offset = 0 // Panic.argp
+		p.To.Type_ = D_DI
+		if ctxt.Headtype == obj.Hnacl {
+			p.As = ACMPL
+			p.From.Type_ = D_INDIR + D_R15
+			p.From.Scale = 1
+			p.From.Index = D_BX
+		}
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJNE
+		p.To.Type_ = D_BRANCH
+		p2 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVQ
+		p.From.Type_ = D_SP
+		p.To.Type_ = D_INDIR + D_BX
+		p.To.Offset = 0 // Panic.argp
+		if ctxt.Headtype == obj.Hnacl {
+			p.As = AMOVL
+			p.To.Type_ = D_INDIR + D_R15
+			p.To.Scale = 1
+			p.To.Index = D_BX
+		}
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ANOP
+		p1.Pcond = p
+		p2.Pcond = p
+	}
+
+	if ctxt.Debugzerostack != 0 && autoffset != 0 && !(cursym.Text.From.Scale&obj.NOSPLIT != 0) {
+		// 6l -Z means zero the stack frame on entry.
+		// This slows down function calls but can help avoid
+		// false positives in garbage collection.
+		p = obj.Appendp(ctxt, p)
+
+		p.As = AMOVQ
+		p.From.Type_ = D_SP
+		p.To.Type_ = D_DI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVQ
+		p.From.Type_ = D_CONST
+		p.From.Offset = int64(autoffset) / 8
+		p.To.Type_ = D_CX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AMOVQ
+		p.From.Type_ = D_CONST
+		p.From.Offset = 0
+		p.To.Type_ = D_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AREP
+
+		p = obj.Appendp(ctxt, p)
+		p.As = ASTOSQ
+	}
+
+	// Rewrite AUTO/PARAM references to SP offsets, tracking the SP
+	// delta through PUSH/POP, and expand RET into frame teardown.
+	for ; p != nil; p = p.Link {
+		pcsize = int(p.Mode) / 8
+		a = int(p.From.Type_)
+		if a == D_AUTO {
+			p.From.Offset += int64(deltasp)
+		}
+		if a == D_PARAM {
+			p.From.Offset += int64(deltasp) + int64(pcsize)
+		}
+		a = int(p.To.Type_)
+		if a == D_AUTO {
+			p.To.Offset += int64(deltasp)
+		}
+		if a == D_PARAM {
+			p.To.Offset += int64(deltasp) + int64(pcsize)
+		}
+
+		switch p.As {
+		default:
+			continue
+
+		case APUSHL,
+			APUSHFL:
+			deltasp += 4
+			p.Spadj = 4
+			continue
+
+		case APUSHQ,
+			APUSHFQ:
+			deltasp += 8
+			p.Spadj = 8
+			continue
+
+		case APUSHW,
+			APUSHFW:
+			deltasp += 2
+			p.Spadj = 2
+			continue
+
+		case APOPL,
+			APOPFL:
+			deltasp -= 4
+			p.Spadj = -4
+			continue
+
+		case APOPQ,
+			APOPFQ:
+			deltasp -= 8
+			p.Spadj = -8
+			continue
+
+		case APOPW,
+			APOPFW:
+			deltasp -= 2
+			p.Spadj = -2
+			continue
+
+		case ARET:
+			break
+		}
+
+		if autoffset != deltasp {
+			ctxt.Diag("unbalanced PUSH/POP")
+		}
+
+		if autoffset != 0 {
+			p.As = AADJSP
+			p.From.Type_ = D_CONST
+			p.From.Offset = int64(-autoffset)
+			p.Spadj = -autoffset
+			p = obj.Appendp(ctxt, p)
+			p.As = ARET
+
+			// If there are instructions following
+			// this ARET, they come from a branch
+			// with the same stackframe, so undo
+			// the cleanup.
+			p.Spadj = +autoffset
+		}
+
+		if p.To.Sym != nil { // retjmp
+			p.As = AJMP
+		}
+	}
+}
+
+// indir_cx rewrites a as an indirection through CX — or, under the
+// NaCl sandbox, as the equivalent R15-based form (CX*1)(R15).
+func indir_cx(ctxt *obj.Link, a *obj.Addr) {
+	if ctxt.Headtype != obj.Hnacl {
+		a.Type_ = D_INDIR + D_CX
+		return
+	}
+
+	a.Type_ = D_INDIR + D_R15
+	a.Index = D_CX
+	a.Scale = 1
+}
+
+// Append code to p to load g into cx.
+// Overwrites p with the first instruction (no first appendp).
+// Overwriting p is unusual but it lets us use this in both the
+// prologue (caller must call appendp first) and in the epilogue.
+// Returns last new instruction.
+func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
+
+	var next *obj.Prog
+
+	p.As = AMOVQ
+	if ctxt.Arch.Ptrsize == 4 {
+		p.As = AMOVL
+	}
+	p.From.Type_ = D_INDIR + D_TLS
+	p.From.Offset = 0
+	p.To.Type_ = D_CX
+
+	// progedit may expand the TLS load into several instructions;
+	// advance p to the last one it emitted.
+	next = p.Link
+	progedit(ctxt, p)
+	for p.Link != next {
+		p = p.Link
+	}
+
+	// NOTE(review): mirrors the Windows/Plan 9 TLS scale fixup in
+	// progedit — confirm it is still required here.
+	if p.From.Index == D_TLS {
+		p.From.Scale = 2
+	}
+
+	return p
+}
+
+// Append code to p to check for stack split.
+// Appends to (does not overwrite) p.
+// Assumes g is in CX.
+// Returns last new instruction.
+// On return, *jmpok is the instruction that should jump
+// to the stack frame allocation if no split is needed.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32, noctxt int, jmpok **obj.Prog) *obj.Prog {
+
+	var q *obj.Prog
+	var q1 *obj.Prog
+	var cmp int
+	var lea int
+	var mov int
+	var sub int
+
+	// NaCl runs in 32-bit mode, so use the 32-bit opcode forms there.
+	cmp = ACMPQ
+	lea = ALEAQ
+	mov = AMOVQ
+	sub = ASUBQ
+
+	if ctxt.Headtype == obj.Hnacl {
+		cmp = ACMPL
+		lea = ALEAL
+		mov = AMOVL
+		sub = ASUBL
+	}
+
+	q1 = nil
+	if framesize <= obj.StackSmall {
+		// small stack: SP <= stackguard
+		//	CMPQ SP, stackguard
+		p = obj.Appendp(ctxt, p)
+
+		p.As = int16(cmp)
+		p.From.Type_ = D_SP
+		indir_cx(ctxt, &p.To)
+		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+	} else if framesize <= obj.StackBig {
+		// large stack: SP-framesize <= stackguard-StackSmall
+		//	LEAQ -xxx(SP), AX
+		//	CMPQ AX, stackguard
+		p = obj.Appendp(ctxt, p)
+
+		p.As = int16(lea)
+		p.From.Type_ = D_INDIR + D_SP
+		p.From.Offset = -(int64(framesize) - obj.StackSmall)
+		p.To.Type_ = D_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = int16(cmp)
+		p.From.Type_ = D_AX
+		indir_cx(ctxt, &p.To)
+		p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+	} else {
+
+		// Such a large stack we need to protect against wraparound.
+		// If SP is close to zero:
+		//	SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+		// The +StackGuard on both sides is required to keep the left side positive:
+		// SP is allowed to be slightly below stackguard. See stack.h.
+		//
+		// Preemption sets stackguard to StackPreempt, a very large value.
+		// That breaks the math above, so we have to check for that explicitly.
+		//	MOVQ	stackguard, CX
+		//	CMPQ	CX, $StackPreempt
+		//	JEQ	label-of-call-to-morestack
+		//	LEAQ	StackGuard(SP), AX
+		//	SUBQ	CX, AX
+		//	CMPQ	AX, $(framesize+(StackGuard-StackSmall))
+
+		p = obj.Appendp(ctxt, p)
+
+		p.As = int16(mov)
+		indir_cx(ctxt, &p.From)
+		p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+		if ctxt.Cursym.Cfunc != 0 {
+			p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+		}
+		p.To.Type_ = D_SI
+
+		p = obj.Appendp(ctxt, p)
+		p.As = int16(cmp)
+		p.From.Type_ = D_SI
+		p.To.Type_ = D_CONST
+		p.To.Offset = obj.StackPreempt
+
+		p = obj.Appendp(ctxt, p)
+		p.As = AJEQ
+		p.To.Type_ = D_BRANCH
+		q1 = p
+
+		p = obj.Appendp(ctxt, p)
+		p.As = int16(lea)
+		p.From.Type_ = D_INDIR + D_SP
+		p.From.Offset = obj.StackGuard
+		p.To.Type_ = D_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = int16(sub)
+		p.From.Type_ = D_SI
+		p.To.Type_ = D_AX
+
+		p = obj.Appendp(ctxt, p)
+		p.As = int16(cmp)
+		p.From.Type_ = D_AX
+		p.To.Type_ = D_CONST
+		p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+	}
+
+	// common
+	// JHI skips the morestack call; CALL morestack; JMP back to the
+	// function entry to retry the check on the new stack.
+	p = obj.Appendp(ctxt, p)
+
+	p.As = AJHI
+	p.To.Type_ = D_BRANCH
+	q = p
+
+	p = obj.Appendp(ctxt, p)
+	p.As = ACALL
+	p.To.Type_ = D_BRANCH
+	if ctxt.Cursym.Cfunc != 0 {
+		p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+	} else {
+
+		p.To.Sym = ctxt.Symmorestack[noctxt]
+	}
+
+	p = obj.Appendp(ctxt, p)
+	p.As = AJMP
+	p.To.Type_ = D_BRANCH
+	p.Pcond = ctxt.Cursym.Text.Link
+
+	if q != nil {
+		q.Pcond = p.Link
+	}
+	if q1 != nil {
+		q1.Pcond = q.Link
+	}
+
+	*jmpok = q
+	return p
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ var firstp *obj.Prog
+ var lastp *obj.Prog
+
+ ctxt.Cursym = s
+
+ firstp = ctxt.Arch.Prg()
+ lastp = firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+// nofollow reports (as 0/1) whether control never falls through
+// past opcode a (unconditional jumps, returns, and UNDEF).
+func nofollow(a int) int {
+	switch a {
+	case AIRETL, AIRETQ, AIRETW,
+		AJMP,
+		ARET, ARETFL, ARETFQ, ARETFW,
+		AUNDEF:
+		return 1
+	default:
+		return 0
+	}
+}
+
+// pushpop reports (as 0/1) whether opcode a pushes or pops the
+// stack; such instructions must not be duplicated during layout.
+func pushpop(a int) int {
+	switch a {
+	case APOPFL, APOPFQ, APOPFW,
+		APOPL, APOPQ, APOPW,
+		APUSHFL, APUSHFQ, APUSHFW,
+		APUSHL, APUSHQ, APUSHW:
+		return 1
+	default:
+		return 0
+	}
+}
+
+// relinv returns the conditional-jump opcode whose condition is the
+// inverse of a's (JEQ<->JNE, JLT<->JGE, ...). It aborts the build
+// when a is not a known conditional jump.
+func relinv(a int) int {
+	switch a {
+	case AJEQ:
+		return AJNE
+	case AJNE:
+		return AJEQ
+	case AJLT:
+		return AJGE
+	case AJGE:
+		return AJLT
+	case AJLE:
+		return AJGT
+	case AJGT:
+		return AJLE
+	case AJLS:
+		return AJHI
+	case AJHI:
+		return AJLS
+	case AJMI:
+		return AJPL
+	case AJPL:
+		return AJMI
+	case AJCS:
+		return AJCC
+	case AJCC:
+		return AJCS
+	case AJPS:
+		return AJPC
+	case AJPC:
+		return AJPS
+	case AJOS:
+		return AJOC
+	case AJOC:
+		return AJOS
+	}
+
+	log.Fatalf("unknown relation: %s", anames6[a])
+	return 0
+}
+
+// xfol lays out the instruction stream starting at p in fall-through
+// order, appending to *last: unconditional jumps are followed to
+// their targets, already-placed code is either duplicated (up to 4
+// instructions) or reached by a fresh JMP, and conditional branches
+// may be inverted so the expected path falls through.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+	var q *obj.Prog
+	var i int
+	var a int
+
+loop:
+	if p == nil {
+		return
+	}
+	if p.As == AJMP {
+		q = p.Pcond
+		if q != nil && q.As != ATEXT {
+			/* mark instruction as done and continue layout at target of jump */
+			p.Mark = 1
+
+			p = q
+			if p.Mark == 0 {
+				goto loop
+			}
+		}
+	}
+
+	if p.Mark != 0 {
+		/*
+		 * p goes here, but already used it elsewhere.
+		 * copy up to 4 instructions or else branch to other copy.
+		 */
+		i = 0
+		q = p
+		for ; i < 4; (func() { i++; q = q.Link })() {
+
+			if q == nil {
+				break
+			}
+			if q == *last {
+				break
+			}
+			a = int(q.As)
+			if a == ANOP {
+				i--
+				continue
+			}
+
+			if nofollow(a) != 0 || pushpop(a) != 0 {
+				break // NOTE(rsc): arm does goto copy
+			}
+			if q.Pcond == nil || q.Pcond.Mark != 0 {
+				continue
+			}
+			if a == ACALL || a == ALOOP {
+				continue
+			}
+			// Found a conditional branch whose target is still
+			// unplaced within the copy window: duplicate the
+			// instructions up to it, invert the branch, and lay
+			// out the (former) fall-through path recursively.
+			for {
+				if p.As == ANOP {
+					p = p.Link
+					continue
+				}
+
+				q = obj.Copyp(ctxt, p)
+				p = p.Link
+				q.Mark = 1
+				(*last).Link = q
+				*last = q
+				if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
+					continue
+				}
+
+				q.As = int16(relinv(int(q.As)))
+				p = q.Pcond
+				q.Pcond = q.Link
+				q.Link = p
+				xfol(ctxt, q.Link, last)
+				p = q.Link
+				if p.Mark != 0 {
+					return
+				}
+				goto loop
+				/* */
+			}
+		}
+		// Could not duplicate: emit a JMP to the existing copy.
+		q = ctxt.Arch.Prg()
+		q.As = AJMP
+		q.Lineno = p.Lineno
+		q.To.Type_ = D_BRANCH
+		q.To.Offset = p.Pc
+		q.Pcond = p
+		p = q
+	}
+
+	/* emit p */
+	p.Mark = 1
+
+	(*last).Link = p
+	*last = p
+	a = int(p.As)
+
+	/* continue loop with what comes after p */
+	if nofollow(a) != 0 {
+
+		return
+	}
+	if p.Pcond != nil && a != ACALL {
+		/*
+		 * some kind of conditional branch.
+		 * recurse to follow one path.
+		 * continue loop on the other.
+		 */
+		q = obj.Brchain(ctxt, p.Pcond)
+		if q != nil {
+
+			p.Pcond = q
+		}
+		q = obj.Brchain(ctxt, p.Link)
+		if q != nil {
+			p.Link = q
+		}
+		if p.From.Type_ == D_CONST {
+			if p.From.Offset == 1 {
+				/*
+				 * expect conditional jump to be taken.
+				 * rewrite so that's the fall-through case.
+				 */
+				p.As = int16(relinv(a))
+
+				q = p.Link
+				p.Link = p.Pcond
+				p.Pcond = q
+			}
+		} else {
+
+			q = p.Link
+			if q.Mark != 0 {
+				if a != ALOOP {
+					p.As = int16(relinv(a))
+					p.Link = p.Pcond
+					p.Pcond = q
+				}
+			}
+		}
+
+		xfol(ctxt, p.Link, last)
+		if p.Pcond.Mark != 0 {
+			return
+		}
+		p = p.Pcond
+		goto loop
+	}
+
+	p = p.Link
+	goto loop
+}
+
+func prg() *obj.Prog {
+ var p *obj.Prog
+
+ p = new(obj.Prog)
+ *p = zprg
+ return p
+}
+
+// Linkamd64 is the LinkArch description of the amd64 ('6') back
+// end: its pass entry points plus the operand-type and
+// pseudo-instruction constants the architecture-independent code
+// needs.
+var Linkamd64 = obj.LinkArch{
+	ByteOrder:     binary.LittleEndian,
+	Pconv:         Pconv,
+	Name:          "amd64",
+	Thechar:       '6',
+	Endian:        obj.LittleEndian,
+	Addstacksplit: addstacksplit,
+	Assemble:      span6,
+	Datasize:      datasize,
+	Follow:        follow,
+	Iscall:        iscall,
+	Isdata:        isdata,
+	Prg:           prg,
+	Progedit:      progedit,
+	Settextflag:   settextflag,
+	Symtype:       symtype,
+	Textflag:      textflag,
+	Minlc:         1,
+	Ptrsize:       8,
+	Regsize:       8,
+	D_ADDR:        D_ADDR,
+	D_AUTO:        D_AUTO,
+	D_BRANCH:      D_BRANCH,
+	D_CONST:       D_CONST,
+	D_EXTERN:      D_EXTERN,
+	D_FCONST:      D_FCONST,
+	D_NONE:        D_NONE,
+	D_PARAM:       D_PARAM,
+	D_SCONST:      D_SCONST,
+	D_STATIC:      D_STATIC,
+	ACALL:         ACALL,
+	ADATA:         ADATA,
+	AEND:          AEND,
+	AFUNCDATA:     AFUNCDATA,
+	AGLOBL:        AGLOBL,
+	AJMP:          AJMP,
+	ANOP:          ANOP,
+	APCDATA:       APCDATA,
+	ARET:          ARET,
+	ATEXT:         ATEXT,
+	ATYPE:         ATYPE,
+	AUSEFIELD:     AUSEFIELD,
+}
+
+// Linkamd64p32 describes amd64 with 32-bit pointers (the
+// nacl/amd64p32 port): identical to Linkamd64 except Ptrsize is 4.
+var Linkamd64p32 = obj.LinkArch{
+	ByteOrder:     binary.LittleEndian,
+	Pconv:         Pconv,
+	Name:          "amd64p32",
+	Thechar:       '6',
+	Endian:        obj.LittleEndian,
+	Addstacksplit: addstacksplit,
+	Assemble:      span6,
+	Datasize:      datasize,
+	Follow:        follow,
+	Iscall:        iscall,
+	Isdata:        isdata,
+	Prg:           prg,
+	Progedit:      progedit,
+	Settextflag:   settextflag,
+	Symtype:       symtype,
+	Textflag:      textflag,
+	Minlc:         1,
+	Ptrsize:       4,
+	Regsize:       8,
+	D_ADDR:        D_ADDR,
+	D_AUTO:        D_AUTO,
+	D_BRANCH:      D_BRANCH,
+	D_CONST:       D_CONST,
+	D_EXTERN:      D_EXTERN,
+	D_FCONST:      D_FCONST,
+	D_NONE:        D_NONE,
+	D_PARAM:       D_PARAM,
+	D_SCONST:      D_SCONST,
+	D_STATIC:      D_STATIC,
+	ACALL:         ACALL,
+	ADATA:         ADATA,
+	AEND:          AEND,
+	AFUNCDATA:     AFUNCDATA,
+	AGLOBL:        AGLOBL,
+	AJMP:          AJMP,
+	ANOP:          ANOP,
+	APCDATA:       APCDATA,
+	ARET:          ARET,
+	ATEXT:         ATEXT,
+	ATYPE:         ATYPE,
+	AUSEFIELD:     AUSEFIELD,
+}
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Dummy placeholder for the real obj package.
-
package x86
-var Exported bool
+const (
+	// fmtLong selects Dconv's %lD form: print a 64-bit constant as
+	// its low and high 32-bit words separated by "-".
+	fmtLong = 1 << iota
+)
+
+// bool2int converts a boolean to the C-style integers 1 (true) and
+// 0 (false), as used by the translated liblink code.
+func bool2int(b bool) int {
+	v := 0
+	if b {
+		v = 1
+	}
+	return v
+}