FSCALEBD F4, F5, F6 // a6101101
FLOGBF F4, F5 // 85241401
FLOGBD F4, F5 // 85281401
+
+ // VSTX/VLDX/XVSTX/XVLDX instructions
+ VMOVQ V2, (R4)(R5) // 82144438
+ VMOVQ (R4)(R5), V2 // 82144038
+ XVMOVQ X2, (R4)(R5) // 82144c38
+ XVMOVQ (R4)(R5), X2 // 82144838
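+
+ // The expected value is the instruction word in little-endian byte order; these
+ // register-indexed forms use the 3R layout opcode[31:15] | rk(index)[14:10] | rj(base)[9:5] | vd[4:0],
+ // so V2 with base R4 and index R5 encodes as 0x38441482 (bytes 82 14 44 38).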
+
+ // VST/VLD/XVST/XVLD instructions
+ VMOVQ V2, (R4) // 8200402c
+ VMOVQ V2, 3(R4) // 820c402c
+ VMOVQ V2, 2040(R4) // 82e05f2c
+ VMOVQ V2, -2040(R4) // 8220602c
+ VMOVQ V2, y+16(FP) // 6260402c
+ VMOVQ V2, x+2030(FP) // 62d85f2c
+ VMOVQ (R4), V2 // 8200002c
+ VMOVQ 3(R4), V2 // 820c002c
+ VMOVQ 2044(R4), V2 // 82f01f2c
+ VMOVQ -2044(R4), V2 // 8210202c
+ VMOVQ y+16(FP), V2 // 6260002c
+ VMOVQ x+2030(FP), V2 // 62d81f2c
+ XVMOVQ X2, (R4) // 8200c02c
+ XVMOVQ X3, 3(R4) // 830cc02c
+ XVMOVQ X4, 2040(R4) // 84e0df2c
+ XVMOVQ X5, -2040(R4) // 8520e02c
+ XVMOVQ X6, y+16(FP) // 6660c02c
+ XVMOVQ X7, x+2030(FP) // 67d8df2c
+ XVMOVQ (R4), X2 // 8200802c
+ XVMOVQ 3(R4), X3 // 830c802c
+ XVMOVQ 2044(R4), X4 // 84f09f2c
+ XVMOVQ -2044(R4), X5 // 8510a02c
+ XVMOVQ y+16(FP), X6 // 6660802c
+ XVMOVQ x+2030(FP), X7 // 67d89f2c
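+
+ // The 12-bit-offset forms above use the 2RI12 layout
+ // opcode[31:22] | si12[21:10] | rj[9:5] | vd[4:0]. Pseudo-FP operands are
+ // assembled as SP(R3)-relative, so y+16(FP) encodes si12 = 24 with rj = 3.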
{AMOVV, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
{AMOVB, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
{AMOVBU, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
+ {AVMOVQ, C_VREG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
+ {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
+ {AVMOVQ, C_VREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
+ {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_SAUTO, C_NONE, 7, 4, REGSP, 0},
{ASC, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
{ASCV, C_REG, C_NONE, C_NONE, C_SOREG, C_NONE, 7, 4, REGZERO, 0},
{AMOVV, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
{AMOVB, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
{AMOVBU, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
+ {AVMOVQ, C_SOREG, C_NONE, C_NONE, C_VREG, C_NONE, 8, 4, REGZERO, 0},
+ {AXVMOVQ, C_SOREG, C_NONE, C_NONE, C_XREG, C_NONE, 8, 4, REGZERO, 0},
+ {AVMOVQ, C_SAUTO, C_NONE, C_NONE, C_VREG, C_NONE, 8, 4, REGSP, 0},
+ {AXVMOVQ, C_SAUTO, C_NONE, C_NONE, C_XREG, C_NONE, 8, 4, REGSP, 0},
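+ // The param column supplies the default base register for these memory forms:
+ // REGSP so that C_SAUTO (stack auto/param) offsets are SP-relative, versus
+ // REGZERO for the plain C_SOREG rows above.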
{ALL, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
{ALLV, C_SOREG, C_NONE, C_NONE, C_REG, C_NONE, 8, 4, REGZERO, 0},
{AMOVV, C_REG, C_NONE, C_NONE, C_ROFF, C_NONE, 20, 4, 0, 0},
{AMOVF, C_FREG, C_NONE, C_NONE, C_ROFF, C_NONE, 20, 4, 0, 0},
{AMOVD, C_FREG, C_NONE, C_NONE, C_ROFF, C_NONE, 20, 4, 0, 0},
+ {AVMOVQ, C_VREG, C_NONE, C_NONE, C_ROFF, C_NONE, 20, 4, 0, 0},
+ {AXVMOVQ, C_XREG, C_NONE, C_NONE, C_ROFF, C_NONE, 20, 4, 0, 0},
/* load with extended register offset */
{AMOVB, C_ROFF, C_NONE, C_NONE, C_REG, C_NONE, 21, 4, 0, 0},
{AMOVV, C_ROFF, C_NONE, C_NONE, C_REG, C_NONE, 21, 4, 0, 0},
{AMOVF, C_ROFF, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0},
{AMOVD, C_ROFF, C_NONE, C_NONE, C_FREG, C_NONE, 21, 4, 0, 0},
+ {AVMOVQ, C_ROFF, C_NONE, C_NONE, C_VREG, C_NONE, 21, 4, 0, 0},
+ {AXVMOVQ, C_ROFF, C_NONE, C_NONE, C_XREG, C_NONE, 21, 4, 0, 0},
{obj.APCALIGN, C_SCON, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0, 0, 0},
{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, C_NONE, 0, 0, 0, 0},
return C_FCCREG
case REG_FCSR0 <= r && r <= REG_FCSR3:
return C_FCSRREG
+ case REG_V0 <= r && r <= REG_V31:
+ return C_VREG
+ case REG_X0 <= r && r <= REG_X31:
+ return C_XREG
}
return C_GOK
AJAL,
AJMP,
AMOVWU,
+ AVMOVQ,
+ AXVMOVQ,
ALL,
ALLV,
ASC,
return 0x07070 << 15 // fstx.s
case AMOVD:
return 0x07078 << 15 // fstx.d
+ case -AVMOVQ:
+ return 0x07080 << 15 // vldx
+ case -AXVMOVQ:
+ return 0x07090 << 15 // xvldx
+ case AVMOVQ:
+ return 0x07088 << 15 // vstx
+ case AXVMOVQ:
+ return 0x07098 << 15 // xvstx
}
if a < 0 {
return 0x0ac << 22
case -AMOVD:
return 0x0ae << 22
-
+ case -AVMOVQ:
+ return 0x0b0 << 22 // vld
+ case -AXVMOVQ:
+ return 0x0b2 << 22 // xvld
+ case AVMOVQ:
+ return 0x0b1 << 22 // vst
+ case AXVMOVQ:
+ return 0x0b3 << 22 // xvst
case ASLLV:
return 0x0041 << 16
case ASRLV: