}
var allAsmTests = []*asmTests{
- {
- arch: "amd64",
- os: "linux",
- imports: []string{"runtime"},
- tests: linuxAMD64Tests,
- },
- {
- arch: "arm",
- os: "linux",
- imports: []string{"runtime"},
- tests: linuxARMTests,
- },
- {
- arch: "arm64",
- os: "linux",
- tests: linuxARM64Tests,
- },
{
arch: "amd64",
os: "plan9",
},
}
-var linuxAMD64Tests = []*asmTest{
- {
- // make sure assembly output has matching offset and base register.
- fn: `
- func f72(a, b int) int {
- runtime.GC() // use some frame
- return b
- }
- `,
- pos: []string{"b\\+24\\(SP\\)"},
- },
- // Make sure we don't put pointers in SSE registers across safe points.
- {
- fn: `
- func $(p, q *[2]*int) {
- a, b := p[0], p[1]
- runtime.GC()
- q[0], q[1] = a, b
- }
- `,
- neg: []string{"MOVUPS"},
- },
-}
-
-var linuxARMTests = []*asmTest{
- {
- // make sure assembly output has matching offset and base register.
- fn: `
- func f13(a, b int) int {
- runtime.GC() // use some frame
- return b
- }
- `,
- pos: []string{"b\\+4\\(FP\\)"},
- },
-}
-
-var linuxARM64Tests = []*asmTest{
- // Load-combining tests.
- {
- fn: `
- func $(s []byte) uint16 {
- return uint16(s[0]) | uint16(s[1]) << 8
- }
- `,
- pos: []string{"\tMOVHU\t\\(R[0-9]+\\)"},
- neg: []string{"ORR\tR[0-9]+<<8\t"},
- },
- {
- // make sure that CSEL is emitted for conditional moves
- fn: `
- func f37(c int) int {
- x := c + 4
- if c < 0 {
- x = 182
- }
- return x
- }
- `,
- pos: []string{"\tCSEL\t"},
- },
-}
-
var plan9AMD64Tests = []*asmTest{
// We should make sure that the compiler doesn't generate floating point
// instructions for non-float operations on Plan 9, because floating point
package codegen
-import "encoding/binary"
+import (
+ "encoding/binary"
+ "runtime"
+)
var sink64 uint64
var sink32 uint32
sink16 = binary.BigEndian.Uint16(b[idx:])
}
+// load_byte2_uint16 checks that two adjacent byte loads merged with an
+// 8-bit shift are combined into a single 16-bit load. On arm64 the
+// directive requires one MOVHU load and forbids the ORR-with-shift
+// merge that two separate byte loads would produce.
+// (Migrated from the old table-driven linuxARM64Tests in asm_test.go.)
+func load_byte2_uint16(s []byte) uint16 {
+	// arm64:`MOVHU\t\(R[0-9]+\)`,-`ORR\tR[0-9]+<<8`
+	return uint16(s[0]) | uint16(s[1])<<8
+}
+
// Check load combining across function calls.
func fcall_byte(a, b byte) (byte, byte) {
return
}
+// Make sure we don't put pointers in SSE registers across safe
+// points.
+
+// safe_point copies two pointers out of *p, forces a GC safe point,
+// then stores them into *q. The amd64 directives forbid MOVUPS on both
+// the load and the store: packing the two pointers into an SSE register
+// would hide them from the stack/register maps at the runtime.GC() call.
+// (Migrated from the old table-driven linuxAMD64Tests in asm_test.go,
+// which asserted neg: []string{"MOVUPS"} for the same body.)
+func safe_point(p, q *[2]*int) {
+	a, b := p[0], p[1] // amd64:-`MOVUPS`
+	runtime.GC()
+	q[0], q[1] = a, b // amd64:-`MOVUPS`
+}
+
// ------------- //
// Storing //
// ------------- //