Cypherpunks repositories - gostls13.git/commitdiff
test/codegen: port various mem-combining tests
authorAlberto Donizetti <alb.donizetti@gmail.com>
Mon, 9 Apr 2018 07:52:40 +0000 (09:52 +0200)
committerAlberto Donizetti <alb.donizetti@gmail.com>
Mon, 9 Apr 2018 12:00:06 +0000 (12:00 +0000)
And delete them from asm_test.

Change-Id: I0e33d58274951ab5acb67b0117b60ef617ea887a
Reviewed-on: https://go-review.googlesource.com/105735
Run-TryBot: Alberto Donizetti <alb.donizetti@gmail.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Daniel Martí <mvdan@mvdan.cc>
src/cmd/compile/internal/gc/asm_test.go
test/codegen/memcombine.go

index c98dfb9d297217f9b4908d7fbcd014430750cb4a..a503b89bf3b1c02b3016a2d5571a30c640f011b0 100644 (file)
@@ -259,26 +259,6 @@ var linuxAMD64Tests = []*asmTest{
                `,
                pos: []string{"\tSHLQ\t\\$5,", "\tLEAQ\t\\(.*\\)\\(.*\\*2\\),"},
        },
-       // see issue 19595.
-       // We want to merge load+op in f58, but not in f59.
-       {
-               fn: `
-               func f58(p, q *int) {
-                       x := *p
-                       *q += x
-               }`,
-               pos: []string{"\tADDQ\t\\("},
-       },
-       {
-               fn: `
-               func f59(p, q *int) {
-                       x := *p
-                       for i := 0; i < 10; i++ {
-                               *q += x
-                       }
-               }`,
-               pos: []string{"\tADDQ\t[A-Z]"},
-       },
        {
                // make sure assembly output has matching offset and base register.
                fn: `
@@ -289,31 +269,6 @@ var linuxAMD64Tests = []*asmTest{
                `,
                pos: []string{"b\\+24\\(SP\\)"},
        },
-       {
-               // check load combining
-               fn: `
-               func f73(a, b byte) (byte,byte) {
-                   return f73(f73(a,b))
-               }
-               `,
-               pos: []string{"\tMOVW\t"},
-       },
-       {
-               fn: `
-               func f74(a, b uint16) (uint16,uint16) {
-                   return f74(f74(a,b))
-               }
-               `,
-               pos: []string{"\tMOVL\t"},
-       },
-       {
-               fn: `
-               func f75(a, b uint32) (uint32,uint32) {
-                   return f75(f75(a,b))
-               }
-               `,
-               pos: []string{"\tMOVQ\t"},
-       },
        // Make sure we don't put pointers in SSE registers across safe points.
        {
                fn: `
@@ -384,16 +339,6 @@ var linuxARM64Tests = []*asmTest{
                `,
                pos: []string{"\tAND\t"},
        },
-       {
-               // make sure offsets are folded into load and store.
-               fn: `
-               func f36(_, a [20]byte) (b [20]byte) {
-                       b = a
-                       return
-               }
-               `,
-               pos: []string{"\tMOVD\t\"\"\\.a\\+[0-9]+\\(FP\\), R[0-9]+", "\tMOVD\tR[0-9]+, \"\"\\.b\\+[0-9]+\\(FP\\)"},
-       },
        {
                // check that we don't emit comparisons for constant shift
                fn: `
index 6d50022aba023e4a1ab6c24d9446309543222caa..ec86a79317b62c0e357708b99e6ebfdc3d2e5d66 100644 (file)
@@ -98,6 +98,40 @@ func load_be16_idx(b []byte, idx int) {
        sink16 = binary.BigEndian.Uint16(b[idx:])
 }
 
+// Check load combining across function calls.
+
+func fcall_byte(a, b byte) (byte, byte) {
+       return fcall_byte(fcall_byte(a, b)) // amd64:`MOVW`
+}
+
+func fcall_uint16(a, b uint16) (uint16, uint16) {
+       return fcall_uint16(fcall_uint16(a, b)) // amd64:`MOVL`
+}
+
+func fcall_uint32(a, b uint32) (uint32, uint32) {
+       return fcall_uint32(fcall_uint32(a, b)) // amd64:`MOVQ`
+}
+
+// We want to merge load+op in the first function, but not in the
+// second. See Issue 19595.
+func load_op_merge(p, q *int) {
+       x := *p
+       *q += x // amd64:`ADDQ\t\(`
+}
+func load_op_no_merge(p, q *int) {
+       x := *p
+       for i := 0; i < 10; i++ {
+               *q += x // amd64:`ADDQ\t[A-Z]`
+       }
+}
+
+// Make sure offsets are folded into loads and stores.
+func offsets_fold(_, a [20]byte) (b [20]byte) {
+       // arm64:`MOVD\t""\.a\+[0-9]+\(FP\), R[0-9]+`,`MOVD\tR[0-9]+, ""\.b\+[0-9]+\(FP\)`
+       b = a
+       return
+}
+
 // ------------- //
 //    Storing    //
 // ------------- //