}
func MergeMuls2(n int) int {
- // amd64:"IMUL3Q\t[$]23","ADDQ\t[$]29"
+ // amd64:"IMUL3Q\t[$]23","(ADDQ\t[$]29)|(LEAQ\t29)"
// 386:"IMUL3L\t[$]23","ADDL\t[$]29"
return 5*n + 7*(n+1) + 11*(n+2) // 23n + 29
}
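The widened amd64 pattern accepts either form of the final +29, since the constant add may be folded into an address computation (LEAQ) instead of a separate ADDQ. A minimal stand-alone check of the algebra behind both immediates (a sketch, not part of the change):

// Minimal sketch: 5n + 7(n+1) + 11(n+2) simplifies to 23n + 29,
// which is where the IMUL3Q $23 and the 29 in the checks come from.
package main

import "fmt"

func main() {
	for _, n := range []int{-3, 0, 1, 100} {
		fmt.Println(5*n+7*(n+1)+11*(n+2) == 23*n+29) // true for every n
	}
}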
// check direct operation on memory with constant and shifted constant sources
func bitOpOnMem(a []uint32, b, c, d uint32) {
- // amd64:`ANDL\s[$]200,\s\([A-Z]+\)`
+ // amd64:`ANDL\s[$]200,\s\([A-Z][A-Z0-9]+\)`
a[0] &= 200
- // amd64:`ORL\s[$]220,\s4\([A-Z]+\)`
+ // amd64:`ORL\s[$]220,\s4\([A-Z][A-Z0-9]+\)`
a[1] |= 220
- // amd64:`XORL\s[$]240,\s8\([A-Z]+\)`
+ // amd64:`XORL\s[$]240,\s8\([A-Z][A-Z0-9]+\)`
a[2] ^= 240
- // amd64:`BTRL\s[$]15,\s12\([A-Z]+\)`,-`ANDL`
+ // amd64:`BTRL\s[$]15,\s12\([A-Z][A-Z0-9]+\)`,-`ANDL`
a[3] &= 0xffff7fff
- // amd64:`BTSL\s[$]14,\s16\([A-Z]+\)`,-`ORL`
+ // amd64:`BTSL\s[$]14,\s16\([A-Z][A-Z0-9]+\)`,-`ORL`
a[4] |= 0x4000
- // amd64:`BTCL\s[$]13,\s20\([A-Z]+\)`,-`XORL`
+ // amd64:`BTCL\s[$]13,\s20\([A-Z][A-Z0-9]+\)`,-`XORL`
a[5] ^= 0x2000
- // amd64:`BTRL\s[A-Z]+,\s24\([A-Z]+\)`
+ // amd64:`BTRL\s[A-Z][A-Z0-9]+,\s24\([A-Z][A-Z0-9]+\)`
a[6] &^= 1 << (b & 31)
- // amd64:`BTSL\s[A-Z]+,\s28\([A-Z]+\)`
+ // amd64:`BTSL\s[A-Z][A-Z0-9]+,\s28\([A-Z][A-Z0-9]+\)`
a[7] |= 1 << (c & 31)
- // amd64:`BTCL\s[A-Z]+,\s32\([A-Z]+\)`
+ // amd64:`BTCL\s[A-Z][A-Z0-9]+,\s32\([A-Z][A-Z0-9]+\)`
a[8] ^= 1 << (d & 31)
}
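The immediates and displacements in these checks follow directly from the Go constants: each single-bit mask gives the BTRL/BTSL/BTCL bit index, and a[i] lives at byte offset 4*i. A minimal stand-alone sketch of that correspondence (not part of the change):

// Minimal sketch relating the constants above to the expected immediates.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	fmt.Println(bits.TrailingZeros32(^uint32(0xffff7fff))) // 15: the BTRL immediate for a[3]
	fmt.Println(bits.TrailingZeros32(0x4000))              // 14: the BTSL immediate for a[4]
	fmt.Println(bits.TrailingZeros32(0x2000))              // 13: the BTCL immediate for a[5]
	fmt.Println(3*4, 6*4)                                  // 12, 24: byte displacements of a[3] and a[6]
}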
// amd64:`MOVQ\t\$-2401018187971961171, R8`, `MOVQ\t\$-2401018187971961171, R9`, `MOVQ\t\$-2401018187971961171, R10`
// amd64:`MOVQ\t\$-2401018187971961171, R11`, `MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13`
// amd64:-`MOVQ\t\$-2401018187971961171, BP` // frame pointer is not clobbered
- StackArgsCall(a, b, c, d)
+ StackArgsCall([10]int{a, b, c})
// amd64:`MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13`, `MOVQ\t\$-2401018187971961171, DX`
// amd64:-`MOVQ\t\$-2401018187971961171, AX`, -`MOVQ\t\$-2401018187971961171, R11` // register args are not clobbered
RegArgsCall(a, b, c, d)
}
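The repeated immediate in these checks is the signed decimal form of 0xdeaddeaddeaddead, presumably the pattern the clobber mode exercised by this test writes into dead registers; switching StackArgsCall to a single [10]int parameter keeps its arguments out of registers, which is presumably why the registers checked before that call may be clobbered, while the register arguments to RegArgsCall must survive. A quick way to reproduce the constant (a sketch, not part of the change):

// Minimal sketch: the check constant is 0xdeaddeaddeaddead reinterpreted as int64.
package main

import "fmt"

func main() {
	var pattern uint64 = 0xdeaddeaddeaddead
	fmt.Println(int64(pattern)) // -2401018187971961171
}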
//go:noinline
-func StackArgsCall(int, int, int, S) {}
+func StackArgsCall([10]int) {}
//go:noinline
//go:registerparams
// Test that LEAQ/ADDQconst are folded into SETx ops
-func CmpFold(x uint32) bool {
- // amd64:`SETHI\t.*\(SP\)`
- return x > 4
+var r bool
+
+func CmpFold(x uint32) {
+ // amd64:`SETHI\t.*\(SB\)`
+ r = x > 4
}
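The old pattern matched a SETHI store into the SP-based result slot; with results returned in registers that store goes away, which is presumably why the test now writes the comparison into a package-level variable and matches an SB-relative store instead. A stand-alone mirror of the new shape, with an illustrative lowercase name (a sketch, not part of the change):

// Minimal sketch mirroring the updated test: an unsigned compare whose
// result is stored to a global rather than returned.
package main

import "fmt"

var r bool

func cmpFold(x uint32) {
	r = x > 4
}

func main() {
	cmpFold(10)
	fmt.Println(r) // true
}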
// Test that direct comparisons with memory are generated when
'\r': true,
}
-func zeroExtArgByte(ch byte) bool {
- return wsp[ch] // amd64:-"MOVBLZX\t..,.."
+func zeroExtArgByte(ch [2]byte) bool {
+ return wsp[ch[0]] // amd64:-"MOVBLZX\t..,.."
}
-func zeroExtArgUint16(ch uint16) bool {
- return wsp[ch] // amd64:-"MOVWLZX\t..,.."
+func zeroExtArgUint16(ch [2]uint16) bool {
+ return wsp[ch[0]] // amd64:-"MOVWLZX\t..,.."
}
// Direct use of constants in fast map access calls (Issue #19015).
func AccessInt1(m map[int]int) int {
- // amd64:"MOVQ\t[$]5"
+ // amd64:"MOV[LQ]\t[$]5"
return m[5]
}
func AccessInt2(m map[int]int) bool {
- // amd64:"MOVQ\t[$]5"
+ // amd64:"MOV[LQ]\t[$]5"
_, ok := m[5]
return ok
}
// are evaluated at compile-time
func constantCheck64() bool {
- // amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1"
+ // amd64:"(MOVB\t[$]0)|(XORL\t[A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)",-"FCMP",-"MOVB\t[$]1"
// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
}
func constantCheck32() bool {
- // amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0"
+ // amd64:"MOV(B|L)\t[$]1",-"FCMP",-"MOV(B|L)\t[$]0"
// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
}
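Both comparisons in each function reduce to constants after the conversions, so no floating-point compare is emitted and only the materialized result (false for constantCheck64, true for constantCheck32) is left for the checks to find; the widened amd64 patterns presumably allow the constant to be produced either by a byte move or, with register results, by xor-zeroing a register or a 32-bit immediate move. A stand-alone evaluation of the folded expressions (a sketch, not part of the change):

// Minimal sketch: the same expressions, evaluated directly.
package main

import "fmt"

func main() {
	fmt.Println(0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63)))                   // false
	fmt.Println(float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))) // true
}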
// Check load combining across function calls.
-func fcall_byte(a, b byte) (byte, byte) {
- return fcall_byte(fcall_byte(a, b)) // amd64:`MOVW`
+func fcall_byte(a [2]byte) [2]byte {
+ return fcall_byte(fcall_byte(a)) // amd64:`MOVW`
}
-func fcall_uint16(a, b uint16) (uint16, uint16) {
- return fcall_uint16(fcall_uint16(a, b)) // amd64:`MOVL`
+func fcall_uint16(a [2]uint16) [2]uint16 {
+ return fcall_uint16(fcall_uint16(a)) // amd64:`MOVL`
}
-func fcall_uint32(a, b uint32) (uint32, uint32) {
- return fcall_uint32(fcall_uint32(a, b)) // amd64:`MOVQ`
+func fcall_uint32(a [2]uint32) [2]uint32 {
+ return fcall_uint32(fcall_uint32(a)) // amd64:`MOVQ`
}
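Changing the pairs to [2]T presumably keeps the two halves adjacent in memory now that separate scalar parameters and results travel in registers, and each array is exactly the width of the single move the checks expect, which is what lets the loads be combined across the call. A quick size check (a sketch, not part of the change):

// Minimal sketch: the array widths line up with the expected move sizes.
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	fmt.Println(unsafe.Sizeof([2]byte{}))   // 2 bytes -> MOVW
	fmt.Println(unsafe.Sizeof([2]uint16{})) // 4 bytes -> MOVL
	fmt.Println(unsafe.Sizeof([2]uint32{})) // 8 bytes -> MOVQ
}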
// We want to merge load+op in the first function, but not in the
}
func InitNotSmallSliceLiteral() []int {
- // amd64:`MOVQ\t.*autotmp_`
+ // amd64:`LEAQ\t.*stmp_`
return []int{
42,
42,
// Check that assembly output has matching offset and base register
// (issue #21064).
-func check_asmout(a, b int) int {
+func check_asmout(b [2]int) int {
runtime.GC() // use some frame
// amd64:`.*b\+24\(SP\)`
// arm:`.*b\+4\(FP\)`
- return b
+ return b[1]
}
// Check that simple functions get promoted to nosplit, even when
func zeroSize() {
c := make(chan struct{})
- // amd64:`MOVQ\t\$0, ""\.s\+32\(SP\)`
+ // amd64:`MOVQ\t\$0, ""\.s\+56\(SP\)`
var s *int
- g(&s) // force s to be a stack object
+ // force s to be a stack object, also use some (fixed) stack space
+ g(&s, 1, 2, 3, 4, 5)
- // amd64:`LEAQ\t""\..*\+31\(SP\)`
+ // amd64:`LEAQ\t""\..*\+55\(SP\)`
c <- struct{}{}
}
//go:noinline
-func g(p **int) {
-}
+func g(**int, int, int, int, int, int) {}