// func cputicks() int64
TEXT runtime·cputicks(SB),NOSPLIT,$0-8
- CMPB runtime·support_sse2(SB), $1
+ CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
JNE done
CMPB runtime·lfenceBeforeRdtsc(SB), $1
JNE mfence
--- /dev/null
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "internal/cpu"
+ "unsafe"
+)
+
+// Offsets into internal/cpu records for use in assembly.
+// The assembler's go_asm.h mechanism exposes each constant below to .s files
+// as const_<name>, so assembly tests a feature flag with e.g.
+//   CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
+// instead of reading a runtime-owned support_* boolean.
+const (
+ offset_x86_HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
+ offset_x86_HasERMS = unsafe.Offsetof(cpu.X86.HasERMS)
+ offset_x86_HasSSE2 = unsafe.Offsetof(cpu.X86.HasSSE2)
+)
import (
"internal/cpu"
- "unsafe"
-)
-
-// Offsets into internal/cpu records for use in assembly.
-const (
- offsetX86HasAVX2 = unsafe.Offsetof(cpu.X86.HasAVX2)
)
var useAVXmemmove bool
// +build !plan9
+#include "go_asm.h"
#include "textflag.h"
// NOTE: Windows externalthreadhandler expects memclr to preserve DX.
JBE _5through8
CMPL BX, $16
JBE _9through16
- CMPB runtime·support_sse2(SB), $1
+ CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
JNE nosse2
PXOR X0, X0
CMPL BX, $32
JBE _65through128
CMPQ BX, $256
JBE _129through256
- CMPB internal∕cpu·X86+const_offsetX86HasAVX2(SB), $1
+ CMPB internal∕cpu·X86+const_offset_x86_HasAVX2(SB), $1
JE loop_preheader_avx2
// TODO: for really big clears, use MOVNTDQ, even without AVX2.
// +build !plan9
+#include "go_asm.h"
#include "textflag.h"
// func memmove(to, from unsafe.Pointer, n uintptr)
JBE move_5through8
CMPL BX, $16
JBE move_9through16
- CMPB runtime·support_sse2(SB), $1
+ CMPB internal∕cpu·X86+const_offset_x86_HasSSE2(SB), $1
JNE nosse2
CMPL BX, $32
JBE move_17through32
*/
forward:
// If REP MOVSB isn't fast, don't use it
- CMPB runtime·support_erms(SB), $1 // enhanced REP MOVSB/STOSB
+ CMPB internal∕cpu·X86+const_offset_x86_HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
JNE fwdBy4
// Check alignment
// +build !plan9
+#include "go_asm.h"
#include "textflag.h"
// func memmove(to, from unsafe.Pointer, n uintptr)
JLS move_256through2048
// If REP MOVSB isn't fast, don't use it
- CMPB runtime·support_erms(SB), $1 // enhanced REP MOVSB/STOSB
+ CMPB internal∕cpu·X86+const_offset_x86_HasERMS(SB), $1 // enhanced REP MOVSB/STOSB
JNE fwdBy8
// Check alignment
cpu.Initialize(env)
- support_erms = cpu.X86.HasERMS
+ // Support cpu feature variables are used in code generated by the compiler
+ // to guard execution of instructions that can not be assumed to be always supported.
support_popcnt = cpu.X86.HasPOPCNT
support_sse2 = cpu.X86.HasSSE2
support_sse41 = cpu.X86.HasSSE41
newprocs int32
// Information about what cpu features are available.
- // Set on startup in runtime.cpuinit.
// Packages outside the runtime should not use these
// as they are not an external api.
- // TODO: deprecate these; use internal/cpu directly.
+ // Set on startup in asm_{386,amd64,amd64p32}.s
processorVersionInfo uint32
isIntel bool
lfenceBeforeRdtsc bool
// Set in runtime.cpuinit.
- support_erms bool
+ // TODO: deprecate these; use internal/cpu directly.
support_popcnt bool
support_sse2 bool
support_sse41 bool