this.mem = new DataView(this._inst.exports.mem.buffer);
},
- // func nanotime() int64
- "runtime.nanotime": (sp) => {
+ // func nanotime1() int64
+ "runtime.nanotime1": (sp) => {
setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000);
},
- // func walltime() (sec int64, nsec int32)
- "runtime.walltime": (sp) => {
+ // func walltime1() (sec int64, nsec int32)
+ "runtime.walltime1": (sp) => {
const msec = (new Date).getTime();
setInt64(sp + 8, msec / 1000);
this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true);
exit1(code)
}
-func write1(fd, p uintptr, n int32) int32
+func write2(fd, p uintptr, n int32) int32
//go:nosplit
-func write(fd uintptr, p unsafe.Pointer, n int32) int32 {
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
_g_ := getg()
// Check the validity of g because without a g during
r, _ := syscall3(&libc_write, uintptr(fd), uintptr(p), uintptr(n))
return int32(r)
}
- return write1(fd, uintptr(p), n)
+ return write2(fd, uintptr(p), n)
}
sysvicall2(&libc_munmap, uintptr(addr), uintptr(n))
}
-func nanotime1()
+func nanotime2()
//go:nosplit
-func nanotime() int64 {
- return int64(sysvicall0((*libcFunc)(unsafe.Pointer(funcPC(nanotime1)))))
+func nanotime1() int64 {
+ return int64(sysvicall0((*libcFunc)(unsafe.Pointer(funcPC(nanotime2)))))
}
//go:nosplit
}
//go:nosplit
-func write(fd uintptr, buf unsafe.Pointer, nbyte int32) int32 {
+func write1(fd uintptr, buf unsafe.Pointer, nbyte int32) int32 {
return int32(sysvicall3(&libc_write, uintptr(fd), uintptr(buf), uintptr(nbyte)))
}
)
//go:nosplit
-func nanotime() int64 {
+func nanotime1() int64 {
tp := &timespec{}
if clock_gettime(_CLOCK_REALTIME, tp) != 0 {
throw("syscall clock_gettime failed")
return tp.tv_sec*1000000000 + tp.tv_nsec
}
-func walltime() (sec int64, nsec int32) {
+func walltime1() (sec int64, nsec int32) {
ts := &timespec{}
if clock_gettime(_CLOCK_REALTIME, ts) != 0 {
throw("syscall clock_gettime failed")
func exit(code int32)
-func write(fd uintptr, p unsafe.Pointer, n int32) int32 {
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
if fd > 2 {
throw("runtime.write to fd > 2 is unsupported")
}
// but the timestamp must increase if the fd changes.
var lastfaketimefd int32
+func walltime() (sec int64, nsec int32)
+
/*
An attempt at IRT. Doesn't work. See end of sys_nacl_amd64.s.
}
//go:nosplit
-func nanotime() int64 {
+func nanotime1() int64 {
var scratch int64
ns := nsec(&scratch)
// TODO(aram): remove hack after I fix _nsec in the pc64 kernel.
return -1
}
len := findnull(&msg[0])
- if write(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int64(len) {
+ if write1(uintptr(fd), unsafe.Pointer(&msg[0]), int32(len)) != int32(len) {
closefd(fd)
return -1
}
}
//go:nosplit
-func write(fd uintptr, buf unsafe.Pointer, n int32) int64 {
- return int64(pwrite(int32(fd), buf, n, -1))
+func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
+ return pwrite(int32(fd), buf, n, -1)
}
var _badsignal = []byte("runtime: signal received on thread not created by Go.\n")
stdcall2(_SetProcessPriorityBoost, currentProcess, 1)
}
-func nanotime() int64
-
// useQPCTime controls whether time.now and nanotime use QueryPerformanceCounter.
// This is only set to 1 when running under Wine.
var useQPCTime uint8
stdcall1(_ExitProcess, uintptr(code))
}
+// write1 must be nosplit because it's used as a last resort in
+// functions like badmorestackg0. In such cases, we'll always take the
+// ASCII path.
+//
//go:nosplit
-func write(fd uintptr, buf unsafe.Pointer, n int32) int32 {
+func write1(fd uintptr, buf unsafe.Pointer, n int32) int32 {
const (
_STD_OUTPUT_HANDLE = ^uintptr(10) // -11
_STD_ERROR_HANDLE = ^uintptr(11) // -12
return
}
+// walltime1 isn't implemented on Windows, but will never be called.
+func walltime1() (sec int64, nsec int32)
+
//go:nosplit
func semasleep(ns int64) int32 {
const (
func usleep(usec uint32)
//go:noescape
-func write(fd uintptr, p unsafe.Pointer, n int32) int32
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32
//go:noescape
func open(name *byte, mode, perm int32) int32
// +build !plan9
// +build !solaris
-// +build !windows
// +build !nacl
// +build !freebsd
// +build !darwin
package runtime
-func nanotime() int64
+func nanotime1() int64
CSYSCALL()
RET
-// Runs on OS stack, called from runtime·write.
-TEXT runtime·write1(SB),NOSPLIT,$0-28
+// Runs on OS stack, called from runtime·write1.
+TEXT runtime·write2(SB),NOSPLIT,$0-28
MOVD fd+0(FP), R3
MOVD p+8(FP), R4
MOVW n+16(FP), R5
//go:nosplit
//go:cgo_unsafe_args
-func write(fd uintptr, p unsafe.Pointer, n int32) int32 {
+func write1(fd uintptr, p unsafe.Pointer, n int32) int32 {
return libcCall(unsafe.Pointer(funcPC(write_trampoline)), unsafe.Pointer(&fd))
}
func write_trampoline()
//go:nosplit
//go:cgo_unsafe_args
-func nanotime() int64 {
+func nanotime1() int64 {
var r struct {
t int64 // raw timer
numer, denom uint32 // conversion factors. nanoseconds = t * numer / denom.
//go:nosplit
//go:cgo_unsafe_args
-func walltime() (int64, int32) {
+func walltime1() (int64, int32) {
var t timeval
libcCall(unsafe.Pointer(funcPC(walltime_trampoline)), unsafe.Pointer(&t))
return int64(t.tv_sec), 1000 * t.tv_usec
MOVL AX, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-8
+TEXT runtime·write1(SB),NOSPLIT,$-8
MOVQ fd+0(FP), DI // arg 1 fd
MOVQ p+8(FP), SI // arg 2 buf
MOVL n+16(FP), DX // arg 3 count
SYSCALL
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVL $232, AX // clock_gettime
MOVQ $0, DI // CLOCK_REALTIME
LEAQ 8(SP), SI
MOVL DX, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB), NOSPLIT, $32
+TEXT runtime·nanotime1(SB), NOSPLIT, $32
MOVL $232, AX
MOVQ $4, DI // CLOCK_MONOTONIC
LEAQ 8(SP), SI
MOVL AX, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-4
+TEXT runtime·write1(SB),NOSPLIT,$-4
MOVL $4, AX
INT $0x80
JAE 2(PC)
MOVL AX, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-8
+TEXT runtime·write1(SB),NOSPLIT,$-8
MOVQ fd+0(FP), DI // arg 1 fd
MOVQ p+8(FP), SI // arg 2 buf
MOVL n+16(FP), DX // arg 3 count
MOVW R0, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
MOVW fd+0(FP), R0 // arg 1 fd
MOVW p+4(FP), R1 // arg 2 buf
MOVW n+8(FP), R2 // arg 3 count
MOVL AX, ret+4(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$0
+TEXT runtime·write1(SB),NOSPLIT,$0
MOVL $SYS_write, AX
MOVL fd+0(FP), BX
MOVL p+4(FP), CX
MOVL AX, ret+12(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $0-12
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $0-12
// We don't know how much stack space the VDSO code will need,
// so switch to g0.
// int64 nanotime(void) so really
// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB), NOSPLIT, $0-8
+TEXT runtime·nanotime1(SB), NOSPLIT, $0-8
// Switch to g0 stack. See comment above in runtime·walltime.
MOVL SP, BP // Save old SP; BP unchanged by C code.
MOVL AX, ret+8(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$0-28
+TEXT runtime·write1(SB),NOSPLIT,$0-28
MOVQ fd+0(FP), DI
MOVQ p+8(FP), SI
MOVL n+16(FP), DX
MOVL AX, ret+24(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$0-12
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$0-12
// We don't know how much stack space the VDSO code will need,
// so switch to g0.
// In particular, a kernel configured with CONFIG_OPTIMIZE_INLINING=n
MOVL DX, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
// Switch to g0 stack. See comment above in runtime·walltime.
MOVQ SP, BP // Save old SP; BP unchanged by C code.
MOVW R0, ret+4(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$0
+TEXT runtime·write1(SB),NOSPLIT,$0
MOVW fd+0(FP), R0
MOVW p+4(FP), R1
MOVW n+8(FP), R2
MOVW R0, ret+12(FP)
RET
-TEXT runtime·walltime(SB),NOSPLIT,$0-12
+TEXT runtime·walltime1(SB),NOSPLIT,$0-12
// We don't know how much stack space the VDSO code will need,
// so switch to g0.
MOVW R2, nsec+8(FP)
RET
-// int64 nanotime(void)
-TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+// int64 nanotime1(void)
+TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
// Switch to g0 stack. See comment above in runtime·walltime.
// Save old SP. Use R13 instead of SP to avoid linker rewriting the offsets.
MOVW R0, ret+8(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
MOVD fd+0(FP), R0
MOVD p+8(FP), R1
MOVW n+16(FP), R2
MOVW R0, ret+24(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$24-12
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$24-12
MOVD RSP, R20 // R20 is unchanged by C code
MOVD RSP, R1
MOVW R5, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$24-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$24-8
MOVD RSP, R20 // R20 is unchanged by C code
MOVD RSP, R1
MOVW R2, ret+8(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
MOVV fd+0(FP), R4
MOVV p+8(FP), R5
MOVW n+16(FP), R6
MOVW R2, ret+24(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$16
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$16
MOVW $0, R4 // CLOCK_REALTIME
MOVV $0(R29), R5
MOVV $SYS_clock_gettime, R2
MOVW R5, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$16
+TEXT runtime·nanotime1(SB),NOSPLIT,$16
MOVW $1, R4 // CLOCK_MONOTONIC
MOVV $0(R29), R5
MOVV $SYS_clock_gettime, R2
MOVW R2, ret+4(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$0-16
+TEXT runtime·write1(SB),NOSPLIT,$0-16
MOVW fd+0(FP), R4
MOVW p+4(FP), R5
MOVW n+8(FP), R6
MOVW R2, ret+12(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$8-12
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
MOVW $0, R4 // CLOCK_REALTIME
MOVW $4(R29), R5
MOVW $SYS_clock_gettime, R2
MOVW R5, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$8-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$8-8
MOVW $1, R4 // CLOCK_MONOTONIC
MOVW $4(R29), R5
MOVW $SYS_clock_gettime, R2
MOVW R3, ret+8(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
MOVD fd+0(FP), R3
MOVD p+8(FP), R4
MOVW n+16(FP), R5
MOVW R3, ret+24(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$16
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$16
MOVD R1, R15 // R15 is unchanged by C code
MOVD g_m(g), R21 // R21 = m
MOVD 40(R1), R5
JMP finish
-TEXT runtime·nanotime(SB),NOSPLIT,$16
+TEXT runtime·nanotime1(SB),NOSPLIT,$16
MOVD $1, R3 // CLOCK_MONOTONIC
MOVD R1, R15 // R15 is unchanged by C code
MOVW R2, ret+8(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0-28
MOVD fd+0(FP), R2
MOVD p+8(FP), R3
MOVW n+16(FP), R4
MOVW R2, ret+24(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$16
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$16
MOVW $0, R2 // CLOCK_REALTIME
MOVD $tp-16(SP), R3
MOVW $SYS_clock_gettime, R1
MOVW R3, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$16
+TEXT runtime·nanotime1(SB),NOSPLIT,$16
MOVW $1, R2 // CLOCK_MONOTONIC
MOVD $tp-16(SP), R3
MOVW $SYS_clock_gettime, R1
MOVL AX, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-4
+TEXT runtime·write1(SB),NOSPLIT,$-4
MOVL $SYS_write, AX
INT $0x80
JAE 2(PC)
INT $0x80
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
LEAL 12(SP), BX
MOVL $CLOCK_REALTIME, 4(SP) // arg 1 - clock_id
MOVL BX, 8(SP) // arg 2 - tp
MOVL BX, nsec+8(FP)
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB),NOSPLIT,$32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB),NOSPLIT,$32
LEAL 12(SP), BX
MOVL $CLOCK_MONOTONIC, 4(SP) // arg 1 - clock_id
MOVL BX, 8(SP) // arg 2 - tp
MOVL AX, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-8
+TEXT runtime·write1(SB),NOSPLIT,$-8
MOVQ fd+0(FP), DI // arg 1 - fd
MOVQ p+8(FP), SI // arg 2 - buf
MOVL n+16(FP), DX // arg 3 - nbyte
SYSCALL
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVQ $CLOCK_REALTIME, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
MOVL $SYS___clock_gettime50, AX
MOVL DX, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$32
+TEXT runtime·nanotime1(SB),NOSPLIT,$32
MOVQ $CLOCK_MONOTONIC, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
MOVL $SYS___clock_gettime50, AX
MOVW R0, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
MOVW fd+0(FP), R0 // arg 1 - fd
MOVW p+4(FP), R1 // arg 2 - buf
MOVW n+8(FP), R2 // arg 3 - nbyte
SWI $SYS___setitimer50
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVW $0, R0 // CLOCK_REALTIME
MOVW $8(R13), R1
SWI $SYS___clock_gettime50
MOVW R2, nsec+8(FP)
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB), NOSPLIT, $32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB), NOSPLIT, $32
MOVW $3, R0 // CLOCK_MONOTONIC
MOVW $8(R13), R1
SWI $SYS___clock_gettime50
MOVW R0, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-8
+TEXT runtime·write1(SB),NOSPLIT,$-8
MOVD fd+0(FP), R0 // arg 1 - fd
MOVD p+8(FP), R1 // arg 2 - buf
MOVW n+16(FP), R2 // arg 3 - nbyte
SVC $SYS___setitimer50
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVW $CLOCK_REALTIME, R0 // arg 1 - clock_id
MOVD $8(RSP), R1 // arg 2 - tp
SVC $SYS___clock_gettime50
MOVW R1, nsec+8(FP)
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB), NOSPLIT, $32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB), NOSPLIT, $32
MOVD $CLOCK_MONOTONIC, R0 // arg 1 - clock_id
MOVD $8(RSP), R1 // arg 2 - tp
SVC $SYS___clock_gettime50
MOVL AX, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-4
+TEXT runtime·write1(SB),NOSPLIT,$-4
MOVL $4, AX // sys_write
INT $0x80
JAE 2(PC)
INT $0x80
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
LEAL 12(SP), BX
MOVL $0, 4(SP) // arg 1 - clock_id
MOVL BX, 8(SP) // arg 2 - tp
MOVL BX, nsec+8(FP)
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB),NOSPLIT,$32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB),NOSPLIT,$32
LEAL 12(SP), BX
MOVL CLOCK_MONOTONIC, 4(SP) // arg 1 - clock_id
MOVL BX, 8(SP) // arg 2 - tp
MOVL AX, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT,$-8
+TEXT runtime·write1(SB),NOSPLIT,$-8
MOVQ fd+0(FP), DI // arg 1 - fd
MOVQ p+8(FP), SI // arg 2 - buf
MOVL n+16(FP), DX // arg 3 - nbyte
SYSCALL
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVQ $0, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
MOVL $87, AX // sys_clock_gettime
MOVL DX, nsec+8(FP)
RET
-TEXT runtime·nanotime(SB),NOSPLIT,$24
+TEXT runtime·nanotime1(SB),NOSPLIT,$24
MOVQ CLOCK_MONOTONIC, DI // arg 1 - clock_id
LEAQ 8(SP), SI // arg 2 - tp
MOVL $87, AX // sys_clock_gettime
MOVW R0, ret+12(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
MOVW fd+0(FP), R0 // arg 1 - fd
MOVW p+4(FP), R1 // arg 2 - buf
MOVW n+8(FP), R2 // arg 3 - nbyte
SWI $0
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVW CLOCK_REALTIME, R0 // arg 1 - clock_id
MOVW $8(R13), R1 // arg 2 - tp
MOVW $87, R12 // sys_clock_gettime
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB),NOSPLIT,$32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB),NOSPLIT,$32
MOVW CLOCK_MONOTONIC, R0 // arg 1 - clock_id
MOVW $8(R13), R1 // arg 2 - tp
MOVW $87, R12 // sys_clock_gettime
MOVW R0, ret+24(FP)
RET
-TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0
+TEXT runtime·write1(SB),NOSPLIT|NOFRAME,$0
MOVW fd+0(FP), R0 // arg 1 - fd
MOVD p+8(FP), R1 // arg 2 - buf
MOVW n+16(FP), R2 // arg 3 - nbyte
SVC
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB), NOSPLIT, $32
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB), NOSPLIT, $32
MOVW CLOCK_REALTIME, R0 // arg 1 - clock_id
MOVD $8(RSP), R1 // arg 2 - tp
MOVD $87, R8 // sys_clock_gettime
RET
-// int64 nanotime(void) so really
-// void nanotime(int64 *nsec)
-TEXT runtime·nanotime(SB),NOSPLIT,$32
+// int64 nanotime1(void) so really
+// void nanotime1(int64 *nsec)
+TEXT runtime·nanotime1(SB),NOSPLIT,$32
MOVW CLOCK_MONOTONIC, R0 // arg 1 - clock_id
MOVD $8(RSP), R1 // arg 2 - tp
MOVD $87, R8 // sys_clock_gettime
MOVL $-1, ret_hi+8(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$8-12
- CALL runtime·nanotime(SB)
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
+ CALL runtime·nanotime1(SB)
MOVL 0(SP), AX
MOVL 4(SP), DX
MOVQ AX, ret+8(FP)
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$8-12
- CALL runtime·nanotime(SB)
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
+ CALL runtime·nanotime1(SB)
MOVQ 0(SP), AX
// generated code for
MOVW R0, ret_hi+8(FP)
RET
-// time.now() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$12-12
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$12-12
// use nsec system call to get current time in nanoseconds
MOVW $sysnsec_lo-8(SP), R0 // destination addr
MOVW R0,res-12(SP)
MOVQ AX, (m_mOS+mOS_perrno)(BX)
RET
-// int64 runtime·nanotime1(void);
+// int64 runtime·nanotime2(void);
//
// clock_gettime(3c) wrapper because Timespec is too large for
// runtime·nanotime stack.
//
// Called using runtime·sysvicall6 from os_solaris.c:/nanotime.
// NOT USING GO CALLING CONVENTION.
-TEXT runtime·nanotime1(SB),NOSPLIT,$0
+TEXT runtime·nanotime2(SB),NOSPLIT,$0
// need space for the timespec argument.
SUBQ $64, SP // 16 bytes will do, but who knows in the future?
MOVQ $3, DI // CLOCK_REALTIME from <sys/time_impl.h>
CALL AX
RET
-// func walltime() (sec int64, nsec int32)
-TEXT runtime·walltime(SB),NOSPLIT,$8-12
- CALL runtime·nanotime(SB)
+// func walltime1() (sec int64, nsec int32)
+TEXT runtime·walltime1(SB),NOSPLIT,$8-12
+ CALL runtime·nanotime1(SB)
MOVQ 0(SP), AX
// generated code for
CallImport
RET
-TEXT ·nanotime(SB), NOSPLIT, $0
+TEXT ·nanotime1(SB), NOSPLIT, $0
CallImport
RET
-TEXT ·walltime(SB), NOSPLIT, $0
+TEXT ·walltime1(SB), NOSPLIT, $0
CallImport
RET
#define time_hi1 4
#define time_hi2 8
-TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
CMPB runtime·useQPCTime(SB), $0
JNE useQPC
loop:
#define time_hi1 4
#define time_hi2 8
-TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
CMPB runtime·useQPCTime(SB), $0
JNE useQPC
MOVQ $_INTERRUPT_TIME, DI
#define time_hi1 4
#define time_hi2 8
-TEXT runtime·nanotime(SB),NOSPLIT,$0-8
+TEXT runtime·nanotime1(SB),NOSPLIT,$0-8
MOVW $0, R0
MOVB runtime·useQPCTime(SB), R0
CMP $0, R0
--- /dev/null
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl
+
+package runtime
+
+import "unsafe"
+
+//go:nosplit
+func nanotime() int64 {
+ return nanotime1()
+}
+
+func walltime() (sec int64, nsec int32) {
+ return walltime1()
+}
+
+// write must be nosplit on Windows (see write1)
+//
+//go:nosplit
+func write(fd uintptr, p unsafe.Pointer, n int32) int32 {
+ return write1(fd, p, n)
+}
package runtime
-func walltime() (sec int64, nsec int32)
+func walltime1() (sec int64, nsec int32)
func fallback_walltime() (sec int64, nsec int32)
//go:nosplit
-func nanotime() int64 {
+func nanotime1() int64 {
bt := vdsoClockGettime(_CLOCK_MONOTONIC)
if bt == zeroBintime {
return fallback_nanotime()
return int64((1e9 * uint64(bt.sec)) + ((1e9 * uint64(bt.frac>>32)) >> 32))
}
-func walltime() (sec int64, nsec int32) {
+func walltime1() (sec int64, nsec int32) {
bt := vdsoClockGettime(_CLOCK_REALTIME)
if bt == zeroBintime {
return fallback_walltime()