MOVL $0, err+36(FP)
RET
+// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-16
+ MOVL trap+0(FP), AX // syscall entry
+ MOVL a1+4(FP), BX
+ MOVL $0, CX
+ MOVL $0, DX
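+ // Assumed rationale: with CLONE_VM|CLONE_VFORK the child runs on this
+ // same stack and may clobber the return-address slot before the parent
+ // resumes, so keep the return address in a (per-thread) register
+ // across the system call.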
+ POPL SI // preserve return address
+ INVOKE_SYSCALL
+ PUSHL SI
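+ // An unsigned result above 0xfffff001 means the kernel returned
+ // -errno; NEGL below recovers the positive errno value.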
+ CMPL AX, $0xfffff001
+ JLS ok
+ MOVL $-1, r1+8(FP)
+ NEGL AX
+ MOVL AX, err+12(FP)
+ RET
+ok:
+ MOVL AX, r1+8(FP)
+ MOVL $0, err+12(FP)
+ RET
+
// func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-24
MOVL trap+0(FP), AX // syscall entry
RET
// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
-TEXT ·rawVforkSyscall(SB),NOSPLIT,$0-32
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-32
MOVQ a1+8(FP), DI
MOVQ $0, SI
MOVQ $0, DX
MOVW R0, err+24(FP)
RET
+// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-16
+ MOVW trap+0(FP), R7 // syscall entry
+ MOVW a1+4(FP), R0
+ MOVW $0, R1
+ MOVW $0, R2
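+ // On arm the return address stays in R14 (LR) rather than on the
+ // shared stack, so unlike 386/amd64 nothing needs to be preserved
+ // across the call.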
+ SWI $0
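+ // Results above 0xfffff001 (unsigned) mean the kernel returned -errno.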
+ MOVW $0xfffff001, R1
+ CMP R1, R0
+ BLS ok
+ MOVW $-1, R1
+ MOVW R1, r1+8(FP)
+ RSB $0, R0, R0
+ MOVW R0, err+12(FP)
+ RET
+ok:
+ MOVW R0, r1+8(FP)
+ MOVW $0, R0
+ MOVW R0, err+12(FP)
+ RET
+
// func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-24
MOVW trap+0(FP), R7 // syscall entry
MOVD ZR, err+24(FP) // errno
RET
-
// func rawSyscallNoError(trap uintptr, a1, a2, a3 uintptr) (r1, r2 uintptr);
TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
MOVD a1+8(FP), R0
MOVV R0, err+72(FP) // errno
RET
+// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-32
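+ // The single Go argument goes in R4; the remaining kernel argument
+ // registers R5-R9 are zeroed.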
+ MOVV a1+8(FP), R4
+ MOVV R0, R5
+ MOVV R0, R6
+ MOVV R0, R7
+ MOVV R0, R8
+ MOVV R0, R9
+ MOVV trap+0(FP), R2 // syscall entry
+ SYSCALL
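+ // MIPS convention: R7 (a3) is non-zero when the syscall failed,
+ // and R2 (v0) then holds the (positive) errno.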
+ BEQ R7, ok
+ MOVV $-1, R1
+ MOVV R1, r1+16(FP) // r1
+ MOVV R2, err+24(FP) // errno
+ RET
+ok:
+ MOVV R2, r1+16(FP) // r1
+ MOVV R0, err+24(FP) // errno
+ RET
+
TEXT ·rawSyscallNoError(SB),NOSPLIT,$0-48
MOVV a1+8(FP), R4
MOVV a2+16(FP), R5
MOVW R0, err+36(FP) // errno
RET
+// func rawVforkSyscall(trap, a1 uintptr) (r1, err uintptr)
+TEXT ·rawVforkSyscall(SB),NOSPLIT|NOFRAME,$0-16
+ MOVW a1+4(FP), R4
+ MOVW R0, R5
+ MOVW R0, R6
+ MOVW trap+0(FP), R2 // syscall entry
+ SYSCALL
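+ // As on mips64, R7 (a3) != 0 signals failure and R2 (v0) carries
+ // the result or errno.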
+ BEQ R7, ok
+ MOVW $-1, R1
+ MOVW R1, r1+8(FP) // r1
+ MOVW R2, err+12(FP) // errno
+ RET
+ok:
+ MOVW R2, r1+8(FP) // r1
+ MOVW R0, err+12(FP) // errno
+ RET
+
TEXT ·rawSyscallNoError(SB),NOSPLIT,$20-24
MOVW a1+4(FP), R4
MOVW a2+8(FP), R5
}
}
- var hasRawVforkSyscall bool
- switch runtime.GOARCH {
- case "amd64", "arm64", "ppc64", "riscv64", "s390x":
- hasRawVforkSyscall = true
- }
-
// About to call fork.
// No more allocation or calls of non-assembly functions.
runtime_BeforeFork()
locked = true
switch {
- case hasRawVforkSyscall && (sys.Cloneflags&CLONE_NEWUSER == 0 && sys.Unshareflags&CLONE_NEWUSER == 0):
+ case sys.Cloneflags&CLONE_NEWUSER == 0 && sys.Unshareflags&CLONE_NEWUSER == 0:
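+ // CLONE_VFORK suspends the parent until the child execs or exits,
+ // while user-namespace setup needs the parent to write the uid/gid
+ // maps before the child proceeds, so vfork is only used when
+ // CLONE_NEWUSER is not requested.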
r1, err1 = rawVforkSyscall(SYS_CLONE, uintptr(SIGCHLD|CLONE_VFORK|CLONE_VM)|sys.Cloneflags)
case runtime.GOARCH == "s390x":
r1, _, err1 = RawSyscall6(SYS_CLONE, 0, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0)
cmsg.Len = uint32(length)
}
-func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) {
- panic("not implemented")
-}
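+// rawVforkSyscall is implemented in assembly for this architecture.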
+func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)
cmsg.Len = uint32(length)
}
-func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) {
- panic("not implemented")
-}
+func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)
cmsg.Len = uint64(length)
}
-func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) {
- panic("not implemented")
-}
+func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)
cmsg.Len = uint32(length)
}
-func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno) {
- panic("not implemented")
-}
+func rawVforkSyscall(trap, a1 uintptr) (r1 uintptr, err Errno)