)
const (
- // spliceNonblock makes calls to splice(2) non-blocking.
- spliceNonblock = 0x2
-
// maxSpliceSize is the maximum amount of data Splice asks
// the kernel to move in a single call to splice(2).
// We use 1MB as Splice writes data through a pipe, and 1MB is the default maximum pipe buffer size,
// which is determined by /proc/sys/fs/pipe-max-size.
return 0, err
}
for {
- n, err := splice(pipefd, sock.Sysfd, max, spliceNonblock)
+ n, err := splice(pipefd, sock.Sysfd, max, 0)
if err == syscall.EINTR {
continue
}
if err != syscall.EAGAIN {
return n, err
}
- if err := sock.pd.waitRead(sock.isFile); err != nil {
- return n, err
+ if sock.pd.pollable() {
+ if err := sock.pd.waitRead(sock.isFile); err != nil {
+ return n, err
+ }
}
}
}
}
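In both loops the wait on the runtime poller is now gated on sock.pd.pollable(), which reports whether the descriptor is registered with the netpoller. A descriptor outside the poller (such as a TTY opened in blocking mode) can never be woken by waitRead or waitWrite, so the loop simply retries splice, which with flags == 0 blocks in the kernel instead of returning EAGAIN. For reference, pollable amounts to a zero-check on the runtime poll descriptor; a sketch matching internal/poll at the time of this change:

	// pollable reports whether the FD was successfully registered
	// with the runtime network poller; registration is skipped or
	// fails for descriptors the poller cannot handle.
	func (pd *pollDesc) pollable() bool {
		return pd.runtimeCtx != 0
	}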
written := 0
for inPipe > 0 {
- n, err := splice(sock.Sysfd, pipefd, inPipe, spliceNonblock)
+ n, err := splice(sock.Sysfd, pipefd, inPipe, 0)
// Here, the condition n == 0 && err == nil should never be
// observed, since Splice controls the write side of the pipe.
if n > 0 {
	inPipe -= n
	written += n
	continue
}
if err != syscall.EAGAIN {
return written, err
}
- if err := sock.pd.waitWrite(sock.isFile); err != nil {
- return written, err
+ if sock.pd.pollable() {
+ if err := sock.pd.waitWrite(sock.isFile); err != nil {
+ return written, err
+ }
}
}
return written, nil
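The removed spliceNonblock constant is the SPLICE_F_NONBLOCK flag (0x2), which makes the splice(2) call itself non-blocking independently of the O_NONBLOCK status of the descriptors involved. The pipe used by Splice is already created with O_NONBLOCK, so the flag was redundant on the pipe side, and in practice it also made splice return EAGAIN when the non-pipe descriptor was a blocking file that could not proceed, as observed with a TTY in issue #59041; that EAGAIN can never be cleared by the poller for an unregistered descriptor. With flags == 0, blocking is decided solely by the descriptors themselves. A minimal standalone sketch of these semantics, assuming Linux and the syscall package (descriptor names and sizes are illustrative):

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		// Create non-blocking pipes, as the Splice fast path does;
		// with O_NONBLOCK set on the pipes themselves, the pipe side
		// of splice(2) never blocks even when flags == 0.
		var src, dst [2]int
		if err := syscall.Pipe2(src[:], syscall.O_NONBLOCK); err != nil {
			panic(err)
		}
		if err := syscall.Pipe2(dst[:], syscall.O_NONBLOCK); err != nil {
			panic(err)
		}
		defer func() {
			for _, fd := range []int{src[0], src[1], dst[0], dst[1]} {
				syscall.Close(fd)
			}
		}()

		if _, err := syscall.Write(src[1], []byte("hello")); err != nil {
			panic(err)
		}

		// flags == 0: whether the call may block is governed only by
		// the descriptors' own O_NONBLOCK status, matching the loops
		// patched above.
		n, err := syscall.Splice(src[0], nil, dst[1], nil, 5, 0)
		fmt.Println(n, err) // expect: 5 <nil>
	}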
import (
"bytes"
+ "errors"
"internal/poll"
+ "internal/testpty"
"io"
"math/rand"
"net"
"runtime"
"strconv"
"strings"
+ "sync"
"syscall"
"testing"
"time"
})
}
})
+ t.Run("TCP-To-TTY", func(t *testing.T) {
+ testSpliceToTTY(t, "tcp", 32768)
+ })
+ t.Run("Unix-To-TTY", func(t *testing.T) {
+ testSpliceToTTY(t, "unix", 32768)
+ })
t.Run("Limited", func(t *testing.T) {
t.Run("OneLess-TCP", func(t *testing.T) {
for _, size := range sizes {
}
}
+// Issue #59041.
+func testSpliceToTTY(t *testing.T, proto string, size int64) {
+ var wg sync.WaitGroup
+
+ // Call wg.Wait as the final deferred function,
+ // because the goroutines may block until some of
+ // the deferred Close calls complete.
+ defer wg.Wait()
+
+ pty, ttyName, err := testpty.Open()
+ if err != nil {
+ t.Skipf("skipping test because pty open failed: %v", err)
+ }
+ defer pty.Close()
+
+ // Open the tty directly, rather than via OpenFile.
+ // This bypasses the non-blocking support and is required
+ // to recreate the problem in the issue (#59041).
+ ttyFD, err := syscall.Open(ttyName, syscall.O_RDWR, 0)
+ if err != nil {
+ t.Skipf("skipping test becaused failed to open tty: %v", err)
+ }
+ defer syscall.Close(ttyFD)
+
+ tty := NewFile(uintptr(ttyFD), "tty")
+ defer tty.Close()
+
+ client, server := createSocketPair(t, proto)
+
+ data := bytes.Repeat([]byte{'a'}, int(size))
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // The problem (issue #59041) occurs when writing
+ // a series of blocks of data. It does not occur
+ // when all the data is written at once.
+ for i := 0; i < len(data); i += 1024 {
+ if _, err := client.Write(data[i : i+1024]); err != nil {
+ // If we get here because the client was
+ // closed, skip the error.
+ if !errors.Is(err, net.ErrClosed) {
+ t.Errorf("error writing to socket: %v", err)
+ }
+ return
+ }
+ }
+ client.Close()
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ buf := make([]byte, 32)
+ for {
+ if _, err := pty.Read(buf); err != nil {
+ if err != io.EOF && !errors.Is(err, ErrClosed) {
+ // An error here doesn't matter for
+ // our test.
+ t.Logf("error reading from pty: %v", err)
+ }
+ return
+ }
+ }
+ }()
+
+ // Close client to wake up the writing goroutine if necessary.
+ defer client.Close()
+
+ _, err = io.Copy(tty, server)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
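The test relies on the createSocketPair helper that already exists in this test file. For readers without the surrounding file, a hypothetical stand-in could look like the sketch below (the name createSocketPairSketch and its details are illustrative, not the actual helper; it assumes net, path/filepath, and testing are imported):

	func createSocketPairSketch(t *testing.T, proto string) (client, server net.Conn) {
		t.Helper()
		addr := "127.0.0.1:0"
		if proto == "unix" {
			addr = filepath.Join(t.TempDir(), "sock")
		}
		ln, err := net.Listen(proto, addr)
		if err != nil {
			t.Fatal(err)
		}
		defer ln.Close()

		ch := make(chan net.Conn, 1)
		go func() {
			c, _ := ln.Accept() // one connection; c is nil on error
			ch <- c
		}()
		client, err = net.Dial(proto, ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		if server = <-ch; server == nil {
			t.Fatal("accept failed")
		}
		t.Cleanup(func() {
			// Ignore Close errors: the test may already have
			// closed the client side itself.
			client.Close()
			server.Close()
		})
		return client, server
	}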
func testCopyFileRange(t *testing.T, size int64, limit int64) {
dst, src, data, hook := newCopyFileRangeTest(t, size)