Cypherpunks repositories - gostls13.git/commitdiff
httputil: move dump and chunking functions out of http
author Brad Fitzpatrick <bradfitz@golang.org>
Fri, 4 Nov 2011 01:12:51 +0000 (18:12 -0700)
committer Brad Fitzpatrick <bradfitz@golang.org>
Fri, 4 Nov 2011 01:12:51 +0000 (18:12 -0700)
This moves DumpRequest, DumpResponse, NewChunkedReader,
and NewChunkedWriter out of http, as part of the continued
http diet plan.
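
For code outside the standard library, the visible effect is an import change: the helpers keep their names and signatures but now live under httputil. A minimal sketch of a relocated chunking helper, assuming the current import path net/http/httputil:

package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http/httputil" // previously: http.NewChunkedWriter
)

func main() {
	// Encode a payload in HTTP "chunked" framing without going through a Server.
	var wire bytes.Buffer
	cw := httputil.NewChunkedWriter(&wire)
	if _, err := cw.Write([]byte("hello")); err != nil {
		log.Fatal(err)
	}
	cw.Close() // writes the terminating 0-length chunk

	fmt.Printf("%q\n", wire.String()) // "5\r\nhello\r\n0\r\n"
}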

Also, adds DumpRequestOut (for dumping outbound requests),
since DumpRequest's ambiguity (the "wire representation" in
what direction?) was often a source of confusion and bug
reports.
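
The distinction is directional: DumpRequest renders the request roughly as a server would have received it, while DumpRequestOut renders it as http.Transport would put it on the wire, including headers the Transport adds on its own. A hedged sketch of the difference, using the current import paths (the exact extra headers depend on the Go version):

package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/httputil"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/foo", nil)
	if err != nil {
		log.Fatal(err)
	}

	in, err := httputil.DumpRequest(req, true) // the request as a server would have received it
	if err != nil {
		log.Fatal(err)
	}
	out, err := httputil.DumpRequestOut(req, true) // the request as the Transport sends it
	if err != nil {
		log.Fatal(err)
	}

	// DumpRequestOut includes Transport-added headers such as User-Agent
	// and Accept-Encoding; DumpRequest does not.
	fmt.Printf("DumpRequest:\n%s\nDumpRequestOut:\n%s\n", in, out)
}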

R=rsc, adg
CC=golang-dev
https://golang.org/cl/5339041

src/pkg/net/http/Makefile
src/pkg/net/http/chunked.go
src/pkg/net/http/dump.go [deleted file]
src/pkg/net/http/httputil/Makefile
src/pkg/net/http/httputil/chunked.go [new file with mode: 0644]
src/pkg/net/http/httputil/chunked_test.go [new file with mode: 0644]
src/pkg/net/http/httputil/dump.go [new file with mode: 0644]
src/pkg/net/http/httputil/dump_test.go [new file with mode: 0644]
src/pkg/net/http/request.go
src/pkg/net/http/requestwrite_test.go
src/pkg/net/http/transfer.go

diff --git a/src/pkg/net/http/Makefile b/src/pkg/net/http/Makefile
index e94f4ccf438e343531ca2b6c203df44a8783ddf7..13705e73810a118615f717a376b2592adb647685 100644 (file)
@@ -9,7 +9,6 @@ GOFILES=\
        chunked.go\
        client.go\
        cookie.go\
-       dump.go\
        filetransport.go\
        fs.go\
        header.go\
diff --git a/src/pkg/net/http/chunked.go b/src/pkg/net/http/chunked.go
index 76beb15c3485d7e568d08074499b1d27ad17913b..b012dd18496c8e0f0d5dabccce865bf3d21a63fb 100644 (file)
@@ -7,23 +7,10 @@ package http
 import (
        "bufio"
        "io"
-       "log"
        "strconv"
 )
 
-// NewChunkedWriter returns a new writer that translates writes into HTTP
-// "chunked" format before writing them to w. Closing the returned writer
-// sends the final 0-length chunk that marks the end of the stream.
-//
-// NewChunkedWriter is not needed by normal applications. The http
-// package adds chunking automatically if handlers don't set a
-// Content-Length header. Using NewChunkedWriter inside a handler
-// would result in double chunking or chunking with a Content-Length
-// length, both of which are wrong.
-func NewChunkedWriter(w io.Writer) io.WriteCloser {
-       if _, bad := w.(*response); bad {
-               log.Printf("warning: using NewChunkedWriter in an http.Handler; expect corrupt output")
-       }
+func newChunkedWriter(w io.Writer) io.WriteCloser {
        return &chunkedWriter{w}
 }
 
@@ -65,12 +52,6 @@ func (cw *chunkedWriter) Close() error {
        return err
 }
 
-// NewChunkedReader returns a new reader that translates the data read from r
-// out of HTTP "chunked" format before returning it. 
-// The reader returns io.EOF when the final 0-length chunk is read.
-//
-// NewChunkedReader is not needed by normal applications. The http package
-// automatically decodes chunking when reading response bodies.
-func NewChunkedReader(r *bufio.Reader) io.Reader {
+func newChunkedReader(r *bufio.Reader) io.Reader {
        return &chunkedReader{r: r}
 }
diff --git a/src/pkg/net/http/dump.go b/src/pkg/net/http/dump.go
deleted file mode 100644 (file)
index b85feea..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http
-
-import (
-       "bytes"
-       "io"
-       "io/ioutil"
-)
-
-// One of the copies, say from b to r2, could be avoided by using a more
-// elaborate trick where the other copy is made during Request/Response.Write.
-// This would complicate things too much, given that these functions are for
-// debugging only.
-func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
-       var buf bytes.Buffer
-       if _, err = buf.ReadFrom(b); err != nil {
-               return nil, nil, err
-       }
-       if err = b.Close(); err != nil {
-               return nil, nil, err
-       }
-       return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
-}
-
-// DumpRequest returns the wire representation of req,
-// optionally including the request body, for debugging.
-// DumpRequest is semantically a no-op, but in order to
-// dump the body, it reads the body data into memory and
-// changes req.Body to refer to the in-memory copy.
-// The documentation for Request.Write details which fields
-// of req are used.
-func DumpRequest(req *Request, body bool) (dump []byte, err error) {
-       var b bytes.Buffer
-       save := req.Body
-       if !body || req.Body == nil {
-               req.Body = nil
-       } else {
-               save, req.Body, err = drainBody(req.Body)
-               if err != nil {
-                       return
-               }
-       }
-       err = req.dumpWrite(&b)
-       req.Body = save
-       if err != nil {
-               return
-       }
-       dump = b.Bytes()
-       return
-}
-
-// DumpResponse is like DumpRequest but dumps a response.
-func DumpResponse(resp *Response, body bool) (dump []byte, err error) {
-       var b bytes.Buffer
-       save := resp.Body
-       savecl := resp.ContentLength
-       if !body || resp.Body == nil {
-               resp.Body = nil
-               resp.ContentLength = 0
-       } else {
-               save, resp.Body, err = drainBody(resp.Body)
-               if err != nil {
-                       return
-               }
-       }
-       err = resp.Write(&b)
-       resp.Body = save
-       resp.ContentLength = savecl
-       if err != nil {
-               return
-       }
-       dump = b.Bytes()
-       return
-}
diff --git a/src/pkg/net/http/httputil/Makefile b/src/pkg/net/http/httputil/Makefile
index 799b31359da18a9b0a883059c1f81d3ef0396b01..8bfc7a022be2113b0e09d40e68c79a8e67604346 100644 (file)
@@ -6,6 +6,8 @@ include ../../../../Make.inc
 
 TARG=net/http/httputil
 GOFILES=\
+       chunked.go\
+       dump.go\
        persist.go\
        reverseproxy.go\
 
diff --git a/src/pkg/net/http/httputil/chunked.go b/src/pkg/net/http/httputil/chunked.go
new file mode 100644 (file)
index 0000000..8286692
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+       "bufio"
+       "http"
+       "io"
+       "strconv"
+       "strings"
+)
+
+// NewChunkedWriter returns a new writer that translates writes into HTTP
+// "chunked" format before writing them to w. Closing the returned writer
+// sends the final 0-length chunk that marks the end of the stream.
+//
+// NewChunkedWriter is not needed by normal applications. The http
+// package adds chunking automatically if handlers don't set a
+// Content-Length header. Using NewChunkedWriter inside a handler
+// would result in double chunking or chunking with a Content-Length
+// length, both of which are wrong.
+func NewChunkedWriter(w io.Writer) io.WriteCloser {
+       return &chunkedWriter{w}
+}
+
+// Writing to ChunkedWriter translates to writing in HTTP chunked Transfer
+// Encoding wire format to the underlying Wire writer.
+type chunkedWriter struct {
+       Wire io.Writer
+}
+
+// Write the contents of data as one chunk to Wire.
+// NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has
+// a bug since it does not check for success of io.WriteString
+func (cw *chunkedWriter) Write(data []byte) (n int, err error) {
+
+       // Don't send 0-length data. It looks like EOF for chunked encoding.
+       if len(data) == 0 {
+               return 0, nil
+       }
+
+       head := strconv.Itob(len(data), 16) + "\r\n"
+
+       if _, err = io.WriteString(cw.Wire, head); err != nil {
+               return 0, err
+       }
+       if n, err = cw.Wire.Write(data); err != nil {
+               return
+       }
+       if n != len(data) {
+               err = io.ErrShortWrite
+               return
+       }
+       _, err = io.WriteString(cw.Wire, "\r\n")
+
+       return
+}
+
+func (cw *chunkedWriter) Close() error {
+       _, err := io.WriteString(cw.Wire, "0\r\n")
+       return err
+}
+
+// NewChunkedReader returns a new reader that translates the data read from r
+// out of HTTP "chunked" format before returning it. 
+// The reader returns io.EOF when the final 0-length chunk is read.
+//
+// NewChunkedReader is not needed by normal applications. The http package
+// automatically decodes chunking when reading response bodies.
+func NewChunkedReader(r io.Reader) io.Reader {
+       // This is a bit of a hack so we don't have to copy chunkedReader into
+       // httputil.  It's a bit more complex than chunkedWriter, which is copied
+       // above.
+       req, err := http.ReadRequest(bufio.NewReader(io.MultiReader(
+               strings.NewReader("POST / HTTP/1.1\r\nTransfer-Encoding: chunked\r\n\r\n"),
+               r,
+               strings.NewReader("\r\n"))))
+       if err != nil {
+               panic("bad fake request: " + err.Error())
+       }
+       return req.Body
+}
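
Since NewChunkedReader here is built by synthesizing a fake chunked request and handing it to http.ReadRequest, a quick way to check its behavior is to decode a hand-written chunked stream. A small sketch, assuming the current import path net/http/httputil:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http/httputil"
	"strings"
)

func main() {
	// Two chunks ("hello, " and "world!") followed by the terminating 0-length chunk.
	wire := "7\r\nhello, \r\n6\r\nworld!\r\n0\r\n\r\n"

	r := httputil.NewChunkedReader(strings.NewReader(wire))
	body, err := ioutil.ReadAll(r) // returns io.EOF at the 0-length chunk, so ReadAll succeeds
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", body) // hello, world!
}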
diff --git a/src/pkg/net/http/httputil/chunked_test.go b/src/pkg/net/http/httputil/chunked_test.go
new file mode 100644 (file)
index 0000000..258d39b
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+       "bytes"
+       "io/ioutil"
+       "testing"
+)
+
+func TestChunk(t *testing.T) {
+       var b bytes.Buffer
+
+       w := NewChunkedWriter(&b)
+       const chunk1 = "hello, "
+       const chunk2 = "world! 0123456789abcdef"
+       w.Write([]byte(chunk1))
+       w.Write([]byte(chunk2))
+       w.Close()
+
+       if g, e := b.String(), "7\r\nhello, \r\n17\r\nworld! 0123456789abcdef\r\n0\r\n"; g != e {
+               t.Fatalf("chunk writer wrote %q; want %q", g, e)
+       }
+
+       r := NewChunkedReader(&b)
+       data, err := ioutil.ReadAll(r)
+       if err != nil {
+               t.Fatalf("ReadAll from NewChunkedReader: %v", err)
+       }
+       if g, e := string(data), chunk1+chunk2; g != e {
+               t.Errorf("chunk reader read %q; want %q", g, e)
+       }
+}
diff --git a/src/pkg/net/http/httputil/dump.go b/src/pkg/net/http/httputil/dump.go
new file mode 100644 (file)
index 0000000..5b861b7
--- /dev/null
@@ -0,0 +1,203 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+       "bytes"
+       "errors"
+       "fmt"
+       "http"
+       "io"
+       "io/ioutil"
+       "net"
+       "strings"
+)
+
+// One of the copies, say from b to r2, could be avoided by using a more
+// elaborate trick where the other copy is made during Request/Response.Write.
+// This would complicate things too much, given that these functions are for
+// debugging only.
+func drainBody(b io.ReadCloser) (r1, r2 io.ReadCloser, err error) {
+       var buf bytes.Buffer
+       if _, err = buf.ReadFrom(b); err != nil {
+               return nil, nil, err
+       }
+       if err = b.Close(); err != nil {
+               return nil, nil, err
+       }
+       return ioutil.NopCloser(&buf), ioutil.NopCloser(bytes.NewBuffer(buf.Bytes())), nil
+}
+
+// dumpConn is a net.Conn which writes to Writer and reads from Reader
+type dumpConn struct {
+       io.Writer
+       io.Reader
+}
+
+func (c *dumpConn) Close() error                     { return nil }
+func (c *dumpConn) LocalAddr() net.Addr              { return nil }
+func (c *dumpConn) RemoteAddr() net.Addr             { return nil }
+func (c *dumpConn) SetTimeout(nsec int64) error      { return nil }
+func (c *dumpConn) SetReadTimeout(nsec int64) error  { return nil }
+func (c *dumpConn) SetWriteTimeout(nsec int64) error { return nil }
+
+// DumpRequestOut is like DumpRequest but includes
+// headers that the standard http.Transport adds,
+// such as User-Agent.
+func DumpRequestOut(req *http.Request, body bool) (dump []byte, err error) {
+       save := req.Body
+       if !body || req.Body == nil {
+               req.Body = nil
+       } else {
+               save, req.Body, err = drainBody(req.Body)
+               if err != nil {
+                       return
+               }
+       }
+
+       var b bytes.Buffer
+       dialed := false
+       t := &http.Transport{
+               Dial: func(net, addr string) (c net.Conn, err error) {
+                       if dialed {
+                               return nil, errors.New("unexpected second dial")
+                       }
+                       c = &dumpConn{
+                               Writer: &b,
+                               Reader: strings.NewReader("HTTP/1.1 500 Fake Error\r\n\r\n"),
+                       }
+                       return
+               },
+       }
+
+       _, err = t.RoundTrip(req)
+
+       req.Body = save
+       if err != nil {
+               return
+       }
+       dump = b.Bytes()
+       return
+}
+
+// Return value if nonempty, def otherwise.
+func valueOrDefault(value, def string) string {
+       if value != "" {
+               return value
+       }
+       return def
+}
+
+var reqWriteExcludeHeaderDump = map[string]bool{
+       "Host":              true, // not in Header map anyway
+       "Content-Length":    true,
+       "Transfer-Encoding": true,
+       "Trailer":           true,
+}
+
+// dumpAsReceived writes req to w in the form as it was received, or
+// at least as accurately as possible from the information retained in
+// the request.
+func dumpAsReceived(req *http.Request, w io.Writer) error {
+       return nil
+}
+
+// DumpRequest returns the as-received wire representation of req,
+// optionally including the request body, for debugging.
+// DumpRequest is semantically a no-op, but in order to
+// dump the body, it reads the body data into memory and
+// changes req.Body to refer to the in-memory copy.
+// The documentation for http.Request.Write details which fields
+// of req are used.
+func DumpRequest(req *http.Request, body bool) (dump []byte, err error) {
+       save := req.Body
+       if !body || req.Body == nil {
+               req.Body = nil
+       } else {
+               save, req.Body, err = drainBody(req.Body)
+               if err != nil {
+                       return
+               }
+       }
+
+       var b bytes.Buffer
+
+       urlStr := req.URL.Raw
+       if urlStr == "" {
+               urlStr = valueOrDefault(req.URL.EncodedPath(), "/")
+               if req.URL.RawQuery != "" {
+                       urlStr += "?" + req.URL.RawQuery
+               }
+       }
+
+       fmt.Fprintf(&b, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), urlStr,
+               req.ProtoMajor, req.ProtoMinor)
+
+       host := req.Host
+       if host == "" && req.URL != nil {
+               host = req.URL.Host
+       }
+       if host != "" {
+               fmt.Fprintf(&b, "Host: %s\r\n", host)
+       }
+
+       chunked := len(req.TransferEncoding) > 0 && req.TransferEncoding[0] == "chunked"
+       if len(req.TransferEncoding) > 0 {
+               fmt.Fprintf(&b, "Transfer-Encoding: %s\r\n", strings.Join(req.TransferEncoding, ","))
+       }
+       if req.Close {
+               fmt.Fprintf(&b, "Connection: close\r\n")
+       }
+
+       err = req.Header.WriteSubset(&b, reqWriteExcludeHeaderDump)
+       if err != nil {
+               return
+       }
+
+       io.WriteString(&b, "\r\n")
+
+       if req.Body != nil {
+               var dest io.Writer = &b
+               if chunked {
+                       dest = NewChunkedWriter(dest)
+               }
+               _, err = io.Copy(dest, req.Body)
+               if chunked {
+                       dest.(io.Closer).Close()
+                       io.WriteString(&b, "\r\n")
+               }
+       }
+
+       req.Body = save
+       if err != nil {
+               return
+       }
+       dump = b.Bytes()
+       return
+}
+
+// DumpResponse is like DumpRequest but dumps a response.
+func DumpResponse(resp *http.Response, body bool) (dump []byte, err error) {
+       var b bytes.Buffer
+       save := resp.Body
+       savecl := resp.ContentLength
+       if !body || resp.Body == nil {
+               resp.Body = nil
+               resp.ContentLength = 0
+       } else {
+               save, resp.Body, err = drainBody(resp.Body)
+               if err != nil {
+                       return
+               }
+       }
+       err = resp.Write(&b)
+       resp.Body = save
+       resp.ContentLength = savecl
+       if err != nil {
+               return
+       }
+       dump = b.Bytes()
+       return
+}
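
As a usage sketch of the relocated dump functions, DumpResponse can be applied to a response parsed from a raw byte stream; it drains the body into memory and then restores resp.Body, so dumping is semantically a no-op. Assuming the current import path net/http/httputil:

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"net/http/httputil"
	"strings"
)

func main() {
	raw := "HTTP/1.1 200 OK\r\n" +
		"Content-Length: 5\r\n" +
		"Content-Type: text/plain\r\n\r\n" +
		"hello"

	resp, err := http.ReadResponse(bufio.NewReader(strings.NewReader(raw)), nil)
	if err != nil {
		log.Fatal(err)
	}

	// DumpResponse reads resp.Body into memory and replaces it with an
	// in-memory copy, so the body can still be read after dumping.
	dump, err := httputil.DumpResponse(resp, true)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", dump)
}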
diff --git a/src/pkg/net/http/httputil/dump_test.go b/src/pkg/net/http/httputil/dump_test.go
new file mode 100644 (file)
index 0000000..b9856ce
--- /dev/null
@@ -0,0 +1,140 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httputil
+
+import (
+       "bytes"
+       "fmt"
+       "http"
+       "io"
+       "io/ioutil"
+       "testing"
+       "url"
+)
+
+type dumpTest struct {
+       Req  http.Request
+       Body interface{} // optional []byte or func() io.ReadCloser to populate Req.Body
+
+       WantDump    string
+       WantDumpOut string
+}
+
+var dumpTests = []dumpTest{
+
+       // HTTP/1.1 => chunked coding; body; empty trailer
+       {
+               Req: http.Request{
+                       Method: "GET",
+                       URL: &url.URL{
+                               Scheme: "http",
+                               Host:   "www.google.com",
+                               Path:   "/search",
+                       },
+                       ProtoMajor:       1,
+                       ProtoMinor:       1,
+                       TransferEncoding: []string{"chunked"},
+               },
+
+               Body: []byte("abcdef"),
+
+               WantDump: "GET /search HTTP/1.1\r\n" +
+                       "Host: www.google.com\r\n" +
+                       "Transfer-Encoding: chunked\r\n\r\n" +
+                       chunk("abcdef") + chunk(""),
+       },
+
+       // Verify that DumpRequest preserves the HTTP version number, doesn't add a Host,
+       // and doesn't add a User-Agent.
+       {
+               Req: http.Request{
+                       Method:     "GET",
+                       URL:        mustParseURL("/foo"),
+                       ProtoMajor: 1,
+                       ProtoMinor: 0,
+                       Header: http.Header{
+                               "X-Foo": []string{"X-Bar"},
+                       },
+               },
+
+               WantDump: "GET /foo HTTP/1.0\r\n" +
+                       "X-Foo: X-Bar\r\n\r\n",
+       },
+
+       {
+               Req: *mustNewRequest("GET", "http://example.com/foo", nil),
+
+               WantDumpOut: "GET /foo HTTP/1.1\r\n" +
+                       "Host: example.com\r\n" +
+                       "User-Agent: Go http package\r\n" +
+                       "Accept-Encoding: gzip\r\n\r\n",
+       },
+}
+
+func TestDumpRequest(t *testing.T) {
+       for i, tt := range dumpTests {
+               setBody := func() {
+                       if tt.Body == nil {
+                               return
+                       }
+                       switch b := tt.Body.(type) {
+                       case []byte:
+                               tt.Req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
+                       case func() io.ReadCloser:
+                               tt.Req.Body = b()
+                       }
+               }
+               setBody()
+               if tt.Req.Header == nil {
+                       tt.Req.Header = make(http.Header)
+               }
+
+               if tt.WantDump != "" {
+                       setBody()
+                       dump, err := DumpRequest(&tt.Req, true)
+                       if err != nil {
+                               t.Errorf("DumpRequest #%d: %s", i, err)
+                               continue
+                       }
+                       if string(dump) != tt.WantDump {
+                               t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
+                               continue
+                       }
+               }
+
+               if tt.WantDumpOut != "" {
+                       setBody()
+                       dump, err := DumpRequestOut(&tt.Req, true)
+                       if err != nil {
+                               t.Errorf("DumpRequestOut #%d: %s", i, err)
+                               continue
+                       }
+                       if string(dump) != tt.WantDumpOut {
+                               t.Errorf("DumpRequestOut %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDumpOut, string(dump))
+                               continue
+                       }
+               }
+       }
+}
+
+func chunk(s string) string {
+       return fmt.Sprintf("%x\r\n%s\r\n", len(s), s)
+}
+
+func mustParseURL(s string) *url.URL {
+       u, err := url.Parse(s)
+       if err != nil {
+               panic(fmt.Sprintf("Error parsing URL %q: %v", s, err))
+       }
+       return u
+}
+
+func mustNewRequest(method, url string, body io.Reader) *http.Request {
+       req, err := http.NewRequest(method, url, body)
+       if err != nil {
+               panic(fmt.Sprintf("NewRequest(%q, %q, %p) err = %v", method, url, body, err))
+       }
+       return req
+}
diff --git a/src/pkg/net/http/request.go b/src/pkg/net/http/request.go
index 7a62dcede450d9f04bcd512da75a70aa22782a1f..30e3b7ed18e3e4fb0bc9616c029ea8fc945cf528 100644 (file)
@@ -69,13 +69,6 @@ var reqWriteExcludeHeader = map[string]bool{
        "Trailer":           true,
 }
 
-var reqWriteExcludeHeaderDump = map[string]bool{
-       "Host":              true, // not in Header map anyway
-       "Content-Length":    true,
-       "Transfer-Encoding": true,
-       "Trailer":           true,
-}
-
 // A Request represents a parsed HTTP request header.
 type Request struct {
        Method string // GET, POST, PUT, etc.
@@ -286,51 +279,6 @@ func (req *Request) WriteProxy(w io.Writer) error {
        return req.write(w, true, nil)
 }
 
-func (req *Request) dumpWrite(w io.Writer) error {
-       // TODO(bradfitz): RawPath here?
-       urlStr := valueOrDefault(req.URL.EncodedPath(), "/")
-       if req.URL.RawQuery != "" {
-               urlStr += "?" + req.URL.RawQuery
-       }
-
-       bw := bufio.NewWriter(w)
-       fmt.Fprintf(bw, "%s %s HTTP/%d.%d\r\n", valueOrDefault(req.Method, "GET"), urlStr,
-               req.ProtoMajor, req.ProtoMinor)
-
-       host := req.Host
-       if host == "" && req.URL != nil {
-               host = req.URL.Host
-       }
-       if host != "" {
-               fmt.Fprintf(bw, "Host: %s\r\n", host)
-       }
-
-       // Process Body,ContentLength,Close,Trailer
-       tw, err := newTransferWriter(req)
-       if err != nil {
-               return err
-       }
-       err = tw.WriteHeader(bw)
-       if err != nil {
-               return err
-       }
-
-       err = req.Header.WriteSubset(bw, reqWriteExcludeHeaderDump)
-       if err != nil {
-               return err
-       }
-
-       io.WriteString(bw, "\r\n")
-
-       // Write body and trailer
-       err = tw.WriteBody(bw)
-       if err != nil {
-               return err
-       }
-       bw.Flush()
-       return nil
-}
-
 // extraHeaders may be nil
 func (req *Request) write(w io.Writer, usingProxy bool, extraHeaders Header) error {
        host := req.Host
diff --git a/src/pkg/net/http/requestwrite_test.go b/src/pkg/net/http/requestwrite_test.go
index 16593e987aa542898b5d4ceabcad50233b43064f..3da8ad719b80526d215d3372912f70fb3aac20d6 100644 (file)
@@ -22,7 +22,6 @@ type reqWriteTest struct {
        // Any of these three may be empty to skip that test.
        WantWrite string // Request.Write
        WantProxy string // Request.WriteProxy
-       WantDump  string // DumpRequest
 
        WantError error // wanted error from Request.Write
 }
@@ -109,11 +108,6 @@ var reqWriteTests = []reqWriteTest{
                        "User-Agent: Go http package\r\n" +
                        "Transfer-Encoding: chunked\r\n\r\n" +
                        chunk("abcdef") + chunk(""),
-
-               WantDump: "GET /search HTTP/1.1\r\n" +
-                       "Host: www.google.com\r\n" +
-                       "Transfer-Encoding: chunked\r\n\r\n" +
-                       chunk("abcdef") + chunk(""),
        },
        // HTTP/1.1 POST => chunked coding; body; empty trailer
        {
@@ -335,13 +329,6 @@ var reqWriteTests = []reqWriteTest{
                        },
                },
 
-               // We can dump it:
-               WantDump: "GET /foo HTTP/1.0\r\n" +
-                       "X-Foo: X-Bar\r\n\r\n",
-
-               // .. but we can't call Request.Write on it, due to its lack of Host header.
-               // TODO(bradfitz): there might be an argument to allow this, but for now I'd
-               // rather let HTTP/1.0 continue to die.
                WantWrite: "GET /foo HTTP/1.1\r\n" +
                        "Host: \r\n" +
                        "User-Agent: Go http package\r\n" +
@@ -401,19 +388,6 @@ func TestRequestWrite(t *testing.T) {
                                continue
                        }
                }
-
-               if tt.WantDump != "" {
-                       setBody()
-                       dump, err := DumpRequest(&tt.Req, true)
-                       if err != nil {
-                               t.Errorf("DumpRequest #%d: %s", i, err)
-                               continue
-                       }
-                       if string(dump) != tt.WantDump {
-                               t.Errorf("DumpRequest %d, expecting:\n%s\nGot:\n%s\n", i, tt.WantDump, string(dump))
-                               continue
-                       }
-               }
        }
 }
 
diff --git a/src/pkg/net/http/transfer.go b/src/pkg/net/http/transfer.go
index 94772183f0094efa8546cfba4fd290965575a554..4c23de33f94f68207724dc95db2fd52e46f5843a 100644 (file)
@@ -187,7 +187,7 @@ func (t *transferWriter) WriteBody(w io.Writer) (err error) {
        // Write body
        if t.Body != nil {
                if chunked(t.TransferEncoding) {
-                       cw := NewChunkedWriter(w)
+                       cw := newChunkedWriter(w)
                        _, err = io.Copy(cw, t.Body)
                        if err == nil {
                                err = cw.Close()
@@ -319,7 +319,7 @@ func readTransfer(msg interface{}, r *bufio.Reader) (err error) {
        // or close connection when finished, since multipart is not supported yet
        switch {
        case chunked(t.TransferEncoding):
-               t.Body = &body{Reader: NewChunkedReader(r), hdr: msg, r: r, closing: t.Close}
+               t.Body = &body{Reader: newChunkedReader(r), hdr: msg, r: r, closing: t.Close}
        case t.ContentLength >= 0:
                // TODO: limit the Content-Length. This is an easy DoS vector.
                t.Body = &body{Reader: io.LimitReader(r, t.ContentLength), closing: t.Close}