import (
"bufio"
"compress/gzip"
+ "container/list"
"context"
"crypto/tls"
"errors"
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
},
+ MaxIdleConns: 100,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
idleCount int
idleConn map[connectMethodKey][]*persistConn
idleConnCh map[connectMethodKey]chan *persistConn
+ idleLRU connLRU
reqMu sync.Mutex
reqCanceler map[*Request]func()
// uncompressed.
DisableCompression bool
+ // MaxIdleConns controls the maximum number of idle (keep-alive)
+ // connections across all hosts. Zero means no limit.
+ MaxIdleConns int
+
// MaxIdleConnsPerHost, if non-zero, controls the maximum idle
// (keep-alive) connections to keep per-host. If zero,
// DefaultMaxIdleConnsPerHost is used.
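
For illustration only (not part of this change): a caller tuning both knobs might set them as in the sketch below; the values are assumptions, not recommendations.

	package main

	import "net/http"

	func main() {
		// Hypothetical values, chosen only to illustrate the two knobs.
		tr := &http.Transport{
			MaxIdleConns:        50, // global cap across all hosts; zero means no limit
			MaxIdleConnsPerHost: 10, // per-host cap; zero means DefaultMaxIdleConnsPerHost is used
		}
		client := &http.Client{Transport: tr}
		_ = client
	}
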
t.idleConn = nil
t.idleConnCh = nil
t.wantIdle = true
+ t.idleCount = 0
+ t.idleLRU = connLRU{}
t.idleMu.Unlock()
for _, conns := range m {
for _, pconn := range conns {
errConnBroken = errors.New("http: putIdleConn: connection is in bad state")
errWantIdle = errors.New("http: putIdleConn: CloseIdleConnections was called")
errTooManyIdle = errors.New("http: putIdleConn: too many idle connections")
+ errTooManyIdleHost = errors.New("http: putIdleConn: too many idle connections for host")
errCloseIdleConns = errors.New("http: CloseIdleConnections called")
errReadLoopExiting = errors.New("http: persistConn.readLoop exiting")
errServerClosedIdle = errors.New("http: server closed idle conn")
}
}
+
+func (t *Transport) maxIdleConnsPerHost() int {
+ if v := t.MaxIdleConnsPerHost; v != 0 {
+ return v
+ }
+ return DefaultMaxIdleConnsPerHost
+}
+
// tryPutIdleConn adds pconn to the list of idle persistent connections awaiting
// a new request.
// If pconn is no longer needed or not in a good state, tryPutIdleConn returns
if pconn.isBroken() {
return errConnBroken
}
- key := pconn.cacheKey
- max := t.MaxIdleConnsPerHost
- if max == 0 {
- max = DefaultMaxIdleConnsPerHost
- }
pconn.markReused()
+ key := pconn.cacheKey
t.idleMu.Lock()
defer t.idleMu.Unlock()
t.idleConn = make(map[connectMethodKey][]*persistConn)
}
idles := t.idleConn[key]
- if len(idles) >= max {
- return errTooManyIdle
+ if len(idles) >= t.maxIdleConnsPerHost() {
+ return errTooManyIdleHost
}
for _, exist := range idles {
if exist == pconn {
log.Fatalf("dup idle pconn %p in freelist", pconn)
}
}
-
t.idleConn[key] = append(idles, pconn)
t.idleCount++
+ t.idleLRU.add(pconn)
+ if t.MaxIdleConns != 0 && t.idleLRU.len() > t.MaxIdleConns {
+ oldest := t.idleLRU.removeOldest()
+ oldest.close(errTooManyIdle)
+ t.removeIdleConnLocked(oldest)
+ }
pconn.idleAt = time.Now()
return nil
}
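
A minimal standalone sketch of the policy above, using a toy pool with hypothetical names (idlePool, put) rather than net/http internals: a full per-host list rejects the incoming connection, while exceeding the global cap keeps the new connection and evicts the least recently idled one instead.

	package main

	import (
		"container/list"
		"errors"
		"fmt"
	)

	var (
		errPoolTooManyIdle     = errors.New("too many idle connections")
		errPoolTooManyIdleHost = errors.New("too many idle connections for host")
	)

	type idleEntry struct{ key, conn string }

	// idlePool is a toy stand-in for the Transport's idle-connection
	// bookkeeping: a per-key count plus a global LRU, mirroring the
	// order of checks in tryPutIdleConn.
	type idlePool struct {
		perHostMax, globalMax int
		perKey                map[string]int
		lru                   *list.List // front = most recently idled
	}

	func newIdlePool(perHostMax, globalMax int) *idlePool {
		return &idlePool{
			perHostMax: perHostMax,
			globalMax:  globalMax,
			perKey:     make(map[string]int),
			lru:        list.New(),
		}
	}

	func (p *idlePool) put(key, conn string) error {
		if p.perKey[key] >= p.perHostMax {
			return errPoolTooManyIdleHost // per-host overflow: drop the incoming conn
		}
		p.perKey[key]++
		p.lru.PushFront(idleEntry{key, conn})
		if p.globalMax != 0 && p.lru.Len() > p.globalMax {
			// Global overflow: evict the least recently idled connection.
			oldest := p.lru.Remove(p.lru.Back()).(idleEntry)
			p.perKey[oldest.key]--
			fmt.Println("evicted:", oldest.conn, errPoolTooManyIdle)
		}
		return nil
	}

	func main() {
		p := newIdlePool(2, 3)
		for _, e := range []idleEntry{{"a", "a1"}, {"b", "b1"}, {"c", "c1"}, {"d", "d1"}} {
			if err := p.put(e.key, e.conn); err != nil {
				fmt.Println("rejected:", e.conn, err)
			}
		}
		// Prints "evicted: a1 ...": host a's connection was least recently idled.
	}
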
pconn = pconns[0]
delete(t.idleConn, key)
} else {
- // 2 or more cached connections; pop last
- // TODO: queue?
+ // 2 or more cached connections; use the most
+ // recently used one.
pconn = pconns[len(pconns)-1]
t.idleConn[key] = pconns[:len(pconns)-1]
}
t.idleCount--
+ t.idleLRU.remove(pconn)
if pconn.isBroken() {
// There is a tiny window where this is
// possible, between the connection dying and
// removeIdleConn marks pconn as dead.
func (t *Transport) removeIdleConn(pconn *persistConn) {
- key := pconn.cacheKey
t.idleMu.Lock()
defer t.idleMu.Unlock()
+ t.removeIdleConnLocked(pconn)
+}
+
+// t.idleMu must be held.
+func (t *Transport) removeIdleConnLocked(pconn *persistConn) {
+ t.idleLRU.remove(pconn)
+ key := pconn.cacheKey
pconns, _ := t.idleConn[key]
switch len(pconns) {
case 0:
delete(t.idleConn, key)
}
default:
- // TODO(bradfitz): map into LRU element?
for i, v := range pconns {
if v != pconn {
continue
Renegotiation: cfg.Renegotiation,
}
}
+
+type connLRU struct {
+	ll *list.List // list.Element.Value type is *persistConn
+ m map[*persistConn]*list.Element
+}
+
+// add adds pc to the head of the linked list.
+func (cl *connLRU) add(pc *persistConn) {
+ if cl.ll == nil {
+ cl.ll = list.New()
+ cl.m = make(map[*persistConn]*list.Element)
+ }
+ ele := cl.ll.PushFront(pc)
+ if _, ok := cl.m[pc]; ok {
+ panic("persistConn was already in LRU")
+ }
+ cl.m[pc] = ele
+}
+
+func (cl *connLRU) removeOldest() *persistConn {
+ ele := cl.ll.Back()
+ pc := ele.Value.(*persistConn)
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ return pc
+}
+
+// remove removes pc from cl.
+func (cl *connLRU) remove(pc *persistConn) {
+ if ele, ok := cl.m[pc]; ok {
+ cl.ll.Remove(ele)
+ delete(cl.m, pc)
+ }
+}
+
+// len returns the number of items in the cache.
+func (cl *connLRU) len() int {
+ return len(cl.m)
+}
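
As a design note on the type above: the map keyed by *persistConn makes remove O(1) instead of a list walk, and removeOldest pops from the back because add pushes to the front. A tiny runnable illustration of the same map-plus-list shape, with string values since connLRU itself is unexported (lruSet is a hypothetical name):

	package main

	import (
		"container/list"
		"fmt"
	)

	// lruSet mirrors connLRU's layout with string values, for illustration only.
	type lruSet struct {
		ll *list.List
		m  map[string]*list.Element
	}

	func (s *lruSet) add(v string) {
		if s.ll == nil {
			s.ll = list.New()
			s.m = make(map[string]*list.Element)
		}
		s.m[v] = s.ll.PushFront(v)
	}

	func (s *lruSet) remove(v string) {
		if ele, ok := s.m[v]; ok { // O(1) lookup, no scan of the list
			s.ll.Remove(ele)
			delete(s.m, v)
		}
	}

	func (s *lruSet) removeOldest() string {
		ele := s.ll.Back() // least recently added entry
		v := ele.Value.(string)
		s.ll.Remove(ele)
		delete(s.m, v)
		return v
	}

	func main() {
		var s lruSet
		s.add("a")
		s.add("b")
		s.add("c")
		s.remove("b")                 // e.g. the conn was handed back out or closed
		fmt.Println(s.removeOldest()) // prints "a": oldest surviving entry goes first
	}
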
}
}))
defer ts.Close()
- maxIdleConns := 2
- tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConns}
+ maxIdleConnsPerHost := 2
+ tr := &Transport{DisableKeepAlives: false, MaxIdleConnsPerHost: maxIdleConnsPerHost}
c := &Client{Transport: tr}
// Start 3 outstanding requests and wait for the server to get them.
resch <- "res2"
<-donech
- if e, g := 2, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after second response, expected %d idle conns; got %d", e, g)
+ if g, w := tr.IdleConnCountForTesting(cacheKey), 2; g != w {
+ t.Errorf("after second response, idle conns = %d; want %d", g, w)
}
resch <- "res3"
<-donech
- if e, g := maxIdleConns, tr.IdleConnCountForTesting(cacheKey); e != g {
- t.Errorf("after third response, still expected %d idle conns; got %d", e, g)
+ if g, w := tr.IdleConnCountForTesting(cacheKey), maxIdleConnsPerHost; g != w {
+ t.Errorf("after third response, idle conns = %d; want %d", g, w)
}
}
t.Errorf("unexpected DNS host lookup for %q", host)
return nil, nil
}
- if err != nil {
- t.Error(err)
- return nil, err
- }
return []net.IPAddr{net.IPAddr{IP: net.ParseIP(ip)}}, nil
})
}
}
+
+func TestTransportMaxIdleConns(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ // No body for convenience.
+ }))
+ defer ts.Close()
+ tr := &Transport{
+ MaxIdleConns: 4,
+ }
+ defer tr.CloseIdleConnections()
+
+ ip, port, err := net.SplitHostPort(ts.Listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := &Client{Transport: tr}
+ ctx := context.WithValue(context.Background(), nettrace.LookupIPAltResolverKey{}, func(ctx context.Context, host string) ([]net.IPAddr, error) {
+ return []net.IPAddr{net.IPAddr{IP: net.ParseIP(ip)}}, nil
+ })
+
+ hitHost := func(n int) {
+ req, _ := NewRequest("GET", fmt.Sprintf("http://host-%d.dns-is-faked.golang:"+port, n), nil)
+ req = req.WithContext(ctx)
+ res, err := c.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ }
+ for i := 0; i < 4; i++ {
+ hitHost(i)
+ }
+ want := []string{
+ "|http|host-0.dns-is-faked.golang:" + port,
+ "|http|host-1.dns-is-faked.golang:" + port,
+ "|http|host-2.dns-is-faked.golang:" + port,
+ "|http|host-3.dns-is-faked.golang:" + port,
+ }
+ if got := tr.IdleConnKeysForTesting(); !reflect.DeepEqual(got, want) {
+ t.Fatalf("idle conn keys mismatch.\n got: %q\nwant: %q\n", got, want)
+ }
+
+ // Now hitting the 5th host should kick out the first host:
+ hitHost(4)
+ want = []string{
+ "|http|host-1.dns-is-faked.golang:" + port,
+ "|http|host-2.dns-is-faked.golang:" + port,
+ "|http|host-3.dns-is-faked.golang:" + port,
+ "|http|host-4.dns-is-faked.golang:" + port,
+ }
+ if got := tr.IdleConnKeysForTesting(); !reflect.DeepEqual(got, want) {
+ t.Fatalf("idle conn keys mismatch after 5th host.\n got: %q\nwant: %q\n", got, want)
+ }
+}
+
var errFakeRoundTrip = errors.New("fake roundtrip")
type funcRoundTripper func()