"runtime"
"sort"
"sync"
+ "sync/atomic"
)
var drivers = make(map[string]driver.Driver)
type DB struct {
driver driver.Driver
dsn string
+ // numClosed is an atomic counter of the total number of closed
+ // connections. Stmt.removeClosedStmtLocked checks it before cleaning
+ // closed connections in Stmt.css.
+ numClosed uint64
mu sync.Mutex // protects following fields
freeConn []*driverConn
// guarded by db.mu
inUse bool
onPut []func() // code (with db.mu held) run when conn is next returned
- dbmuClosed bool // same as closed, but guarded by db.mu, for connIfFree
+ dbmuClosed bool // same as closed, but guarded by db.mu, for removeClosedStmtLocked
}
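The numClosed / lastNumClosed pair implements a generation-counter pattern: writers bump a shared atomic counter, while readers keep a private snapshot and take the mutex-guarded slow path only once the counter has advanced far enough. A minimal standalone sketch of the idea (the staleCache type and its names are illustrative, not part of this patch):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// staleCache holds entries that go stale as a shared event counter advances.
type staleCache struct {
	gen      uint64 // bumped atomically on every invalidating event
	mu       sync.Mutex
	entries  []int
	lastSeen uint64 // snapshot of gen at the last cleanup pass
}

// invalidate records one invalidating event without taking mu,
// mirroring the atomic.AddUint64 on db.numClosed below.
func (c *staleCache) invalidate() { atomic.AddUint64(&c.gen, 1) }

// maybeClean locks mu only when enough events have accumulated since
// the last pass. Callers must serialize maybeClean itself, just as
// removeClosedStmtLocked runs with s.mu already held.
func (c *staleCache) maybeClean(threshold uint64) {
	g := atomic.LoadUint64(&c.gen)
	if g-c.lastSeen < threshold {
		return // fast path: no lock taken
	}
	c.mu.Lock()
	c.entries = c.entries[:0] // stand-in for the real cleanup work
	c.mu.Unlock()
	c.lastSeen = g
}

func main() {
	c := &staleCache{entries: []int{1, 2, 3}}
	c.invalidate()
	c.maybeClean(2) // 1 event < threshold 2: skipped
	c.invalidate()
	c.maybeClean(2) // 2 events: cleanup runs
	fmt.Println(len(c.entries)) // 0
}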
func (dc *driverConn) finalClose() error {
dc.db.maybeOpenNewConnections()
dc.db.mu.Unlock()
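+ // Bump the counter only after db.mu is released; readers use
+ // atomic.LoadUint64 and never need the lock.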
+ atomic.AddUint64(&dc.db.numClosed, 1)
return err
}
errConnBusy = errors.New("database/sql: internal sentinel error: conn is busy")
)
-// connIfFree returns (wanted, nil) if wanted is still a valid conn and
-// isn't in use.
-//
-// The error is errConnClosed if the connection if the requested connection
-// is invalid because it's been closed.
-//
-// The error is errConnBusy if the connection is in use.
-func (db *DB) connIfFree(wanted *driverConn) (*driverConn, error) {
- db.mu.Lock()
- defer db.mu.Unlock()
- if wanted.dbmuClosed {
- return nil, errConnClosed
- }
- if wanted.inUse {
- return nil, errConnBusy
- }
- idx := -1
- for ii, v := range db.freeConn {
- if v == wanted {
- idx = ii
- break
- }
- }
- if idx >= 0 {
- db.freeConn = append(db.freeConn[:idx], db.freeConn[idx+1:]...)
- wanted.inUse = true
- return wanted, nil
- }
- // TODO(bradfitz): shouldn't get here. After Go 1.1, change this to:
- // panic("connIfFree call requested a non-closed, non-busy, non-free conn")
- // Which passes all the tests, but I'm too paranoid to include this
- // late in Go 1.1.
- // Instead, treat it like a busy connection:
- return nil, errConnBusy
-}
-
// putConnHook is a hook for testing.
var putConnHook func(*DB, *driverConn)
return nil, err
}
stmt := &Stmt{
- db: db,
- query: query,
- css: []connStmt{{dc, si}},
+ db: db,
+ query: query,
+ css: []connStmt{{dc, si}},
+ lastNumClosed: atomic.LoadUint64(&db.numClosed),
}
db.addDep(stmt, stmt)
db.putConn(dc, nil)
// used if tx == nil and one is found that has idle
// connections. If tx != nil, txsi is always used.
css []connStmt
+
+ // lastNumClosed is copied from db.numClosed when the Stmt is created
+ // without a tx, and again each time closed connections are removed
+ // from css. After creation it is accessed only with s.mu held.
+ lastNumClosed uint64
}
// Exec executes a prepared statement with the given arguments and
return driverResult{ds.Locker, resi}, nil
}
+// removeClosedStmtLocked removes closed conns in s.css.
+//
+// To avoid lock contention on DB.mu, it does so only when
+// s.db.numClosed - s.lastNumClosed is large enough.
+//
+// The caller must hold s.mu.
+func (s *Stmt) removeClosedStmtLocked() {
+ t := len(s.css)/2 + 1
+ if t > 10 {
+ t = 10
+ }
+ dbClosed := atomic.LoadUint64(&s.db.numClosed)
+ if dbClosed-s.lastNumClosed < uint64(t) {
+ return
+ }
+
+ s.db.mu.Lock()
+ for i := 0; i < len(s.css); i++ {
+ if s.css[i].dc.dbmuClosed {
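+ // Remove without preserving order: swap in the last element,
+ // shrink the slice, and revisit index i.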
+ s.css[i] = s.css[len(s.css)-1]
+ s.css = s.css[:len(s.css)-1]
+ i--
+ }
+ }
+ s.db.mu.Unlock()
+ s.lastNumClosed = dbClosed
+}
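The cleanup threshold scales with the cache size but is capped: with 6 cached statements, t = 6/2+1 = 4, so a scan runs only after at least 4 connections have closed since the last pass, and once css exceeds 18 entries the threshold stays pinned at 10. A tiny self-contained check of that arithmetic (illustrative only):

package main

import "fmt"

// threshold mirrors the cap-at-10 rule in removeClosedStmtLocked.
func threshold(cssLen int) int {
	t := cssLen/2 + 1
	if t > 10 {
		t = 10
	}
	return t
}

func main() {
	for _, n := range []int{0, 6, 18, 100} {
		fmt.Printf("len(css)=%3d -> scan after %2d closes\n", n, threshold(n))
	}
	// len(css)=  0 -> scan after  1 closes
	// len(css)=  6 -> scan after  4 closes
	// len(css)= 18 -> scan after 10 closes
	// len(css)=100 -> scan after 10 closes
}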
+
// connStmt returns a free driver connection on which to execute the
// statement, a function to call to release the connection, and a
// statement bound to that connection.
return ci, releaseConn, s.txsi.si, nil
}
- for i := 0; i < len(s.css); i++ {
- v := s.css[i]
- _, err := s.db.connIfFree(v.dc)
- if err == nil {
- s.mu.Unlock()
- return v.dc, v.dc.releaseConn, v.si, nil
- }
- if err == errConnClosed {
- // Lazily remove dead conn from our freelist.
- s.css[i] = s.css[len(s.css)-1]
- s.css = s.css[:len(s.css)-1]
- i--
- }
-
- }
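+ // Amortized cleanup replaces the old per-conn connIfFree pass,
+ // which took db.mu once for every candidate connection.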
+ s.removeClosedStmtLocked()
s.mu.Unlock()
- // If all connections are busy, either wait for one to become available (if
- // we've already hit the maximum number of open connections) or create a
- // new one.
- //
// TODO(bradfitz): or always wait for one? make configurable later?
dc, err := s.db.conn()
if err != nil {
return nil, nil, nil, err
}
- // Do another pass over the list to see whether this statement has
- // already been prepared on the connection assigned to us.
s.mu.Lock()
for _, v := range s.css {
if v.dc == dc {
wg.Wait()
}
-func manyConcurrentQueries(t testing.TB) {
- maxProcs, numReqs := 16, 500
- if testing.Short() {
- maxProcs, numReqs = 4, 50
- }
- defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
-
- db := newTestDB(t, "people")
- defer closeDB(t, db)
-
- stmt, err := db.Prepare("SELECT|people|name|")
- if err != nil {
- t.Fatal(err)
- }
- defer stmt.Close()
-
- var wg sync.WaitGroup
- wg.Add(numReqs)
-
- reqs := make(chan bool)
- defer close(reqs)
-
- for i := 0; i < maxProcs*2; i++ {
- go func() {
- for range reqs {
- rows, err := stmt.Query()
- if err != nil {
- t.Errorf("error on query: %v", err)
- wg.Done()
- continue
- }
-
- var name string
- for rows.Next() {
- rows.Scan(&name)
- }
- rows.Close()
-
- wg.Done()
- }
- }()
- }
-
- for i := 0; i < numReqs; i++ {
- reqs <- true
- }
-
- wg.Wait()
-}
-
func TestIssue6081(t *testing.T) {
db := newTestDB(t, "people")
defer closeDB(t, db)
doConcurrentTest(b, ct)
}
}
+
+func BenchmarkManyConcurrentQueries(b *testing.B) {
+ b.ReportAllocs()
+ // To see lock contention in Go 1.4, 16+ cores and 128+ goroutines are required.
+ const parallelism = 16
+
+ db := newTestDB(b, "magicquery")
+ defer closeDB(b, db)
+ db.SetMaxIdleConns(runtime.GOMAXPROCS(0) * parallelism)
+
+ stmt, err := db.Prepare("SELECT|magicquery|op|op=?,millis=?")
+ if err != nil {
+ b.Fatal(err)
+ }
+ defer stmt.Close()
+
+ b.SetParallelism(parallelism)
+ b.RunParallel(func(pb *testing.PB) {
+ for pb.Next() {
+ rows, err := stmt.Query("sleep", 1)
+ if err != nil {
+ b.Error(err)
+ return
+ }
+ rows.Close()
+ }
+ })
+}
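To reproduce the contention this patch targets, the benchmark can be run with several CPU settings, for example:

	go test -run=NONE -bench=ManyConcurrentQueries -cpu=4,16 database/sql

With b.SetParallelism(16), the -cpu=16 run drives 16*16 = 256 goroutines through the shared statement, comfortably past the 128 or so noted in the comment above.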