vendor dependencies with dep

dhax 2017-09-25 20:20:52 +02:00
parent 93d8310491
commit 1384296a47
2712 changed files with 965742 additions and 0 deletions

62
vendor/github.com/go-pg/pg/internal/error.go generated vendored Normal file

@@ -0,0 +1,62 @@
package internal
import (
"errors"
"fmt"
)
var ErrNoRows = errors.New("pg: no rows in result set")
var ErrMultiRows = errors.New("pg: multiple rows in result set")
type Error struct {
s string
}
func Errorf(s string, args ...interface{}) Error {
return Error{s: fmt.Sprintf(s, args...)}
}
func (err Error) Error() string {
return err.s
}
type PGError struct {
m map[byte]string
}
func NewPGError(m map[byte]string) PGError {
return PGError{
m: m,
}
}
func (err PGError) Field(k byte) string {
return err.m[k]
}
func (err PGError) IntegrityViolation() bool {
switch err.Field('C') {
case "23000", "23001", "23502", "23503", "23505", "23514", "23P01":
return true
default:
return false
}
}
func (err PGError) Error() string {
return fmt.Sprintf(
"%s #%s %s (addr=%q)",
err.Field('S'), err.Field('C'), err.Field('M'), err.Field('a'),
)
}
func AssertOneRow(l int) error {
switch {
case l == 0:
return ErrNoRows
case l > 1:
return ErrMultiRows
default:
return nil
}
}
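Note on the error helpers above: PGError wraps the field map of a PostgreSQL ErrorResponse keyed by field-type byte ('S' severity, 'C' SQLSTATE code, 'M' message), IntegrityViolation matches the class-23 SQLSTATE codes, and AssertOneRow maps a row count onto the ErrNoRows/ErrMultiRows sentinels. A small sketch of how they behave, written in the same package style as underscore_test.go below; the test name and field values are illustrative, not part of this commit:

package internal_test

import (
	"testing"

	"github.com/go-pg/pg/internal"
)

// Sketch: exercise the error helpers with made-up values.
func TestErrorHelpers(t *testing.T) {
	// SQLSTATE 23505 (unique_violation) is one of the integrity-violation codes.
	pgErr := internal.NewPGError(map[byte]string{
		'S': "ERROR",
		'C': "23505",
		'M': "duplicate key value violates unique constraint",
	})
	if !pgErr.IntegrityViolation() {
		t.Errorf("expected an integrity violation for code %q", pgErr.Field('C'))
	}

	// AssertOneRow turns a result row count into the sentinel errors above.
	if err := internal.AssertOneRow(0); err != internal.ErrNoRows {
		t.Errorf("got %v, want ErrNoRows", err)
	}
	if err := internal.AssertOneRow(2); err != internal.ErrMultiRows {
		t.Errorf("got %v, want ErrMultiRows", err)
	}
	if err := internal.AssertOneRow(1); err != nil {
		t.Errorf("got %v, want nil", err)
	}
}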

24
vendor/github.com/go-pg/pg/internal/internal.go generated vendored Normal file

@@ -0,0 +1,24 @@
package internal
import (
"math/rand"
"time"
)
// RetryBackoff returns an exponential backoff delay with jitter, so retrying clients don't overload the server in lockstep
// https://www.awsarchitectureblog.com/2015/03/backoff.html
func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
if retry < 0 {
retry = 0
}
backoff := minBackoff << uint(retry)
if backoff > maxBackoff || backoff < minBackoff {
backoff = maxBackoff
}
if backoff == 0 {
return 0
}
return time.Duration(rand.Int63n(int64(backoff)))
}
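As the linked AWS article explains, the returned delay is a random duration below an exponentially growing cap (minBackoff shifted left by the retry count, clamped to maxBackoff). A minimal retry-loop sketch built on it; the package name and helper are hypothetical, and it assumes code living inside the go-pg module, since internal packages are not importable from outside:

package pgutil

import (
	"time"

	"github.com/go-pg/pg/internal"
)

// withRetries is a sketch only: it retries op with jittered exponential backoff.
func withRetries(op func() error, maxRetries int) error {
	var err error
	for retry := 0; retry <= maxRetries; retry++ {
		if err = op(); err == nil {
			return nil
		}
		// The cap doubles per attempt (250ms, 500ms, 1s, ...) up to 4s;
		// the actual sleep is a random duration below that cap.
		time.Sleep(internal.RetryBackoff(retry, 250*time.Millisecond, 4*time.Second))
	}
	return err
}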

15
vendor/github.com/go-pg/pg/internal/log.go generated vendored Normal file

@@ -0,0 +1,15 @@
package internal
import (
"fmt"
"log"
)
var Logger *log.Logger
func Logf(s string, args ...interface{}) {
if Logger == nil {
return
}
Logger.Output(2, fmt.Sprintf(s, args...))
}
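Logger is nil by default, so Logf is a no-op until something assigns it. A sketch of wiring it up from code inside the go-pg tree (prefix and flags are illustrative); note that Output(2) above makes log.Lshortfile report the Logf caller rather than log.go:

package main

import (
	"log"
	"os"

	"github.com/go-pg/pg/internal"
)

func main() {
	// Route the driver's internal messages to stderr with a recognizable prefix.
	internal.Logger = log.New(os.Stderr, "pg: ", log.LstdFlags|log.Lshortfile)
	internal.Logf("connection pool: %d stale conns reaped", 3)
}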


@@ -0,0 +1,80 @@
package parser
import (
"bytes"
"fmt"
)
type ArrayParser struct {
*Parser
stickyErr error
}
func NewArrayParser(b []byte) *ArrayParser {
var err error
if len(b) < 2 || b[0] != '{' || b[len(b)-1] != '}' {
err = fmt.Errorf("pg: can't parse array: %s", string(b))
} else {
b = b[1 : len(b)-1]
}
return &ArrayParser{
Parser: New(b),
stickyErr: err,
}
}
func (p *ArrayParser) NextElem() ([]byte, error) {
if p.stickyErr != nil {
return nil, p.stickyErr
}
switch c := p.Peek(); c {
case '"':
p.Advance()
b := p.readSubstring()
p.Skip(',')
return b, nil
case '{':
b := p.readElem()
if b != nil {
b = append(b, '}')
}
p.Skip(',')
return b, nil
default:
b, _ := p.ReadSep(',')
if bytes.Equal(b, pgNull) {
b = nil
}
return b, nil
}
}
func (p *ArrayParser) readElem() []byte {
var b []byte
for p.Valid() {
c := p.Read()
switch c {
case '"':
b = append(b, '"')
for {
bb, ok := p.ReadSep('"')
b = append(b, bb...)
stop := len(b) > 0 && b[len(b)-1] != '\\'
if ok {
b = append(b, '"')
}
if stop {
break
}
}
case '}':
return b
default:
b = append(b, c)
}
}
return b
}


@@ -0,0 +1,55 @@
package parser_test
import (
"testing"
"github.com/go-pg/pg/internal/parser"
)
var arrayTests = []struct {
s string
els []string
}{
{`{"\\"}`, []string{`\`}},
{`{"''"}`, []string{`'`}},
{`{{"''\"{}"}}`, []string{`{"''\"{}"}`}},
{`{"''\"{}"}`, []string{`'"{}`}},
{"{1,2}", []string{"1", "2"}},
{"{1,NULL}", []string{"1", ""}},
{`{"1","2"}`, []string{"1", "2"}},
{`{"{1}","{2}"}`, []string{"{1}", "{2}"}},
{"{{1,2},{3}}", []string{"{1,2}", "{3}"}},
}
func TestArrayParser(t *testing.T) {
for testi, test := range arrayTests {
p := parser.NewArrayParser([]byte(test.s))
var got []string
for p.Valid() {
b, err := p.NextElem()
if err != nil {
t.Fatal(err)
}
got = append(got, string(b))
}
if len(got) != len(test.els) {
t.Fatalf(
"#%d got %d elements, wanted %d (got=%#v wanted=%#v)",
testi, len(got), len(test.els), got, test.els,
)
}
for i, el := range got {
if el != test.els[i] {
t.Fatalf(
"#%d el #%d does not match: %q != %q (got=%#v wanted=%#v)",
testi, i, el, test.els[i], got, test.els,
)
}
}
}
}


@@ -0,0 +1,40 @@
package parser
import "fmt"
type HstoreParser struct {
*Parser
}
func NewHstoreParser(b []byte) *HstoreParser {
return &HstoreParser{
Parser: New(b),
}
}
func (p *HstoreParser) NextKey() ([]byte, error) {
if p.Skip(',') {
p.Skip(' ')
}
if !p.Skip('"') {
return nil, fmt.Errorf("pg: can't parse hstore key: %q", p.Bytes())
}
key := p.readSubstring()
if !(p.Skip('=') && p.Skip('>')) {
return nil, fmt.Errorf("pg: can't parse hstore key: %q", p.Bytes())
}
return key, nil
}
func (p *HstoreParser) NextValue() ([]byte, error) {
if !p.Skip('"') {
return nil, fmt.Errorf("pg: can't parse hstore value: %q", p.Bytes())
}
value := p.readSubstring()
p.SkipBytes([]byte(", "))
return value, nil
}


@@ -0,0 +1,57 @@
package parser_test
import (
"testing"
"github.com/go-pg/pg/internal/parser"
)
var hstoreTests = []struct {
s string
m map[string]string
}{
{`""=>""`, map[string]string{"": ""}},
{`"k''k"=>"k''k"`, map[string]string{"k'k": "k'k"}},
{`"k\"k"=>"k\"k"`, map[string]string{`k"k`: `k"k`}},
{`"k\k"=>"k\k"`, map[string]string{`k\k`: `k\k`}},
{`"foo"=>"bar"`, map[string]string{"foo": "bar"}},
{`"foo"=>"bar","k"=>"v"`, map[string]string{"foo": "bar", "k": "v"}},
}
func TestHstoreParser(t *testing.T) {
for testi, test := range hstoreTests {
p := parser.NewHstoreParser([]byte(test.s))
got := make(map[string]string)
for p.Valid() {
key, err := p.NextKey()
if err != nil {
t.Fatal(err)
}
value, err := p.NextValue()
if err != nil {
t.Fatal(err)
}
got[string(key)] = string(value)
}
if len(got) != len(test.m) {
t.Fatalf(
"#%d got %d elements, wanted %d (got=%#v wanted=%#v)",
testi, len(got), len(test.m), got, test.m,
)
}
for k, v := range got {
if v != test.m[k] {
t.Fatalf(
"#%d el %q does not match: %q != %q (got=%#v wanted=%#v)",
testi, k, v, test.m[k], got, test.m,
)
}
}
}
}

153
vendor/github.com/go-pg/pg/internal/parser/parser.go generated vendored Normal file

@@ -0,0 +1,153 @@
package parser
import (
"bytes"
"strconv"
"github.com/go-pg/pg/internal"
)
type Parser struct {
b []byte
}
func New(b []byte) *Parser {
return &Parser{
b: b,
}
}
func NewString(s string) *Parser {
return New(internal.StringToBytes(s))
}
func (p *Parser) Bytes() []byte {
return p.b
}
func (p *Parser) Valid() bool {
return len(p.b) > 0
}
func (p *Parser) Read() byte {
if p.Valid() {
c := p.b[0]
p.Skip(c)
return c
}
return 0
}
func (p *Parser) Peek() byte {
if p.Valid() {
return p.b[0]
}
return 0
}
func (p *Parser) Advance() {
p.b = p.b[1:]
}
func (p *Parser) Skip(c byte) bool {
if p.Peek() == c {
p.Advance()
return true
}
return false
}
func (p *Parser) SkipBytes(b []byte) bool {
if len(b) > len(p.b) {
return false
}
if !bytes.Equal(p.b[:len(b)], b) {
return false
}
p.b = p.b[len(b):]
return true
}
func (p *Parser) ReadSep(c byte) ([]byte, bool) {
ind := bytes.IndexByte(p.b, c)
if ind == -1 {
b := p.b
p.b = p.b[len(p.b):]
return b, false
}
b := p.b[:ind]
p.b = p.b[ind+1:]
return b, true
}
func (p *Parser) ReadIdentifier() (s string, numeric bool) {
end := len(p.b)
numeric = true
for i, ch := range p.b {
if isNum(ch) {
continue
}
if isAlpha(ch) || ch == '_' {
numeric = false
continue
}
end = i
break
}
if end <= 0 {
return "", false
}
b := p.b[:end]
p.b = p.b[end:]
return internal.BytesToString(b), numeric
}
func (p *Parser) ReadNumber() int {
end := len(p.b)
for i, ch := range p.b {
if !isNum(ch) {
end = i
break
}
}
if end <= 0 {
return 0
}
n, _ := strconv.Atoi(string(p.b[:end]))
p.b = p.b[end:]
return n
}
func (p *Parser) readSubstring() []byte {
var b []byte
for p.Valid() {
c := p.Read()
switch c {
case '\\':
switch p.Peek() {
case '\\':
b = append(b, '\\')
p.Advance()
case '"':
b = append(b, '"')
p.Advance()
default:
b = append(b, c)
}
case '\'':
switch p.Peek() {
case '\'':
b = append(b, '\'')
p.Skip(c)
default:
b = append(b, c)
}
case '"':
return b
default:
b = append(b, c)
}
}
return b
}
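Parser is the byte cursor the array and hstore parsers above are built on: Peek/Read/Skip move one byte at a time, ReadSep consumes up to a separator, ReadIdentifier and ReadNumber scan token-like prefixes, and readSubstring resolves backslash and quote escapes inside double-quoted values. A short sketch of the exported primitives; the example name and input literal are arbitrary:

package parser_test

import (
	"fmt"

	"github.com/go-pg/pg/internal/parser"
)

// ExampleParser is a sketch of the low-level cursor primitives.
func ExampleParser() {
	p := parser.NewString("col_name123,rest")

	ident, numeric := p.ReadIdentifier() // stops at the first non-identifier byte
	fmt.Println(ident, numeric)

	p.Skip(',')                   // consume the separator ReadIdentifier stopped on
	rest, found := p.ReadSep(';') // no ';' left, so the remainder is returned
	fmt.Println(string(rest), found, p.Valid())

	// Output:
	// col_name123 false
	// rest false false
}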

15
vendor/github.com/go-pg/pg/internal/parser/util.go generated vendored Normal file

@@ -0,0 +1,15 @@
package parser
var pgNull = []byte("NULL")
func isNum(c byte) bool {
return c >= '0' && c <= '9'
}
func isAlpha(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
func isAlnum(c byte) bool {
return isAlpha(c) || isNum(c)
}

80
vendor/github.com/go-pg/pg/internal/pool/bench_test.go generated vendored Normal file

@@ -0,0 +1,80 @@
package pool_test
import (
"testing"
"time"
"github.com/go-pg/pg/internal/pool"
)
func benchmarkPoolGetPut(b *testing.B, poolSize int) {
connPool := pool.NewConnPool(&pool.Options{
Dialer: dummyDialer,
PoolSize: poolSize,
PoolTimeout: time.Second,
IdleTimeout: time.Hour,
IdleCheckFrequency: time.Hour,
})
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
cn, _, err := connPool.Get()
if err != nil {
b.Fatal(err)
}
if err = connPool.Put(cn); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkPoolGetPut10Conns(b *testing.B) {
benchmarkPoolGetPut(b, 10)
}
func BenchmarkPoolGetPut100Conns(b *testing.B) {
benchmarkPoolGetPut(b, 100)
}
func BenchmarkPoolGetPut1000Conns(b *testing.B) {
benchmarkPoolGetPut(b, 1000)
}
func benchmarkPoolGetRemove(b *testing.B, poolSize int) {
connPool := pool.NewConnPool(&pool.Options{
Dialer: dummyDialer,
PoolSize: poolSize,
PoolTimeout: time.Second,
IdleTimeout: time.Hour,
IdleCheckFrequency: time.Hour,
})
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
cn, _, err := connPool.Get()
if err != nil {
b.Fatal(err)
}
if err := connPool.Remove(cn); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkPoolGetRemove10Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 10)
}
func BenchmarkPoolGetRemove100Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 100)
}
func BenchmarkPoolGetRemove1000Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 1000)
}

106
vendor/github.com/go-pg/pg/internal/pool/conn.go generated vendored Normal file

@@ -0,0 +1,106 @@
package pool
import (
"bufio"
"encoding/hex"
"fmt"
"io"
"net"
"strconv"
"time"
)
var noDeadline = time.Time{}
type Conn struct {
netConn net.Conn
Reader *bufio.Reader
readBuf []byte
Columns [][]byte
Writer *WriteBuffer
InitedAt time.Time
UsedAt time.Time
ProcessId int32
SecretKey int32
_lastId int64
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
Reader: bufio.NewReader(netConn),
readBuf: make([]byte, 0, 512),
Writer: NewWriteBuffer(),
UsedAt: time.Now(),
}
cn.SetNetConn(netConn)
return cn
}
func (cn *Conn) RemoteAddr() net.Addr {
return cn.netConn.RemoteAddr()
}
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
cn.Reader.Reset(netConn)
}
func (cn *Conn) NetConn() net.Conn {
return cn.netConn
}
func (cn *Conn) NextId() string {
cn._lastId++
return strconv.FormatInt(cn._lastId, 10)
}
func (cn *Conn) SetTimeout(rt, wt time.Duration) {
cn.UsedAt = time.Now()
if rt > 0 {
cn.netConn.SetReadDeadline(cn.UsedAt.Add(rt))
} else {
cn.netConn.SetReadDeadline(noDeadline)
}
if wt > 0 {
cn.netConn.SetWriteDeadline(cn.UsedAt.Add(wt))
} else {
cn.netConn.SetWriteDeadline(noDeadline)
}
}
func (cn *Conn) ReadN(n int) ([]byte, error) {
if d := n - cap(cn.readBuf); d > 0 {
cn.readBuf = cn.readBuf[:cap(cn.readBuf)]
cn.readBuf = append(cn.readBuf, make([]byte, d)...)
} else {
cn.readBuf = cn.readBuf[:n]
}
_, err := io.ReadFull(cn.Reader, cn.readBuf)
return cn.readBuf, err
}
func (cn *Conn) FlushWriter() error {
_, err := cn.netConn.Write(cn.Writer.Bytes)
cn.Writer.Reset()
return err
}
func (cn *Conn) Close() error {
return cn.netConn.Close()
}
func (cn *Conn) CheckHealth() error {
if cn.Reader.Buffered() != 0 {
b, _ := cn.Reader.Peek(cn.Reader.Buffered())
err := fmt.Errorf("connection has unread data:\n%s", hex.Dump(b))
return err
}
return nil
}
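Conn bundles the raw net.Conn with a buffered reader, a reusable read buffer, and the write buffer defined later in this diff; ReadN resizes the shared buffer and reads exactly n bytes through the buffered reader. A hedged sketch over an in-memory net.Pipe standing in for a server; the test name and bytes are illustrative:

package pool_test

import (
	"net"
	"testing"

	"github.com/go-pg/pg/internal/pool"
)

// Sketch: ReadN returns exactly n bytes read through Conn's buffered reader.
func TestConnReadN(t *testing.T) {
	client, server := net.Pipe()
	defer server.Close()

	cn := pool.NewConn(client)
	defer cn.Close()

	// Pretend the server sends a 5-byte message header ('R' + int32 length 8).
	go server.Write([]byte("R\x00\x00\x00\x08"))

	b, err := cn.ReadN(5)
	if err != nil {
		t.Fatal(err)
	}
	if string(b) != "R\x00\x00\x00\x08" {
		t.Fatalf("got %q", b)
	}
}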

35
vendor/github.com/go-pg/pg/internal/pool/main_test.go generated vendored Normal file

@@ -0,0 +1,35 @@
package pool_test
import (
"net"
"sync"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestGinkgoSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "pool")
}
func perform(n int, cbs ...func(int)) {
var wg sync.WaitGroup
for _, cb := range cbs {
for i := 0; i < n; i++ {
wg.Add(1)
go func(cb func(int), i int) {
defer GinkgoRecover()
defer wg.Done()
cb(i)
}(cb, i)
}
}
wg.Wait()
}
func dummyDialer() (net.Conn, error) {
return &net.TCPConn{}, nil
}

377
vendor/github.com/go-pg/pg/internal/pool/pool.go generated vendored Normal file

@@ -0,0 +1,377 @@
package pool
import (
"errors"
"net"
"sync"
"sync/atomic"
"time"
"github.com/go-pg/pg/internal"
)
var ErrClosed = errors.New("pg: database is closed")
var ErrPoolTimeout = errors.New("pg: connection pool timeout")
var timers = sync.Pool{
New: func() interface{} {
t := time.NewTimer(time.Hour)
t.Stop()
return t
},
}
// Stats contains pool state information and accumulated stats.
type Stats struct {
Hits uint32 // number of times free connection was found in the pool
Misses uint32 // number of times free connection was NOT found in the pool
Timeouts uint32 // number of times a wait timeout occurred
TotalConns uint32 // number of total connections in the pool
FreeConns uint32 // number of free connections in the pool
StaleConns uint32 // number of stale connections removed from the pool
}
type Pooler interface {
NewConn() (*Conn, error)
CloseConn(*Conn) error
Get() (*Conn, bool, error)
Put(*Conn) error
Remove(*Conn) error
Len() int
FreeLen() int
Stats() *Stats
Close() error
}
type Options struct {
Dialer func() (net.Conn, error)
OnClose func(*Conn) error
PoolSize int
PoolTimeout time.Duration
IdleTimeout time.Duration
IdleCheckFrequency time.Duration
MaxAge time.Duration
}
type ConnPool struct {
opt *Options
dialErrorsNum uint32 // atomic
_lastDialError atomic.Value
queue chan struct{}
connsMu sync.Mutex
conns []*Conn
freeConnsMu sync.Mutex
freeConns []*Conn
stats Stats
_closed uint32 // atomic
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(opt *Options) *ConnPool {
p := &ConnPool{
opt: opt,
queue: make(chan struct{}, opt.PoolSize),
conns: make([]*Conn, 0, opt.PoolSize),
freeConns: make([]*Conn, 0, opt.PoolSize),
}
if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
go p.reaper(opt.IdleCheckFrequency)
}
return p
}
func (p *ConnPool) NewConn() (*Conn, error) {
if p.closed() {
return nil, ErrClosed
}
if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
return nil, p.lastDialError()
}
netConn, err := p.opt.Dialer()
if err != nil {
p.setLastDialError(err)
if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
go p.tryDial()
}
return nil, err
}
atomic.StoreUint32(&p.dialErrorsNum, 0)
cn := NewConn(netConn)
p.connsMu.Lock()
p.conns = append(p.conns, cn)
p.connsMu.Unlock()
return cn, nil
}
func (p *ConnPool) tryDial() {
for {
if p.closed() {
return
}
conn, err := p.opt.Dialer()
if err != nil {
p.setLastDialError(err)
time.Sleep(time.Second)
continue
}
atomic.StoreUint32(&p.dialErrorsNum, 0)
_ = conn.Close()
return
}
}
func (p *ConnPool) setLastDialError(err error) {
p._lastDialError.Store(err)
}
func (p *ConnPool) lastDialError() error {
return p._lastDialError.Load().(error)
}
func (p *ConnPool) isStaleConn(cn *Conn) bool {
if p.opt.IdleTimeout == 0 && p.opt.MaxAge == 0 {
return false
}
now := time.Now()
if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt) >= p.opt.IdleTimeout {
return true
}
if p.opt.MaxAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxAge {
return true
}
return false
}
func (p *ConnPool) popFree() *Conn {
if len(p.freeConns) == 0 {
return nil
}
idx := len(p.freeConns) - 1
cn := p.freeConns[idx]
p.freeConns = p.freeConns[:idx]
return cn
}
// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get() (*Conn, bool, error) {
if p.closed() {
return nil, false, ErrClosed
}
select {
case p.queue <- struct{}{}:
default:
timer := timers.Get().(*time.Timer)
timer.Reset(p.opt.PoolTimeout)
select {
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
case <-timer.C:
timers.Put(timer)
atomic.AddUint32(&p.stats.Timeouts, 1)
return nil, false, ErrPoolTimeout
}
}
for {
p.freeConnsMu.Lock()
cn := p.popFree()
p.freeConnsMu.Unlock()
if cn == nil {
break
}
if p.isStaleConn(cn) {
p.CloseConn(cn)
continue
}
atomic.AddUint32(&p.stats.Hits, 1)
return cn, false, nil
}
atomic.AddUint32(&p.stats.Misses, 1)
newcn, err := p.NewConn()
if err != nil {
<-p.queue
return nil, false, err
}
return newcn, true, nil
}
func (p *ConnPool) Put(cn *Conn) error {
if e := cn.CheckHealth(); e != nil {
internal.Logf(e.Error())
return p.Remove(cn)
}
p.freeConnsMu.Lock()
p.freeConns = append(p.freeConns, cn)
p.freeConnsMu.Unlock()
<-p.queue
return nil
}
func (p *ConnPool) Remove(cn *Conn) error {
_ = p.CloseConn(cn)
<-p.queue
return nil
}
func (p *ConnPool) CloseConn(cn *Conn) error {
p.connsMu.Lock()
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
break
}
}
p.connsMu.Unlock()
return p.closeConn(cn)
}
func (p *ConnPool) closeConn(cn *Conn) error {
if p.opt.OnClose != nil {
_ = p.opt.OnClose(cn)
}
return cn.Close()
}
// Len returns total number of connections.
func (p *ConnPool) Len() int {
p.connsMu.Lock()
l := len(p.conns)
p.connsMu.Unlock()
return l
}
// FreeLen returns number of free connections.
func (p *ConnPool) FreeLen() int {
p.freeConnsMu.Lock()
l := len(p.freeConns)
p.freeConnsMu.Unlock()
return l
}
func (p *ConnPool) Stats() *Stats {
return &Stats{
Hits: atomic.LoadUint32(&p.stats.Hits),
Misses: atomic.LoadUint32(&p.stats.Misses),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
FreeConns: uint32(p.FreeLen()),
}
}
func (p *ConnPool) closed() bool {
return atomic.LoadUint32(&p._closed) == 1
}
func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
return ErrClosed
}
p.connsMu.Lock()
var firstErr error
for _, cn := range p.conns {
if cn == nil {
continue
}
if err := p.closeConn(cn); err != nil && firstErr == nil {
firstErr = err
}
}
p.conns = nil
p.connsMu.Unlock()
p.freeConnsMu.Lock()
p.freeConns = nil
p.freeConnsMu.Unlock()
return firstErr
}
func (p *ConnPool) reapStaleConn() bool {
if len(p.freeConns) == 0 {
return false
}
cn := p.freeConns[0]
if !p.isStaleConn(cn) {
return false
}
_ = p.CloseConn(cn)
p.freeConns = append(p.freeConns[:0], p.freeConns[1:]...)
return true
}
func (p *ConnPool) ReapStaleConns() (int, error) {
var n int
for {
p.queue <- struct{}{}
p.freeConnsMu.Lock()
reaped := p.reapStaleConn()
p.freeConnsMu.Unlock()
<-p.queue
if reaped {
n++
} else {
break
}
}
return n, nil
}
func (p *ConnPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for _ = range ticker.C {
if p.closed() {
break
}
n, err := p.ReapStaleConns()
if err != nil {
internal.Logf("ReapStaleConns failed: %s", err)
continue
}
atomic.AddUint32(&p.stats.StaleConns, uint32(n))
}
}
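Note on the pool above: the buffered queue channel bounds in-flight connections (one token per Get), freeConns is reused LIFO, and the reaper goroutine closes idle or aged connections in the background. Callers pair every successful Get with Put (connection still healthy) or Remove (connection broken). A sketch of that discipline, reusing dummyDialer from main_test.go above; it is illustrative, not part of the original test suite:

package pool_test

import (
	"testing"
	"time"

	"github.com/go-pg/pg/internal/pool"
)

// Sketch: the Get/Put/Remove discipline every caller of the pool follows.
func TestPoolGetPutSketch(t *testing.T) {
	connPool := pool.NewConnPool(&pool.Options{
		Dialer:             dummyDialer,
		PoolSize:           2,
		PoolTimeout:        time.Second,
		IdleTimeout:        time.Hour,
		IdleCheckFrequency: time.Hour,
	})
	defer connPool.Close()

	cn, isNew, err := connPool.Get()
	if err != nil {
		t.Fatal(err)
	}
	if !isNew { // an empty pool has to dial
		t.Fatal("expected a freshly dialed connection")
	}
	if err := connPool.Put(cn); err != nil { // healthy: hand it back for reuse
		t.Fatal(err)
	}

	cn, isNew, err = connPool.Get()
	if err != nil {
		t.Fatal(err)
	}
	if isNew { // the free connection should be reused
		t.Fatal("expected the pooled connection to be reused")
	}
	_ = connPool.Remove(cn) // broken: close it and release the pool slot

	stats := connPool.Stats()
	if stats.Hits != 1 || stats.Misses != 1 {
		t.Fatalf("unexpected stats: %+v", stats)
	}
}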


@@ -0,0 +1,55 @@
package pool
type SingleConnPool struct {
cn *Conn
}
var _ Pooler = (*SingleConnPool)(nil)
func NewSingleConnPool(cn *Conn) *SingleConnPool {
return &SingleConnPool{
cn: cn,
}
}
func (p *SingleConnPool) NewConn() (*Conn, error) {
panic("not implemented")
}
func (p *SingleConnPool) CloseConn(*Conn) error {
panic("not implemented")
}
func (p *SingleConnPool) Get() (*Conn, bool, error) {
return p.cn, false, nil
}
func (p *SingleConnPool) Put(cn *Conn) error {
if p.cn != cn {
panic("p.cn != cn")
}
return nil
}
func (p *SingleConnPool) Remove(cn *Conn) error {
if p.cn != cn {
panic("p.cn != cn")
}
return nil
}
func (p *SingleConnPool) Len() int {
return 1
}
func (p *SingleConnPool) FreeLen() int {
return 0
}
func (p *SingleConnPool) Stats() *Stats {
return nil
}
func (p *SingleConnPool) Close() error {
return nil
}

255
vendor/github.com/go-pg/pg/internal/pool/pool_test.go generated vendored Normal file

@@ -0,0 +1,255 @@
package pool_test
import (
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/go-pg/pg/internal/pool"
)
var _ = Describe("ConnPool", func() {
var connPool *pool.ConnPool
BeforeEach(func() {
connPool = pool.NewConnPool(&pool.Options{
Dialer: dummyDialer,
PoolSize: 10,
PoolTimeout: time.Hour,
IdleTimeout: time.Millisecond,
IdleCheckFrequency: time.Millisecond,
})
})
AfterEach(func() {
connPool.Close()
})
It("should unblock client when conn is removed", func() {
// Reserve one connection.
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
// Reserve all other connections.
var cns []*pool.Conn
for i := 0; i < 9; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
cns = append(cns, cn)
}
started := make(chan bool, 1)
done := make(chan bool, 1)
go func() {
defer GinkgoRecover()
started <- true
_, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
done <- true
err = connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}()
<-started
// Check that Get is blocked.
select {
case <-done:
Fail("Get is not blocked")
default:
// ok
}
err = connPool.Remove(cn)
Expect(err).NotTo(HaveOccurred())
// Check that Ping is unblocked.
select {
case <-done:
// ok
case <-time.After(time.Second):
Fail("Get is not unblocked")
}
for _, cn := range cns {
err = connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}
})
})
var _ = Describe("conns reaper", func() {
const idleTimeout = time.Minute
const maxAge = time.Hour
var connPool *pool.ConnPool
var conns, staleConns, closedConns []*pool.Conn
assert := func(typ string) {
BeforeEach(func() {
closedConns = nil
connPool = pool.NewConnPool(&pool.Options{
Dialer: dummyDialer,
PoolSize: 10,
PoolTimeout: time.Second,
IdleTimeout: idleTimeout,
MaxAge: maxAge,
IdleCheckFrequency: time.Hour,
OnClose: func(cn *pool.Conn) error {
closedConns = append(closedConns, cn)
return nil
},
})
conns = nil
// add stale connections
staleConns = nil
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
switch typ {
case "idle":
cn.UsedAt = time.Now().Add(-2 * idleTimeout)
case "aged":
cn.InitedAt = time.Now().Add(-2 * maxAge)
}
conns = append(conns, cn)
staleConns = append(staleConns, cn)
}
// add fresh connections
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
conns = append(conns, cn)
}
for _, cn := range conns {
if cn.InitedAt.IsZero() {
cn.InitedAt = time.Now()
}
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
Expect(connPool.Len()).To(Equal(6))
Expect(connPool.FreeLen()).To(Equal(6))
n, err := connPool.ReapStaleConns()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(3))
})
AfterEach(func() {
_ = connPool.Close()
Expect(connPool.Len()).To(Equal(0))
Expect(connPool.FreeLen()).To(Equal(0))
Expect(len(closedConns)).To(Equal(len(conns)))
Expect(closedConns).To(ConsistOf(conns))
})
It("reaps stale connections", func() {
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(3))
})
It("does not reap fresh connections", func() {
n, err := connPool.ReapStaleConns()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(0))
})
It("stale connections are closed", func() {
Expect(len(closedConns)).To(Equal(len(staleConns)))
Expect(closedConns).To(ConsistOf(staleConns))
})
It("pool is functional", func() {
for j := 0; j < 3; j++ {
var freeCns []*pool.Conn
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
Expect(cn).NotTo(BeNil())
freeCns = append(freeCns, cn)
}
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(0))
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
Expect(cn).NotTo(BeNil())
conns = append(conns, cn)
Expect(connPool.Len()).To(Equal(4))
Expect(connPool.FreeLen()).To(Equal(0))
err = connPool.Remove(cn)
Expect(err).NotTo(HaveOccurred())
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(0))
for _, cn := range freeCns {
err := connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(3))
}
})
}
assert("idle")
assert("aged")
})
var _ = Describe("race", func() {
var connPool *pool.ConnPool
var C, N int
BeforeEach(func() {
C, N = 10, 1000
if testing.Short() {
C = 4
N = 100
}
})
AfterEach(func() {
connPool.Close()
})
It("does not happen on Get, Put, and Remove", func() {
connPool = pool.NewConnPool(&pool.Options{
Dialer: dummyDialer,
PoolSize: 10,
PoolTimeout: time.Minute,
IdleTimeout: time.Millisecond,
IdleCheckFrequency: time.Millisecond,
})
perform(C, func(id int) {
for i := 0; i < N; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
if err == nil {
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
}
}, func(id int) {
for i := 0; i < N; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
if err == nil {
Expect(connPool.Remove(cn)).NotTo(HaveOccurred())
}
}
})
})
})


@@ -0,0 +1,90 @@
package pool
import (
"encoding/binary"
"io"
)
type WriteBuffer struct {
Bytes []byte
msgStart, paramStart int
}
func NewWriteBuffer() *WriteBuffer {
return &WriteBuffer{
Bytes: make([]byte, 0, 4096),
}
}
func (buf *WriteBuffer) StartMessage(c byte) {
if c == 0 {
buf.msgStart = len(buf.Bytes)
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
} else {
buf.msgStart = len(buf.Bytes) + 1
buf.Bytes = append(buf.Bytes, c, 0, 0, 0, 0)
}
}
func (buf *WriteBuffer) FinishMessage() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.msgStart:], uint32(len(buf.Bytes)-buf.msgStart))
}
func (buf *WriteBuffer) StartParam() {
buf.paramStart = len(buf.Bytes)
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
}
func (buf *WriteBuffer) FinishParam() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.paramStart:], uint32(len(buf.Bytes)-buf.paramStart-4))
}
var nullParamLength = int32(-1)
func (buf *WriteBuffer) FinishNullParam() {
binary.BigEndian.PutUint32(
buf.Bytes[buf.paramStart:], uint32(nullParamLength))
}
func (buf *WriteBuffer) Write(b []byte) (int, error) {
buf.Bytes = append(buf.Bytes, b...)
return len(b), nil
}
func (buf *WriteBuffer) WriteInt16(num int16) {
buf.Bytes = append(buf.Bytes, 0, 0)
binary.BigEndian.PutUint16(buf.Bytes[len(buf.Bytes)-2:], uint16(num))
}
func (buf *WriteBuffer) WriteInt32(num int32) {
buf.Bytes = append(buf.Bytes, 0, 0, 0, 0)
binary.BigEndian.PutUint32(buf.Bytes[len(buf.Bytes)-4:], uint32(num))
}
func (buf *WriteBuffer) WriteString(s string) {
buf.Bytes = append(buf.Bytes, s...)
buf.Bytes = append(buf.Bytes, 0)
}
func (buf *WriteBuffer) WriteBytes(b []byte) {
buf.Bytes = append(buf.Bytes, b...)
buf.Bytes = append(buf.Bytes, 0)
}
func (buf *WriteBuffer) WriteByte(c byte) error {
buf.Bytes = append(buf.Bytes, c)
return nil
}
func (buf *WriteBuffer) Reset() {
buf.Bytes = buf.Bytes[:0]
}
func (buf *WriteBuffer) ReadFrom(r io.Reader) (int64, error) {
n, err := r.Read(buf.Bytes[len(buf.Bytes):cap(buf.Bytes)])
buf.Bytes = buf.Bytes[:len(buf.Bytes)+int(n)]
return int64(n), err
}
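StartMessage reserves the four length bytes (after the optional message-type byte) and records where they begin; FinishMessage then back-fills the big-endian length of everything from that offset, which is exactly how PostgreSQL frontend messages are framed. A small sketch building a simple-protocol Query ('Q') message; the test name and SQL text are arbitrary:

package pool_test

import (
	"bytes"
	"testing"

	"github.com/go-pg/pg/internal/pool"
)

// Sketch: frame a simple-protocol Query message the way the driver would.
func TestWriteBufferQueryMessage(t *testing.T) {
	buf := pool.NewWriteBuffer()

	buf.StartMessage('Q')
	buf.WriteString("SELECT 1") // appends the text plus a terminating NUL
	buf.FinishMessage()         // back-fills the 4-byte length after 'Q'

	// The length covers itself, the SQL text, and the NUL: 4 + 8 + 1 = 13.
	want := append([]byte{'Q', 0, 0, 0, 13}, "SELECT 1\x00"...)
	if !bytes.Equal(buf.Bytes, want) {
		t.Fatalf("got % x, want % x", buf.Bytes, want)
	}
}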

11
vendor/github.com/go-pg/pg/internal/safe.go generated vendored Normal file

@@ -0,0 +1,11 @@
// +build appengine
package internal
func BytesToString(b []byte) string {
return string(b)
}
func StringToBytes(s string) []byte {
return []byte(s)
}

73
vendor/github.com/go-pg/pg/internal/underscore.go generated vendored Normal file

@@ -0,0 +1,73 @@
package internal
func isUpper(c byte) bool {
return c >= 'A' && c <= 'Z'
}
func isLower(c byte) bool {
return !isUpper(c)
}
func toUpper(c byte) byte {
return c - 32
}
func toLower(c byte) byte {
return c + 32
}
// Underscore converts "CamelCasedString" to "camel_cased_string".
func Underscore(s string) string {
r := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
c := s[i]
if isUpper(c) {
if i > 0 && i+1 < len(s) && (isLower(s[i-1]) || isLower(s[i+1])) {
r = append(r, '_', toLower(c))
} else {
r = append(r, toLower(c))
}
} else {
r = append(r, c)
}
}
return string(r)
}
func ToUpper(s string) string {
if isUpperString(s) {
return s
}
b := make([]byte, len(s))
for i := range b {
c := s[i]
if c >= 'a' && c <= 'z' {
c -= 'a' - 'A'
}
b[i] = c
}
return string(b)
}
func isUpperString(s string) bool {
for i := 0; i < len(s); i++ {
c := s[i]
if c >= 'a' && c <= 'z' {
return false
}
}
return true
}
func ToExported(s string) string {
if len(s) == 0 {
return s
}
if c := s[0]; isLower(c) {
b := []byte(s)
b[0] = toUpper(c)
return string(b)
}
return s
}

23
vendor/github.com/go-pg/pg/internal/underscore_test.go generated vendored Normal file

@@ -0,0 +1,23 @@
package internal_test
import (
"testing"
"github.com/go-pg/pg/internal"
)
func TestUnderscore(t *testing.T) {
tests := []struct {
s, wanted string
}{
{"Megacolumn", "megacolumn"},
{"MegaColumn", "mega_column"},
{"MegaColumn_Id", "mega_column__id"},
{"MegaColumn_id", "mega_column_id"},
}
for _, v := range tests {
if got := internal.Underscore(v.s); got != v.wanted {
t.Errorf("got %q, wanted %q", got, v.wanted)
}
}
}

27
vendor/github.com/go-pg/pg/internal/unsafe.go generated vendored Normal file

@@ -0,0 +1,27 @@
// +build !appengine
package internal
import (
"reflect"
"unsafe"
)
func BytesToString(b []byte) string {
bytesHeader := (*reflect.SliceHeader)(unsafe.Pointer(&b))
strHeader := reflect.StringHeader{
Data: bytesHeader.Data,
Len: bytesHeader.Len,
}
return *(*string)(unsafe.Pointer(&strHeader))
}
func StringToBytes(s string) []byte {
sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
bh := reflect.SliceHeader{
Data: sh.Data,
Len: sh.Len,
Cap: sh.Len,
}
return *(*[]byte)(unsafe.Pointer(&bh))
}
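The build-tag pair safe.go/unsafe.go exists because these conversions alias the same backing memory instead of copying: a string produced by BytesToString observes later writes to the byte slice, and a slice produced by StringToBytes must never be written to. A cautionary sketch of the aliasing, demonstrating the non-appengine (unsafe) build; the test name and values are illustrative:

package internal_test

import (
	"testing"

	"github.com/go-pg/pg/internal"
)

// Sketch: BytesToString aliases the slice's memory, so mutations show through.
func TestBytesToStringAliasing(t *testing.T) {
	b := []byte("select")
	s := internal.BytesToString(b)
	if s != "select" {
		t.Fatalf("got %q", s)
	}

	b[0] = 'S'
	if s != "Select" { // the string sees the write because no copy was made
		t.Fatalf("got %q", s)
	}
}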

21
vendor/github.com/go-pg/pg/internal/util.go generated vendored Normal file

@@ -0,0 +1,21 @@
package internal
import "reflect"
func SliceNextElem(v reflect.Value) reflect.Value {
if v.Len() < v.Cap() {
v.Set(v.Slice(0, v.Len()+1))
return v.Index(v.Len() - 1)
}
elemType := v.Type().Elem()
if elemType.Kind() == reflect.Ptr {
elem := reflect.New(elemType.Elem())
v.Set(reflect.Append(v, elem))
return elem.Elem()
}
v.Set(reflect.Append(v, reflect.Zero(elemType)))
return v.Index(v.Len() - 1)
}
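SliceNextElem is a reflection helper for growing a destination slice one element at a time: it reuses spare capacity when available, and for pointer-element slices it appends a freshly allocated element and returns the value it points to. A sketch of both paths; the test name and destination types are illustrative:

package internal_test

import (
	"reflect"
	"testing"

	"github.com/go-pg/pg/internal"
)

// Sketch: grow []int and []*int destinations one element at a time.
func TestSliceNextElem(t *testing.T) {
	// Value elements: each call extends the slice and returns the new slot.
	ints := make([]int, 0, 2)
	v := reflect.ValueOf(&ints).Elem()
	internal.SliceNextElem(v).SetInt(7)
	internal.SliceNextElem(v).SetInt(8)
	if len(ints) != 2 || ints[0] != 7 || ints[1] != 8 {
		t.Fatalf("got %v", ints)
	}

	// Pointer elements: the helper allocates the element and returns *elem.
	var ptrs []*int
	pv := reflect.ValueOf(&ptrs).Elem()
	internal.SliceNextElem(pv).SetInt(9)
	if len(ptrs) != 1 || *ptrs[0] != 9 {
		t.Fatalf("got %v", ptrs)
	}
}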