diff --git a/.travis.yml b/.travis.yml index f0052c7..9ebf51d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,8 +1,10 @@ +dist: trusty +sudo: false + language: go go: - - 1.5 - - 1.6 - - 1.7 + - 1.6.4 + - 1.7.4 - tip before_install: @@ -13,7 +15,6 @@ before_script: - ./integration/setup_travis.sh script: - - test -z "$(go fmt ./...)" - go build -o sql-runner ./sql_runner/ - ./integration/run_tests.sh - go test ./sql_runner/ -v -covermode=count -coverprofile=coverage.out @@ -28,7 +29,7 @@ deploy: provider: script script: release-manager --config .release.yml --check-version --make-version --make-artifact --upload-artifact on: - condition: '"${TRAVIS_GO_VERSION}" == "1.7"' + condition: '"${TRAVIS_GO_VERSION}" == "1.7.4"' tags: true addons: diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 3f4e0b6..6556391 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,70 +1,75 @@ { "ImportPath": "github.com/snowplow/sql-runner", - "GoVersion": "go1.5", - "GodepVersion": "v74", + "GoVersion": "go1.7", + "GodepVersion": "v76", "Packages": [ "./sql_runner/" ], "Deps": [ { "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints", + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/aws/aws-sdk-go/aws/request", - "Comment": "v1.4.8", - "Rev": "2c129c11a732646f1411de34ad4833f50503f344" + "Comment": "v1.6.10-1-g8649d27", + "Rev": "8649d278323ebf6bd20c9cd56ecb152b1c617375" }, { "ImportPath": "github.com/davecgh/go-spew/spew", - "Comment": "v1.0.0-3-g6d21280", - "Rev": "6d212800a42e8ab5c146b8ace3490ee17e5225f9" + "Comment": "v1.1.0", + "Rev": "346938d642f2ec3594ed81d874461961cd0faa76" }, { "ImportPath": "github.com/go-ini/ini", - "Comment": "v1.21.1", - 
"Rev": "6e4869b434bd001f6983749881c7ead3545887d8" + "Comment": "v1.23.0", + "Rev": "6f66b0e091edb3c7b380f7c4f0f884274d550b67" }, { "ImportPath": "github.com/hashicorp/consul/api", - "Comment": "v0.7.0-rc2-31-g0a34741", - "Rev": "0a34741d7266af34eed36048d1a8e8880a7b17a1" + "Comment": "v0.7.2-59-gcae8480", + "Rev": "cae84804ec9625f46e7e941e680c79da334bd076" }, { "ImportPath": "github.com/hashicorp/go-cleanhttp", @@ -72,12 +77,12 @@ }, { "ImportPath": "github.com/hashicorp/serf/coordinate", - "Comment": "v0.7.0-97-g9432bc0", - "Rev": "9432bc08aa8d486e497e27f84878ebbe8c1eab66" + "Comment": "v0.8.0-34-gd3a67ab", + "Rev": "d3a67ab21bc8a4643fa53a3633f2d951dd50c6ca" }, { "ImportPath": "github.com/jinzhu/inflection", - "Rev": "74387dc39a75e970e7a3ae6a3386b5bd2e5c5cff" + "Rev": "1c35d901db3da928c72a72d8458480cc9ade058f" }, { "ImportPath": "github.com/jmespath/go-jmespath", @@ -95,42 +100,38 @@ }, { "ImportPath": "github.com/stretchr/testify/assert", - "Comment": "v1.1.3-19-gd77da35", - "Rev": "d77da356e56a7428ad25149ca77381849a6a5232" - }, - { - "ImportPath": "gopkg.in/bsm/ratelimit.v1", - "Rev": "db14e161995a5177acef654cb0dd785e8ee8bc22" + "Comment": "v1.1.4-25-g2402e8e", + "Rev": "2402e8e7a02fc811447d11f881aa9746cdc57983" }, { "ImportPath": "gopkg.in/pg.v5", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/pg.v5/internal", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/pg.v5/internal/parser", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/pg.v5/internal/pool", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/pg.v5/orm", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/pg.v5/types", - "Comment": "v4.9.4-36-g1065c65", - "Rev": "1065c653f9edd4035545201ab62c40e0bbaea851" + "Comment": "v5.2.3", + "Rev": "d223c86b67b63331b349ec88e994a5c9ae831634" }, { "ImportPath": "gopkg.in/yaml.v1", diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d68..0000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index a4087f2..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - // Cancel will be deprecated in 1.7 and will be replaced with Context - Cancel: r.Cancel, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := 
range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go deleted file mode 100644 index 75da021..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/.travis.yml b/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/.travis.yml deleted file mode 100644 index ddf176f..0000000 --- a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -script: make testall -sudo: false -go: - - 1.5 - - 1.4 - - 1.3 - - 1.2 -install: - - go get github.com/onsi/ginkgo github.com/onsi/gomega - - mkdir -p $GOPATH/src/gopkg.in/bsm - - mv $GOPATH/src/github.com/bsm/ratelimit $GOPATH/src/gopkg.in/bsm/ratelimit.v1 - - cd $GOPATH/src/gopkg.in/bsm/ratelimit.v1 diff --git a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/Makefile b/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/Makefile deleted file mode 100644 index aa713cc..0000000 --- a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -default: test - -testdeps: - @go get github.com/onsi/ginkgo - @go get github.com/onsi/gomega - -test: testdeps - @go test ./... - -testrace: testdeps - @go test ./... -race - -testall: test testrace - -bench: - @go test ./... -run=NONE -bench=. diff --git a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/README.md b/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/README.md deleted file mode 100644 index bf48e94..0000000 --- a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# RateLimit [![Build Status](https://travis-ci.org/bsm/ratelimit.png?branch=master)](https://travis-ci.org/bsm/ratelimit) - -Simple, thread-safe Go rate-limiter. -Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 - -### Example - -```go -package main - -import ( - "github.com/bsm/ratelimit" - "log" -) - -func main() { - // Create a new rate-limiter, allowing up-to 10 calls - // per second - rl := ratelimit.New(10, time.Second) - - for i:=0; i<20; i++ { - if rl.Limit() { - fmt.Println("DOH! Over limit!") - } else { - fmt.Println("OK") - } - } -} -``` - -### Licence - -``` -Copyright (c) 2015 Black Square Media - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -``` diff --git a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/ratelimit.go b/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/ratelimit.go deleted file mode 100644 index d006009..0000000 --- a/Godeps/_workspace/src/gopkg.in/bsm/ratelimit.v1/ratelimit.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Simple, thread-safe Go rate-limiter. -Inspired by Antti Huima's algorithm on http://stackoverflow.com/a/668327 - -Example: - - // Create a new rate-limiter, allowing up-to 10 calls - // per second - rl := ratelimit.New(10, time.Second) - - for i:=0; i<20; i++ { - if rl.Limit() { - fmt.Println("DOH! Over limit!") - } else { - fmt.Println("OK") - } - } -*/ -package ratelimit - -import ( - "sync/atomic" - "time" -) - -// RateLimiter instances are thread-safe. -type RateLimiter struct { - rate, allowance, max, unit, lastCheck uint64 -} - -// New creates a new rate limiter instance -func New(rate int, per time.Duration) *RateLimiter { - nano := uint64(per) - if nano < 1 { - nano = uint64(time.Second) - } - if rate < 1 { - rate = 1 - } - - return &RateLimiter{ - rate: uint64(rate), // store the rate - allowance: uint64(rate) * nano, // set our allowance to max in the beginning - max: uint64(rate) * nano, // remember our maximum allowance - unit: nano, // remember our unit size - - lastCheck: unixNano(), - } -} - -// UpdateRate allows to update the allowed rate -func (rl *RateLimiter) UpdateRate(rate int) { - atomic.StoreUint64(&rl.rate, uint64(rate)) - atomic.StoreUint64(&rl.max, uint64(rate)*rl.unit) -} - -// Limit returns true if rate was exceeded -func (rl *RateLimiter) Limit() bool { - // Calculate the number of ns that have passed since our last call - now := unixNano() - passed := now - atomic.SwapUint64(&rl.lastCheck, now) - - // Add them to our allowance - rate := atomic.LoadUint64(&rl.rate) - current := atomic.AddUint64(&rl.allowance, passed*rate) - - // Ensure our allowance is not over maximum - if max := atomic.LoadUint64(&rl.max); current > max { - atomic.AddUint64(&rl.allowance, max-current) - current = max - } - - // If our allowance is less than one unit, rate-limit! - if current < rl.unit { - return true - } - - // Not limited, subtract a unit - atomic.AddUint64(&rl.allowance, -rl.unit) - return false -} - -// Undo reverts the last Limit() call, returning consumed allowance -func (rl *RateLimiter) Undo() { - current := atomic.AddUint64(&rl.allowance, rl.unit) - - // Ensure our allowance is not over maximum - if max := atomic.LoadUint64(&rl.max); current > max { - atomic.AddUint64(&rl.allowance, max-current) - } -} - -// now as unix nanoseconds -func unixNano() uint64 { - return uint64(time.Now().UnixNano()) -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/listener.go b/Godeps/_workspace/src/gopkg.in/pg.v5/listener.go deleted file mode 100644 index 0239938..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/listener.go +++ /dev/null @@ -1,120 +0,0 @@ -package pg - -import ( - "sync" - "time" - - "gopkg.in/pg.v5/internal" - "gopkg.in/pg.v5/internal/pool" -) - -// Not thread-safe. 
-type Listener struct { - channels []string - - db *DB - - _cn *pool.Conn - closed bool - mx sync.Mutex -} - -func (l *Listener) conn(readTimeout time.Duration) (*pool.Conn, error) { - defer l.mx.Unlock() - l.mx.Lock() - - if l.closed { - return nil, errListenerClosed - } - - if l._cn == nil { - cn, err := l.db.conn() - if err != nil { - return nil, err - } - l._cn = cn - - if len(l.channels) > 0 { - if err := l.listen(cn, l.channels...); err != nil { - return nil, err - } - } - } - - l._cn.SetReadTimeout(readTimeout) - l._cn.SetWriteTimeout(l.db.opt.WriteTimeout) - return l._cn, nil -} - -func (l *Listener) Listen(channels ...string) error { - cn, err := l.conn(l.db.opt.ReadTimeout) - if err != nil { - return err - } - if err := l.listen(cn, channels...); err != nil { - if err != nil { - l.freeConn(err) - } - return err - } - l.channels = append(l.channels, channels...) - return nil -} - -func (l *Listener) listen(cn *pool.Conn, channels ...string) error { - for _, channel := range channels { - if err := writeQueryMsg(cn.Wr, "LISTEN ?", F(channel)); err != nil { - return err - } - } - return cn.Wr.Flush() -} - -func (l *Listener) Receive() (channel string, payload string, err error) { - return l.ReceiveTimeout(0) -} - -func (l *Listener) ReceiveTimeout(readTimeout time.Duration) (channel, payload string, err error) { - channel, payload, err = l.receiveTimeout(readTimeout) - if err != nil { - l.freeConn(err) - } - return channel, payload, err -} - -func (l *Listener) receiveTimeout(readTimeout time.Duration) (channel, payload string, err error) { - cn, err := l.conn(readTimeout) - if err != nil { - return "", "", err - } - return readNotification(cn) -} - -func (l *Listener) freeConn(err error) (retErr error) { - if !isBadConn(err, true) { - return nil - } - internal.Logf("pg: discarding bad listener connection: %s", err) - return l.closeConn(err) -} - -func (l *Listener) closeConn(err error) (retErr error) { - l.mx.Lock() - if l._cn != nil { - retErr = l.db.pool.Remove(l._cn, err) - l._cn = nil - } - l.mx.Unlock() - return retErr -} - -func (l *Listener) Close() error { - l.mx.Lock() - closed := l.closed - l.closed = true - l.mx.Unlock() - if closed { - return errListenerClosed - } - return l.closeConn(errListenerClosed) -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/delete.go b/Godeps/_workspace/src/gopkg.in/pg.v5/orm/delete.go deleted file mode 100644 index 9b96945..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/delete.go +++ /dev/null @@ -1,23 +0,0 @@ -package orm - -import "gopkg.in/pg.v5/internal" - -func Delete(db DB, model interface{}) error { - res, err := NewQuery(db, model).Delete() - if err != nil { - return err - } - return internal.AssertOneRow(res.RowsAffected()) -} - -type deleteQuery struct { - *Query -} - -var _ QueryAppender = (*deleteQuery)(nil) - -func (del deleteQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error) { - b = append(b, "DELETE FROM "...) 
- b = del.appendTables(b) - return del.mustAppendWhere(b) -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/format.go b/Godeps/_workspace/src/gopkg.in/pg.v5/orm/format.go deleted file mode 100644 index f2b60bd..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/format.go +++ /dev/null @@ -1,166 +0,0 @@ -package orm - -import ( - "bytes" - "strconv" - "strings" - - "gopkg.in/pg.v5/internal/parser" - "gopkg.in/pg.v5/types" -) - -type FormatAppender interface { - AppendFormat([]byte, QueryFormatter) []byte -} - -//------------------------------------------------------------------------------ - -type queryParams struct { - query string - params []interface{} -} - -var _ FormatAppender = (*queryParams)(nil) - -func Q(query string, params ...interface{}) FormatAppender { - return queryParams{query, params} -} - -func (q queryParams) AppendFormat(dst []byte, f QueryFormatter) []byte { - return f.FormatQuery(dst, q.query, q.params...) -} - -//------------------------------------------------------------------------------ - -type fieldAppender struct { - field string -} - -var _ FormatAppender = (*fieldAppender)(nil) - -func (a fieldAppender) AppendFormat(b []byte, f QueryFormatter) []byte { - return types.AppendField(b, a.field, 1) -} - -//------------------------------------------------------------------------------ - -type Formatter struct { - paramsMap map[string]interface{} -} - -func (f *Formatter) SetParam(key string, value interface{}) { - if f.paramsMap == nil { - f.paramsMap = make(map[string]interface{}) - } - f.paramsMap[key] = value -} - -func (f Formatter) Append(dst []byte, src string, params ...interface{}) []byte { - if (params == nil && f.paramsMap == nil) || strings.IndexByte(src, '?') == -1 { - return append(dst, src...) - } - return f.append(dst, parser.NewString(src), params) -} - -func (f Formatter) AppendBytes(dst, src []byte, params ...interface{}) []byte { - if (params == nil && f.paramsMap == nil) || bytes.IndexByte(src, '?') == -1 { - return append(dst, src...) - } - return f.append(dst, parser.New(src), params) -} - -func (f Formatter) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - return f.Append(dst, query, params...) -} - -func (f Formatter) append(dst []byte, p *parser.Parser, params []interface{}) []byte { - var paramsIndex int - var model tableModel - var modelErr error - - for p.Valid() { - b, ok := p.ReadSep('?') - if !ok { - dst = append(dst, b...) - continue - } - if len(b) > 0 && b[len(b)-1] == '\\' { - dst = append(dst, b[:len(b)-1]...) - dst = append(dst, '?') - continue - } - dst = append(dst, b...) - - if id, numeric := p.ReadIdentifier(); id != nil { - if numeric { - idx, err := strconv.Atoi(string(id)) - if err != nil { - goto restore_param - } - - if idx >= len(params) { - goto restore_param - } - - dst = f.appendParam(dst, params[idx]) - continue - } - - if f.paramsMap != nil { - if param, ok := f.paramsMap[string(id)]; ok { - dst = f.appendParam(dst, param) - continue - } - } - - if modelErr != nil { - goto restore_param - } - - if model == nil { - if len(params) == 0 { - goto restore_param - } - - model, modelErr = newTableModel(params[len(params)-1]) - if modelErr != nil { - goto restore_param - } - params = params[:len(params)-1] - } - - dst, ok = model.AppendParam(dst, string(id)) - if ok { - continue - } - - restore_param: - dst = append(dst, '?') - dst = append(dst, id...) 
- continue - } - - if paramsIndex >= len(params) { - dst = append(dst, '?') - continue - } - - param := params[paramsIndex] - paramsIndex++ - - if fa, ok := param.(FormatAppender); ok { - dst = fa.AppendFormat(dst, f) - } else { - dst = types.Append(dst, param, 1) - } - } - - return dst -} - -func (f Formatter) appendParam(b []byte, param interface{}) []byte { - if fa, ok := param.(FormatAppender); ok { - return fa.AppendFormat(b, f) - } - return types.Append(b, param, 1) -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join.go b/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join.go deleted file mode 100644 index bd7aa2f..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join.go +++ /dev/null @@ -1,134 +0,0 @@ -package orm - -import "gopkg.in/pg.v5/types" - -type join struct { - Parent *join - BaseModel tableModel - JoinModel tableModel - Rel *Relation - ApplyQuery func(*Query) (*Query, error) - - Columns []string -} - -func (j *join) JoinHasOne(q *Query) { - if j.hasColumns() { - q.columns = append(q.columns, hasOneColumnsQuery{j}) - } - q.joins = append(q.joins, hasOneJoinQuery{j}) -} - -func (j *join) JoinBelongsTo(q *Query) { - if j.hasColumns() { - q.columns = append(q.columns, hasOneColumnsQuery{j}) - } - q.joins = append(q.joins, belongsToJoinQuery{j}) -} - -func (j *join) Select(db DB) error { - switch j.Rel.Type { - case HasManyRelation: - return j.selectMany(db) - case Many2ManyRelation: - return j.selectM2M(db) - } - panic("not reached") -} - -func (j *join) selectMany(db DB) (err error) { - root := j.JoinModel.Root() - index := j.JoinModel.ParentIndex() - - manyModel := newManyModel(j) - q := NewQuery(db, manyModel) - if j.ApplyQuery != nil { - q, err = j.ApplyQuery(q) - if err != nil { - return err - } - } - - q.columns = append(q.columns, manyColumnsQuery{j}) - - baseTable := j.BaseModel.Table() - cols := columns(j.JoinModel.Table().Alias, "", j.Rel.FKs) - vals := values(root, index, baseTable.PKs) - q = q.Where(`(?) IN (?)`, types.Q(cols), types.Q(vals)) - - if j.Rel.Polymorphic { - q = q.Where( - `? IN (?, ?)`, - types.F(j.Rel.BasePrefix+"type"), - baseTable.ModelName, baseTable.TypeName, - ) - } - - err = q.Select() - if err != nil { - return err - } - - return nil -} - -func (j *join) selectM2M(db DB) (err error) { - index := j.JoinModel.ParentIndex() - - baseTable := j.BaseModel.Table() - m2mCols := columns(j.Rel.M2MTableName, j.Rel.BasePrefix, baseTable.PKs) - m2mVals := values(j.BaseModel.Root(), index, baseTable.PKs) - - m2mModel := newM2MModel(j) - q := NewQuery(db, m2mModel) - if j.ApplyQuery != nil { - q, err = j.ApplyQuery(q) - if err != nil { - return err - } - } - - q.columns = append(q.columns, manyColumnsQuery{j}) - q = q.Join( - "JOIN ? ON (?) IN (?)", - j.Rel.M2MTableName, - types.Q(m2mCols), types.Q(m2mVals), - ) - - joinAlias := j.JoinModel.Table().Alias - for _, pk := range j.JoinModel.Table().PKs { - q = q.Where( - "?.? = ?.?", - joinAlias, pk.ColName, - j.Rel.M2MTableName, types.F(j.Rel.JoinPrefix+pk.SQLName), - ) - } - - err = q.Select() - if err != nil { - return err - } - - return nil -} - -func (j *join) alias() []byte { - var b []byte - return appendAlias(b, j) -} - -func appendAlias(b []byte, j *join) []byte { - if j.Parent != nil { - switch j.Parent.Rel.Type { - case HasOneRelation, BelongsToRelation: - b = appendAlias(b, j.Parent) - } - } - b = append(b, j.Rel.Field.SQLName...) - b = append(b, "__"...) 
- return b -} - -func (q *join) hasColumns() bool { - return len(q.Columns) != 0 || q.Columns == nil -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join_query.go b/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join_query.go deleted file mode 100644 index 06f32db..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/join_query.go +++ /dev/null @@ -1,126 +0,0 @@ -package orm - -import "gopkg.in/pg.v5/types" - -type hasOneJoinQuery struct { - *join -} - -func (q hasOneJoinQuery) AppendFormat(b []byte, f QueryFormatter) []byte { - b = append(b, "LEFT JOIN "...) - b = append(b, q.JoinModel.Table().Name...) - b = append(b, " AS "...) - b = append(b, q.Rel.Field.ColName...) - - joinTable := q.Rel.JoinTable - b = append(b, " ON "...) - for i, fk := range q.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, q.Rel.Field.ColName...) - b = append(b, '.') - b = append(b, joinTable.PKs[i].ColName...) - b = append(b, " = "...) - b = append(b, q.BaseModel.Table().Alias...) - b = append(b, '.') - b = append(b, fk.ColName...) - } - - return b -} - -type hasOneColumnsQuery struct { - *join -} - -func (q hasOneColumnsQuery) AppendFormat(b []byte, f QueryFormatter) []byte { - alias := q.alias() - - if q.Columns == nil { - for i, f := range q.JoinModel.Table().Fields { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, q.Rel.Field.ColName...) - b = append(b, '.') - b = append(b, f.ColName...) - b = append(b, " AS "...) - columnAlias := append(alias, f.SQLName...) - b = types.AppendFieldBytes(b, columnAlias, 1) - alias = columnAlias[:len(alias)] - } - return b - } - - for i, column := range q.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, q.Rel.Field.ColName...) - b = append(b, '.') - b = types.AppendField(b, column, 1) - b = append(b, " AS "...) - columnAlias := append(alias, column...) - b = types.AppendFieldBytes(b, append(alias, column...), 1) - alias = columnAlias[:len(alias)] - } - - return b -} - -type belongsToJoinQuery struct { - *join -} - -func (q belongsToJoinQuery) AppendFormat(b []byte, f QueryFormatter) []byte { - b = append(b, "LEFT JOIN "...) - b = append(b, q.JoinModel.Table().Name...) - b = append(b, " AS "...) - b = append(b, q.Rel.Field.ColName...) - - baseTable := q.BaseModel.Table() - b = append(b, " ON "...) - for i, fk := range q.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, q.Rel.Field.ColName...) - b = append(b, '.') - b = append(b, fk.ColName...) - b = append(b, " = "...) - b = append(b, baseTable.Alias...) - b = append(b, '.') - b = append(b, baseTable.PKs[i].ColName...) - } - - return b -} - -type manyColumnsQuery struct { - *join -} - -func (q manyColumnsQuery) AppendFormat(b []byte, f QueryFormatter) []byte { - if q.Rel.M2MTableName != "" { - b = append(b, q.Rel.M2MTableName...) - b = append(b, ".*, "...) - } - - if q.Columns == nil { - b = append(b, q.JoinModel.Table().Alias...) - b = append(b, ".*"...) - return b - } - - for i, column := range q.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, q.JoinModel.Table().Alias...) 
- b = append(b, '.') - b = types.AppendField(b, column, 1) - } - - return b -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/underscore.go b/Godeps/_workspace/src/gopkg.in/pg.v5/orm/underscore.go deleted file mode 100644 index 75615fc..0000000 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/underscore.go +++ /dev/null @@ -1,35 +0,0 @@ -package orm - -func isUpper(c byte) bool { - return c >= 'A' && c <= 'Z' -} - -func isLower(c byte) bool { - return !isUpper(c) -} - -func toUpper(c byte) byte { - return c - 32 -} - -func toLower(c byte) byte { - return c + 32 -} - -// Underscore converts "CamelCasedString" to "camel_cased_string". -func Underscore(s string) string { - r := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - c := s[i] - if isUpper(c) { - if i-0 > 0 && i+1 < len(s) && (isLower(s[i-1]) || isLower(s[i+1])) { - r = append(r, '_', toLower(c)) - } else { - r = append(r, toLower(c)) - } - } else { - r = append(r, c) - } - } - return string(r) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/LICENSE.txt rename to vendor/github.com/aws/aws-sdk-go/LICENSE.txt diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/NOTICE.txt rename to vendor/github.com/aws/aws-sdk-go/NOTICE.txt diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go rename to vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go rename to vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go similarity index 98% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 4d2a01e..11c52c3 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -106,8 +106,8 @@ func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTer if indexStar || index != nil { nextvals = []reflect.Value{} - for _, value := range values { - value := reflect.Indirect(value) + for _, valItem := range values { + value := reflect.Indirect(valItem) if value.Kind() != 
reflect.Slice { continue } diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go similarity index 95% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index fc38172..710eb43 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -61,6 +61,12 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + nl, id, id2 := "", "", "" if v.Len() > 3 { nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go similarity index 95% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go rename to vendor/github.com/aws/aws-sdk-go/aws/client/client.go index 4003c04..aeeada0 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -2,7 +2,6 @@ package client import ( "fmt" - "io/ioutil" "net/http/httputil" "github.com/aws/aws-sdk-go/aws" @@ -12,9 +11,11 @@ import ( // A Config provides configuration to a service client instance. type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint, SigningRegion string + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string } // ConfigProvider provides a generic way for a service client to receive @@ -104,8 +105,7 @@ func logRequest(r *request.Request) { // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's // Body as a NoOpCloser and will not be reset after read by the HTTP // client reader. 
- r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) + r.ResetBody() } r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go rename to vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go rename to vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go similarity index 92% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go rename to vendor/github.com/aws/aws-sdk-go/aws/config.go index fca9225..d58b812 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -5,6 +5,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" ) // UseServiceDefaultRetries instructs the config to use the service's own @@ -48,6 +49,10 @@ type Config struct { // endpoint for a client. Endpoint *string + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + // The region to send requests to. This parameter is required and must // be configured globally or on a per-client basis unless otherwise // noted. A full list of regions is found in the "Regions and Endpoints" @@ -137,9 +142,6 @@ type Config struct { // accelerate enabled. If the bucket is not enabled for accelerate an error // will be returned. The bucket name must be DNS compatible to also work // with accelerate. - // - // Not compatible with UseDualStack requests will fail if both flags are - // specified. S3UseAccelerate *bool // Set this to `true` to disable the EC2Metadata client from overriding the @@ -185,6 +187,19 @@ type Config struct { // the delay of a request see the aws/client.DefaultRetryer and // aws/request.Retryer. SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess, err := session.NewSession(&aws.Config{DisableRestProtocolURICleaning: aws.Bool(true)) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool } // NewConfig returns a new Config pointer that can be chained with builder @@ -225,6 +240,13 @@ func (c *Config) WithEndpoint(endpoint string) *Config { return c } +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + // WithRegion sets a config Region value returning a Config pointer for // chaining. 
func (c *Config) WithRegion(region string) *Config { @@ -347,6 +369,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.Endpoint = other.Endpoint } + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + if other.Region != nil { dst.Region = other.Region } @@ -406,6 +432,10 @@ func mergeInConfig(dst *Config, other *Config) { if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } } // Copy will return a shallow copy of the Config object. If any additional diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convert_types.go rename to vendor/github.com/aws/aws-sdk-go/aws/convert_types.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go similarity index 97% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 857311f..6efc77b 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -34,7 +34,7 @@ var ( // // Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. // In this example EnvProvider will first check if any credentials are available -// vai the environment variables. If there are none ChainProvider will check +// via the environment variables. If there are none ChainProvider will check // the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go similarity index 98% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go index aa9d689..c397495 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -111,7 +111,7 @@ func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { }, nil } -// A ec2RoleCredRespBody provides the shape for unmarshalling credential +// A ec2RoleCredRespBody provides the shape for unmarshaling credential // request responses. 
type ec2RoleCredRespBody struct { // Success State diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go similarity index 85% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go rename to vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go index 669c813..984407a 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go @@ -3,6 +3,7 @@ package ec2metadata import ( "encoding/json" "fmt" + "net/http" "path" "strings" "time" @@ -27,6 +28,27 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) { return output.Content, req.Send() } +// GetUserData returns the userdata that was configured for the service. If +// there is no user-data setup for the EC2 instance a "NotFoundError" error +// code will be returned. +func (c *EC2Metadata) GetUserData() (string, error) { + op := &request.Operation{ + Name: "GetUserData", + HTTPMethod: "GET", + HTTPPath: path.Join("/", "user-data"), + } + + output := &metadataOutput{} + req := c.NewRequest(op, nil, output) + req.Handlers.UnmarshalError.PushBack(func(r *request.Request) { + if r.HTTPResponse.StatusCode == http.StatusNotFound { + r.Error = awserr.New("NotFoundError", "user-data not found", r.Error) + } + }) + + return output.Content, req.Send() +} + // GetDynamicData uses the path provided to request information from the EC2 // instance metadata service for dynamic data. The content will be returned // as a string, or error if the request failed. 
@@ -111,7 +133,7 @@ func (c *EC2Metadata) Available() bool { return true } -// An EC2IAMInfo provides the shape for unmarshalling +// An EC2IAMInfo provides the shape for unmarshaling // an IAM info from the metadata API type EC2IAMInfo struct { Code string @@ -120,7 +142,7 @@ type EC2IAMInfo struct { InstanceProfileID string } -// An EC2InstanceIdentityDocument provides the shape for unmarshalling +// An EC2InstanceIdentityDocument provides the shape for unmarshaling // an instance identity document type EC2InstanceIdentityDocument struct { DevpayProductCodes []string `json:"devpayProductCodes"` diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go rename to vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go new file mode 100644 index 0000000..74f72de --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go @@ -0,0 +1,133 @@ +package endpoints + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +type modelDefinition map[string]json.RawMessage + +// A DecodeModelOptions are the options for how the endpoints model definition +// are decoded. +type DecodeModelOptions struct { + SkipCustomizations bool +} + +// Set combines all of the option functions together. +func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// DecodeModel unmarshals a Regions and Endpoint model definition file into +// a endpoint Resolver. If the file format is not supported, or an error occurs +// when unmarshaling the model an error will be returned. +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver, err := endpoints.DecodeModel(reader) +// +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) { + var opts DecodeModelOptions + opts.Set(optFns...) + + // Get the version of the partition file to determine what + // unmarshaling model to use. 
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + s, ok := p.Services["s3"] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services["s3"] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000..945f408 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,2002 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. 
+const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + AcmServiceID = "acm" // Acm. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + AppstreamServiceID = "appstream" // Appstream. + Appstream2ServiceID = "appstream2" // Appstream2. + AutoscalingServiceID = "autoscaling" // Autoscaling. + BudgetsServiceID = "budgets" // Budgets. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ConfigServiceID = "config" // Config. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + MonitoringServiceID = "monitoring" // Monitoring. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. 
+ RekognitionServiceID = "rekognition" // Rekognition. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + S3ServiceID = "s3" // S3. + SdbServiceID = "sdb" // Sdb. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Casting the return value of this func to a EnumPartitions will +// allow you to get a list of the partitions in the order the endpoints +// will be resolved in. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultResolver() Resolver { + return defaultPartitions +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. 
California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + 
}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + 
"us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + 
"ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": 
endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + 
"us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3-ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3-eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3-us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3-us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + 
Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "http", "https", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + 
Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + }, + Services: services{ + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: 
[]string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "http", "https", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: 
[]string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 0000000..a0e9bc4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.Name) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.Name) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. 
+//
+//
+// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+// if service == endpoints.S3ServiceID {
+// return endpoints.ResolvedEndpoint{
+// URL: "s3.custom.endpoint.com",
+// SigningRegion: "custom-signing-region",
+// }, nil
+// }
+//
+// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
+// }
+//
+// sess := session.Must(session.NewSession(&aws.Config{
+// Region: aws.String("us-west-2"),
+// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
+// }))
+package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
new file mode 100644
index 0000000..3adec13
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -0,0 +1,369 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Options provide the configuration needed to direct how the
+// endpoints will be resolved.
+type Options struct {
+ // DisableSSL forces the endpoint to be resolved as HTTP
+ // instead of HTTPS if the service supports it.
+ DisableSSL bool
+
+ // Sets the resolver to resolve the endpoint as a dualstack endpoint
+ // for the service. If dualstack support for a service is not known and
+ // StrictMatching is not enabled a dualstack endpoint for the service will
+ // be returned. This endpoint may not be valid. If StrictMatching is
+ // enabled only services that are known to support dualstack will return
+ // dualstack endpoints.
+ UseDualStack bool
+
+ // Enables strict matching of services and regions resolved endpoints.
+ // If the partition doesn't enumerate the exact service and region an
+ // error will be returned. This option will prevent returning endpoints
+ // that look valid, but may not resolve to any real endpoint.
+ StrictMatching bool
+}
+
+// Set combines all of the option functions together.
+func (o *Options) Set(optFns ...func(*Options)) {
+ for _, fn := range optFns {
+ fn(o)
+ }
+}
+
+// DisableSSLOption sets the DisableSSL option. Can be used as a functional
+// option when resolving endpoints.
+func DisableSSLOption(o *Options) {
+ o.DisableSSL = true
+}
+
+// UseDualStackOption sets the UseDualStack option. Can be used as a functional
+// option when resolving endpoints.
+func UseDualStackOption(o *Options) {
+ o.UseDualStack = true
+}
+
+// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
+// option when resolving endpoints.
+func StrictMatchingOption(o *Options) {
+ o.StrictMatching = true
+}
+
+// A Resolver provides the interface for functionality to resolve endpoints.
+// The built in Partition and DefaultResolver return values satisfy this interface.
+type Resolver interface {
+ EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+}
+
+// ResolverFunc is a helper utility that wraps a function so it satisfies the
+// Resolver interface. This is useful when you want to add additional endpoint
+// resolving logic, or stub out specific endpoints with custom values.
+type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
+
+// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
+func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return fn(service, region, opts...)
+}
+
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS schemes to an endpoint URL if there is no
+// scheme. If disableSSL is true, HTTP will be used instead of the default HTTPS.
+//
+// If disableSSL is set, it will only set the URL's scheme if the URL does not
+// contain a scheme.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
+
+// EnumPartitions provides a way to retrieve the underlying partitions that
+// make up the SDK's default Resolver, or any resolver decoded from a model
+// file.
+//
+// Use this interface with DefaultResolver and DecodeModels to get the list of
+// Partitions.
+type EnumPartitions interface {
+ Partitions() []Partition
+}
+
+// A Partition provides the ability to enumerate the partition's regions
+// and services.
+type Partition struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier of the partition.
+func (p *Partition) ID() string { return p.id }
+
+// EndpointFor attempts to resolve the endpoint based on service and region.
+// See Options for information on configuring how the endpoint is resolved.
+//
+// If the service cannot be found in the metadata the UnknownServiceError
+// error will be returned. This validation will occur regardless of whether
+// StrictMatching is enabled.
+//
+// When resolving endpoints you can choose to enable StrictMatching. This will
+// require the provided service and region to be known by the partition.
+// If the endpoint cannot be strictly resolved an error will be returned. This
+// mode is useful to ensure the endpoint resolved is valid. Without
+// StrictMatching enabled the endpoint returned may look valid but may not work.
+// StrictMatching requires the SDK to be updated if you want to take advantage
+// of new regions and service expansions.
+//
+// Errors that can be returned.
+// * UnknownServiceError
+// * UnknownEndpointError
+func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return p.p.EndpointFor(service, region, opts...)
+}
+
+// Regions returns a map of Regions indexed by their ID. This is useful for
+// enumerating over the regions in a partition.
+func (p *Partition) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range p.p.Regions {
+ rs[id] = Region{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return rs
+}
+
+// Services returns a map of Service indexed by their ID. This is useful for
+// enumerating over the services in a partition.
+func (p *Partition) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id := range p.p.Services {
+ ss[id] = Service{
+ id: id,
+ p: p.p,
+ }
+ }
+
+ return ss
+}
+
+// A Region provides information about a region, and the ability to resolve an
+// endpoint from the context of a region, given a service.
+type Region struct {
+ id, desc string
+ p *partition
+}
+
+// ID returns the region's identifier.
+func (r *Region) ID() string { return r.id }
+
+// ResolveEndpoint resolves an endpoint from the context of the region given
+// a service. See Partition.EndpointFor for usage and errors that can be returned.
+func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return r.p.EndpointFor(service, r.id, opts...)
+}
+
+// Services returns a list of all services that are known to be in this region.
+func (r *Region) Services() map[string]Service {
+ ss := map[string]Service{}
+ for id, s := range r.p.Services {
+ if _, ok := s.Endpoints[r.id]; ok {
+ ss[id] = Service{
+ id: id,
+ p: r.p,
+ }
+ }
+ }
+
+ return ss
+}
+
+// A Service provides information about a service, and the ability to resolve an
+// endpoint from the context of a service, given a region.
+type Service struct {
+ id string
+ p *partition
+}
+
+// ID returns the identifier for the service.
+func (s *Service) ID() string { return s.id }
+
+// ResolveEndpoint resolves an endpoint from the context of a service given
+// a region. See Partition.EndpointFor for usage and errors that can be returned.
+func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return s.p.EndpointFor(s.id, region, opts...)
+}
+
+// Endpoints returns a map of Endpoints indexed by their ID for all known
+// endpoints for a service.
+func (s *Service) Endpoints() map[string]Endpoint {
+ es := map[string]Endpoint{}
+ for id := range s.p.Services[s.id].Endpoints {
+ es[id] = Endpoint{
+ id: id,
+ serviceID: s.id,
+ p: s.p,
+ }
+ }
+
+ return es
+}
+
+// An Endpoint provides information about endpoints, and provides the ability
+// to resolve that endpoint for the service, and the region the endpoint
+// represents.
+type Endpoint struct {
+ id string
+ serviceID string
+ p *partition
+}
+
+// ID returns the identifier for an endpoint.
+func (e *Endpoint) ID() string { return e.id }
+
+// ServiceID returns the identifier of the service the endpoint belongs to.
+func (e *Endpoint) ServiceID() string { return e.serviceID }
+
+// ResolveEndpoint resolves an endpoint from the context of a service and
+// region the endpoint represents. See Partition.EndpointFor for usage and
+// errors that can be returned.
+func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+ return e.p.EndpointFor(e.serviceID, e.id, opts...)
+}
+
+// A ResolvedEndpoint is an endpoint that has been resolved based on a partition,
+// service, and region.
+type ResolvedEndpoint struct {
+ // The endpoint URL
+ URL string
+
+ // The region that should be used for signing requests.
+ SigningRegion string
+
+ // The service name that should be used for signing requests.
+ SigningName string
+
+ // The signing method that should be used for signing requests.
+ SigningMethod string
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError awserr.Error
+
+// An EndpointNotFoundError is returned when in StrictMatching mode, and the
+// endpoint for the service and region cannot be found in any of the partitions.
+type EndpointNotFoundError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+}
+
+//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError.
+//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError {
+// return EndpointNotFoundError{
+// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil),
+// Partition: p,
+// Service: s,
+// Region: r,
+// }
+//}
+//
+//// Error returns string representation of the error.
+//func (e EndpointNotFoundError) Error() string {
+// extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+// e.Partition, e.Service, e.Region)
+// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+//}
+//
+//// String returns the string representation of the error.
+//func (e EndpointNotFoundError) String() string {
+// return e.Error()
+//}
+
+// An UnknownServiceError is returned when the service does not resolve to an
+// endpoint. Includes a list of all known services for the partition. Returned
+// when a partition does not support the service.
+type UnknownServiceError struct {
+ awsError
+ Partition string
+ Service string
+ Known []string
+}
+
+// NewUnknownServiceError builds and returns UnknownServiceError.
+func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
+ return UnknownServiceError{
+ awsError: awserr.New("UnknownServiceError",
+ "could not resolve endpoint for unknown service", nil),
+ Partition: p,
+ Service: s,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownServiceError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q",
+ e.Partition, e.Service)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownServiceError) String() string {
+ return e.Error()
+}
+
+// An UnknownEndpointError is returned when in StrictMatching mode and the
+// service is valid, but the region does not resolve to an endpoint. Includes
+// a list of all known endpoints for the service.
+type UnknownEndpointError struct {
+ awsError
+ Partition string
+ Service string
+ Region string
+ Known []string
+}
+
+// NewUnknownEndpointError builds and returns UnknownEndpointError.
+func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
+ return UnknownEndpointError{
+ awsError: awserr.New("UnknownEndpointError",
+ "could not resolve endpoint", nil),
+ Partition: p,
+ Service: s,
+ Region: r,
+ Known: known,
+ }
+}
+
+// Error returns the string representation of the error.
+func (e UnknownEndpointError) Error() string {
+ extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
+ e.Partition, e.Service, e.Region)
+ if len(e.Known) > 0 {
+ extra += fmt.Sprintf(", known: %v", e.Known)
+ }
+ return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
+}
+
+// String returns the string representation of the error.
+func (e UnknownEndpointError) String() string {
+ return e.Error()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
new file mode 100644
index 0000000..6522ce9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
@@ -0,0 +1,301 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+type partitions []partition
+
+func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+ var opt Options
+ opt.Set(opts...)
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) {
+ continue
+ }
+
+ return ps[i].EndpointFor(service, region, opts...)
+ }
+
+ // If loose matching, fall back to the first partition's format to use
+ // when resolving the endpoint.
+ if !opt.StrictMatching && len(ps) > 0 {
+ return ps[0].EndpointFor(service, region, opts...)
+ }
+
+ return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
+}
+
+// Partitions satisfies the EnumPartitions interface and returns a list
+// of Partitions representing each partition represented in the SDK's
+// endpoints model.
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !hasService { + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. + return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. 
+ SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + if len(signingName) == 0 { + signingName = service + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000..1e7369d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,334 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. 
+ DecodeModelOptions DecodeModelOptions +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, + "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . 
}} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Casting the return value of this func to a EnumPartitions will + // allow you to get a list of the partitions in the order the endpoints + // will be resolved in. + // + // resolver := endpoints.DefaultResolver() + // partitions := resolver.(endpoints.EnumPartitions).Partitions() + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultResolver() Resolver { + return defaultPartitions + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . 
-}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/errors.go rename to vendor/github.com/aws/aws-sdk-go/aws/errors.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go rename to vendor/github.com/aws/aws-sdk-go/aws/logger.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/handlers.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000..79f7960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go similarity index 66% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go index da6396d..02f07f4 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -9,7 +9,7 @@ import ( // with retrying requests type offsetReader struct { buf io.ReadSeeker - lock sync.RWMutex + lock sync.Mutex closed bool } @@ -21,7 +21,8 @@ func 
newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { return reader } -// Close is a thread-safe close. Uses the write lock. +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. func (o *offsetReader) Close() error { o.lock.Lock() defer o.lock.Unlock() @@ -29,10 +30,10 @@ func (o *offsetReader) Close() error { return nil } -// Read is a thread-safe read using a read lock. +// Read is a thread-safe read of the underlying io.ReadSeeker func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.RLock() - defer o.lock.RUnlock() + o.lock.Lock() + defer o.lock.Unlock() if o.closed { return 0, io.EOF @@ -41,6 +42,14 @@ func (o *offsetReader) Read(p []byte) (int, error) { return o.buf.Read(p) } +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + // CloseAndCopy will return a new offsetReader with a copy of the old buffer // and close the old buffer. func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go similarity index 62% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 2832aaa..77312bb 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -4,7 +4,7 @@ import ( "bytes" "fmt" "io" - "io/ioutil" + "net" "net/http" "net/url" "reflect" @@ -42,6 +42,12 @@ type Request struct { LastSignedAt time.Time built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and rewraps the input Body for each HTTP request. + safeBody *offsetReader } // An Operation is the service API operation to be made. @@ -50,6 +56,8 @@ type Operation struct { Name string HTTPMethod string HTTPPath string *Paginator + + BeforePresignFn func(r *Request) error } // Paginator keeps track of pagination configuration for an API operation. @@ -135,8 +143,8 @@ func (r *Request) SetStringBody(s string) { // SetReaderBody will set the request's body reader. func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = newOffsetReader(reader, 0) r.Body = reader + r.ResetBody() } // Presign returns the request's signed URL. Error will be returned @@ -144,6 +152,15 @@ func (r *Request) SetReaderBody(reader io.ReadSeeker) { func (r *Request) Presign(expireTime time.Duration) (string, error) { r.ExpireTime = expireTime r.NotHoist = false + + if r.Operation.BeforePresignFn != nil { + r = r.copy() + err := r.Operation.BeforePresignFn(r) + if err != nil { + return "", err + } + } + r.Sign() if r.Error != nil { return "", r.Error @@ -220,6 +237,99 @@ func (r *Request) Sign() error { return r.Error } +// ResetBody rewinds the request body back to its starting position, and +// sets the HTTP Request body reference. When the body is read prior +// to being sent in the HTTP request it will need to be rewound.
+func (r *Request) ResetBody() { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. + // + // Related golang/go#18257 + l, err := computeBodyLength(r.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to compute request body size", err) + return + } + + if l == 0 { + r.HTTPRequest.Body = noBodyReader + } else if l > 0 { + r.HTTPRequest.Body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if an aws.ReaderSeekerCloser was used with + // an io.Reader that was not also an io.Seeker. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + r.HTTPRequest.Body = noBodyReader + default: + r.HTTPRequest.Body = r.safeBody + } + } +} + +// Attempts to compute the length of the body of the reader using the +// io.Seeker interface. If the value is not seekable because of being +// a ReaderSeekerCloser without an underlying Seeker, -1 will be returned. +// If no error occurs the length of the body will be returned. +func computeBodyLength(r io.ReadSeeker) (int64, error) { + seekable := true + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that an io.Reader might not actually be seekable. + switch v := r.(type) { + case aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + case *aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + } + if !seekable { + return -1, nil + } + + curOffset, err := r.Seek(0, 1) + if err != nil { + return 0, err + } + + endOffset, err := r.Seek(0, 2) + if err != nil { + return 0, err + } + + _, err = r.Seek(curOffset, 0) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + // Send will send the request returning error if errors are encountered. // // Send will sign the request prior to sending. All Send Handlers will @@ -231,6 +341,8 @@ func (r *Request) Sign() error { // // readLoop() and getConn(req *Request, cm connectMethod) // https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body.
func (r *Request) Send() error { for { if aws.BoolValue(r.Retryable) { @@ -239,21 +351,15 @@ func (r *Request) Send() error { r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) } - var body io.ReadCloser - if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { - body = reader.CloseAndCopy(r.BodyStart) - } else { - if r.Config.Logger != nil { - r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") - } - r.Body.Seek(r.BodyStart, 0) - body = ioutil.NopCloser(r.Body) - } + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) + // Closing response body to ensure that no response body is leaked + // between retry attempts. if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - // Closing response body. Since we are setting a new request to send off, this - // response will get squashed and leaked. r.HTTPResponse.Body.Close() } } @@ -267,7 +373,7 @@ func (r *Request) Send() error { r.Handlers.Send.Run(r) if r.Error != nil { - if strings.Contains(r.Error.Error(), "net/http: request canceled") { + if !shouldRetryCancel(r) { return r.Error } @@ -281,7 +387,6 @@ func (r *Request) Send() error { debugLogReqError(r, "Send Request", true, err) continue } - r.Handlers.UnmarshalMeta.Run(r) r.Handlers.ValidateResponse.Run(r) if r.Error != nil { @@ -316,6 +421,17 @@ func (r *Request) Send() error { return nil } +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + // AddToUserAgent adds the string to the end of the request's current user agent. func AddToUserAgent(r *Request, s string) { curUA := r.HTTPRequest.Header.Get("User-Agent") @@ -324,3 +440,26 @@ func AddToUserAgent(r *Request, s string) { } r.HTTPRequest.Header.Set("User-Agent", s) } + +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. + return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 0000000..1323af9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,21 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. 
+// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// Is an empty reader that will trigger the Go HTTP client to not include +// a body in the HTTP request. +var noBodyReader = noBody{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go new file mode 100644 index 0000000..8b963f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go @@ -0,0 +1,9 @@ +// +build go1.8 + +package request + +import "net/http" + +// Is an http.NoBody reader instructing the Go HTTP client to not include +// a body in the HTTP request. +var noBodyReader = http.NoBody diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go similarity index 98% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go index 8cc8b01..ebd60cc 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -38,6 +38,7 @@ var throttleCodes = map[string]struct{}{ "RequestThrottled": {}, "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 } // credsExpiredCodes is a collection of error codes which signify the credentials diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go similarity index 100% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request/validation.go rename to vendor/github.com/aws/aws-sdk-go/aws/request/validation.go diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go similarity index 86% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go rename to vendor/github.com/aws/aws-sdk-go/aws/types.go index fa014b4..9ca685e 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -5,7 +5,13 @@ import ( "sync" ) -// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser +// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Wrapping a +// non-seekable reader may cause request signature errors, or request bodies +// not being sent for GET, HEAD and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { return ReaderSeekerCloser{r} } @@ -44,6 +50,12 @@ func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { return int64(0), nil } +// IsSeeker returns if the underlying reader is also a seeker.
+func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + // Close closes the ReaderSeekerCloser. // // If the ReaderSeekerCloser is not an io.Closer nothing will be done. diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go similarity index 87% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go rename to vendor/github.com/aws/aws-sdk-go/aws/version.go index 01bd81a..efd2812 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.4.8" +const SDKVersion = "1.6.10" diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE similarity index 92% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/LICENSE rename to vendor/github.com/davecgh/go-spew/LICENSE index bb67332..c836416 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,6 +1,6 @@ ISC License -Copyright (c) 2012-2013 Dave Collins +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go similarity index 98% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypass.go rename to vendor/github.com/davecgh/go-spew/spew/bypass.go index d42a0bc..8a4a658 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go similarity index 96% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypasssafe.go rename to vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index e47a4e7..1fe3cf3 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go similarity index 99% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go rename to vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc..7c519ff 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git 
a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go similarity index 96% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go rename to vendor/github.com/davecgh/go-spew/spew/config.go index 5552827..2e3d22f 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -67,6 +67,15 @@ type ConfigState struct { // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go similarity index 94% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go rename to vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c40..aacaac6 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. 
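The doc.go hunk above only describes the two new go-spew `ConfigState` fields in prose. As an editor's aside (not part of the vendored diff; the struct and field values below are assumptions for illustration), a minimal sketch of how `DisablePointerAddresses` and `DisableCapacities` might be used when diffing structures in tests:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// A ConfigState that suppresses pointer addresses and capacities,
	// which keeps dumps stable across runs when diffing in tests.
	cs := spew.ConfigState{
		Indent:                  " ",
		DisablePointerAddresses: true,
		DisableCapacities:       true,
	}

	v := struct {
		Name string
		Tags []string
	}{Name: "sql-runner", Tags: []string{"redshift", "postgres"}}

	// Without the two options the output would include (0x...) addresses
	// and (cap=N) annotations; with them the dump is deterministic.
	fmt.Print(cs.Sdump(&v))
}
```

With both flags set, the dump omits the address and capacity noise, which is the test-diffing use case the vendored comment mentions.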
diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go similarity index 98% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go rename to vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e..df1d582 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. - if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go similarity index 99% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go rename to vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80..c49875b 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go similarity index 99% rename from Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go rename to vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f5..32c0e33 100644 --- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/.gitignore rename to vendor/github.com/go-ini/ini/.gitignore diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/.travis.yml rename to vendor/github.com/go-ini/ini/.travis.yml diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/LICENSE rename to vendor/github.com/go-ini/ini/LICENSE diff --git 
a/Godeps/_workspace/src/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/Makefile rename to vendor/github.com/go-ini/ini/Makefile diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md similarity index 92% rename from Godeps/_workspace/src/github.com/go-ini/ini/README.md rename to vendor/github.com/go-ini/ini/README.md index a939d75..22a4234 100644 --- a/Godeps/_workspace/src/github.com/go-ini/ini/README.md +++ b/vendor/github.com/go-ini/ini/README.md @@ -9,7 +9,7 @@ Package ini provides INI file read and write functionality in Go. ## Feature -- Load multiple data sources(`[]byte` or file) with overwrites. +- Load multiple data sources (`[]byte`, file and `io.ReadCloser`) with overwrites. - Read with recursion values. - Read with parent-child sections. - Read with auto-increment key names. @@ -44,10 +44,10 @@ Please add `-u` flag to update in the future. ### Loading from data sources -A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many data sources as you want**. Passing other types will simply return an error. +A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` Or start with an empty object: @@ -106,6 +106,16 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) The values of those keys are always `true`, and when you save to a file, they will be kept in the same format as you read. +#### Comment + +Take care that the following formats will be treated as comments: + +1. Line begins with `#` or `;` +2. Words after `#` or `;` +3. Words after section name (i.e. words after `[some section name]`) + +If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```. + ### Working with sections To get a section, you would need to: @@ -123,7 +133,7 @@ section, err := cfg.GetSection("") When you're pretty sure the section exists, the following code could make your life easier: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
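To make the README material in the hunk above concrete, here is a small editor-added sketch (not part of the vendored diff; the key names and data are assumptions) exercising the comment rules and the auto-created section behavior it describes:

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	// Assumed example data: `;` starts a comment, while backtick quoting
	// (described in the README hunk above) keeps `#` inside the value.
	src := []byte("key = secret ; trailing comment\nquoted = `secret#kept`")

	cfg, err := ini.Load(src)
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.Section("").Key("key").String())    // secret
	fmt.Println(cfg.Section("").Key("quoted").String()) // secret#kept

	// Asking for a section that does not exist simply creates an empty one,
	// as the README notes, so this never panics.
	fmt.Println(cfg.Section("missing").KeyStrings()) // []
}
```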
@@ -400,6 +410,12 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + ## Advanced Usage ### Recursive Values @@ -447,6 +463,21 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] ``` +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + ### Auto-increment Key Names If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md similarity index 91% rename from Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md rename to vendor/github.com/go-ini/ini/README_ZH.md index 2178e47..3b4fb66 100644 --- a/Godeps/_workspace/src/github.com/go-ini/ini/README_ZH.md +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -2,7 +2,7 @@ ## 功能特性 -- 支持覆盖加载多个数据源(`[]byte` 或文件) +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) - 支持递归读取键值 - 支持读取父子分区 - 支持读取自增键名 @@ -37,10 +37,10 @@ ### 从数据源加载 -一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 ```go -cfg, err := ini.Load([]byte("raw data"), "filename") +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) ``` 或者从一个空白的文件开始: @@ -99,6 +99,16 @@ cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) 这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + ### 操作分区(Section) 获取指定分区: @@ -116,7 +126,7 @@ section, err := cfg.GetSection("") 当您非常确定某个分区是存在的,可以使用以下简便方法: ```go -section := cfg.Section("") +section := cfg.Section("section name") ``` 如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 @@ -393,9 +403,15 @@ cfg.WriteTo(writer) cfg.WriteToIndent(writer, "\t") ``` -### 高级用法 +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: -#### 递归读取键值 +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 @@ -415,7 +431,7 @@ cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini ``` -#### 读取父子分区 +### 读取父子分区 您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 @@ -440,7 +456,22 @@ cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] ``` -#### 读取自增键名 +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/error.go rename to vendor/github.com/go-ini/ini/error.go diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go similarity index 92% rename from Godeps/_workspace/src/github.com/go-ini/ini/ini.go rename to vendor/github.com/go-ini/ini/ini.go index cd065e7..50abd81 100644 --- a/Godeps/_workspace/src/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "os" "regexp" "runtime" @@ -36,7 +37,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.21.1" + _VERSION = "1.23.0" ) // Version returns current package version literal. @@ -108,7 +109,16 @@ type sourceData struct { } func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return &bytesReadCloser{bytes.NewReader(s.data)}, nil + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. +type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil } // File represents a combination of a or more INI file(s) in memory. @@ -149,6 +159,8 @@ func parseDataSource(source interface{}) (dataSource, error) { return sourceFile{s}, nil case []byte: return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil default: return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) } @@ -164,6 +176,9 @@ type LoadOptions struct { // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. // This type of keys are mostly used in my.cnf. 
AllowBooleanKeys bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string } func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { @@ -233,6 +248,18 @@ func (f *File) NewSection(name string) (*Section, error) { return f.sections[name], nil } +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + // NewSections creates a list of sections. func (f *File) NewSections(names ...string) (err error) { for _, name := range names { @@ -386,6 +413,13 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { } } + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + // Count and generate alignment length and buffer spaces using the // longest key. Keys may be modifed if they contain certain characters so // we need to take that into account in our calculation. diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/key.go rename to vendor/github.com/go-ini/ini/key.go diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go similarity index 89% rename from Godeps/_workspace/src/github.com/go-ini/ini/parser.go rename to vendor/github.com/go-ini/ini/parser.go index dc6df87..b0aabe3 100644 --- a/Godeps/_workspace/src/github.com/go-ini/ini/parser.go +++ b/vendor/github.com/go-ini/ini/parser.go @@ -48,16 +48,31 @@ func newParser(r io.Reader) *parser { } } -// BOM handles header of BOM-UTF8 format. +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. 
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding func (p *parser) BOM() error { - mask, err := p.buf.Peek(3) + mask, err := p.buf.Peek(2) if err != nil && err != io.EOF { return err - } else if len(mask) < 3 { + } else if len(mask) < 2 { return nil - } else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 { + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } } return nil } @@ -235,6 +250,7 @@ func (f *File) parse(reader io.Reader) (err error) { section, _ := f.NewSection(DEFAULT_SECTION) var line []byte + var inUnparseableSection bool for !p.isEOF { line, err = p.readUntil('\n') if err != nil { @@ -280,6 +296,21 @@ func (f *File) parse(reader io.Reader) (err error) { // Reset aotu-counter and comments p.comment.Reset() p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) continue } diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go similarity index 90% rename from Godeps/_workspace/src/github.com/go-ini/ini/section.go rename to vendor/github.com/go-ini/ini/section.go index bbb73ca..45d2f3b 100644 --- a/Godeps/_workspace/src/github.com/go-ini/ini/section.go +++ b/vendor/github.com/go-ini/ini/section.go @@ -28,10 +28,19 @@ type Section struct { keys map[string]*Key keyList []string keysHash map[string]string + + isRawSection bool + rawBody string } func newSection(f *File, name string) *Section { - return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)} + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } } // Name returns name of Section. @@ -39,6 +48,12 @@ func (s *Section) Name() string { return s.name } +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + // NewKey creates a new key to given section. 
func (s *Section) NewKey(name, val string) (*Key, error) { if len(name) == 0 { diff --git a/Godeps/_workspace/src/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go similarity index 100% rename from Godeps/_workspace/src/github.com/go-ini/ini/struct.go rename to vendor/github.com/go-ini/ini/struct.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/LICENSE b/vendor/github.com/hashicorp/consul/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/LICENSE rename to vendor/github.com/hashicorp/consul/LICENSE diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md rename to vendor/github.com/hashicorp/consul/api/README.md diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go rename to vendor/github.com/hashicorp/consul/api/acl.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go similarity index 89% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go rename to vendor/github.com/hashicorp/consul/api/agent.go index 87a6c10..1893d1c 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -1,6 +1,7 @@ package api import ( + "bufio" "fmt" ) @@ -73,6 +74,8 @@ type AgentServiceCheck struct { HTTP string `json:",omitempty"` TCP string `json:",omitempty"` Status string `json:",omitempty"` + Notes string `json:",omitempty"` + TLSSkipVerify bool `json:",omitempty"` // In Consul 0.7 and later, checks that are associated with a service // may also contain this optional DeregisterCriticalServiceAfter field, @@ -114,6 +117,17 @@ func (a *Agent) Self() (map[string]map[string]interface{}, error) { return out, nil } +// Reload triggers a configuration reload for the agent we are connected to. 
+func (a *Agent) Reload() error { + r := a.c.newRequest("PUT", "/v1/agent/reload") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + // NodeName is used to get the node name of the agent func (a *Agent) NodeName() (string, error) { if a.nodeName != "" { @@ -345,6 +359,17 @@ func (a *Agent) Join(addr string, wan bool) error { return nil } +// Leave is used to have the agent gracefully leave the cluster and shutdown +func (a *Agent) Leave() error { + r := a.c.newRequest("PUT", "/v1/agent/leave") + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + // ForceLeave is used to have the agent eject a failed node func (a *Agent) ForceLeave(node string) error { r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) @@ -409,3 +434,38 @@ func (a *Agent) DisableNodeMaintenance() error { resp.Body.Close() return nil } + +// Monitor returns a channel which will receive streaming logs from the agent +// Providing a non-nil stopCh can be used to close the connection and stop the +// log stream +func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) { + r := a.c.newRequest("GET", "/v1/agent/monitor") + r.setQueryOptions(q) + if loglevel != "" { + r.params.Add("loglevel", loglevel) + } + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return nil, err + } + + logCh := make(chan string, 64) + go func() { + defer resp.Body.Close() + + scanner := bufio.NewScanner(resp.Body) + for { + select { + case <-stopCh: + close(logCh) + return + default: + } + if scanner.Scan() { + logCh <- scanner.Text() + } + } + }() + + return logCh, nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go similarity index 90% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go rename to vendor/github.com/hashicorp/consul/api/api.go index dd811fd..9a59b72 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -20,6 +20,28 @@ import ( "github.com/hashicorp/go-cleanhttp" ) +const ( + // HTTPAddrEnvName defines an environment variable name which sets + // the HTTP address if there is no -http-addr specified. + HTTPAddrEnvName = "CONSUL_HTTP_ADDR" + + // HTTPTokenEnvName defines an environment variable name which sets + // the HTTP token. + HTTPTokenEnvName = "CONSUL_HTTP_TOKEN" + + // HTTPAuthEnvName defines an environment variable name which sets + // the HTTP authentication header. + HTTPAuthEnvName = "CONSUL_HTTP_AUTH" + + // HTTPSSLEnvName defines an environment variable name which sets + // whether or not to use HTTPS. + HTTPSSLEnvName = "CONSUL_HTTP_SSL" + + // HTTPSSLVerifyEnvName defines an environment variable name which sets + // whether or not to disable certificate checking. + HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY" +) + // QueryOptions are used to parameterize a query type QueryOptions struct { // Providing a datacenter overwrites the DC provided @@ -52,6 +74,11 @@ type QueryOptions struct { // that node. Setting this to "_agent" will use the agent's node // for the sort. Near string + + // NodeMeta is used to filter results by nodes with the given + // metadata key/value pairs. Currently, only one key/value pair can + // be provided for filtering. 
+ NodeMeta map[string]string } // WriteOptions are used to parameterize a write @@ -181,15 +208,15 @@ func defaultConfig(transportFn func() *http.Transport) *Config { }, } - if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { + if addr := os.Getenv(HTTPAddrEnvName); addr != "" { config.Address = addr } - if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { + if token := os.Getenv(HTTPTokenEnvName); token != "" { config.Token = token } - if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { + if auth := os.Getenv(HTTPAuthEnvName); auth != "" { var username, password string if strings.Contains(auth, ":") { split := strings.SplitN(auth, ":", 2) @@ -205,10 +232,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { } } - if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { + if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" { enabled, err := strconv.ParseBool(ssl) if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err) } if enabled { @@ -216,10 +243,10 @@ func defaultConfig(transportFn func() *http.Transport) *Config { } } - if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { + if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" { doVerify, err := strconv.ParseBool(verify) if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) + log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err) } if !doVerify { @@ -364,6 +391,11 @@ func (r *request) setQueryOptions(q *QueryOptions) { if q.Near != "" { r.params.Set("near", q.Near) } + if len(q.NodeMeta) > 0 { + for key, value := range q.NodeMeta { + r.params.Add("node-meta", key+":"+value) + } + } } // durToMsec converts a duration to a millisecond specified string. 
If the diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go similarity index 96% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go rename to vendor/github.com/hashicorp/consul/api/catalog.go index 337772e..10e93b4 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -4,18 +4,22 @@ type Node struct { Node string Address string TaggedAddresses map[string]string + Meta map[string]string } type CatalogService struct { Node string Address string TaggedAddresses map[string]string + NodeMeta map[string]string ServiceID string ServiceName string ServiceAddress string ServiceTags []string ServicePort int ServiceEnableTagOverride bool + CreateIndex uint64 + ModifyIndex uint64 } type CatalogNode struct { @@ -27,6 +31,7 @@ type CatalogRegistration struct { Node string Address string TaggedAddresses map[string]string + NodeMeta map[string]string Datacenter string Service *AgentService Check *AgentCheck diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/coordinate.go rename to vendor/github.com/hashicorp/consul/api/coordinate.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go rename to vendor/github.com/hashicorp/consul/api/event.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go similarity index 64% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go rename to vendor/github.com/hashicorp/consul/api/health.go index 74da949..8abe239 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -2,6 +2,7 @@ package api import ( "fmt" + "strings" ) const ( @@ -11,6 +12,15 @@ const ( HealthPassing = "passing" HealthWarning = "warning" HealthCritical = "critical" + HealthMaint = "maintenance" +) + +const ( + // NodeMaint is the special key set by a node in maintenance mode. + NodeMaint = "_node_maintenance" + + // ServiceMaintPrefix is the prefix for a service in maintenance mode. + ServiceMaintPrefix = "_service_maintenance:" ) // HealthCheck is used to represent a single check @@ -25,11 +35,56 @@ type HealthCheck struct { ServiceName string } +// HealthChecks is a collection of HealthCheck structs. +type HealthChecks []*HealthCheck + +// AggregatedStatus returns the "best" status for the list of health checks. 
+// Because a given entry may have many service and node-level health checks +// attached, this function determines the best representative of the status as +// as single string using the following heuristic: +// +// maintenance > critical > warning > passing +// +func (c HealthChecks) AggregatedStatus() string { + var passing, warning, critical, maintenance bool + for _, check := range c { + id := string(check.CheckID) + if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) { + maintenance = true + continue + } + + switch check.Status { + case HealthPassing: + passing = true + case HealthWarning: + warning = true + case HealthCritical: + critical = true + default: + return "" + } + } + + switch { + case maintenance: + return HealthMaint + case critical: + return HealthCritical + case warning: + return HealthWarning + case passing: + return HealthPassing + default: + return HealthPassing + } +} + // ServiceEntry is used for the health service endpoint type ServiceEntry struct { Node *Node Service *AgentService - Checks []*HealthCheck + Checks HealthChecks } // Health can be used to query the Health endpoints @@ -43,7 +98,7 @@ func (c *Client) Health() *Health { } // Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/health/node/"+node) r.setQueryOptions(q) rtt, resp, err := requireOK(h.c.doRequest(r)) @@ -56,7 +111,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -64,7 +119,7 @@ func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, } // Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/health/checks/"+service) r.setQueryOptions(q) rtt, resp, err := requireOK(h.c.doRequest(r)) @@ -77,7 +132,7 @@ func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *Query parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } @@ -115,7 +170,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) // State is used to retrieve all the checks in a given state. // The wildcard "any" state can also be used for all checks. 
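The new `AggregatedStatus` helper collapses the per-check statuses into a single value using the maintenance > critical > warning > passing ordering described above. A brief consumer-side sketch (the "web" service name is a placeholder):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Checks now returns the HealthChecks slice type rather than
	// []*HealthCheck, so the aggregation helper is available directly
	// on the result.
	checks, _, err := client.Health().Checks("web", nil)
	if err != nil {
		log.Fatal(err)
	}

	switch checks.AggregatedStatus() {
	case api.HealthMaint:
		fmt.Println("service is in maintenance mode")
	case api.HealthCritical, api.HealthWarning:
		fmt.Println("service is degraded")
	default:
		fmt.Println("service is passing")
	}
}
```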
-func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { +func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) { switch state { case HealthAny: case HealthWarning: @@ -136,7 +191,7 @@ func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMet parseQueryMeta(resp, qm) qm.RequestTime = rtt - var out []*HealthCheck + var out HealthChecks if err := decodeBody(resp, &out); err != nil { return nil, nil, err } diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go similarity index 87% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go rename to vendor/github.com/hashicorp/consul/api/kv.go index 3dac258..44e06bb 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -11,13 +11,35 @@ import ( // KVPair is used to represent a single K/V entry type KVPair struct { - Key string + // Key is the name of the key. It is also part of the URL path when accessed + // via the API. + Key string + + // CreateIndex holds the index corresponding the creation of this KVPair. This + // is a read-only field. CreateIndex uint64 + + // ModifyIndex is used for the Check-And-Set operations and can also be fed + // back into the WaitIndex of the QueryOptions in order to perform blocking + // queries. ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Value []byte - Session string + + // LockIndex holds the index corresponding to a lock on this key, if any. This + // is a read-only field. + LockIndex uint64 + + // Flags are any user-defined flags on the key. It is up to the implementer + // to check these values, since Consul does not treat them specially. + Flags uint64 + + // Value is the value for the key. This can be any value, but it will be + // base64 encoded upon transport. + Value []byte + + // Session is a string representing the ID of the session. Any other + // interactions with this key over the same session must specify the same + // session ID. + Session string } // KVPairs is a list of KVPair objects @@ -28,21 +50,21 @@ type KVOp string const ( KVSet KVOp = "set" - KVDelete = "delete" - KVDeleteCAS = "delete-cas" - KVDeleteTree = "delete-tree" - KVCAS = "cas" - KVLock = "lock" - KVUnlock = "unlock" - KVGet = "get" - KVGetTree = "get-tree" - KVCheckSession = "check-session" - KVCheckIndex = "check-index" + KVDelete KVOp = "delete" + KVDeleteCAS KVOp = "delete-cas" + KVDeleteTree KVOp = "delete-tree" + KVCAS KVOp = "cas" + KVLock KVOp = "lock" + KVUnlock KVOp = "unlock" + KVGet KVOp = "get" + KVGetTree KVOp = "get-tree" + KVCheckSession KVOp = "check-session" + KVCheckIndex KVOp = "check-index" ) // KVTxnOp defines a single operation inside a transaction. type KVTxnOp struct { - Verb string + Verb KVOp Key string Value []byte Flags uint64 @@ -70,7 +92,8 @@ func (c *Client) KV() *KV { return &KV{c} } -// Get is used to lookup a single key +// Get is used to lookup a single key. The returned pointer +// to the KVPair will be nil if the key does not exist. 
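Two consumer-visible effects of the kv.go changes are worth noting: `Get` now documents a nil `*KVPair` for missing keys, and the operation verbs are a distinct `KVOp` type, so transaction operations are built with typed constants rather than bare strings. A rough sketch with placeholder key names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Get returns a nil *KVPair (not an error) when the key does not exist.
	pair, _, err := kv.Get("app/missing-key", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair == nil {
		fmt.Println("key not present")
	}

	// Transaction operations now carry a typed verb.
	op := &api.KVTxnOp{
		Verb:  api.KVSet,
		Key:   "app/config",
		Value: []byte("enabled"),
	}
	_ = op // intended to be passed to KV.Txn along with other operations
}
```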
func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { resp, qm, err := k.getInternal(key, nil, q) if err != nil { @@ -133,7 +156,7 @@ func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMe } func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+key) + r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/")) r.setQueryOptions(q) for param, val := range params { r.params.Set(param, val) @@ -254,7 +277,7 @@ func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { } func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+key) + r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/")) r.setWriteOptions(q) for param, val := range params { r.params.Set(param, val) diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go similarity index 97% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go rename to vendor/github.com/hashicorp/consul/api/lock.go index 08e8e79..9f9845a 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -72,8 +72,9 @@ type LockOptions struct { Key string // Must be set and have write permissions Value []byte // Optional, value to associate with the lock Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL + SessionOpts *SessionEntry // Optional, options to use when creating a session + SessionName string // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given) + SessionTTL string // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given) MonitorRetries int // Optional, defaults to 0 which means no retries MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime @@ -329,9 +330,12 @@ func (l *Lock) Destroy() error { // createSession is used to create a new managed session func (l *Lock) createSession() (string, error) { session := l.c.Session() - se := &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, + se := l.opts.SessionOpts + if se == nil { + se = &SessionEntry{ + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + } } id, _, err := session.Create(se, nil) if err != nil { diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go similarity index 53% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/operator.go rename to vendor/github.com/hashicorp/consul/api/operator.go index 48d74f3..a8d04a3 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/operator.go +++ b/vendor/github.com/hashicorp/consul/api/operator.go @@ -43,6 +43,26 @@ type RaftConfiguration struct { Index uint64 } +// keyringRequest is used for performing Keyring operations +type keyringRequest struct { + Key string +} + +// KeyringResponse is returned when listing the gossip encryption keys +type KeyringResponse struct { + // Whether this response is for a WAN ring + WAN bool + + // The datacenter name this request corresponds to + Datacenter string + + // A map of 
the encryption keys to the number of nodes they're installed on + Keys map[string]int + + // The total number of nodes in this ring + NumNodes int +} + // RaftGetConfiguration is used to query the current Raft peer set. func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) { r := op.c.newRequest("GET", "/v1/operator/raft/configuration") @@ -79,3 +99,65 @@ func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) err resp.Body.Close() return nil } + +// KeyringInstall is used to install a new gossip encryption key into the cluster +func (op *Operator) KeyringInstall(key string, q *WriteOptions) error { + r := op.c.newRequest("POST", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringList is used to list the gossip keys installed in the cluster +func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) { + r := op.c.newRequest("GET", "/v1/operator/keyring") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out []*KeyringResponse + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + return out, nil +} + +// KeyringRemove is used to remove a gossip encryption key from the cluster +func (op *Operator) KeyringRemove(key string, q *WriteOptions) error { + r := op.c.newRequest("DELETE", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +// KeyringUse is used to change the active gossip encryption key +func (op *Operator) KeyringUse(key string, q *WriteOptions) error { + r := op.c.newRequest("PUT", "/v1/operator/keyring") + r.setWriteOptions(q) + r.obj = keyringRequest{ + Key: key, + } + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go similarity index 96% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/prepared_query.go rename to vendor/github.com/hashicorp/consul/api/prepared_query.go index 63e741e..876e2e3 100644 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/prepared_query.go +++ b/vendor/github.com/hashicorp/consul/api/prepared_query.go @@ -167,19 +167,18 @@ func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDe } // Delete is used to delete a specific prepared query. -func (c *PreparedQuery) Delete(queryID string, q *QueryOptions) (*QueryMeta, error) { +func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) { r := c.c.newRequest("DELETE", "/v1/query/"+queryID) - r.setQueryOptions(q) + r.setWriteOptions(q) rtt, resp, err := requireOK(c.c.doRequest(r)) if err != nil { return nil, err } defer resp.Body.Close() - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - return qm, nil + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil } // Execute is used to execute a specific prepared query. 
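The keyring methods added above map onto the `/v1/operator/keyring` endpoint verbs (POST install, GET list, PUT use, DELETE remove). A sketch of rotating the gossip encryption key from client code follows; the key value is a placeholder, not a real key.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Placeholder base64-encoded gossip key for illustration only.
	newKey := "pUqJrVyVRj5jsiYEkM/tFQ=="

	// Install the key on all members, then make it the active key.
	if err := op.KeyringInstall(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		log.Fatal(err)
	}

	// List shows how widely each key is installed, per ring and datacenter.
	rings, err := op.KeyringList(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ring := range rings {
		fmt.Printf("dc=%s wan=%v nodes=%d keys=%v\n",
			ring.Datacenter, ring.WAN, ring.NumNodes, ring.Keys)
	}
}
```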
You can execute using diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go rename to vendor/github.com/hashicorp/consul/api/raw.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go rename to vendor/github.com/hashicorp/consul/api/semaphore.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go rename to vendor/github.com/hashicorp/consul/api/session.go diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go new file mode 100644 index 0000000..e902377 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/snapshot.go @@ -0,0 +1,47 @@ +package api + +import ( + "io" +) + +// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of +// Consul's internal state and restore snapshots for disaster recovery. +type Snapshot struct { + c *Client +} + +// Snapshot returns a handle that exposes the snapshot endpoints. +func (c *Client) Snapshot() *Snapshot { + return &Snapshot{c} +} + +// Save requests a new snapshot and provides an io.ReadCloser with the snapshot +// data to save. If this doesn't return an error, then it's the responsibility +// of the caller to close it. Only a subset of the QueryOptions are supported: +// Datacenter, AllowStale, and Token. +func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) { + r := s.c.newRequest("GET", "/v1/snapshot") + r.setQueryOptions(q) + + rtt, resp, err := requireOK(s.c.doRequest(r)) + if err != nil { + return nil, nil, err + } + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + return resp.Body, qm, nil +} + +// Restore streams in an existing snapshot and attempts to restore it. 
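Together with the `Restore` method that immediately follows, the new snapshot endpoints support a simple backup/restore flow; note that `Save` hands ownership of the `io.ReadCloser` to the caller. A sketch with a placeholder file path:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Save streams the snapshot; the caller must close the reader.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("consul-state.snap") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}

	// Restoring later would re-open the file and stream it back in:
	//   in, _ := os.Open("consul-state.snap")
	//   err = snap.Restore(nil, in)
}
```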
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error { + r := s.c.newRequest("PUT", "/v1/snapshot") + r.body = in + r.setWriteOptions(q) + _, _, err := requireOK(s.c.doRequest(r)) + if err != nil { + return err + } + return nil +} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go rename to vendor/github.com/hashicorp/consul/api/status.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/LICENSE rename to vendor/github.com/hashicorp/go-cleanhttp/LICENSE diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/README.md rename to vendor/github.com/hashicorp/go-cleanhttp/README.md diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/cleanhttp.go rename to vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/go-cleanhttp/doc.go rename to vendor/github.com/hashicorp/go-cleanhttp/doc.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/serf/LICENSE rename to vendor/github.com/hashicorp/serf/LICENSE diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/client.go rename to vendor/github.com/hashicorp/serf/coordinate/client.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/config.go rename to vendor/github.com/hashicorp/serf/coordinate/config.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/coordinate.go rename to vendor/github.com/hashicorp/serf/coordinate/coordinate.go diff --git a/Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go similarity index 100% rename from Godeps/_workspace/src/github.com/hashicorp/serf/coordinate/phantom.go rename to vendor/github.com/hashicorp/serf/coordinate/phantom.go diff --git a/Godeps/_workspace/src/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/jinzhu/inflection/LICENSE rename to vendor/github.com/jinzhu/inflection/LICENSE diff --git a/Godeps/_workspace/src/github.com/jinzhu/inflection/README.md 
b/vendor/github.com/jinzhu/inflection/README.md similarity index 57% rename from Godeps/_workspace/src/github.com/jinzhu/inflection/README.md rename to vendor/github.com/jinzhu/inflection/README.md index 2221b91..4dd0f2d 100644 --- a/Godeps/_workspace/src/github.com/jinzhu/inflection/README.md +++ b/vendor/github.com/jinzhu/inflection/README.md @@ -13,15 +13,15 @@ inflection.Plural("bus") => "buses" inflection.Plural("BUS") => "BUSES" inflection.Plural("Bus") => "Buses" -inflection.Singularize("people") => "person" -inflection.Singularize("People") => "Person" -inflection.Singularize("PEOPLE") => "PERSON" -inflection.Singularize("buses") => "bus" -inflection.Singularize("BUSES") => "BUS" -inflection.Singularize("Buses") => "Bus" +inflection.Singular("people") => "person" +inflection.Singular("People") => "Person" +inflection.Singular("PEOPLE") => "PERSON" +inflection.Singular("buses") => "bus" +inflection.Singular("BUSES") => "BUS" +inflection.Singular("Buses") => "Bus" inflection.Plural("FancyPerson") => "FancyPeople" -inflection.Singularize("FancyPeople") => "FancyPerson" +inflection.Singular("FancyPeople") => "FancyPerson" ``` ## Register Rules @@ -37,3 +37,19 @@ inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" ``` +## Supporting the project + +[![http://patreon.com/jinzhu](http://patreon_public_assets.s3.amazonaws.com/sized/becomeAPatronBanner.png)](http://patreon.com/jinzhu) + + +## Author + +**jinzhu** + +* +* +* + +## License + +Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/Godeps/_workspace/src/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go similarity index 97% rename from Godeps/_workspace/src/github.com/jinzhu/inflection/inflections.go rename to vendor/github.com/jinzhu/inflection/inflections.go index c8438bd..606263b 100644 --- a/Godeps/_workspace/src/github.com/jinzhu/inflection/inflections.go +++ b/vendor/github.com/jinzhu/inflection/inflections.go @@ -5,12 +5,12 @@ Package inflection pluralizes and singularizes English nouns. 
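The README changes above reflect the rename of `Singularize` to `Singular` in this release of the vendored package; callers now use the shorter name:

```go
package main

import (
	"fmt"

	"github.com/jinzhu/inflection"
)

func main() {
	fmt.Println(inflection.Plural("person"))   // "people"
	fmt.Println(inflection.Singular("buses"))  // "bus" (formerly Singularize)
	fmt.Println(inflection.Singular("People")) // "Person"
}
```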
inflection.Plural("Person") => "People" inflection.Plural("PERSON") => "PEOPLE" - inflection.Singularize("people") => "person" - inflection.Singularize("People") => "Person" - inflection.Singularize("PEOPLE") => "PERSON" + inflection.Singular("people") => "person" + inflection.Singular("People") => "Person" + inflection.Singular("PEOPLE") => "PERSON" inflection.Plural("FancyPerson") => "FancydPeople" - inflection.Singularize("FancyPeople") => "FancydPerson" + inflection.Singular("FancyPeople") => "FancydPerson" Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/.gitignore rename to vendor/github.com/jmespath/go-jmespath/.gitignore diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/.travis.yml rename to vendor/github.com/jmespath/go-jmespath/.travis.yml diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/LICENSE rename to vendor/github.com/jmespath/go-jmespath/LICENSE diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/Makefile rename to vendor/github.com/jmespath/go-jmespath/Makefile diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/README.md rename to vendor/github.com/jmespath/go-jmespath/README.md diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/api.go rename to vendor/github.com/jmespath/go-jmespath/api.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/astnodetype_string.go rename to vendor/github.com/jmespath/go-jmespath/astnodetype_string.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/functions.go rename to vendor/github.com/jmespath/go-jmespath/functions.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/interpreter.go rename to vendor/github.com/jmespath/go-jmespath/interpreter.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/lexer.go rename to 
vendor/github.com/jmespath/go-jmespath/lexer.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/parser.go rename to vendor/github.com/jmespath/go-jmespath/parser.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/toktype_string.go rename to vendor/github.com/jmespath/go-jmespath/toktype_string.go diff --git a/Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go similarity index 100% rename from Godeps/_workspace/src/github.com/jmespath/go-jmespath/util.go rename to vendor/github.com/jmespath/go-jmespath/util.go diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/LICENSE b/vendor/github.com/kardianos/osext/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/LICENSE rename to vendor/github.com/kardianos/osext/LICENSE diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/README.md b/vendor/github.com/kardianos/osext/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/README.md rename to vendor/github.com/kardianos/osext/README.md diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/osext.go rename to vendor/github.com/kardianos/osext/osext.go diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/osext_plan9.go rename to vendor/github.com/kardianos/osext/osext_plan9.go diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/osext_procfs.go rename to vendor/github.com/kardianos/osext/osext_procfs.go diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/osext_sysctl.go rename to vendor/github.com/kardianos/osext/osext_sysctl.go diff --git a/Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/kardianos/osext/osext_windows.go rename to vendor/github.com/kardianos/osext/osext_windows.go diff --git a/Godeps/_workspace/src/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/pmezard/go-difflib/LICENSE rename to vendor/github.com/pmezard/go-difflib/LICENSE diff --git a/Godeps/_workspace/src/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go similarity index 100% rename from Godeps/_workspace/src/github.com/pmezard/go-difflib/difflib/difflib.go rename to vendor/github.com/pmezard/go-difflib/difflib/difflib.go diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/LICENCE.txt 
b/vendor/github.com/stretchr/testify/LICENCE.txt similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/LICENCE.txt rename to vendor/github.com/stretchr/testify/LICENCE.txt diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/LICENSE rename to vendor/github.com/stretchr/testify/LICENSE diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go similarity index 94% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/assertion_forward.go rename to vendor/github.com/stretchr/testify/assert/assertion_forward.go index e6a7960..aa4311f 100644 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -1,386 +1,351 @@ /* * CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen * THIS FILE MUST NOT BE EDITED BY HAND -*/ + */ package assert import ( - http "net/http" url "net/url" time "time" ) - // Condition uses a Comparison to assert a complex condition. func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { return Condition(a.t, comp, msgAndArgs...) } - // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. -// +// // a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") // a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") // a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return Contains(a.t, s, contains, msgAndArgs...) } - // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. -// +// // a.Empty(obj) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } - // Equal asserts that two objects are equal. -// +// // a.Equal(123, 123, "123 and 123 should be equal") -// +// // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return Equal(a.t, expected, actual, msgAndArgs...) } - // EqualError asserts that a function returned an error (i.e. not `nil`) // and that it is equal to the provided error. -// +// // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } -// +// a.EqualError(err, expectedErrorString, "An error was expected") +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { return EqualError(a.t, theError, errString, msgAndArgs...) } - // EqualValues asserts that two objects are equal or convertable to the same types // and equal. 
-// +// // a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return EqualValues(a.t, expected, actual, msgAndArgs...) } - // Error asserts that a function returned an error (i.e. not `nil`). -// +// // actualObj, err := SomeFunction() // if a.Error(err, "An error was expected") { // assert.Equal(t, err, expectedError) // } -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { return Error(a.t, err, msgAndArgs...) } - // Exactly asserts that two objects are equal is value and type. -// +// // a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return Exactly(a.t, expected, actual, msgAndArgs...) } - // Fail reports a failure through func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { return Fail(a.t, failureMessage, msgAndArgs...) } - // FailNow fails test func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { return FailNow(a.t, failureMessage, msgAndArgs...) } - // False asserts that the specified value is false. -// +// // a.False(myBool, "myBool should be false") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { return False(a.t, value, msgAndArgs...) } - // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. -// +// // a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { return HTTPBodyContains(a.t, handler, method, url, values, str) } - // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. -// +// // a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { return HTTPBodyNotContains(a.t, handler, method, url, values, str) } - // HTTPError asserts that a specified handler returns an error status code. -// +// // a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { return HTTPError(a.t, handler, method, url, values) } - // HTTPRedirect asserts that a specified handler returns a redirect status code. -// +// // a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// +// // Returns whether the assertion was successful (true) or not (false). 
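The regenerated forwarder keeps the same usage pattern: construct an `*Assertions` once with `assert.New(t)` and call methods on it. The note added to `Equal`/`NotEqual`, that pointer arguments are compared by the values they reference rather than by address, is illustrated in the sketch below (not part of the vendored code):

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwarder(t *testing.T) {
	a := assert.New(t)

	x, y := 123, 123
	// Pointers are compared by the values they reference, not by address,
	// so this assertion passes even though &x != &y.
	a.Equal(&x, &y)

	a.Contains("Hello World", "World")
	a.Len([]string{"a", "b", "c"}, 3)
}
```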
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { return HTTPRedirect(a.t, handler, method, url, values) } - // HTTPSuccess asserts that a specified handler returns a success status code. -// +// // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { return HTTPSuccess(a.t, handler, method, url, values) } - // Implements asserts that an object is implemented by the specified interface. -// +// // a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { return Implements(a.t, interfaceObject, object, msgAndArgs...) } - // InDelta asserts that the two numerals are within delta of each other. -// +// // a.InDelta(math.Pi, (22 / 7.0), 0.01) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDelta(a.t, expected, actual, delta, msgAndArgs...) } - // InDeltaSlice is the same as InDelta, except it compares two slices. func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) } - // InEpsilon asserts that expected and actual have a relative error less than epsilon -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) } - -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...) } - // IsType asserts that the specified objects are of the same type. func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { return IsType(a.t, expectedType, object, msgAndArgs...) } - // JSONEq asserts that two JSON strings are equivalent. -// +// // a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { return JSONEq(a.t, expected, actual, msgAndArgs...) } - // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. -// +// // a.Len(mySlice, 3, "The size of slice is not 3") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { return Len(a.t, object, length, msgAndArgs...) } - // Nil asserts that the specified object is nil. 
-// +// // a.Nil(err, "err should be nothing") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { return Nil(a.t, object, msgAndArgs...) } - // NoError asserts that a function returned no error (i.e. `nil`). -// +// // actualObj, err := SomeFunction() // if a.NoError(err) { // assert.Equal(t, actualObj, expectedObj) // } -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { return NoError(a.t, err, msgAndArgs...) } - // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. -// +// // a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") // a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") // a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { return NotContains(a.t, s, contains, msgAndArgs...) } - // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. -// +// // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) // } -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { return NotEmpty(a.t, object, msgAndArgs...) } - // NotEqual asserts that the specified values are NOT equal. -// +// // a.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// +// // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { return NotEqual(a.t, expected, actual, msgAndArgs...) } - // NotNil asserts that the specified object is not nil. -// +// // a.NotNil(err, "err should be something") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { return NotNil(a.t, object, msgAndArgs...) } - // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// +// // a.NotPanics(func(){ // RemainCalm() // }, "Calling RemainCalm() should NOT panic") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return NotPanics(a.t, f, msgAndArgs...) } - // NotRegexp asserts that a specified regexp does not match a string. -// +// // a.NotRegexp(regexp.MustCompile("starts"), "it's starting") // a.NotRegexp("^start", "it's not starting") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return NotRegexp(a.t, rx, str, msgAndArgs...) } - // NotZero asserts that i is not the zero value for its type and returns the truth. func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { return NotZero(a.t, i, msgAndArgs...) 
} - // Panics asserts that the code inside the specified PanicTestFunc panics. -// +// // a.Panics(func(){ // GoCrazy() // }, "Calling GoCrazy() should panic") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return Panics(a.t, f, msgAndArgs...) } - // Regexp asserts that a specified regexp matches a string. -// +// // a.Regexp(regexp.MustCompile("start"), "it's starting") // a.Regexp("start...$", "it's not starting") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { return Regexp(a.t, rx, str, msgAndArgs...) } - // True asserts that the specified value is true. -// +// // a.True(myBool, "myBool should be true") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { return True(a.t, value, msgAndArgs...) } - // WithinDuration asserts that the two times are within duration delta of each other. -// +// // a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// +// // Returns whether the assertion was successful (true) or not (false). func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) } - // Zero asserts that i is the zero value for its type and returns the truth. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { return Zero(a.t, i, msgAndArgs...) diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/assertion_forward.go.tmpl rename to vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go similarity index 84% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go rename to vendor/github.com/stretchr/testify/assert/assertions.go index 348d5f1..d1552e5 100644 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -65,7 +65,7 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool { /* CallerInfo is necessary because the assert functions use the testing object internally, causing it to print the file:line of the assert method, rather than where -the problem actually occured in calling code.*/ +the problem actually occurred in calling code.*/ // CallerInfo returns an array of strings containing the file and line number // of each stack frame leading from the current test to the assert call that @@ -82,7 +82,9 @@ func CallerInfo() []string { for i := 0; ; i++ { pc, file, line, ok = runtime.Caller(i) if !ok { - return nil + // The breaks below failed to terminate the loop, and we ran off the + // end of the call stack. 
+ break } // This is a huge edge case, but it will panic if this is the case, see #180 @@ -90,6 +92,21 @@ func CallerInfo() []string { break } + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + parts := strings.Split(file, "/") dir := parts[len(parts)-2] file = parts[len(parts)-1] @@ -97,11 +114,6 @@ func CallerInfo() []string { callers = append(callers, fmt.Sprintf("%s:%d", file, line)) } - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() // Drop the package segments := strings.Split(name, ".") name = segments[len(segments)-1] @@ -141,7 +153,7 @@ func getWhitespaceString() string { parts := strings.Split(file, "/") file = parts[len(parts)-1] - return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) + return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line))) } @@ -158,22 +170,18 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { return "" } -// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's -// test printing (see inner comment for specifics) -func indentMessageLines(message string, tabs int) string { +// Aligns the provided message so that all lines after the first line start at the same location as the first line. +// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). +// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the +// basis on which the alignment occurs). +func indentMessageLines(message string, longestLabelLen int) string { outBuf := new(bytes.Buffer) for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { + // no need to align first line because it starts at the correct location (after the label) if i != 0 { - outBuf.WriteRune('\n') - } - for ii := 0; ii < tabs; ii++ { - outBuf.WriteRune('\t') - // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter - // by 1 prematurely. - if ii == 0 && i > 0 { - ii++ - } + // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab + outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen +1) + "\t") } outBuf.WriteString(scanner.Text()) } @@ -205,29 +213,49 @@ func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + content := []labeledContent{ + {"Error Trace", strings.Join(CallerInfo(), "\n\r\t\t\t")}, + {"Error", failureMessage}, + } message := messageFromMsgAndArgs(msgAndArgs...) 
- - errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t") if len(message) > 0 { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n"+ - "\r\tMessages:\t%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2), - message) - } else { - t.Errorf("\r%s\r\tError Trace:\t%s\n"+ - "\r\tError:%s\n\r", - getWhitespaceString(), - errorTrace, - indentMessageLines(failureMessage, 2)) + content = append(content, labeledContent{"Messages", message}) } + t.Errorf("\r" + getWhitespaceString() + labeledOutput(content...)) + return false } +type labeledContent struct { + label string + content string +} + +// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: +// +// \r\t{{label}}:{{align_spaces}}\t{{content}}\n +// +// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. +// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this +// alignment is achieved, "\t{{content}}\n" is added for the output. +// +// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. +func labeledOutput(content ...labeledContent) string { + longestLabel := 0 + for _, v := range content { + if len(v.label) > longestLabel { + longestLabel = len(v.label) + } + } + var output string + for _, v := range content { + output += "\r\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" + } + return output +} + // Implements asserts that an object is implemented by the specified interface. // // assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") @@ -258,18 +286,39 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs // assert.Equal(t, 123, 123, "123 and 123 should be equal") // // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if !ObjectsAreEqual(expected, actual) { diff := diff(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)%s", expected, actual, diff), msgAndArgs...) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "received: %s%s", expected, actual, diff), msgAndArgs...) } return true } +// formatUnequalValues takes two values of arbitrary types and returns string +// representations appropriate to be presented to the user. +// +// If the values are not of like type, the returned strings will be prefixed +// with the type name, and the value will be enclosed in parenthesis similar +// to a type conversion in the Go grammar. +func formatUnequalValues(expected, actual interface{}) (e string, a string) { + if reflect.TypeOf(expected) != reflect.TypeOf(actual) { + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) + } + + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) +} + // EqualValues asserts that two objects are equal or convertable to the same types // and equal. 
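With `formatUnequalValues`, a failing `Equal` on operands of different dynamic types now prefixes each side with its type, e.g. `expected: int64(123)` / `received: int32(123)`, instead of the old `%#v (expected) != %#v (actual)` form. The deliberately failing test below exists only to show that output:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestMismatchedTypes(t *testing.T) {
	// Fails by design: the labelled output reports
	//   expected: int64(123)
	//   received: int32(123)
	// because the values compare unequal under reflect.DeepEqual
	// and their types differ.
	assert.Equal(t, int64(123), int32(123))
}
```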
// @@ -279,8 +328,11 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if !ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+ - " != %#v (actual)", expected, actual), msgAndArgs...) + diff := diff(expected, actual) + expected, actual = formatUnequalValues(expected, actual) + return Fail(t, fmt.Sprintf("Not equal: \n"+ + "expected: %s\n"+ + "received: %s%s", expected, actual, diff), msgAndArgs...) } return true @@ -507,6 +559,9 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { // assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") // // Returns whether the assertion was successful (true) or not (false). +// +// Pointer variable equality is determined based on the equality of the +// referenced values (as opposed to the memory addresses). func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { if ObjectsAreEqual(expected, actual) { @@ -833,7 +888,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // Returns whether the assertion was successful (true) or not (false). func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { if err != nil { - return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...) + return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) } return true @@ -849,9 +904,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Returns whether the assertion was successful (true) or not (false). func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - message := messageFromMsgAndArgs(msgAndArgs...) if err == nil { - return Fail(t, "An error is expected but got nil. %s", message) + return Fail(t, "An error is expected but got nil.", msgAndArgs...) } return true @@ -861,20 +915,22 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// if assert.Error(t, err, "An error was expected") { -// assert.Equal(t, err, expectedError) -// } +// assert.EqualError(t, err, expectedErrorString, "An error was expected") // // Returns whether the assertion was successful (true) or not (false). func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - - message := messageFromMsgAndArgs(msgAndArgs...) - if !NotNil(t, theError, "An error is expected but got nil. %s", message) { + if !Error(t, theError, msgAndArgs...) { return false } - s := "An error with value \"%s\" is expected but got \"%s\". %s" - return Equal(t, errString, theError.Error(), - s, errString, theError.Error(), message) + expected := errString + actual := theError.Error() + // don't need to use deep equals here, we know they are both strings + if expected != actual { + return Fail(t, fmt.Sprintf("Error message not equal:\n"+ + "expected: %q\n"+ + "received: %q", expected, actual), msgAndArgs...) + } + return true } // matchRegexp return true if a specified regexp matches a string. 
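`EqualError` now performs the non-nil check itself and compares the message as a plain string, so the old pattern of wrapping it in `assert.Error` is no longer necessary. A small sketch (the `doSomething` helper and its "boom" message are hypothetical):

```go
package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func doSomething() error {
	return errors.New("boom") // placeholder failure
}

func TestEqualError(t *testing.T) {
	err := doSomething()

	// Error still only checks that err is non-nil.
	assert.Error(t, err)

	// EqualError verifies non-nil internally and compares the message as a
	// plain string; a mismatch is reported as expected/received quoting.
	assert.EqualError(t, err, "boom")
}
```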
@@ -989,9 +1045,8 @@ func diff(expected interface{}, actual interface{}) string { return "" } - spew.Config.SortKeys = true - e := spew.Sdump(expected) - a := spew.Sdump(actual) + e := spewConfig.Sdump(expected) + a := spewConfig.Sdump(actual) diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ A: difflib.SplitLines(e), @@ -1005,3 +1060,10 @@ func diff(expected interface{}, actual interface{}) string { return "\n\nDiff:\n" + diff } + +var spewConfig = spew.ConfigState{ + Indent: " ", + DisablePointerAddresses: true, + DisableCapacities: true, + SortKeys: true, +} diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/doc.go rename to vendor/github.com/stretchr/testify/assert/doc.go diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/errors.go rename to vendor/github.com/stretchr/testify/assert/errors.go diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go similarity index 100% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/forward_assertions.go rename to vendor/github.com/stretchr/testify/assert/forward_assertions.go diff --git a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go similarity index 96% rename from Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go rename to vendor/github.com/stretchr/testify/assert/http_assertions.go index e1b9442..fa7ab89 100644 --- a/Godeps/_workspace/src/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -99,7 +99,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, "Expected response body for %s to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body) + Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) } return !contains diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/.travis.yml b/vendor/gopkg.in/pg.v5/.travis.yml similarity index 82% rename from Godeps/_workspace/src/gopkg.in/pg.v5/.travis.yml rename to vendor/gopkg.in/pg.v5/.travis.yml index 201384a..a6c5b8c 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/.travis.yml +++ b/vendor/gopkg.in/pg.v5/.travis.yml @@ -16,18 +16,18 @@ before_install: - sudo apt-get -y remove --purge postgresql-9.2 - sudo apt-get -y remove --purge postgresql-9.3 - sudo apt-get -y remove --purge postgresql-9.4 + - sudo apt-get -y remove --purge postgresql-9.5 - sudo apt-get -y autoremove - sudo apt-key adv --keyserver keys.gnupg.net --recv-keys 7FCC7D46ACCC4CF8 - - sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main 9.5" >> /etc/apt/sources.list.d/postgresql.list' + - sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ precise-pgdg main 9.6" >> /etc/apt/sources.list.d/postgresql.list' - sudo apt-get update - - sudo apt-get -y install postgresql-9.5 - - sudo sh -c 'echo "local all postgres trust" > 
/etc/postgresql/9.5/main/pg_hba.conf' - - sudo sh -c 'echo -n "host all all 0.0.0.0/0 trust" >> /etc/postgresql/9.5/main/pg_hba.conf' + - sudo apt-get -y install postgresql-9.6 + - sudo sh -c 'echo "local all postgres trust" > /etc/postgresql/9.6/main/pg_hba.conf' + - sudo sh -c 'echo -n "host all all 0.0.0.0/0 trust" >> /etc/postgresql/9.6/main/pg_hba.conf' - sudo /etc/init.d/postgresql restart - sudo -u postgres psql -c "CREATE EXTENSION hstore" install: - - go get gopkg.in/bsm/ratelimit.v1 - go get github.com/jinzhu/inflection - go get gopkg.in/check.v1 - go get github.com/onsi/ginkgo diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/CHANGELOG.md b/vendor/gopkg.in/pg.v5/CHANGELOG.md similarity index 84% rename from Godeps/_workspace/src/gopkg.in/pg.v5/CHANGELOG.md rename to vendor/gopkg.in/pg.v5/CHANGELOG.md index 7716055..c1a49c0 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/CHANGELOG.md +++ b/vendor/gopkg.in/pg.v5/CHANGELOG.md @@ -9,6 +9,8 @@ - Indexed placeholders support, e.g. `db.Exec("SELECT ?0 + ?0", 1)`. - Named placeholders are evaluated when query is executed. - Added Update and Delete hooks. + - Order reworked to quote column names. OrderExpr added to bypass Order quoting restrictions. + - Group reworked to quote column names. GroupExpr added to bypass Group quoting restrictions. ## v4 diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/LICENSE b/vendor/gopkg.in/pg.v5/LICENSE similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/LICENSE rename to vendor/gopkg.in/pg.v5/LICENSE diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/Makefile b/vendor/gopkg.in/pg.v5/Makefile similarity index 85% rename from Godeps/_workspace/src/gopkg.in/pg.v5/Makefile rename to vendor/gopkg.in/pg.v5/Makefile index b342b00..161c4fd 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/Makefile +++ b/vendor/gopkg.in/pg.v5/Makefile @@ -1,3 +1,4 @@ all: go test ./... go test ./... -short -race + go vet diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/README.md b/vendor/gopkg.in/pg.v5/README.md similarity index 71% rename from Godeps/_workspace/src/gopkg.in/pg.v5/README.md rename to vendor/gopkg.in/pg.v5/README.md index 65dc7b2..599399f 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/README.md +++ b/vendor/gopkg.in/pg.v5/README.md @@ -20,8 +20,10 @@ Supports: - Scanning variables using [ORM](https://godoc.org/gopkg.in/pg.v5#example-DB-Select-SomeColumnsIntoVars) and [SQL](https://godoc.org/gopkg.in/pg.v5#example-Scan). - [SelectOrInsert](https://godoc.org/gopkg.in/pg.v5#example-DB-Insert-SelectOrInsert) using on-conflict. - [INSERT ... ON CONFLICT DO UPDATE](https://godoc.org/gopkg.in/pg.v5#example-DB-Insert-OnConflictDoUpdate) using ORM. +- Common table expressions using [WITH](https://godoc.org/gopkg.in/pg.v5#example-DB-Select-With) and [WrapWith](https://godoc.org/gopkg.in/pg.v5#example-DB-Select-WrapWith). - [CountEstimate](https://godoc.org/gopkg.in/pg.v5#example-DB-Model-CountEstimate) using `EXPLAIN` to get [estimated number of matching rows](https://wiki.postgresql.org/wiki/Count_estimate). - [HasOne](https://godoc.org/gopkg.in/pg.v5#example-DB-Model-HasOne), [BelongsTo](https://godoc.org/gopkg.in/pg.v5#example-DB-Model-BelongsTo), [HasMany](https://godoc.org/gopkg.in/pg.v5#example-DB-Model-HasMany) and [ManyToMany](https://godoc.org/gopkg.in/pg.v5#example-DB-Model-ManyToMany). +- [Creating tables from structs](https://godoc.org/gopkg.in/pg.v5#example-DB-CreateTable). - [Migrations](https://github.com/go-pg/migrations). - [Sharding](https://github.com/go-pg/sharding). 
@@ -37,6 +39,7 @@ Examples: http://godoc.org/gopkg.in/pg.v5#pkg-examples. * [Writing queries](#writing-queries) * [Placeholders](#placeholders) * [Select](#select) + * [Column names](#column-names) * [Reusing queries](#reusing-queries) * [Insert](#insert) * [Update](#update) @@ -171,15 +174,15 @@ func ExampleDB_Model() { ## Model definition -Models are defined using Go structs. Order of the struct fields usually does not matter with the only exception being primary key(s) that must be defined before any other fields. Otherwise table relationships can be recognized incorrectly. +Models are defined using Go structs which are mapped to PostgreSQL tables. Exported struct fields are mapped to table columns. Table name and alias are automatically derived from struct name by underscoring it; table name is also pluralized (struct `Genre` -> table `genres AS genre`). Default table name can be overrided using `tableName` field. Column name is derived from struct field name by underscoring it (field `ParentId` -> column `parent_id`). Default column name can be overrided using `sql` tag. Order of struct fields does not matter with the only exception being primary keys that must be defined before any other fields. Otherwise table relationships can be recognized incorrectly. -Please *note* that most struct tags in following example have the same values as the defaults and are included only for demonstration purposes. +Please *note* that most struct tags in the following example have the same values as the defaults and are included only for demonstration purposes. ```go type Genre struct { - // TableName is an optional field that specifies custom table name and alias. - // By default go-pg generates table name and alias from the struct name. - TableName struct{} `sql:"genres,alias:genre"` // default name and alias are the same + // tableName is an optional field that specifies custom table name and alias. + // By default go-pg generates table name and alias from struct name. 
+ tableName struct{} `sql:"genres,alias:genre"` // default values are the same Id int // Id is automatically detected as primary key Name string @@ -187,18 +190,26 @@ type Genre struct { Books []Book `pg:",many2many:book_genres"` // many to many relation - ParentId int `sql:",null"` + ParentId int Subgenres []Genre `pg:",fk:Parent"` // fk specifies prefix for foreign key (ParentId) } +func (g Genre) String() string { + return fmt.Sprintf("Genre<Id=%d Name=%q>", g.Id, g.Name) +} + type Author struct { ID int // both "Id" and "ID" are detected as primary key Name string - Books []Book // has many relation + Books []*Book // has many relation +} + +func (a Author) String() string { + return fmt.Sprintf("Author<ID=%d Name=%q>", a.ID, a.Name) } type BookGenre struct { - TableName struct{} `sql:",alias:bg"` // custom table alias + tableName struct{} `sql:",alias:bg"` // custom table alias BookId int `sql:",pk"` // pk tag is used to mark field as primary key GenreId int `sql:",pk"` @@ -212,14 +223,25 @@ type Book struct { AuthorID int Author *Author // has one relation EditorID int - Editor *Author // has one relation - CreatedAt time.Time `sql:",null"` + Editor *Author // has one relation + CreatedAt time.Time Genres []Genre `pg:",many2many:book_genres" gorm:"many2many:book_genres;"` // many to many relation Translations []Translation // has many relation Comments []Comment `pg:",polymorphic:Trackable"` // has many polymorphic relation } +func (b Book) String() string { + return fmt.Sprintf("Book<Id=%d Title=%q>", b.Id, b.Title) +} + +func (b *Book) BeforeInsert(db orm.DB) error { + if b.CreatedAt.IsZero() { + b.CreatedAt = time.Now() + } + return nil +} + // BookWithCommentCount is like Book model, but has additional CommentCount // field that is used to select data into it. The use of `pg:",override"` tag // is essential here and it overrides internal model properties such as table name. @@ -230,7 +252,7 @@ type BookWithCommentCount struct { } type Translation struct { - TableName struct{} `sql:",alias:tr"` // custom table alias + tableName struct{} `sql:",alias:tr"` // custom table alias Id int BookId int @@ -241,8 +263,8 @@ type Translation struct { } type Comment struct { - TrackableId int `sql:",pk"` // Book.Id or Translation.Id - TrackableType string `sql:",pk"` // "Book" or "Translation" + TrackableId int // Book.Id or Translation.Id + TrackableType string // "Book" or "Translation" Text string } ``` @@ -316,27 +338,27 @@ func (p *Params) Sum() int { // go-pg recognizes placeholders (`?`) in queries and replaces them // with parameters when queries are executed. Parameters are escaped // before replacing according to PostgreSQL rules. Specifically: -// - all parameters are properly quoted against SQL injections; -// - null byte is removed; -// - JSON/JSONB gets `\u0000` escaped as `\\u0000`. +// - all parameters are properly quoted against SQL injections; +// - null byte is removed; +// - JSON/JSONB gets `\u0000` escaped as `\\u0000`. func Example_placeholders() { var num int - // Simple placeholders. + // Simple params. _, err := db.Query(pg.Scan(&num), "SELECT ?", 42) if err != nil { panic(err) } - fmt.Println(num) + fmt.Println("simple:", num) - // Indexed placeholders. + // Indexed params. _, err = db.Query(pg.Scan(&num), "SELECT ?0 + ?0", 1) if err != nil { panic(err) } - fmt.Println(num) + fmt.Println("indexed:", num) - // Named placeholders. + // Named params.
params := &Params{ X: 1, Y: 1, @@ -345,11 +367,19 @@ func Example_placeholders() { if err != nil { panic(err) } - fmt.Println(num) + fmt.Println("named:", num) + + // Global params. + _, err = db.WithParam("z", 1).Query(pg.Scan(&num), "SELECT ?x + ?y + ?z", &params) + if err != nil { + panic(err) + } + fmt.Println("global:", num) - // Output: 42 - // 2 - // 4 + // Output: simple: 42 + // indexed: 2 + // named: 4 + // global: 3 } ``` @@ -358,53 +388,66 @@ func Example_placeholders() { ```go // Select book by primary key. err := db.Select(&book) -// SELECT * FROM "books" WHERE id = 1 +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" WHERE id = 1 // Select only book title and text. err := db.Model(&book).Column("title", "text").Where("id = ?", 1).Select() -// SELECT "title", "text" FROM "books" WHERE id = 1 +// SELECT "title", "text" +// FROM "books" WHERE id = 1 // Select only book title and text into variables. var title, text string err := db.Model(&Book{}).Column("title", "text").Where("id = ?", 1).Select(&title, &text) -// SELECT "title", "text" FROM "books" WHERE id = 1 +// SELECT "title", "text" +// FROM "books" WHERE id = 1 -// Select book using WHERE. +// Select book using WHERE ... AND ... err := db.Model(&book). Where("id > ?", 100). Where("title LIKE ?", "my%"). Limit(1). Select() -// SELECT * FROM "books" WHERE (id > 100) AND (title LIKE 'my%') LIMIT 1 +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" +// WHERE (id > 100) AND (title LIKE 'my%') +// LIMIT 1 -// Select book using WHERE OR. +// Select book using WHERE ... OR ... err := db.Model(&book). - WhereOr( - pg.SQL("id > ?", 100), - pg.SQL("title LIKE ?", "my%"), - ). + Where("id > ?", 100). + WhereOr("title LIKE ?", "my%"). Limit(1). Select() -// SELECT * FROM "books" WHERE (id > 100 OR title LIKE 'my%') LIMIT 1 +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" +// WHERE (id > 100) OR (title LIKE 'my%') +// LIMIT 1 // Select first 20 books. err := db.Model(&books).Order("id ASC").Limit(20).Select() -// SELECT * FROM "books" ORDER BY id ASC LIMIT 20 +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" +// ORDER BY id ASC LIMIT 20 // Count books. count, err := db.Model(&Book{}).Count() -// SELECT COUNT(*) FROM "books" +// SELECT count(*) FROM "books" // Select 20 books and count all books. count, err := db.Model(&books).Limit(20).SelectAndCount() -// SELECT * FROM "books" LIMIT 20 -// SELECT COUNT(*) FROM "books" +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" LIMIT 20 +// +// SELECT count(*) FROM "books" // Select 20 books and count estimated number of books. count, err := db.Model(&books).Limit(20).SelectAndCountEstimate(100000) -// SELECT * FROM "books" LIMIT 20 +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" LIMIT 20 +// // EXPLAIN SELECT 2147483647 FROM "books" -// SELECT COUNT(*) FROM "books" +// SELECT count(*) FROM "books" // Select author id and number of books. var res []struct { @@ -433,39 +476,101 @@ err := db.Model(nil). With("author_books", authorBooks). Table("author_books"). Select(&books) -// WITH "author_books" AS (SELECT "book".* FROM "books" AS "book" WHERE (author_id = 1)) +// WITH "author_books" AS ( +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" AS "book" WHERE (author_id = 1) +// ) // SELECT * FROM "author_books" + +// Same query using WrapWith. +err := db.Model(&books). + Where("author_id = ?", 1). + WrapWith("author_books"). + Table("author_books").
+ Select(&books) +// WITH "author_books" AS ( +// SELECT "book"."id", "book"."title", "book"."text" +// FROM "books" AS "book" WHERE (author_id = 1) +// ) +// SELECT * FROM "author_books" +``` + +### Column names + +```go +// Select book and associated author. +err := db.Model(&book).Column("Author").Select() +// SELECT +// "book"."id", "book"."title", "book"."text", +// "author"."id" AS "author__id", "author"."name" AS "author__name" +// FROM "books" +// LEFT JOIN "users" AS "author" ON "author"."id" = "book"."author_id" +// WHERE id = 1 + +// Select book id and associated author id. +err := db.Model(&book).Column("book.id", "Author.id").Select() +// SELECT "book"."id", "author"."id" AS "author__id" +// FROM "books" +// LEFT JOIN "users" AS "author" ON "author"."id" = "book"."author_id" +// WHERE id = 1 + +// Select book and join author without selecting it. +err := db.Model(&book).Column("Author._").Select() +// SELECT "book"."id" +// FROM "books" +// LEFT JOIN "users" AS "author" ON "author"."id" = "book"."author_id" +// WHERE id = 1 + +// Join and select book author without selecting book. +err := db.Model(&book).Column("_", "Author").Select() +// SELECT "author"."id" AS "author__id", "author"."name" AS "author__name" +// FROM "books" +// LEFT JOIN "users" AS "author" ON "author"."id" = "book"."author_id" +// WHERE id = 1 ``` ### Reusing queries ```go -// pager retrieves page number from the req and sets query LIMIT and OFFSET. -func pager(req *http.Request) func(*orm.Query) *orm.Query { - const pageSize = 20 - return func(q *orm.Query) *orm.Query { - q = q.Limit(pageSize) - param := req.URL.Query().Get("page") - if param == "" { - return q - } - page, err := strconv.Atoi(param) - if err != nil { - // Set the query error. - return q.Err(err) - } - return q.Offset((page - 1) * pageSize) - } -} +// Pager sets LIMIT and OFFSET from the URL values: +// - ?limit=10 - sets q.Limit(10), max limit is 1000. +// - ?page=5 - sets q.Offset((page - 1) * limit), max offset is 1000000. +func Pager(urlValues url.Values, defaultLimit int) func(*Query) (*Query, error) { + return func(q *Query) (*Query, error) { + const maxLimit = 1000 + const maxOffset = 1e6 + + limit, err := intParam(urlValues, "limit") + if err != nil { + return nil, err + } + if limit < 1 { + limit = defaultLimit + } else if limit > maxLimit { + return nil, fmt.Errorf("limit can't bigger than %d", maxLimit) + } + if limit > 0 { + q = q.Limit(limit) + } -var books []Book -err := db.Model(&books).Apply(pager(req)).Select() -// SELECT * FROM "books" LIMIT 20 + page, err := intParam(urlValues, "page") + if err != nil { + return nil, err + } + if page > 0 { + offset := (page - 1) * limit + if offset > maxOffset { + return nil, fmt.Errorf("offset can't bigger than %d", maxOffset) + } + q = q.Offset(offset) + } -// OR using DB and model late binding + return q, nil + } +} -query := pg.Model().Apply(pager(req)) -err := query.DB(db).Model(&books).Select() +var books []Book +err := db.Model(&books).Apply(orm.Pager(req.URL.Query())).Select() // SELECT * FROM "books" LIMIT 20 ``` @@ -537,25 +642,61 @@ res, err := db.Model(&book).Where("title = ?title").Delete() ### Has one -Following example selects all items and their subitems using LEFT JOIN and `sub_id` column. +Following examples selects users joining their profiles: ```go -type Item struct { - Id int +type Profile struct { + Id int + Lang string +} - Sub *Item - SubId int +// User has one profile. 
+type User struct { + Id int + Name string + ProfileId int + Profile *Profile } -var items []Item -err := db.Model(&items). - Column("item.*", "Sub"). - Where("item.sub_id IS NOT NULL"). +db := connect() +defer db.Close() + +qs := []string{ + "CREATE TEMP TABLE users (id int, name text, profile_id int)", + "CREATE TEMP TABLE profiles (id int, lang text)", + "INSERT INTO users VALUES (1, 'user 1', 1), (2, 'user 2', 2)", + "INSERT INTO profiles VALUES (1, 'en'), (2, 'ru')", +} +for _, q := range qs { + _, err := db.Exec(q) + if err != nil { + panic(err) + } +} + +// Select users joining their profiles with following query: +// +// SELECT +// "user".*, +// "profile"."id" AS "profile__id", +// "profile"."lang" AS "profile__lang" +// FROM "users" AS "user" +// LEFT JOIN "profiles" AS "profile" ON "profile"."id" = "user"."profile_id" + +var users []User +err := db.Model(&users). + Column("user.*", "Profile"). Select() -// SELECT "item".*, "sub"."id" AS "sub__id", "sub"."sub_id" AS "sub__sub_id" -// FROM "items" AS "item" -// LEFT JOIN "items" AS "sub" ON "sub"."id" = item."sub_id" -// WHERE (item.sub_id IS NOT NULL) +if err != nil { + panic(err) +} + +fmt.Println(len(users), "results") +fmt.Println(users[0].Id, users[0].Name, users[0].Profile) +fmt.Println(users[1].Id, users[1].Name, users[1].Profile) +// Output: 2 results +// 1 user 1 &{1 en} +// 2 user 2 &{2 ru} ``` ### Belongs to @@ -760,9 +901,9 @@ Please go through [examples](http://godoc.org/gopkg.in/pg.v5#pkg-examples) to ge ``` ```sql - SELECT "book".* FROM "books" AS "book" LIMIT 100 - SELECT "translation".* FROM "translations" AS "translation" - WHERE ("translation"."book_id") IN ((100), (101), ... (199)); + SELECT "book".* FROM "books" AS "book" LIMIT 100 + SELECT "translation".* FROM "translations" AS "translation" + WHERE ("translation"."book_id") IN ((100), (101), ... (199)); ``` GORM: diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/db.go b/vendor/gopkg.in/pg.v5/db.go similarity index 79% rename from Godeps/_workspace/src/gopkg.in/pg.v5/db.go rename to vendor/gopkg.in/pg.v5/db.go index 29c46b5..8235a60 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/db.go +++ b/vendor/gopkg.in/pg.v5/db.go @@ -1,6 +1,7 @@ package pg import ( + "fmt" "io" "time" @@ -26,12 +27,17 @@ func Connect(opt *Options) *DB { // underlying connections. It's safe for concurrent use by multiple // goroutines. type DB struct { - opt *Options - pool *pool.ConnPool + opt *Options + pool *pool.ConnPool + fmter orm.Formatter } var _ orm.DB = (*DB)(nil) +func (db *DB) String() string { + return fmt.Sprintf("DB<Addr=%q%s>", db.opt.Addr, db.fmter) +} + // Options returns read-only Options that were used to connect to the DB. func (db *DB) Options() *Options { return db.opt @@ -43,26 +49,37 @@ func (db *DB) WithTimeout(d time.Duration) *DB { newopt.ReadTimeout = d newopt.WriteTimeout = d return &DB{ - opt: &newopt, - pool: db.pool, + opt: &newopt, + pool: db.pool, + fmter: db.fmter, + } +} + +// WithParam returns a DB that replaces the param with the value in queries.
+func (db *DB) WithParam(param string, value interface{}) *DB { + return &DB{ + opt: db.opt, + pool: db.pool, + fmter: db.fmter.WithParam(param, value), } } func (db *DB) conn() (*pool.Conn, error) { - cn, err := db.pool.Get() + cn, _, err := db.pool.Get() if err != nil { return nil, err } - if !cn.Inited { + cn.SetReadWriteTimeout(db.opt.ReadTimeout, db.opt.WriteTimeout) + + if cn.InitedAt.IsZero() { if err := db.initConn(cn); err != nil { _ = db.pool.Remove(cn, err) return nil, err } + cn.InitedAt = time.Now() } - cn.SetReadTimeout(db.opt.ReadTimeout) - cn.SetWriteTimeout(db.opt.WriteTimeout) return cn, nil } @@ -78,7 +95,6 @@ func (db *DB) initConn(cn *pool.Conn) error { return err } - cn.Inited = true return nil } @@ -97,6 +113,8 @@ func (db *DB) shouldRetry(err error) bool { switch pgerr.Field('C') { case "40001": // serialization_failure return true + case "55000": // attempted to delete invisible tuple + return true case "57014": // statement_timeout return db.opt.RetryStatementTimeout default: @@ -122,7 +140,7 @@ func (db *DB) Close() error { } // Exec executes a query ignoring returned rows. The params are for any -// placeholder parameters in the query. +// placeholders in the query. func (db *DB) Exec(query interface{}, params ...interface{}) (res *types.Result, err error) { for i := 0; ; i++ { var cn *pool.Conn @@ -132,7 +150,7 @@ func (db *DB) Exec(query interface{}, params ...interface{}) (res *types.Result, return nil, err } - res, err = simpleQuery(cn, query, params...) + res, err = db.simpleQuery(cn, query, params...) db.freeConn(cn, err) if i >= db.opt.MaxRetries { @@ -163,7 +181,7 @@ func (db *DB) ExecOne(query interface{}, params ...interface{}) (*types.Result, } // Query executes a query that returns rows, typically a SELECT. -// The params are for any placeholder parameters in the query. +// The params are for any placeholders in the query. func (db *DB) Query(model, query interface{}, params ...interface{}) (res *types.Result, err error) { var mod orm.Model for i := 0; i < 3; i++ { @@ -174,7 +192,7 @@ func (db *DB) Query(model, query interface{}, params ...interface{}) (res *types return nil, err } - res, mod, err = simpleQueryData(cn, model, query, params...) + res, mod, err = db.simpleQueryData(cn, model, query, params...) db.freeConn(cn, err) if i >= db.opt.MaxRetries { @@ -219,16 +237,13 @@ func (db *DB) QueryOne(model, query interface{}, params ...interface{}) (*types. return res, nil } -// Listen listens for notifications sent by NOTIFY statement. -func (db *DB) Listen(channels ...string) (*Listener, error) { - l := &Listener{ +// Listen listens for notifications sent with NOTIFY command. +func (db *DB) Listen(channels ...string) *Listener { + ln := &Listener{ db: db, } - if err := l.Listen(channels...); err != nil { - l.Close() - return nil, err - } - return l, nil + _ = ln.Listen(channels...) + return ln } // CopyFrom copies data from reader to a table. @@ -238,7 +253,7 @@ func (db *DB) CopyFrom(reader io.Reader, query interface{}, params ...interface{ return nil, err } - res, err := copyFrom(cn, reader, query, params...) + res, err := db.copyFrom(cn, reader, query, params...) 
db.freeConn(cn, err) return res, err } @@ -250,7 +265,7 @@ func (db *DB) CopyTo(writer io.Writer, query interface{}, params ...interface{}) return nil, err } - if err := writeQueryMsg(cn.Wr, query, params...); err != nil { + if err := writeQueryMsg(cn.Wr, db, query, params...); err != nil { db.pool.Put(cn) return nil, err } @@ -300,14 +315,16 @@ func (db *DB) Delete(model interface{}) error { return orm.Delete(db, model) } -// CreateTable creates table for the model in db. +// CreateTable creates table for the model. It recognizes following field tags: +// - notnull - sets NOT NULL constraint. +// - unique - sets UNIQUE constraint. func (db *DB) CreateTable(model interface{}, opt *orm.CreateTableOptions) error { _, err := orm.CreateTable(db, model, opt) return err } func (db *DB) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - return orm.Formatter{}.Append(dst, query, params...) + return db.fmter.Append(dst, query, params...) } func (db *DB) cancelRequest(processId, secretKey int32) error { @@ -325,8 +342,10 @@ func (db *DB) cancelRequest(processId, secretKey int32) error { return nil } -func simpleQuery(cn *pool.Conn, query interface{}, params ...interface{}) (*types.Result, error) { - if err := writeQueryMsg(cn.Wr, query, params...); err != nil { +func (db *DB) simpleQuery( + cn *pool.Conn, query interface{}, params ...interface{}, +) (*types.Result, error) { + if err := writeQueryMsg(cn.Wr, db, query, params...); err != nil { return nil, err } @@ -337,10 +356,10 @@ func simpleQuery(cn *pool.Conn, query interface{}, params ...interface{}) (*type return readSimpleQuery(cn) } -func simpleQueryData( +func (db *DB) simpleQueryData( cn *pool.Conn, model, query interface{}, params ...interface{}, ) (*types.Result, orm.Model, error) { - if err := writeQueryMsg(cn.Wr, query, params...); err != nil { + if err := writeQueryMsg(cn.Wr, db, query, params...); err != nil { return nil, nil, err } @@ -351,8 +370,8 @@ func simpleQueryData( return readSimpleQueryData(cn, model) } -func copyFrom(cn *pool.Conn, r io.Reader, query interface{}, params ...interface{}) (*types.Result, error) { - if err := writeQueryMsg(cn.Wr, query, params...); err != nil { +func (db *DB) copyFrom(cn *pool.Conn, r io.Reader, query interface{}, params ...interface{}) (*types.Result, error) { + if err := writeQueryMsg(cn.Wr, db, query, params...); err != nil { return nil, err } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/doc.go b/vendor/gopkg.in/pg.v5/doc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/doc.go rename to vendor/gopkg.in/pg.v5/doc.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/error.go b/vendor/gopkg.in/pg.v5/error.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/error.go rename to vendor/gopkg.in/pg.v5/error.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/error.go b/vendor/gopkg.in/pg.v5/internal/error.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/error.go rename to vendor/gopkg.in/pg.v5/internal/error.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/internal.go b/vendor/gopkg.in/pg.v5/internal/internal.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/internal.go rename to vendor/gopkg.in/pg.v5/internal/internal.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/log.go b/vendor/gopkg.in/pg.v5/internal/log.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/log.go rename to 
vendor/gopkg.in/pg.v5/internal/log.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/array_parser.go b/vendor/gopkg.in/pg.v5/internal/parser/array_parser.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/array_parser.go rename to vendor/gopkg.in/pg.v5/internal/parser/array_parser.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/hstore_parser.go b/vendor/gopkg.in/pg.v5/internal/parser/hstore_parser.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/hstore_parser.go rename to vendor/gopkg.in/pg.v5/internal/parser/hstore_parser.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/parser.go b/vendor/gopkg.in/pg.v5/internal/parser/parser.go similarity index 93% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/parser.go rename to vendor/gopkg.in/pg.v5/internal/parser/parser.go index 0401991..c2f0dbd 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/parser.go +++ b/vendor/gopkg.in/pg.v5/internal/parser/parser.go @@ -76,7 +76,7 @@ func (p *Parser) ReadSep(c byte) ([]byte, bool) { return b, true } -func (p *Parser) ReadIdentifier() (b []byte, numeric bool) { +func (p *Parser) ReadIdentifier() (s string, numeric bool) { pos := len(p.b) numeric = true for i, ch := range p.b { @@ -91,11 +91,11 @@ func (p *Parser) ReadIdentifier() (b []byte, numeric bool) { break } if pos <= 0 { - return nil, false + return "", false } - b = p.b[:pos] + b := p.b[:pos] p.b = p.b[pos:] - return b, numeric + return internal.BytesToString(b), numeric } func (p *Parser) ReadNumber() int { diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/util.go b/vendor/gopkg.in/pg.v5/internal/parser/util.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/parser/util.go rename to vendor/gopkg.in/pg.v5/internal/parser/util.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/buffer.go b/vendor/gopkg.in/pg.v5/internal/pool/buffer.go similarity index 65% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/buffer.go rename to vendor/gopkg.in/pg.v5/internal/pool/buffer.go index ed4ca79..5a36a83 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/buffer.go +++ b/vendor/gopkg.in/pg.v5/internal/pool/buffer.go @@ -5,21 +5,21 @@ import ( "io" ) -type Buffer struct { +type WriteBuffer struct { w io.Writer Bytes []byte msgStart, paramStart int } -func NewBuffer(w io.Writer, b []byte) *Buffer { - return &Buffer{ +func NewWriteBuffer(w io.Writer, b []byte) *WriteBuffer { + return &WriteBuffer{ w: w, Bytes: b, } } -func (buf *Buffer) StartMessage(c byte) { +func (buf *WriteBuffer) StartMessage(c byte) { if c == 0 { buf.msgStart = len(buf.Bytes) buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) @@ -29,68 +29,68 @@ func (buf *Buffer) StartMessage(c byte) { } } -func (buf *Buffer) FinishMessage() { +func (buf *WriteBuffer) FinishMessage() { binary.BigEndian.PutUint32( buf.Bytes[buf.msgStart:], uint32(len(buf.Bytes)-buf.msgStart)) } -func (buf *Buffer) StartParam() { +func (buf *WriteBuffer) StartParam() { buf.paramStart = len(buf.Bytes) buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) } -func (buf *Buffer) FinishParam() { +func (buf *WriteBuffer) FinishParam() { binary.BigEndian.PutUint32( buf.Bytes[buf.paramStart:], uint32(len(buf.Bytes)-buf.paramStart-4)) } var nullParamLength = int32(-1) -func (buf *Buffer) FinishNullParam() { +func (buf *WriteBuffer) FinishNullParam() { binary.BigEndian.PutUint32( 
buf.Bytes[buf.paramStart:], uint32(nullParamLength)) } -func (buf *Buffer) Write(b []byte) (int, error) { +func (buf *WriteBuffer) Write(b []byte) (int, error) { buf.Bytes = append(buf.Bytes, b...) return len(b), nil } -func (buf *Buffer) WriteInt16(num int16) { +func (buf *WriteBuffer) WriteInt16(num int16) { buf.Bytes = append(buf.Bytes, 0, 0) binary.BigEndian.PutUint16(buf.Bytes[len(buf.Bytes)-2:], uint16(num)) } -func (buf *Buffer) WriteInt32(num int32) { +func (buf *WriteBuffer) WriteInt32(num int32) { buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) binary.BigEndian.PutUint32(buf.Bytes[len(buf.Bytes)-4:], uint32(num)) } -func (buf *Buffer) WriteString(s string) { +func (buf *WriteBuffer) WriteString(s string) { buf.Bytes = append(buf.Bytes, s...) buf.Bytes = append(buf.Bytes, 0) } -func (buf *Buffer) WriteBytes(b []byte) { +func (buf *WriteBuffer) WriteBytes(b []byte) { buf.Bytes = append(buf.Bytes, b...) buf.Bytes = append(buf.Bytes, 0) } -func (buf *Buffer) WriteByte(c byte) { +func (buf *WriteBuffer) WriteByte(c byte) { buf.Bytes = append(buf.Bytes, c) } -func (buf *Buffer) Flush() error { +func (buf *WriteBuffer) Flush() error { _, err := buf.w.Write(buf.Bytes) - buf.Bytes = buf.Bytes[:0] + buf.Reset() return err } -func (buf *Buffer) Reset() { +func (buf *WriteBuffer) Reset() { buf.Bytes = buf.Bytes[:0] } -func (buf *Buffer) ReadFrom(r io.Reader) (int64, error) { +func (buf *WriteBuffer) ReadFrom(r io.Reader) (int64, error) { n, err := r.Read(buf.Bytes[len(buf.Bytes):cap(buf.Bytes)]) buf.Bytes = buf.Bytes[:len(buf.Bytes)+int(n)] return int64(n), err diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/conn.go b/vendor/gopkg.in/pg.v5/internal/pool/conn.go similarity index 53% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/conn.go rename to vendor/gopkg.in/pg.v5/internal/pool/conn.go index fbd5a53..fbc8e75 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/conn.go +++ b/vendor/gopkg.in/pg.v5/internal/pool/conn.go @@ -14,68 +14,57 @@ var noDeadline = time.Time{} type Conn struct { NetConn net.Conn - Rd *bufio.Reader // read buffer - Wr *Buffer // write buffer - Buf []byte // reusable - Columns [][]byte // reusable + Buf []byte // read/write buffer - Inited bool - UsedAt time.Time + Rd *bufio.Reader + Columns [][]byte + + Wr *WriteBuffer + + InitedAt time.Time + UsedAt time.Time ProcessId int32 SecretKey int32 - _id int64 + _lastId int64 } func NewConn(netConn net.Conn) *Conn { cn := &Conn{ - NetConn: netConn, - Buf: make([]byte, 0, 8192), - + Buf: make([]byte, 0, 8192), UsedAt: time.Now(), } - cn.Rd = bufio.NewReader(cn) - cn.Wr = NewBuffer(cn, cn.Buf) + cn.SetNetConn(netConn) return cn } -func (cn *Conn) IsStale(timeout time.Duration) bool { - return timeout > 0 && time.Since(cn.UsedAt) > timeout +func (cn *Conn) SetNetConn(netConn net.Conn) { + cn.NetConn = netConn + cn.Rd = bufio.NewReader(cn.NetConn) + cn.Wr = NewWriteBuffer(cn.NetConn, cn.Buf) } func (cn *Conn) NextId() string { - cn._id++ - return strconv.FormatInt(cn._id, 10) + cn._lastId++ + return strconv.FormatInt(cn._lastId, 10) } -func (cn *Conn) SetReadTimeout(dur time.Duration) { +func (cn *Conn) SetReadWriteTimeout(rt, wt time.Duration) { cn.UsedAt = time.Now() - if dur == 0 { - cn.NetConn.SetReadDeadline(noDeadline) + if rt > 0 { + cn.NetConn.SetReadDeadline(cn.UsedAt.Add(rt)) } else { - cn.NetConn.SetReadDeadline(cn.UsedAt.Add(dur)) + cn.NetConn.SetReadDeadline(noDeadline) } -} - -func (cn *Conn) SetWriteTimeout(dur time.Duration) { - cn.UsedAt = time.Now() - if dur == 0 { - 
cn.NetConn.SetWriteDeadline(noDeadline) + if wt > 0 { + cn.NetConn.SetWriteDeadline(cn.UsedAt.Add(wt)) } else { - cn.NetConn.SetWriteDeadline(cn.UsedAt.Add(dur)) + cn.NetConn.SetWriteDeadline(noDeadline) } } -func (cn *Conn) Read(b []byte) (int, error) { - return cn.NetConn.Read(b) -} - -func (cn *Conn) Write(b []byte) (int, error) { - return cn.NetConn.Write(b) -} - func (cn *Conn) ReadN(n int) ([]byte, error) { if d := n - cap(cn.Buf); d > 0 { cn.Buf = cn.Buf[:cap(cn.Buf)] diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/pool.go b/vendor/gopkg.in/pg.v5/internal/pool/pool.go similarity index 72% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/pool.go rename to vendor/gopkg.in/pg.v5/internal/pool/pool.go index ab06922..d188cd6 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/pool/pool.go +++ b/vendor/gopkg.in/pg.v5/internal/pool/pool.go @@ -2,21 +2,18 @@ package pool import ( "errors" - "fmt" "net" "sync" "sync/atomic" "time" - "gopkg.in/bsm/ratelimit.v1" - "gopkg.in/pg.v5/internal" ) var ( ErrClosed = errors.New("pg: database is closed") ErrPoolTimeout = errors.New("pg: connection pool timeout") - errConnStale = errors.New("connection is stale") + errConnStale = errors.New("pg: connection is stale") ) var timers = sync.Pool{ @@ -36,7 +33,7 @@ type Stats struct { } type Pooler interface { - Get() (*Conn, error) + Get() (*Conn, bool, error) Put(*Conn) error Remove(*Conn, error) error Len() int @@ -46,15 +43,19 @@ type Pooler interface { Closed() bool } -type dialer func() (net.Conn, error) +type Options struct { + Dial func() (net.Conn, error) + OnClose func(*Conn) error -type ConnPool struct { - _dial dialer - DialLimiter *ratelimit.RateLimiter - OnClose func(*Conn) error + PoolSize int + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + MaxAge time.Duration +} - poolTimeout time.Duration - idleTimeout time.Duration +type ConnPool struct { + opt *Options queue chan struct{} @@ -67,41 +68,29 @@ type ConnPool struct { stats Stats _closed int32 // atomic - lastErr atomic.Value } var _ Pooler = (*ConnPool)(nil) -func NewConnPool(dial dialer, poolSize int, poolTimeout, idleTimeout, idleCheckFrequency time.Duration) *ConnPool { +func NewConnPool(opt *Options) *ConnPool { p := &ConnPool{ - _dial: dial, - DialLimiter: ratelimit.New(3*poolSize, time.Second), - - poolTimeout: poolTimeout, - idleTimeout: idleTimeout, + opt: opt, - queue: make(chan struct{}, poolSize), - conns: make([]*Conn, 0, poolSize), - freeConns: make([]*Conn, 0, poolSize), + queue: make(chan struct{}, opt.PoolSize), + conns: make([]*Conn, 0, opt.PoolSize), + freeConns: make([]*Conn, 0, opt.PoolSize), } - if idleTimeout > 0 && idleCheckFrequency > 0 { - go p.reaper(idleCheckFrequency) + + if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 { + go p.reaper(opt.IdleCheckFrequency) } + return p } func (p *ConnPool) dial() (net.Conn, error) { - if p.DialLimiter != nil && p.DialLimiter.Limit() { - err := fmt.Errorf( - "pg: you open connections too fast (last_error=%q)", - p.loadLastErr(), - ) - return nil, err - } - - cn, err := p._dial() + cn, err := p.opt.Dial() if err != nil { - p.storeLastErr(err.Error()) return nil, err } return cn, nil @@ -115,9 +104,25 @@ func (p *ConnPool) NewConn() (*Conn, error) { return NewConn(netConn), nil } +func (p *ConnPool) isStaleConn(cn *Conn) bool { + if p.opt.IdleTimeout == 0 && p.opt.MaxAge == 0 { + return false + } + + now := time.Now() + if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt) >= p.opt.IdleTimeout { + 
return true + } + if p.opt.MaxAge > 0 && now.Sub(cn.InitedAt) >= p.opt.MaxAge { + return true + } + + return false +} + func (p *ConnPool) PopFree() *Conn { timer := timers.Get().(*time.Timer) - if !timer.Reset(p.poolTimeout) { + if !timer.Reset(p.opt.PoolTimeout) { <-timer.C } @@ -152,15 +157,15 @@ func (p *ConnPool) popFree() *Conn { } // Get returns existed connection from the pool or creates a new one. -func (p *ConnPool) Get() (*Conn, error) { +func (p *ConnPool) Get() (*Conn, bool, error) { if p.Closed() { - return nil, ErrClosed + return nil, false, ErrClosed } atomic.AddUint32(&p.stats.Requests, 1) timer := timers.Get().(*time.Timer) - if !timer.Reset(p.poolTimeout) { + if !timer.Reset(p.opt.PoolTimeout) { <-timer.C } @@ -170,35 +175,38 @@ func (p *ConnPool) Get() (*Conn, error) { case <-timer.C: timers.Put(timer) atomic.AddUint32(&p.stats.Timeouts, 1) - return nil, ErrPoolTimeout + return nil, false, ErrPoolTimeout } - p.freeConnsMu.Lock() - cn := p.popFree() - p.freeConnsMu.Unlock() + for { + p.freeConnsMu.Lock() + cn := p.popFree() + p.freeConnsMu.Unlock() - if cn != nil { - atomic.AddUint32(&p.stats.Hits, 1) - if !cn.IsStale(p.idleTimeout) { - return cn, nil + if cn == nil { + break } - _ = p.closeConn(cn, errConnStale) + + if p.isStaleConn(cn) { + p.remove(cn, errConnStale) + continue + } + + atomic.AddUint32(&p.stats.Hits, 1) + return cn, false, nil } newcn, err := p.NewConn() if err != nil { <-p.queue - return nil, err + return nil, false, err } p.connsMu.Lock() - if cn != nil { - p.removeConn(cn) - } p.conns = append(p.conns, newcn) p.connsMu.Unlock() - return newcn, nil + return newcn, true, nil } func (p *ConnPool) Put(cn *Conn) error { @@ -223,17 +231,13 @@ func (p *ConnPool) remove(cn *Conn, reason error) { _ = p.closeConn(cn, reason) p.connsMu.Lock() - p.removeConn(cn) - p.connsMu.Unlock() -} - -func (p *ConnPool) removeConn(cn *Conn) { for i, c := range p.conns { if c == cn { p.conns = append(p.conns[:i], p.conns[i+1:]...) break } } + p.connsMu.Unlock() } // Len returns total number of connections. 
@@ -253,13 +257,13 @@ func (p *ConnPool) FreeLen() int { } func (p *ConnPool) Stats() *Stats { - stats := Stats{} - stats.Requests = atomic.LoadUint32(&p.stats.Requests) - stats.Hits = atomic.LoadUint32(&p.stats.Hits) - stats.Timeouts = atomic.LoadUint32(&p.stats.Timeouts) - stats.TotalConns = uint32(p.Len()) - stats.FreeConns = uint32(p.FreeLen()) - return &stats + return &Stats{ + Requests: atomic.LoadUint32(&p.stats.Requests), + Hits: atomic.LoadUint32(&p.stats.Hits), + Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + TotalConns: uint32(p.Len()), + FreeConns: uint32(p.FreeLen()), + } } func (p *ConnPool) Closed() bool { @@ -292,9 +296,8 @@ func (p *ConnPool) Close() (retErr error) { } func (p *ConnPool) closeConn(cn *Conn, reason error) error { - p.storeLastErr(reason.Error()) - if p.OnClose != nil { - _ = p.OnClose(cn) + if p.opt.OnClose != nil { + _ = p.opt.OnClose(cn) } return cn.Close() } @@ -305,7 +308,7 @@ func (p *ConnPool) reapStaleConn() bool { } cn := p.freeConns[0] - if !cn.IsStale(p.idleTimeout) { + if !p.isStaleConn(cn) { return false } @@ -356,17 +359,6 @@ func (p *ConnPool) reaper(frequency time.Duration) { } } -func (p *ConnPool) storeLastErr(err string) { - p.lastErr.Store(err) -} - -func (p *ConnPool) loadLastErr() string { - if v := p.lastErr.Load(); v != nil { - return v.(string) - } - return "" -} - //------------------------------------------------------------------------------ var idleCheckFrequency atomic.Value diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/safe.go b/vendor/gopkg.in/pg.v5/internal/safe.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/safe.go rename to vendor/gopkg.in/pg.v5/internal/safe.go diff --git a/vendor/gopkg.in/pg.v5/internal/underscore.go b/vendor/gopkg.in/pg.v5/internal/underscore.go new file mode 100644 index 0000000..4df2681 --- /dev/null +++ b/vendor/gopkg.in/pg.v5/internal/underscore.go @@ -0,0 +1,73 @@ +package internal + +func isUpper(c byte) bool { + return c >= 'A' && c <= 'Z' +} + +func isLower(c byte) bool { + return !isUpper(c) +} + +func toUpper(c byte) byte { + return c - 32 +} + +func toLower(c byte) byte { + return c + 32 +} + +// Underscore converts "CamelCasedString" to "camel_cased_string". 
+func Underscore(s string) string { + r := make([]byte, 0, len(s)) + for i := 0; i < len(s); i++ { + c := s[i] + if isUpper(c) { + if i > 0 && i+1 < len(s) && (isLower(s[i-1]) || isLower(s[i+1])) { + r = append(r, '_', toLower(c)) + } else { + r = append(r, toLower(c)) + } + } else { + r = append(r, c) + } + } + return string(r) +} + +func ToUpper(s string) string { + if isUpperString(s) { + return s + } + + b := make([]byte, len(s)) + for i := range b { + c := s[i] + if c >= 'a' && c <= 'z' { + c -= 'a' - 'A' + } + b[i] = c + } + return string(b) +} + +func isUpperString(s string) bool { + for i := 0; i < len(s); i++ { + c := s[i] + if c >= 'a' && c <= 'z' { + return false + } + } + return true +} + +func ToExported(s string) string { + if len(s) == 0 { + return s + } + if c := s[0]; isLower(c) { + b := []byte(s) + b[0] = toUpper(c) + return string(b) + } + return s +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/unsafe.go b/vendor/gopkg.in/pg.v5/internal/unsafe.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/unsafe.go rename to vendor/gopkg.in/pg.v5/internal/unsafe.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/internal/util.go b/vendor/gopkg.in/pg.v5/internal/util.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/internal/util.go rename to vendor/gopkg.in/pg.v5/internal/util.go diff --git a/vendor/gopkg.in/pg.v5/listener.go b/vendor/gopkg.in/pg.v5/listener.go new file mode 100644 index 0000000..f5372f7 --- /dev/null +++ b/vendor/gopkg.in/pg.v5/listener.go @@ -0,0 +1,172 @@ +package pg + +import ( + "sync" + "time" + + "gopkg.in/pg.v5/internal" + "gopkg.in/pg.v5/internal/pool" +) + +// A notification received with LISTEN command. +type Notification struct { + Channel string + Payload string +} + +// Listener listens for notifications sent with NOTIFY command. +// It's NOT safe for concurrent use by multiple goroutines +// except the Channel API. +type Listener struct { + db *DB + + channels []string + + mu sync.Mutex + _cn *pool.Conn + closed bool +} + +func (ln *Listener) conn(readTimeout time.Duration) (*pool.Conn, error) { + defer ln.mu.Unlock() + ln.mu.Lock() + + if ln.closed { + return nil, errListenerClosed + } + + if ln._cn == nil { + cn, err := ln.db.conn() + if err != nil { + return nil, err + } + ln._cn = cn + + if len(ln.channels) > 0 { + if err := ln.listen(cn, ln.channels...); err != nil { + return nil, err + } + } + } + + ln._cn.SetReadWriteTimeout(readTimeout, ln.db.opt.WriteTimeout) + return ln._cn, nil +} + +// Channel returns a channel for concurrently receiving notifications. +// The channel is closed with Listener. +func (ln *Listener) Channel() <-chan *Notification { + ch := make(chan *Notification, 100) + go func() { + for { + channel, payload, err := ln.ReceiveTimeout(5 * time.Second) + if err != nil { + if err == errListenerClosed { + break + } + continue + } + ch <- &Notification{channel, payload} + } + close(ch) + }() + return ch +} + +// Listen starts listening for notifications on channels. +func (ln *Listener) Listen(channels ...string) error { + cn, err := ln.conn(ln.db.opt.ReadTimeout) + if err != nil { + return err + } + + if err := ln.listen(cn, channels...); err != nil { + if err != nil { + ln.freeConn(err) + } + return err + } + + ln.channels = appendIfNotExists(ln.channels, channels...) 
+ return nil +} + +func (ln *Listener) listen(cn *pool.Conn, channels ...string) error { + for _, channel := range channels { + if err := writeQueryMsg(cn.Wr, ln.db, "LISTEN ?", F(channel)); err != nil { + return err + } + } + return cn.Wr.Flush() +} + +// Receive indefinitely waits for a notification. +func (ln *Listener) Receive() (channel string, payload string, err error) { + return ln.ReceiveTimeout(0) +} + +// ReceiveTimeout waits for a notification until timeout is reached. +func (ln *Listener) ReceiveTimeout(timeout time.Duration) (channel, payload string, err error) { + channel, payload, err = ln.receiveTimeout(timeout) + if err != nil { + ln.freeConn(err) + } + return channel, payload, err +} + +func (ln *Listener) receiveTimeout(readTimeout time.Duration) (channel, payload string, err error) { + cn, err := ln.conn(readTimeout) + if err != nil { + return "", "", err + } + return readNotification(cn) +} + +func (ln *Listener) freeConn(err error) (retErr error) { + if !isBadConn(err, true) { + return nil + } + return ln.closeConn(err) +} + +func (ln *Listener) closeConn(reason error) error { + var firstErr error + + ln.mu.Lock() + if ln._cn != nil { + if !ln.closed { + internal.Logf("pg: discarding bad listener connection: %s", reason) + } + + firstErr = ln.db.pool.Remove(ln._cn, reason) + ln._cn = nil + } + ln.mu.Unlock() + + return firstErr +} + +// Close closes the listener, releasing any open resources. +func (ln *Listener) Close() error { + ln.mu.Lock() + closed := ln.closed + ln.closed = true + ln.mu.Unlock() + if closed { + return errListenerClosed + } + return ln.closeConn(errListenerClosed) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/messages.go b/vendor/gopkg.in/pg.v5/messages.go similarity index 94% rename from Godeps/_workspace/src/gopkg.in/pg.v5/messages.go rename to vendor/gopkg.in/pg.v5/messages.go index 044cef0..988a66f 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/messages.go +++ b/vendor/gopkg.in/pg.v5/messages.go @@ -109,7 +109,7 @@ func enableSSL(cn *pool.Conn, tlsConf *tls.Config) error { return err } - b := make([]byte, 1) + b := cn.Buf[:1] _, err := io.ReadFull(cn.NetConn, b) if err != nil { return err @@ -118,13 +118,7 @@ func enableSSL(cn *pool.Conn, tlsConf *tls.Config) error { return errSSLNotSupported } - if tlsConf == nil { - tlsConf = &tls.Config{ - InsecureSkipVerify: true, - } - } - cn.NetConn = tls.Client(cn.NetConn, tlsConf) - + cn.SetNetConn(tls.Client(cn.NetConn, tlsConf)) return nil } @@ -211,7 +205,7 @@ func md5s(s string) string { return hex.EncodeToString(h.Sum(nil)) } -func writeStartupMsg(buf *pool.Buffer, user, database string) { +func writeStartupMsg(buf *pool.WriteBuffer, user, database string) { buf.StartMessage(0) buf.WriteInt32(196608) buf.WriteString("user") @@ -222,24 +216,24 @@ func writeStartupMsg(buf *pool.Buffer, user, database string) { buf.FinishMessage() } -func writeSSLMsg(buf *pool.Buffer) { +func writeSSLMsg(buf *pool.WriteBuffer) { buf.StartMessage(0) buf.WriteInt32(80877103) buf.FinishMessage() } -func writePasswordMsg(buf *pool.Buffer, password string) { +func writePasswordMsg(buf *pool.WriteBuffer, password string) { buf.StartMessage(passwordMessageMsg) buf.WriteString(password) buf.FinishMessage() } -func writeFlushMsg(buf *pool.Buffer) { +func writeFlushMsg(buf *pool.WriteBuffer) { buf.StartMessage(flushMsg) 
buf.FinishMessage() } -func writeCancelRequestMsg(buf *pool.Buffer, processId, secretKey int32) { +func writeCancelRequestMsg(buf *pool.WriteBuffer, processId, secretKey int32) { buf.StartMessage(0) buf.WriteInt32(80877102) buf.WriteInt32(processId) @@ -247,9 +241,9 @@ func writeCancelRequestMsg(buf *pool.Buffer, processId, secretKey int32) { buf.FinishMessage() } -func writeQueryMsg(buf *pool.Buffer, query interface{}, params ...interface{}) error { +func writeQueryMsg(buf *pool.WriteBuffer, fmter orm.QueryFormatter, query interface{}, params ...interface{}) error { buf.StartMessage(queryMsg) - bytes, err := appendQuery(buf.Bytes, query, params...) + bytes, err := appendQuery(buf.Bytes, fmter, query, params...) if err != nil { buf.Reset() return err @@ -263,23 +257,23 @@ func writeQueryMsg(buf *pool.Buffer, query interface{}, params ...interface{}) e return nil } -func appendQuery(dst []byte, query interface{}, params ...interface{}) ([]byte, error) { +func appendQuery(dst []byte, fmter orm.QueryFormatter, query interface{}, params ...interface{}) ([]byte, error) { switch query := query.(type) { case orm.QueryAppender: return query.AppendQuery(dst, params...) case string: - return orm.Formatter{}.Append(dst, query, params...), nil + return fmter.FormatQuery(dst, query, params...), nil default: return nil, fmt.Errorf("pg: can't append %T", query) } } -func writeSyncMsg(buf *pool.Buffer) { +func writeSyncMsg(buf *pool.WriteBuffer) { buf.StartMessage(syncMsg) buf.FinishMessage() } -func writeParseDescribeSyncMsg(buf *pool.Buffer, name, q string) { +func writeParseDescribeSyncMsg(buf *pool.WriteBuffer, name, q string) { buf.StartMessage(parseMsg) buf.WriteString(name) buf.WriteString(q) @@ -346,7 +340,7 @@ func readParseDescribeSync(cn *pool.Conn) ([][]byte, error) { } // Writes BIND, EXECUTE and SYNC messages. 
-func writeBindExecuteMsg(buf *pool.Buffer, name string, params ...interface{}) error { +func writeBindExecuteMsg(buf *pool.WriteBuffer, name string, params ...interface{}) error { const paramLenWidth = 4 buf.StartMessage(bindMsg) @@ -412,7 +406,7 @@ func readBindMsg(cn *pool.Conn) error { } } -func writeCloseMsg(buf *pool.Buffer, name string) { +func writeCloseMsg(buf *pool.WriteBuffer, name string) { buf.StartMessage(closeMsg) buf.WriteByte('S') buf.WriteString(name) @@ -633,9 +627,14 @@ func readDataRow(cn *pool.Conn, scanner orm.ColumnScanner, columns [][]byte) (re func newModel(mod interface{}) (orm.Model, error) { m, ok := mod.(orm.Model) if ok { - return m, nil + return m, m.Reset() } - return orm.NewModel(mod) + + m, err := orm.NewModel(mod) + if err != nil { + return nil, err + } + return m, m.Reset() } func readSimpleQueryData( @@ -660,7 +659,7 @@ func readSimpleQueryData( if err != nil { return nil, nil, err } - case dataRowMsg: + if model == nil { var err error model, err = newModel(mod) @@ -669,7 +668,7 @@ func readSimpleQueryData( model = Discard } } - + case dataRowMsg: m := model.NewModel() if err := readDataRow(cn, m, cn.Columns); err != nil { setErr(err) @@ -901,14 +900,14 @@ func readCopyData(cn *pool.Conn, w io.Writer) (*types.Result, error) { } } -func writeCopyData(buf *pool.Buffer, r io.Reader) (int64, error) { +func writeCopyData(buf *pool.WriteBuffer, r io.Reader) (int64, error) { buf.StartMessage(copyDataMsg) n, err := buf.ReadFrom(r) buf.FinishMessage() return n, err } -func writeCopyDone(buf *pool.Buffer) { +func writeCopyDone(buf *pool.WriteBuffer) { buf.StartMessage(copyDoneMsg) buf.FinishMessage() } @@ -1005,7 +1004,7 @@ var terminateMessage = []byte{terminateMsg, 0, 0, 0, 4} func terminateConn(cn *pool.Conn) error { // Don't use cn.Buf because it is racy with user code. - _, err := cn.Write(terminateMessage) + _, err := cn.NetConn.Write(terminateMessage) return err } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/options.go b/vendor/gopkg.in/pg.v5/options.go similarity index 72% rename from Godeps/_workspace/src/gopkg.in/pg.v5/options.go rename to vendor/gopkg.in/pg.v5/options.go index 9772086..2dc9f7d 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/options.go +++ b/vendor/gopkg.in/pg.v5/options.go @@ -46,16 +46,26 @@ type Options struct { // Maximum number of socket connections. // Default is 20 connections. PoolSize int - // Amount of time client waits for free connection if all + // Time for which client waits for free connection if all // connections are busy before returning an error. // Default is 5 seconds. PoolTimeout time.Duration - // Amount of time after which client closes idle connections. + // Time after which client closes idle connections. // Default is to not close idle connections. IdleTimeout time.Duration + // Connection age at which client retires (closes) the connection. + // Primarily useful with proxies like HAProxy. + // Default is to not close aged connections. + MaxAge time.Duration // Frequency of idle checks. // Default is 1 minute. IdleCheckFrequency time.Duration + + // When true Tx does not issue BEGIN, COMMIT, or ROLLBACK. + // Also underlying database connection is immediately returned to the pool. + // This is primarily useful for running your database tests in one big + // transaction, because PostgreSQL does not support nested transactions. 
+ DisableTransaction bool } func (opt *Options) init() { @@ -105,15 +115,14 @@ func (opt *Options) getDialer() func() (net.Conn, error) { } func newConnPool(opt *Options) *pool.ConnPool { - p := pool.NewConnPool( - opt.getDialer(), - opt.PoolSize, - opt.PoolTimeout, - opt.IdleTimeout, - opt.IdleCheckFrequency, - ) - p.OnClose = func(cn *pool.Conn) error { - return terminateConn(cn) - } - return p + return pool.NewConnPool(&pool.Options{ + Dial: opt.getDialer(), + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + OnClose: func(cn *pool.Conn) error { + return terminateConn(cn) + }, + }) } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/count_estimate.go b/vendor/gopkg.in/pg.v5/orm/count_estimate.go similarity index 75% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/count_estimate.go rename to vendor/gopkg.in/pg.v5/orm/count_estimate.go index 4672789..758b1b3 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/count_estimate.go +++ b/vendor/gopkg.in/pg.v5/orm/count_estimate.go @@ -8,11 +8,11 @@ import ( ) // Placeholder that is replaced with count(*). -const placeholder = "2147483647" +const placeholder = `'_go_pg_placeholder'` // https://wiki.postgresql.org/wiki/Count_estimate var pgCountEstimateFunc = fmt.Sprintf(` -CREATE OR REPLACE FUNCTION _go_pg_count_estimate(query text, threshold int) +CREATE OR REPLACE FUNCTION _go_pg_count_estimate_v2(query text, threshold int) RETURNS int AS $$ DECLARE rec record; @@ -22,13 +22,20 @@ BEGIN nrows := substring(rec."QUERY PLAN" FROM ' rows=(\d+)'); EXIT WHEN nrows IS NOT NULL; END LOOP; + -- Return the estimation if there are too many rows. IF nrows > threshold THEN RETURN nrows; END IF; + -- Otherwise execute real count query. - query := replace(query, 'SELECT %s', 'SELECT count(*)'); + query := replace(query, 'SELECT '%s'', 'SELECT count(*)'); EXECUTE query INTO nrows; + + IF nrows IS NULL THEN + nrows := 0; + END IF; + RETURN nrows; END; $$ LANGUAGE plpgsql; @@ -45,16 +52,7 @@ func (q *Query) CountEstimate(threshold int) (int, error) { return 0, q.stickyErr } - q = q.copy() - q.columns = []FormatAppender{queryParams{query: placeholder}} - q.order = nil - q.limit = 0 - q.offset = 0 - - sel := selectQuery{ - Query: q, - } - query, err := sel.AppendQuery(nil) + query, err := q.countSelectQuery(placeholder).AppendQuery(nil) if err != nil { return 0, err } @@ -63,11 +61,12 @@ func (q *Query) CountEstimate(threshold int) (int, error) { var count int _, err = q.db.QueryOne( Scan(&count), - "SELECT _go_pg_count_estimate(?, ?)", + "SELECT _go_pg_count_estimate_v2(?, ?)", string(query), threshold, ) if err != nil { if pgerr, ok := err.(internal.PGError); ok && pgerr.Field('C') == "42883" { + // undefined_function if err := q.createCountEstimateFunc(); err != nil { return 0, err } @@ -77,7 +76,7 @@ func (q *Query) CountEstimate(threshold int) (int, error) { return count, err } - panic("not reached") + return 0, err } func (q *Query) createCountEstimateFunc() error { @@ -85,9 +84,9 @@ func (q *Query) createCountEstimateFunc() error { return err } -// SelectAndCountEstimate runs Select and CountEstimate in two separate goroutines, +// SelectAndCountEstimate runs Select and CountEstimate in two goroutines, // waits for them to finish and returns the result. 
-func (q *Query) SelectAndCountEstimate(threshold int) (count int, err error) { +func (q *Query) SelectAndCountEstimate(threshold int, values ...interface{}) (count int, err error) { if q.stickyErr != nil { return 0, q.stickyErr } @@ -97,7 +96,7 @@ func (q *Query) SelectAndCountEstimate(threshold int) (count int, err error) { go func() { defer wg.Done() - if e := q.Select(); e != nil { + if e := q.Select(values...); e != nil { err = e } }() diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/create_table.go b/vendor/gopkg.in/pg.v5/orm/create_table.go similarity index 80% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/create_table.go rename to vendor/gopkg.in/pg.v5/orm/create_table.go index fd1e5df..b408f58 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/create_table.go +++ b/vendor/gopkg.in/pg.v5/orm/create_table.go @@ -7,7 +7,9 @@ import ( "gopkg.in/pg.v5/types" ) -type CreateTableOptions struct{} +type CreateTableOptions struct { + Temp bool +} func CreateTable(db DB, model interface{}, opt *CreateTableOptions) (*types.Result, error) { return db.Exec(createTableQuery{model: model, opt: opt}) @@ -30,7 +32,11 @@ func (c createTableQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, table := Tables.Get(typ) - b = append(b, "CREATE TABLE "...) + b = append(b, "CREATE "...) + if c.opt != nil && c.opt.Temp { + b = append(b, "TEMP "...) + } + b = append(b, "TABLE "...) b = append(b, table.Name...) b = append(b, " ("...) @@ -38,6 +44,12 @@ func (c createTableQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, b = append(b, field.SQLName...) b = append(b, " "...) b = append(b, field.SQLType...) + if field.Has(NotNullFlag) { + b = append(b, " NOT NULL"...) + } + if field.Has(UniqueFlag) { + b = append(b, " UNIQUE"...) + } if i != len(table.Fields)-1 { b = append(b, ", "...) diff --git a/vendor/gopkg.in/pg.v5/orm/delete.go b/vendor/gopkg.in/pg.v5/orm/delete.go new file mode 100644 index 0000000..2e2a4ea --- /dev/null +++ b/vendor/gopkg.in/pg.v5/orm/delete.go @@ -0,0 +1,47 @@ +package orm + +import "gopkg.in/pg.v5/internal" + +func Delete(db DB, model interface{}) error { + res, err := NewQuery(db, model).Delete() + if err != nil { + return err + } + return internal.AssertOneRow(res.RowsAffected()) +} + +type deleteQuery struct { + *Query +} + +var _ QueryAppender = (*deleteQuery)(nil) + +func (q deleteQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error) { + var err error + + if len(q.with) > 0 { + b, err = q.appendWith(b) + if err != nil { + return nil, err + } + } + + b = append(b, "DELETE FROM "...) + b = q.appendFirstTable(b) + + if q.hasOtherTables() { + b = append(b, " USING "...) 
+ b = q.appendOtherTables(b) + } + + b, err = q.mustAppendWhere(b) + if err != nil { + return nil, err + } + + if len(q.returning) > 0 { + b = q.appendReturning(b) + } + + return b, nil +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/field.go b/vendor/gopkg.in/pg.v5/orm/field.go similarity index 99% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/field.go rename to vendor/gopkg.in/pg.v5/orm/field.go index 8578e71..5be8b8a 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/field.go +++ b/vendor/gopkg.in/pg.v5/orm/field.go @@ -10,6 +10,7 @@ const ( PrimaryKeyFlag = 1 << iota ForeignKeyFlag NotNullFlag + UniqueFlag ) type Field struct { diff --git a/vendor/gopkg.in/pg.v5/orm/format.go b/vendor/gopkg.in/pg.v5/orm/format.go new file mode 100644 index 0000000..3afe959 --- /dev/null +++ b/vendor/gopkg.in/pg.v5/orm/format.go @@ -0,0 +1,236 @@ +package orm + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "gopkg.in/pg.v5/internal/parser" + "gopkg.in/pg.v5/types" +) + +type FormatAppender interface { + AppendFormat([]byte, QueryFormatter) []byte +} + +type sepFormatAppender interface { + FormatAppender + AppendSep([]byte) []byte +} + +//------------------------------------------------------------------------------ + +type queryParamsAppender struct { + query string + params []interface{} +} + +var _ FormatAppender = (*queryParamsAppender)(nil) + +func Q(query string, params ...interface{}) FormatAppender { + return queryParamsAppender{query, params} +} + +func (q queryParamsAppender) AppendFormat(b []byte, f QueryFormatter) []byte { + return f.FormatQuery(b, q.query, q.params...) +} + +//------------------------------------------------------------------------------ + +type whereAppender struct { + conj string + query string + params []interface{} +} + +var _ FormatAppender = (*whereAppender)(nil) + +func (q whereAppender) AppendSep(b []byte) []byte { + return append(b, q.conj...) +} + +func (q whereAppender) AppendFormat(b []byte, f QueryFormatter) []byte { + b = append(b, '(') + b = f.FormatQuery(b, q.query, q.params...) 
+ b = append(b, ')') + return b +} + +//------------------------------------------------------------------------------ + +type fieldAppender struct { + field string +} + +var _ FormatAppender = (*fieldAppender)(nil) + +func (a fieldAppender) AppendFormat(b []byte, f QueryFormatter) []byte { + return types.AppendField(b, a.field, 1) +} + +//------------------------------------------------------------------------------ + +type Formatter struct { + namedParams map[string]interface{} +} + +func (f Formatter) String() string { + if len(f.namedParams) == 0 { + return "" + } + + var keys []string + for k, _ := range f.namedParams { + keys = append(keys, k) + } + sort.Strings(keys) + + var ss []string + for _, k := range keys { + ss = append(ss, fmt.Sprintf("%s=%v", k, f.namedParams[k])) + } + return " " + strings.Join(ss, " ") +} + +func (f Formatter) Copy() Formatter { + var cp Formatter + for param, value := range f.namedParams { + cp.SetParam(param, value) + } + return cp +} + +func (f *Formatter) SetParam(param string, value interface{}) { + if f.namedParams == nil { + f.namedParams = make(map[string]interface{}) + } + f.namedParams[param] = value +} + +func (f *Formatter) WithParam(param string, value interface{}) Formatter { + cp := f.Copy() + cp.SetParam(param, value) + return cp +} + +func (f Formatter) Append(dst []byte, src string, params ...interface{}) []byte { + if (params == nil && f.namedParams == nil) || strings.IndexByte(src, '?') == -1 { + return append(dst, src...) + } + return f.append(dst, parser.NewString(src), params) +} + +func (f Formatter) AppendBytes(dst, src []byte, params ...interface{}) []byte { + if (params == nil && f.namedParams == nil) || bytes.IndexByte(src, '?') == -1 { + return append(dst, src...) + } + return f.append(dst, parser.New(src), params) +} + +func (f Formatter) FormatQuery(dst []byte, query string, params ...interface{}) []byte { + return f.Append(dst, query, params...) +} + +func (f Formatter) append(dst []byte, p *parser.Parser, params []interface{}) []byte { + var paramsIndex int + var namedParams *tableParams + var namedParamsInit bool + var model tableModel + + if len(params) > 0 { + var ok bool + model, ok = params[len(params)-1].(tableModel) + if ok { + params = params[:len(params)-1] + } + } + + for p.Valid() { + b, ok := p.ReadSep('?') + if !ok { + dst = append(dst, b...) + continue + } + if len(b) > 0 && b[len(b)-1] == '\\' { + dst = append(dst, b[:len(b)-1]...) + dst = append(dst, '?') + continue + } + dst = append(dst, b...) + + if id, numeric := p.ReadIdentifier(); id != "" { + if numeric { + idx, err := strconv.Atoi(id) + if err != nil { + goto restore_param + } + + if idx >= len(params) { + goto restore_param + } + + dst = f.appendParam(dst, params[idx]) + continue + } + + if f.namedParams != nil { + if param, ok := f.namedParams[id]; ok { + dst = f.appendParam(dst, param) + continue + } + } + + if !namedParamsInit && len(params) > 0 { + namedParams, ok = newTableParams(params[len(params)-1]) + if ok { + params = params[:len(params)-1] + } + namedParamsInit = true + } + + if namedParams != nil { + dst, ok = namedParams.AppendParam(dst, id) + if ok { + continue + } + } + + if model != nil { + dst, ok = model.AppendParam(dst, id) + if ok { + continue + } + } + + restore_param: + dst = append(dst, '?') + dst = append(dst, id...) 
+ continue + } + + if paramsIndex >= len(params) { + dst = append(dst, '?') + continue + } + + param := params[paramsIndex] + paramsIndex++ + + if fa, ok := param.(FormatAppender); ok { + dst = fa.AppendFormat(dst, f) + } else { + dst = types.Append(dst, param, 1) + } + } + + return dst +} + +func (f Formatter) appendParam(b []byte, param interface{}) []byte { + if fa, ok := param.(FormatAppender); ok { + return fa.AppendFormat(b, f) + } + return types.Append(b, param, 1) +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/hook.go b/vendor/gopkg.in/pg.v5/orm/hook.go similarity index 97% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/hook.go rename to vendor/gopkg.in/pg.v5/orm/hook.go index 1cb9f23..ff6fc86 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/hook.go +++ b/vendor/gopkg.in/pg.v5/orm/hook.go @@ -14,7 +14,7 @@ const ( ) func callHookSlice(slice reflect.Value, ptr bool, db DB, hook func(reflect.Value, DB) error) error { - var retErr error + var firstErr error for i := 0; i < slice.Len(); i++ { var err error if ptr { @@ -22,11 +22,11 @@ func callHookSlice(slice reflect.Value, ptr bool, db DB, hook func(reflect.Value } else { err = hook(slice.Index(i).Addr(), db) } - if err != nil && retErr == nil { - retErr = err + if err != nil && firstErr == nil { + firstErr = err } } - return retErr + return firstErr } type afterQueryHook interface { diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/insert.go b/vendor/gopkg.in/pg.v5/orm/insert.go similarity index 97% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/insert.go rename to vendor/gopkg.in/pg.v5/orm/insert.go index 667c334..745da9c 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/insert.go +++ b/vendor/gopkg.in/pg.v5/orm/insert.go @@ -85,7 +85,7 @@ func (q insertQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error } func onConflictDoUpdate(b []byte) bool { - return bytes.LastIndex(b, []byte(" DO UPDATE")) >= 0 + return bytes.HasSuffix(b, []byte(" DO UPDATE")) } func (q *insertQuery) appendValues(b []byte, fields []*Field, v reflect.Value) []byte { diff --git a/vendor/gopkg.in/pg.v5/orm/join.go b/vendor/gopkg.in/pg.v5/orm/join.go new file mode 100644 index 0000000..1b0373e --- /dev/null +++ b/vendor/gopkg.in/pg.v5/orm/join.go @@ -0,0 +1,268 @@ +package orm + +import "gopkg.in/pg.v5/types" + +type join struct { + Parent *join + BaseModel tableModel + JoinModel tableModel + Rel *Relation + ApplyQuery func(*Query) (*Query, error) + + Columns []string +} + +func (j *join) Select(db DB) error { + switch j.Rel.Type { + case HasManyRelation: + return j.selectMany(db) + case Many2ManyRelation: + return j.selectM2M(db) + } + panic("not reached") +} + +func (j *join) selectMany(db DB) error { + q, err := j.manyQuery(db) + if err != nil { + return err + } + + err = q.Select() + if err != nil { + return err + } + + return nil +} + +func (j *join) manyQuery(db DB) (*Query, error) { + root := j.JoinModel.Root() + index := j.JoinModel.ParentIndex() + + manyModel := newManyModel(j) + q := NewQuery(db, manyModel) + if j.ApplyQuery != nil { + var err error + q, err = j.ApplyQuery(q) + if err != nil { + return nil, err + } + } + + q.columns = append(q.columns, hasManyColumnsAppender{j}) + + baseTable := j.BaseModel.Table() + cols := columns(j.JoinModel.Table().Alias, "", j.Rel.FKs) + vals := values(root, index, baseTable.PKs) + q = q.Where(`(?) IN (?)`, types.Q(cols), types.Q(vals)) + + if j.Rel.Polymorphic { + q = q.Where( + `? 
IN (?, ?)`, + types.F(j.Rel.BasePrefix+"type"), + baseTable.ModelName, baseTable.TypeName, + ) + } + + return q, nil +} + +func (j *join) selectM2M(db DB) error { + q, err := j.m2mQuery(db) + if err != nil { + return err + } + + err = q.Select() + if err != nil { + return err + } + + return nil +} + +func (j *join) m2mQuery(db DB) (*Query, error) { + index := j.JoinModel.ParentIndex() + + baseTable := j.BaseModel.Table() + m2mCols := columns(j.Rel.M2MTableName, j.Rel.BasePrefix, baseTable.PKs) + m2mVals := values(j.BaseModel.Root(), index, baseTable.PKs) + + m2mModel := newM2MModel(j) + q := NewQuery(db, m2mModel) + if j.ApplyQuery != nil { + var err error + q, err = j.ApplyQuery(q) + if err != nil { + return nil, err + } + } + + q.columns = append(q.columns, hasManyColumnsAppender{j}) + q = q.Join( + "JOIN ? ON (?) IN (?)", + j.Rel.M2MTableName, + types.Q(m2mCols), types.Q(m2mVals), + ) + + joinAlias := j.JoinModel.Table().Alias + for _, pk := range j.JoinModel.Table().PKs { + q = q.Where( + "?.? = ?.?", + joinAlias, pk.ColName, + j.Rel.M2MTableName, types.F(j.Rel.JoinPrefix+pk.SQLName), + ) + } + + return q, nil +} + +func (j *join) hasParent() bool { + if j.Parent != nil { + switch j.Parent.Rel.Type { + case HasOneRelation, BelongsToRelation: + return true + } + } + return false +} + +func (j *join) appendAlias(b []byte) []byte { + b = append(b, '"') + b = appendAlias(b, j, true) + b = append(b, '"') + return b +} + +func (j *join) appendAliasColumn(b []byte, column string) []byte { + b = append(b, '"') + b = appendAlias(b, j, true) + b = append(b, "__"...) + b = types.AppendField(b, column, 2) + b = append(b, '"') + return b +} + +func (j *join) appendBaseAlias(b []byte) []byte { + if j.hasParent() { + b = append(b, '"') + b = appendAlias(b, j.Parent, true) + b = append(b, '"') + return b + } + return append(b, j.BaseModel.Table().Alias...) +} + +func appendAlias(b []byte, j *join, topLevel bool) []byte { + if j.hasParent() { + b = appendAlias(b, j.Parent, topLevel) + topLevel = false + } + if !topLevel { + b = append(b, "__"...) + } + b = append(b, j.Rel.Field.SQLName...) + return b +} + +func (j *join) appendHasOneColumns(b []byte) []byte { + if j.Columns == nil { + for _, f := range j.JoinModel.Table().Fields { + b = append(b, ", "...) + b = j.appendAlias(b) + b = append(b, '.') + b = append(b, f.ColName...) + b = append(b, " AS "...) + b = j.appendAliasColumn(b, f.SQLName) + } + return b + } + + for _, column := range j.Columns { + b = append(b, ", "...) + b = j.appendAlias(b) + b = append(b, '.') + b = types.AppendField(b, column, 1) + b = append(b, " AS "...) + b = j.appendAliasColumn(b, column) + } + + return b +} + +func (j *join) appendHasOneJoin(b []byte) []byte { + b = append(b, "LEFT JOIN "...) + b = append(b, j.JoinModel.Table().Name...) + b = append(b, " AS "...) + b = j.appendAlias(b) + + b = append(b, " ON "...) + if j.Rel.Type == HasOneRelation { + joinTable := j.Rel.JoinTable + for i, fk := range j.Rel.FKs { + if i > 0 { + b = append(b, " AND "...) + } + b = j.appendAlias(b) + b = append(b, '.') + b = append(b, joinTable.PKs[i].ColName...) + b = append(b, " = "...) + b = j.appendBaseAlias(b) + b = append(b, '.') + b = append(b, fk.ColName...) + } + } else { + baseTable := j.BaseModel.Table() + for i, fk := range j.Rel.FKs { + if i > 0 { + b = append(b, " AND "...) + } + b = j.appendAlias(b) + b = append(b, '.') + b = append(b, fk.ColName...) + b = append(b, " = "...) + b = j.appendBaseAlias(b) + b = append(b, '.') + b = append(b, baseTable.PKs[i].ColName...) 
+ } + } + + return b +} + +type hasManyColumnsAppender struct { + *join +} + +func (q hasManyColumnsAppender) AppendFormat(b []byte, f QueryFormatter) []byte { + if q.Rel.M2MTableName != "" { + b = append(b, q.Rel.M2MTableName...) + b = append(b, ".*, "...) + } + + joinTable := q.JoinModel.Table() + + if q.Columns == nil { + for i, f := range joinTable.Fields { + if i > 0 { + b = append(b, ", "...) + } + b = append(b, joinTable.Alias...) + b = append(b, '.') + b = append(b, f.ColName...) + } + return b + } + + for i, column := range q.Columns { + if i > 0 { + b = append(b, ", "...) + } + b = append(b, joinTable.Alias...) + b = append(b, '.') + b = types.AppendField(b, column, 1) + } + + return b +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/kinds.go b/vendor/gopkg.in/pg.v5/orm/kinds.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/kinds.go rename to vendor/gopkg.in/pg.v5/orm/kinds.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model.go b/vendor/gopkg.in/pg.v5/orm/model.go similarity index 84% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model.go rename to vendor/gopkg.in/pg.v5/orm/model.go index 0dbde29..9e6c999 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model.go +++ b/vendor/gopkg.in/pg.v5/orm/model.go @@ -17,9 +17,18 @@ type useQueryOne interface { } type Model interface { - Collection ColumnScanner + // Reset resets model state. + Reset() error + + // NewModel returns ColumnScanner that is used to scan columns + // from the current row. + NewModel() ColumnScanner + + // AddModel adds ColumnScanner to the Collection. + AddModel(ColumnScanner) error + AfterQuery(DB) error AfterSelect(DB) error diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_discard.go b/vendor/gopkg.in/pg.v5/orm/model_discard.go similarity index 89% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_discard.go rename to vendor/gopkg.in/pg.v5/orm/model_discard.go index 859b95a..dcc0ebb 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_discard.go +++ b/vendor/gopkg.in/pg.v5/orm/model_discard.go @@ -2,8 +2,11 @@ package orm type Discard struct{} -var _ Collection = (*Discard)(nil) -var _ ColumnScanner = (*Discard)(nil) +var _ Model = (*Discard)(nil) + +func (Discard) Reset() error { + return nil +} func (d Discard) NewModel() ColumnScanner { return d diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_scan.go b/vendor/gopkg.in/pg.v5/orm/model_scan.go similarity index 73% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_scan.go rename to vendor/gopkg.in/pg.v5/orm/model_scan.go index a415468..aff255f 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_scan.go +++ b/vendor/gopkg.in/pg.v5/orm/model_scan.go @@ -1,13 +1,16 @@ package orm -import "gopkg.in/pg.v5/types" +import ( + "fmt" + + "gopkg.in/pg.v5/types" +) type valuesModel struct { values []interface{} } -var _ ColumnScanner = valuesModel{} -var _ Collection = valuesModel{} +var _ Model = valuesModel{} func Scan(values ...interface{}) valuesModel { return valuesModel{ @@ -19,6 +22,10 @@ func (valuesModel) useQueryOne() bool { return true } +func (valuesModel) Reset() error { + return nil +} + func (m valuesModel) NewModel() ColumnScanner { return m } @@ -59,6 +66,9 @@ func (valuesModel) AfterDelete(_ DB) error { return nil } -func (m valuesModel) ScanColumn(colIdx int, _ string, b []byte) error { +func (m valuesModel) ScanColumn(colIdx int, colName string, b []byte) error { + if colIdx >= len(m.values) { + return fmt.Errorf("pg: no Scan value for 
column index=%d name=%s", colIdx, colName) + } return types.Scan(m.values[colIdx], b) } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_slice.go b/vendor/gopkg.in/pg.v5/orm/model_slice.go similarity index 87% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_slice.go rename to vendor/gopkg.in/pg.v5/orm/model_slice.go index 4a35347..aaeb023 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_slice.go +++ b/vendor/gopkg.in/pg.v5/orm/model_slice.go @@ -13,6 +13,13 @@ type sliceModel struct { var _ Model = (*sliceModel)(nil) +func (m *sliceModel) Reset() error { + if m.slice.IsValid() && m.slice.Len() > 0 { + m.slice.Set(m.slice.Slice(0, 0)) + } + return nil +} + func (m *sliceModel) NewModel() ColumnScanner { return m } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table.go b/vendor/gopkg.in/pg.v5/orm/model_table.go similarity index 96% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table.go rename to vendor/gopkg.in/pg.v5/orm/model_table.go index 1c31fa0..af946f1 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table.go +++ b/vendor/gopkg.in/pg.v5/orm/model_table.go @@ -10,9 +10,10 @@ type tableModel interface { Model Table() *Table + Relation() *Relation AppendParam([]byte, string) ([]byte, bool) - Join(string, func(*Query) (*Query, error)) *join + Join(string, func(*Query) (*Query, error)) (bool, *join) GetJoin(string) *join GetJoins() []join AddJoin(join) *join diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_m2m.go b/vendor/gopkg.in/pg.v5/orm/model_table_m2m.go similarity index 89% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_m2m.go rename to vendor/gopkg.in/pg.v5/orm/model_table_m2m.go index 3454ce3..b530e50 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_m2m.go +++ b/vendor/gopkg.in/pg.v5/orm/model_table_m2m.go @@ -10,10 +10,9 @@ type m2mModel struct { baseTable *Table rel *Relation - buf []byte - zeroStruct reflect.Value - dstValues map[string][]reflect.Value - columns map[string]string + buf []byte + dstValues map[string][]reflect.Value + columns map[string]string } var _ tableModel = (*m2mModel)(nil) @@ -21,7 +20,7 @@ var _ tableModel = (*m2mModel)(nil) func newM2MModel(join *join) *m2mModel { baseTable := join.BaseModel.Table() joinModel := join.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel.Root(), joinModel.Index(), baseTable.PKs) + dstValues := dstValues(joinModel, baseTable.PKs) m := &m2mModel{ sliceTableModel: joinModel, baseTable: baseTable, @@ -32,7 +31,6 @@ func newM2MModel(join *join) *m2mModel { } if !m.sliceOfPtr { m.strct = reflect.New(m.table.Type).Elem() - m.zeroStruct = reflect.Zero(m.table.Type) } return m } @@ -41,7 +39,7 @@ func (m *m2mModel) NewModel() ColumnScanner { if m.sliceOfPtr { m.strct = reflect.New(m.table.Type).Elem() } else { - m.strct.Set(m.zeroStruct) + m.strct.Set(m.table.zeroStruct) } m.structTableModel.NewModel() return m diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_many.go b/vendor/gopkg.in/pg.v5/orm/model_table_many.go similarity index 87% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_many.go rename to vendor/gopkg.in/pg.v5/orm/model_table_many.go index 7ff22a7..d0bd4c8 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_many.go +++ b/vendor/gopkg.in/pg.v5/orm/model_table_many.go @@ -9,16 +9,15 @@ type manyModel struct { *sliceTableModel rel *Relation - buf []byte - zeroStruct reflect.Value - dstValues map[string][]reflect.Value + buf []byte + dstValues map[string][]reflect.Value } var _ 
tableModel = (*manyModel)(nil) func newManyModel(j *join) *manyModel { joinModel := j.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel.Root(), joinModel.Index(), j.BaseModel.Table().PKs) + dstValues := dstValues(joinModel, j.BaseModel.Table().PKs) m := manyModel{ sliceTableModel: joinModel, rel: j.Rel, @@ -27,7 +26,6 @@ func newManyModel(j *join) *manyModel { } if !m.sliceOfPtr { m.strct = reflect.New(m.table.Type).Elem() - m.zeroStruct = reflect.Zero(m.table.Type) } return &m } @@ -36,7 +34,7 @@ func (m *manyModel) NewModel() ColumnScanner { if m.sliceOfPtr { m.strct = reflect.New(m.table.Type).Elem() } else { - m.strct.Set(m.zeroStruct) + m.strct.Set(m.table.zeroStruct) } m.structTableModel.NewModel() return m diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_slice.go b/vendor/gopkg.in/pg.v5/orm/model_table_slice.go similarity index 84% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_slice.go rename to vendor/gopkg.in/pg.v5/orm/model_table_slice.go index 58eb7e7..463a393 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_slice.go +++ b/vendor/gopkg.in/pg.v5/orm/model_table_slice.go @@ -7,7 +7,6 @@ type sliceTableModel struct { slice reflect.Value sliceOfPtr bool - zeroElem reflect.Value } var _ tableModel = (*sliceTableModel)(nil) @@ -17,16 +16,11 @@ func (m *sliceTableModel) init(sliceType reflect.Type) { case reflect.Ptr, reflect.Interface: m.sliceOfPtr = true } - if !m.sliceOfPtr { - m.zeroElem = reflect.Zero(m.table.Type) - } } -func (sliceTableModel) useQueryOne() bool { - return false -} +func (sliceTableModel) useQueryOne() {} -func (m *sliceTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { +func (m *sliceTableModel) Join(name string, apply func(*Query) (*Query, error)) (bool, *join) { return m.join(m.Value(), name, apply) } @@ -38,13 +32,16 @@ func (m *sliceTableModel) Value() reflect.Value { return m.slice } -func (m *sliceTableModel) NewModel() ColumnScanner { - if !m.strct.IsValid() { +func (m *sliceTableModel) Reset() error { + if m.slice.IsValid() && m.slice.Len() > 0 { m.slice.Set(m.slice.Slice(0, 0)) } + return nil +} +func (m *sliceTableModel) NewModel() ColumnScanner { m.strct = m.nextElem() - m.structTableModel.NewModel() + m.bindChildren() return m } @@ -107,15 +104,24 @@ func (m *sliceTableModel) AfterDelete(db DB) error { func (m *sliceTableModel) nextElem() reflect.Value { if m.slice.Len() < m.slice.Cap() { m.slice.Set(m.slice.Slice(0, m.slice.Len()+1)) - return m.slice.Index(m.slice.Len() - 1) + elem := m.slice.Index(m.slice.Len() - 1) + if m.sliceOfPtr { + if elem.IsNil() { + elem.Set(reflect.New(elem.Type().Elem())) + } + return elem.Elem() + } else { + return elem + } } if m.sliceOfPtr { elem := reflect.New(m.table.Type) m.slice.Set(reflect.Append(m.slice, elem)) return elem.Elem() - } - m.slice.Set(reflect.Append(m.slice, m.zeroElem)) - return m.slice.Index(m.slice.Len() - 1) + } else { + m.slice.Set(reflect.Append(m.slice, m.table.zeroStruct)) + return m.slice.Index(m.slice.Len() - 1) + } } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_struct.go b/vendor/gopkg.in/pg.v5/orm/model_table_struct.go similarity index 86% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_struct.go rename to vendor/gopkg.in/pg.v5/orm/model_table_struct.go index 8f4e9a6..320e462 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/model_table_struct.go +++ b/vendor/gopkg.in/pg.v5/orm/model_table_struct.go @@ -35,8 +35,8 @@ func newStructTableModelValue(v 
reflect.Value) (*structTableModel, error) { if !v.IsValid() { return nil, errors.New("pg: Model(nil)") } - v = reflect.Indirect(v) + v = reflect.Indirect(v) if v.Kind() != reflect.Struct { return nil, fmt.Errorf("pg: Model(unsupported %s)", v.Type()) } @@ -56,14 +56,13 @@ func (m *structTableModel) Table() *Table { return m.table } -func (m *structTableModel) AppendParam(dst []byte, name string) ([]byte, bool) { - if field, ok := m.table.FieldsMap[name]; ok { - dst = field.AppendValue(dst, m.strct, 1) - return dst, true - } +func (m *structTableModel) Relation() *Relation { + return m.rel +} - if method, ok := m.table.Methods[name]; ok { - dst = method.AppendValue(dst, m.strct.Addr(), 1) +func (m *structTableModel) AppendParam(dst []byte, name string) ([]byte, bool) { + dst, ok := m.table.AppendParam(dst, m.strct, name) + if ok { return dst, true } @@ -88,12 +87,30 @@ func (m *structTableModel) ParentIndex() []int { return m.index[:len(m.index)-len(m.rel.Field.Index)] } +func (m *structTableModel) Value() reflect.Value { + return m.strct +} + func (m *structTableModel) Bind(bind reflect.Value) { m.strct = bind.FieldByIndex(m.rel.Field.Index) } -func (m *structTableModel) Value() reflect.Value { - return m.strct +func (m *structTableModel) initStruct(bindChildren bool) { + if m.strct.Kind() == reflect.Interface { + m.strct = m.strct.Elem() + } + if m.strct.Kind() == reflect.Ptr { + if m.strct.IsNil() { + m.strct.Set(reflect.New(m.strct.Type().Elem())) + m.strct = m.strct.Elem() + bindChildren = true + } else { + m.strct = m.strct.Elem() + } + } + if bindChildren { + m.bindChildren() + } } func (m *structTableModel) bindChildren() { @@ -106,8 +123,12 @@ func (m *structTableModel) bindChildren() { } } +func (structTableModel) Reset() error { + return nil +} + func (m *structTableModel) NewModel() ColumnScanner { - m.bindChildren() + m.initStruct(true) return m } @@ -176,7 +197,7 @@ func (m *structTableModel) ScanColumn(colIdx int, colName string, b []byte) erro if ok { return err } - return fmt.Errorf("pg: can't find column %s in model %s", colName, m.table.TypeName) + return fmt.Errorf("pg: can't find column=%s in model=%s", colName, m.table.Type.Name()) } func (m *structTableModel) scanColumn(colIdx int, colName string, b []byte) (bool, error) { @@ -191,23 +212,12 @@ func (m *structTableModel) scanColumn(colIdx int, colName string, b []byte) (boo } field, ok := m.table.FieldsMap[colName] - if ok { - if m.strct.Kind() == reflect.Interface { - m.strct = m.strct.Elem() - } - if m.strct.Kind() == reflect.Ptr { - if m.strct.IsNil() { - m.strct.Set(reflect.New(m.strct.Type().Elem())) - m.strct = m.strct.Elem() - m.bindChildren() - } else { - m.strct = m.strct.Elem() - } - } - return true, field.ScanValue(m.strct, b) + if !ok { + return false, nil } - return false, nil + m.initStruct(false) + return true, field.ScanValue(m.strct, b) } func (m *structTableModel) GetJoin(name string) *join { @@ -229,13 +239,13 @@ func (m *structTableModel) AddJoin(j join) *join { return &m.joins[len(m.joins)-1] } -func (m *structTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { +func (m *structTableModel) Join(name string, apply func(*Query) (*Query, error)) (bool, *join) { return m.join(m.Value(), name, apply) } func (m *structTableModel) join( bind reflect.Value, name string, apply func(*Query) (*Query, error), -) *join { +) (bool, *join) { path := strings.Split(name, ".") index := make([]int, 0, len(path)) @@ -243,6 +253,7 @@ func (m *structTableModel) join( BaseModel: m, JoinModel: m, } + 
var created bool var lastJoin *join var hasColumnName bool @@ -258,23 +269,27 @@ func (m *structTableModel) join( if j := currJoin.JoinModel.GetJoin(name); j != nil { currJoin.BaseModel = j.BaseModel currJoin.JoinModel = j.JoinModel + + created = false lastJoin = j } else { model, err := newTableModelIndex(bind, index, rel) if err != nil { - return nil + return false, nil } currJoin.Parent = lastJoin currJoin.BaseModel = currJoin.JoinModel currJoin.JoinModel = model + + created = true lastJoin = currJoin.BaseModel.AddJoin(currJoin) } } // No joins with such name. if lastJoin == nil { - return nil + return false, nil } if apply != nil { lastJoin.ApplyQuery = apply @@ -291,7 +306,7 @@ func (m *structTableModel) join( } } - return lastJoin + return created, lastJoin } func splitColumn(s string) (string, string) { diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/orm.go b/vendor/gopkg.in/pg.v5/orm/orm.go similarity index 79% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/orm.go rename to vendor/gopkg.in/pg.v5/orm/orm.go index 3d011dc..3797fa9 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/orm.go +++ b/vendor/gopkg.in/pg.v5/orm/orm.go @@ -11,16 +11,6 @@ type ColumnScanner interface { ScanColumn(colIdx int, colName string, b []byte) error } -// Collection is a set of models mapped to database rows. -type Collection interface { - // NewModel returns ColumnScanner that is used to scan columns - // from the current row. - NewModel() ColumnScanner - - // AddModel adds ColumnScanner to the Collection. - AddModel(ColumnScanner) error -} - type QueryAppender interface { AppendQuery(dst []byte, params ...interface{}) ([]byte, error) } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/query.go b/vendor/gopkg.in/pg.v5/orm/query.go similarity index 54% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/query.go rename to vendor/gopkg.in/pg.v5/orm/query.go index 843ce34..04f89b9 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/query.go +++ b/vendor/gopkg.in/pg.v5/orm/query.go @@ -3,6 +3,7 @@ package orm import ( "errors" "fmt" + "strings" "sync" "time" @@ -17,22 +18,22 @@ type withQuery struct { type Query struct { db DB - model tableModel stickyErr error - tableAlias string + model tableModel + ignoreModel bool + with []withQuery - tables []string - fields []string + tables []FormatAppender columns []FormatAppender - rels map[string]func(*Query) (*Query, error) - set []queryParams - where []FormatAppender + set []FormatAppender + where []sepFormatAppender joins []FormatAppender - group []queryParams - order []queryParams + group []FormatAppender + having []queryParamsAppender + order []FormatAppender onConflict FormatAppender - returning []queryParams + returning []queryParamsAppender limit int offset int } @@ -41,9 +42,41 @@ func NewQuery(db DB, model ...interface{}) *Query { return (&Query{}).DB(db).Model(model...) } -func (q *Query) copy() *Query { - cp := *q - return &cp +// New returns new zero Query binded to the current db and model. +func (q *Query) New() *Query { + return &Query{ + db: q.db, + model: q.model, + ignoreModel: true, + } +} + +// Copy returns copy of the Query. 
+func (q *Query) Copy() *Query { + copy := &Query{ + db: q.db, + stickyErr: q.stickyErr, + + model: q.model, + ignoreModel: q.ignoreModel, + + tables: q.tables[:], + columns: q.columns[:], + set: q.set[:], + where: q.where[:], + joins: q.joins[:], + group: q.group[:], + having: q.having[:], + order: q.order[:], + onConflict: q.onConflict, + returning: q.returning[:], + limit: q.limit, + offset: q.offset, + } + for _, with := range q.with { + copy = copy.With(with.name, with.query.Copy()) + } + return copy } func (q *Query) err(err error) *Query { @@ -55,6 +88,9 @@ func (q *Query) err(err error) *Query { func (q *Query) DB(db DB) *Query { q.db = db + for _, with := range q.with { + with.query.db = db + } return q } @@ -64,88 +100,158 @@ func (q *Query) Model(model ...interface{}) *Query { case l == 0: q.model = nil case l == 1: - model0 := model[0] - if model0 != nil { - q.model, err = newTableModel(model0) - } + q.model, err = newTableModel(model[0]) case l > 1: q.model, err = newTableModel(&model) } if err != nil { q = q.err(err) } + if q.ignoreModel { + q.ignoreModel = false + } return q } +// With adds subq as common table expression with the given name. func (q *Query) With(name string, subq *Query) *Query { q.with = append(q.with, withQuery{name, subq}) return q } +// WrapWith creates new Query and adds to it current query as +// common table expression with the given name. +func (q *Query) WrapWith(name string) *Query { + topq := q.New() + topq.with = q.with + q.with = nil + topq = topq.With(name, q) + return topq +} + func (q *Query) Table(tables ...string) *Query { for _, table := range tables { - q.tables = append(q.tables, table) + q.tables = append(q.tables, fieldAppender{table}) } return q } -func (q *Query) Alias(alias string) *Query { - q.tableAlias = alias +func (q *Query) TableExpr(expr string, params ...interface{}) *Query { + q.tables = append(q.tables, queryParamsAppender{expr, params}) return q } +// Column adds column to the Query quoting it according to PostgreSQL rules. +// ColumnExpr can be used to bypass quoting restriction. func (q *Query) Column(columns ...string) *Query { -loop: for _, column := range columns { + if column == "_" { + q.columns = make([]FormatAppender, 0) + continue + } + if q.model != nil { - if j := q.model.Join(column, nil); j != nil { - continue loop + if _, j := q.model.Join(column, nil); j != nil { + continue } } - q.fields = append(q.fields, column) q.columns = append(q.columns, fieldAppender{column}) } return q } +// ColumnExpr adds column expression to the Query. 
func (q *Query) ColumnExpr(expr string, params ...interface{}) *Query { - q.columns = append(q.columns, queryParams{expr, params}) + q.columns = append(q.columns, queryParamsAppender{expr, params}) return q } +func (q *Query) getFields() []string { + var fields []string + for _, col := range q.columns { + if f, ok := col.(fieldAppender); ok { + fields = append(fields, f.field) + } + } + return fields +} + func (q *Query) Relation(name string, apply func(*Query) (*Query, error)) *Query { - if j := q.model.Join(name, apply); j == nil { + if _, j := q.model.Join(name, apply); j == nil { return q.err(fmt.Errorf( - "model %s does not have relation %s", - q.model.Table().TypeName, name, + "model=%s does not have relation=%s", + q.model.Table().Type.Name(), name, )) } return q } func (q *Query) Set(set string, params ...interface{}) *Query { - q.set = append(q.set, queryParams{set, params}) + q.set = append(q.set, queryParamsAppender{set, params}) return q } func (q *Query) Where(where string, params ...interface{}) *Query { - q.where = append(q.where, queryParams{where, params}) + q.where = append(q.where, &whereAppender{"AND", where, params}) + return q +} + +func (q *Query) WhereOr(where string, params ...interface{}) *Query { + q.where = append(q.where, &whereAppender{"OR", where, params}) return q } func (q *Query) Join(join string, params ...interface{}) *Query { - q.joins = append(q.joins, queryParams{join, params}) + q.joins = append(q.joins, queryParamsAppender{join, params}) return q } -func (q *Query) Group(group string, params ...interface{}) *Query { - q.group = append(q.group, queryParams{group, params}) +func (q *Query) Group(columns ...string) *Query { + for _, column := range columns { + q.group = append(q.group, fieldAppender{column}) + } + return q +} + +func (q *Query) GroupExpr(group string, params ...interface{}) *Query { + q.group = append(q.group, queryParamsAppender{group, params}) return q } -func (q *Query) Order(order string, params ...interface{}) *Query { - q.order = append(q.order, queryParams{order, params}) +func (q *Query) Having(having string, params ...interface{}) *Query { + q.having = append(q.having, queryParamsAppender{having, params}) + return q +} + +// Order adds sort order to the Query quoting column name. +// OrderExpr can be used to bypass quoting restriction. +func (q *Query) Order(orders ...string) *Query { +loop: + for _, order := range orders { + ind := strings.LastIndex(order, " ") + if ind != -1 { + field := order[:ind] + sort := order[ind+1:] + switch internal.ToUpper(sort) { + case "ASC", "DESC": + q.order = append(q.order, queryParamsAppender{ + query: "? ?", + params: []interface{}{types.F(field), types.Q(sort)}, + }) + continue loop + } + } + + q.order = append(q.order, fieldAppender{order}) + continue + } + return q +} + +// Order adds sort order to the Query. 
+func (q *Query) OrderExpr(order string, params ...interface{}) *Query { + q.order = append(q.order, queryParamsAppender{order, params}) return q } @@ -160,12 +266,12 @@ func (q *Query) Offset(n int) *Query { } func (q *Query) OnConflict(s string, params ...interface{}) *Query { - q.onConflict = queryParams{s, params} + q.onConflict = queryParamsAppender{s, params} return q } func (q *Query) Returning(s string, params ...interface{}) *Query { - q.returning = append(q.returning, queryParams{s, params}) + q.returning = append(q.returning, queryParamsAppender{s, params}) return q } @@ -185,45 +291,36 @@ func (q *Query) Count() (int, error) { return 0, q.stickyErr } - q = q.copy() - q.columns = append(q.columns, Q("count(*)")) - q.order = nil - q.limit = 0 - q.offset = 0 - - sel := &selectQuery{ - Query: q, - } var count int - _, err := q.db.QueryOne(Scan(&count), sel, q.model) + _, err := q.db.QueryOne(Scan(&count), q.countSelectQuery("count(*)"), q.model) return count, err } +func (q *Query) countSelectQuery(query string) selectQuery { + return selectQuery{ + Query: q.countQuery(), + count: queryParamsAppender{query: query}, + } +} + +func (q *Query) countQuery() *Query { + if len(q.group) > 0 { + return q.Copy().WrapWith("wrapper").Table("wrapper") + } + return q +} + // First selects the first row. func (q *Query) First() error { b := columns(q.model.Table().Alias, "", q.model.Table().PKs) - return q.Order(string(b)).Limit(1).Select() + return q.OrderExpr(string(b)).Limit(1).Select() } // Last selects the last row. func (q *Query) Last() error { b := columns(q.model.Table().Alias, "", q.model.Table().PKs) b = append(b, " DESC"...) - return q.Order(string(b)).Limit(1).Select() -} - -func (q *Query) newModel(values []interface{}) (model Model, err error) { - if len(values) > 0 { - return NewModel(values...) - } - return q.model, nil -} - -func (q *Query) query(model Model, query interface{}) (*types.Result, error) { - if m, ok := model.(useQueryOne); ok && m.useQueryOne() { - return q.db.QueryOne(model, query, q.model) - } - return q.db.Query(model, query, q.model) + return q.OrderExpr(string(b)).Limit(1).Select() } // Select selects the model. @@ -232,23 +329,21 @@ func (q *Query) Select(values ...interface{}) error { return q.stickyErr } - if q.model != nil { - q.addJoins(q.model.GetJoins()) - } - - model, err := q.newModel(values) + model, err := q.newModel(values...) if err != nil { return err } - res, err := q.query(model, selectQuery{q}) + res, err := q.query(model, selectQuery{Query: q}) if err != nil { return err } - if res.RowsReturned() > 0 && q.model != nil { - if err := selectJoins(q.db, q.model.GetJoins()); err != nil { - return err + if res.RowsReturned() > 0 { + if q.model != nil { + if err := selectJoins(q.db, q.model.GetJoins()); err != nil { + return err + } } if err := model.AfterSelect(q.db); err != nil { return err @@ -258,7 +353,21 @@ func (q *Query) Select(values ...interface{}) error { return nil } -// SelectAndCount runs Select and Count in two separate goroutines, +func (q *Query) newModel(values ...interface{}) (Model, error) { + if len(values) > 0 { + return NewModel(values...) + } + return q.model, nil +} + +func (q *Query) query(model Model, query interface{}) (*types.Result, error) { + if _, ok := model.(useQueryOne); ok { + return q.db.QueryOne(model, query, q.model) + } + return q.db.Query(model, query, q.model) +} + +// SelectAndCount runs Select and Count in two goroutines, // waits for them to finish and returns the result. 
func (q *Query) SelectAndCount(values ...interface{}) (count int, err error) { if q.stickyErr != nil { @@ -288,16 +397,20 @@ func (q *Query) SelectAndCount(values ...interface{}) (count int, err error) { return count, err } -func (q *Query) addJoins(joins []join) { +func (q *Query) forEachHasOneJoin(fn func(*join)) { + if q.model == nil { + return + } + q._forEachHasOneJoin(q.model.GetJoins(), fn) +} + +func (q *Query) _forEachHasOneJoin(joins []join, fn func(*join)) { for i := range joins { j := &joins[i] switch j.Rel.Type { - case HasOneRelation: - j.JoinHasOne(q) - q.addJoins(j.JoinModel.GetJoins()) - case BelongsToRelation: - j.JoinBelongsTo(q) - q.addJoins(j.JoinModel.GetJoins()) + case HasOneRelation, BelongsToRelation: + fn(j) + q._forEachHasOneJoin(j.JoinModel.GetJoins(), fn) } } } @@ -324,7 +437,7 @@ func (q *Query) Insert(values ...interface{}) (*types.Result, error) { return nil, q.stickyErr } - model, err := q.newModel(values) + model, err := q.newModel(values...) if err != nil { return nil, err } @@ -401,7 +514,7 @@ func (q *Query) Update(values ...interface{}) (*types.Result, error) { return nil, q.stickyErr } - model, err := q.newModel(values) + model, err := q.newModel(values...) if err != nil { return nil, err } @@ -460,14 +573,12 @@ func (q *Query) FormatQuery(dst []byte, query string, params ...interface{}) []b return Formatter{}.Append(dst, query, params...) } -func (q *Query) appendTableAlias(b []byte) ([]byte, bool) { - if q.tableAlias != "" { - return types.AppendField(b, q.tableAlias, 1), true - } - if q.model != nil { - return append(b, q.model.Table().Alias...), true - } - return b, false +func (q *Query) hasModel() bool { + return !q.ignoreModel && q.model != nil +} + +func (q *Query) hasTables() bool { + return q.hasModel() || len(q.tables) > 0 } func (q *Query) appendTableName(b []byte) []byte { @@ -477,26 +588,53 @@ func (q *Query) appendTableName(b []byte) []byte { func (q *Query) appendTableNameWithAlias(b []byte) []byte { b = q.appendTableName(b) b = append(b, " AS "...) - b, _ = q.appendTableAlias(b) + b = append(b, q.model.Table().Alias...) return b } -func (q *Query) haveTables() bool { - return q.model != nil || len(q.tables) > 0 -} - func (q *Query) appendTables(b []byte) []byte { - if q.model != nil { + if q.hasModel() { b = q.appendTableNameWithAlias(b) if len(q.tables) > 0 { b = append(b, ", "...) } } - for i, table := range q.tables { + for i, f := range q.tables { + if i > 0 { + b = append(b, ", "...) + } + b = f.AppendFormat(b, q) + } + return b +} + +func (q *Query) appendFirstTable(b []byte) []byte { + if q.hasModel() { + return q.appendTableNameWithAlias(b) + } + if len(q.tables) > 0 { + b = q.tables[0].AppendFormat(b, q) + } + return b +} + +func (q *Query) hasOtherTables() bool { + if q.hasModel() { + return len(q.tables) > 0 + } + return len(q.tables) > 1 +} + +func (q *Query) appendOtherTables(b []byte) []byte { + tables := q.tables + if !q.hasModel() { + tables = tables[1:] + } + for i, f := range tables { if i > 0 { b = append(b, ", "...) } - b = types.AppendField(b, table, 1) + b = f.AppendFormat(b, q) } return b } @@ -516,18 +654,18 @@ func (q *Query) mustAppendWhere(b []byte) ([]byte, error) { } b = append(b, " WHERE "...) - return pkWhereQuery{q}.AppendFormat(b, nil), nil + return wherePKQuery{q}.AppendFormat(b, nil), nil } func (q *Query) appendWhere(b []byte) []byte { b = append(b, " WHERE "...) for i, f := range q.where { if i > 0 { - b = append(b, " AND "...) 
+ b = append(b, ' ') + b = f.AppendSep(b) + b = append(b, ' ') } - b = append(b, '(') b = f.AppendFormat(b, q) - b = append(b, ')') } return b } @@ -554,11 +692,36 @@ func (q *Query) appendReturning(b []byte) []byte { return b } -type pkWhereQuery struct { +func (q *Query) appendWith(b []byte) ([]byte, error) { + var err error + b = append(b, "WITH "...) + for i, withq := range q.with { + if i > 0 { + b = append(b, ", "...) + } + b = types.AppendField(b, withq.name, 1) + b = append(b, " AS ("...) + b, err = selectQuery{Query: withq.query}.AppendQuery(b) + if err != nil { + return nil, err + } + b = append(b, ')') + } + b = append(b, ' ') + return b, nil +} + +//------------------------------------------------------------------------------ + +type wherePKQuery struct { *Query } -func (q pkWhereQuery) AppendFormat(b []byte, f QueryFormatter) []byte { +func (wherePKQuery) AppendSep(b []byte) []byte { + return append(b, "AND"...) +} + +func (q wherePKQuery) AppendFormat(b []byte, f QueryFormatter) []byte { table := q.model.Table() return appendColumnAndValue(b, q.model.Value(), table, table.PKs) } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/relation.go b/vendor/gopkg.in/pg.v5/orm/relation.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/relation.go rename to vendor/gopkg.in/pg.v5/orm/relation.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/select.go b/vendor/gopkg.in/pg.v5/orm/select.go similarity index 50% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/select.go rename to vendor/gopkg.in/pg.v5/orm/select.go index 530481d..644dc12 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/select.go +++ b/vendor/gopkg.in/pg.v5/orm/select.go @@ -1,22 +1,19 @@ package orm -import ( - "strconv" - - "gopkg.in/pg.v5/types" -) +import "strconv" func Select(db DB, model interface{}) error { q := NewQuery(db, model) if err := q.model.Table().checkPKs(); err != nil { return err } - q.where = append(q.where, pkWhereQuery{q}) + q.where = append(q.where, wherePKQuery{q}) return q.Select() } type selectQuery struct { *Query + count FormatAppender } var _ QueryAppender = (*selectQuery)(nil) @@ -32,13 +29,21 @@ func (q selectQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error } b = append(b, "SELECT "...) - b = q.appendColumns(b) + if q.count != nil { + b = q.count.AppendFormat(b, q) + } else { + b = q.appendColumns(b) + } - if q.haveTables() { + if q.hasTables() { b = append(b, " FROM "...) b = q.appendTables(b) } + q.forEachHasOneJoin(func(j *join) { + b = append(b, ' ') + b = j.appendHasOneJoin(b) + }) if len(q.joins) > 0 { for _, f := range q.joins { b = append(b, ' ') @@ -54,50 +59,60 @@ func (q selectQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error b = append(b, " GROUP BY "...) for i, f := range q.group { if i > 0 { - b = append(b, ' ') + b = append(b, ", "...) } b = f.AppendFormat(b, q) } } - if len(q.order) > 0 { - b = append(b, " ORDER BY "...) - for i, f := range q.order { + if len(q.having) > 0 { + b = append(b, " HAVING "...) + for i, f := range q.having { if i > 0 { - b = append(b, ' ') + b = append(b, " AND "...) } + b = append(b, '(') b = f.AppendFormat(b, q) + b = append(b, ')') } } - if q.limit != 0 { - b = append(b, " LIMIT "...) - b = strconv.AppendInt(b, int64(q.limit), 10) - } + if q.count == nil { + if len(q.order) > 0 { + b = append(b, " ORDER BY "...) + for i, f := range q.order { + if i > 0 { + b = append(b, ", "...) 
+ } + b = f.AppendFormat(b, q) + } + } + + if q.limit != 0 { + b = append(b, " LIMIT "...) + b = strconv.AppendInt(b, int64(q.limit), 10) + } - if q.offset != 0 { - b = append(b, " OFFSET "...) - b = strconv.AppendInt(b, int64(q.offset), 10) + if q.offset != 0 { + b = append(b, " OFFSET "...) + b = strconv.AppendInt(b, int64(q.offset), 10) + } } return b, nil } func (q selectQuery) appendColumns(b []byte) []byte { - if len(q.columns) > 0 { - return q.appendQueryColumns(b) - } - - if q.model != nil { - return q.appendModelColumns(b) - } - - var ok bool - b, ok = q.appendTableAlias(b) - if ok { - b = append(b, '.') + if q.columns != nil { + b = q.appendQueryColumns(b) + } else if q.hasModel() { + b = q.appendModelColumns(b) + } else { + b = append(b, '*') } - b = append(b, '*') + q.forEachHasOneJoin(func(j *join) { + b = j.appendHasOneColumns(b) + }) return b } @@ -111,36 +126,14 @@ func (q selectQuery) appendQueryColumns(b []byte) []byte { return b } -func (sel selectQuery) appendModelColumns(b []byte) []byte { - alias, hasAlias := sel.appendTableAlias(nil) - for i, f := range sel.model.Table().Fields { +func (q selectQuery) appendModelColumns(b []byte) []byte { + for i, f := range q.model.Table().Fields { if i > 0 { b = append(b, ", "...) } - if hasAlias { - b = append(b, alias...) - b = append(b, '.') - } + b = append(b, q.model.Table().Alias...) + b = append(b, '.') b = append(b, f.ColName...) } return b } - -func (q selectQuery) appendWith(b []byte) ([]byte, error) { - var err error - b = append(b, "WITH "...) - for i, withq := range q.with { - if i > 0 { - b = append(b, ", "...) - } - b = types.AppendField(b, withq.name, 1) - b = append(b, " AS ("...) - b, err = selectQuery{withq.query}.AppendQuery(b) - if err != nil { - return nil, err - } - b = append(b, ')') - } - b = append(b, ' ') - return b, nil -} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/table.go b/vendor/gopkg.in/pg.v5/orm/table.go similarity index 87% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/table.go rename to vendor/gopkg.in/pg.v5/orm/table.go index a32b12c..7e17b3d 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/table.go +++ b/vendor/gopkg.in/pg.v5/orm/table.go @@ -5,15 +5,17 @@ import ( "reflect" "strings" - "github.com/jinzhu/inflection" - + "gopkg.in/pg.v5/internal" "gopkg.in/pg.v5/types" + + "github.com/jinzhu/inflection" ) type Table struct { - Type reflect.Type - TypeName string + Type reflect.Type + zeroStruct reflect.Value + TypeName string Name types.Q Alias types.Q ModelName string @@ -35,9 +37,14 @@ func (t *Table) Has(flag int16) bool { return t.flags&flag != 0 } +func (t *Table) HasField(field string) bool { + _, err := t.GetField(field) + return err == nil +} + func (t *Table) checkPKs() error { if len(t.PKs) == 0 { - return fmt.Errorf("model %s does not have primary keys", t.TypeName) + return fmt.Errorf("model=%s does not have primary keys", t.Type.Name()) } return nil } @@ -50,11 +57,25 @@ func (t *Table) AddField(field *Field) { func (t *Table) GetField(fieldName string) (*Field, error) { field, ok := t.FieldsMap[fieldName] if !ok { - return nil, fmt.Errorf("can't find column %s in table %s", fieldName, t.Name) + return nil, fmt.Errorf("can't find column=%s in table=%s", fieldName, t.Name) } return field, nil } +func (t *Table) AppendParam(dst []byte, strct reflect.Value, name string) ([]byte, bool) { + if field, ok := t.FieldsMap[name]; ok { + dst = field.AppendValue(dst, strct, 1) + return dst, true + } + + if method, ok := t.Methods[name]; ok { + dst = 
method.AppendValue(dst, strct.Addr(), 1) + return dst, true + } + + return dst, false +} + func (t *Table) addRelation(rel *Relation) { if t.Relations == nil { t.Relations = make(map[string]*Relation) @@ -73,11 +94,12 @@ func newTable(typ reflect.Type) *Table { return table } - modelName := Underscore(typ.Name()) + modelName := internal.Underscore(typ.Name()) table = &Table{ - Type: typ, - TypeName: typ.Name(), + Type: typ, + zeroStruct: reflect.Zero(typ), + TypeName: internal.ToExported(typ.Name()), Name: types.Q(types.AppendField(nil, inflection.Plural(modelName), 1)), Alias: types.Q(types.AppendField(nil, modelName, 1)), ModelName: modelName, @@ -165,10 +187,6 @@ func (t *Table) addFields(typ reflect.Type, index []int) { continue } - if f.PkgPath != "" { - continue - } - field := t.newField(f, index) if field != nil { t.AddField(field) @@ -193,7 +211,11 @@ func (t *Table) getField(name string) *Field { func (t *Table) newField(f reflect.StructField, index []int) *Field { sqlName, sqlOpt := parseTag(f.Tag.Get("sql")) - if f.Name == "TableName" { + switch f.Name { + case "tableName", "TableName": + if index != nil { + return nil + } if sqlName != "" { t.Name = types.Q(sqlName) } @@ -203,9 +225,13 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field { return nil } + if f.PkgPath != "" { + return nil + } + skip := sqlName == "-" if skip || sqlName == "" { - sqlName = Underscore(f.Name) + sqlName = internal.Underscore(f.Name) } if field, ok := t.FieldsMap[sqlName]; ok { @@ -245,6 +271,10 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field { if _, ok := sqlOpt.Get("notnull"); ok { field.flags |= NotNullFlag } + if _, ok := sqlOpt.Get("unique"); ok { + field.flags |= UniqueFlag + } + if len(t.PKs) == 0 && (field.SQLName == "id" || field.SQLName == "uuid") { field.flags |= PrimaryKeyFlag t.PKs = append(t.PKs, &field) @@ -286,8 +316,8 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field { Field: &field, JoinTable: joinTable, M2MTableName: types.Q(m2mTable), - BasePrefix: Underscore(basePrefix + "_"), - JoinPrefix: Underscore(joinPrefix + "_"), + BasePrefix: internal.Underscore(basePrefix + "_"), + JoinPrefix: internal.Underscore(joinPrefix + "_"), }) return nil } @@ -306,7 +336,7 @@ func (t *Table) newField(f reflect.StructField, index []int) *Field { Field: &field, FKs: fks, JoinTable: joinTable, - BasePrefix: Underscore(basePrefix + "_"), + BasePrefix: internal.Underscore(basePrefix + "_"), }) return nil } diff --git a/vendor/gopkg.in/pg.v5/orm/table_params.go b/vendor/gopkg.in/pg.v5/orm/table_params.go new file mode 100644 index 0000000..7c8cc7c --- /dev/null +++ b/vendor/gopkg.in/pg.v5/orm/table_params.go @@ -0,0 +1,29 @@ +package orm + +import "reflect" + +type tableParams struct { + table *Table + strct reflect.Value +} + +func newTableParams(strct interface{}) (*tableParams, bool) { + v := reflect.ValueOf(strct) + if !v.IsValid() { + return nil, false + } + + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return nil, false + } + + return &tableParams{ + table: Tables.Get(v.Type()), + strct: v, + }, true +} + +func (m tableParams) AppendParam(dst []byte, name string) ([]byte, bool) { + return m.table.AppendParam(dst, m.strct, name) +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/tables.go b/vendor/gopkg.in/pg.v5/orm/tables.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/tables.go rename to vendor/gopkg.in/pg.v5/orm/tables.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/tag.go 
b/vendor/gopkg.in/pg.v5/orm/tag.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/tag.go rename to vendor/gopkg.in/pg.v5/orm/tag.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/update.go b/vendor/gopkg.in/pg.v5/orm/update.go similarity index 83% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/update.go rename to vendor/gopkg.in/pg.v5/orm/update.go index be36f40..c240b4b 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/update.go +++ b/vendor/gopkg.in/pg.v5/orm/update.go @@ -23,14 +23,26 @@ var _ QueryAppender = (*updateQuery)(nil) func (q updateQuery) AppendQuery(b []byte, params ...interface{}) ([]byte, error) { var err error + if len(q.with) > 0 { + b, err = q.appendWith(b) + if err != nil { + return nil, err + } + } + b = append(b, "UPDATE "...) - b = q.appendTables(b) + b = q.appendFirstTable(b) b, err = q.mustAppendSet(b) if err != nil { return nil, err } + if q.hasOtherTables() { + b = append(b, " FROM "...) + b = q.appendOtherTables(b) + } + b, err = q.mustAppendWhere(b) if err != nil { return nil, err @@ -58,8 +70,8 @@ func (q updateQuery) mustAppendSet(b []byte) ([]byte, error) { table := q.model.Table() strct := q.model.Value() - if len(q.fields) > 0 { - for i, fieldName := range q.fields { + if fields := q.getFields(); len(fields) > 0 { + for i, fieldName := range fields { field, err := table.GetField(fieldName) if err != nil { return nil, err diff --git a/vendor/gopkg.in/pg.v5/orm/url_values.go b/vendor/gopkg.in/pg.v5/orm/url_values.go new file mode 100644 index 0000000..d72a6dc --- /dev/null +++ b/vendor/gopkg.in/pg.v5/orm/url_values.go @@ -0,0 +1,125 @@ +package orm + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "gopkg.in/pg.v5/types" +) + +func URLValues(urlValues url.Values) func(*Query) (*Query, error) { + return func(q *Query) (*Query, error) { + for fieldName, values := range urlValues { + var operation string + if i := strings.Index(fieldName, "__"); i != -1 { + fieldName, operation = fieldName[:i], fieldName[i+2:] + } + + if q.model.Table().HasField(fieldName) { + q = addOperator(q, fieldName, operation, values) + } + } + + return setOrder(q, urlValues), nil + } +} + +func addOperator(q *Query, fieldName, operator string, values []string) *Query { + switch operator { + case "gt": + q = forEachValue(q, fieldName, values, "? > ?") + case "gte": + q = forEachValue(q, fieldName, values, "? >= ?") + case "lt": + q = forEachValue(q, fieldName, values, "? < ?") + case "lte": + q = forEachValue(q, fieldName, values, "? <= ?") + case "ieq": + q = forEachValue(q, fieldName, values, "? ILIKE ?") + case "match": + q = forEachValue(q, fieldName, values, "? SIMILAR TO ?") + case "exclude": + q = forAllValues(q, fieldName, values, "? != ?", "? NOT IN (?)") + case "", "include": + q = forAllValues(q, fieldName, values, "? = ?", "? 
IN (?)") + } + return q +} + +func forEachValue(q *Query, fieldName string, values []string, queryTemplate string) *Query { + for _, value := range values { + q = q.Where(queryTemplate, types.F(fieldName), value) + } + return q +} + +func forAllValues(q *Query, fieldName string, values []string, queryTemplate, queryArrayTemplate string) *Query { + if len(values) > 1 { + q = q.Where(queryArrayTemplate, types.F(fieldName), types.In(values)) + } else { + q = q.Where(queryTemplate, types.F(fieldName), values[0]) + } + return q +} + +func setOrder(q *Query, urlValues url.Values) *Query { + for _, order := range urlValues["order"] { + if order != "" { + q = q.Order(order) + } + } + return q +} + +// Pager sets LIMIT and OFFSET from the URL values: +// - ?limit=10 - sets q.Limit(10), max limit is 1000. +// - ?page=5 - sets q.Offset((page - 1) * limit), max offset is 1000000. +func Pager(urlValues url.Values, defaultLimit int) func(*Query) (*Query, error) { + return func(q *Query) (*Query, error) { + const maxLimit = 1000 + const maxOffset = 1e6 + + limit, err := intParam(urlValues, "limit") + if err != nil { + return nil, err + } + if limit < 1 { + limit = defaultLimit + } else if limit > maxLimit { + return nil, fmt.Errorf("limit=%d is bigger than %d", limit, maxLimit) + } + if limit > 0 { + q = q.Limit(limit) + } + + page, err := intParam(urlValues, "page") + if err != nil { + return nil, err + } + if page > 0 { + offset := (page - 1) * limit + if offset > maxOffset { + return nil, fmt.Errorf("offset=%d can't bigger than %d", offset, maxOffset) + } + q = q.Offset(offset) + } + + return q, nil + } +} + +func intParam(urlValues url.Values, paramName string) (int, error) { + values, ok := urlValues[paramName] + if !ok { + return 0, nil + } + + value, err := strconv.Atoi(values[0]) + if err != nil { + return 0, fmt.Errorf("param=%s value=%s is invalid: %s", paramName, values[0], err) + } + + return value, nil +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/util.go b/vendor/gopkg.in/pg.v5/orm/util.go similarity index 92% rename from Godeps/_workspace/src/gopkg.in/pg.v5/orm/util.go rename to vendor/gopkg.in/pg.v5/orm/util.go index cf78299..57e55ec 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/orm/util.go +++ b/vendor/gopkg.in/pg.v5/orm/util.go @@ -111,12 +111,12 @@ func values(v reflect.Value, index []int, fields []*Field) []byte { return b } -func dstValues(root reflect.Value, path []int, fields []*Field) map[string][]reflect.Value { +func dstValues(model tableModel, fields []*Field) map[string][]reflect.Value { mp := make(map[string][]reflect.Value) var id []byte - walk(root, path[:len(path)-1], func(v reflect.Value) { + walk(model.Root(), model.ParentIndex(), func(v reflect.Value) { id = modelId(id[:0], v, fields) - mp[string(id)] = append(mp[string(id)], v.Field(path[len(path)-1])) + mp[string(id)] = append(mp[string(id)], v.FieldByIndex(model.Relation().Field.Index)) }) return mp } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/pg.go b/vendor/gopkg.in/pg.v5/pg.go similarity index 92% rename from Godeps/_workspace/src/gopkg.in/pg.v5/pg.go rename to vendor/gopkg.in/pg.v5/pg.go index 522570f..6ac1ed7 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/pg.go +++ b/vendor/gopkg.in/pg.v5/pg.go @@ -45,7 +45,7 @@ func In(slice interface{}) types.ValueAppender { // Array accepts a slice and returns a wrapper for working with PostgreSQL // array data type. 
// -// Note that for struct fields you should use array tag: +// For struct fields you can use array tag: // // Emails []string `pg:",array"` func Array(v interface{}) *types.Array { @@ -56,7 +56,7 @@ func Array(v interface{}) *types.Array { // Supported map types are: // - map[string]string // -// Note that for struct fields you should use hstore tag: +// For struct fields you can use hstore tag: // // Attrs map[string]string `pg:",hstore"` func Hstore(v interface{}) *types.Hstore { @@ -79,6 +79,13 @@ type Strings []string var _ orm.Model = (*Strings)(nil) var _ types.ValueAppender = (*Strings)(nil) +func (strings *Strings) Reset() error { + if s := *strings; len(s) > 0 { + *strings = s[:0] + } + return nil +} + func (strings *Strings) NewModel() orm.ColumnScanner { return strings } @@ -144,6 +151,13 @@ type Ints []int64 var _ orm.Model = (*Ints)(nil) var _ types.ValueAppender = (*Ints)(nil) +func (ints *Ints) Reset() error { + if s := *ints; len(s) > 0 { + *ints = s[:0] + } + return nil +} + func (ints *Ints) NewModel() orm.ColumnScanner { return ints } @@ -212,6 +226,13 @@ type IntSet map[int64]struct{} var _ orm.Model = (*IntSet)(nil) +func (set *IntSet) Reset() error { + if len(*set) > 0 { + *set = make(map[int64]struct{}) + } + return nil +} + func (set *IntSet) NewModel() orm.ColumnScanner { return set } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/stmt.go b/vendor/gopkg.in/pg.v5/stmt.go similarity index 97% rename from Godeps/_workspace/src/gopkg.in/pg.v5/stmt.go rename to vendor/gopkg.in/pg.v5/stmt.go index 9b09757..935af2b 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/stmt.go +++ b/vendor/gopkg.in/pg.v5/stmt.go @@ -44,8 +44,8 @@ func (stmt *Stmt) conn() (*pool.Conn, error) { } return nil, errStmtClosed } - stmt._cn.SetReadTimeout(stmt.db.opt.ReadTimeout) - stmt._cn.SetWriteTimeout(stmt.db.opt.WriteTimeout) + + stmt._cn.SetReadWriteTimeout(stmt.db.opt.ReadTimeout, stmt.db.opt.WriteTimeout) return stmt._cn, nil } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/time.go b/vendor/gopkg.in/pg.v5/time.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/time.go rename to vendor/gopkg.in/pg.v5/time.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/tx.go b/vendor/gopkg.in/pg.v5/tx.go similarity index 82% rename from Godeps/_workspace/src/gopkg.in/pg.v5/tx.go rename to vendor/gopkg.in/pg.v5/tx.go index b33313c..5d2359a 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/tx.go +++ b/vendor/gopkg.in/pg.v5/tx.go @@ -2,7 +2,6 @@ package pg import ( "io" - "os" "gopkg.in/pg.v5/internal" "gopkg.in/pg.v5/internal/pool" @@ -10,16 +9,6 @@ import ( "gopkg.in/pg.v5/types" ) -// When true Tx does not issue BEGIN, COMMIT, and ROLLBACK. -// Also underlying database connection is immediately returned to the pool. -// This is primarily useful for running your database tests in transaction. -// singleTx can be enabled with GO_PG_NO_TX environment variable. -var noTx bool - -func init() { - _, noTx = os.LookupEnv("GO_PG_NO_TX") -} - // Tx is an in-progress database transaction. // // A transaction must end with a call to Commit or Rollback. @@ -44,7 +33,7 @@ func (db *DB) Begin() (*Tx, error) { db: db, } - if !noTx { + if !db.opt.DisableTransaction { cn, err := db.conn() if err != nil { return nil, err @@ -75,20 +64,43 @@ func (db *DB) RunInTransaction(fn func(*Tx) error) error { return tx.Commit() } -func (tx *Tx) conn() (*pool.Conn, error) { - if noTx { - return tx.db.conn() +// Begin returns the transaction. 
+func (tx *Tx) Begin() (*Tx, error) { + return tx, nil +} + +// RunInTransaction runs a function in the transaction. If function +// returns an error transaction is rollbacked, otherwise transaction +// is committed. +func (tx *Tx) RunInTransaction(fn func(*Tx) error) error { + if err := fn(tx); err != nil { + tx.Rollback() + return err } - if tx.cn == nil { - return nil, errTxDone + return tx.Commit() +} + +func (tx *Tx) conn() (*pool.Conn, error) { + var cn *pool.Conn + if tx.db.opt.DisableTransaction { + var err error + cn, err = tx.db.conn() + if err != nil { + return nil, err + } + } else { + cn = tx.cn + if cn == nil { + return nil, errTxDone + } } - tx.cn.SetReadTimeout(tx.db.opt.ReadTimeout) - tx.cn.SetWriteTimeout(tx.db.opt.WriteTimeout) - return tx.cn, nil + + cn.SetReadWriteTimeout(tx.db.opt.ReadTimeout, tx.db.opt.WriteTimeout) + return cn, nil } func (tx *Tx) freeConn(cn *pool.Conn, err error) { - if noTx { + if tx.db.opt.DisableTransaction { _ = tx.db.freeConn(cn, err) } } @@ -133,7 +145,7 @@ func (tx *Tx) Exec(query interface{}, params ...interface{}) (*types.Result, err return nil, err } - res, err := simpleQuery(cn, query, params...) + res, err := tx.db.simpleQuery(cn, query, params...) tx.freeConn(cn, err) return res, err } @@ -160,7 +172,7 @@ func (tx *Tx) Query(model interface{}, query interface{}, params ...interface{}) return nil, err } - res, mod, err := simpleQueryData(cn, model, query, params...) + res, mod, err := tx.db.simpleQueryData(cn, model, query, params...) tx.freeConn(cn, err) if err != nil { return nil, err @@ -220,7 +232,9 @@ func (tx *Tx) Delete(model interface{}) error { return orm.Delete(tx, model) } -// CreateTable creates table for the model in db. +// CreateTable creates table for the model. It recognizes following field tags: +// - notnull - sets NOT NULL constraint. +// - unique - sets UNIQUE constraint. func (tx *Tx) CreateTable(model interface{}, opt *orm.CreateTableOptions) error { _, err := orm.CreateTable(tx, model, opt) return err @@ -231,7 +245,7 @@ func (tx *Tx) FormatQuery(dst []byte, query string, params ...interface{}) []byt } func (tx *Tx) begin() error { - if noTx { + if tx.db.opt.DisableTransaction { return nil } @@ -241,7 +255,7 @@ func (tx *Tx) begin() error { // Commit commits the transaction. func (tx *Tx) Commit() error { - if noTx { + if tx.db.opt.DisableTransaction { return nil } @@ -252,7 +266,7 @@ func (tx *Tx) Commit() error { // Rollback aborts the transaction. func (tx *Tx) Rollback() error { - if noTx { + if tx.db.opt.DisableTransaction { return nil } @@ -284,7 +298,7 @@ func (tx *Tx) CopyFrom(r io.Reader, query string, params ...interface{}) (*types return nil, err } - res, err := copyFrom(cn, r, query, params...) + res, err := tx.db.copyFrom(cn, r, query, params...) 
tx.freeConn(cn, err) return res, err } diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append.go b/vendor/gopkg.in/pg.v5/types/append.go similarity index 78% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/append.go rename to vendor/gopkg.in/pg.v5/types/append.go index 9ac7e05..95402aa 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append.go +++ b/vendor/gopkg.in/pg.v5/types/append.go @@ -6,8 +6,6 @@ import ( "reflect" "strconv" "time" - - "gopkg.in/pg.v5/internal/parser" ) func Append(b []byte, v interface{}, quote int) []byte { @@ -180,57 +178,6 @@ func appendDriverValuer(b []byte, v driver.Valuer, quote int) []byte { return Append(b, value, quote) } -func AppendField(b []byte, field string, quote int) []byte { - return appendField(b, parser.NewString(field), quote) -} - -func AppendFieldBytes(b []byte, field []byte, quote int) []byte { - return appendField(b, parser.New(field), quote) -} - -func appendField(b []byte, p *parser.Parser, quote int) []byte { - var quoted bool - for p.Valid() { - c := p.Read() - - switch c { - case '*': - if !quoted { - b = append(b, '*') - continue - } - case '.': - if quoted && quote == 1 { - b = append(b, '"') - quoted = false - } - b = append(b, '.') - if p.Skip('*') { - b = append(b, '*') - } else if quote == 1 { - b = append(b, '"') - quoted = true - } - continue - } - - if !quoted && quote == 1 { - b = append(b, '"') - quoted = true - } - if quote == 1 && c == '"' { - b = append(b, '"', '"') - } else { - b = append(b, c) - } - - } - if quote == 1 && quoted { - b = append(b, '"') - } - return b -} - func appendAppender(b []byte, v ValueAppender, quote int) []byte { bb, err := v.AppendValue(b, quote) if err != nil { diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append_array.go b/vendor/gopkg.in/pg.v5/types/append_array.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/append_array.go rename to vendor/gopkg.in/pg.v5/types/append_array.go diff --git a/vendor/gopkg.in/pg.v5/types/append_field.go b/vendor/gopkg.in/pg.v5/types/append_field.go new file mode 100644 index 0000000..b6e598c --- /dev/null +++ b/vendor/gopkg.in/pg.v5/types/append_field.go @@ -0,0 +1,54 @@ +package types + +import "gopkg.in/pg.v5/internal/parser" + +func AppendField(b []byte, field string, quote int) []byte { + return appendField(b, parser.NewString(field), quote) +} + +func AppendFieldBytes(b []byte, field []byte, quote int) []byte { + return appendField(b, parser.New(field), quote) +} + +func appendField(b []byte, p *parser.Parser, quote int) []byte { + var quoted bool + for p.Valid() { + c := p.Read() + + switch c { + case '*': + if !quoted { + b = append(b, '*') + continue + } + case '.': + if quoted && quote == 1 { + b = append(b, '"') + quoted = false + } + b = append(b, '.') + if p.Skip('*') { + b = append(b, '*') + } else if quote == 1 { + b = append(b, '"') + quoted = true + } + continue + } + + if !quoted && quote == 1 { + b = append(b, '"') + quoted = true + } + if c == '"' { + b = append(b, '"', '"') + } else { + b = append(b, c) + } + + } + if quote == 1 && quoted { + b = append(b, '"') + } + return b +} diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append_hstore.go b/vendor/gopkg.in/pg.v5/types/append_hstore.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/append_hstore.go rename to vendor/gopkg.in/pg.v5/types/append_hstore.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append_jsonb.go b/vendor/gopkg.in/pg.v5/types/append_jsonb.go similarity index 100% 
rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/append_jsonb.go rename to vendor/gopkg.in/pg.v5/types/append_jsonb.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/append_value.go b/vendor/gopkg.in/pg.v5/types/append_value.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/append_value.go rename to vendor/gopkg.in/pg.v5/types/append_value.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/array.go b/vendor/gopkg.in/pg.v5/types/array.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/array.go rename to vendor/gopkg.in/pg.v5/types/array.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/hstore.go b/vendor/gopkg.in/pg.v5/types/hstore.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/hstore.go rename to vendor/gopkg.in/pg.v5/types/hstore.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/in_op.go b/vendor/gopkg.in/pg.v5/types/in_op.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/in_op.go rename to vendor/gopkg.in/pg.v5/types/in_op.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/interface.go b/vendor/gopkg.in/pg.v5/types/interface.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/interface.go rename to vendor/gopkg.in/pg.v5/types/interface.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/result.go b/vendor/gopkg.in/pg.v5/types/result.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/result.go rename to vendor/gopkg.in/pg.v5/types/result.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/scan.go b/vendor/gopkg.in/pg.v5/types/scan.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/scan.go rename to vendor/gopkg.in/pg.v5/types/scan.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_array.go b/vendor/gopkg.in/pg.v5/types/scan_array.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_array.go rename to vendor/gopkg.in/pg.v5/types/scan_array.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_hstore.go b/vendor/gopkg.in/pg.v5/types/scan_hstore.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_hstore.go rename to vendor/gopkg.in/pg.v5/types/scan_hstore.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_value.go b/vendor/gopkg.in/pg.v5/types/scan_value.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/scan_value.go rename to vendor/gopkg.in/pg.v5/types/scan_value.go diff --git a/Godeps/_workspace/src/gopkg.in/pg.v5/types/time.go b/vendor/gopkg.in/pg.v5/types/time.go similarity index 51% rename from Godeps/_workspace/src/gopkg.in/pg.v5/types/time.go rename to vendor/gopkg.in/pg.v5/types/time.go index 53df847..4ab3071 100644 --- a/Godeps/_workspace/src/gopkg.in/pg.v5/types/time.go +++ b/vendor/gopkg.in/pg.v5/types/time.go @@ -1,38 +1,33 @@ package types -import ( - "time" - - "gopkg.in/pg.v5/internal" -) +import "time" const ( dateFormat = "2006-01-02" timeFormat = "15:04:05.999999999" timestampFormat = "2006-01-02 15:04:05.999999999" - timestamptzFormat = "2006-01-02 15:04:05.999999999-07:00" - timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07" - timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07:00:00" + timestamptzFormat = "2006-01-02 15:04:05.999999999-07:00:00" + timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00" + timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07" ) 
func ParseTime(b []byte) (time.Time, error) { - s := internal.BytesToString(b) switch l := len(b); { case l <= len(dateFormat): - return time.Parse(dateFormat, s) + return time.Parse(dateFormat, string(b)) case l <= len(timeFormat): - return time.Parse(timeFormat, s) + return time.Parse(timeFormat, string(b)) default: + if c := b[len(b)-9]; c == '+' || c == '-' { + return time.Parse(timestamptzFormat, string(b)) + } if c := b[len(b)-6]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat, s) + return time.Parse(timestamptzFormat2, string(b)) } if c := b[len(b)-3]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat2, s) - } - if c := b[len(b)-9]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat3, s) + return time.Parse(timestamptzFormat3, string(b)) } - return time.ParseInLocation(timestampFormat, s, time.Local) + return time.ParseInLocation(timestampFormat, string(b), time.Local) } } @@ -40,7 +35,7 @@ func AppendTime(b []byte, tm time.Time, quote int) []byte { if quote == 1 { b = append(b, '\'') } - b = append(b, tm.Local().Format(timestamptzFormat)...) + b = tm.AppendFormat(b, timestamptzFormat) if quote == 1 { b = append(b, '\'') } diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/LICENSE b/vendor/gopkg.in/yaml.v1/LICENSE similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/LICENSE rename to vendor/gopkg.in/yaml.v1/LICENSE diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/LICENSE.libyaml b/vendor/gopkg.in/yaml.v1/LICENSE.libyaml similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/LICENSE.libyaml rename to vendor/gopkg.in/yaml.v1/LICENSE.libyaml diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/README.md b/vendor/gopkg.in/yaml.v1/README.md similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/README.md rename to vendor/gopkg.in/yaml.v1/README.md diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/apic.go b/vendor/gopkg.in/yaml.v1/apic.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/apic.go rename to vendor/gopkg.in/yaml.v1/apic.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/decode.go b/vendor/gopkg.in/yaml.v1/decode.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/decode.go rename to vendor/gopkg.in/yaml.v1/decode.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/emitterc.go b/vendor/gopkg.in/yaml.v1/emitterc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/emitterc.go rename to vendor/gopkg.in/yaml.v1/emitterc.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/encode.go b/vendor/gopkg.in/yaml.v1/encode.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/encode.go rename to vendor/gopkg.in/yaml.v1/encode.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/parserc.go b/vendor/gopkg.in/yaml.v1/parserc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/parserc.go rename to vendor/gopkg.in/yaml.v1/parserc.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/readerc.go b/vendor/gopkg.in/yaml.v1/readerc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/readerc.go rename to vendor/gopkg.in/yaml.v1/readerc.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/resolve.go b/vendor/gopkg.in/yaml.v1/resolve.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/resolve.go rename to vendor/gopkg.in/yaml.v1/resolve.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/scannerc.go 
b/vendor/gopkg.in/yaml.v1/scannerc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/scannerc.go rename to vendor/gopkg.in/yaml.v1/scannerc.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/sorter.go b/vendor/gopkg.in/yaml.v1/sorter.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/sorter.go rename to vendor/gopkg.in/yaml.v1/sorter.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/writerc.go b/vendor/gopkg.in/yaml.v1/writerc.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/writerc.go rename to vendor/gopkg.in/yaml.v1/writerc.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/yaml.go b/vendor/gopkg.in/yaml.v1/yaml.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/yaml.go rename to vendor/gopkg.in/yaml.v1/yaml.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/yamlh.go b/vendor/gopkg.in/yaml.v1/yamlh.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/yamlh.go rename to vendor/gopkg.in/yaml.v1/yamlh.go diff --git a/Godeps/_workspace/src/gopkg.in/yaml.v1/yamlprivateh.go b/vendor/gopkg.in/yaml.v1/yamlprivateh.go similarity index 100% rename from Godeps/_workspace/src/gopkg.in/yaml.v1/yamlprivateh.go rename to vendor/gopkg.in/yaml.v1/yamlprivateh.go
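The new orm/url_values.go file vendored above maps URL query parameters onto go-pg query clauses: field__gt, field__gte, field__lt, field__lte, field__ieq, field__match, field__exclude and plain field=value become WHERE conditions, order becomes ORDER BY, and Pager turns limit/page into LIMIT/OFFSET with sanity caps. A minimal sketch of how the two option functions compose, using only the signatures shown in the hunk; the Book model is hypothetical:

package example

import (
	"net/url"

	"gopkg.in/pg.v5/orm"
)

// Book is a hypothetical model used only to illustrate the helpers; URLValues
// only applies a filter when the query's model actually has the named column.
type Book struct {
	Id     int64
	Title  string
	Rating int64
}

// applyURLFilters applies the option functions returned by orm.URLValues and
// orm.Pager to an existing query. For ?rating__gte=4&order=title&limit=10&page=3
// the result carries WHERE "rating" >= '4', ORDER BY "title", LIMIT 10 OFFSET 20.
func applyURLFilters(q *orm.Query, values url.Values) (*orm.Query, error) {
	q, err := orm.URLValues(values)(q)
	if err != nil {
		return nil, err
	}
	// 10 is the default limit used when the URL does not supply one.
	return orm.Pager(values, 10)(q)
}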
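The tx.go changes replace the GO_PG_NO_TX environment variable with an option read from the connection, and add Tx.Begin plus Tx.RunInTransaction so nested transactional code degrades to a no-op instead of failing. A hedged sketch of the intended use for test runs; it assumes the flag is exposed as a DisableTransaction field on pg.Options (the hunk only shows it being read as db.opt.DisableTransaction), and the connection details are placeholders:

package example

import "gopkg.in/pg.v5"

// newTestDB connects with transactions disabled, which is what the removed
// GO_PG_NO_TX environment variable used to do: Begin/Commit/Rollback become
// no-ops and connections go straight back to the pool.
func newTestDB() *pg.DB {
	return pg.Connect(&pg.Options{
		Addr:               "localhost:5432", // placeholder test address
		User:               "postgres",
		Database:           "sql_runner_test",
		DisableTransaction: true, // assumption: field name mirrors db.opt.DisableTransaction
	})
}

// doWork keeps using the transactional API; with the option set, the new
// Tx.RunInTransaction simply runs fn against the same Tx.
func doWork(db *pg.DB) error {
	return db.RunInTransaction(func(tx *pg.Tx) error {
		_, err := tx.Exec("UPDATE books SET rating = rating + 1")
		return err
	})
}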
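types/append_field.go is split out of append.go largely verbatim; the behavioural tweak is that an embedded double quote is now always doubled rather than only when quote == 1. For reference, a short sketch of what the exported helpers emit:

package example

import (
	"fmt"

	"gopkg.in/pg.v5/types"
)

func fieldExamples() {
	// With quote == 1 each dotted segment is wrapped in double quotes.
	fmt.Println(string(types.AppendField(nil, "book.author_id", 1))) // "book"."author_id"
	// A trailing .* is passed through unquoted.
	fmt.Println(string(types.AppendField(nil, "book.*", 1))) // "book".*
	// With quote == 0 the field is emitted as-is.
	fmt.Println(string(types.AppendField(nil, "book.author_id", 0))) // book.author_id
}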
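In types/time.go the timestamptz layouts are reordered so ParseTime probes the widest offset first: the byte nine places from the end identifies a -07:00:00 offset, six places an -07:00 offset, and three places a bare -07, with the date, time and local-timestamp formats unchanged. A small sketch of the three branches, with illustrative values:

package example

import (
	"fmt"

	"gopkg.in/pg.v5/types"
)

func parseExamples() {
	inputs := []string{
		"2017-01-02 15:04:05.999-07:00:00", // byte at len-9 is '-': full hh:mm:ss offset
		"2017-01-02 15:04:05.999-07:00",    // byte at len-6 is '-': hh:mm offset
		"2017-01-02 15:04:05.999-07",       // byte at len-3 is '-': hh offset
	}
	for _, in := range inputs {
		tm, err := types.ParseTime([]byte(in))
		fmt.Println(tm, err)
	}
}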