Mirror of https://github.com/vacp2p/mvds.git (synced 2026-01-09 12:07:55 -05:00)

Compare commits (19 commits):
899aeeacb4
87839f9f3a
943ab5e228
3233b23080
a8dc37599b
afd2e97e38
7c42852bfd
3d5dc5b3e3
c9d04bbf91
72ff1b0d14
48a12fa637
6291da6ee1
645d54357a
291827907c
9e1addbac0
9af764814a
5fdf62d7b7
b8e366e4e6
ec87e4ab5e
.gitignore (vendored, 2 changes)
@@ -10,3 +10,5 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

mvds
.travis.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
notifications:
  email: false

language: go

install: true

env:
  - GO111MODULE=on

before_script:
  - make install-linter

matrix:
  include:
    - go: "1.11.x"
      env: GOFLAGS=-mod=vendor
      script:
        - make lint
        # fails without -a
        - go test -a ./... # make test
    - go: "1.12.x"
      env: GOFLAGS=-mod=vendor
      script:
        - make lint
        # fails without -a
        - go test -a ./... # make test
Makefile (42 changes)
@@ -2,6 +2,48 @@ SHELL := /bin/bash

GO111MODULE = on

build:
	go build
.PHONY: build

test:
	go test -v ./...
.PHONY: test

protobuf:
	protoc --go_out=. ./protobuf/*.proto
.PHONY: protobuf

lint:
	golangci-lint run -v
.PHONY: lint

install-linter:
	# install linter
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.17.1
.PHONY: install-linter

vendor:
	go mod tidy
	go mod vendor
	modvendor -copy="**/*.c **/*.h" -v
.PHONY: vendor

generate:
	go generate ./...
.PHONY: generate

create-migration:
	@if [ -z "$$DIR" ]; then \
		echo 'missing DIR var'; \
		exit 1; \
	fi

	@if [ -z "$$NAME" ]; then \
		echo 'missing NAME var'; \
		exit 1; \
	fi

	mkdir -p $(DIR)
	touch $(DIR)/`date +"%s"`_$(NAME).down.sql ./$(DIR)/`date +"%s"`_$(NAME).up.sql
.PHONY: create-migration
README.md
@@ -1,12 +1,14 @@
# Minimal Viable Data Sync




[](LICENSE)
[](https://godoc.org/github.com/status-im/mvds) [](https://goreportcard.com/report/github.com/status-im/mvds)
[](https://godoc.org/github.com/vacp2p/mvds)
[](https://goreportcard.com/report/github.com/vacp2p/mvds)
[](https://travis-ci.com/vacp2p/mvds)

Experimental implementation of the [minimal viable data sync protocol specification](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md).
Experimental implementation of the [minimal viable data sync protocol specification](https://specs.vac.dev/mvds.html).

## Usage
go.mod (18 changes)
@@ -1,14 +1,16 @@
module github.com/status-im/mvds
module github.com/vacp2p/mvds

go 1.12

require (
	github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17 // indirect
	github.com/ethereum/go-ethereum v1.8.27
	github.com/golang/protobuf v1.3.1
	github.com/kr/pretty v0.1.0 // indirect
	github.com/golang-migrate/migrate v3.5.4+incompatible // indirect
	github.com/golang-migrate/migrate/v4 v4.5.0 // indirect
	github.com/golang/protobuf v1.3.2
	github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f // indirect
	github.com/pkg/errors v0.8.1
	golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect
	golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51
	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
	github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed
	github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467
	go.uber.org/atomic v1.4.0 // indirect
	go.uber.org/multierr v1.1.0 // indirect
	go.uber.org/zap v1.10.0
)
go.sum (237 changes)
@@ -1,56 +1,241 @@
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17 h1:m0N5Vg5nP3zEz8TREZpwX3gt4Biw3/8fbIf4A3hO96g=
|
||||
github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495 h1:6IyqGr3fnd0tM3YxipK27TUskaOVUjU2nG45yzwcQKY=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ethereum/go-ethereum v1.8.23 h1:xVKYpRpe3cbkaWN8gsRgStsyTvz3s82PcQsbEofjhEQ=
|
||||
github.com/ethereum/go-ethereum v1.8.23/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
|
||||
github.com/ethereum/go-ethereum v1.8.27 h1:d+gkiLaBDk5fn3Pe/xNVaMrB/ozI+AUB2IlVBp29IrY=
|
||||
github.com/ethereum/go-ethereum v1.8.27/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
|
||||
github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
|
||||
github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-migrate/migrate v3.5.4+incompatible h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA=
|
||||
github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
|
||||
github.com/golang-migrate/migrate/v4 v4.5.0 h1:ucd2qJu1BAKTtmjh7QlWYiq01DwlE/xXYQkOPE0ZTsI=
|
||||
github.com/golang-migrate/migrate/v4 v4.5.0/go.mod h1:SzAcz2l+yDJVhQC7fwiF7T2MAFPMIkigJz98klRJ4OE=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
||||
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f h1:hd3r+uv9DNLScbOrnlj82rBldHQf3XWmCeXAWbw8euQ=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed h1:K2iga8l8OQIHnk2bBq2QsZTO2Q38YWy04xIspdITCdM=
|
||||
github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed/go.mod h1:r8HggRBZ/k7TRwByq/Hp3P/ubFppIna0nvyavVK0pjA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467 h1:/pva5wyh0PKqe0bnHBbndEzbqsilMKFNXI0GPbO+L8c=
|
||||
github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 h1:FP8hkuE6yUEaJnK7O2eTuejKWwW+Rhfj80dQ2JcKxCU=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51 h1:RhYYBLDB5MoVkvoNGMNk+DSj7WoGhySvIvtEjTyiP74=
|
||||
golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
main.go (40 changes)
@@ -8,10 +8,12 @@ import (
	math "math/rand"
	"time"

	"github.com/status-im/mvds/node"
	"github.com/status-im/mvds/state"
	"github.com/status-im/mvds/store"
	"github.com/status-im/mvds/transport"
	"github.com/vacp2p/mvds/node"
	"github.com/vacp2p/mvds/peers"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
	"github.com/vacp2p/mvds/transport"
	"go.uber.org/zap"
)

var (
@@ -23,7 +25,7 @@ var (
	interactive int
)

func init() {
func parseFlags() {
	flag.IntVar(&offline, "offline", 90, "percentage of time a node is offline")
	flag.IntVar(&nodeCount, "nodes", 3, "amount of nodes")
	flag.IntVar(&communicating, "communicating", 2, "amount of nodes sending messages")
@@ -35,6 +37,7 @@ func init() {

func main() {

	parseFlags()
	// @todo validate flags

	transports := make([]*transport.ChannelTransport, 0)
@@ -53,9 +56,13 @@ func main() {
		mode = node.BATCH
	}

	node, err := createNode(t, peerID(), mode)
	if err != nil {
		log.Printf("Could not create node: %+v\n", err)
	}
	nodes = append(
		nodes,
		createNode(t, peerID(), mode),
		node,
	)
}

@@ -69,14 +76,14 @@ func main() {
		peer := nodes[p].ID

		transports[i].AddOutput(peer, input[p])
		n.AddPeer(group, peer)
		_ = n.AddPeer(group, peer)

		log.Printf("%x sharing with %x", n.ID[:4], peer[:4])
	}
}

	for _, n := range nodes {
		n.Start()
		n.Start(1 * time.Second)
	}

	chat(group, nodes[:communicating-1]...)
@@ -105,17 +112,24 @@ OUTER:
	return peers
}

func createNode(transport transport.Transport, id state.PeerID, mode node.Mode) *node.Node {
func createNode(transport transport.Transport, id state.PeerID, mode node.Mode) (*node.Node, error) {
	ds := store.NewDummyStore()
	logger, err := zap.NewDevelopment()
	if err != nil {
		return nil, err
	}

	return node.NewNode(
		&ds,
		ds,
		transport,
		state.NewSyncState(),
		Calc,
		0,
		id,
		mode,
	)
		peers.NewMemoryPersistence(),
		logger,
	), nil
}

func chat(group state.GroupID, nodes ...*node.Node) {
@@ -137,7 +151,7 @@ func Calc(count uint64, epoch int64) int64 {

func peerID() state.PeerID {
	bytes := make([]byte, 65)
	rand.Read(bytes)
	_, _ = rand.Read(bytes)

	id := state.PeerID{}
	copy(id[:], bytes)
@@ -147,7 +161,7 @@ func peerID() state.PeerID {

func groupId() state.GroupID {
	bytes := make([]byte, 32)
	rand.Read(bytes)
	_, _ = rand.Read(bytes)

	id := state.GroupID{}
	copy(id[:], bytes)
mvds_batch_test.go (new file, 142 lines)
@@ -0,0 +1,142 @@
package main

import (
	"testing"
	"time"

	"github.com/stretchr/testify/suite"
	"github.com/vacp2p/mvds/node"
	"github.com/vacp2p/mvds/peers"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
	"github.com/vacp2p/mvds/transport"
	"go.uber.org/zap"
)

func TestMVDSBatchSuite(t *testing.T) {
	suite.Run(t, new(MVDSBatchSuite))
}

type MVDSBatchSuite struct {
	suite.Suite
	client1 *node.Node
	client2 *node.Node
	ds1     store.MessageStore
	ds2     store.MessageStore
	state1  state.SyncState
	state2  state.SyncState
	peers1  peers.Persistence
	peers2  peers.Persistence
	groupID state.GroupID
}

func (s *MVDSBatchSuite) SetupTest() {
	logger, err := zap.NewDevelopment()
	s.Require().NoError(err)

	in1 := make(chan transport.Packet)
	t1 := transport.NewChannelTransport(0, in1)
	s.ds1 = store.NewDummyStore()
	s.state1 = state.NewSyncState()
	s.peers1 = peers.NewMemoryPersistence()
	p1 := [65]byte{0x01}
	s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.BATCH, s.peers1, logger)

	in2 := make(chan transport.Packet)
	t2 := transport.NewChannelTransport(0, in2)
	s.ds2 = store.NewDummyStore()
	s.state2 = state.NewSyncState()
	p2 := [65]byte{0x02}
	s.peers2 = peers.NewMemoryPersistence()
	s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.BATCH, s.peers2, logger)

	t2.AddOutput(p1, in1)
	t1.AddOutput(p2, in2)

	s.groupID = [32]byte{0x01, 0x2, 0x3, 0x4}

	s.Require().NoError(s.client1.AddPeer(s.groupID, p2))
	s.Require().NoError(s.client2.AddPeer(s.groupID, p1))

	// We run the tick manually
	s.client1.Start(10 * time.Millisecond)
	s.client2.Start(10 * time.Millisecond)
}

func (s *MVDSBatchSuite) TearDownTest() {
	s.client1.Stop()
	s.client2.Stop()
}

func (s *MVDSBatchSuite) TestSendClient1ToClient2() {
	subscription := s.client2.Subscribe()
	content := []byte("message 1")

	messageID, err := s.client1.AppendMessage(s.groupID, content)
	s.Require().NoError(err)

	// Check message is in store
	message1Sender, err := s.ds1.Get(messageID)
	s.Require().NoError(err)
	s.Require().NotNil(message1Sender)

	// Check message is received
	s.Require().Eventually(func() bool {
		message1Receiver, err := s.ds2.Get(messageID)
		return err == nil && message1Receiver != nil
	}, 1*time.Second, 10*time.Millisecond)

	message := <-subscription
	s.Equal(message.Body, content)
}

func (s *MVDSBatchSuite) TestSendClient2ToClient1() {
	subscription := s.client1.Subscribe()
	content := []byte("message 1")

	messageID, err := s.client2.AppendMessage(s.groupID, content)
	s.Require().NoError(err)

	// Check message is in store
	message1Sender, err := s.ds2.Get(messageID)
	s.Require().NoError(err)
	s.Require().NotNil(message1Sender)

	// Check message is received
	s.Require().Eventually(func() bool {
		message1Receiver, err := s.ds1.Get(messageID)
		return err == nil && message1Receiver != nil
	}, 1*time.Second, 10*time.Millisecond)

	message := <-subscription
	s.Equal(message.Body, content)
}

func (s *MVDSBatchSuite) TestAcks() {
	messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
	s.Require().NoError(err)

	// Check message is in store
	message1Sender, err := s.ds1.Get(messageID)
	s.Require().NoError(err)
	s.Require().NotNil(message1Sender)

	// Check state is updated correctly
	states, err := s.state1.All(s.client1.CurrentEpoch())
	s.Require().NoError(err)
	s.Require().Equal(1, len(states))

	// Check message is received
	s.Require().Eventually(func() bool {
		message1Receiver, err := s.ds2.Get(messageID)
		return err == nil && message1Receiver != nil
	}, 1*time.Second, 10*time.Millisecond)

	// Check state is removed
	s.Require().Eventually(func() bool {
		states, err := s.state1.All(s.client1.CurrentEpoch())
		return err == nil && len(states) == 0
	}, 1*time.Second, 10*time.Millisecond)
}
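The tests above receive a single message with "message := <-subscription". A minimal sketch of a longer-lived consumer follows; it is hypothetical (the helper name consumeMessages and the use of the standard library "log" package are illustrative, not part of this change) and assumes a started *node.Node wired up as in SetupTest.

// consumeMessages drains a node's subscription channel in the background,
// logging each message body until the channel is closed.
func consumeMessages(n *node.Node) {
	sub := n.Subscribe()
	go func() {
		for msg := range sub {
			log.Printf("received: %s", msg.Body)
		}
	}()
}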
mvds_interactive_test.go (new file, 103 lines)
@@ -0,0 +1,103 @@
package main

import (
	"testing"
	"time"

	"github.com/stretchr/testify/suite"
	"github.com/vacp2p/mvds/node"
	"github.com/vacp2p/mvds/peers"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
	"github.com/vacp2p/mvds/transport"
	"go.uber.org/zap"
)

func TestMVDSInteractiveSuite(t *testing.T) {
	suite.Run(t, new(MVDSInteractiveSuite))
}

type MVDSInteractiveSuite struct {
	suite.Suite
	client1 *node.Node
	client2 *node.Node
	ds1     store.MessageStore
	ds2     store.MessageStore
	state1  state.SyncState
	state2  state.SyncState
	peers1  peers.Persistence
	peers2  peers.Persistence
	groupID state.GroupID
}

func (s *MVDSInteractiveSuite) SetupTest() {
	logger, err := zap.NewDevelopment()
	s.Require().NoError(err)

	in1 := make(chan transport.Packet)
	t1 := transport.NewChannelTransport(0, in1)
	s.ds1 = store.NewDummyStore()
	s.state1 = state.NewSyncState()
	s.peers1 = peers.NewMemoryPersistence()
	p1 := [65]byte{0x01}
	s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.INTERACTIVE, s.peers1, logger)

	in2 := make(chan transport.Packet)
	t2 := transport.NewChannelTransport(0, in2)
	s.ds2 = store.NewDummyStore()
	s.state2 = state.NewSyncState()
	p2 := [65]byte{0x02}
	s.peers2 = peers.NewMemoryPersistence()
	s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.INTERACTIVE, s.peers2, logger)

	t2.AddOutput(p1, in1)
	t1.AddOutput(p2, in2)

	s.groupID = [32]byte{0x01, 0x2, 0x3, 0x4}

	s.Require().NoError(s.client1.AddPeer(s.groupID, p2))
	s.Require().NoError(s.client2.AddPeer(s.groupID, p1))

	s.client1.Start(10 * time.Millisecond)
	s.client2.Start(10 * time.Millisecond)
}

func (s *MVDSInteractiveSuite) TearDownTest() {
	s.client1.Stop()
	s.client2.Stop()
}

func (s *MVDSInteractiveSuite) TestInteractiveMode() {
	messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
	s.Require().NoError(err)

	// Check message is in store
	message1Sender, err := s.ds1.Get(messageID)
	s.Require().NoError(err)
	s.Require().NotNil(message1Sender)

	// Check state is updated correctly
	states, err := s.state1.All(s.client1.CurrentEpoch())
	s.Require().NoError(err)
	s.Require().Equal(1, len(states))

	// Check we store the request
	s.Require().Eventually(func() bool {
		states, err := s.state2.All(s.client2.CurrentEpoch())
		return err == nil && len(states) == 1 && states[0].Type == state.REQUEST
	}, 1*time.Second, 10*time.Millisecond, "A request is stored in the state")

	// Check we eventually get the message
	s.Require().Eventually(func() bool {
		message1Receiver, err := s.ds1.Get(messageID)
		return err == nil && message1Receiver != nil
	}, 1*time.Second, 10*time.Millisecond, "The message is eventually received")

	// Check state is removed
	s.Require().Eventually(func() bool {
		states, err := s.state1.All(s.client1.CurrentEpoch())
		return err == nil && len(states) == 0
	}, 1*time.Second, 10*time.Millisecond, "We clear all the state")
}
node/epoch_persistency.go (new file, 33 lines)
@@ -0,0 +1,33 @@
package node

import (
	"database/sql"

	"github.com/vacp2p/mvds/state"
)

type epochSQLitePersistence struct {
	db *sql.DB
}

func newEpochSQLitePersistence(db *sql.DB) *epochSQLitePersistence {
	return &epochSQLitePersistence{db: db}
}

func (p *epochSQLitePersistence) Get(nodeID state.PeerID) (epoch int64, err error) {
	row := p.db.QueryRow(`SELECT epoch FROM mvds_epoch WHERE peer_id = ?`, nodeID[:])
	err = row.Scan(&epoch)
	if err == sql.ErrNoRows {
		err = nil
	}
	return
}

func (p *epochSQLitePersistence) Set(nodeID state.PeerID, epoch int64) error {
	_, err := p.db.Exec(`
		INSERT OR REPLACE INTO mvds_epoch (peer_id, epoch) VALUES (?, ?)`,
		nodeID[:],
		epoch,
	)
	return err
}
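Get deliberately swallows sql.ErrNoRows, so a peer that has never been stored yields epoch 0 with a nil error. A minimal sketch of how a caller inside package node might turn that into a default starting epoch; the helper loadOrDefaultEpoch is hypothetical and only illustrates the contract above.

// loadOrDefaultEpoch returns the stored epoch for peer, or def when the peer
// has never been persisted (which Get reports as 0 with a nil error).
func loadOrDefaultEpoch(p *epochSQLitePersistence, peer state.PeerID, def int64) (int64, error) {
	epoch, err := p.Get(peer)
	if err != nil {
		return 0, err
	}
	if epoch == 0 {
		return def, nil
	}
	return epoch, nil
}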
node/epoch_persistency_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package node

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/vacp2p/mvds/node/migrations"
	"github.com/vacp2p/mvds/persistenceutil"
	"github.com/vacp2p/mvds/state"
)

func TestEpochSQLitePersistence(t *testing.T) {
	tmpFile, err := ioutil.TempFile("", "")
	require.NoError(t, err)
	db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
	require.NoError(t, err)
	p := newEpochSQLitePersistence(db)

	err = p.Set(state.PeerID{0x01}, 1)
	require.NoError(t, err)
	epoch, err := p.Get(state.PeerID{0x01})
	require.NoError(t, err)
	require.Equal(t, int64(1), epoch)

	err = p.Set(state.PeerID{0x01}, 2)
	require.NoError(t, err)
	epoch, err = p.Get(state.PeerID{0x01})
	require.NoError(t, err)
	require.Equal(t, int64(2), epoch)

	epoch, err = p.Get(state.PeerID{0xff})
	require.NoError(t, err)
	require.Equal(t, int64(0), epoch)
}
node/migrations/migrations.go (new file, 319 lines)
@@ -0,0 +1,319 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565345162_initial_schema.down.sql (23B)
|
||||
// 1565345162_initial_schema.up.sql (86B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565345162_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\xb0\xe6\x02\x04\x00\x00\xff\xff\xd3\x00\xf3\x23\x17\x00\x00\x00")
|
||||
|
||||
func _1565345162_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565345162_initial_schemaDownSql,
|
||||
"1565345162_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565345162_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565345162_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1565345447, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7c, 0x69, 0xd2, 0x3, 0xea, 0x82, 0x7c, 0xb3, 0x44, 0x6c, 0xef, 0x64, 0x2c, 0x99, 0x62, 0xa2, 0x8b, 0x6f, 0x96, 0x4f, 0x34, 0x41, 0x87, 0xd5, 0x4e, 0x3, 0x7f, 0x4a, 0xd1, 0x91, 0x9, 0x99}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565345162_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\x50\xd0\xe0\x52\x50\x50\x50\x28\x48\x4d\x2d\x8a\xcf\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\xcb\x42\x54\x7a\xfa\x85\xb8\xba\xbb\x06\x29\xf8\xf9\x87\x28\xf8\x85\xfa\xf8\x70\x69\x5a\x73\x01\x02\x00\x00\xff\xff\x51\x96\x2d\xcb\x56\x00\x00\x00")
|
||||
|
||||
func _1565345162_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565345162_initial_schemaUpSql,
|
||||
"1565345162_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565345162_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565345162_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1565345708, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x78, 0x7c, 0xdd, 0x67, 0x61, 0x3e, 0x7f, 0xd4, 0xce, 0xb0, 0x17, 0xbe, 0x5a, 0xa7, 0x9e, 0x93, 0x34, 0xe8, 0xbb, 0x44, 0xfb, 0x88, 0xd6, 0x18, 0x6d, 0x9f, 0xb4, 0x22, 0xda, 0xbc, 0x87, 0x94}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565345207, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565345162_initial_schema.down.sql": _1565345162_initial_schemaDownSql,
|
||||
|
||||
"1565345162_initial_schema.up.sql": _1565345162_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565345162_initial_schema.down.sql": &bintree{_1565345162_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565345162_initial_schema.up.sql": &bintree{_1565345162_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
DROP TABLE mvds_epoch;
4
node/migrations/sqlite/1565345162_initial_schema.up.sql
Normal file
4
node/migrations/sqlite/1565345162_initial_schema.up.sql
Normal file
@@ -0,0 +1,4 @@
CREATE TABLE mvds_epoch (
  peer_id BLOB PRIMARY KEY,
  epoch INTEGER NOT NULL
);
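The epochSQLitePersistence used by node.go later in this diff is not shown here; against this schema its Set method plausibly boils down to a single upsert (a sketch only, the actual implementation may differ):

_, err := db.Exec(
	`INSERT OR REPLACE INTO mvds_epoch (peer_id, epoch) VALUES (?, ?)`,
	peerID[:], epoch,
)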
9
node/migrations/sqlite/doc.go
Normal file
9
node/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
// This file is necessary because "github.com/status-im/migrate/v4"
// can't handle files starting with a prefix. At least that's the case
// for go-bindata.
// If go-bindata is called from the same directory, asset names
// have no prefix and "github.com/status-im/migrate/v4" works as expected.

package sqlite

//go:generate go-bindata -pkg migrations -o ../migrations.go .
504
node/node.go
504
node/node.go
@@ -1,20 +1,23 @@
|
||||
// Package Node contains node logic.
|
||||
// Package node contains node logic.
|
||||
package node
|
||||
|
||||
// @todo this is a very rough implementation that needs cleanup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/status-im/mvds/store"
|
||||
"github.com/status-im/mvds/transport"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
"github.com/vacp2p/mvds/transport"
|
||||
)
|
||||
|
||||
// Mode represents the synchronization mode.
|
||||
@@ -25,10 +28,13 @@ const (
|
||||
BATCH
|
||||
)
|
||||
|
||||
type calculateNextEpoch func(count uint64, epoch int64) int64
|
||||
// CalculateNextEpoch is a function used to calculate the next `SendEpoch` for a given message.
|
||||
type CalculateNextEpoch func(count uint64, epoch int64) int64
|
||||
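For reference, a CalculateNextEpoch implementation only has to decide how quickly retransmissions back off; the node adds the returned value to a record's SendEpoch (see updateSendEpoch further down). A minimal sketch, not part of this diff, using capped exponential backoff:

func exponentialBackoff(count uint64, epoch int64) int64 {
	// Wait 2^count epochs before the next retransmission; the cap keeps
	// the shift from overflowing. The current epoch is available but unused here.
	if count > 30 {
		count = 30
	}
	return 1 << count
}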
|
||||
// Node represents an MVDS node, it runs all the logic like sending and receiving protocol messages.
|
||||
type Node struct {
|
||||
// This needs to be declared first: https://github.com/golang/go/issues/9959
|
||||
epoch int64
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
@@ -37,18 +43,84 @@ type Node struct {
|
||||
|
||||
syncState state.SyncState
|
||||
|
||||
peers map[state.GroupID][]state.PeerID
|
||||
peers peers.Persistence
|
||||
|
||||
payloads payloads
|
||||
|
||||
nextEpoch calculateNextEpoch
|
||||
nextEpoch CalculateNextEpoch
|
||||
|
||||
ID state.PeerID
|
||||
|
||||
epoch int64
|
||||
mode Mode
|
||||
epochPersistence *epochSQLitePersistence
|
||||
mode Mode
|
||||
|
||||
subscription chan<- protobuf.Message
|
||||
subscription chan protobuf.Message
|
||||
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
func NewPersistentNode(
|
||||
db *sql.DB,
|
||||
st transport.Transport,
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
logger *zap.Logger,
|
||||
) (*Node, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
node := Node{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewPersistentMessageStore(db),
|
||||
transport: st,
|
||||
peers: peers.NewSQLitePersistence(db),
|
||||
syncState: state.NewPersistentSyncState(db),
|
||||
payloads: newPayloads(),
|
||||
epochPersistence: newEpochSQLitePersistence(db),
|
||||
nextEpoch: nextEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
}
|
||||
if currentEpoch, err := node.epochPersistence.Get(id); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
node.epoch = currentEpoch
|
||||
}
|
||||
return &node, nil
|
||||
}
|
||||
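With the persistenceutil helpers introduced later in this diff, wiring up a persistent node could look roughly like the following sketch; the database path, transport, peer ID and epoch function are placeholders, not part of this change:

db, err := persistenceutil.Open("/tmp/mvds.db", "")
if err != nil {
	panic(err)
}
if err := persistenceutil.Migrate(db); err != nil {
	panic(err)
}
n, err := node.NewPersistentNode(db, myTransport, myPeerID, node.BATCH, nextEpochFn, nil)
if err != nil {
	panic(err)
}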
|
||||
func NewEphemeralNode(
|
||||
id state.PeerID,
|
||||
t transport.Transport,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
currentEpoch int64,
|
||||
mode Mode,
|
||||
logger *zap.Logger,
|
||||
) *Node {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewDummyStore(),
|
||||
transport: t,
|
||||
syncState: state.NewSyncState(),
|
||||
peers: peers.NewMemoryPersistence(),
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
}
|
||||
}
|
||||
|
||||
// NewNode returns a new node.
|
||||
@@ -56,39 +128,49 @@ func NewNode(
|
||||
ms store.MessageStore,
|
||||
st transport.Transport,
|
||||
ss state.SyncState,
|
||||
nextEpoch calculateNextEpoch,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
currentEpoch int64,
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
pp peers.Persistence,
|
||||
logger *zap.Logger,
|
||||
) *Node {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: ms,
|
||||
transport: st,
|
||||
syncState: ss,
|
||||
peers: make(map[state.GroupID][]state.PeerID),
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
ID: id,
|
||||
epoch: currentEpoch,
|
||||
mode: mode,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: ms,
|
||||
transport: st,
|
||||
syncState: ss,
|
||||
peers: pp,
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
ID: id,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) CurrentEpoch() int64 {
|
||||
return atomic.LoadInt64(&n.epoch)
|
||||
}
|
||||
|
||||
// Start listens for new messages received by the node and sends out those required every epoch.
|
||||
func (n *Node) Start() {
|
||||
func (n *Node) Start(duration time.Duration) {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
log.Print("Watch stopped")
|
||||
n.logger.Info("Watch stopped")
|
||||
return
|
||||
default:
|
||||
p := n.transport.Watch()
|
||||
go n.onPayload(p.Group, p.Sender, p.Payload)
|
||||
go n.onPayload(p.Sender, p.Payload)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -97,14 +179,22 @@ func (n *Node) Start() {
|
||||
for {
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
log.Print("Epoch processing stopped")
|
||||
n.logger.Info("Epoch processing stopped")
|
||||
return
|
||||
default:
|
||||
log.Printf("Node: %x Epoch: %d", n.ID[:4], n.epoch)
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
n.sendMessages()
|
||||
n.logger.Debug("Epoch processing", zap.String("node", hex.EncodeToString(n.ID[:4])), zap.Int64("epoch", n.epoch))
|
||||
time.Sleep(duration)
|
||||
err := n.sendMessages()
|
||||
if err != nil {
|
||||
n.logger.Error("Error sending messages.", zap.Error(err))
|
||||
}
|
||||
atomic.AddInt64(&n.epoch, 1)
|
||||
// When a persistent node is used, the epoch needs to be saved.
|
||||
if n.epochPersistence != nil {
|
||||
if err := n.epochPersistence.Set(n.ID, n.epoch); err != nil {
|
||||
n.logger.Error("Failed to persisten epoch", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -112,50 +202,58 @@ func (n *Node) Start() {
|
||||
|
||||
// Stop message reading and epoch processing
|
||||
func (n *Node) Stop() {
|
||||
n.logger.Info("Stopping node")
|
||||
n.Unsubscribe()
|
||||
n.cancel()
|
||||
}
|
||||
|
||||
// Subscribe subscribes to incoming messages.
|
||||
func (n *Node) Subscribe(sub chan <-protobuf.Message) {
|
||||
n.subscription = sub
|
||||
func (n *Node) Subscribe() chan protobuf.Message {
|
||||
n.subscription = make(chan protobuf.Message)
|
||||
return n.subscription
|
||||
}
|
||||
|
||||
// Unsubscribe closes the listening channels
|
||||
func (n *Node) Unsubscribe() {
|
||||
if n.subscription != nil {
|
||||
close(n.subscription)
|
||||
}
|
||||
n.subscription = nil
|
||||
}
|
||||
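Since Subscribe now creates and returns the channel itself and Start takes the epoch interval as an argument, a typical caller would look like this (a sketch; node construction and imports omitted):

sub := n.Subscribe()
go func() {
	// The loop ends once Unsubscribe (called from Stop) closes the channel.
	for msg := range sub {
		fmt.Printf("group %x: %s\n", msg.GroupId[:4], msg.Body)
	}
}()
n.Start(500 * time.Millisecond)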
|
||||
// AppendMessage sends a message to a given group.
|
||||
func (n *Node) AppendMessage(group state.GroupID, data []byte) (state.MessageID, error) {
|
||||
func (n *Node) AppendMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
|
||||
m := protobuf.Message{
|
||||
GroupId: group[:],
|
||||
GroupId: groupID[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: data,
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
|
||||
peers, ok := n.peers[group]
|
||||
if !ok {
|
||||
return state.MessageID{}, fmt.Errorf("trying to send to unknown group %x", group[:4])
|
||||
peers, err := n.peers.GetByGroupID(groupID)
|
||||
if err != nil {
|
||||
return state.MessageID{}, fmt.Errorf("trying to send to unknown group %x", groupID[:4])
|
||||
}
|
||||
|
||||
err := n.store.Add(m)
|
||||
err = n.store.Add(&m)
|
||||
if err != nil {
|
||||
return state.MessageID{}, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
if !n.IsPeerInGroup(group, p) {
|
||||
continue
|
||||
}
|
||||
|
||||
t := state.OFFER
|
||||
if n.mode == BATCH {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, p, t)
|
||||
for _, p := range peers {
|
||||
t := state.OFFER
|
||||
if n.mode == BATCH {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("[%x] node %x sending %x\n", group[:4], n.ID[:4], id[:4])
|
||||
n.insertSyncState(&groupID, id, p, t)
|
||||
}
|
||||
|
||||
n.logger.Debug("Sending message",
|
||||
zap.String("node", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("id", hex.EncodeToString(id[:4])))
|
||||
// @todo think about a way to insta trigger send messages when send was selected, we don't wanna wait for ticks here
|
||||
|
||||
return id, nil
|
||||
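Combined with the error-returning AddPeer introduced a little further down, appending a message now looks roughly like this (groupID and peerID are placeholder values):

if err := n.AddPeer(groupID, peerID); err != nil {
	return err
}
// AppendMessage stores the message and schedules an OFFER (or the full
// MESSAGE in BATCH mode) for every peer in the group.
id, err := n.AppendMessage(groupID, []byte("hello"))
if err != nil {
	return err
}
_ = id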
@@ -163,201 +261,289 @@ func (n *Node) AppendMessage(group state.GroupID, data []byte) (state.MessageID,
|
||||
|
||||
// RequestMessage adds a REQUEST record to the next payload for a given message ID.
|
||||
func (n *Node) RequestMessage(group state.GroupID, id state.MessageID) error {
|
||||
peers, ok := n.peers[group]
|
||||
if !ok {
|
||||
peers, err := n.peers.GetByGroupID(group)
|
||||
if err != nil {
|
||||
return fmt.Errorf("trying to request from an unknown group %x", group[:4])
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
if !n.IsPeerInGroup(group, p) {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, p, state.REQUEST)
|
||||
for _, p := range peers {
|
||||
exist, err := n.IsPeerInGroup(group, p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}()
|
||||
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&group, id, p, state.REQUEST)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPeer adds a peer to a specific group making it a recipient of messages.
|
||||
func (n *Node) AddPeer(group state.GroupID, id state.PeerID) {
|
||||
if _, ok := n.peers[group]; !ok {
|
||||
n.peers[group] = make([]state.PeerID, 0)
|
||||
}
|
||||
|
||||
n.peers[group] = append(n.peers[group], id)
|
||||
func (n *Node) AddPeer(group state.GroupID, id state.PeerID) error {
|
||||
return n.peers.Add(group, id)
|
||||
}
|
||||
|
||||
// IsPeerInGroup checks whether a peer is in the specified group.
|
||||
func (n Node) IsPeerInGroup(g state.GroupID, p state.PeerID) bool {
|
||||
for _, peer := range n.peers[g] {
|
||||
if bytes.Equal(peer[:], p[:]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
func (n *Node) IsPeerInGroup(g state.GroupID, p state.PeerID) (bool, error) {
|
||||
return n.peers.Exists(g, p)
|
||||
}
|
||||
|
||||
func (n *Node) sendMessages() {
|
||||
err := n.syncState.Map(n.epoch, func(g state.GroupID, m state.MessageID, p state.PeerID, s state.State) state.State {
|
||||
if !n.IsPeerInGroup(g, p) {
|
||||
return s
|
||||
}
|
||||
|
||||
func (n *Node) sendMessages() error {
|
||||
err := n.syncState.Map(n.epoch, func(s state.State) state.State {
|
||||
m := s.MessageID
|
||||
p := s.PeerID
|
||||
switch s.Type {
|
||||
case state.OFFER:
|
||||
n.payloads.AddOffers(g, p, m[:])
|
||||
n.payloads.AddOffers(p, m[:])
|
||||
case state.REQUEST:
|
||||
n.payloads.AddRequests(g, p, m[:])
|
||||
log.Printf("[%x] sending REQUEST (%x -> %x): %x\n", g[:4], n.ID[:4], p[:4], m[:4])
|
||||
n.payloads.AddRequests(p, m[:])
|
||||
n.logger.Debug("sending REQUEST",
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(p[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
)
|
||||
|
||||
case state.MESSAGE:
|
||||
msg, err := n.store.Get(m)
|
||||
g := *s.GroupID
|
||||
// TODO: Handle errors
|
||||
exist, err := n.IsPeerInGroup(g, p)
|
||||
if err != nil {
|
||||
log.Printf("failed to retreive message %x %s", m[:4], err.Error())
|
||||
return s
|
||||
}
|
||||
|
||||
n.payloads.AddMessages(g, p, &msg)
|
||||
log.Printf("[%x] sending MESSAGE (%x -> %x): %x\n", g[:4], n.ID[:4], p[:4], m[:4])
|
||||
if !exist {
|
||||
return s
|
||||
}
|
||||
|
||||
msg, err := n.store.Get(m)
|
||||
if err != nil {
|
||||
n.logger.Error("Failed to retreive message",
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
n.payloads.AddMessages(p, msg)
|
||||
n.logger.Debug("sending MESSAGE",
|
||||
zap.String("groupID", hex.EncodeToString(g[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(p[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
)
|
||||
|
||||
}
|
||||
|
||||
return n.updateSendEpoch(s)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("error while mapping sync state: %s", err.Error())
|
||||
n.logger.Error("error while mapping sync state", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
n.payloads.MapAndClear(func(id state.GroupID, peer state.PeerID, payload protobuf.Payload) {
|
||||
err := n.transport.Send(id, n.ID, peer, payload)
|
||||
return n.payloads.MapAndClear(func(peer state.PeerID, payload protobuf.Payload) error {
|
||||
err := n.transport.Send(n.ID, peer, payload)
|
||||
if err != nil {
|
||||
log.Printf("error sending message: %s", err.Error())
|
||||
// @todo
|
||||
n.logger.Error("error sending message", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (n *Node) onPayload(group state.GroupID, sender state.PeerID, payload protobuf.Payload) {
|
||||
func (n *Node) onPayload(sender state.PeerID, payload protobuf.Payload) {
|
||||
// Acks, Requests and Offers are all arrays of bytes as protobuf doesn't allow type aliases otherwise arrays of messageIDs would be nicer.
|
||||
n.onAck(group, sender, payload.Acks)
|
||||
n.onRequest(group, sender, payload.Requests)
|
||||
n.onOffer(group, sender, payload.Offers)
|
||||
n.payloads.AddAcks(group, sender, n.onMessages(group, sender, payload.Messages)...)
|
||||
if err := n.onAck(sender, payload.Acks); err != nil {
|
||||
n.logger.Error("error processing acks", zap.Error(err))
|
||||
}
|
||||
if err := n.onRequest(sender, payload.Requests); err != nil {
|
||||
n.logger.Error("error processing requests", zap.Error(err))
|
||||
}
|
||||
if err := n.onOffer(sender, payload.Offers); err != nil {
|
||||
n.logger.Error("error processing offers", zap.Error(err))
|
||||
}
|
||||
messageIds := n.onMessages(sender, payload.Messages)
|
||||
n.payloads.AddAcks(sender, messageIds)
|
||||
}
|
||||
|
||||
func (n *Node) onOffer(group state.GroupID, sender state.PeerID, offers [][]byte) {
|
||||
func (n *Node) onOffer(sender state.PeerID, offers [][]byte) error {
|
||||
for _, raw := range offers {
|
||||
id := toMessageID(raw)
|
||||
log.Printf("[%x] OFFER (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
n.logger.Debug("OFFER received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
exist, err := n.store.Has(id)
|
||||
// @todo maybe ack?
|
||||
if n.store.Has(id) {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, sender, state.REQUEST)
|
||||
n.insertSyncState(nil, id, sender, state.REQUEST)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onRequest(group state.GroupID, sender state.PeerID, requests [][]byte) {
|
||||
func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
|
||||
for _, raw := range requests {
|
||||
id := toMessageID(raw)
|
||||
log.Printf("[%x] REQUEST (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
n.logger.Debug("REQUEST received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
if !n.IsPeerInGroup(group, sender) {
|
||||
log.Printf("[%x] peer %x is not in group", group[:4], sender[:4])
|
||||
continue
|
||||
}
|
||||
|
||||
if !n.store.Has(id) {
|
||||
log.Printf("message %x does not exist", id[:4])
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, sender, state.MESSAGE)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) onAck(group state.GroupID, sender state.PeerID, acks [][]byte) {
|
||||
for _, raw := range acks {
|
||||
id := toMessageID(raw)
|
||||
|
||||
err := n.syncState.Remove(group, id, sender)
|
||||
message, err := n.store.Get(id)
|
||||
if err != nil {
|
||||
log.Printf("error while removing sync state %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if message == nil {
|
||||
n.logger.Error("message does not exist", zap.String("messageID", hex.EncodeToString(id[:4])))
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[%x] ACK (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
groupID := toGroupID(message.GroupId)
|
||||
|
||||
exist, err := n.IsPeerInGroup(groupID, sender)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exist {
|
||||
n.logger.Error("peer is not in group",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("peer", hex.EncodeToString(sender[:4])),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&groupID, id, sender, state.MESSAGE)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onMessages(group state.GroupID, sender state.PeerID, messages []*protobuf.Message) [][]byte {
|
||||
func (n *Node) onAck(sender state.PeerID, acks [][]byte) error {
|
||||
for _, ack := range acks {
|
||||
id := toMessageID(ack)
|
||||
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil {
|
||||
n.logger.Error("Error while removing sync state.", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
n.logger.Debug("ACK received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onMessages(sender state.PeerID, messages []*protobuf.Message) [][]byte {
|
||||
a := make([][]byte, 0)
|
||||
|
||||
for _, m := range messages {
|
||||
err := n.onMessage(group, sender, *m)
|
||||
groupID := toGroupID(m.GroupId)
|
||||
err := n.onMessage(sender, *m)
|
||||
if err != nil {
|
||||
// @todo
|
||||
n.logger.Error("Error processing message", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
log.Printf("[%x] sending ACK (%x -> %x): %x\n", group[:4], n.ID[:4], sender[:4], id[:4])
|
||||
n.logger.Debug("sending ACK",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("", hex.EncodeToString(sender[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
a = append(a, id[:])
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (n *Node) onMessage(group state.GroupID, sender state.PeerID, msg protobuf.Message) error {
|
||||
func (n *Node) onMessage(sender state.PeerID, msg protobuf.Message) error {
|
||||
id := msg.ID()
|
||||
log.Printf("[%x] MESSAGE (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
groupID := toGroupID(msg.GroupId)
|
||||
n.logger.Debug("MESSAGE received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
err := n.syncState.Remove(group, id, sender)
|
||||
if err != nil {
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil && err != state.ErrStateNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, peer := range n.peers[group] {
|
||||
if peer == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, peer, state.OFFER)
|
||||
}
|
||||
}()
|
||||
|
||||
if n.subscription != nil {
|
||||
n.subscription <- msg
|
||||
}
|
||||
|
||||
err = n.store.Add(msg)
|
||||
err = n.store.Add(&msg)
|
||||
if err != nil {
|
||||
return err
|
||||
// @todo process, should this function ever even have an error?
|
||||
}
|
||||
|
||||
peers, err := n.peers.GetByGroupID(groupID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
if peer == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&groupID, id, peer, state.OFFER)
|
||||
}
|
||||
|
||||
if n.subscription != nil {
|
||||
n.subscription <- msg
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) insertSyncState(group state.GroupID, id state.MessageID, p state.PeerID, t state.RecordType) {
|
||||
func (n *Node) insertSyncState(groupID *state.GroupID, messageID state.MessageID, peerID state.PeerID, t state.RecordType) {
|
||||
s := state.State{
|
||||
GroupID: groupID,
|
||||
MessageID: messageID,
|
||||
PeerID: peerID,
|
||||
Type: t,
|
||||
SendEpoch: n.epoch + 1,
|
||||
}
|
||||
|
||||
err := n.syncState.Set(group, id, p, s)
|
||||
err := n.syncState.Add(s)
|
||||
if err != nil {
|
||||
log.Printf("error (%s) setting sync state group: %x id: %x peer: %x", err.Error(), group[:4], id[:4], p[:4])
|
||||
n.logger.Error("error setting sync states",
|
||||
zap.Error(err),
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(messageID[:4])),
|
||||
zap.String("peerID", hex.EncodeToString(peerID[:4])),
|
||||
)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (n Node) updateSendEpoch(s state.State) state.State {
|
||||
func (n *Node) updateSendEpoch(s state.State) state.State {
|
||||
s.SendCount += 1
|
||||
s.SendEpoch += n.nextEpoch(s.SendCount, n.epoch)
|
||||
return s
|
||||
@@ -368,3 +554,9 @@ func toMessageID(b []byte) state.MessageID {
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
func toGroupID(b []byte) state.GroupID {
|
||||
var id state.GroupID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -3,92 +3,90 @@ package node
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type payloads struct {
|
||||
sync.Mutex
|
||||
|
||||
payloads map[state.GroupID]map[state.PeerID]protobuf.Payload
|
||||
payloads map[state.PeerID]protobuf.Payload
|
||||
}
|
||||
|
||||
// @todo check in all the functions below that we aren't duplicating stuff
|
||||
|
||||
func newPayloads() payloads {
|
||||
return payloads{
|
||||
payloads: make(map[state.GroupID]map[state.PeerID]protobuf.Payload),
|
||||
payloads: make(map[state.PeerID]protobuf.Payload),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *payloads) AddOffers(group state.GroupID, peer state.PeerID, offers ...[]byte) {
|
||||
func (p *payloads) AddOffers(peer state.PeerID, offers ...[]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Offers = append(payload.Offers, offers...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddAcks(group state.GroupID, peer state.PeerID, acks ...[]byte) {
|
||||
func (p *payloads) AddAcks(peer state.PeerID, acks [][]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Requests = append(payload.Requests, acks...)
|
||||
payload.Acks = append(payload.Acks, acks...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddRequests(group state.GroupID, peer state.PeerID, request ...[]byte) {
|
||||
func (p *payloads) AddRequests(peer state.PeerID, request ...[]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Requests = append(payload.Requests, request...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddMessages(group state.GroupID, peer state.PeerID, messages ...*protobuf.Message) {
|
||||
func (p *payloads) AddMessages(peer state.PeerID, messages ...*protobuf.Message) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
if payload.Messages == nil {
|
||||
payload.Messages = make([]*protobuf.Message, 0)
|
||||
}
|
||||
|
||||
payload.Messages = append(payload.Messages, messages...)
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) MapAndClear(f func(state.GroupID, state.PeerID, protobuf.Payload)) {
|
||||
func (p *payloads) MapAndClear(f func(state.PeerID, protobuf.Payload) error) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
for g, payloads := range p.payloads {
|
||||
for peer, payload := range payloads {
|
||||
f(g, peer, payload)
|
||||
for peer, payload := range p.payloads {
|
||||
err := f(peer, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
p.payloads = make(map[state.GroupID]map[state.PeerID]protobuf.Payload)
|
||||
// TODO: this should only be called upon confirmation that the message has been sent
|
||||
p.payloads = make(map[state.PeerID]protobuf.Payload)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *payloads) get(id state.GroupID, peer state.PeerID) protobuf.Payload {
|
||||
payload, _ := p.payloads[id][peer]
|
||||
return payload
|
||||
func (p *payloads) get(peer state.PeerID) protobuf.Payload {
|
||||
return p.payloads[peer]
|
||||
}
|
||||
|
||||
func (p *payloads) set(id state.GroupID, peer state.PeerID, payload protobuf.Payload) {
|
||||
_, ok := p.payloads[id]
|
||||
if !ok {
|
||||
p.payloads[id] = make(map[state.PeerID]protobuf.Payload)
|
||||
}
|
||||
|
||||
p.payloads[id][peer] = payload
|
||||
func (p *payloads) set(peer state.PeerID, payload protobuf.Payload) {
|
||||
p.payloads[peer] = payload
|
||||
}
|
||||
|
||||
319
peers/migrations/migrations.go
Normal file
319
peers/migrations/migrations.go
Normal file
@@ -0,0 +1,319 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565249278_initial_schema.down.sql (23B)
|
||||
// 1565249278_initial_schema.up.sql (140B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565249278_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x58\x44\x68\xf7\x17\x00\x00\x00")
|
||||
|
||||
func _1565249278_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565249278_initial_schemaDownSql,
|
||||
"1565249278_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565249278_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565249278_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1565249542, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4, 0xfb, 0x5, 0x92, 0xf0, 0x93, 0xaa, 0x83, 0xb7, 0xdf, 0x66, 0xe2, 0x97, 0x53, 0x9d, 0x34, 0xd3, 0xca, 0x97, 0xd8, 0xe1, 0xed, 0xf0, 0x4a, 0x94, 0x1a, 0xb1, 0x8f, 0xcf, 0xc, 0xa4, 0x6}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565249278_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\x56\xd0\xe0\x52\x50\x50\x50\x48\x2f\xca\x2f\x2d\x88\xcf\x4c\x51\x70\xf2\xf1\x77\x52\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\xd1\x01\xcb\x81\xd4\xe1\x90\x0a\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\x54\xd0\x80\x99\xa1\x03\xd3\xa1\xa9\xe0\xef\xa7\xe0\xec\xef\xe7\xe6\xe3\xe9\x1c\xa2\x10\xe4\x1a\xe0\xe3\xe8\xec\xca\xa5\x69\xcd\x05\x08\x00\x00\xff\xff\x67\x63\xbf\x36\x8c\x00\x00\x00")
|
||||
|
||||
func _1565249278_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565249278_initial_schemaUpSql,
|
||||
"1565249278_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565249278_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565249278_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1565251147, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8a, 0xbc, 0x3a, 0x87, 0x12, 0x93, 0xeb, 0xb4, 0xcc, 0x42, 0x6e, 0xb2, 0x7d, 0xfa, 0x9a, 0xa8, 0x3f, 0xb, 0x6b, 0xa8, 0x2d, 0x8b, 0xde, 0x67, 0x2a, 0xa8, 0xa5, 0x42, 0xad, 0x27, 0x15, 0x7e}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565250280, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565249278_initial_schema.down.sql": _1565249278_initial_schemaDownSql,
|
||||
|
||||
"1565249278_initial_schema.up.sql": _1565249278_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565249278_initial_schema.down.sql": &bintree{_1565249278_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565249278_initial_schema.up.sql": &bintree{_1565249278_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
DROP TABLE mvds_peers;
5
peers/migrations/sqlite/1565249278_initial_schema.up.sql
Normal file
5
peers/migrations/sqlite/1565249278_initial_schema.up.sql
Normal file
@@ -0,0 +1,5 @@
CREATE TABLE mvds_peers (
  group_id BLOB NOT NULL,
  peer_id BLOB NOT NULL,
  PRIMARY KEY (group_id, peer_id) ON CONFLICT REPLACE
);
9
peers/migrations/sqlite/doc.go
Normal file
9
peers/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
// This file is necessary because "github.com/status-im/migrate/v4"
// can't handle files starting with a prefix. At least that's the case
// for go-bindata.
// If go-bindata is called from the same directory, asset names
// have no prefix and "github.com/status-im/migrate/v4" works as expected.

package sqlite

//go:generate go-bindata -pkg migrations -o ../migrations.go .
11
peers/persistence.go
Normal file
11
peers/persistence.go
Normal file
@@ -0,0 +1,11 @@
package peers

import (
	"github.com/vacp2p/mvds/state"
)

type Persistence interface {
	Add(state.GroupID, state.PeerID) error
	GetByGroupID(group state.GroupID) ([]state.PeerID, error)
	Exists(state.GroupID, state.PeerID) (bool, error)
}
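Both persistence implementations added below are meant to satisfy this interface; a compile-time assertion (not part of this change) would make that explicit:

var (
	_ Persistence = (*MemoryPersistence)(nil)
	_ Persistence = sqlitePersistence{}
)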
32
peers/persistence_memory.go
Normal file
32
peers/persistence_memory.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package peers
|
||||
|
||||
import "github.com/vacp2p/mvds/state"
|
||||
|
||||
type MemoryPersistence struct {
|
||||
peers map[state.GroupID][]state.PeerID
|
||||
}
|
||||
|
||||
func NewMemoryPersistence() *MemoryPersistence {
|
||||
return &MemoryPersistence{
|
||||
peers: make(map[state.GroupID][]state.PeerID),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
p.peers[groupID] = append(p.peers[groupID], peerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
for _, peer := range p.peers[groupID] {
|
||||
if peer == peerID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) GetByGroupID(groupID state.GroupID) ([]state.PeerID, error) {
|
||||
return p.peers[groupID], nil
|
||||
}
|
||||
|
||||
63
peers/persistence_sqlite.go
Normal file
63
peers/persistence_sqlite.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package peers
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrPeerNotFound = errors.New("peer not found")
|
||||
)
|
||||
|
||||
type sqlitePersistence struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewSQLitePersistence(db *sql.DB) sqlitePersistence {
|
||||
return sqlitePersistence{db: db}
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
_, err := p.db.Exec(`INSERT INTO mvds_peers (group_id, peer_id) VALUES (?, ?)`, groupID[:], peerID[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
var result bool
|
||||
err := p.db.QueryRow(
|
||||
`SELECT EXISTS(SELECT 1 FROM mvds_peers WHERE group_id = ? AND peer_id = ?)`,
|
||||
groupID[:],
|
||||
peerID[:],
|
||||
).Scan(&result)
|
||||
switch err {
|
||||
case sql.ErrNoRows:
|
||||
return false, ErrPeerNotFound
|
||||
case nil:
|
||||
return result, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) GetByGroupID(groupID state.GroupID) (result []state.PeerID, err error) {
|
||||
rows, err := p.db.Query(`SELECT peer_id FROM mvds_peers WHERE group_id = ?`, groupID[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var (
|
||||
peerIDBytes []byte
|
||||
peerID state.PeerID
|
||||
)
|
||||
if err := rows.Scan(&peerIDBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(peerID[:], peerIDBytes)
|
||||
result = append(result, peerID)
|
||||
}
|
||||
return
|
||||
}
|
||||
57
peers/persistence_sqlite_test.go
Normal file
57
peers/persistence_sqlite_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package peers
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/vacp2p/mvds/peers/migrations"
|
||||
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSQLitePersistence(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewSQLitePersistence(db)
|
||||
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
// Add the same again.
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
// Add another peer to the same group.
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x02})
|
||||
require.NoError(t, err)
|
||||
// Create a new group.
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x02})
|
||||
require.NoError(t, err)
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x03})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Validate group 0x01.
|
||||
peers, err := p.GetByGroupID(state.GroupID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []state.PeerID{state.PeerID{0x01}, state.PeerID{0x02}}, peers)
|
||||
// Validate group 0x02.
|
||||
peers, err = p.GetByGroupID(state.GroupID{0x02})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []state.PeerID{state.PeerID{0x01}, state.PeerID{0x02}, state.PeerID{0x03}}, peers)
|
||||
|
||||
// Validate existence method.
|
||||
exists, err := p.Exists(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists)
|
||||
exists, err = p.Exists(state.GroupID{0x01}, state.PeerID{0xFF})
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists)
|
||||
}
|
||||
70
persistenceutil/migrations.go
Normal file
70
persistenceutil/migrations.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package persistenceutil
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
|
||||
nodemigrations "github.com/vacp2p/mvds/node/migrations"
|
||||
peersmigrations "github.com/vacp2p/mvds/peers/migrations"
|
||||
statemigrations "github.com/vacp2p/mvds/state/migrations"
|
||||
storemigrations "github.com/vacp2p/mvds/store/migrations"
|
||||
)
|
||||
|
||||
type getter func(string) ([]byte, error)
|
||||
|
||||
type Migration struct {
|
||||
Names []string
|
||||
Getter func(name string) ([]byte, error)
|
||||
}
|
||||
|
||||
func prepareMigrations(migrations []Migration) ([]string, getter, error) {
|
||||
var allNames []string
|
||||
nameToGetter := make(map[string]getter)
|
||||
|
||||
for _, m := range migrations {
|
||||
for _, name := range m.Names {
|
||||
if !validateName(name) {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := nameToGetter[name]; ok {
|
||||
return nil, nil, errors.Errorf("migration with name %s already exists", name)
|
||||
}
|
||||
allNames = append(allNames, name)
|
||||
nameToGetter[name] = m.Getter
|
||||
}
|
||||
}
|
||||
|
||||
return allNames, func(name string) ([]byte, error) {
|
||||
getter, ok := nameToGetter[name]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("no migration for name %s", name)
|
||||
}
|
||||
return getter(name)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DefaultMigrations is a collection of all mvds components migrations.
|
||||
var DefaultMigrations = []Migration{
|
||||
{
|
||||
Names: nodemigrations.AssetNames(),
|
||||
Getter: nodemigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: peersmigrations.AssetNames(),
|
||||
Getter: peersmigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: statemigrations.AssetNames(),
|
||||
Getter: statemigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: storemigrations.AssetNames(),
|
||||
Getter: storemigrations.Asset,
|
||||
},
|
||||
}
|
||||
|
||||
// validateName verifies that only *.sql files are taken into consideration.
|
||||
func validateName(name string) bool {
|
||||
return strings.HasSuffix(name, ".sql")
|
||||
}
|
||||
124
persistenceutil/sqlite.go
Normal file
124
persistenceutil/sqlite.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package persistenceutil
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/status-im/migrate/v4"
|
||||
"github.com/status-im/migrate/v4/database/sqlcipher"
|
||||
bindata "github.com/status-im/migrate/v4/source/go_bindata"
|
||||
)
|
||||
|
||||
// The reduced number of kdf iterations (for performance reasons) which is
|
||||
// currently used for derivation of the database key
|
||||
// https://github.com/status-im/status-go/pull/1343
|
||||
// https://notes.status.im/i8Y_l7ccTiOYq09HVgoFwA
|
||||
const reducedKdfIterationsNumber = 3200
|
||||
|
||||
// MigrationConfig is a struct that allows to define bindata migrations.
|
||||
type MigrationConfig struct {
|
||||
AssetNames []string
|
||||
AssetGetter func(name string) ([]byte, error)
|
||||
}
|
||||
|
||||
// Migrate migrates a provided sqldb
|
||||
func Migrate(db *sql.DB) error {
|
||||
assetNames, assetGetter, err := prepareMigrations(DefaultMigrations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ApplyMigrations(db, assetNames, assetGetter)
|
||||
|
||||
}
|
||||
|
||||
// Open opens or initializes a new database for a given file path.
|
||||
// MigrationConfig is optional but if provided migrations are applied automatically.
|
||||
func Open(path, key string, mc ...MigrationConfig) (*sql.DB, error) {
|
||||
return open(path, key, reducedKdfIterationsNumber, mc)
|
||||
}
|
||||
|
||||
// OpenWithIter allows to open a new database with a custom number of kdf iterations.
|
||||
// Higher kdf iterations number makes it slower to open the database.
|
||||
func OpenWithIter(path, key string, kdfIter int, mc ...MigrationConfig) (*sql.DB, error) {
|
||||
return open(path, key, kdfIter, mc)
|
||||
}
|
||||
|
||||
func open(path string, key string, kdfIter int, configs []MigrationConfig) (*sql.DB, error) {
|
||||
_, err := os.OpenFile(path, os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite3", path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyString := fmt.Sprintf("PRAGMA key = '%s'", key)
|
||||
|
||||
// Disable concurrent access as not supported by the driver
|
||||
db.SetMaxOpenConns(1)
|
||||
|
||||
if _, err = db.Exec("PRAGMA foreign_keys=ON"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = db.Exec(keyString); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kdfString := fmt.Sprintf("PRAGMA kdf_iter = '%d'", kdfIter)
|
||||
|
||||
if _, err = db.Exec(kdfString); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply all provided migrations.
|
||||
for _, mc := range configs {
|
||||
if err := ApplyMigrations(db, mc.AssetNames, mc.AssetGetter); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// ApplyMigrations allows to apply bindata migrations on the current *sql.DB.
|
||||
// `assetNames` is a list of assets with migrations and `assetGetter` is responsible
|
||||
// for returning the content of the asset with a given name.
|
||||
func ApplyMigrations(db *sql.DB, assetNames []string, assetGetter func(name string) ([]byte, error)) error {
|
||||
resources := bindata.Resource(
|
||||
assetNames,
|
||||
assetGetter,
|
||||
)
|
||||
|
||||
source, err := bindata.WithInstance(resources)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create migration source")
|
||||
}
|
||||
|
||||
driver, err := sqlcipher.WithInstance(db, &sqlcipher.Config{
|
||||
MigrationsTable: "mvds_" + sqlcipher.DefaultMigrationsTable,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create driver")
|
||||
}
|
||||
|
||||
m, err := migrate.NewWithInstance(
|
||||
"go-bindata",
|
||||
source,
|
||||
"sqlcipher",
|
||||
driver,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create migration instance")
|
||||
}
|
||||
|
||||
if err = m.Up(); err != migrate.ErrNoChange {
|
||||
return errors.Wrap(err, "failed to migrate")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
// ID creates the MessageID for a Message
|
||||
|
||||
7
protobuf/payload.go
Normal file
7
protobuf/payload.go
Normal file
@@ -0,0 +1,7 @@
package protobuf

// IsValid checks whether there are any known field in the protobuf
// message
func (m *Payload) IsValid() bool {
	return len(m.Messages)+len(m.Acks)+len(m.Offers)+len(m.Requests) != 0
}
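A transport implementation would typically use this check to drop empty envelopes before handing them to the node; a sketch, where payload is the decoded protobuf.Payload:

if !payload.IsValid() {
	return nil // nothing to ack, offer, request or deliver
}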
@@ -21,10 +21,10 @@ var _ = math.Inf
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type Payload struct {
|
||||
Acks [][]byte `protobuf:"bytes,1,rep,name=acks,proto3" json:"acks,omitempty"`
|
||||
Offers [][]byte `protobuf:"bytes,2,rep,name=offers,proto3" json:"offers,omitempty"`
|
||||
Requests [][]byte `protobuf:"bytes,3,rep,name=requests,proto3" json:"requests,omitempty"`
|
||||
Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
Acks [][]byte `protobuf:"bytes,5001,rep,name=acks,proto3" json:"acks,omitempty"`
|
||||
Offers [][]byte `protobuf:"bytes,5002,rep,name=offers,proto3" json:"offers,omitempty"`
|
||||
Requests [][]byte `protobuf:"bytes,5003,rep,name=requests,proto3" json:"requests,omitempty"`
|
||||
Messages []*Message `protobuf:"bytes,5004,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -84,9 +84,9 @@ func (m *Payload) GetMessages() []*Message {
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
GroupId []byte `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
|
||||
GroupId []byte `protobuf:"bytes,6001,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,6002,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,6003,opt,name=body,proto3" json:"body,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
@@ -146,19 +146,19 @@ func init() {
|
||||
func init() { proto.RegisterFile("protobuf/sync.proto", fileDescriptor_2dca527c092c79d7) }
|
||||
|
||||
var fileDescriptor_2dca527c092c79d7 = []byte{
|
||||
// 212 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x8f, 0xb1, 0x4a, 0x04, 0x31,
|
||||
0x10, 0x86, 0xd9, 0xcb, 0x72, 0x1b, 0xc7, 0xb3, 0x19, 0x41, 0xa2, 0x58, 0x84, 0xab, 0x62, 0xb3,
|
||||
0x82, 0xbe, 0x81, 0x9d, 0x85, 0x20, 0x29, 0x2c, 0x6c, 0x24, 0x7b, 0xc9, 0x1e, 0x87, 0xc6, 0xac,
|
||||
0x99, 0xac, 0xb0, 0xe0, 0xc3, 0xcb, 0x8d, 0xe7, 0x5e, 0xf7, 0x7f, 0xdf, 0xcf, 0x30, 0x33, 0x70,
|
||||
0x3e, 0xe4, 0x54, 0x52, 0x37, 0xf6, 0xb7, 0x34, 0x7d, 0x6e, 0x5a, 0x26, 0xac, 0xe3, 0xb7, 0xa7,
|
||||
0xf5, 0x0f, 0x34, 0xcf, 0x6e, 0xfa, 0x48, 0xce, 0x23, 0x42, 0xed, 0x36, 0xef, 0xa4, 0x2a, 0x2d,
|
||||
0xcc, 0xca, 0x72, 0xc6, 0x0b, 0x58, 0xa6, 0xbe, 0x0f, 0x99, 0xd4, 0x82, 0xed, 0x81, 0xf0, 0x0a,
|
||||
0x64, 0x0e, 0x5f, 0x63, 0xa0, 0x42, 0x4a, 0x70, 0x33, 0x33, 0xde, 0x80, 0x8c, 0x81, 0xc8, 0x6d,
|
||||
0x03, 0xa9, 0x5a, 0x0b, 0x73, 0x7a, 0x77, 0xd6, 0xee, 0x77, 0xb5, 0x4f, 0x7f, 0xd6, 0xce, 0xf5,
|
||||
0xfa, 0x05, 0x9a, 0x83, 0xc4, 0x4b, 0x90, 0xdb, 0x9c, 0xc6, 0xe1, 0x6d, 0xe7, 0x55, 0xa5, 0x2b,
|
||||
0xb3, 0xb2, 0x0d, 0xf3, 0xa3, 0xc7, 0x6b, 0x38, 0x29, 0xbb, 0x18, 0xa8, 0xb8, 0x38, 0xa8, 0x85,
|
||||
0xae, 0x8c, 0xb0, 0x47, 0xb1, 0x3f, 0xbb, 0x4b, 0x7e, 0x52, 0x82, 0x87, 0x38, 0x3f, 0xc0, 0xab,
|
||||
0xfc, 0x7f, 0xb9, 0x5b, 0x72, 0xba, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x0a, 0x11, 0xee,
|
||||
0x05, 0x01, 0x00, 0x00,
|
||||
// 215 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2e, 0x28, 0xca, 0x2f,
|
||||
0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xae, 0xcc, 0x4b, 0xd6, 0x03, 0xf3, 0x84, 0x58, 0x72, 0xcb,
|
||||
0x52, 0x8a, 0x95, 0x1a, 0x18, 0xb9, 0xd8, 0x03, 0x12, 0x2b, 0x73, 0xf2, 0x13, 0x53, 0x84, 0x84,
|
||||
0xb9, 0x58, 0x12, 0x93, 0xb3, 0x8b, 0x25, 0x3a, 0xd5, 0x15, 0x98, 0x35, 0x78, 0x82, 0xc0, 0x1c,
|
||||
0x21, 0x71, 0x2e, 0xb6, 0xfc, 0xb4, 0xb4, 0xd4, 0xa2, 0x62, 0x89, 0x2e, 0x88, 0x30, 0x94, 0x2b,
|
||||
0x24, 0xcd, 0xc5, 0x51, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x52, 0x2c, 0xd1, 0x0d, 0x91, 0x82,
|
||||
0x0b, 0x08, 0x69, 0x71, 0x71, 0xe4, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x16, 0x4b, 0xf4, 0x80,
|
||||
0x24, 0xb9, 0x8d, 0x78, 0xf5, 0x40, 0x16, 0xea, 0xf9, 0x42, 0x84, 0x83, 0xe0, 0xf2, 0x4a, 0x91,
|
||||
0x5c, 0xec, 0x50, 0x41, 0x21, 0x29, 0x2e, 0x8e, 0xf4, 0xa2, 0xfc, 0xd2, 0x82, 0xf8, 0xcc, 0x14,
|
||||
0x89, 0x8f, 0x7a, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0xec, 0x60, 0x01, 0xcf, 0x14, 0x21, 0x59, 0x2e,
|
||||
0xce, 0x92, 0xcc, 0xdc, 0xd4, 0xe2, 0x92, 0xc4, 0xdc, 0x02, 0x89, 0x4f, 0x20, 0x49, 0xe6, 0x20,
|
||||
0x84, 0x08, 0xc8, 0xf1, 0x49, 0xf9, 0x29, 0x95, 0x12, 0x9f, 0x21, 0xda, 0xc0, 0x1c, 0x27, 0xae,
|
||||
0x28, 0x0e, 0x98, 0xd7, 0x93, 0xd8, 0xc0, 0x2c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18,
|
||||
0xdd, 0xc7, 0x8d, 0x0d, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
@@ -4,14 +4,14 @@ package mvds;
option go_package = "protobuf";

message Payload {
  repeated bytes acks = 1;
  repeated bytes offers = 2;
  repeated bytes requests = 3;
  repeated Message messages = 4;
  repeated bytes acks = 5001;
  repeated bytes offers = 5002;
  repeated bytes requests = 5003;
  repeated Message messages = 5004;
}

message Message {
  bytes group_id = 1;
  int64 timestamp = 2;
  bytes body = 3;
  bytes group_id = 6001;
  int64 timestamp = 6002;
  bytes body = 6003;
}
319
state/migrations/migrations.go
Normal file
@@ -0,0 +1,319 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565341329_initial_schema.down.sql (24B)
|
||||
// 1565341329_initial_schema.up.sql (237B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565341329_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xb7\x43\xc1\xc1\x18\x00\x00\x00")
|
||||
|
||||
func _1565341329_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565341329_initial_schemaDownSql,
|
||||
"1565341329_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565341329_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565341329_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1565341770, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x20, 0x56, 0x1a, 0x0, 0xc5, 0x81, 0xb3, 0xeb, 0x2a, 0xae, 0xed, 0xbb, 0x68, 0x51, 0x68, 0xc7, 0xe3, 0x31, 0xe, 0x1, 0x3e, 0xd2, 0x85, 0x9e, 0x6d, 0x55, 0xad, 0x55, 0xd6, 0x2f, 0x29, 0xca}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\x56\xd0\xe0\x52\x50\x50\x50\x28\xa9\x2c\x48\x55\xf0\xf4\x0b\x71\x75\x77\x0d\x52\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\xd1\x01\x4b\x15\xa7\xe6\xa5\xc4\x27\xe7\x97\xe6\x95\xe0\x53\x90\x5a\x90\x9f\x9c\x81\x43\x41\x7a\x51\x7e\x69\x41\x7c\x66\x8a\x82\x93\x8f\xbf\x13\x44\xa8\x20\x35\xb5\x08\x26\x82\xa6\x3a\x37\xb5\xb8\x38\x31\x3d\x15\x87\x6c\x40\x90\xa7\xaf\x63\x50\xa4\x82\xb7\x6b\xa4\x82\x06\x42\xa9\x0e\xcc\x44\x4d\x2e\x4d\x6b\x2e\x40\x00\x00\x00\xff\xff\x8b\xf0\x0a\x3b\xed\x00\x00\x00")
|
||||
|
||||
func _1565341329_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565341329_initial_schemaUpSql,
|
||||
"1565341329_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565341329_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565341329_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 237, mode: os.FileMode(0644), modTime: time.Unix(1565342998, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xf4, 0x34, 0xb, 0x4e, 0x94, 0x3b, 0xe, 0xf2, 0xc2, 0x36, 0x31, 0x47, 0x2e, 0xf8, 0x85, 0xab, 0x1a, 0x82, 0x24, 0x74, 0x39, 0xf6, 0x83, 0xaf, 0xb6, 0xb1, 0x79, 0x9, 0xda, 0x59, 0xd0}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565341287, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565341329_initial_schema.down.sql": _1565341329_initial_schemaDownSql,
|
||||
|
||||
"1565341329_initial_schema.up.sql": _1565341329_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565341329_initial_schema.down.sql": &bintree{_1565341329_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565341329_initial_schema.up.sql": &bintree{_1565341329_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
DROP TABLE mvds_states;
11
state/migrations/sqlite/1565341329_initial_schema.up.sql
Normal file
@@ -0,0 +1,11 @@
CREATE TABLE mvds_states (
  type INTEGER NOT NULL,
  send_count INTEGER NOT NULL,
  send_epoch INTEGER NOT NULL,
  group_id BLOB,
  peer_id BLOB NOT NULL,
  message_id BLOB NOT NULL,
  PRIMARY KEY (message_id, peer_id)
);

CREATE INDEX idx_send_epoch ON mvds_states(send_epoch);
9
state/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
|
||||
// can't handle files starting with a prefix. At least that's the case
|
||||
// for go-bindata.
|
||||
// If go-bindata is called from the same directory, asset names
|
||||
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
@@ -1,3 +1,4 @@
package state

// PeerID is the ID for a specific peer.
type PeerID [65]byte

@@ -1,6 +1,7 @@
// Package state contains everything related to the synchronization state for MVDS.
package state

// RecordType is the type for a specific record, either `OFFER`, `REQUEST` or `MESSAGE`.
type RecordType int

const (
@@ -9,15 +10,20 @@ const (
	MESSAGE
)

// State is a struct used to store a record's [state](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md#state).
type State struct {
	Type      RecordType
	SendCount uint64
	SendEpoch int64
	// GroupID is optional, thus nullable
	GroupID   *GroupID
	PeerID    PeerID
	MessageID MessageID
}

type SyncState interface {
	Get(group GroupID, id MessageID, peer PeerID) (State, error)
	Set(group GroupID, id MessageID, peer PeerID, newState State) error
	Remove(group GroupID, id MessageID, peer PeerID) error
	Map(epoch int64, process func(GroupID, MessageID, PeerID, State) State) error
	Add(newState State) error
	Remove(id MessageID, peer PeerID) error
	All(epoch int64) ([]State, error)
	Map(epoch int64, process func(State) State) error
}
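To make the reshaped interface concrete, here is a small sketch of how a caller might drive it, using the in-memory implementation from the next hunk; `NewSyncState`, `Add`, `Map` and `All` are the functions shown below, while the epoch and ID values are made up:

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/state"
)

func main() {
	s := state.NewSyncState() // in-memory SyncState from this change

	// Track one OFFER that becomes due at epoch 1; the values are illustrative.
	_ = s.Add(state.State{
		Type:      state.OFFER,
		SendEpoch: 1,
		PeerID:    state.PeerID{0x01},
		MessageID: state.MessageID{0xaa},
	})

	// Map visits every record due at or before the given epoch and stores
	// whatever the callback returns, e.g. bumping the send counter.
	_ = s.Map(2, func(st state.State) state.State {
		st.SendCount++
		return st
	})

	states, _ := s.All(2)
	fmt.Println(len(states), states[0].SendCount) // 1 1
}
```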
@@ -7,62 +7,55 @@ import (
|
||||
type memorySyncState struct {
|
||||
sync.Mutex
|
||||
|
||||
state map[GroupID]map[MessageID]map[PeerID]State
|
||||
state []State
|
||||
}
|
||||
|
||||
func NewSyncState() *memorySyncState {
|
||||
return &memorySyncState{
|
||||
state: make(map[GroupID]map[MessageID]map[PeerID]State),
|
||||
}
|
||||
return &memorySyncState{}
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Get(group GroupID, id MessageID, peer PeerID) (State, error) {
|
||||
func (s *memorySyncState) Add(newState State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
state, _ := s.state[group][id][peer]
|
||||
return state, nil
|
||||
}
|
||||
s.state = append(s.state, newState)
|
||||
|
||||
func (s *memorySyncState) Set(group GroupID, id MessageID, peer PeerID, newState State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if _, ok := s.state[group]; !ok {
|
||||
s.state[group] = make(map[MessageID]map[PeerID]State)
|
||||
}
|
||||
|
||||
if _, ok := s.state[group][id]; !ok {
|
||||
s.state[group][id] = make(map[PeerID]State)
|
||||
}
|
||||
|
||||
s.state[group][id][peer] = newState
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Remove(group GroupID, id MessageID, peer PeerID) error {
|
||||
func (s *memorySyncState) Remove(id MessageID, peer PeerID) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
var newState []State
|
||||
|
||||
delete(s.state[group][id], peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Map(epoch int64, process func(GroupID, MessageID, PeerID, State) State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for group, syncstate := range s.state {
|
||||
for id, peers := range syncstate {
|
||||
for peer, state := range peers {
|
||||
if state.SendEpoch < epoch {
|
||||
continue
|
||||
}
|
||||
|
||||
s.state[group][id][peer] = process(group, id, peer, state)
|
||||
}
|
||||
for _, state := range s.state {
|
||||
if state.MessageID != id || state.PeerID != peer {
|
||||
newState = append(newState, state)
|
||||
}
|
||||
}
|
||||
|
||||
s.state = newState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) All(_ int64) ([]State, error) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.state, nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Map(epoch int64, process func(State) State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for i, state := range s.state {
|
||||
if state.SendEpoch > epoch {
|
||||
continue
|
||||
}
|
||||
|
||||
s.state[i] = process(state)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
166
state/state_sqlite.go
Normal file
@@ -0,0 +1,166 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"log"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrStateNotFound = errors.New("state not found")
|
||||
)
|
||||
|
||||
// Verify that SyncState interface is implemented.
|
||||
var _ SyncState = (*sqliteSyncState)(nil)
|
||||
|
||||
type sqliteSyncState struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewPersistentSyncState(db *sql.DB) *sqliteSyncState {
|
||||
return &sqliteSyncState{db: db}
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Add(newState State) error {
|
||||
var groupIDBytes []byte
|
||||
if newState.GroupID != nil {
|
||||
groupIDBytes = newState.GroupID[:]
|
||||
}
|
||||
|
||||
_, err := p.db.Exec(`
|
||||
INSERT INTO mvds_states
|
||||
(type, send_count, send_epoch, group_id, peer_id, message_id)
|
||||
VALUES
|
||||
(?, ?, ?, ?, ?, ?)`,
|
||||
newState.Type,
|
||||
newState.SendCount,
|
||||
newState.SendEpoch,
|
||||
groupIDBytes,
|
||||
newState.PeerID[:],
|
||||
newState.MessageID[:],
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Remove(messageID MessageID, peerID PeerID) error {
|
||||
result, err := p.db.Exec(
|
||||
`DELETE FROM mvds_states WHERE message_id = ? AND peer_id = ?`,
|
||||
messageID[:],
|
||||
peerID[:],
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n, err := result.RowsAffected(); err != nil {
|
||||
return err
|
||||
} else if n == 0 {
|
||||
return ErrStateNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) All(epoch int64) ([]State, error) {
|
||||
var result []State
|
||||
|
||||
rows, err := p.db.Query(`
|
||||
SELECT
|
||||
type, send_count, send_epoch, group_id, peer_id, message_id
|
||||
FROM
|
||||
mvds_states
|
||||
WHERE
|
||||
send_epoch <= ?
|
||||
`, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var (
|
||||
state State
|
||||
groupID, peerID, messageID []byte
|
||||
)
|
||||
err := rows.Scan(
|
||||
&state.Type,
|
||||
&state.SendCount,
|
||||
&state.SendEpoch,
|
||||
&groupID,
|
||||
&peerID,
|
||||
&messageID,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(groupID) > 0 {
|
||||
val := GroupID{}
|
||||
copy(val[:], groupID)
|
||||
state.GroupID = &val
|
||||
}
|
||||
copy(state.PeerID[:], peerID)
|
||||
copy(state.MessageID[:], messageID)
|
||||
|
||||
result = append(result, state)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Map(epoch int64, process func(State) State) error {
|
||||
states, err := p.All(epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var updated []State
|
||||
|
||||
for _, state := range states {
|
||||
if err := invariant(state.SendEpoch <= epoch, "invalid state provided to process"); err != nil {
|
||||
log.Printf("%v", err)
|
||||
continue
|
||||
}
|
||||
newState := process(state)
|
||||
if newState != state {
|
||||
updated = append(updated, newState)
|
||||
}
|
||||
}
|
||||
|
||||
if len(updated) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := p.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, state := range updated {
|
||||
if err := updateInTx(tx, state); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func updateInTx(tx *sql.Tx, state State) error {
|
||||
_, err := tx.Exec(`
|
||||
UPDATE mvds_states
|
||||
SET
|
||||
send_count = ?,
|
||||
send_epoch = ?
|
||||
WHERE
|
||||
message_id = ? AND
|
||||
peer_id = ?
|
||||
`,
|
||||
state.SendCount,
|
||||
state.SendEpoch,
|
||||
state.MessageID[:],
|
||||
state.PeerID[:],
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func invariant(cond bool, message string) error {
|
||||
if !cond {
|
||||
return errors.New(message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
56
state/state_sqlite_test.go
Normal file
@@ -0,0 +1,56 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/state/migrations"
|
||||
)
|
||||
|
||||
func TestPersistentSyncState(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentSyncState(db)
|
||||
|
||||
stateWithoutGroupID := State{
|
||||
Type: OFFER,
|
||||
SendCount: 1,
|
||||
SendEpoch: 1,
|
||||
GroupID: nil,
|
||||
PeerID: PeerID{0x01},
|
||||
MessageID: MessageID{0xaa},
|
||||
}
|
||||
err = p.Add(stateWithoutGroupID)
|
||||
require.NoError(t, err)
|
||||
|
||||
stateWithGroupID := stateWithoutGroupID
|
||||
stateWithGroupID.GroupID = &GroupID{0x01}
|
||||
stateWithGroupID.MessageID = MessageID{0xbb}
|
||||
err = p.Add(stateWithGroupID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Getting states for the old epoch.
|
||||
allStates, err := p.All(0)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, allStates)
|
||||
|
||||
// Getting states for the current epoch.
|
||||
allStates, err = p.All(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []State{stateWithoutGroupID, stateWithGroupID}, allStates)
|
||||
require.Nil(t, allStates[0].GroupID)
|
||||
require.EqualValues(t, &GroupID{0x01}, allStates[1].GroupID)
|
||||
|
||||
err = p.Remove(stateWithoutGroupID.MessageID, stateWithoutGroupID.PeerID)
|
||||
require.NoError(t, err)
|
||||
// remove non-existing row
|
||||
err = p.Remove(MessageID{0xff}, PeerID{0xff})
|
||||
require.EqualError(t, err, "state not found")
|
||||
}
|
||||
@@ -2,12 +2,12 @@
package store

import (
	"github.com/status-im/mvds/protobuf"
	"github.com/status-im/mvds/state"
	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
)

type MessageStore interface {
	Has(id state.MessageID) bool
	Get(id state.MessageID) (protobuf.Message, error)
	Add(message protobuf.Message) error
	Has(id state.MessageID) (bool, error)
	Get(id state.MessageID) (*protobuf.Message, error)
	Add(message *protobuf.Message) error
}
@@ -4,39 +4,40 @@ import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type DummyStore struct {
|
||||
sync.Mutex
|
||||
ms map[state.MessageID]protobuf.Message
|
||||
ms map[state.MessageID]*protobuf.Message
|
||||
}
|
||||
|
||||
func NewDummyStore() DummyStore {
|
||||
return DummyStore{ms: make(map[state.MessageID]protobuf.Message)}
|
||||
func NewDummyStore() *DummyStore {
|
||||
return &DummyStore{ms: make(map[state.MessageID]*protobuf.Message)}
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Has(id state.MessageID) bool {
|
||||
func (ds *DummyStore) Has(id state.MessageID) (bool, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
_, ok := ds.ms[id]; return ok
|
||||
_, ok := ds.ms[id]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Get(id state.MessageID) (protobuf.Message, error) {
|
||||
func (ds *DummyStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
m, ok := ds.ms[id]
|
||||
if !ok {
|
||||
return protobuf.Message{}, errors.New("message does not exist")
|
||||
return nil, errors.New("message does not exist")
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Add(message protobuf.Message) error {
|
||||
func (ds *DummyStore) Add(message *protobuf.Message) error {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
ds.ms[message.ID()] = message
|
||||
|
||||
67
store/messagestore_sqlite.go
Normal file
@@ -0,0 +1,67 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrMessageNotFound = errors.New("message not found")
|
||||
)
|
||||
|
||||
type persistentMessageStore struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewPersistentMessageStore(db *sql.DB) *persistentMessageStore {
|
||||
return &persistentMessageStore{db: db}
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Add(message *protobuf.Message) error {
|
||||
id := message.ID()
|
||||
_, err := p.db.Exec(
|
||||
`INSERT INTO mvds_messages (id, group_id, timestamp, body)
|
||||
VALUES (?, ?, ?, ?)`,
|
||||
id[:],
|
||||
message.GroupId,
|
||||
message.Timestamp,
|
||||
message.Body,
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
var message protobuf.Message
|
||||
row := p.db.QueryRow(
|
||||
`SELECT group_id, timestamp, body FROM mvds_messages WHERE id = ?`,
|
||||
id[:],
|
||||
)
|
||||
if err := row.Scan(
|
||||
&message.GroupId,
|
||||
&message.Timestamp,
|
||||
&message.Body,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &message, nil
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Has(id state.MessageID) (bool, error) {
|
||||
var result bool
|
||||
err := p.db.QueryRow(
|
||||
`SELECT EXISTS(SELECT 1 FROM mvds_messages WHERE id = ?)`,
|
||||
id[:],
|
||||
).Scan(&result)
|
||||
switch err {
|
||||
case sql.ErrNoRows:
|
||||
return false, ErrMessageNotFound
|
||||
case nil:
|
||||
return result, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
53
store/messagestore_sqlite_test.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/store/migrations"
|
||||
)
|
||||
|
||||
func TestPersistentMessageStore(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentMessageStore(db)
|
||||
|
||||
now := time.Now().Unix()
|
||||
message := protobuf.Message{
|
||||
GroupId: []byte{0x01},
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xbb, 0xcc},
|
||||
}
|
||||
|
||||
err = p.Add(&message)
|
||||
require.NoError(t, err)
|
||||
// Adding the same message twice is not allowed.
|
||||
err = p.Add(&message)
|
||||
require.EqualError(t, err, "UNIQUE constraint failed: mvds_messages.id")
|
||||
// Verify if saved.
|
||||
exists, err := p.Has(message.ID())
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists)
|
||||
recvMessage, err := p.Get(message.ID())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, message, *recvMessage)
|
||||
|
||||
// Verify methods against non existing message.
|
||||
recvMessage, err = p.Get(state.MessageID{0xff})
|
||||
require.EqualError(t, err, "sql: no rows in result set")
|
||||
require.Nil(t, recvMessage)
|
||||
exists, err = p.Has(state.MessageID{0xff})
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists)
|
||||
}
|
||||
319
store/migrations/migrations.go
Normal file
@@ -0,0 +1,319 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565447861_initial_schema.down.sql (28B)
|
||||
// 1565447861_initial_schema.up.sql (140B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565447861_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x71\xf5\x71\x0d\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xc2\x3a\x18\x9b\x1c\x00\x00\x00")
|
||||
|
||||
func _1565447861_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565447861_initial_schemaDownSql,
|
||||
"1565447861_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565447861_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565447861_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565447861_initial_schema.down.sql", size: 28, mode: os.FileMode(0644), modTime: time.Unix(1565447902, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x92, 0x55, 0x8d, 0x3, 0x68, 0x1a, 0x9c, 0xd7, 0xc7, 0xb4, 0x5a, 0xb1, 0x27, 0x47, 0xf4, 0xc6, 0x8d, 0x85, 0xbb, 0xae, 0xb6, 0x69, 0xc5, 0xbc, 0x21, 0xba, 0xc0, 0xc6, 0x2a, 0xc8, 0xb2, 0xf7}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565447861_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\x56\xd0\xe0\x52\x50\x50\x50\xc8\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\x4b\xa4\x17\xe5\x97\x16\xc4\xc3\xa4\xfd\xfc\x43\x14\xfc\x42\x7d\x7c\x20\x72\x25\x99\xb9\xa9\xc5\x25\x89\xb9\x05\x0a\x9e\x7e\x21\xae\xee\xae\x41\x68\xf2\x49\xf9\x29\x95\xa8\xfa\xb8\x34\xad\xb9\x00\x01\x00\x00\xff\xff\xae\x9b\x57\x51\x8c\x00\x00\x00")
|
||||
|
||||
func _1565447861_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565447861_initial_schemaUpSql,
|
||||
"1565447861_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565447861_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565447861_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565447861_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1565448062, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x21, 0x13, 0xbf, 0x64, 0x18, 0xf7, 0xe2, 0xd8, 0xb5, 0x7d, 0x8, 0xf1, 0x66, 0xb9, 0xb3, 0x49, 0x68, 0xe2, 0xa2, 0xea, 0x90, 0x11, 0x70, 0x9c, 0x15, 0x28, 0x3f, 0x3f, 0x90, 0x3c, 0x76, 0xf}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565447878, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565447861_initial_schema.down.sql": _1565447861_initial_schemaDownSql,
|
||||
|
||||
"1565447861_initial_schema.up.sql": _1565447861_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565447861_initial_schema.down.sql": &bintree{_1565447861_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565447861_initial_schema.up.sql": &bintree{_1565447861_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
DELETE TABLE mvds_messages;
|
||||
6
store/migrations/sqlite/1565447861_initial_schema.up.sql
Normal file
@@ -0,0 +1,6 @@
|
||||
CREATE TABLE mvds_messages (
|
||||
id BLOB PRIMARY KEY,
|
||||
group_id BLOB NOT NULL,
|
||||
timestamp INTEGER NOT NULL,
|
||||
body BLOB NOT NULL
|
||||
);
|
||||
9
store/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
|
||||
// can't handle files starting with a prefix. At least that's the case
|
||||
// for go-bindata.
|
||||
// If go-bindata is called from the same directory, asset names
|
||||
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
// ChannelTransport implements a basic MVDS transport using channels for basic testing purposes.
|
||||
@@ -23,12 +23,12 @@ type ChannelTransport struct {
|
||||
func NewChannelTransport(offline int, in <-chan Packet) *ChannelTransport {
|
||||
return &ChannelTransport{
|
||||
offline: offline,
|
||||
in: in,
|
||||
out: make(map[state.PeerID]chan<- Packet),
|
||||
in: in,
|
||||
out: make(map[state.PeerID]chan<- Packet),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *ChannelTransport) AddOutput(id state.PeerID, c chan<-Packet) {
|
||||
func (t *ChannelTransport) AddOutput(id state.PeerID, c chan<- Packet) {
|
||||
t.out[id] = c
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func (t *ChannelTransport) Watch() Packet {
|
||||
return <-t.in
|
||||
}
|
||||
|
||||
func (t *ChannelTransport) Send(group state.GroupID, sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error {
|
||||
func (t *ChannelTransport) Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error {
|
||||
// @todo we can do this better, we put node onlineness into a goroutine where we just stop the nodes for x seconds
|
||||
// outside of this class
|
||||
math.Seed(time.Now().UnixNano())
|
||||
@@ -49,6 +49,6 @@ func (t *ChannelTransport) Send(group state.GroupID, sender state.PeerID, peer s
|
||||
return errors.New("peer unknown")
|
||||
}
|
||||
|
||||
c <- Packet{Group: group, Sender: sender, Payload: payload}
|
||||
c <- Packet{Sender: sender, Payload: payload}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,12 +2,11 @@
package transport

import (
	"github.com/status-im/mvds/protobuf"
	"github.com/status-im/mvds/state"
	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
)

type Packet struct {
	Group   state.GroupID
	Sender  state.PeerID
	Payload protobuf.Payload
}
@@ -15,5 +14,5 @@ type Packet struct {
// Transport defines an interface allowing for agnostic transport implementations.
type Transport interface {
	Watch() Packet
	Send(group state.GroupID, sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error
	Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error
}
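For orientation, a compact sketch of how the updated Transport surface is wired together, based on the ChannelTransport shown above; the peer IDs, channel sizes and offline parameter are arbitrary, and error handling is kept minimal:

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/transport"
)

func main() {
	alice, bob := state.PeerID{0x01}, state.PeerID{0x02}

	// Each peer watches its own inbound channel; Alice's output for Bob is Bob's input.
	toBob := make(chan transport.Packet, 1)
	aliceTransport := transport.NewChannelTransport(0, make(chan transport.Packet))
	aliceTransport.AddOutput(bob, toBob)
	bobTransport := transport.NewChannelTransport(0, toBob)

	// Note the new Send signature: no group argument, the group now travels
	// inside the payload's messages.
	payload := protobuf.Payload{Offers: [][]byte{{0xaa}}}
	if err := aliceTransport.Send(alice, bob, payload); err != nil {
		fmt.Println("send failed:", err)
		return
	}

	pkt := bobTransport.Watch() // blocks until a packet arrives
	fmt.Println(pkt.Sender == alice, pkt.Payload.IsValid()) // true true
}
```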
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
// +build !js,!appengine,!safe,!disableunsafe,go1.4

package spew

import (
	"reflect"
	"unsafe"
)

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = false

	// ptrSize is the size of a pointer on the current arch.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)

type flag uintptr

var (
	// flagRO indicates whether the value field of a reflect.Value
	// is read-only.
	flagRO flag

	// flagAddr indicates whether the address of the reflect.Value's
	// value may be taken.
	flagAddr flag
)

// flagKindMask holds the bits that make up the kind
// part of the flags field. In all the supported versions,
// it is in the lower 5 bits.
const flagKindMask = flag(0x1f)

// Different versions of Go have used different
// bit layouts for the flags type. This table
// records the known combinations.
var okFlags = []struct {
	ro, addr flag
}{{
	// From Go 1.4 to 1.5
	ro:   1 << 5,
	addr: 1 << 7,
}, {
	// Up to Go tip.
	ro:   1<<5 | 1<<6,
	addr: 1 << 8,
}}

var flagValOffset = func() uintptr {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	return field.Offset
}()

// flagField returns a pointer to the flag field of a reflect.Value.
func flagField(v *reflect.Value) *flag {
	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}

// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
		return v
	}
	flagFieldPtr := flagField(&v)
	*flagFieldPtr &^= flagRO
	*flagFieldPtr |= flagAddr
	return v
}

// Sanity checks against future reflect package changes
// to the type or semantics of the Value.flag field.
func init() {
	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
	if !ok {
		panic("reflect.Value has no flag field")
	}
	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
		panic("reflect.Value flag field has changed kind")
	}
	type t0 int
	var t struct {
		A t0
		// t0 will have flagEmbedRO set.
		t0
		// a will have flagStickyRO set
		a t0
	}
	vA := reflect.ValueOf(t).FieldByName("A")
	va := reflect.ValueOf(t).FieldByName("a")
	vt0 := reflect.ValueOf(t).FieldByName("t0")

	// Infer flagRO from the difference between the flags
	// for the (otherwise identical) fields in t.
	flagPublic := *flagField(&vA)
	flagWithRO := *flagField(&va) | *flagField(&vt0)
	flagRO = flagPublic ^ flagWithRO

	// Infer flagAddr from the difference between a value
	// taken from a pointer and not.
	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
	flagNoPtr := *flagField(&vA)
	flagPtr := *flagField(&vPtrA)
	flagAddr = flagNoPtr ^ flagPtr

	// Check that the inferred flags tally with one of the known versions.
	for _, f := range okFlags {
		if flagRO == f.ro && flagAddr == f.addr {
			return
		}
	}
	panic("reflect.Value read-only flag has changed semantics")
}
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored, normal file)
@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe !go1.4

package spew

import "reflect"

const (
	// UnsafeDisabled is a build-time constant which specifies whether or
	// not access to the unsafe package is available.
	UnsafeDisabled = true
)

// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
	return v
}
341 vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored, normal file)
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputs the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisfy an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len as values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
||||
306 vendor/github.com/davecgh/go-spew/spew/config.go (generated, vendored, normal file)
@@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Fprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
||||
211 vendor/github.com/davecgh/go-spew/spew/doc.go (generated, vendored, normal file)
@@ -0,0 +1,211 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types is as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
||||
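A minimal usage sketch of the spew API documented above, using only the exported names visible in the vendored sources (Dump, Sdump, ConfigState, Indent, SortKeys); the node type and values below are illustrative placeholders, not part of this repository:

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	// node is an illustrative placeholder type, not part of this repository.
	type node struct {
		ID       uint64
		Children map[string]*node
	}

	func main() {
		root := &node{
			ID: 1,
			Children: map[string]*node{
				"b": {ID: 3},
				"a": {ID: 2},
			},
		}

		// Top-level Dump uses the global spew.Config (single-space indent).
		spew.Dump(root)

		// A dedicated ConfigState yields deterministic, diff-friendly output:
		// tab indentation and sorted map keys.
		cs := spew.ConfigState{Indent: "\t", SortKeys: true}
		fmt.Println(cs.Sdump(root))
	}
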
509 vendor/github.com/davecgh/go-spew/spew/dump.go (generated, vendored, normal file)
@@ -0,0 +1,509 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
||||
419 vendor/github.com/davecgh/go-spew/spew/format.go (generated, vendored, normal file)
@@ -0,0 +1,419 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
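As a hedged illustration of how the returned formatter plugs into the standard fmt verbs (the value below is made up):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	v := map[string]int{"answer": 42}

	// Wrap the value once and reuse it with any of the supported verb forms.
	f := spew.NewFormatter(v)
	fmt.Printf("%v\n", f)   // most compact form
	fmt.Printf("%+v\n", f)  // adds pointer addresses
	fmt.Printf("%#+v\n", f) // adds types and pointer addresses
}
```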
|
||||
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
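A short sketch of what these wrappers buy you in practice; given the shorthand described in the comments above, the two Printf calls below should produce the same output:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	type point struct{ X, Y int }
	p := &point{X: 1, Y: 2}

	// The convenience wrapper...
	spew.Printf("p = %+v\n", p)

	// ...is shorthand for wrapping each argument yourself.
	fmt.Printf("p = %+v\n", spew.NewFormatter(p))
}
```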
|
||||
13
vendor/github.com/golang-migrate/migrate/v4/.dockerignore
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# Project
|
||||
FAQ.md
|
||||
README.md
|
||||
LICENSE
|
||||
Makefile
|
||||
.gitignore
|
||||
.travis.yml
|
||||
CONTRIBUTING.md
|
||||
MIGRATIONS.md
|
||||
docker-deploy.sh
|
||||
|
||||
# Golang
|
||||
testing
|
||||
8
vendor/github.com/golang-migrate/migrate/v4/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
.DS_Store
|
||||
cli/build
|
||||
cli/cli
|
||||
cli/migrate
|
||||
.coverage
|
||||
.godoc.pid
|
||||
vendor/
|
||||
.vscode/
|
||||
27
vendor/github.com/golang-migrate/migrate/v4/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 2m
|
||||
linters:
|
||||
enable:
|
||||
#- golint
|
||||
- interfacer
|
||||
- unconvert
|
||||
#- dupl
|
||||
- goconst
|
||||
- gofmt
|
||||
- misspell
|
||||
- maligned
|
||||
- unparam
|
||||
- nakedret
|
||||
- prealloc
|
||||
#- gosec
|
||||
linters-settings:
|
||||
misspell:
|
||||
locale: US
|
||||
issues:
|
||||
max-same: 0
|
||||
max-per-linter: 0
|
||||
exclude-use-default: false
|
||||
exclude:
|
||||
# gosec: Duplicated errcheck checks
|
||||
- G104
|
||||
128
vendor/github.com/golang-migrate/migrate/v4/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
language: go
|
||||
sudo: required
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
include:
|
||||
# Supported versions of Go: https://golang.org/dl/
|
||||
- go: "1.11.x"
|
||||
- go: "1.12.x"
|
||||
- go: master
|
||||
|
||||
go_import_path: github.com/golang-migrate/migrate
|
||||
|
||||
env:
|
||||
global:
|
||||
- GO111MODULE=on
|
||||
- MIGRATE_TEST_CONTAINER_BOOT_TIMEOUT=60
|
||||
- DOCKER_USERNAME=golangmigrate
|
||||
- secure: "oSOznzUrgr5h45qW4PONkREpisPAt40tnM+KFWtS/Ggu5UI2Ie0CmyYXWuBjbt7B97a4yN9Qzmn8FxJHJ7kk+ABOi3muhkxeIhr6esXbzHhX/Jhv0mj1xkzX7KoVN9oHBz3cOI/QeRyEAO68xjDHNE2kby4RTT9VBt6TQUakKVkqI5qkqLBTADepCjVC+9XhxVxUNyeWKU8ormaUfJBjoNVoDlwXekUPnJenfmfZqXxUInvBCfUyp7Pq+kurBORmg4yc6qOlRYuK67Xw+i5xpjbZouNlXPk0rq7pPy5zjhmZQ3kImoFPvNMeKViDcI6kSIJKtjdhms9/g/6MgXS9HlL5kFy8tYKbsyiHnHB1BsvaLAKXctbUZFDPstgMPADfnad2kZXPrNqIhfWKZrGRWidawCYJ1sKKwYxLMKrtA0umqgMoL90MmBOELhuGmvMV0cFJB+zo+K2YWjEiMGd8xRb5mC5aAy0ZcCehO46jGtpr217EJmMF8Ywr7cFqM2Shg5U2jev9qUpYiXwmPnJKDuoT2ZHuHmPgFIkYiWC5yeJnnmG5bed1sKBp93AFrJX+1Rx5oC4BpNegewmBZKpOSwls/D1uMAeQK3dPmQHLsT6o2VBLfeDGr+zY0R85ywwPZCv00vGol02zYoTqN7eFqr6Qhjr/qx5K1nnxJdFK3Ts="
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg
|
||||
|
||||
before_install:
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.16.0
|
||||
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
|
||||
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- golangci-lint run
|
||||
- make test COVERAGE_DIR=/tmp/coverage
|
||||
|
||||
after_success:
|
||||
- goveralls -service=travis-ci -coverprofile /tmp/coverage/combined.txt
|
||||
- make list-external-deps > dependency_tree.txt && cat dependency_tree.txt
|
||||
- make build-cli
|
||||
- gem install --no-document fpm
|
||||
- fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m dhui@users.noreply.github.com --url https://github.com/golang-migrate/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/local/bin/migrate
|
||||
|
||||
deploy:
|
||||
- provider: releases
|
||||
api_key:
|
||||
secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w=
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
file:
|
||||
- cli/build/migrate.linux-amd64.tar.gz
|
||||
- cli/build/migrate.darwin-amd64.tar.gz
|
||||
- cli/build/migrate.windows-amd64.exe.tar.gz
|
||||
- cli/build/sha256sum.txt
|
||||
- dependency_tree.txt
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/xenial
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/bionic
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/cosmic
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/stretch
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/buster
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: script
|
||||
script: ./docker-deploy.sh
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
24
vendor/github.com/golang-migrate/migrate/v4/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# Development, Testing and Contributing
|
||||
|
||||
1. Make sure you have a running Docker daemon
|
||||
(Install for [MacOS](https://docs.docker.com/docker-for-mac/))
|
||||
1. Use a version of Go that supports [modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) (e.g. Go 1.11+)
|
||||
1. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/golang-migrate/migrate`
|
||||
* Ensure that [Go modules are enabled](https://golang.org/cmd/go/#hdr-Preliminary_module_support) (e.g. your repo path or the `GO111MODULE` environment variable are set correctly)
|
||||
1. Install [golangci-lint](https://github.com/golangci/golangci-lint#install)
|
||||
1. Run the linter: `golangci-lint run`
|
||||
1. Confirm tests are working: `make test-short`
|
||||
1. Write awesome code ...
|
||||
1. `make test` to run all tests against all database versions
|
||||
1. Push code and open Pull Request
|
||||
|
||||
Some more helpful commands:
|
||||
|
||||
* You can specify which database/ source tests to run:
|
||||
`make test-short SOURCE='file go_bindata' DATABASE='postgres cassandra'`
|
||||
* After `make test`, run `make html-coverage` which opens a shiny test coverage overview.
|
||||
* `make build-cli` builds the CLI in directory `cli/build/`.
|
||||
* `make list-external-deps` lists all external dependencies for each package
|
||||
* `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server.
|
||||
Repeatedly call `make docs` to refresh the server.
|
||||
* Set the `DOCKER_API_VERSION` environment variable to the latest supported version if you get errors regarding the docker client API version being too new.
|
||||
23
vendor/github.com/golang-migrate/migrate/v4/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM golang:1.12-alpine3.9 AS downloader
|
||||
ARG VERSION
|
||||
|
||||
RUN apk add --no-cache git gcc musl-dev
|
||||
|
||||
WORKDIR /go/src/github.com/golang-migrate/migrate
|
||||
|
||||
COPY . ./
|
||||
|
||||
ENV GO111MODULE=on
|
||||
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver"
|
||||
ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab"
|
||||
|
||||
RUN go build -a -o build/migrate.linux-386 -ldflags="-X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
|
||||
|
||||
FROM alpine:3.9
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY --from=downloader /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /migrate
|
||||
|
||||
ENTRYPOINT ["/migrate"]
|
||||
CMD ["--help"]
|
||||
70
vendor/github.com/golang-migrate/migrate/v4/FAQ.md
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
# FAQ
|
||||
|
||||
#### How is the code base structured?
|
||||
```
|
||||
/ package migrate (the heart of everything)
|
||||
/cli the CLI wrapper
|
||||
/database database driver and sub directories have the actual driver implementations
|
||||
/source source driver and sub directories have the actual driver implementations
|
||||
```
|
||||
|
||||
#### Why is there no `source/driver.go:Last()`?
|
||||
It's not needed. And unless the source has a "native" way to read a directory in reversed order,
|
||||
it might be expensive to do a full directory scan in order to get the last element.
|
||||
|
||||
#### What is a NilMigration? NilVersion?
|
||||
NilMigration defines a migration without a body. NilVersion is defined as const -1.
|
||||
|
||||
#### What is the difference between uint(version) and int(targetVersion)?
|
||||
version refers to an existing migration version coming from a source and therefore can never be negative.
|
||||
targetVersion can either be a version OR represent a NilVersion, which equals -1.
|
||||
|
||||
#### What's the difference between Next/Previous and Up/Down?
|
||||
```
|
||||
1_first_migration.up.extension next -> 2_second_migration.up.extension ...
|
||||
1_first_migration.down.extension <- previous 2_second_migration.down.extension ...
|
||||
```
|
||||
|
||||
#### Why two separate files (up and down) for a migration?
|
||||
It makes all of our lives easier. No new markup/syntax to learn for users
|
||||
and existing database utility tools continue to work as expected.
|
||||
|
||||
#### How many migrations can migrate handle?
|
||||
Whatever the maximum positive signed integer value is for your platform.
|
||||
For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to
|
||||
the currently run and pre-fetched migrations in memory. Please note that some
|
||||
source drivers need to build a full "directory" tree first, which puts some
|
||||
heat on the memory consumption.
|
||||
|
||||
#### Are the table tests in migrate_test.go bloated?
|
||||
Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact
|
||||
the tests are very visual now and might help new users understand expected behaviors quickly.
|
||||
Migrate from version x to y and y is the last migration? Just check out the test for
|
||||
that particular case and know what's going on instantly.
|
||||
|
||||
#### What is Docker being used for?
|
||||
Only for testing. See [testing/docker.go](testing/docker.go)
|
||||
|
||||
#### Why not just use docker-compose?
|
||||
It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast
|
||||
and whenever we want, not just once at the beginning of all tests.
|
||||
|
||||
#### Can I maintain my driver in my own repository?
|
||||
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository though.
|
||||
The driver's functionality is dictated by migrate's interfaces. That means there should really
|
||||
just be one driver for a database/ source. We want to prevent a future where several drivers that do the exact same thing,
|
||||
just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the
|
||||
"best" available driver for a database in order to get started, we would have failed as an open source community.
|
||||
|
||||
#### Can I mix multiple sources during a batch of migrations?
|
||||
No.
|
||||
|
||||
#### What does "dirty" database mean?
|
||||
Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists,
|
||||
which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error
|
||||
and then "force" the expected version.
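As a hedged Go sketch (the paths, URLs, and version number are placeholders), recovering from a dirty state with the library API might look like this:

```go
import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func recoverDirty() {
	// Fix the failed migration by hand first, then force the version
	// that the schema actually matches (1 is just an example).
	m, err := migrate.New("file://migrations", "postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	if err := m.Force(1); err != nil { // clears the dirty flag
		log.Fatal(err)
	}
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```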
|
||||
|
||||
#### What happens if two programs try and update the database at the same time?
|
||||
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations against
|
||||
the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
|
||||
the `pg_advisory_lock` function.
|
||||
28
vendor/github.com/golang-migrate/migrate/v4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Original Work
|
||||
Copyright (c) 2016 Matthias Kadenbach
|
||||
https://github.com/mattes/migrate
|
||||
|
||||
Modified Work
|
||||
Copyright (c) 2018 Dale Hui
|
||||
https://github.com/golang-migrate/migrate
|
||||
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
86
vendor/github.com/golang-migrate/migrate/v4/MIGRATIONS.md
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
# Migrations
|
||||
|
||||
## Migration Filename Format
|
||||
|
||||
A single logical migration is represented as two separate migration files, one
|
||||
to migrate "up" to the specified version from the previous version, and a second
|
||||
to migrate back "down" to the previous version. These migrations can be provided
|
||||
by any one of the supported [migration sources](./README.md#migration-sources).
|
||||
|
||||
The ordering and direction of the migration files is determined by the filenames
|
||||
used for them. `migrate` expects the filenames of migrations to have the format:
|
||||
|
||||
{version}_{title}.up.{extension}
|
||||
{version}_{title}.down.{extension}
|
||||
|
||||
The `title` of each migration is unused, and is only for readability. Similarly,
|
||||
the `extension` of the migration files is not checked by the library, and should
|
||||
be an appropriate format for the database in use (`.sql` for SQL variants, for
|
||||
instance).
|
||||
|
||||
Versions of migrations may be represented as any 64 bit unsigned integer.
|
||||
All migrations are applied upward in order of increasing version number, and
|
||||
downward by decreasing version number.
|
||||
|
||||
Common versioning schemes include incrementing integers:
|
||||
|
||||
1_initialize_schema.down.sql
|
||||
1_initialize_schema.up.sql
|
||||
2_add_table.down.sql
|
||||
2_add_table.up.sql
|
||||
...
|
||||
|
||||
Or timestamps at an appropriate resolution:
|
||||
|
||||
1500360784_initialize_schema.down.sql
|
||||
1500360784_initialize_schema.up.sql
|
||||
1500445949_add_table.down.sql
|
||||
1500445949_add_table.up.sql
|
||||
...
|
||||
|
||||
But any scheme resulting in distinct, incrementing integers as versions is valid.
|
||||
|
||||
It is suggested that the version number of corresponding `up` and `down` migration
|
||||
files be equivalent for clarity, but they are allowed to differ so long as the
|
||||
relative ordering of the migrations is preserved.
|
||||
|
||||
The migration files are permitted to be "empty", in the event that a migration
|
||||
is a no-op or is irreversible. It is recommended to still include both migration
|
||||
files by making the whole migration file consist of a comment.
|
||||
If your database does not support comments, then deleting the migration file will also work.
|
||||
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
|
||||
will attempt to run an empty query. In this case, deleting the migration file will also work.
|
||||
For the rationale of this behavior, see:
|
||||
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)
|
||||
|
||||
## Migration Content Format
|
||||
|
||||
The format of the migration files themselves varies between database systems.
|
||||
Different databases have different semantics around schema changes and when and
|
||||
how they are allowed to occur
|
||||
(for instance, [if schema changes can occur within a transaction](https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis)).
|
||||
|
||||
As such, the `migrate` library has little to no checking around the format of
|
||||
migration sources. The migration files are generally processed directly by the
|
||||
drivers as raw operations.
|
||||
|
||||
## Reversibility of Migrations
|
||||
|
||||
Best practice for writing schema migration is that all migrations should be
|
||||
reversible. It should in theory be possible to run migrations down and back up
|
||||
through any and all versions with the state being fully cleaned and recreated
|
||||
by doing so.
|
||||
|
||||
By adhering to this recommended practice, development and deployment of new code
|
||||
is cleaner and easier (cleaning database state for a new feature should be as
|
||||
easy as migrating down to a prior version, and back up to the latest).
|
||||
|
||||
As opposed to some other migration libraries, `migrate` represents up and down
|
||||
migrations as separate files. This prevents any non-standard file syntax from
|
||||
being introduced which may result in unintended behavior or errors, depending
|
||||
on what database is processing the file.
|
||||
|
||||
While it is technically possible for an up or down migration to exist on its own
|
||||
without an equivalently versioned counterpart, it is strongly recommended to
|
||||
always include a down migration which cleans up the state of the corresponding
|
||||
up migration.
|
||||
105
vendor/github.com/golang-migrate/migrate/v4/Makefile
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
SOURCE ?= file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab
|
||||
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver
|
||||
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
|
||||
TEST_FLAGS ?=
|
||||
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
|
||||
COVERAGE_DIR ?= .coverage
|
||||
|
||||
|
||||
build-cli: clean
|
||||
-mkdir ./cli/build
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
|
||||
cd ./cli/build && shasum -a 256 * > sha256sum.txt
|
||||
cat ./cli/build/sha256sum.txt
|
||||
|
||||
|
||||
clean:
|
||||
-rm -r ./cli/build
|
||||
|
||||
|
||||
test-short:
|
||||
make test-with-flags --ignore-errors TEST_FLAGS='-short'
|
||||
|
||||
|
||||
test:
|
||||
@-rm -r $(COVERAGE_DIR)
|
||||
@mkdir $(COVERAGE_DIR)
|
||||
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
|
||||
|
||||
|
||||
test-with-flags:
|
||||
@echo SOURCE: $(SOURCE)
|
||||
@echo DATABASE: $(DATABASE)
|
||||
|
||||
@go test $(TEST_FLAGS) ./...
|
||||
|
||||
|
||||
kill-orphaned-docker-containers:
|
||||
docker rm -f $(shell docker ps -aq --filter label=migrate_test)
|
||||
|
||||
|
||||
html-coverage:
|
||||
go tool cover -html=$(COVERAGE_DIR)/combined.txt
|
||||
|
||||
|
||||
list-external-deps:
|
||||
$(call external_deps,'.')
|
||||
$(call external_deps,'./cli/...')
|
||||
$(call external_deps,'./testing/...')
|
||||
|
||||
$(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...'))
|
||||
$(call external_deps,'./source/testing/...')
|
||||
$(call external_deps,'./source/stub/...')
|
||||
|
||||
$(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...'))
|
||||
$(call external_deps,'./database/testing/...')
|
||||
$(call external_deps,'./database/stub/...')
|
||||
|
||||
|
||||
restore-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \;
|
||||
|
||||
|
||||
rewrite-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \;
|
||||
|
||||
|
||||
# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs
|
||||
docs:
|
||||
-make kill-docs
|
||||
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
|
||||
cat .godoc.pid
|
||||
|
||||
|
||||
kill-docs:
|
||||
@cat .godoc.pid
|
||||
kill -9 $$(cat .godoc.pid)
|
||||
rm .godoc.pid
|
||||
|
||||
|
||||
open-docs:
|
||||
open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate
|
||||
|
||||
|
||||
# example: make release V=0.0.0
|
||||
release:
|
||||
git tag v$(V)
|
||||
@read -p "Press enter to confirm and push to origin ..." && git push origin v$(V)
|
||||
|
||||
|
||||
define external_deps
|
||||
@echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
|
||||
|
||||
endef
|
||||
|
||||
|
||||
.PHONY: build-cli clean test-short test test-with-flags html-coverage \
|
||||
restore-import-paths rewrite-import-paths list-external-deps release \
|
||||
docs kill-docs open-docs kill-orphaned-docker-containers
|
||||
|
||||
SHELL = /bin/bash
|
||||
RAND = $(shell echo $$RANDOM)
|
||||
|
||||
170
vendor/github.com/golang-migrate/migrate/v4/README.md
generated
vendored
Normal file
@@ -0,0 +1,170 @@
|
||||
[](https://travis-ci.com/golang-migrate/migrate)
|
||||
[](https://godoc.org/github.com/golang-migrate/migrate)
|
||||
[](https://coveralls.io/github/golang-migrate/migrate?branch=master)
|
||||
[](https://packagecloud.io/golang-migrate/migrate?filter=debs)
|
||||
[](https://hub.docker.com/r/migrate/migrate/)
|
||||

|
||||
[](https://github.com/golang-migrate/migrate/releases)
|
||||
|
||||
# migrate
|
||||
|
||||
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
|
||||
|
||||
* Migrate reads migrations from [sources](#migration-sources)
|
||||
and applies them in correct order to a [database](#databases).
|
||||
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
|
||||
(Keeps the drivers lightweight, too.)
|
||||
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
|
||||
|
||||
Forked from [mattes/migrate](https://github.com/mattes/migrate)
|
||||
|
||||
## Databases
|
||||
|
||||
Database drivers run migrations. [Add a new database?](database/driver.go)
|
||||
|
||||
* [PostgreSQL](database/postgres)
|
||||
* [Redshift](database/redshift)
|
||||
* [Ql](database/ql)
|
||||
* [Cassandra](database/cassandra)
|
||||
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
|
||||
* [MySQL/ MariaDB](database/mysql)
|
||||
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
|
||||
* [MongoDB](database/mongodb)
|
||||
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
|
||||
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
|
||||
* [Google Cloud Spanner](database/spanner)
|
||||
* [CockroachDB](database/cockroachdb)
|
||||
* [ClickHouse](database/clickhouse)
|
||||
* [Firebird](database/firebird) ([todo #49](https://github.com/golang-migrate/migrate/issues/49))
|
||||
* [MS SQL Server](database/sqlserver)
|
||||
|
||||
### Database URLs
|
||||
|
||||
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?option1=true&option2=false`
|
||||
|
||||
Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note, the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character)
|
||||
|
||||
Explicitly, the following characters need to be escaped:
|
||||
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
|
||||
|
||||
It's easiest to always run the parts of your DB connection URL (e.g. username, password, etc.) through a URL encoder. See the example Python snippets below:
|
||||
|
||||
```bash
|
||||
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$ python2 -c 'import urllib; print urllib.quote(raw_input("String to encode: "), "")'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$
|
||||
```
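Since this is a Go code base, a hedged equivalent using the standard net/url package (the credentials are fake) could look like:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u := url.URL{
		Scheme:   "postgres",
		User:     url.UserPassword("user", "FAKEpassword!#$%&'()*+,/:;=?@[]"),
		Host:     "localhost:5432",
		Path:     "database",
		RawQuery: "sslmode=disable",
	}
	// url.URL.String percent-encodes the reserved characters in the userinfo for us.
	fmt.Println(u.String())
}
```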
|
||||
|
||||
## Migration Sources
|
||||
|
||||
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
|
||||
|
||||
* [Filesystem](source/file) - read from filesystem
|
||||
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
|
||||
* [Github](source/github) - read from remote Github repositories
|
||||
* [Github Enterprise](source/github_ee) - read from remote Github Enterprise repositories
|
||||
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
|
||||
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
|
||||
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
|
||||
|
||||
## CLI usage
|
||||
|
||||
* Simple wrapper around this library.
|
||||
* Handles ctrl+c (SIGINT) gracefully.
|
||||
* No config search paths, no config files, no magic ENV var injections.
|
||||
|
||||
__[CLI Documentation](cmd/migrate)__
|
||||
|
||||
### Basic usage
|
||||
|
||||
```bash
|
||||
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
### Docker usage
|
||||
|
||||
```bash
|
||||
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate
|
||||
-path=/migrations/ -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
## Use in your Go project
|
||||
|
||||
* API is stable and frozen for this release (v3 & v4).
|
||||
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
|
||||
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
|
||||
* Bring your own logger.
|
||||
* Uses `io.Reader` streams internally for low memory overhead.
|
||||
* Thread-safe and no goroutine leaks.
|
||||
|
||||
__[Go Documentation](https://godoc.org/github.com/golang-migrate/migrate)__
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/github"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m, err := migrate.New(
|
||||
"github://mattes:personal-access-token@mattes/migrate_test",
|
||||
"postgres://localhost:5432/database?sslmode=enable")
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
Want to use an existing database client?
|
||||
|
||||
```go
|
||||
import (
|
||||
"database/sql"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable")
|
||||
driver, err := postgres.WithInstance(db, &postgres.Config{})
|
||||
m, err := migrate.NewWithDatabaseInstance(
|
||||
"file:///migrations",
|
||||
"postgres", driver)
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
## Migration files
|
||||
|
||||
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
|
||||
|
||||
```bash
|
||||
1481574547_create_users_table.up.sql
|
||||
1481574547_create_users_table.down.sql
|
||||
```
|
||||
|
||||
[Best practices: How to write migrations.](MIGRATIONS.md)
|
||||
|
||||
## Versions
|
||||
|
||||
Version | Supported? | Import | Notes
|
||||
--------|------------|--------|------
|
||||
**master** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | New features and bug fixes arrive here first |
|
||||
**v4** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | Used for stable releases |
|
||||
**v3** | :x: | `import "github.com/golang-migrate/migrate"` (with package manager) or `import "gopkg.in/golang-migrate/migrate.v3"` (not recommended) | **DO NOT USE** - No longer supported |
|
||||
|
||||
## Development and Contributing
|
||||
|
||||
Yes, please! [`Makefile`](Makefile) is your friend,
|
||||
read the [development guide](CONTRIBUTING.md).
|
||||
|
||||
Also have a look at the [FAQ](FAQ.md).
|
||||
|
||||
---
|
||||
|
||||
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).
|
||||
126
vendor/github.com/golang-migrate/migrate/v4/database/driver.go
generated
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
// Package database provides the Database interface.
// All database drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package database/testing.
package database

import (
    "fmt"
    "io"
    nurl "net/url"
    "sync"
)

var (
    ErrLocked = fmt.Errorf("can't acquire lock")
)

const NilVersion int = -1

var driversMu sync.RWMutex
var drivers = make(map[string]Driver)

// Driver is the interface every database driver must implement.
//
// How to implement a database driver?
//   1. Implement this interface.
//   2. Optionally, add a function named `WithInstance`.
//      This function should accept an existing DB instance and a Config{} struct
//      and return a driver instance.
//   3. Add a test that calls database/testing.go:Test()
//   4. Add own tests for Open(), WithInstance() (when provided) and Close().
//      All other functions are tested by tests in database/testing.
//      Saves you some time and makes sure all database drivers behave the same way.
//   5. Call Register in init().
//   6. Create a migrate/cli/build_<driver-name>.go file
//   7. Add driver name in 'DATABASE' variable in Makefile
//
// Guidelines:
//   * Don't try to correct user input. Don't assume things.
//     When in doubt, return an error and explain the situation to the user.
//   * All configuration input must come from the URL string in func Open()
//     or the Config{} struct in WithInstance. Don't os.Getenv().
type Driver interface {
    // Open returns a new driver instance configured with parameters
    // coming from the URL string. Migrate will call this function
    // only once per instance.
    Open(url string) (Driver, error)

    // Close closes the underlying database instance managed by the driver.
    // Migrate will call this function only once per instance.
    Close() error

    // Lock should acquire a database lock so that only one migration process
    // can run at a time. Migrate will call this function before Run is called.
    // If the implementation can't provide this functionality, return nil.
    // Return database.ErrLocked if database is already locked.
    Lock() error

    // Unlock should release the lock. Migrate will call this function after
    // all migrations have been run.
    Unlock() error

    // Run applies a migration to the database. migration is guaranteed to be not nil.
    Run(migration io.Reader) error

    // SetVersion saves version and dirty state.
    // Migrate will call this function before and after each call to Run.
    // version must be >= -1. -1 means NilVersion.
    SetVersion(version int, dirty bool) error

    // Version returns the currently active version and if the database is dirty.
    // When no migration has been applied, it must return version -1.
    // Dirty means, a previous migration failed and user interaction is required.
    Version() (version int, dirty bool, err error)

    // Drop deletes everything in the database.
    // Note that this is a breaking action, a new call to Open() is necessary to
    // ensure subsequent calls work as expected.
    Drop() error
}

// Open returns a new driver instance.
func Open(url string) (Driver, error) {
    u, err := nurl.Parse(url)
    if err != nil {
        return nil, fmt.Errorf("Unable to parse URL. Did you escape all reserved URL characters? "+
            "See: https://github.com/golang-migrate/migrate#database-urls Error: %v", err)
    }

    if u.Scheme == "" {
        return nil, fmt.Errorf("database driver: invalid URL scheme")
    }

    driversMu.RLock()
    d, ok := drivers[u.Scheme]
    driversMu.RUnlock()
    if !ok {
        return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme)
    }

    return d.Open(url)
}

// Register globally registers a driver.
func Register(name string, driver Driver) {
    driversMu.Lock()
    defer driversMu.Unlock()
    if driver == nil {
        panic("Register driver is nil")
    }
    if _, dup := drivers[name]; dup {
        panic("Register called twice for driver " + name)
    }
    drivers[name] = driver
}

// List lists the registered drivers
func List() []string {
    driversMu.RLock()
    defer driversMu.RUnlock()
    names := make([]string, 0, len(drivers))
    for n := range drivers {
        names = append(names, n)
    }
    return names
}
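
The package comment above lists the steps for adding a new database driver. As a hypothetical, shape-only sketch (no `noopdb` driver exists in this vendored copy), steps 1 and 5, implementing the interface and registering it in `init()`, could look like:

```go
package noopdb // hypothetical example package, not part of the vendored tree

import (
    "io"
    "io/ioutil"

    "github.com/golang-migrate/migrate/v4/database"
)

// Register the no-op driver under the "noopdb" URL scheme (step 5).
func init() {
    database.Register("noopdb", &Driver{version: database.NilVersion})
}

// Driver is a do-nothing database.Driver, useful only as a shape reference (step 1).
type Driver struct {
    version int
    dirty   bool
}

// Open would normally parse the URL for configuration; here it just returns a fresh instance.
func (d *Driver) Open(url string) (database.Driver, error) {
    return &Driver{version: database.NilVersion}, nil
}

func (d *Driver) Close() error  { return nil }
func (d *Driver) Lock() error   { return nil }
func (d *Driver) Unlock() error { return nil }

// Run drains the migration body instead of executing it against anything.
func (d *Driver) Run(migration io.Reader) error {
    _, err := ioutil.ReadAll(migration)
    return err
}

func (d *Driver) SetVersion(version int, dirty bool) error {
    d.version, d.dirty = version, dirty
    return nil
}

func (d *Driver) Version() (version int, dirty bool, err error) {
    return d.version, d.dirty, nil
}

// Drop has nothing to delete in a no-op driver.
func (d *Driver) Drop() error { return nil }
```

A real driver would execute the migration body and take a proper lock; the point here is just the method set and the `Register` call.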
27 vendor/github.com/golang-migrate/migrate/v4/database/error.go generated vendored Normal file
@@ -0,0 +1,27 @@
package database

import (
    "fmt"
)

// Error should be used for errors involving queries run against the database
type Error struct {
    // Optional: the line number
    Line uint

    // Query is a query excerpt
    Query []byte

    // Err is a useful/helping error message for humans
    Err string

    // OrigErr is the underlying error
    OrigErr error
}

func (e Error) Error() string {
    if len(e.Err) == 0 {
        return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query)
    }
    return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr)
}
19 vendor/github.com/golang-migrate/migrate/v4/database/util.go generated vendored Normal file
@@ -0,0 +1,19 @@
package database

import (
    "fmt"
    "hash/crc32"
    "strings"
)

const advisoryLockIDSalt uint = 1486364155

// GenerateAdvisoryLockId inspired by rails migrations, see https://goo.gl/8o9bCT
func GenerateAdvisoryLockId(databaseName string, additionalNames ...string) (string, error) { // nolint: golint
    if len(additionalNames) > 0 {
        databaseName = strings.Join(append(additionalNames, databaseName), "\x00")
    }
    sum := crc32.ChecksumIEEE([]byte(databaseName))
    sum = sum * uint32(advisoryLockIDSalt)
    return fmt.Sprint(sum), nil
}
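
GenerateAdvisoryLockId only derives a stable numeric identifier; actually taking the lock is left to the driver. A hypothetical call site (the `pg_advisory_lock` query and the surrounding function are illustrative, not code from this repository):

```go
package noopdb // hypothetical continuation of the example above

import (
    "context"
    "database/sql"

    "github.com/golang-migrate/migrate/v4/database"
)

// lockWithAdvisoryID sketches how a SQL-backed driver might combine the helper
// with a Postgres advisory lock; db, ctx and the query are illustrative only.
func lockWithAdvisoryID(ctx context.Context, db *sql.DB, dbName, schemaName string) error {
    aid, err := database.GenerateAdvisoryLockId(dbName, schemaName)
    if err != nil {
        return err
    }
    query := "SELECT pg_advisory_lock($1)"
    if _, err := db.ExecContext(ctx, query, aid); err != nil {
        return &database.Error{OrigErr: err, Err: "try lock failed", Query: []byte(query)}
    }
    return nil
}
```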
5 vendor/github.com/golang-migrate/migrate/v4/docker-deploy.sh generated vendored Normal file
@@ -0,0 +1,5 @@
#!/bin/bash

echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin && \
docker build --build-arg VERSION="$TRAVIS_TAG" . -t migrate/migrate -t migrate/migrate:"$TRAVIS_TAG" && \
docker push migrate/migrate:"$TRAVIS_TAG" && docker push migrate/migrate
54 vendor/github.com/golang-migrate/migrate/v4/go.mod generated vendored Normal file
@@ -0,0 +1,54 @@
|
||||
module github.com/golang-migrate/migrate/v4
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.37.4
|
||||
github.com/aws/aws-sdk-go v1.17.7
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/cockroachdb/apd v1.1.0 // indirect
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
|
||||
github.com/cznic/ql v1.2.0
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3
|
||||
github.com/dhui/dktest v0.3.0
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf
|
||||
github.com/fsouza/fake-gcs-server v1.7.0
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4
|
||||
github.com/gogo/protobuf v1.2.1 // indirect
|
||||
github.com/golang/protobuf v1.3.1 // indirect
|
||||
github.com/golang/snappy v0.0.1 // indirect
|
||||
github.com/google/go-github v17.0.0+incompatible
|
||||
github.com/hashicorp/go-multierror v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.1 // indirect
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
|
||||
github.com/jackc/pgx v3.2.0+incompatible // indirect
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
|
||||
github.com/kr/pretty v0.1.0 // indirect
|
||||
github.com/kshvakov/clickhouse v1.3.5
|
||||
github.com/lib/pq v1.0.0
|
||||
github.com/mattn/go-sqlite3 v1.10.0
|
||||
github.com/mongodb/mongo-go-driver v0.3.0
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
|
||||
github.com/sirupsen/logrus v1.4.1 // indirect
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 // indirect
|
||||
github.com/xanzy/go-gitlab v0.15.0
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
|
||||
github.com/xdg/stringprep v1.0.0 // indirect
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b // indirect
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect
|
||||
golang.org/x/text v0.3.2 // indirect
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a
|
||||
google.golang.org/api v0.4.0
|
||||
google.golang.org/appengine v1.5.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb
|
||||
google.golang.org/grpc v1.20.1 // indirect
|
||||
)
|
||||
304 vendor/github.com/golang-migrate/migrate/v4/go.sum generated vendored Normal file
@@ -0,0 +1,304 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/aws/aws-sdk-go v1.17.7 h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA=
|
||||
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f h1:7uSNgsgcarNk4oiN/nNkO0J7KAjlsF5Yv5Gf/tFdHas=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4 h1:CVAqftqbj+exlab+8KJQrE+kNIVlQfJt58j4GxCMF1s=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00 h1:FHpbUtp2K8X53/b4aFNj4my5n+i3x+CQCZWNuHWH/+E=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
|
||||
github.com/cznic/lldb v1.1.0 h1:AIA+ham6TSJ+XkMe8imQ/g8KPzMUVWAwqUQQdtuMsHs=
|
||||
github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369 h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
|
||||
github.com/cznic/ql v1.2.0 h1:lcKp95ZtdF0XkWhGnVIXGF8dVD2X+ClS08tglKtf+ak=
|
||||
github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 h1:0rkFMAbn5KBKNpJyHQ6Prb95vIKanmAe62KxsrN+sqA=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc h1:YKKpTb2BrXN2GYyGaygIdis1vXbE7SSAG9axGWIMClg=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 h1:tkum0XDgfR0jcVVXuTsYv/erY2NnEDqwRojbxR1rBYA=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I=
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf h1:2v/98rHzs3v6X0AHtoCH9u+e56SdnpogB1Z2fFe1KqQ=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4 h1:vF83LI8tAakwEwvWZtrIEx7pOySacl2TOxx6eXk4ePo=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU=
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
||||
github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY=
|
||||
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kshvakov/clickhouse v1.3.5 h1:PDTYk9VYgbjPAWry3AoDREeMgOVUFij6bh6IjlloHL0=
|
||||
github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0 h1:00tKWMrabkVU1e57/TTP4ZBIfhn/wmjlSiRnIM9d0T8=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 h1:BP2bjP495BBPaBcS5rmqviTfrOkN5rO5ceKAMRZCRFc=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/xanzy/go-gitlab v0.15.0 h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ=
|
||||
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 h1:FP8hkuE6yUEaJnK7O2eTuejKWwW+Rhfj80dQ2JcKxCU=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae h1:mQLHiymj/JXKnnjc62tb7nD5pZLs940/sXJu+Xp3DBA=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a h1:jd4PGQGmrzmDZANUzIol3eClsCB/Jp5GmpGWMhi6hnY=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
12 vendor/github.com/golang-migrate/migrate/v4/log.go generated vendored Normal file
@@ -0,0 +1,12 @@
package migrate

// Logger is an interface so you can pass in your own
// logging implementation.
type Logger interface {

    // Printf is like fmt.Printf
    Printf(format string, v ...interface{})

    // Verbose should return true when verbose logging output is wanted
    Verbose() bool
}
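
This is the interface behind the README's "bring your own logger" bullet. A minimal, hypothetical adapter over the standard library logger could satisfy it like so:

```go
package migratelog // hypothetical example package

import (
    "log"

    "github.com/golang-migrate/migrate/v4"
)

// stdLogger adapts the standard library logger to migrate.Logger.
type stdLogger struct {
    verbose bool
}

func (l stdLogger) Printf(format string, v ...interface{}) { log.Printf(format, v...) }
func (l stdLogger) Verbose() bool                          { return l.verbose }

// attach shows where the logger is plugged in on a *migrate.Migrate instance.
func attach(m *migrate.Migrate) {
    m.Log = stdLogger{verbose: true}
}
```

Assigning such a value to `Migrate.Log` is all that is needed; when `Verbose()` returns true the library also emits its verbose output through `Printf`.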
977 vendor/github.com/golang-migrate/migrate/v4/migrate.go generated vendored Normal file
@@ -0,0 +1,977 @@
// Package migrate reads migrations from sources and runs them against databases.
// Sources are defined by the `source.Driver` and databases by the `database.Driver`
// interface. The driver interfaces are kept "dumb"; all migration logic is kept
// in this package.
package migrate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
"github.com/golang-migrate/migrate/v4/source"
|
||||
)
|
||||
|
||||
// DefaultPrefetchMigrations sets the number of migrations to pre-read
|
||||
// from the source. This is helpful if the source is remote, but has little
|
||||
// effect for a local source (i.e. file system).
|
||||
// Please note that this setting has a major impact on the memory usage,
|
||||
// since each pre-read migration is buffered in memory. See DefaultBufferSize.
|
||||
var DefaultPrefetchMigrations = uint(10)
|
||||
|
||||
// DefaultLockTimeout sets the max time a database driver has to acquire a lock.
|
||||
var DefaultLockTimeout = 15 * time.Second
|
||||
|
||||
var (
|
||||
ErrNoChange = errors.New("no change")
|
||||
ErrNilVersion = errors.New("no migration")
|
||||
ErrInvalidVersion = errors.New("version must be >= -1")
|
||||
ErrLocked = errors.New("database locked")
|
||||
ErrLockTimeout = errors.New("timeout: can't acquire database lock")
|
||||
)
|
||||
|
||||
// ErrShortLimit is an error returned when not enough migrations
|
||||
// can be returned by a source for a given limit.
|
||||
type ErrShortLimit struct {
|
||||
Short uint
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e ErrShortLimit) Error() string {
|
||||
return fmt.Sprintf("limit %v short", e.Short)
|
||||
}
|
||||
|
||||
type ErrDirty struct {
|
||||
Version int
|
||||
}
|
||||
|
||||
func (e ErrDirty) Error() string {
|
||||
return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version)
|
||||
}
|
||||
|
||||
type Migrate struct {
|
||||
sourceName string
|
||||
sourceDrv source.Driver
|
||||
databaseName string
|
||||
databaseDrv database.Driver
|
||||
|
||||
// Log accepts a Logger interface
|
||||
Log Logger
|
||||
|
||||
// GracefulStop accepts `true` and will stop executing migrations
|
||||
// as soon as possible at a safe break point, so that the database
|
||||
// is not corrupted.
|
||||
GracefulStop chan bool
|
||||
isLockedMu *sync.Mutex
|
||||
|
||||
isGracefulStop bool
|
||||
isLocked bool
|
||||
|
||||
// PrefetchMigrations defaults to DefaultPrefetchMigrations,
|
||||
// but can be set per Migrate instance.
|
||||
PrefetchMigrations uint
|
||||
|
||||
// LockTimeout defaults to DefaultLockTimeout,
|
||||
// but can be set per Migrate instance.
|
||||
LockTimeout time.Duration
|
||||
}
|
||||
|
||||
// New returns a new Migrate instance from a source URL and a database URL.
|
||||
// The URL scheme is defined by each driver.
|
||||
func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := sourceSchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseName, err := databaseSchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseName = databaseName
|
||||
|
||||
sourceDrv, err := source.Open(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceDrv = sourceDrv
|
||||
|
||||
databaseDrv, err := database.Open(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseDrv = databaseDrv
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewWithDatabaseInstance returns a new Migrate instance from a source URL
|
||||
// and an existing database instance. The source URL scheme is defined by each driver.
|
||||
// Use any string that can serve as an identifier during logging as databaseName.
|
||||
// You are responsible for closing the underlying database client if necessary.
|
||||
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := schemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
m.databaseName = databaseName
|
||||
|
||||
sourceDrv, err := source.Open(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceDrv = sourceDrv
|
||||
|
||||
m.databaseDrv = databaseInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewWithSourceInstance returns a new Migrate instance from an existing source instance
|
||||
// and a database URL. The database URL scheme is defined by each driver.
|
||||
// Use any string that can serve as an identifier during logging as sourceName.
|
||||
// You are responsible for closing the underlying source client if necessary.
|
||||
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
databaseName, err := schemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseName = databaseName
|
||||
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseDrv, err := database.Open(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseDrv = databaseDrv
|
||||
|
||||
m.sourceDrv = sourceInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewWithInstance returns a new Migrate instance from an existing source and
|
||||
// database instance. Use any string that can serve as an identifier during logging
|
||||
// as sourceName and databaseName. You are responsible for closing down
|
||||
// the underlying source and database client if necessary.
|
||||
func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
m.sourceName = sourceName
|
||||
m.databaseName = databaseName
|
||||
|
||||
m.sourceDrv = sourceInstance
|
||||
m.databaseDrv = databaseInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func newCommon() *Migrate {
|
||||
return &Migrate{
|
||||
GracefulStop: make(chan bool, 1),
|
||||
PrefetchMigrations: DefaultPrefetchMigrations,
|
||||
LockTimeout: DefaultLockTimeout,
|
||||
isLockedMu: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the source and the database.
|
||||
func (m *Migrate) Close() (source error, database error) {
|
||||
databaseSrvClose := make(chan error)
|
||||
sourceSrvClose := make(chan error)
|
||||
|
||||
m.logVerbosePrintf("Closing source and database\n")
|
||||
|
||||
go func() {
|
||||
databaseSrvClose <- m.databaseDrv.Close()
|
||||
}()
|
||||
|
||||
go func() {
|
||||
sourceSrvClose <- m.sourceDrv.Close()
|
||||
}()
|
||||
|
||||
return <-sourceSrvClose, <-databaseSrvClose
|
||||
}
|
||||
|
||||
// Migrate looks at the currently active migration version,
|
||||
// then migrates either up or down to the specified version.
|
||||
func (m *Migrate) Migrate(version uint) error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
go m.read(curVersion, int(version), ret)
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Steps looks at the currently active migration version.
|
||||
// It will migrate up if n > 0, and down if n < 0.
|
||||
func (m *Migrate) Steps(n int) error {
|
||||
if n == 0 {
|
||||
return ErrNoChange
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
if n > 0 {
|
||||
go m.readUp(curVersion, n, ret)
|
||||
} else {
|
||||
go m.readDown(curVersion, -n, ret)
|
||||
}
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Up looks at the currently active migration version
|
||||
// and will migrate all the way up (applying all up migrations).
|
||||
func (m *Migrate) Up() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
go m.readUp(curVersion, -1, ret)
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Down looks at the currently active migration version
|
||||
// and will migrate all the way down (applying all down migrations).
|
||||
func (m *Migrate) Down() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
go m.readDown(curVersion, -1, ret)
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Drop deletes everything in the database.
|
||||
func (m *Migrate) Drop() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.databaseDrv.Drop(); err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
return m.unlock()
|
||||
}
|
||||
|
||||
// Run runs any migration provided by you against the database.
|
||||
// It does not check any currently active version in database.
|
||||
// Usually you don't need this function at all. Use Migrate,
|
||||
// Steps, Up or Down instead.
|
||||
func (m *Migrate) Run(migration ...*Migration) error {
|
||||
if len(migration) == 0 {
|
||||
return ErrNoChange
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
go func() {
|
||||
defer close(ret)
|
||||
for _, migr := range migration {
|
||||
if m.PrefetchMigrations > 0 && migr.Body != nil {
|
||||
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
|
||||
} else {
|
||||
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func(migr *Migration) {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}(migr)
|
||||
}
|
||||
}()
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Force sets a migration version.
|
||||
// It does not check any currently active version in database.
|
||||
// It resets the dirty state to false.
|
||||
func (m *Migrate) Force(version int) error {
|
||||
if version < -1 {
|
||||
return ErrInvalidVersion
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.databaseDrv.SetVersion(version, false); err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
return m.unlock()
|
||||
}
|
||||
|
||||
// Version returns the currently active migration version.
|
||||
// If no migration has been applied, yet, it will return ErrNilVersion.
|
||||
func (m *Migrate) Version() (version uint, dirty bool, err error) {
|
||||
v, d, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
if v == database.NilVersion {
|
||||
return 0, false, ErrNilVersion
|
||||
}
|
||||
|
||||
return suint(v), d, nil
|
||||
}
|
||||
|
||||
// read reads either up or down migrations from source `from` to `to`.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once read is done reading it will close the ret channel.
|
||||
func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// check if to version exists
|
||||
if to >= 0 {
|
||||
if err := m.versionExists(suint(to)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// no change?
|
||||
if from == to {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
if from < to {
|
||||
// it's going up
|
||||
// apply first migration if from is nil version
|
||||
if from == -1 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, int(firstVersion))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(firstVersion)
|
||||
}
|
||||
|
||||
// run until we reach target ...
|
||||
for from < to {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
next, err := m.sourceDrv.Next(suint(from))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(next, int(next))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(next)
|
||||
}
|
||||
|
||||
} else {
|
||||
// it's going down
|
||||
// run until we reach target ...
|
||||
for from > to && from >= 0 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
prev, err := m.sourceDrv.Prev(suint(from))
|
||||
if os.IsNotExist(err) && to == -1 {
|
||||
// apply nil migration
|
||||
migr, err := m.newMigration(suint(from), -1)
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
return
|
||||
|
||||
} else if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(suint(from), int(prev))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readUp reads up migrations from `from` limited by `limit`.
|
||||
// limit can be -1, implying no limit and reading until there are no more migrations.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once readUp is done reading it will close the ret channel.
|
||||
func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
count := 0
|
||||
for count < limit || limit == -1 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
// apply first migration if from is nil version
|
||||
if from == -1 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, int(firstVersion))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(firstVersion)
|
||||
count++
|
||||
continue
|
||||
}
|
||||
|
||||
// apply next migration
|
||||
next, err := m.sourceDrv.Next(suint(from))
|
||||
if os.IsNotExist(err) {
|
||||
// no limit, but no migrations applied?
|
||||
if limit == -1 && count == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// no limit, reached end
|
||||
if limit == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
// reached end, and didn't apply any migrations
|
||||
if limit > 0 && count == 0 {
|
||||
ret <- os.ErrNotExist
|
||||
return
|
||||
}
|
||||
|
||||
// applied fewer migrations than limit?
|
||||
if count < limit {
|
||||
ret <- ErrShortLimit{suint(limit - count)}
|
||||
return
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(next, int(next))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(next)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
// readDown reads down migrations from `from` limited by `limit`.
|
||||
// limit can be -1, implying no limit and reading until there are no more migrations.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once readDown is done reading it will close the ret channel.
|
||||
func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// no change if already at nil version
|
||||
if from == -1 && limit == -1 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// can't go over limit if already at nil version
|
||||
if from == -1 && limit > 0 {
|
||||
ret <- os.ErrNotExist
|
||||
return
|
||||
}
|
||||
|
||||
count := 0
|
||||
for count < limit || limit == -1 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
prev, err := m.sourceDrv.Prev(suint(from))
|
||||
if os.IsNotExist(err) {
|
||||
// no limit or haven't reached limit, apply "first" migration
|
||||
if limit == -1 || limit-count > 0 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, -1)
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
count++
|
||||
}
|
||||
|
||||
if count < limit {
|
||||
ret <- ErrShortLimit{suint(limit - count)}
|
||||
}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(suint(from), int(prev))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(prev)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
// runMigrations reads *Migration and error from a channel. Any other type
|
||||
// sent on this channel will result in a panic. Each migration is then
|
||||
// proxied to the database driver and run against the database.
|
||||
// Before running a newly received migration it will check if it's supposed
|
||||
// to stop execution because it might have received a stop signal on the
|
||||
// GracefulStop channel.
|
||||
func (m *Migrate) runMigrations(ret <-chan interface{}) error {
|
||||
for r := range ret {
|
||||
|
||||
if m.stop() {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch r := r.(type) {
|
||||
case error:
|
||||
return r
|
||||
|
||||
case *Migration:
|
||||
migr := r
|
||||
|
||||
// set version with dirty state
|
||||
if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if migr.Body != nil {
|
||||
m.logVerbosePrintf("Read and execute %v\n", migr.LogString())
|
||||
if err := m.databaseDrv.Run(migr.BufferedBody); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// set clean state
|
||||
if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
endTime := time.Now()
|
||||
readTime := migr.FinishedReading.Sub(migr.StartedBuffering)
|
||||
runTime := endTime.Sub(migr.FinishedReading)
|
||||
|
||||
// log either verbose or normal
|
||||
if m.Log != nil {
|
||||
if m.Log.Verbose() {
|
||||
m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime)
|
||||
} else {
|
||||
m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown type: %T with value: %+v", r, r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// versionExists checks the source if either the up or down migration for
|
||||
// the specified migration version exists.
|
||||
func (m *Migrate) versionExists(version uint) (result error) {
|
||||
// try up migration first
|
||||
up, _, err := m.sourceDrv.ReadUp(version)
|
||||
if err == nil {
|
||||
defer func() {
|
||||
if errClose := up.Close(); errClose != nil {
|
||||
result = multierror.Append(result, errClose)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if os.IsExist(err) {
|
||||
return nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
// then try down migration
|
||||
down, _, err := m.sourceDrv.ReadDown(version)
|
||||
if err == nil {
|
||||
defer func() {
|
||||
if errClose := down.Close(); errClose != nil {
|
||||
result = multierror.Append(result, errClose)
|
||||
}
|
||||
}()
|
||||
}
|
||||
if os.IsExist(err) {
|
||||
return nil
|
||||
} else if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
// stop returns true if no more migrations should be run against the database
|
||||
// because a stop signal was received on the GracefulStop channel.
|
||||
// Calls are cheap and this function is not blocking.
|
||||
func (m *Migrate) stop() bool {
|
||||
if m.isGracefulStop {
|
||||
return true
|
||||
}
|
||||
|
||||
select {
|
||||
case <-m.GracefulStop:
|
||||
m.isGracefulStop = true
|
||||
return true
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// newMigration is a helper func that returns a *Migration for the
|
||||
// specified version and targetVersion.
|
||||
func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) {
|
||||
var migr *Migration
|
||||
|
||||
if targetVersion >= int(version) {
|
||||
r, identifier, err := m.sourceDrv.ReadUp(version)
|
||||
if os.IsNotExist(err) {
|
||||
// create "empty" migration
|
||||
migr, err = NewMigration(nil, "", version, targetVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
|
||||
} else {
|
||||
// create migration from up source
|
||||
migr, err = NewMigration(r, identifier, version, targetVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
r, identifier, err := m.sourceDrv.ReadDown(version)
|
||||
if os.IsNotExist(err) {
|
||||
// create "empty" migration
|
||||
migr, err = NewMigration(nil, "", version, targetVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
|
||||
} else {
|
||||
// create migration from down source
|
||||
migr, err = NewMigration(r, identifier, version, targetVersion)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if m.PrefetchMigrations > 0 && migr.Body != nil {
|
||||
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
|
||||
} else {
|
||||
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
|
||||
}
|
||||
|
||||
return migr, nil
|
||||
}
|
||||
|
||||
// lock is a thread safe helper function to lock the database.
|
||||
// It should be called as late as possible when running migrations.
|
||||
func (m *Migrate) lock() error {
|
||||
m.isLockedMu.Lock()
|
||||
defer m.isLockedMu.Unlock()
|
||||
|
||||
if m.isLocked {
|
||||
return ErrLocked
|
||||
}
|
||||
|
||||
// create done channel, used in the timeout goroutine
|
||||
done := make(chan bool, 1)
|
||||
defer func() {
|
||||
done <- true
|
||||
}()
|
||||
|
||||
// use errchan to signal error back to this context
|
||||
errchan := make(chan error, 2)
|
||||
|
||||
// start timeout goroutine
|
||||
timeout := time.After(m.LockTimeout)
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-done:
|
||||
return
|
||||
case <-timeout:
|
||||
errchan <- ErrLockTimeout
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// now try to acquire the lock
|
||||
go func() {
|
||||
if err := m.databaseDrv.Lock(); err != nil {
|
||||
errchan <- err
|
||||
} else {
|
||||
errchan <- nil
|
||||
}
|
||||
}()
|
||||
|
||||
// wait until we either receive ErrLockTimeout or error from Lock operation
|
||||
err := <-errchan
|
||||
if err == nil {
|
||||
m.isLocked = true
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// unlock is a thread safe helper function to unlock the database.
|
||||
// It should be called as early as possible when no more migrations are
|
||||
// expected to be executed.
|
||||
func (m *Migrate) unlock() error {
|
||||
m.isLockedMu.Lock()
|
||||
defer m.isLockedMu.Unlock()
|
||||
|
||||
if err := m.databaseDrv.Unlock(); err != nil {
|
||||
// BUG: Can potentially create a deadlock. Add a timeout.
|
||||
return err
|
||||
}
|
||||
|
||||
m.isLocked = false
|
||||
return nil
|
||||
}
|
||||
|
||||
// unlockErr calls unlock and returns a combined error
|
||||
// if a prevErr is not nil.
|
||||
func (m *Migrate) unlockErr(prevErr error) error {
|
||||
if err := m.unlock(); err != nil {
|
||||
return multierror.Append(prevErr, err)
|
||||
}
|
||||
return prevErr
|
||||
}
|
||||
|
||||
// logPrintf writes to m.Log if not nil
|
||||
func (m *Migrate) logPrintf(format string, v ...interface{}) {
|
||||
if m.Log != nil {
|
||||
m.Log.Printf(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
|
||||
func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) {
|
||||
if m.Log != nil && m.Log.Verbose() {
|
||||
m.Log.Printf(format, v...)
|
||||
}
|
||||
}
|
||||
|
||||
// logErr writes error to m.Log if not nil
|
||||
func (m *Migrate) logErr(err error) {
|
||||
if m.Log != nil {
|
||||
m.Log.Printf("error: %v", err)
|
||||
}
|
||||
}
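For orientation, here is a minimal sketch of how the locking and Up flow above is usually driven from application code. The source and database URLs are placeholders, and the blank imports stand in for whichever drivers the application actually uses:

package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres" // placeholder database driver
	_ "github.com/golang-migrate/migrate/v4/source/file"      // placeholder source driver
)

func main() {
	// New resolves the source and database drivers from the URL schemes.
	m, err := migrate.New("file://migrations", "postgres://localhost:5432/example?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}

	// Up locks the database, applies all pending up migrations and unlocks again.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}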
|
||||
160
vendor/github.com/golang-migrate/migrate/v4/migration.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultBufferSize sets the in memory buffer size (in Bytes) for every
|
||||
// pre-read migration (see DefaultPrefetchMigrations).
|
||||
var DefaultBufferSize = uint(100000)
|
||||
|
||||
// Migration holds information about a migration.
|
||||
// It is initially created from data coming from the source and then
|
||||
// used when run against the database.
|
||||
type Migration struct {
|
||||
// Identifier can be any string to help identify
|
||||
// the migration in the source.
|
||||
Identifier string
|
||||
|
||||
// Version is the version of this migration.
|
||||
Version uint
|
||||
|
||||
// TargetVersion is the migration version after this migration
|
||||
// has been applied to the database.
|
||||
// Can be -1, implying that this is a NilVersion.
|
||||
TargetVersion int
|
||||
|
||||
// Body holds an io.ReadCloser to the source.
|
||||
Body io.ReadCloser
|
||||
|
||||
// BufferedBody holds a buffered io.Reader to the underlying Body.
|
||||
BufferedBody io.Reader
|
||||
|
||||
// BufferSize defaults to DefaultBufferSize
|
||||
BufferSize uint
|
||||
|
||||
// bufferWriter holds an io.WriteCloser and pipes to BufferedBody.
|
||||
// It's a WriteCloser used for flow control.
|
||||
bufferWriter io.WriteCloser
|
||||
|
||||
// Scheduled is the time when the migration was scheduled/queued.
|
||||
Scheduled time.Time
|
||||
|
||||
// StartedBuffering is the time when buffering of the migration source started.
|
||||
StartedBuffering time.Time
|
||||
|
||||
// FinishedBuffering is the time when buffering of the migration source finished.
|
||||
FinishedBuffering time.Time
|
||||
|
||||
// FinishedReading is the time when the migration source is fully read.
|
||||
FinishedReading time.Time
|
||||
|
||||
// BytesRead holds the number of Bytes read from the migration source.
|
||||
BytesRead int64
|
||||
}
|
||||
|
||||
// NewMigration returns a new Migration and sets the body, identifier,
|
||||
// version and targetVersion. Body can be nil, which turns this migration
|
||||
// into a "NilMigration". If no identifier is provided, it will default to "<empty>".
|
||||
// targetVersion can be -1, implying it is a NilVersion.
|
||||
//
|
||||
// What is a NilMigration?
|
||||
// Usually each migration version coming from source is expected to have an
|
||||
// Up and Down migration. This is not a hard requirement though, leading to
|
||||
// a situation where only the Up or Down migration is present. So let's say
|
||||
// the user wants to migrate up to a version that doesn't have the actual Up
|
||||
// migration, in that case we still want to apply the version, but with an empty
|
||||
// body. We are calling that a NilMigration, a migration with an empty body.
|
||||
//
|
||||
// What is a NilVersion?
|
||||
// NilVersion is a const(-1). When running down migrations and we are at the
|
||||
// last down migration, there is no next down migration, the targetVersion should
|
||||
// be nil. Nil in this case is represented by -1 (because type int).
|
||||
func NewMigration(body io.ReadCloser, identifier string,
|
||||
version uint, targetVersion int) (*Migration, error) {
|
||||
tnow := time.Now()
|
||||
m := &Migration{
|
||||
Identifier: identifier,
|
||||
Version: version,
|
||||
TargetVersion: targetVersion,
|
||||
Scheduled: tnow,
|
||||
}
|
||||
|
||||
if body == nil {
|
||||
if len(identifier) == 0 {
|
||||
m.Identifier = "<empty>"
|
||||
}
|
||||
|
||||
m.StartedBuffering = tnow
|
||||
m.FinishedBuffering = tnow
|
||||
m.FinishedReading = tnow
|
||||
return m, nil
|
||||
}
|
||||
|
||||
br, bw := io.Pipe()
|
||||
m.Body = body // want to simulate low latency? newSlowReader(body)
|
||||
m.BufferSize = DefaultBufferSize
|
||||
m.BufferedBody = br
|
||||
m.bufferWriter = bw
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// String implements string.Stringer and is used in tests.
|
||||
func (m *Migration) String() string {
|
||||
return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion)
|
||||
}
|
||||
|
||||
// LogString returns a string describing this migration to humans.
|
||||
func (m *Migration) LogString() string {
|
||||
directionStr := "u"
|
||||
if m.TargetVersion < int(m.Version) {
|
||||
directionStr = "d"
|
||||
}
|
||||
return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier)
|
||||
}
|
||||
|
||||
// Buffer buffers Body up to BufferSize.
|
||||
// Calling this function blocks. Call with goroutine.
|
||||
func (m *Migration) Buffer() error {
|
||||
if m.Body == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
m.StartedBuffering = time.Now()
|
||||
|
||||
b := bufio.NewReaderSize(m.Body, int(m.BufferSize))
|
||||
|
||||
// start reading from body, peek won't move the read pointer though
|
||||
// poor man's solution?
|
||||
if _, err := b.Peek(int(m.BufferSize)); err != nil && err != io.EOF {
|
||||
return err
|
||||
}
|
||||
|
||||
m.FinishedBuffering = time.Now()
|
||||
|
||||
// write to bufferWriter, this will block until
|
||||
// something starts reading from m.Buffer
|
||||
n, err := b.WriteTo(m.bufferWriter)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m.FinishedReading = time.Now()
|
||||
m.BytesRead = n
|
||||
|
||||
// close bufferWriter so Buffer knows that there is no
|
||||
// more data coming
|
||||
if err := m.bufferWriter.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// it's safe to close the Body too
|
||||
if err := m.Body.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
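A minimal sketch of the buffering flow described above: Buffer is started on its own goroutine while the consumer reads the piped body from BufferedBody. The SQL statement and identifier are invented for illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	"github.com/golang-migrate/migrate/v4"
)

func main() {
	body := ioutil.NopCloser(strings.NewReader("CREATE TABLE example (id INT);"))
	migr, err := migrate.NewMigration(body, "create_example", 1, 1)
	if err != nil {
		log.Fatal(err)
	}

	// Buffer blocks until something reads from BufferedBody, so run it in a goroutine.
	go func() {
		if err := migr.Buffer(); err != nil {
			log.Print(err)
		}
	}()

	sql, err := ioutil.ReadAll(migr.BufferedBody)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (%d bytes read)\n", sql, migr.BytesRead)
}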
|
||||
118
vendor/github.com/golang-migrate/migrate/v4/source/driver.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
|
||||
// Package source provides the Source interface.
|
||||
// All source drivers must implement this interface, register themselves,
|
||||
// optionally provide a `WithInstance` function and pass the tests
|
||||
// in package source/testing.
|
||||
package source
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
nurl "net/url"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var driversMu sync.RWMutex
|
||||
var drivers = make(map[string]Driver)
|
||||
|
||||
// Driver is the interface every source driver must implement.
|
||||
//
|
||||
// How to implement a source driver?
|
||||
// 1. Implement this interface.
|
||||
// 2. Optionally, add a function named `WithInstance`.
|
||||
// This function should accept an existing source instance and a Config{} struct
|
||||
// and return a driver instance.
|
||||
// 3. Add a test that calls source/testing.go:Test()
|
||||
// 4. Add own tests for Open(), WithInstance() (when provided) and Close().
|
||||
// All other functions are tested by tests in source/testing.
|
||||
// Saves you some time and makes sure all source drivers behave the same way.
|
||||
// 5. Call Register in init().
|
||||
//
|
||||
// Guidelines:
|
||||
// * All configuration input must come from the URL string in func Open()
|
||||
// or the Config{} struct in WithInstance. Don't os.Getenv().
|
||||
// * Drivers are supposed to be read only.
|
||||
// * Ideally don't load any contents (into memory) in Open or WithInstance.
|
||||
type Driver interface {
|
||||
// Open returns a new driver instance configured with parameters
|
||||
// coming from the URL string. Migrate will call this function
|
||||
// only once per instance.
|
||||
Open(url string) (Driver, error)
|
||||
|
||||
// Close closes the underlying source instance managed by the driver.
|
||||
// Migrate will call this function only once per instance.
|
||||
Close() error
|
||||
|
||||
// First returns the very first migration version available to the driver.
|
||||
// Migrate will call this function multiple times.
|
||||
// If there is no version available, it must return os.ErrNotExist.
|
||||
First() (version uint, err error)
|
||||
|
||||
// Prev returns the previous version for a given version available to the driver.
|
||||
// Migrate will call this function multiple times.
|
||||
// If there is no previous version available, it must return os.ErrNotExist.
|
||||
Prev(version uint) (prevVersion uint, err error)
|
||||
|
||||
// Next returns the next version for a given version available to the driver.
|
||||
// Migrate will call this function multiple times.
|
||||
// If there is no next version available, it must return os.ErrNotExist.
|
||||
Next(version uint) (nextVersion uint, err error)
|
||||
|
||||
// ReadUp returns the UP migration body and an identifier that helps
|
||||
// finding this migration in the source for a given version.
|
||||
// If there is no up migration available for this version,
|
||||
// it must return os.ErrNotExist.
|
||||
// Do not start reading, just return the ReadCloser!
|
||||
ReadUp(version uint) (r io.ReadCloser, identifier string, err error)
|
||||
|
||||
// ReadDown returns the DOWN migration body and an identifier that helps
|
||||
// finding this migration in the source for a given version.
|
||||
// If there is no down migration available for this version,
|
||||
// it must return os.ErrNotExist.
|
||||
// Do not start reading, just return the ReadCloser!
|
||||
ReadDown(version uint) (r io.ReadCloser, identifier string, err error)
|
||||
}
|
||||
|
||||
// Open returns a new driver instance.
|
||||
func Open(url string) (Driver, error) {
|
||||
u, err := nurl.Parse(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if u.Scheme == "" {
|
||||
return nil, fmt.Errorf("source driver: invalid URL scheme")
|
||||
}
|
||||
|
||||
driversMu.RLock()
|
||||
d, ok := drivers[u.Scheme]
|
||||
driversMu.RUnlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme)
|
||||
}
|
||||
|
||||
return d.Open(url)
|
||||
}
|
||||
|
||||
// Register globally registers a driver.
|
||||
func Register(name string, driver Driver) {
|
||||
driversMu.Lock()
|
||||
defer driversMu.Unlock()
|
||||
if driver == nil {
|
||||
panic("Register driver is nil")
|
||||
}
|
||||
if _, dup := drivers[name]; dup {
|
||||
panic("Register called twice for driver " + name)
|
||||
}
|
||||
drivers[name] = driver
|
||||
}
|
||||
|
||||
// List lists the registered drivers
|
||||
func List() []string {
|
||||
driversMu.RLock()
|
||||
defer driversMu.RUnlock()
|
||||
names := make([]string, 0, len(drivers))
|
||||
for n := range drivers {
|
||||
names = append(names, n)
|
||||
}
|
||||
return names
|
||||
}
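A short usage sketch of this registry: a concrete source driver registers itself in init() via a blank import (the file driver and the path below are assumptions), and Open resolves it by URL scheme:

package main

import (
	"fmt"
	"log"

	"github.com/golang-migrate/migrate/v4/source"
	_ "github.com/golang-migrate/migrate/v4/source/file" // registers the "file" scheme
)

func main() {
	drv, err := source.Open("file://migrations") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer drv.Close()

	// First returns os.ErrNotExist when the directory holds no migrations.
	first, err := drv.First()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("first version:", first)
}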
|
||||
143
vendor/github.com/golang-migrate/migrate/v4/source/migration.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
|
||||
package source
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Direction is either up or down.
|
||||
type Direction string
|
||||
|
||||
const (
|
||||
Down Direction = "down"
|
||||
Up Direction = "up"
|
||||
)
|
||||
|
||||
// Migration is a helper struct for source drivers that need to
|
||||
// build the full directory tree in memory.
|
||||
// Migration is fully independent from migrate.Migration.
|
||||
type Migration struct {
|
||||
// Version is the version of this migration.
|
||||
Version uint
|
||||
|
||||
// Identifier can be any string that helps identify
|
||||
// this migration in the source.
|
||||
Identifier string
|
||||
|
||||
// Direction is either Up or Down.
|
||||
Direction Direction
|
||||
|
||||
// Raw holds the raw location path to this migration in source.
|
||||
// ReadUp and ReadDown will use this.
|
||||
Raw string
|
||||
}
|
||||
|
||||
// Migrations wraps Migration and has an internal index
|
||||
// to keep track of Migration order.
|
||||
type Migrations struct {
|
||||
index uintSlice
|
||||
migrations map[uint]map[Direction]*Migration
|
||||
}
|
||||
|
||||
func NewMigrations() *Migrations {
|
||||
return &Migrations{
|
||||
index: make(uintSlice, 0),
|
||||
migrations: make(map[uint]map[Direction]*Migration),
|
||||
}
|
||||
}
|
||||
|
||||
func (i *Migrations) Append(m *Migration) (ok bool) {
|
||||
if m == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if i.migrations[m.Version] == nil {
|
||||
i.migrations[m.Version] = make(map[Direction]*Migration)
|
||||
}
|
||||
|
||||
// reject duplicate versions
|
||||
if _, dup := i.migrations[m.Version][m.Direction]; dup {
|
||||
return false
|
||||
}
|
||||
|
||||
i.migrations[m.Version][m.Direction] = m
|
||||
i.buildIndex()
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (i *Migrations) buildIndex() {
|
||||
i.index = make(uintSlice, 0)
|
||||
for version := range i.migrations {
|
||||
i.index = append(i.index, version)
|
||||
}
|
||||
sort.Sort(i.index)
|
||||
}
|
||||
|
||||
func (i *Migrations) First() (version uint, ok bool) {
|
||||
if len(i.index) == 0 {
|
||||
return 0, false
|
||||
}
|
||||
return i.index[0], true
|
||||
}
|
||||
|
||||
func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) {
|
||||
pos := i.findPos(version)
|
||||
if pos >= 1 && len(i.index) > pos-1 {
|
||||
return i.index[pos-1], true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) {
|
||||
pos := i.findPos(version)
|
||||
if pos >= 0 && len(i.index) > pos+1 {
|
||||
return i.index[pos+1], true
|
||||
}
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func (i *Migrations) Up(version uint) (m *Migration, ok bool) {
|
||||
if _, ok := i.migrations[version]; ok {
|
||||
if mx, ok := i.migrations[version][Up]; ok {
|
||||
return mx, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (i *Migrations) Down(version uint) (m *Migration, ok bool) {
|
||||
if _, ok := i.migrations[version]; ok {
|
||||
if mx, ok := i.migrations[version][Down]; ok {
|
||||
return mx, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func (i *Migrations) findPos(version uint) int {
|
||||
if len(i.index) > 0 {
|
||||
ix := i.index.Search(version)
|
||||
if ix < len(i.index) && i.index[ix] == version {
|
||||
return ix
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
type uintSlice []uint
|
||||
|
||||
func (s uintSlice) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s uintSlice) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s uintSlice) Less(i, j int) bool {
|
||||
return s[i] < s[j]
|
||||
}
|
||||
|
||||
func (s uintSlice) Search(x uint) int {
|
||||
return sort.Search(len(s), func(i int) bool { return s[i] >= x })
|
||||
}
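A brief sketch of how a source driver might use this in-memory index; the file names and versions are invented:

package main

import (
	"fmt"

	"github.com/golang-migrate/migrate/v4/source"
)

func main() {
	ms := source.NewMigrations()
	ms.Append(&source.Migration{Version: 1, Identifier: "first", Direction: source.Up, Raw: "1_first.up.sql"})
	ms.Append(&source.Migration{Version: 2, Identifier: "second", Direction: source.Up, Raw: "2_second.up.sql"})

	first, _ := ms.First()       // 1
	next, _ := ms.Next(first)    // 2
	_, hasDown := ms.Down(first) // false: no down migration was appended
	fmt.Println(first, next, hasDown)
}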
|
||||
39
vendor/github.com/golang-migrate/migrate/v4/source/parse.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
|
||||
package source
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrParse = fmt.Errorf("no match")
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultParse = Parse
|
||||
DefaultRegex = Regex
|
||||
)
|
||||
|
||||
// Regex matches the following pattern:
|
||||
// 123_name.up.ext
|
||||
// 123_name.down.ext
|
||||
var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`)
|
||||
|
||||
// Parse returns Migration for matching Regex pattern.
|
||||
func Parse(raw string) (*Migration, error) {
|
||||
m := Regex.FindStringSubmatch(raw)
|
||||
if len(m) == 5 {
|
||||
versionUint64, err := strconv.ParseUint(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Migration{
|
||||
Version: uint(versionUint64),
|
||||
Identifier: m[2],
|
||||
Direction: Direction(m[3]),
|
||||
Raw: raw,
|
||||
}, nil
|
||||
}
|
||||
return nil, ErrParse
|
||||
}
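For example, a file name following the `123_name.up.ext` pattern above would parse as follows (the name itself is invented):

package main

import (
	"fmt"
	"log"

	"github.com/golang-migrate/migrate/v4/source"
)

func main() {
	// Names that do not match Regex return ErrParse.
	m, err := source.Parse("1565113052_add_nodes.up.sql")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(m.Version, m.Identifier, m.Direction) // 1565113052 add_nodes up
}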
|
||||
99
vendor/github.com/golang-migrate/migrate/v4/util.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
nurl "net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MultiError holds multiple errors.
|
||||
//
|
||||
// Deprecated: Use github.com/hashicorp/go-multierror instead
|
||||
type MultiError struct {
|
||||
Errs []error
|
||||
}
|
||||
|
||||
// NewMultiError returns an error type holding multiple errors.
|
||||
//
|
||||
// Deprecated: Use github.com/hashicorp/go-multierror instead
|
||||
//
|
||||
func NewMultiError(errs ...error) MultiError {
|
||||
compactErrs := make([]error, 0)
|
||||
for _, e := range errs {
|
||||
if e != nil {
|
||||
compactErrs = append(compactErrs, e)
|
||||
}
|
||||
}
|
||||
return MultiError{compactErrs}
|
||||
}
|
||||
|
||||
// Error implements error. Multiple errors are concatenated with 'and's.
|
||||
func (m MultiError) Error() string {
|
||||
var strs = make([]string, 0)
|
||||
for _, e := range m.Errs {
|
||||
if len(e.Error()) > 0 {
|
||||
strs = append(strs, e.Error())
|
||||
}
|
||||
}
|
||||
return strings.Join(strs, " and ")
|
||||
}
|
||||
|
||||
// suint safely converts int to uint
|
||||
// see https://goo.gl/wEcqof
|
||||
// see https://goo.gl/pai7Dr
|
||||
func suint(n int) uint {
|
||||
if n < 0 {
|
||||
panic(fmt.Sprintf("suint(%v) expects input >= 0", n))
|
||||
}
|
||||
return uint(n)
|
||||
}
|
||||
|
||||
var errNoScheme = errors.New("no scheme")
|
||||
var errEmptyURL = errors.New("URL cannot be empty")
|
||||
|
||||
func sourceSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("source: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func databaseSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("database: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// schemeFromURL returns the scheme from a URL string
|
||||
func schemeFromURL(url string) (string, error) {
|
||||
if url == "" {
|
||||
return "", errEmptyURL
|
||||
}
|
||||
|
||||
u, err := nurl.Parse(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(u.Scheme) == 0 {
|
||||
return "", errNoScheme
|
||||
}
|
||||
|
||||
return u.Scheme, nil
|
||||
}
|
||||
|
||||
// FilterCustomQuery filters all query values starting with `x-`
|
||||
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
|
||||
ux := *u
|
||||
vx := make(nurl.Values)
|
||||
for k, v := range ux.Query() {
|
||||
if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") {
|
||||
vx[k] = v
|
||||
}
|
||||
}
|
||||
ux.RawQuery = vx.Encode()
|
||||
return &ux
|
||||
}
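A quick sketch of the filtering above; `x-migrations-table` stands in for any migrate-specific `x-` parameter, while the remaining query values are passed through to the database driver:

package main

import (
	"fmt"
	"log"
	nurl "net/url"

	"github.com/golang-migrate/migrate/v4"
)

func main() {
	u, err := nurl.Parse("postgres://localhost:5432/example?sslmode=disable&x-migrations-table=schema_migrations")
	if err != nil {
		log.Fatal(err)
	}
	// The x-prefixed parameter is stripped, sslmode survives.
	fmt.Println(migrate.FilterCustomQuery(u).String())
	// postgres://localhost:5432/example?sslmode=disable
}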
|
||||
3
vendor/github.com/golang/protobuf/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# This source code refers to The Go Authors for copyright purposes.
|
||||
# The master list of authors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/AUTHORS.
|
||||
3
vendor/github.com/golang/protobuf/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,3 @@
|
||||
# This source code was written by the Go contributors.
|
||||
# The master list of contributors is in the main Go distribution,
|
||||
# visible at http://tip.golang.org/CONTRIBUTORS.
|
||||
28
vendor/github.com/golang/protobuf/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
Normal file
@@ -0,0 +1,253 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer deep copy and merge.
|
||||
// TODO: RawMessage.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Clone returns a deep copy of a protocol buffer.
|
||||
func Clone(src Message) Message {
|
||||
in := reflect.ValueOf(src)
|
||||
if in.IsNil() {
|
||||
return src
|
||||
}
|
||||
out := reflect.New(in.Type().Elem())
|
||||
dst := out.Interface().(Message)
|
||||
Merge(dst, src)
|
||||
return dst
|
||||
}
|
||||
|
||||
// Merger is the interface representing objects that can merge messages of the same type.
|
||||
type Merger interface {
|
||||
// Merge merges src into this message.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
//
|
||||
// Merge may panic if called with a different argument type than the receiver.
|
||||
Merge(src Message)
|
||||
}
|
||||
|
||||
// generatedMerger is the custom merge method that generated protos will have.
|
||||
// We must add this method since a generated Merge method will conflict with
|
||||
// many existing protos that have a Merge data field already defined.
|
||||
type generatedMerger interface {
|
||||
XXX_Merge(src Message)
|
||||
}
|
||||
|
||||
// Merge merges src into dst.
|
||||
// Required and optional fields that are set in src will be set to that value in dst.
|
||||
// Elements of repeated fields will be appended.
|
||||
// Merge panics if src and dst are not the same type, or if dst is nil.
|
||||
func Merge(dst, src Message) {
|
||||
if m, ok := dst.(Merger); ok {
|
||||
m.Merge(src)
|
||||
return
|
||||
}
|
||||
|
||||
in := reflect.ValueOf(src)
|
||||
out := reflect.ValueOf(dst)
|
||||
if out.IsNil() {
|
||||
panic("proto: nil destination")
|
||||
}
|
||||
if in.Type() != out.Type() {
|
||||
panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
|
||||
}
|
||||
if in.IsNil() {
|
||||
return // Merge from nil src is a noop
|
||||
}
|
||||
if m, ok := dst.(generatedMerger); ok {
|
||||
m.XXX_Merge(src)
|
||||
return
|
||||
}
|
||||
mergeStruct(out.Elem(), in.Elem())
|
||||
}
|
||||
|
||||
func mergeStruct(out, in reflect.Value) {
|
||||
sprop := GetProperties(in.Type())
|
||||
for i := 0; i < in.NumField(); i++ {
|
||||
f := in.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
|
||||
}
|
||||
|
||||
if emIn, err := extendable(in.Addr().Interface()); err == nil {
|
||||
emOut, _ := extendable(out.Addr().Interface())
|
||||
mIn, muIn := emIn.extensionsRead()
|
||||
if mIn != nil {
|
||||
mOut := emOut.extensionsWrite()
|
||||
muIn.Lock()
|
||||
mergeExtension(mOut, mIn)
|
||||
muIn.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
uf := in.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return
|
||||
}
|
||||
uin := uf.Bytes()
|
||||
if len(uin) > 0 {
|
||||
out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
|
||||
}
|
||||
}
|
||||
|
||||
// mergeAny performs a merge between two values of the same type.
|
||||
// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
|
||||
// prop is set if this is a struct field (it may be nil).
|
||||
func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
|
||||
if in.Type() == protoMessageType {
|
||||
if !in.IsNil() {
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
|
||||
} else {
|
||||
Merge(out.Interface().(Message), in.Interface().(Message))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
switch in.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !viaPtr && isProto3Zero(in) {
|
||||
return
|
||||
}
|
||||
out.Set(in)
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; copy non-nil values.
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
// Allocate destination if it is not set, or set to a different type.
|
||||
// Otherwise we will merge as normal.
|
||||
if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
|
||||
out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), false, nil)
|
||||
case reflect.Map:
|
||||
if in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeMap(in.Type()))
|
||||
}
|
||||
// For maps with value types of *T or []byte we need to deep copy each value.
|
||||
elemKind := in.Type().Elem().Kind()
|
||||
for _, key := range in.MapKeys() {
|
||||
var val reflect.Value
|
||||
switch elemKind {
|
||||
case reflect.Ptr:
|
||||
val = reflect.New(in.Type().Elem().Elem())
|
||||
mergeAny(val, in.MapIndex(key), false, nil)
|
||||
case reflect.Slice:
|
||||
val = in.MapIndex(key)
|
||||
val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
|
||||
default:
|
||||
val = in.MapIndex(key)
|
||||
}
|
||||
out.SetMapIndex(key, val)
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.New(in.Elem().Type()))
|
||||
}
|
||||
mergeAny(out.Elem(), in.Elem(), true, nil)
|
||||
case reflect.Slice:
|
||||
if in.IsNil() {
|
||||
return
|
||||
}
|
||||
if in.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// []byte is a scalar bytes field, not a repeated field.
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value, and should not
|
||||
// be merged.
|
||||
if prop != nil && prop.proto3 && in.Len() == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Make a deep copy.
|
||||
// Append to []byte{} instead of []byte(nil) so that we never end up
|
||||
// with a nil result.
|
||||
out.SetBytes(append([]byte{}, in.Bytes()...))
|
||||
return
|
||||
}
|
||||
n := in.Len()
|
||||
if out.IsNil() {
|
||||
out.Set(reflect.MakeSlice(in.Type(), 0, n))
|
||||
}
|
||||
switch in.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
|
||||
reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
out.Set(reflect.AppendSlice(out, in))
|
||||
default:
|
||||
for i := 0; i < n; i++ {
|
||||
x := reflect.Indirect(reflect.New(in.Type().Elem()))
|
||||
mergeAny(x, in.Index(i), false, nil)
|
||||
out.Set(reflect.Append(out, x))
|
||||
}
|
||||
}
|
||||
case reflect.Struct:
|
||||
mergeStruct(out, in)
|
||||
default:
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to copy %v", in)
|
||||
}
|
||||
}
|
||||
|
||||
func mergeExtension(out, in map[int32]Extension) {
|
||||
for extNum, eIn := range in {
|
||||
eOut := Extension{desc: eIn.desc}
|
||||
if eIn.value != nil {
|
||||
v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
|
||||
mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
|
||||
eOut.value = v.Interface()
|
||||
}
|
||||
if eIn.enc != nil {
|
||||
eOut.enc = make([]byte, len(eIn.enc))
|
||||
copy(eOut.enc, eIn.enc)
|
||||
}
|
||||
|
||||
out[extNum] = eOut
|
||||
}
|
||||
}
|
||||
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
Normal file
@@ -0,0 +1,427 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for decoding protocol buffer data to construct in-memory representations.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// errOverflow is returned when an integer is too large to be represented.
|
||||
var errOverflow = errors.New("proto: integer overflow")
|
||||
|
||||
// ErrInternalBadWireType is returned by generated code when an incorrect
|
||||
// wire type is encountered. It does not get returned to user code.
|
||||
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the slice.
|
||||
// It returns the integer and the number of bytes consumed, or
|
||||
// zero if there is not enough.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func DecodeVarint(buf []byte) (x uint64, n int) {
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if n >= len(buf) {
|
||||
return 0, 0
|
||||
}
|
||||
b := uint64(buf[n])
|
||||
n++
|
||||
x |= (b & 0x7F) << shift
|
||||
if (b & 0x80) == 0 {
|
||||
return x, n
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
return 0, 0
|
||||
}
|
||||
|
||||
func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
|
||||
i := p.index
|
||||
l := len(p.buf)
|
||||
|
||||
for shift := uint(0); shift < 64; shift += 7 {
|
||||
if i >= l {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
b := p.buf[i]
|
||||
i++
|
||||
x |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
p.index = i
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// The number is too large to represent in a 64-bit value.
|
||||
err = errOverflow
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||
i := p.index
|
||||
buf := p.buf
|
||||
|
||||
if i >= len(buf) {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
} else if buf[i] < 0x80 {
|
||||
p.index++
|
||||
return uint64(buf[i]), nil
|
||||
} else if len(buf)-i < 10 {
|
||||
return p.decodeVarintSlow()
|
||||
}
|
||||
|
||||
var b uint64
|
||||
// we already checked the first byte
|
||||
x = uint64(buf[i]) - 0x80
|
||||
i++
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 7
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 7
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 14
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 14
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 21
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 21
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 28
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 28
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 35
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 35
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 42
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 42
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 49
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 49
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 56
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
x -= 0x80 << 56
|
||||
|
||||
b = uint64(buf[i])
|
||||
i++
|
||||
x += b << 63
|
||||
if b&0x80 == 0 {
|
||||
goto done
|
||||
}
|
||||
|
||||
return 0, errOverflow
|
||||
|
||||
done:
|
||||
p.index = i
|
||||
return x, nil
|
||||
}
|
||||
|
||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 8
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-8])
|
||||
x |= uint64(p.buf[i-7]) << 8
|
||||
x |= uint64(p.buf[i-6]) << 16
|
||||
x |= uint64(p.buf[i-5]) << 24
|
||||
x |= uint64(p.buf[i-4]) << 32
|
||||
x |= uint64(p.buf[i-3]) << 40
|
||||
x |= uint64(p.buf[i-2]) << 48
|
||||
x |= uint64(p.buf[i-1]) << 56
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeFixed32 reads a 32-bit integer from the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
|
||||
// x, err already 0
|
||||
i := p.index + 4
|
||||
if i < 0 || i > len(p.buf) {
|
||||
err = io.ErrUnexpectedEOF
|
||||
return
|
||||
}
|
||||
p.index = i
|
||||
|
||||
x = uint64(p.buf[i-4])
|
||||
x |= uint64(p.buf[i-3]) << 8
|
||||
x |= uint64(p.buf[i-2]) << 16
|
||||
x |= uint64(p.buf[i-1]) << 24
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
|
||||
// from the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||
x, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
|
||||
n, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nb := int(n)
|
||||
if nb < 0 {
|
||||
return nil, fmt.Errorf("proto: bad byte length %d", nb)
|
||||
}
|
||||
end := p.index + nb
|
||||
if end < p.index || end > len(p.buf) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
if !alloc {
|
||||
// todo: check if can get more uses of alloc=false
|
||||
buf = p.buf[p.index:end]
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
buf = make([]byte, nb)
|
||||
copy(buf, p.buf[p.index:])
|
||||
p.index += nb
|
||||
return
|
||||
}
|
||||
|
||||
// DecodeStringBytes reads an encoded string from the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||
buf, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// Unmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The argument points to data that may be
|
||||
// overwritten, so implementations should not keep references to the
|
||||
// buffer.
|
||||
// Unmarshal implementations should not clear the receiver.
|
||||
// Any unmarshaled data should be merged into the receiver.
|
||||
// Callers of Unmarshal that do not want to retain existing data
|
||||
// should Reset the receiver before calling Unmarshal.
|
||||
type Unmarshaler interface {
|
||||
Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// newUnmarshaler is the interface representing objects that can
|
||||
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||
//
|
||||
// This exists to support protoc-gen-go generated messages.
|
||||
// The proto package will stop type-asserting to this interface in the future.
|
||||
//
|
||||
// DO NOT DEPEND ON THIS.
|
||||
type newUnmarshaler interface {
|
||||
XXX_Unmarshal([]byte) error
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||
// decoded result in pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// Unmarshal resets pb before starting to unmarshal, so any
|
||||
// existing data in pb is always removed. Use UnmarshalMerge
|
||||
// to preserve and append to existing data.
|
||||
func Unmarshal(buf []byte, pb Message) error {
|
||||
pb.Reset()
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||
// writes the decoded result to pb. If the struct underlying pb does not match
|
||||
// the data in buf, the results can be unpredictable.
|
||||
//
|
||||
// UnmarshalMerge merges into existing data in pb.
|
||||
// Most code should use Unmarshal instead.
|
||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
return u.XXX_Unmarshal(buf)
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto has unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
return u.Unmarshal(buf)
|
||||
}
|
||||
return NewBuffer(buf).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeMessage reads a count-delimited message from the Buffer.
|
||||
func (p *Buffer) DecodeMessage(pb Message) error {
|
||||
enc, err := p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return NewBuffer(enc).Unmarshal(pb)
|
||||
}
|
||||
|
||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||
// StartGroup tag is already consumed. This function consumes
|
||||
// EndGroup tag.
|
||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||
b := p.buf[p.index:]
|
||||
x, y := findEndGroup(b)
|
||||
if x < 0 {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
err := Unmarshal(b[:x], pb)
|
||||
p.index += y
|
||||
return err
|
||||
}
|
||||
|
||||
// Unmarshal parses the protocol buffer representation in the
|
||||
// Buffer and places the decoded result in pb. If the struct
|
||||
// underlying pb does not match the data in the buffer, the results can be
|
||||
// unpredictable.
|
||||
//
|
||||
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||
func (p *Buffer) Unmarshal(pb Message) error {
|
||||
// If the object can unmarshal itself, let it.
|
||||
if u, ok := pb.(newUnmarshaler); ok {
|
||||
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
if u, ok := pb.(Unmarshaler); ok {
|
||||
// NOTE: The history of proto has unfortunately been inconsistent
|
||||
// whether Unmarshaler should or should not implicitly clear itself.
|
||||
// Some implementations do, most do not.
|
||||
// Thus, calling this here may or may not do what people want.
|
||||
//
|
||||
// See https://github.com/golang/protobuf/issues/424
|
||||
err := u.Unmarshal(p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// Slow workaround for messages that aren't Unmarshalers.
|
||||
// This includes some hand-coded .pb.go files and
|
||||
// bootstrap protos.
|
||||
// TODO: fix all of those and then add Unmarshal to
|
||||
// the Message interface. Then:
|
||||
// The cast above and code below can be deleted.
|
||||
// The old unmarshaler can be deleted.
|
||||
// Clients can call Unmarshal directly (can already do that, actually).
|
||||
var info InternalMessageInfo
|
||||
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||
p.index = len(p.buf)
|
||||
return err
|
||||
}
|
||||
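Unmarshal and UnmarshalMerge above are the usual entry points for decoding. A small sketch of the common round trip; it assumes the ptypes/wrappers package from the same golang/protobuf module is available (that package is not part of this vendored diff):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Marshal a message, then decode it into a fresh value.
	// proto.Unmarshal resets dst first; UnmarshalMerge would instead
	// keep existing data and merge the new fields into it.
	src := &wrappers.StringValue{Value: "mvds"}
	raw, err := proto.Marshal(src)
	if err != nil {
		panic(err)
	}

	var dst wrappers.StringValue
	if err := proto.Unmarshal(raw, &dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Value) // mvds
}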
63
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import "errors"
|
||||
|
||||
// Deprecated: do not use.
|
||||
type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func GetStats() Stats { return Stats{} }
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSet(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSet([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func MarshalMessageSetJSON(interface{}) ([]byte, error) {
|
||||
return nil, errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func UnmarshalMessageSetJSON([]byte, interface{}) error {
|
||||
return errors.New("proto: not implemented")
|
||||
}
|
||||
|
||||
// Deprecated: do not use.
|
||||
func RegisterMessageSetType(Message, int32, string) {}
|
||||
350
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
Normal file
@@ -0,0 +1,350 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type generatedDiscarder interface {
|
||||
XXX_DiscardUnknown()
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields from this message
|
||||
// and all embedded messages.
|
||||
//
|
||||
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||
// of such fields are preserved in the Message. This allows a later call to
|
||||
// marshal to be able to produce a message that continues to have those
|
||||
// unrecognized fields. To avoid this, DiscardUnknown is used to
|
||||
// explicitly clear the unknown fields after unmarshaling.
|
||||
//
|
||||
// For proto2 messages, the unknown fields of message extensions are only
|
||||
// discarded from messages that have been accessed via GetExtension.
|
||||
func DiscardUnknown(m Message) {
|
||||
if m, ok := m.(generatedDiscarder); ok {
|
||||
m.XXX_DiscardUnknown()
|
||||
return
|
||||
}
|
||||
// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
|
||||
// but the master branch has no implementation for InternalMessageInfo,
|
||||
// so it would be more work to replicate that approach.
|
||||
discardLegacy(m)
|
||||
}
|
||||
|
||||
// DiscardUnknown recursively discards all unknown fields.
|
||||
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
|
||||
di := atomicLoadDiscardInfo(&a.discard)
|
||||
if di == nil {
|
||||
di = getDiscardInfo(reflect.TypeOf(m).Elem())
|
||||
atomicStoreDiscardInfo(&a.discard, di)
|
||||
}
|
||||
di.discard(toPointer(&m))
|
||||
}
|
||||
|
||||
type discardInfo struct {
|
||||
typ reflect.Type
|
||||
|
||||
initialized int32 // 0: only typ is valid, 1: everything is valid
|
||||
lock sync.Mutex
|
||||
|
||||
fields []discardFieldInfo
|
||||
unrecognized field
|
||||
}
|
||||
|
||||
type discardFieldInfo struct {
|
||||
field field // Offset of field, guaranteed to be valid
|
||||
discard func(src pointer)
|
||||
}
|
||||
|
||||
var (
|
||||
discardInfoMap = map[reflect.Type]*discardInfo{}
|
||||
discardInfoLock sync.Mutex
|
||||
)
|
||||
|
||||
func getDiscardInfo(t reflect.Type) *discardInfo {
|
||||
discardInfoLock.Lock()
|
||||
defer discardInfoLock.Unlock()
|
||||
di := discardInfoMap[t]
|
||||
if di == nil {
|
||||
di = &discardInfo{typ: t}
|
||||
discardInfoMap[t] = di
|
||||
}
|
||||
return di
|
||||
}
|
||||
|
||||
func (di *discardInfo) discard(src pointer) {
|
||||
if src.isNil() {
|
||||
return // Nothing to do.
|
||||
}
|
||||
|
||||
if atomic.LoadInt32(&di.initialized) == 0 {
|
||||
di.computeDiscardInfo()
|
||||
}
|
||||
|
||||
for _, fi := range di.fields {
|
||||
sfp := src.offset(fi.field)
|
||||
fi.discard(sfp)
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
|
||||
// Ignore lock since DiscardUnknown is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
DiscardUnknown(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if di.unrecognized.IsValid() {
|
||||
*src.offset(di.unrecognized).toBytes() = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (di *discardInfo) computeDiscardInfo() {
|
||||
di.lock.Lock()
|
||||
defer di.lock.Unlock()
|
||||
if di.initialized != 0 {
|
||||
return
|
||||
}
|
||||
t := di.typ
|
||||
n := t.NumField()
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
|
||||
dfi := discardFieldInfo{field: toField(&f)}
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sps := src.getPointerSlice()
|
||||
for _, sp := range sps {
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
di := getDiscardInfo(tf)
|
||||
dfi.discard = func(src pointer) {
|
||||
sp := src.getPointer()
|
||||
if !sp.isNil() {
|
||||
di.discard(sp)
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
|
||||
dfi.discard = func(src pointer) {
|
||||
sm := src.asPointerTo(tf).Elem()
|
||||
if sm.Len() == 0 {
|
||||
return
|
||||
}
|
||||
for _, key := range sm.MapKeys() {
|
||||
val := sm.MapIndex(key)
|
||||
DiscardUnknown(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
dfi.discard = func(pointer) {} // Noop
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
|
||||
default: // E.g., interface{}
|
||||
// TODO: Make this faster?
|
||||
dfi.discard = func(src pointer) {
|
||||
su := src.asPointerTo(tf).Elem()
|
||||
if !su.IsNil() {
|
||||
sv := su.Elem().Elem().Field(0)
|
||||
if sv.Kind() == reflect.Ptr && sv.IsNil() {
|
||||
return
|
||||
}
|
||||
switch sv.Type().Kind() {
|
||||
case reflect.Ptr: // Proto struct (e.g., *T)
|
||||
DiscardUnknown(sv.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
continue
|
||||
}
|
||||
di.fields = append(di.fields, dfi)
|
||||
}
|
||||
|
||||
di.unrecognized = invalidField
|
||||
if f, ok := t.FieldByName("XXX_unrecognized"); ok {
|
||||
if f.Type != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
di.unrecognized = toField(&f)
|
||||
}
|
||||
|
||||
atomic.StoreInt32(&di.initialized, 1)
|
||||
}
|
||||
|
||||
func discardLegacy(m Message) {
|
||||
v := reflect.ValueOf(m)
|
||||
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||
return
|
||||
}
|
||||
v = v.Elem()
|
||||
if v.Kind() != reflect.Struct {
|
||||
return
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
vf := v.Field(i)
|
||||
tf := f.Type
|
||||
|
||||
// Unwrap tf to get its most basic type.
|
||||
var isPointer, isSlice bool
|
||||
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
|
||||
isSlice = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if tf.Kind() == reflect.Ptr {
|
||||
isPointer = true
|
||||
tf = tf.Elem()
|
||||
}
|
||||
if isPointer && isSlice && tf.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
|
||||
}
|
||||
|
||||
switch tf.Kind() {
|
||||
case reflect.Struct:
|
||||
switch {
|
||||
case !isPointer:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
|
||||
case isSlice: // E.g., []*pb.T
|
||||
for j := 0; j < vf.Len(); j++ {
|
||||
discardLegacy(vf.Index(j).Interface().(Message))
|
||||
}
|
||||
default: // E.g., *pb.T
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
case reflect.Map:
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
|
||||
default: // E.g., map[K]V
|
||||
tv := vf.Type().Elem()
|
||||
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
|
||||
for _, key := range vf.MapKeys() {
|
||||
val := vf.MapIndex(key)
|
||||
discardLegacy(val.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.Interface:
|
||||
// Must be oneof field.
|
||||
switch {
|
||||
case isPointer || isSlice:
|
||||
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
|
||||
default: // E.g., test_proto.isCommunique_Union interface
|
||||
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
|
||||
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
|
||||
if !vf.IsNil() {
|
||||
vf = vf.Elem() // E.g., test_proto.Communique_Msg
|
||||
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
|
||||
if vf.Kind() == reflect.Ptr {
|
||||
discardLegacy(vf.Interface().(Message))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
|
||||
if vf.Type() != reflect.TypeOf([]byte{}) {
|
||||
panic("expected XXX_unrecognized to be of type []byte")
|
||||
}
|
||||
vf.Set(reflect.ValueOf([]byte(nil)))
|
||||
}
|
||||
|
||||
// For proto2 messages, only discard unknown fields in message extensions
|
||||
// that have been accessed via GetExtension.
|
||||
if em, err := extendable(m); err == nil {
|
||||
// Ignore lock since discardLegacy is not concurrency safe.
|
||||
emm, _ := em.extensionsRead()
|
||||
for _, mx := range emm {
|
||||
if m, ok := mx.value.(Message); ok {
|
||||
discardLegacy(m)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
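A short illustration of DiscardUnknown: the input below carries field 15, which wrappers.StringValue does not define, so it survives as an unknown field until explicitly discarded. The ptypes/wrappers dependency is assumed, and the hand-written wire bytes are only for demonstration:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Field 1 ("hi", the StringValue payload) plus field 15, which
	// StringValue does not declare, so it is kept as an unknown field.
	raw := []byte{0x0a, 0x02, 'h', 'i', 0x78, 0x2a}

	var m wrappers.StringValue
	if err := proto.Unmarshal(raw, &m); err != nil {
		panic(err)
	}

	before, _ := proto.Marshal(&m) // unknown field is re-emitted
	proto.DiscardUnknown(&m)
	after, _ := proto.Marshal(&m) // unknown field is gone

	fmt.Println(len(before), len(after)) // typically 6 4
}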
203
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
Normal file
@@ -0,0 +1,203 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
|
||||
// Those that take integer types all accept uint64 and are
|
||||
// therefore of type valueEncoder.
|
||||
|
||||
const maxVarintBytes = 10 // maximum length of a varint
|
||||
|
||||
// EncodeVarint returns the varint encoding of x.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
// Not used by the package itself, but helpful to clients
|
||||
// wishing to use the same encoding.
|
||||
func EncodeVarint(x uint64) []byte {
|
||||
var buf [maxVarintBytes]byte
|
||||
var n int
|
||||
for n = 0; x > 127; n++ {
|
||||
buf[n] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
buf[n] = uint8(x)
|
||||
n++
|
||||
return buf[0:n]
|
||||
}
|
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
|
||||
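EncodeVarint, SizeVarint and the Buffer-based EncodeVarint/DecodeVarint compose as follows; a minimal, self-contained sketch:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	x := uint64(300)

	// Standalone helpers: 300 encodes as the two bytes 0xac 0x02.
	enc := proto.EncodeVarint(x)
	fmt.Println(len(enc) == proto.SizeVarint(x)) // true

	// The same value written through a Buffer and read back.
	b := proto.NewBuffer(nil)
	if err := b.EncodeVarint(x); err != nil {
		panic(err)
	}
	y, err := proto.NewBuffer(b.Bytes()).DecodeVarint()
	if err != nil {
		panic(err)
	}
	fmt.Println(y) // 300
}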
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed64, sfixed64, and double protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed64(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24),
|
||||
uint8(x>>32),
|
||||
uint8(x>>40),
|
||||
uint8(x>>48),
|
||||
uint8(x>>56))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeFixed32 writes a 32-bit integer to the Buffer.
|
||||
// This is the format for the
|
||||
// fixed32, sfixed32, and float protocol buffer types.
|
||||
func (p *Buffer) EncodeFixed32(x uint64) error {
|
||||
p.buf = append(p.buf,
|
||||
uint8(x),
|
||||
uint8(x>>8),
|
||||
uint8(x>>16),
|
||||
uint8(x>>24))
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint64 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag64(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
|
||||
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
|
||||
// to the Buffer.
|
||||
// This is the format used for the sint32 protocol buffer type.
|
||||
func (p *Buffer) EncodeZigzag32(x uint64) error {
|
||||
// use signed number to get arithmetic right shift.
|
||||
return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
|
||||
}
|
||||
|
||||
// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
|
||||
// This is the format used for the bytes protocol buffer
|
||||
// type and for embedded messages.
|
||||
func (p *Buffer) EncodeRawBytes(b []byte) error {
|
||||
p.EncodeVarint(uint64(len(b)))
|
||||
p.buf = append(p.buf, b...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// EncodeStringBytes writes an encoded string to the Buffer.
|
||||
// This is the format used for the proto2 string type.
|
||||
func (p *Buffer) EncodeStringBytes(s string) error {
|
||||
p.EncodeVarint(uint64(len(s)))
|
||||
p.buf = append(p.buf, s...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Marshaler is the interface representing objects that can marshal themselves.
|
||||
type Marshaler interface {
|
||||
Marshal() ([]byte, error)
|
||||
}
|
||||
|
||||
// EncodeMessage writes the protocol buffer to the Buffer,
|
||||
// prefixed by a varint-encoded length.
|
||||
func (p *Buffer) EncodeMessage(pb Message) error {
|
||||
siz := Size(pb)
|
||||
p.EncodeVarint(uint64(siz))
|
||||
return p.Marshal(pb)
|
||||
}
|
||||
|
||||
// All protocol buffer fields are nillable, but be careful.
|
||||
func isNil(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
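EncodeMessage above and its counterpart DecodeMessage in decode.go give simple length-prefixed framing of whole messages. A sketch, again assuming the ptypes/wrappers package for a concrete Message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Write two length-prefixed messages back to back...
	buf := proto.NewBuffer(nil)
	for _, v := range []string{"first", "second"} {
		if err := buf.EncodeMessage(&wrappers.StringValue{Value: v}); err != nil {
			panic(err)
		}
	}

	// ...and read them back in order.
	dec := proto.NewBuffer(buf.Bytes())
	for i := 0; i < 2; i++ {
		var m wrappers.StringValue
		if err := dec.DecodeMessage(&m); err != nil {
			panic(err)
		}
		fmt.Println(m.Value)
	}
}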
301
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
Normal file
@@ -0,0 +1,301 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
Equal returns true iff protocol buffers a and b are equal.
|
||||
The arguments must both be pointers to protocol buffer structs.
|
||||
|
||||
Equality is defined in this way:
|
||||
- Two messages are equal iff they are the same type,
|
||||
corresponding fields are equal, unknown field sets
|
||||
are equal, and extensions sets are equal.
|
||||
- Two set scalar fields are equal iff their values are equal.
|
||||
If the fields are of a floating-point type, remember that
|
||||
NaN != x for all x, including NaN. If the message is defined
|
||||
in a proto3 .proto file, fields are not "set"; specifically,
|
||||
zero length proto3 "bytes" fields are equal (nil == {}).
|
||||
- Two repeated fields are equal iff their lengths are the same,
|
||||
and their corresponding elements are equal. Note a "bytes" field,
|
||||
although represented by []byte, is not a repeated field and the
|
||||
rule for the scalar fields described above applies.
|
||||
- Two unset fields are equal.
|
||||
- Two unknown field sets are equal if their current
|
||||
encoded state is equal.
|
||||
- Two extension sets are equal iff they have corresponding
|
||||
elements that are pairwise equal.
|
||||
- Two map fields are equal iff their lengths are the same,
|
||||
and they contain the same set of elements. Zero-length map
|
||||
fields are equal.
|
||||
- Every other combination of things is not equal.
|
||||
|
||||
The return value is undefined if a and b are not protocol buffers.
|
||||
*/
|
||||
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
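A quick sketch of proto.Equal's behaviour on typed and nil messages, assuming the ptypes/wrappers package for a concrete message type:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	a := &wrappers.StringValue{Value: "x"}
	b := &wrappers.StringValue{Value: "x"}
	c := &wrappers.StringValue{Value: "y"}

	fmt.Println(proto.Equal(a, b))     // true: same type, equal fields
	fmt.Println(proto.Equal(a, c))     // false: field values differ
	fmt.Println(proto.Equal(nil, nil)) // true: both nil
}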
607
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
Normal file
@@ -0,0 +1,607 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr, we can directly return sf.value.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non int32 reflect.value directly
|
||||
// set it as a int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
// just the Field field, which defines the extension's field number.
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
	epb, err := extendable(pb)
	if err != nil {
		return nil, err
	}
	registeredExtensions := RegisteredExtensions(pb)

	emap, mu := epb.extensionsRead()
	if emap == nil {
		return nil, nil
	}
	mu.Lock()
	defer mu.Unlock()
	extensions := make([]*ExtensionDesc, 0, len(emap))
	for extid, e := range emap {
		desc := e.desc
		if desc == nil {
			desc = registeredExtensions[extid]
			if desc == nil {
				desc = &ExtensionDesc{Field: extid}
			}
		}

		extensions = append(extensions, desc)
	}
	return extensions, nil
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
	epb, err := extendable(pb)
	if err != nil {
		return err
	}
	if err := checkExtensionTypes(epb, extension); err != nil {
		return err
	}
	typ := reflect.TypeOf(extension.ExtensionType)
	if typ != reflect.TypeOf(value) {
		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
	}
	// nil extension values need to be caught early, because the
	// encoder can't distinguish an ErrNil due to a nil extension
	// from an ErrNil due to a missing field. Extensions are
	// always optional, so the encoder would just swallow the error
	// and drop all the extensions from the encoded message.
	if reflect.ValueOf(value).IsNil() {
		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
	}

	extmap := epb.extensionsWrite()
	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
	return nil
}

// ClearAllExtensions clears all extensions from pb.
func ClearAllExtensions(pb Message) {
	epb, err := extendable(pb)
	if err != nil {
		return
	}
	m := epb.extensionsWrite()
	for k := range m {
		delete(m, k)
	}
}

// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension.

var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)

// RegisterExtension is called from the generated code.
func RegisterExtension(desc *ExtensionDesc) {
	st := reflect.TypeOf(desc.ExtendedType).Elem()
	m := extensionMaps[st]
	if m == nil {
		m = make(map[int32]*ExtensionDesc)
		extensionMaps[st] = m
	}
	if _, ok := m[desc.Field]; ok {
		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
	}
	m[desc.Field] = desc
}

// RegisteredExtensions returns a map of the registered extensions of a
// protocol buffer struct, indexed by the extension number.
// The argument pb should be a nil pointer to the struct type.
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
	return extensionMaps[reflect.TypeOf(pb).Elem()]
}

// extensionAsLegacyType converts an value in the storage type as the API type.
// See Extension.value.
func extensionAsLegacyType(v interface{}) interface{} {
	switch rv := reflect.ValueOf(v); rv.Kind() {
	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
		// Represent primitive types as a pointer to the value.
		rv2 := reflect.New(rv.Type())
		rv2.Elem().Set(rv)
		v = rv2.Interface()
	case reflect.Ptr:
		// Represent slice types as the value itself.
		switch rv.Type().Elem().Kind() {
		case reflect.Slice:
			if rv.IsNil() {
				v = reflect.Zero(rv.Type().Elem()).Interface()
			} else {
				v = rv.Elem().Interface()
			}
		}
	}
	return v
}

// extensionAsStorageType converts an value in the API type as the storage type.
// See Extension.value.
func extensionAsStorageType(v interface{}) interface{} {
	switch rv := reflect.ValueOf(v); rv.Kind() {
	case reflect.Ptr:
		// Represent slice types as the value itself.
		switch rv.Type().Elem().Kind() {
		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
			if rv.IsNil() {
				v = reflect.Zero(rv.Type().Elem()).Interface()
			} else {
				v = rv.Elem().Interface()
			}
		}
	case reflect.Slice:
		// Represent slice types as a pointer to the value.
		if rv.Type().Elem().Kind() != reflect.Uint8 {
			rv2 := reflect.New(rv.Type())
			rv2.Elem().Set(rv)
			v = rv2.Interface()
		}
	}
	return v
}
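Illustrative sketch (not part of the vendored file): how application code typically drives the registry and accessors above. The package "pb", its extendable message pb.Base, and the descriptor pb.E_Nickname are hypothetical stand-ins for protoc-generated code.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/proto"
	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	msg := &pb.Base{}

	// SetExtension checks the value's type against pb.E_Nickname.ExtensionType before storing it.
	if err := proto.SetExtension(msg, pb.E_Nickname, proto.String("fred")); err != nil {
		log.Fatal(err)
	}

	// ExtensionDescs lists descriptors for every extension present on msg;
	// unregistered ones come back with only the Field number filled in.
	descs, err := proto.ExtensionDescs(msg)
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range descs {
		fmt.Println("extension field:", d.Field)
	}
}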
965
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
Normal file
@@ -0,0 +1,965 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and a Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
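A minimal sketch (not part of the vendored file) of how the non-fatal error machinery above surfaces to callers, assuming the proto2 pb.Test message from the package comment above, whose "label" field is required. Standard imports ("log", the proto package, and the hypothetical generated "pb") are assumed.

func encodeLenient(msg proto.Message) []byte {
	data, err := proto.Marshal(msg)
	if _, ok := err.(*proto.RequiredNotSetError); ok {
		// Non-fatal: keep whatever encoding was produced for the fields that were set.
		log.Printf("required field missing, keeping partial encoding: %v", err)
		return data
	}
	if err != nil {
		log.Fatal(err)
	}
	return data
}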
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
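A short sketch of the Buffer API above with deterministic mode enabled; msg stands for any generated proto.Message. As the comment warns, the output is stable only for a single binary, not canonical across languages or releases.

func marshalDeterministic(msg proto.Message) ([]byte, error) {
	var buf proto.Buffer
	buf.SetDeterministic(true) // map fields will be written in sorted key order
	if err := buf.Marshal(msg); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}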
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
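Sketch (not part of the vendored file) using the generated FOO enum from the package comment above: the same call accepts either the symbolic or the numeric JSON form.

func parseFOO(data []byte) (pb.FOO, error) {
	v, err := proto.UnmarshalJSONEnum(pb.FOO_value, data, "example.FOO")
	return pb.FOO(v), err
}

// parseFOO([]byte(`"X"`)) and parseFOO([]byte(`17`)) both yield FOO_X.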
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
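Sketch of a typical DebugPrint call: dump a message's wire encoding tag by tag for debugging. msg is any generated proto.Message; "log" and the proto package are assumed imported.

func dumpWireFormat(msg proto.Message) {
	data, err := proto.Marshal(msg)
	if err != nil {
		log.Fatal(err)
	}
	new(proto.Buffer).DebugPrint("msg wire format", data)
}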
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
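Sketch (not part of the vendored file) with the pb.Test message from the package comment above: its "type" field has a declared default of 77, so SetDefaults allocates and fills it when unset, while fields without declared defaults stay nil.

func newTestWithDefaults() *pb.Test {
	t := &pb.Test{Label: proto.String("hi")}
	proto.SetDefaults(t) // allocates t.Type and sets it to 77, the declared default
	return t
}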
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
181
vendor/github.com/golang/protobuf/proto/message_set.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
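Sketch of the re-framing performed above: each MessageSet item is rewritten as an ordinary length-delimited field whose field number is the item's type ID. id and payload are placeholders; the proto package is assumed imported.

func frameMessageSetItem(id int32, payload []byte) []byte {
	b := proto.EncodeVarint(uint64(id)<<3 | proto.WireBytes) // key: field number = type ID, wire type = bytes
	b = append(b, proto.EncodeVarint(uint64(len(payload)))...) // length varint
	return append(b, payload...)                               // value
}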
360
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
Normal file
@@ -0,0 +1,360 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
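Illustration (not part of the vendored file) of the field representation above: the []int stored in a field value is exactly the index path accepted by reflect's FieldByIndex, which pointer.offset uses later in this file.

type outer struct{ Label *string }

func labelField(o *outer) reflect.Value {
	// The path []int{0} is what a `field` value stores for the first struct field.
	return reflect.ValueOf(o).Elem().FieldByIndex([]int{0})
}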
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
313
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
generated
vendored
Normal file
@@ -0,0 +1,313 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
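Illustration (not part of the vendored file) of offset-based access, the unsafe analogue of the reflect version in pointer_reflect.go: a field is reached by adding its byte offset to the struct's base address, the same pattern pointer.offset uses below. The "unsafe" and "fmt" imports are assumed.

type pair struct{ A, B int32 }

func readB(p *pair) int32 {
	// base pointer + byte offset of the field, reinterpreted as *int32
	return *(*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Offsetof(p.B)))
}

func demo() {
	fmt.Println(readB(&pair{A: 1, B: 2})) // prints 2
}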
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
544 vendor/github.com/golang/protobuf/proto/properties.go generated vendored Normal file
File diff suppressed because it is too large
2776 vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored Normal file
File diff suppressed because it is too large
654 vendor/github.com/golang/protobuf/proto/table_merge.go generated vendored Normal file
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.