Mirror of https://github.com/vacp2p/mvds.git (synced 2026-01-09 12:07:55 -05:00)
Compare commits
31 Commits
| SHA1 |
|---|
| d6b27d3843 |
| daa5e8b8a2 |
| 748b61123f |
| d34db70222 |
| 9f50ecac54 |
| 2512e45906 |
| 945a249144 |
| 463f9a7f6c |
| 217a49bd54 |
| f0cc0e7841 |
| 75124ea56c |
| 46125cfe6c |
| 899aeeacb4 |
| 87839f9f3a |
| 943ab5e228 |
| 3233b23080 |
| a8dc37599b |
| afd2e97e38 |
| 7c42852bfd |
| 3d5dc5b3e3 |
| c9d04bbf91 |
| 72ff1b0d14 |
| 48a12fa637 |
| 6291da6ee1 |
| 645d54357a |
| 291827907c |
| 9e1addbac0 |
| 9af764814a |
| 5fdf62d7b7 |
| b8e366e4e6 |
| ec87e4ab5e |
.gitignore (vendored, 5 lines changed)

```
@@ -10,3 +10,8 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Ignore Intellij config
.idea

mvds
```
.travis.yml (new file, 39 lines)

```yaml
notifications:
  email: false

language: go

install: true

branches:
  only:
    - master

before_install:
  - |
    if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(.md)|(.html)|^(LICENSE)|^(docs)'
    then
      echo "Only docs were updated, not running the CI."
      exit 0
    fi

env:
  - GO111MODULE=on

before_script:
  - make install-linter

matrix:
  include:
    - go: "1.11.x"
      env: GOFLAGS=-mod=vendor
      script:
        - make lint
        # fails without -a
        - go test -a ./... # make test
    - go: "1.12.x"
      env: GOFLAGS=-mod=vendor
      script:
        - make lint
        # fails without -a
        - go test -a ./... # make test
```
LICENSE (2 lines changed)

```diff
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2019 Status
+Copyright (c) 2019 Vac

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
```
Makefile (51 lines changed)

@@ -2,6 +2,57 @@ SHELL := /bin/bash

```makefile
GO111MODULE = on

build:
	go build
.PHONY: build

test:
	go test -v ./...
.PHONY: test

protobuf:
	protoc --go_out=. ./protobuf/*.proto
.PHONY: protobuf

lint:
	golangci-lint run -v
.PHONY: lint

install-linter:
	# install linter
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.17.1
.PHONY: install-linter

mock-install:
	go get -u github.com/golang/mock/mockgen
	go get -u github.com/golang/mock
.PHONY: mock-install

mock:
	mockgen -package=internal -destination=node/internal/syncstate_mock.go -source=state/state.go
.PHONY: mock

vendor:
	go mod tidy
	go mod vendor
	modvendor -copy="**/*.c **/*.h" -v
.PHONY: vendor

generate:
	go generate ./...
.PHONY: generate

create-migration:
	@if [ -z "$$DIR" ]; then \
		echo 'missing DIR var'; \
		exit 1; \
	fi

	@if [ -z "$$NAME" ]; then \
		echo 'missing NAME var'; \
		exit 1; \
	fi

	mkdir -p $(DIR)
	touch $(DIR)/`date +"%s"`_$(NAME).down.sql ./$(DIR)/`date +"%s"`_$(NAME).up.sql
.PHONY: create-migration
```
README.md (19 lines changed)

````diff
@@ -1,15 +1,28 @@
 # Minimal Viable Data Sync

 [](LICENSE)
-[](https://godoc.org/github.com/status-im/mvds) [](https://goreportcard.com/report/github.com/status-im/mvds)
+[](https://godoc.org/github.com/vacp2p/mvds)
+[](https://goreportcard.com/report/github.com/vacp2p/mvds)
+[](https://travis-ci.com/vacp2p/mvds)

-Experimental implementation of the [minimal viable data sync protocol specification](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md).
+Experimental implementation of the [minimal viable data sync protocol specification](https://specs.vac.dev/specs/mvds.html) including the [metadata format specification](https://specs.vac.dev/specs/mdf.html).

 ## Usage

 Listening to MVDS messages is fairly simple:

 ```go
 sub := node.Subscribe()

 for {
 	msg := <-sub
 	print(msg)
 }
 ```

 ### Prerequisites

 Ensure you have `protoc` (Protobuf) and Golang installed. Then run `make`.
````
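The Usage snippet above only covers the receiving side. For context, the sketch below assembles a node the same way `mvds_batch_test.go` and the updated `main.go` later in this comparison do; the fixed peer ID and the `calc` retransmission function are illustrative stand-ins borrowed from that example code, not a prescribed setup.

```go
package main

import (
	"time"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/node"
	"github.com/vacp2p/mvds/peers"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
	"github.com/vacp2p/mvds/transport"
	"go.uber.org/zap"
)

// calc mirrors the Calc retransmission function from main.go in this diff.
func calc(count uint64, epoch int64) int64 {
	return epoch + int64(count*2)
}

func main() {
	logger := zap.NewNop()

	// In-memory channel transport, as used by the batch test below.
	in := make(chan transport.Packet)
	t := transport.NewChannelTransport(0, in)

	id := [65]byte{0x01} // illustrative fixed peer ID, like p1 in the test

	n := node.NewNode(
		store.NewMemoryMessageStore(),
		t,
		state.NewMemorySyncState(),
		calc,
		0, // same initial value the test setup passes
		id,
		node.BatchMode, // or node.InteractiveMode
		peers.NewMemoryPersistence(),
		dependency.NewInMemoryTracker(),
		node.EventualMode,
		logger,
	)

	n.Start(time.Second)

	// Receive side, as shown in the Usage section above.
	sub := n.Subscribe()
	for msg := range sub {
		_ = msg
	}
}
```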
dependency/migrations/migrations.go (new file, 317 lines)

```go
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1572614870_initial_schema.down.sql (30B)
// 1572614870_initial_schema.up.sql (157B)
// doc.go (377B)

package migrations

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
	gz, err := gzip.NewReader(bytes.NewBuffer(data))
	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}

	var buf bytes.Buffer
	_, err = io.Copy(&buf, gz)
	clErr := gz.Close()

	if err != nil {
		return nil, fmt.Errorf("read %q: %v", name, err)
	}
	if clErr != nil {
		return nil, err
	}

	return buf.Bytes(), nil
}

type asset struct {
	bytes  []byte
	info   os.FileInfo
	digest [sha256.Size]byte
}

type bindataFileInfo struct {
	name    string
	size    int64
	mode    os.FileMode
	modTime time.Time
}

func (fi bindataFileInfo) Name() string {
	return fi.name
}
func (fi bindataFileInfo) Size() int64 {
	return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
	return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
	return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
	return false
}
func (fi bindataFileInfo) Sys() interface{} {
	return nil
}

var __1572614870_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x49\x2d\x48\xcd\x4b\x49\xcd\x4b\xce\x4c\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\x13\x64\x97\xf6\x1e\x00\x00\x00")

func _1572614870_initial_schemaDownSqlBytes() ([]byte, error) {
	return bindataRead(
		__1572614870_initial_schemaDownSql,
		"1572614870_initial_schema.down.sql",
	)
}

func _1572614870_initial_schemaDownSql() (*asset, error) {
	bytes, err := _1572614870_initial_schemaDownSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1572614870_initial_schema.down.sql", size: 30, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfc, 0xb3, 0xc3, 0x5e, 0x81, 0xae, 0x77, 0x21, 0x3b, 0xd0, 0xa0, 0x6a, 0xf4, 0x7b, 0xb2, 0x1c, 0x92, 0xd7, 0x5d, 0x4c, 0xa6, 0x4f, 0xe, 0xc6, 0x2d, 0xe4, 0x18, 0x5d, 0x56, 0xe, 0x18, 0x6a}}
	return a, nil
}

var __1572614870_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x49\x2d\x48\xcd\x4b\x49\xcd\x4b\xce\x4c\x2d\x56\xd0\xe0\x52\x50\x50\x50\xc8\x2d\x4e\x8f\xcf\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\x4b\xc2\xd5\x57\x42\x14\xf8\xf9\x87\x28\xf8\x85\xfa\xf8\x70\x69\x5a\x73\x71\x41\x8d\xf7\xf4\x73\x71\x8d\x50\xc8\x4c\xa9\x88\x47\x52\xed\xef\x87\x69\xa1\x06\x42\x5e\xd3\x9a\x0b\x10\x00\x00\xff\xff\x11\xfa\x7b\x28\x9d\x00\x00\x00")

func _1572614870_initial_schemaUpSqlBytes() ([]byte, error) {
	return bindataRead(
		__1572614870_initial_schemaUpSql,
		"1572614870_initial_schema.up.sql",
	)
}

func _1572614870_initial_schemaUpSql() (*asset, error) {
	bytes, err := _1572614870_initial_schemaUpSqlBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "1572614870_initial_schema.up.sql", size: 157, mode: os.FileMode(0644), modTime: time.Unix(1572742027, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xad, 0x62, 0x2e, 0xb1, 0x1c, 0x65, 0x39, 0xb9, 0xe4, 0xae, 0xf6, 0x28, 0x6d, 0xbe, 0x62, 0xba, 0x93, 0x80, 0x6c, 0x47, 0x4f, 0x98, 0x5b, 0xf0, 0xfa, 0x16, 0x2, 0x7e, 0xaa, 0x15, 0x63, 0xf2}}
	return a, nil
}

var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")

func docGoBytes() ([]byte, error) {
	return bindataRead(
		_docGo,
		"doc.go",
	)
}

func docGo() (*asset, error) {
	bytes, err := docGoBytes()
	if err != nil {
		return nil, err
	}

	info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
	return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
		}
		return a.bytes, nil
	}
	return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
	data, err := Asset(name)
	return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
	a, err := Asset(name)
	if err != nil {
		panic("asset: Asset(" + name + "): " + err.Error())
	}

	return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
	return string(MustAsset(name))
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
		}
		return a.info, nil
	}
	return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	if f, ok := _bindata[canonicalName]; ok {
		a, err := f()
		if err != nil {
			return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
		}
		return a.digest, nil
	}
	return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
	mp := make(map[string][sha256.Size]byte, len(_bindata))
	for name := range _bindata {
		a, err := _bindata[name]()
		if err != nil {
			return nil, err
		}
		mp[name] = a.digest
	}
	return mp, nil
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
	names := make([]string, 0, len(_bindata))
	for name := range _bindata {
		names = append(names, name)
	}
	return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
	"1572614870_initial_schema.down.sql": _1572614870_initial_schemaDownSql,
	"1572614870_initial_schema.up.sql":   _1572614870_initial_schemaUpSql,
	"doc.go":                             docGo,
}

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
//     data/
//       foo.txt
//       img/
//         a.png
//         b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
	node := _bintree
	if len(name) != 0 {
		canonicalName := strings.Replace(name, "\\", "/", -1)
		pathList := strings.Split(canonicalName, "/")
		for _, p := range pathList {
			node = node.Children[p]
			if node == nil {
				return nil, fmt.Errorf("Asset %s not found", name)
			}
		}
	}
	if node.Func != nil {
		return nil, fmt.Errorf("Asset %s not found", name)
	}
	rv := make([]string, 0, len(node.Children))
	for childName := range node.Children {
		rv = append(rv, childName)
	}
	return rv, nil
}

type bintree struct {
	Func     func() (*asset, error)
	Children map[string]*bintree
}

var _bintree = &bintree{nil, map[string]*bintree{
	"1572614870_initial_schema.down.sql": &bintree{_1572614870_initial_schemaDownSql, map[string]*bintree{}},
	"1572614870_initial_schema.up.sql":   &bintree{_1572614870_initial_schemaUpSql, map[string]*bintree{}},
	"doc.go":                             &bintree{docGo, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
	data, err := Asset(name)
	if err != nil {
		return err
	}
	info, err := AssetInfo(name)
	if err != nil {
		return err
	}
	err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
	if err != nil {
		return err
	}
	err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
	if err != nil {
		return err
	}
	return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}

// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
	children, err := AssetDir(name)
	// File
	if err != nil {
		return RestoreAsset(dir, name)
	}
	// Dir
	for _, child := range children {
		err = RestoreAssets(dir, filepath.Join(name, child))
		if err != nil {
			return err
		}
	}
	return nil
}

func _filePath(dir, name string) string {
	canonicalName := strings.Replace(name, "\\", "/", -1)
	return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}
```
1572614870_initial_schema.down.sql (new file, 1 line)

```sql
DROP TABLE mvds_dependencies;
```

1572614870_initial_schema.up.sql (new file, 6 lines)

```sql
CREATE TABLE mvds_dependencies (
    msg_id     BLOB PRIMARY KEY,
    dependency BLOB NOT NULL
);

CREATE INDEX idx_dependency ON mvds_dependencies(dependency);
```
dependency/migrations/sqlite/doc.go (new file, 9 lines)

```go
// This file is necessary because "github.com/status-im/migrate/v4"
// can't handle files starting with a prefix. At least that's the case
// for go-bindata.
// If go-bindata is called from the same directory, asset names
// have no prefix and "github.com/status-im/migrate/v4" works as expected.

package sqlite

//go:generate go-bindata -pkg migrations -o ../migrations.go .
```
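The `//go:generate` directive above regenerates `migrations.go`; at runtime the embedded schema is applied through the generated `AssetNames`/`Asset` accessors. A minimal sketch of that wiring, assuming the `persistenceutil.Open` helper exactly as it is used by `tracker_persistency_test.go` later in this comparison:

```go
package main

import (
	"log"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/dependency/migrations"
	"github.com/vacp2p/mvds/persistenceutil"
)

func main() {
	// Open an SQLite database and apply the embedded schema, the same way
	// TestTrackerSQLitePersistence does below; the empty string is the
	// (unused) encryption key passed in that test.
	db, err := persistenceutil.Open("mvds.db", "", persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The persistent tracker satisfies the dependency.Tracker interface.
	var tracker dependency.Tracker = dependency.NewPersistentTracker(db)
	_ = tracker
}
```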
dependency/tracker.go (new file, 12 lines)

```go
package dependency

import (
	"github.com/vacp2p/mvds/state"
)

type Tracker interface {
	Add(msg, dependency state.MessageID) error
	Dependants(id state.MessageID) ([]state.MessageID, error)
	Resolve(msg state.MessageID, dependency state.MessageID) error
	IsResolved(id state.MessageID) (bool, error)
}
```
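The interface is small: `Add` records that a message is blocked on a dependency, `Resolve` clears one dependency, and `IsResolved` reports whether anything is still outstanding. A short sketch of that call pattern, using the in-memory implementation defined just below (the message IDs are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/state"
)

func main() {
	var tracker dependency.Tracker = dependency.NewInMemoryTracker()

	msg := state.MessageID{0x01} // the message that is blocked
	dep := state.MessageID{0x02} // the message it depends on

	_ = tracker.Add(msg, dep) // msg now waits on dep

	resolved, _ := tracker.IsResolved(msg)
	fmt.Println(resolved) // false: one dependency outstanding

	_ = tracker.Resolve(msg, dep) // dep has been delivered

	resolved, _ = tracker.IsResolved(msg)
	fmt.Println(resolved) // true: nothing left to wait for
}
```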
dependency/tracker_memory.go (new file, 71 lines)

```go
package dependency

import (
	"reflect"
	"sync"

	"github.com/vacp2p/mvds/state"
)

// Verify that Tracker interface is implemented.
var _ Tracker = (*inMemoryTracker)(nil)

type inMemoryTracker struct {
	sync.Mutex

	dependents   map[state.MessageID][]state.MessageID
	dependencies map[state.MessageID]int
}

func NewInMemoryTracker() *inMemoryTracker {
	return &inMemoryTracker{
		dependents:   make(map[state.MessageID][]state.MessageID),
		dependencies: make(map[state.MessageID]int),
	}
}

func (md *inMemoryTracker) Add(msg, dependency state.MessageID) error {
	md.Lock()
	defer md.Unlock()
	// @todo check it wasn't already added
	md.dependents[dependency] = append(md.dependents[dependency], msg)
	md.dependencies[msg] += 1
	return nil
}

func (md *inMemoryTracker) Dependants(id state.MessageID) ([]state.MessageID, error) {
	md.Lock()
	defer md.Unlock()

	return md.dependents[id], nil
}

func (md *inMemoryTracker) Resolve(msg state.MessageID, dependency state.MessageID) error {
	md.Lock()
	defer md.Unlock()

	for i, item := range md.dependents[dependency] {
		if !reflect.DeepEqual(msg[:], item[:]) {
			continue
		}

		md.dependents[dependency] = remove(md.dependents[dependency], i)
		md.dependencies[msg] -= 1
		return nil
	}

	return nil
}

func (md *inMemoryTracker) IsResolved(id state.MessageID) (bool, error) {
	md.Lock()
	defer md.Unlock()

	return md.dependencies[id] == 0, nil
}

func remove(s []state.MessageID, i int) []state.MessageID {
	s[len(s)-1], s[i] = s[i], s[len(s)-1]
	return s[:len(s)-1]
}
```
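Note that the `remove` helper above swaps the element to delete with the last one before truncating, so removal is constant time but the order of a message's dependants is not preserved. A tiny stand-alone illustration of the same trick on plain integers:

```go
package main

import "fmt"

func main() {
	// Same swap-with-last technique as the remove helper above.
	s := []int{10, 20, 30, 40}
	i := 1 // drop the element at index 1
	s[len(s)-1], s[i] = s[i], s[len(s)-1]
	s = s[:len(s)-1]
	fmt.Println(s) // [10 40 30]: constant time, order not preserved
}
```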
dependency/tracker_persistency_test.go (new file, 42 lines)

```go
package dependency

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/vacp2p/mvds/dependency/migrations"
	"github.com/vacp2p/mvds/persistenceutil"
	"github.com/vacp2p/mvds/state"
)

func TestTrackerSQLitePersistence(t *testing.T) {
	tmpFile, err := ioutil.TempFile("", "")
	require.NoError(t, err)
	db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
	require.NoError(t, err)
	d := NewPersistentTracker(db)

	msg := state.MessageID{0x01}
	dependency := state.MessageID{0x02}

	err = d.Add(msg, dependency)
	require.NoError(t, err)
	dependants, err := d.Dependants(dependency)
	require.NoError(t, err)
	require.Equal(t, msg, dependants[0])

	res, err := d.IsResolved(msg)
	require.NoError(t, err)
	require.False(t, res)

	err = d.Resolve(msg, dependency)
	require.NoError(t, err)

	res, err = d.IsResolved(msg)
	require.NoError(t, err)
	require.True(t, res)
}
```
dependency/tracker_sqlite.go (new file, 71 lines)

```go
package dependency

import (
	"database/sql"

	"github.com/vacp2p/mvds/state"
)

// Verify that Tracker interface is implemented.
var _ Tracker = (*sqliteTracker)(nil)

type sqliteTracker struct {
	db *sql.DB
}

func NewPersistentTracker(db *sql.DB) *sqliteTracker {
	return &sqliteTracker{db: db}
}

func (sd *sqliteTracker) Add(msg, dependency state.MessageID) error {
	_, err := sd.db.Exec(`INSERT INTO mvds_dependencies (msg_id, dependency) VALUES (?, ?)`, msg[:], dependency[:])
	if err != nil {
		return err
	}

	return nil
}

func (sd *sqliteTracker) Dependants(id state.MessageID) ([]state.MessageID, error) {
	rows, err := sd.db.Query(`SELECT msg_id FROM mvds_dependencies WHERE dependency = ?`, id[:])
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var msgs []state.MessageID

	for rows.Next() {
		var msg []byte
		err := rows.Scan(&msg)
		if err != nil {
			return nil, err
		}

		msgs = append(msgs, state.ToMessageID(msg))
	}

	return msgs, nil
}

func (sd *sqliteTracker) Resolve(msg state.MessageID, dependency state.MessageID) error {
	_, err := sd.db.Exec(
		`DELETE FROM mvds_dependencies WHERE msg_id = ? AND dependency = ?`,
		msg[:],
		dependency[:],
	)

	return err
}

func (sd *sqliteTracker) IsResolved(id state.MessageID) (bool, error) {
	result := sd.db.QueryRow(`SELECT COUNT(*) FROM mvds_dependencies WHERE msg_id = ?`, id[:])
	var num int64
	err := result.Scan(&num)
	if err != nil {
		return false, err
	}

	return num == 0, nil
}
```
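A consequence of counting outstanding rows in `IsResolved` above (and of the zero-value map lookup in the in-memory version) is that a message that was never registered also reports as resolved. A quick check, using the in-memory tracker for brevity:

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/state"
)

func main() {
	tracker := dependency.NewInMemoryTracker()

	// No Add call was made for this ID, so nothing is outstanding for it.
	unknown := state.MessageID{0xAA}
	resolved, _ := tracker.IsResolved(unknown)
	fmt.Println(resolved) // true: zero recorded dependencies counts as resolved
}
```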
go.mod (18 lines changed)

```diff
@@ -1,14 +1,16 @@
-module github.com/status-im/mvds
+module github.com/vacp2p/mvds

 go 1.12

 require (
-	github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17 // indirect
-	github.com/ethereum/go-ethereum v1.8.27
-	github.com/golang/protobuf v1.3.1
-	github.com/kr/pretty v0.1.0 // indirect
+	github.com/golang-migrate/migrate/v4 v4.6.2 // indirect
+	github.com/golang/mock v1.2.0
+	github.com/golang/protobuf v1.3.2
+	github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f // indirect
 	github.com/pkg/errors v0.8.1
-	golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f // indirect
-	golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51
-	gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+	github.com/status-im/migrate/v4 v4.6.2-status.2
+	github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467
+	go.uber.org/atomic v1.4.0 // indirect
+	go.uber.org/multierr v1.1.0 // indirect
+	go.uber.org/zap v1.10.0
 )
```
go.sum (238 lines changed)

@@ -1,56 +1,242 @@
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17 h1:m0N5Vg5nP3zEz8TREZpwX3gt4Biw3/8fbIf4A3hO96g=
|
||||
github.com/btcsuite/btcd v0.0.0-20190427004231-96897255fd17/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495 h1:6IyqGr3fnd0tM3YxipK27TUskaOVUjU2nG45yzwcQKY=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/ethereum/go-ethereum v1.8.23 h1:xVKYpRpe3cbkaWN8gsRgStsyTvz3s82PcQsbEofjhEQ=
|
||||
github.com/ethereum/go-ethereum v1.8.23/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
|
||||
github.com/ethereum/go-ethereum v1.8.27 h1:d+gkiLaBDk5fn3Pe/xNVaMrB/ozI+AUB2IlVBp29IrY=
|
||||
github.com/ethereum/go-ethereum v1.8.27/go.mod h1:PwpWDrCLZrV+tfrhqqF6kPknbISMHaJv9Ln3kPCZLwY=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
|
||||
github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
|
||||
github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2 h1:LDDOHo/q1W5UDj6PbkxdCv7lv9yunyZHXvxuwDkGo3k=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
||||
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f h1:hd3r+uv9DNLScbOrnlj82rBldHQf3XWmCeXAWbw8euQ=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/status-im/migrate/v4 v4.6.2-status.2 h1:SdC+sMDl/aI7vUlwD2qj2p7KsK4T60IS9z4/rYCCbI8=
|
||||
github.com/status-im/migrate/v4 v4.6.2-status.2/go.mod h1:c/kc90n47GZu/58nnz1OMLTf7uE4Da4gZP5qmU+A/v8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467 h1:/pva5wyh0PKqe0bnHBbndEzbqsilMKFNXI0GPbO+L8c=
|
||||
github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo=
|
||||
golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 h1:FP8hkuE6yUEaJnK7O2eTuejKWwW+Rhfj80dQ2JcKxCU=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51 h1:RhYYBLDB5MoVkvoNGMNk+DSj7WoGhySvIvtEjTyiP74=
|
||||
golang.org/x/tools v0.0.0-20190525145741-7be61e1b0e51/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
||||
71
main.go
71
main.go
@@ -8,10 +8,13 @@ import (
|
||||
math "math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/mvds/node"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/status-im/mvds/store"
|
||||
"github.com/status-im/mvds/transport"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
"github.com/vacp2p/mvds/transport"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -23,18 +26,19 @@ var (
|
||||
interactive int
|
||||
)
|
||||
|
||||
func init() {
|
||||
func parseFlags() {
|
||||
flag.IntVar(&offline, "offline", 90, "percentage of time a node is offline")
|
||||
flag.IntVar(&nodeCount, "nodes", 3, "amount of nodes")
|
||||
flag.IntVar(&communicating, "communicating", 2, "amount of nodes sending messages")
|
||||
flag.IntVar(&sharing, "sharing", 2, "amount of nodes each node shares with")
|
||||
flag.Int64Var(&interval, "interval", 5, "seconds between messages")
|
||||
flag.IntVar(&interactive, "interactive", 3, "amount of nodes to use INTERACTIVE mode, the rest will be BATCH") // @todo should probably just be how many nodes are interactive
|
||||
flag.IntVar(&interactive, "interactive", 3, "amount of nodes to use InteractiveMode mode, the rest will be BatchMode") // @todo should probably just be how many nodes are interactive
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
parseFlags()
|
||||
// @todo validate flags
|
||||
|
||||
transports := make([]*transport.ChannelTransport, 0)
|
||||
@@ -48,14 +52,18 @@ func main() {
|
||||
input = append(input, in)
|
||||
transports = append(transports, t)
|
||||
|
||||
mode := node.INTERACTIVE
|
||||
mode := node.InteractiveMode
|
||||
if i+1 >= interactive {
|
||||
mode = node.BATCH
|
||||
mode = node.BatchMode
|
||||
}
|
||||
|
||||
node, err := createNode(t, peerID(), mode)
|
||||
if err != nil {
|
||||
log.Printf("Could not create node: %+v\n", err)
|
||||
}
|
||||
nodes = append(
|
||||
nodes,
|
||||
createNode(t, peerID(), mode),
|
||||
node,
|
||||
)
|
||||
}
|
||||
|
||||
@@ -69,14 +77,14 @@ func main() {
|
||||
peer := nodes[p].ID
|
||||
|
||||
transports[i].AddOutput(peer, input[p])
|
||||
n.AddPeer(group, peer)
|
||||
_ = n.AddPeer(group, peer)
|
||||
|
||||
log.Printf("%x sharing with %x", n.ID[:4], peer[:4])
|
||||
}
|
||||
}
|
||||
|
||||
for _, n := range nodes {
|
||||
n.Start()
|
||||
n.Start(1 * time.Second)
|
||||
}
|
||||
|
||||
chat(group, nodes[:communicating-1]...)
|
||||
@@ -105,17 +113,26 @@ OUTER:
|
||||
return peers
|
||||
}
|
||||
|
||||
func createNode(transport transport.Transport, id state.PeerID, mode node.Mode) *node.Node {
|
||||
ds := store.NewDummyStore()
|
||||
func createNode(transport transport.Transport, id state.PeerID, mode node.Mode) (*node.Node, error) {
|
||||
ds := store.NewMemoryMessageStore()
|
||||
logger, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return node.NewNode(
|
||||
&ds,
|
||||
ds,
|
||||
transport,
|
||||
state.NewSyncState(),
|
||||
state.NewMemorySyncState(),
|
||||
Calc,
|
||||
0,
|
||||
id,
|
||||
mode,
|
||||
)
|
||||
peers.NewMemoryPersistence(),
|
||||
dependency.NewInMemoryTracker(),
|
||||
node.EventualMode,
|
||||
logger,
|
||||
), nil
|
||||
}
|
||||
|
||||
func chat(group state.GroupID, nodes ...*node.Node) {
|
||||
@@ -135,22 +152,16 @@ func Calc(count uint64, epoch int64) int64 {
|
||||
return epoch + int64(count*2)
|
||||
}
|
||||
|
||||
func peerID() state.PeerID {
|
||||
bytes := make([]byte, 65)
|
||||
rand.Read(bytes)
|
||||
func peerID() (id state.PeerID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return
|
||||
}
|
||||
|
||||
id := state.PeerID{}
|
||||
copy(id[:], bytes)
|
||||
|
||||
|
||||
|
||||
func groupId() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func groupId() state.GroupID {
|
||||
bytes := make([]byte, 32)
|
||||
rand.Read(bytes)
|
||||
|
||||
id := state.GroupID{}
|
||||
copy(id[:], bytes)
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
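The Calc function in main.go above is a linear retransmission schedule (epoch + count*2). Purely as an illustration of the CalculateNextEpoch signature introduced in node/node.go further down in this diff, a backoff-style schedule could look like the sketch below; ExpBackoff is not part of the diff.

// ExpBackoff is a hypothetical CalculateNextEpoch implementation: the gap
// between retransmissions of the same record doubles with every send
// attempt (1, 2, 4, 8, ... epochs after the current one).
func ExpBackoff(count uint64, epoch int64) int64 {
	return epoch + (int64(1) << count)
}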
139 mvds_batch_test.go Normal file
@@ -0,0 +1,139 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
"github.com/vacp2p/mvds/transport"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func TestMVDSBatchSuite(t *testing.T) {
|
||||
suite.Run(t, new(MVDSBatchSuite))
|
||||
}
|
||||
|
||||
type MVDSBatchSuite struct {
|
||||
suite.Suite
|
||||
client1 *node.Node
|
||||
client2 *node.Node
|
||||
ds1 store.MessageStore
|
||||
ds2 store.MessageStore
|
||||
state1 state.SyncState
|
||||
state2 state.SyncState
|
||||
peers1 peers.Persistence
|
||||
peers2 peers.Persistence
|
||||
groupID state.GroupID
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) SetupTest() {
|
||||
|
||||
logger := zap.NewNop()
|
||||
|
||||
in1 := make(chan transport.Packet)
|
||||
t1 := transport.NewChannelTransport(0, in1)
|
||||
s.ds1 = store.NewMemoryMessageStore()
|
||||
s.state1 = state.NewMemorySyncState()
|
||||
s.peers1 = peers.NewMemoryPersistence()
|
||||
p1 := [65]byte{0x01}
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.BatchMode, s.peers1, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
in2 := make(chan transport.Packet)
|
||||
t2 := transport.NewChannelTransport(0, in2)
|
||||
s.ds2 = store.NewMemoryMessageStore()
|
||||
s.state2 = state.NewMemorySyncState()
|
||||
p2 := [65]byte{0x02}
|
||||
s.peers2 = peers.NewMemoryPersistence()
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.BatchMode, s.peers2, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
t2.AddOutput(p1, in1)
|
||||
t1.AddOutput(p2, in2)
|
||||
|
||||
s.groupID = [32]byte{0x01, 0x2, 0x3, 0x4}
|
||||
|
||||
s.Require().NoError(s.client1.AddPeer(s.groupID, p2))
|
||||
s.Require().NoError(s.client2.AddPeer(s.groupID, p1))
|
||||
|
||||
// We run the tick manually
|
||||
s.client1.Start(10 * time.Millisecond)
|
||||
s.client2.Start(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TearDownTest() {
|
||||
s.client1.Stop()
|
||||
s.client2.Stop()
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TestSendClient1ToClient2() {
|
||||
subscription := s.client2.Subscribe()
|
||||
content := []byte("message 1")
|
||||
|
||||
messageID, err := s.client1.AppendMessage(s.groupID, content)
|
||||
s.Require().NoError(err)
|
||||
|
||||
// Check message is in store
|
||||
message1Sender, err := s.ds1.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
message := <-subscription
|
||||
s.Equal(message.Body, content)
|
||||
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TestSendClient2ToClient1() {
|
||||
subscription := s.client1.Subscribe()
|
||||
content := []byte("message 1")
|
||||
|
||||
messageID, err := s.client2.AppendMessage(s.groupID, content)
|
||||
s.Require().NoError(err)
|
||||
|
||||
// Check message is in store
|
||||
message1Sender, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
message := <-subscription
|
||||
s.Equal(message.Body, content)
|
||||
|
||||
message1Receiver, err := s.ds1.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TestAcks() {
|
||||
subscription := s.client2.Subscribe()
|
||||
messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
// Check message is in store
|
||||
message1Sender, err := s.ds1.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
// Check state is updated correctly
|
||||
states, err := s.state1.All(s.client1.CurrentEpoch())
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(1, len(states))
|
||||
|
||||
<-subscription
|
||||
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
|
||||
// Check state is removed
|
||||
s.Require().Eventually(func() bool {
|
||||
states, err := s.state1.All(s.client1.CurrentEpoch())
|
||||
return err == nil && len(states) == 0
|
||||
|
||||
}, 1*time.Second, 10*time.Millisecond)
|
||||
}
|
||||
103 mvds_interactive_test.go Normal file
@@ -0,0 +1,103 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
"github.com/vacp2p/mvds/transport"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func TestMVDSInteractiveSuite(t *testing.T) {
|
||||
suite.Run(t, new(MVDSInteractiveSuite))
|
||||
}
|
||||
|
||||
type MVDSInteractiveSuite struct {
|
||||
suite.Suite
|
||||
client1 *node.Node
|
||||
client2 *node.Node
|
||||
ds1 store.MessageStore
|
||||
ds2 store.MessageStore
|
||||
state1 state.SyncState
|
||||
state2 state.SyncState
|
||||
peers1 peers.Persistence
|
||||
peers2 peers.Persistence
|
||||
groupID state.GroupID
|
||||
}
|
||||
|
||||
func (s *MVDSInteractiveSuite) SetupTest() {
|
||||
|
||||
logger := zap.NewNop()
|
||||
|
||||
in1 := make(chan transport.Packet)
|
||||
t1 := transport.NewChannelTransport(0, in1)
|
||||
s.ds1 = store.NewMemoryMessageStore()
|
||||
s.state1 = state.NewMemorySyncState()
|
||||
s.peers1 = peers.NewMemoryPersistence()
|
||||
p1 := [65]byte{0x01}
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.InteractiveMode, s.peers1, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
in2 := make(chan transport.Packet)
|
||||
t2 := transport.NewChannelTransport(0, in2)
|
||||
s.ds2 = store.NewMemoryMessageStore()
|
||||
s.state2 = state.NewMemorySyncState()
|
||||
p2 := [65]byte{0x02}
|
||||
s.peers2 = peers.NewMemoryPersistence()
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.InteractiveMode, s.peers2, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
t2.AddOutput(p1, in1)
|
||||
t1.AddOutput(p2, in2)
|
||||
|
||||
s.groupID = [32]byte{0x01, 0x2, 0x3, 0x4}
|
||||
|
||||
s.Require().NoError(s.client1.AddPeer(s.groupID, p2))
|
||||
s.Require().NoError(s.client2.AddPeer(s.groupID, p1))
|
||||
|
||||
s.client1.Start(10 * time.Millisecond)
|
||||
s.client2.Start(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
func (s *MVDSInteractiveSuite) TearDownTest() {
|
||||
s.client1.Stop()
|
||||
s.client2.Stop()
|
||||
}
|
||||
|
||||
func (s *MVDSInteractiveSuite) TestInteractiveMode() {
|
||||
subscription := s.client2.Subscribe()
|
||||
messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
// Check message is in store
|
||||
message1Sender, err := s.ds1.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
// Check state is updated correctly
|
||||
states, err := s.state1.All(s.client1.CurrentEpoch())
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(1, len(states))
|
||||
|
||||
// Check we store the request
|
||||
s.Require().Eventually(func() bool {
|
||||
states, err := s.state2.All(s.client2.CurrentEpoch())
|
||||
return err == nil && len(states) == 1 && states[0].Type == state.REQUEST
|
||||
}, 1*time.Second, 10*time.Millisecond, "A request is stored in the state")
|
||||
|
||||
<-subscription
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
|
||||
// Check state is removed
|
||||
s.Require().Eventually(func() bool {
|
||||
states, err := s.state1.All(s.client1.CurrentEpoch())
|
||||
return err == nil && len(states) == 0
|
||||
|
||||
}, 1*time.Second, 10*time.Millisecond, "We clear all the state")
|
||||
}
|
||||
33 node/epoch_persistency.go Normal file
@@ -0,0 +1,33 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type epochSQLitePersistence struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func newEpochSQLitePersistence(db *sql.DB) *epochSQLitePersistence {
|
||||
return &epochSQLitePersistence{db: db}
|
||||
}
|
||||
|
||||
func (p *epochSQLitePersistence) Get(nodeID state.PeerID) (epoch int64, err error) {
|
||||
row := p.db.QueryRow(`SELECT epoch FROM mvds_epoch WHERE peer_id = ?`, nodeID[:])
|
||||
err = row.Scan(&epoch)
|
||||
if err == sql.ErrNoRows {
|
||||
err = nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *epochSQLitePersistence) Set(nodeID state.PeerID, epoch int64) error {
|
||||
_, err := p.db.Exec(`
|
||||
INSERT OR REPLACE INTO mvds_epoch (peer_id, epoch) VALUES (?, ?)`,
|
||||
nodeID[:],
|
||||
epoch,
|
||||
)
|
||||
return err
|
||||
}
|
||||
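NewPersistentNode, further down in node/node.go, uses this persistence to restore its epoch on restart. A rough sketch of that load-and-save cycle (package node context; the helper name resumeEpoch is hypothetical and the database setup is omitted):

// resumeEpoch loads the last saved epoch for a peer, lets the caller advance
// it, and writes it back. db is an *sql.DB with the mvds_epoch migration
// already applied.
func resumeEpoch(db *sql.DB, id state.PeerID) error {
	p := newEpochSQLitePersistence(db)

	epoch, err := p.Get(id) // returns 0 with no error when the peer is unknown
	if err != nil {
		return err
	}

	epoch++ // stand-in for the node advancing its epoch while running

	return p.Set(id, epoch) // INSERT OR REPLACE keeps a single row per peer
}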
38 node/epoch_persistency_test.go Normal file
@@ -0,0 +1,38 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vacp2p/mvds/node/migrations"
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
func TestEpochSQLitePersistence(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := newEpochSQLitePersistence(db)
|
||||
|
||||
err = p.Set(state.PeerID{0x01}, 1)
|
||||
require.NoError(t, err)
|
||||
epoch, err := p.Get(state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(1), epoch)
|
||||
|
||||
err = p.Set(state.PeerID{0x01}, 2)
|
||||
require.NoError(t, err)
|
||||
epoch, err = p.Get(state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(2), epoch)
|
||||
|
||||
epoch, err = p.Get(state.PeerID{0xff})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(0), epoch)
|
||||
}
|
||||
91 node/internal/syncstate_mock.go Normal file
@@ -0,0 +1,91 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: state/state.go
|
||||
|
||||
// Package internal is a generated GoMock package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
state "github.com/vacp2p/mvds/state"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// MockSyncState is a mock of SyncState interface
|
||||
type MockSyncState struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockSyncStateMockRecorder
|
||||
}
|
||||
|
||||
// MockSyncStateMockRecorder is the mock recorder for MockSyncState
|
||||
type MockSyncStateMockRecorder struct {
|
||||
mock *MockSyncState
|
||||
}
|
||||
|
||||
// NewMockSyncState creates a new mock instance
|
||||
func NewMockSyncState(ctrl *gomock.Controller) *MockSyncState {
|
||||
mock := &MockSyncState{ctrl: ctrl}
|
||||
mock.recorder = &MockSyncStateMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use
|
||||
func (m *MockSyncState) EXPECT() *MockSyncStateMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Add mocks base method
|
||||
func (m *MockSyncState) Add(newState state.State) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Add", newState)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Add indicates an expected call of Add
|
||||
func (mr *MockSyncStateMockRecorder) Add(newState interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockSyncState)(nil).Add), newState)
|
||||
}
|
||||
|
||||
// Remove mocks base method
|
||||
func (m *MockSyncState) Remove(id state.MessageID, peer state.PeerID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Remove", id, peer)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Remove indicates an expected call of Remove
|
||||
func (mr *MockSyncStateMockRecorder) Remove(id, peer interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockSyncState)(nil).Remove), id, peer)
|
||||
}
|
||||
|
||||
// All mocks base method
|
||||
func (m *MockSyncState) All(epoch int64) ([]state.State, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "All", epoch)
|
||||
ret0, _ := ret[0].([]state.State)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// All indicates an expected call of All
|
||||
func (mr *MockSyncStateMockRecorder) All(epoch interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "All", reflect.TypeOf((*MockSyncState)(nil).All), epoch)
|
||||
}
|
||||
|
||||
// Map mocks base method
|
||||
func (m *MockSyncState) Map(epoch int64, process func(state.State) state.State) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Map", epoch, process)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Map indicates an expected call of Map
|
||||
func (mr *MockSyncStateMockRecorder) Map(epoch, process interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Map", reflect.TypeOf((*MockSyncState)(nil).Map), epoch, process)
|
||||
}
|
||||
317 node/migrations/migrations.go Normal file
@@ -0,0 +1,317 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565345162_initial_schema.down.sql (23B)
|
||||
// 1565345162_initial_schema.up.sql (86B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565345162_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\xb0\xe6\x02\x04\x00\x00\xff\xff\xd3\x00\xf3\x23\x17\x00\x00\x00")
|
||||
|
||||
func _1565345162_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565345162_initial_schemaDownSql,
|
||||
"1565345162_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565345162_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565345162_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7c, 0x69, 0xd2, 0x3, 0xea, 0x82, 0x7c, 0xb3, 0x44, 0x6c, 0xef, 0x64, 0x2c, 0x99, 0x62, 0xa2, 0x8b, 0x6f, 0x96, 0x4f, 0x34, 0x41, 0x87, 0xd5, 0x4e, 0x3, 0x7f, 0x4a, 0xd1, 0x91, 0x9, 0x99}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565345162_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x2d\xc8\x4f\xce\x50\xd0\xe0\x52\x50\x50\x50\x28\x48\x4d\x2d\x8a\xcf\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\xcb\x42\x54\x7a\xfa\x85\xb8\xba\xbb\x06\x29\xf8\xf9\x87\x28\xf8\x85\xfa\xf8\x70\x69\x5a\x73\x01\x02\x00\x00\xff\xff\x51\x96\x2d\xcb\x56\x00\x00\x00")
|
||||
|
||||
func _1565345162_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565345162_initial_schemaUpSql,
|
||||
"1565345162_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565345162_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565345162_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x78, 0x7c, 0xdd, 0x67, 0x61, 0x3e, 0x7f, 0xd4, 0xce, 0xb0, 0x17, 0xbe, 0x5a, 0xa7, 0x9e, 0x93, 0x34, 0xe8, 0xbb, 0x44, 0xfb, 0x88, 0xd6, 0x18, 0x6d, 0x9f, 0xb4, 0x22, 0xda, 0xbc, 0x87, 0x94}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565345162_initial_schema.down.sql": _1565345162_initial_schemaDownSql,
|
||||
"1565345162_initial_schema.up.sql": _1565345162_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565345162_initial_schema.down.sql": &bintree{_1565345162_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565345162_initial_schema.up.sql": &bintree{_1565345162_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
DROP TABLE mvds_epoch;
|
||||
4 node/migrations/sqlite/1565345162_initial_schema.up.sql Normal file
@@ -0,0 +1,4 @@
|
||||
CREATE TABLE mvds_epoch (
|
||||
peer_id BLOB PRIMARY KEY,
|
||||
epoch INTEGER NOT NULL
|
||||
);
|
||||
9 node/migrations/sqlite/doc.go Normal file
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
|
||||
// can't handle files starting with a prefix. At least that's the case
|
||||
// for go-bindata.
|
||||
// If go-bindata is called from the same directory, asset names
|
||||
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
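The generated migrations are consumed through persistenceutil, the same way node/epoch_persistency_test.go earlier in this diff opens its database. A sketch of wiring them into a persistent node; openPersistentNode, dbPath, tr and id are illustrative and not part of the diff, and Calc is the schedule function from main.go:

// openPersistentNode opens (or creates) a SQLite database at dbPath, applies
// the mvds_epoch migration and builds a persistent node on top of it. The
// empty string is the encryption key, as in the test; a nil logger makes
// NewPersistentNode fall back to a no-op zap logger.
func openPersistentNode(dbPath string, tr transport.Transport, id state.PeerID) (*node.Node, error) {
	db, err := persistenceutil.Open(dbPath, "", persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
	if err != nil {
		return nil, err
	}
	return node.NewPersistentNode(db, tr, id, node.InteractiveMode, node.EventualMode, Calc, nil)
}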
697 node/node.go
@@ -1,34 +1,51 @@
|
||||
// Package Node contains node logic.
|
||||
// Package node contains node logic.
|
||||
package node
|
||||
|
||||
// @todo this is a very rough implementation that needs cleanup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"database/sql"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/status-im/mvds/store"
|
||||
"github.com/status-im/mvds/transport"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
"github.com/vacp2p/mvds/transport"
|
||||
)
|
||||
|
||||
// Mode represents the synchronization mode.
|
||||
type Mode int
|
||||
|
||||
const (
|
||||
INTERACTIVE Mode = iota
|
||||
BATCH
|
||||
InteractiveMode Mode = iota + 1
|
||||
BatchMode
|
||||
)
|
||||
|
||||
type calculateNextEpoch func(count uint64, epoch int64) int64
|
||||
// ResolutionMode defines how message dependencies should be resolved.
|
||||
type ResolutionMode int
|
||||
|
||||
const (
|
||||
// EventualMode is non-blocking and will return messages before dependencies are resolved.
|
||||
EventualMode ResolutionMode = iota + 1
|
||||
// ConsistentMode blocks and does not return messages until dependencies have been resolved.
|
||||
ConsistentMode
|
||||
)
|
||||
|
||||
// CalculateNextEpoch is a function used to calculate the next `SendEpoch` for a given message.
|
||||
type CalculateNextEpoch func(count uint64, epoch int64) int64
|
||||
|
||||
// Node represents an MVDS node, it runs all the logic like sending and receiving protocol messages.
|
||||
type Node struct {
|
||||
// This needs to be declared first: https://github.com/golang/go/issues/9959
|
||||
epoch int64
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
@@ -37,18 +54,92 @@ type Node struct {
|
||||
|
||||
syncState state.SyncState
|
||||
|
||||
peers map[state.GroupID][]state.PeerID
|
||||
peers peers.Persistence
|
||||
|
||||
payloads payloads
|
||||
|
||||
nextEpoch calculateNextEpoch
|
||||
dependencies dependency.Tracker
|
||||
|
||||
nextEpoch CalculateNextEpoch
|
||||
|
||||
ID state.PeerID
|
||||
|
||||
epoch int64
|
||||
mode Mode
|
||||
epochPersistence *epochSQLitePersistence
|
||||
|
||||
subscription chan<- protobuf.Message
|
||||
mode Mode
|
||||
resolution ResolutionMode
|
||||
|
||||
subscription chan protobuf.Message
|
||||
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
func NewPersistentNode(
|
||||
db *sql.DB,
|
||||
st transport.Transport,
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
resolution ResolutionMode,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
logger *zap.Logger,
|
||||
) (*Node, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
node := Node{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewPersistentMessageStore(db),
|
||||
transport: st,
|
||||
peers: peers.NewSQLitePersistence(db),
|
||||
syncState: state.NewPersistentSyncState(db),
|
||||
payloads: newPayloads(),
|
||||
epochPersistence: newEpochSQLitePersistence(db),
|
||||
nextEpoch: nextEpoch,
|
||||
dependencies: dependency.NewPersistentTracker(db),
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
resolution: resolution,
|
||||
}
|
||||
if currentEpoch, err := node.epochPersistence.Get(id); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
node.epoch = currentEpoch
|
||||
}
|
||||
return &node, nil
|
||||
}
|
||||
|
||||
func NewEphemeralNode(
|
||||
id state.PeerID,
|
||||
t transport.Transport,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
currentEpoch int64,
|
||||
mode Mode,
|
||||
logger *zap.Logger,
|
||||
) *Node {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
transport: t,
|
||||
syncState: state.NewMemorySyncState(),
|
||||
peers: peers.NewMemoryPersistence(),
|
||||
payloads: newPayloads(),
|
||||
dependencies: dependency.NewInMemoryTracker(),
|
||||
nextEpoch: nextEpoch,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
}
|
||||
}
|
||||
|
||||
// NewNode returns a new node.
|
||||
@@ -56,12 +147,19 @@ func NewNode(
|
||||
ms store.MessageStore,
|
||||
st transport.Transport,
|
||||
ss state.SyncState,
|
||||
nextEpoch calculateNextEpoch,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
currentEpoch int64,
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
pp peers.Persistence,
|
||||
md dependency.Tracker,
|
||||
resolution ResolutionMode,
|
||||
logger *zap.Logger,
|
||||
) *Node {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
if logger == nil {
|
||||
logger = zap.NewNop()
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ctx: ctx,
|
||||
@@ -69,26 +167,33 @@ func NewNode(
|
||||
store: ms,
|
||||
transport: st,
|
||||
syncState: ss,
|
||||
peers: make(map[state.GroupID][]state.PeerID),
|
||||
peers: pp,
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
ID: id,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
dependencies: md,
|
||||
resolution: resolution,
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) CurrentEpoch() int64 {
|
||||
return atomic.LoadInt64(&n.epoch)
|
||||
}
|
||||
|
||||
// Start listens for new messages received by the node and sends out those required every epoch.
|
||||
func (n *Node) Start() {
|
||||
func (n *Node) Start(duration time.Duration) {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
log.Print("Watch stopped")
|
||||
n.logger.Info("Watch stopped")
|
||||
return
|
||||
default:
|
||||
p := n.transport.Watch()
|
||||
go n.onPayload(p.Group, p.Sender, p.Payload)
|
||||
go n.onPayload(p.Sender, p.Payload)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -97,14 +202,22 @@ func (n *Node) Start() {
|
||||
for {
|
||||
select {
|
||||
case <-n.ctx.Done():
|
||||
log.Print("Epoch processing stopped")
|
||||
n.logger.Info("Epoch processing stopped")
|
||||
return
|
||||
default:
|
||||
log.Printf("Node: %x Epoch: %d", n.ID[:4], n.epoch)
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
n.sendMessages()
|
||||
n.logger.Debug("Epoch processing", zap.String("node", hex.EncodeToString(n.ID[:4])), zap.Int64("epoch", n.epoch))
|
||||
time.Sleep(duration)
|
||||
err := n.sendMessages()
|
||||
if err != nil {
|
||||
n.logger.Error("Error sending messages.", zap.Error(err))
|
||||
}
|
||||
atomic.AddInt64(&n.epoch, 1)
|
||||
// When a persistent node is used, the epoch needs to be saved.
|
||||
if n.epochPersistence != nil {
|
||||
if err := n.epochPersistence.Set(n.ID, n.epoch); err != nil {
|
||||
n.logger.Error("Failed to persist epoch", zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -112,259 +225,509 @@ func (n *Node) Start() {
|
||||
|
||||
// Stop message reading and epoch processing
|
||||
func (n *Node) Stop() {
|
||||
n.logger.Info("Stopping node")
|
||||
n.Unsubscribe()
|
||||
n.cancel()
|
||||
}
|
||||
|
||||
// Subscribe subscribes to incoming messages.
|
||||
func (n *Node) Subscribe(sub chan <-protobuf.Message) {
|
||||
n.subscription = sub
|
||||
func (n *Node) Subscribe() chan protobuf.Message {
|
||||
n.subscription = make(chan protobuf.Message)
|
||||
return n.subscription
|
||||
}
|
||||
|
||||
// Unsubscribe closes the listening channels
|
||||
func (n *Node) Unsubscribe() {
|
||||
if n.subscription != nil {
|
||||
close(n.subscription)
|
||||
}
|
||||
n.subscription = nil
|
||||
}
|
||||
|
||||
// AppendMessage sends a message to a given group.
|
||||
func (n *Node) AppendMessage(group state.GroupID, data []byte) (state.MessageID, error) {
|
||||
m := protobuf.Message{
|
||||
GroupId: group[:],
|
||||
func (n *Node) AppendMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
|
||||
p, err := n.store.GetMessagesWithoutChildren(groupID)
|
||||
parents := make([][]byte, len(p))
|
||||
if err != nil {
|
||||
n.logger.Error("Failed to retrieve parents",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
for i, id := range p {
|
||||
parents[i] = id[:]
|
||||
}
|
||||
|
||||
return n.AppendMessageWithMetadata(groupID, data, &protobuf.Metadata{Ephemeral: false, Parents: parents})
|
||||
}
|
||||
|
||||
// AppendEphemeralMessage sends a message to a given group that has the `no_ack_required` flag set to `true`.
|
||||
func (n *Node) AppendEphemeralMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
|
||||
return n.AppendMessageWithMetadata(groupID, data, &protobuf.Metadata{Ephemeral: true})
|
||||
}
|
||||
|
||||
// AppendMessageWithMetadata sends a message to a given group with metadata.
|
||||
func (n *Node) AppendMessageWithMetadata(groupID state.GroupID, data []byte, metadata *protobuf.Metadata) (state.MessageID, error) {
|
||||
m := &protobuf.Message{
|
||||
GroupId: groupID[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: data,
|
||||
Metadata: metadata,
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
|
||||
peers, ok := n.peers[group]
|
||||
if !ok {
|
||||
return state.MessageID{}, fmt.Errorf("trying to send to unknown group %x", group[:4])
|
||||
}
|
||||
|
||||
err := n.store.Add(m)
|
||||
if err != nil {
|
||||
return state.MessageID{}, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
if !n.IsPeerInGroup(group, p) {
|
||||
continue
|
||||
}
|
||||
err = n.broadcastToGroup(groupID, n.ID, m)
|
||||
if err != nil {
|
||||
return state.MessageID{}, err
|
||||
}
|
||||
|
||||
t := state.OFFER
|
||||
if n.mode == BATCH {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, p, t)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("[%x] node %x sending %x\n", group[:4], n.ID[:4], id[:4])
|
||||
// @todo think about a way to insta trigger send messages when send was selected, we don't wanna wait for ticks here
|
||||
n.logger.Debug("Appending Message to Sync State",
|
||||
zap.String("node", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("id", hex.EncodeToString(id[:4])))
|
||||
// @todo think about a way to insta trigger pushToSub messages when pushToSub was selected, we don't wanna wait for ticks here
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
||||
// RequestMessage adds a REQUEST record to the next payload for a given message ID.
|
||||
func (n *Node) RequestMessage(group state.GroupID, id state.MessageID) error {
|
||||
peers, ok := n.peers[group]
|
||||
if !ok {
|
||||
peers, err := n.peers.GetByGroupID(group)
|
||||
if err != nil {
|
||||
return fmt.Errorf("trying to request from an unknown group %x", group[:4])
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, p := range peers {
|
||||
if !n.IsPeerInGroup(group, p) {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, p, state.REQUEST)
|
||||
}
|
||||
}()
|
||||
for _, p := range peers {
|
||||
n.insertSyncState(&group, id, p, state.REQUEST)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// AddPeer adds a peer to a specific group making it a recipient of messages.
|
||||
func (n *Node) AddPeer(group state.GroupID, id state.PeerID) {
|
||||
if _, ok := n.peers[group]; !ok {
|
||||
n.peers[group] = make([]state.PeerID, 0)
|
||||
}
|
||||
|
||||
n.peers[group] = append(n.peers[group], id)
|
||||
func (n *Node) AddPeer(group state.GroupID, id state.PeerID) error {
|
||||
return n.peers.Add(group, id)
|
||||
}
|
||||
|
||||
// IsPeerInGroup checks whether a peer is in the specified group.
|
||||
func (n Node) IsPeerInGroup(g state.GroupID, p state.PeerID) bool {
|
||||
for _, peer := range n.peers[g] {
|
||||
if bytes.Equal(peer[:], p[:]) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
func (n *Node) IsPeerInGroup(g state.GroupID, p state.PeerID) (bool, error) {
|
||||
return n.peers.Exists(g, p)
|
||||
}
|
||||
|
||||
func (n *Node) sendMessages() {
|
||||
err := n.syncState.Map(n.epoch, func(g state.GroupID, m state.MessageID, p state.PeerID, s state.State) state.State {
|
||||
if !n.IsPeerInGroup(g, p) {
|
||||
return s
|
||||
}
|
||||
func (n *Node) sendMessages() error {
|
||||
|
||||
var toRemove []state.State
|
||||
|
||||
err := n.syncState.Map(n.epoch, func(s state.State) state.State {
|
||||
m := s.MessageID
|
||||
p := s.PeerID
|
||||
switch s.Type {
|
||||
case state.OFFER:
|
||||
n.payloads.AddOffers(g, p, m[:])
|
||||
n.payloads.AddOffers(p, m[:])
|
||||
case state.REQUEST:
|
||||
n.payloads.AddRequests(g, p, m[:])
|
||||
log.Printf("[%x] sending REQUEST (%x -> %x): %x\n", g[:4], n.ID[:4], p[:4], m[:4])
|
||||
n.payloads.AddRequests(p, m[:])
|
||||
n.logger.Debug("sending REQUEST",
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(p[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
)
|
||||
|
||||
case state.MESSAGE:
|
||||
msg, err := n.store.Get(m)
|
||||
g := *s.GroupID
|
||||
exist, err := n.IsPeerInGroup(g, p)
|
||||
if err != nil {
|
||||
log.Printf("failed to retreive message %x %s", m[:4], err.Error())
|
||||
return s
|
||||
}
|
||||
|
||||
n.payloads.AddMessages(g, p, &msg)
|
||||
log.Printf("[%x] sending MESSAGE (%x -> %x): %x\n", g[:4], n.ID[:4], p[:4], m[:4])
|
||||
if !exist {
|
||||
return s
|
||||
}
|
||||
|
||||
msg, err := n.store.Get(m)
|
||||
if err != nil {
|
||||
n.logger.Error("Failed to retrieve message",
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
zap.Error(err),
|
||||
)
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
n.payloads.AddMessages(p, msg)
|
||||
n.logger.Debug("sending MESSAGE",
|
||||
zap.String("groupID", hex.EncodeToString(g[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(p[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
)
|
||||
|
||||
if msg.Metadata != nil && msg.Metadata.Ephemeral {
|
||||
toRemove = append(toRemove, s)
|
||||
}
|
||||
}
|
||||
|
||||
return n.updateSendEpoch(s)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
log.Printf("error while mapping sync state: %s", err.Error())
|
||||
n.logger.Error("error while mapping sync state", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
n.payloads.MapAndClear(func(id state.GroupID, peer state.PeerID, payload protobuf.Payload) {
|
||||
err := n.transport.Send(id, n.ID, peer, payload)
|
||||
return n.payloads.MapAndClear(func(peer state.PeerID, payload protobuf.Payload) error {
|
||||
err := n.transport.Send(n.ID, peer, payload)
|
||||
if err != nil {
|
||||
log.Printf("error sending message: %s", err.Error())
|
||||
// @todo
|
||||
n.logger.Error("error sending message", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
}
|
||||
|
||||
func (n *Node) onPayload(group state.GroupID, sender state.PeerID, payload protobuf.Payload) {
|
||||
func (n *Node) onPayload(sender state.PeerID, payload protobuf.Payload) {
|
||||
// Acks, Requests and Offers are all arrays of bytes, as protobuf doesn't allow type aliases; otherwise arrays of messageIDs would be nicer.
|
||||
n.onAck(group, sender, payload.Acks)
|
||||
n.onRequest(group, sender, payload.Requests)
|
||||
n.onOffer(group, sender, payload.Offers)
|
||||
n.payloads.AddAcks(group, sender, n.onMessages(group, sender, payload.Messages)...)
|
||||
if err := n.onAck(sender, payload.Acks); err != nil {
|
||||
n.logger.Error("error processing acks", zap.Error(err))
|
||||
}
|
||||
if err := n.onRequest(sender, payload.Requests); err != nil {
|
||||
n.logger.Error("error processing requests", zap.Error(err))
|
||||
}
|
||||
if err := n.onOffer(sender, payload.Offers); err != nil {
|
||||
n.logger.Error("error processing offers", zap.Error(err))
|
||||
}
|
||||
messageIds := n.onMessages(sender, payload.Messages)
|
||||
n.payloads.AddAcks(sender, messageIds)
|
||||
}
|
||||
|
||||
func (n *Node) onOffer(group state.GroupID, sender state.PeerID, offers [][]byte) {
|
||||
func (n *Node) onOffer(sender state.PeerID, offers [][]byte) error {
|
||||
for _, raw := range offers {
|
||||
id := toMessageID(raw)
|
||||
log.Printf("[%x] OFFER (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
id := state.ToMessageID(raw)
|
||||
n.logger.Debug("OFFER received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
exist, err := n.store.Has(id)
|
||||
// @todo maybe ack?
|
||||
if n.store.Has(id) {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, sender, state.REQUEST)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) onRequest(group state.GroupID, sender state.PeerID, requests [][]byte) {
|
||||
for _, raw := range requests {
|
||||
id := toMessageID(raw)
|
||||
log.Printf("[%x] REQUEST (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
|
||||
if !n.IsPeerInGroup(group, sender) {
|
||||
log.Printf("[%x] peer %x is not in group", group[:4], sender[:4])
|
||||
continue
|
||||
}
|
||||
|
||||
if !n.store.Has(id) {
|
||||
log.Printf("message %x does not exist", id[:4])
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, sender, state.MESSAGE)
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) onAck(group state.GroupID, sender state.PeerID, acks [][]byte) {
|
||||
for _, raw := range acks {
|
||||
id := toMessageID(raw)
|
||||
|
||||
err := n.syncState.Remove(group, id, sender)
|
||||
if err != nil {
|
||||
log.Printf("error while removing sync state %s", err.Error())
|
||||
return err
|
||||
}
|
||||
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[%x] ACK (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
n.insertSyncState(nil, id, sender, state.REQUEST)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onMessages(group state.GroupID, sender state.PeerID, messages []*protobuf.Message) [][]byte {
|
||||
func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
|
||||
for _, raw := range requests {
|
||||
id := state.ToMessageID(raw)
|
||||
n.logger.Debug("REQUEST received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
message, err := n.store.Get(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if message == nil {
|
||||
n.logger.Error("message does not exist", zap.String("messageID", hex.EncodeToString(id[:4])))
|
||||
continue
|
||||
}
|
||||
|
||||
groupID := state.ToGroupID(message.GroupId)
|
||||
|
||||
exist, err := n.IsPeerInGroup(groupID, sender)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !exist {
|
||||
n.logger.Error("peer is not in group",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("peer", hex.EncodeToString(sender[:4])),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&groupID, id, sender, state.MESSAGE)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onAck(sender state.PeerID, acks [][]byte) error {
|
||||
for _, ack := range acks {
|
||||
id := state.ToMessageID(ack)
|
||||
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil {
|
||||
n.logger.Error("Error while removing sync state.", zap.Error(err))
|
||||
return err
|
||||
}
|
||||
|
||||
n.logger.Debug("ACK received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) onMessages(sender state.PeerID, messages []*protobuf.Message) [][]byte {
|
||||
a := make([][]byte, 0)
|
||||
|
||||
for _, m := range messages {
|
||||
err := n.onMessage(group, sender, *m)
|
||||
groupID := state.ToGroupID(m.GroupId)
|
||||
err := n.onMessage(sender, m)
|
||||
if err != nil {
|
||||
// @todo
|
||||
n.logger.Error("Error processing message", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
log.Printf("[%x] sending ACK (%x -> %x): %x\n", group[:4], n.ID[:4], sender[:4], id[:4])
|
||||
|
||||
if m.Metadata != nil && m.Metadata.Ephemeral {
|
||||
n.logger.Debug("not sending ACK",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(sender[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
n.logger.Debug("sending ACK",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("to", hex.EncodeToString(sender[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
a = append(a, id[:])
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
func (n *Node) onMessage(group state.GroupID, sender state.PeerID, msg protobuf.Message) error {
|
||||
// @todo cleanup this function
|
||||
func (n *Node) onMessage(sender state.PeerID, msg *protobuf.Message) error {
|
||||
id := msg.ID()
|
||||
log.Printf("[%x] MESSAGE (%x -> %x): %x received.\n", group[:4], sender[:4], n.ID[:4], id[:4])
|
||||
groupID := state.ToGroupID(msg.GroupId)
|
||||
n.logger.Debug("MESSAGE received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
|
||||
err := n.syncState.Remove(group, id, sender)
|
||||
if err != nil {
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil && err != state.ErrStateNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
for _, peer := range n.peers[group] {
|
||||
if peer == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(group, id, peer, state.OFFER)
|
||||
if msg.Metadata == nil || !msg.Metadata.Ephemeral {
|
||||
err = n.store.Add(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}()
|
||||
|
||||
if n.subscription != nil {
|
||||
n.subscription <- msg
|
||||
}
|
||||
|
||||
err = n.store.Add(msg)
|
||||
err = n.broadcastToGroup(groupID, sender, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
// @todo process, should this function ever even have an error?
|
||||
}
|
||||
|
||||
n.resolve(sender, msg)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) broadcastToGroup(group state.GroupID, sender state.PeerID, msg *protobuf.Message) error {
|
||||
p, err := n.peers.GetByGroupID(group)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id := msg.ID()
|
||||
|
||||
for _, peer := range p {
|
||||
if peer == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
t := state.OFFER
|
||||
if n.mode == BatchMode || (msg.Metadata == nil && !msg.Metadata.Ephemeral) {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
|
||||
n.insertSyncState(&group, id, peer, t)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) insertSyncState(group state.GroupID, id state.MessageID, p state.PeerID, t state.RecordType) {
|
||||
// @todo I do not think this will work, this needs to be some recursive function
|
||||
// @todo add method to select depth of how far we resolve dependencies
|
||||
|
||||
func (n *Node) resolve(sender state.PeerID, msg *protobuf.Message) {
|
||||
if n.resolution == EventualMode {
|
||||
n.resolveEventually(sender, msg)
|
||||
return
|
||||
}
|
||||
|
||||
n.resolveConsistently(sender, msg)
|
||||
}
|
||||
|
||||
func (n *Node) resolveEventually(sender state.PeerID, msg *protobuf.Message) {
|
||||
if msg.Metadata == nil || len(msg.Metadata.Parents) == 0 {
|
||||
n.pushToSub(msg)
|
||||
return
|
||||
}
|
||||
|
||||
for _, parent := range msg.Metadata.Parents {
|
||||
pid := state.ToMessageID(parent)
|
||||
|
||||
if has, _ := n.store.Has(pid); has {
|
||||
continue
|
||||
}
|
||||
|
||||
group := state.ToGroupID(msg.GroupId)
|
||||
n.insertSyncState(&group, pid, sender, state.REQUEST)
|
||||
}
|
||||
|
||||
n.pushToSub(msg)
|
||||
}
|
||||
|
||||
func (n *Node) resolveConsistently(sender state.PeerID, msg *protobuf.Message) {
|
||||
id := msg.ID()
|
||||
|
||||
// We push any messages whose parents have now been resolved
|
||||
dependants, err := n.dependencies.Dependants(id)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting dependants",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
for _, dependant := range dependants {
|
||||
err := n.dependencies.Resolve(dependant, id)
|
||||
if err != nil {
|
||||
n.logger.Error("error marking resolved dependency",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(dependant[:4])),
|
||||
zap.String("dependency", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
resolved, err := n.dependencies.IsResolved(dependant)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting unresolved dependencies",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(dependant[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
if !resolved {
|
||||
continue
|
||||
}
|
||||
|
||||
dmsg, err := n.store.Get(dependant)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting message",
|
||||
zap.Error(err),
|
||||
zap.String("messageID", hex.EncodeToString(dependant[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
if dmsg != nil {
|
||||
n.pushToSub(dmsg)
|
||||
}
|
||||
}
|
||||
|
||||
// @todo add parent dependencies to child, then we can have multiple levels?
|
||||
if msg.Metadata == nil || len(msg.Metadata.Parents) == 0 {
|
||||
n.pushToSub(msg)
|
||||
return
|
||||
}
|
||||
|
||||
hasUnresolvedDependencies := false
|
||||
for _, parent := range msg.Metadata.Parents {
|
||||
pid := state.ToMessageID(parent)
|
||||
|
||||
if has, _ := n.store.Has(pid); has {
|
||||
continue
|
||||
}
|
||||
|
||||
group := state.ToGroupID(msg.GroupId)
|
||||
n.insertSyncState(&group, pid, sender, state.REQUEST)
|
||||
hasUnresolvedDependencies = true
|
||||
|
||||
err := n.dependencies.Add(id, pid)
|
||||
if err != nil {
|
||||
n.logger.Error("error adding dependency",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(id[:4])),
|
||||
zap.String("dependency", hex.EncodeToString(pid[:4])),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if hasUnresolvedDependencies {
|
||||
return
|
||||
}
|
||||
|
||||
n.pushToSub(msg)
|
||||
}
|
||||
|
||||
func (n *Node) pushToSub(msg *protobuf.Message) {
|
||||
if n.subscription == nil {
|
||||
return
|
||||
}
|
||||
|
||||
n.subscription <- *msg
|
||||
}
|
||||
|
||||
func (n *Node) insertSyncState(groupID *state.GroupID, messageID state.MessageID, peerID state.PeerID, t state.RecordType) {
|
||||
s := state.State{
|
||||
GroupID: groupID,
|
||||
MessageID: messageID,
|
||||
PeerID: peerID,
|
||||
Type: t,
|
||||
SendEpoch: n.epoch + 1,
|
||||
}
|
||||
|
||||
err := n.syncState.Set(group, id, p, s)
|
||||
err := n.syncState.Add(s)
|
||||
if err != nil {
|
||||
log.Printf("error (%s) setting sync state group: %x id: %x peer: %x", err.Error(), group[:4], id[:4], p[:4])
|
||||
n.logger.Error("error setting sync states",
|
||||
zap.Error(err),
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(messageID[:4])),
|
||||
zap.String("peerID", hex.EncodeToString(peerID[:4])),
|
||||
)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func (n Node) updateSendEpoch(s state.State) state.State {
|
||||
func (n *Node) updateSendEpoch(s state.State) state.State {
|
||||
s.SendCount += 1
|
||||
s.SendEpoch += n.nextEpoch(s.SendCount, n.epoch)
|
||||
s.SendEpoch = n.nextEpoch(s.SendCount, n.epoch)
|
||||
return s
|
||||
}
|
||||
|
||||
func toMessageID(b []byte) state.MessageID {
|
||||
var id state.MessageID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
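Taken together, resolveEventually pushes a received message to the subscriber right away and merely requests any missing parents, while resolveConsistently holds a message back until every parent is in the store and re-releases dependants once their last parent arrives. A minimal consumer sketch, assuming a Node has already been constructed and started (constructor and transport wiring are not part of this diff); only Subscribe and the protobuf.Message channel it returns are taken from the code above, and handle is a placeholder for application logic:

// consume drains the subscription channel of a running node.
// With consistent resolution, a message only arrives here after all of its
// Metadata.Parents are already present in the node's message store.
func consume(n *Node) {
	sub := n.Subscribe() // chan protobuf.Message, as used in node_test.go

	go func() {
		for msg := range sub {
			handle(msg) // placeholder for application-specific handling
		}
	}()
}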
||||
130
node/node_test.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node/internal"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
)
|
||||
|
||||
func TestNode_resolveEventually(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncstate := internal.NewMockSyncState(ctrl)
|
||||
|
||||
node := Node{
|
||||
syncState: syncstate,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
}
|
||||
|
||||
channel := node.Subscribe()
|
||||
|
||||
peer := peerID()
|
||||
group := groupID()
|
||||
parent := messageID()
|
||||
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x01},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{parent[:]}},
|
||||
}
|
||||
|
||||
expectedState := state.State{
|
||||
GroupID: &group,
|
||||
MessageID: parent,
|
||||
PeerID: peer,
|
||||
Type: state.REQUEST,
|
||||
SendEpoch: 1,
|
||||
}
|
||||
|
||||
syncstate.EXPECT().Add(expectedState).Return(nil)
|
||||
|
||||
go node.resolveEventually(peer, msg)
|
||||
|
||||
received := <-channel
|
||||
|
||||
if !reflect.DeepEqual(*msg, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNode_resolveConsistently(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncstate := internal.NewMockSyncState(ctrl)
|
||||
|
||||
node := Node{
|
||||
syncState: syncstate,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
dependencies: dependency.NewInMemoryTracker(),
|
||||
}
|
||||
|
||||
channel := node.Subscribe()
|
||||
|
||||
peer := peerID()
|
||||
group := groupID()
|
||||
|
||||
parent := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x02},
|
||||
}
|
||||
|
||||
parentID := parent.ID()
|
||||
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x01},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{parentID[:]}},
|
||||
}
|
||||
|
||||
// @todo we need to make sure to add the message because we are going through a subset of the flow
|
||||
_ = node.store.Add(msg)
|
||||
|
||||
syncstate.EXPECT().Add(gomock.Any()).DoAndReturn(func(state.State) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
node.resolveConsistently(peer, msg)
|
||||
|
||||
go node.resolveConsistently(peer, parent)
|
||||
|
||||
received := <-channel
|
||||
|
||||
if !reflect.DeepEqual(*msg, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
|
||||
received = <-channel
|
||||
|
||||
if !reflect.DeepEqual(*parent, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func peerID() (id state.PeerID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func groupID() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func messageID() (id state.MessageID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
@@ -3,92 +3,90 @@ package node
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type payloads struct {
|
||||
sync.Mutex
|
||||
|
||||
payloads map[state.GroupID]map[state.PeerID]protobuf.Payload
|
||||
payloads map[state.PeerID]protobuf.Payload
|
||||
}
|
||||
|
||||
// @todo check in all the functions below that we aren't duplicating stuff
|
||||
|
||||
func newPayloads() payloads {
|
||||
return payloads{
|
||||
payloads: make(map[state.GroupID]map[state.PeerID]protobuf.Payload),
|
||||
payloads: make(map[state.PeerID]protobuf.Payload),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *payloads) AddOffers(group state.GroupID, peer state.PeerID, offers ...[]byte) {
|
||||
func (p *payloads) AddOffers(peer state.PeerID, offers ...[]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Offers = append(payload.Offers, offers...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddAcks(group state.GroupID, peer state.PeerID, acks ...[]byte) {
|
||||
func (p *payloads) AddAcks(peer state.PeerID, acks [][]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Requests = append(payload.Requests, acks...)
|
||||
payload.Acks = append(payload.Acks, acks...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddRequests(group state.GroupID, peer state.PeerID, request ...[]byte) {
|
||||
func (p *payloads) AddRequests(peer state.PeerID, request ...[]byte) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
|
||||
payload.Requests = append(payload.Requests, request...)
|
||||
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) AddMessages(group state.GroupID, peer state.PeerID, messages ...*protobuf.Message) {
|
||||
func (p *payloads) AddMessages(peer state.PeerID, messages ...*protobuf.Message) {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
payload := p.get(group, peer)
|
||||
payload := p.get(peer)
|
||||
if payload.Messages == nil {
|
||||
payload.Messages = make([]*protobuf.Message, 0)
|
||||
}
|
||||
|
||||
payload.Messages = append(payload.Messages, messages...)
|
||||
p.set(group, peer, payload)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) MapAndClear(f func(state.GroupID, state.PeerID, protobuf.Payload)) {
|
||||
func (p *payloads) MapAndClear(f func(state.PeerID, protobuf.Payload) error) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
for g, payloads := range p.payloads {
|
||||
for peer, payload := range payloads {
|
||||
f(g, peer, payload)
|
||||
for peer, payload := range p.payloads {
|
||||
err := f(peer, payload)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
p.payloads = make(map[state.GroupID]map[state.PeerID]protobuf.Payload)
|
||||
// TODO: this should only be called upon confirmation that the message has been sent
|
||||
p.payloads = make(map[state.PeerID]protobuf.Payload)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *payloads) get(id state.GroupID, peer state.PeerID) protobuf.Payload {
|
||||
payload, _ := p.payloads[id][peer]
|
||||
return payload
|
||||
func (p *payloads) get(peer state.PeerID) protobuf.Payload {
|
||||
return p.payloads[peer]
|
||||
}
|
||||
|
||||
func (p *payloads) set(id state.GroupID, peer state.PeerID, payload protobuf.Payload) {
|
||||
_, ok := p.payloads[id]
|
||||
if !ok {
|
||||
p.payloads[id] = make(map[state.PeerID]protobuf.Payload)
|
||||
}
|
||||
|
||||
p.payloads[id][peer] = payload
|
||||
func (p *payloads) set(peer state.PeerID, payload protobuf.Payload) {
|
||||
p.payloads[peer] = payload
|
||||
}
|
||||
|
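With this change the payload map is keyed by peer only, so everything queued for a peer (offers, acks, requests, messages) is flushed as a single Payload. A rough sketch of the flow, internal to the node since payloads is unexported; the send callback is a stand-in for the real transport:

// flushPayloads queues one message for a peer and then drains every per-peer
// payload through the supplied send callback. Returning an error from the
// callback aborts MapAndClear early.
func flushPayloads(p *payloads, peer state.PeerID, msg *protobuf.Message,
	send func(state.PeerID, *protobuf.Payload) error) error {

	p.AddMessages(peer, msg)

	return p.MapAndClear(func(to state.PeerID, payload protobuf.Payload) error {
		return send(to, &payload)
	})
}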
||||
317
peers/migrations/migrations.go
Normal file
@@ -0,0 +1,317 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565249278_initial_schema.down.sql (23B)
|
||||
// 1565249278_initial_schema.up.sql (140B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565249278_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\xb6\xe6\x02\x04\x00\x00\xff\xff\x58\x44\x68\xf7\x17\x00\x00\x00")
|
||||
|
||||
func _1565249278_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565249278_initial_schemaDownSql,
|
||||
"1565249278_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565249278_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565249278_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4, 0xfb, 0x5, 0x92, 0xf0, 0x93, 0xaa, 0x83, 0xb7, 0xdf, 0x66, 0xe2, 0x97, 0x53, 0x9d, 0x34, 0xd3, 0xca, 0x97, 0xd8, 0xe1, 0xed, 0xf0, 0x4a, 0x94, 0x1a, 0xb1, 0x8f, 0xcf, 0xc, 0xa4, 0x6}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565249278_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x48\x4d\x2d\x2a\x56\xd0\xe0\x52\x50\x50\x50\x48\x2f\xca\x2f\x2d\x88\xcf\x4c\x51\x70\xf2\xf1\x77\x52\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\xd1\x01\xcb\x81\xd4\xe1\x90\x0a\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\x54\xd0\x80\x99\xa1\x03\xd3\xa1\xa9\xe0\xef\xa7\xe0\xec\xef\xe7\xe6\xe3\xe9\x1c\xa2\x10\xe4\x1a\xe0\xe3\xe8\xec\xca\xa5\x69\xcd\x05\x08\x00\x00\xff\xff\x67\x63\xbf\x36\x8c\x00\x00\x00")
|
||||
|
||||
func _1565249278_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565249278_initial_schemaUpSql,
|
||||
"1565249278_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565249278_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565249278_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8a, 0xbc, 0x3a, 0x87, 0x12, 0x93, 0xeb, 0xb4, 0xcc, 0x42, 0x6e, 0xb2, 0x7d, 0xfa, 0x9a, 0xa8, 0x3f, 0xb, 0x6b, 0xa8, 0x2d, 0x8b, 0xde, 0x67, 0x2a, 0xa8, 0xa5, 0x42, 0xad, 0x27, 0x15, 0x7e}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565249278_initial_schema.down.sql": _1565249278_initial_schemaDownSql,
|
||||
"1565249278_initial_schema.up.sql": _1565249278_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565249278_initial_schema.down.sql": &bintree{_1565249278_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565249278_initial_schema.up.sql": &bintree{_1565249278_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
DROP TABLE mvds_peers;
|
||||
5
peers/migrations/sqlite/1565249278_initial_schema.up.sql
Normal file
@@ -0,0 +1,5 @@
|
||||
CREATE TABLE mvds_peers (
|
||||
group_id BLOB NOT NULL,
|
||||
peer_id BLOB NOT NULL,
|
||||
PRIMARY KEY (group_id, peer_id) ON CONFLICT REPLACE
|
||||
);
|
||||
9
peers/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
|
||||
// can't handle files starting with a prefix. At least that's the case
|
||||
// for go-bindata.
|
||||
// If go-bindata is called from the same directory, asset names
|
||||
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
11
peers/persistence.go
Normal file
@@ -0,0 +1,11 @@
|
||||
package peers
|
||||
|
||||
import (
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type Persistence interface {
|
||||
Add(state.GroupID, state.PeerID) error
|
||||
GetByGroupID(group state.GroupID) ([]state.PeerID, error)
|
||||
Exists(state.GroupID, state.PeerID) (bool, error)
|
||||
}
|
||||
32
peers/persistence_memory.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package peers
|
||||
|
||||
import "github.com/vacp2p/mvds/state"
|
||||
|
||||
type memoryPersistence struct {
|
||||
peers map[state.GroupID][]state.PeerID
|
||||
}
|
||||
|
||||
func NewMemoryPersistence() *memoryPersistence {
|
||||
return &memoryPersistence{
|
||||
peers: make(map[state.GroupID][]state.PeerID),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *memoryPersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
p.peers[groupID] = append(p.peers[groupID], peerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *memoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
for _, peer := range p.peers[groupID] {
|
||||
if peer == peerID {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *memoryPersistence) GetByGroupID(groupID state.GroupID) ([]state.PeerID, error) {
|
||||
return p.peers[groupID], nil
|
||||
}
|
||||
|
||||
63
peers/persistence_sqlite.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package peers
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrPeerNotFound = errors.New("peer not found")
|
||||
)
|
||||
|
||||
type sqlitePersistence struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewSQLitePersistence(db *sql.DB) sqlitePersistence {
|
||||
return sqlitePersistence{db: db}
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
_, err := p.db.Exec(`INSERT INTO mvds_peers (group_id, peer_id) VALUES (?, ?)`, groupID[:], peerID[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
var result bool
|
||||
err := p.db.QueryRow(
|
||||
`SELECT EXISTS(SELECT 1 FROM mvds_peers WHERE group_id = ? AND peer_id = ?)`,
|
||||
groupID[:],
|
||||
peerID[:],
|
||||
).Scan(&result)
|
||||
switch err {
|
||||
case sql.ErrNoRows:
|
||||
return false, ErrPeerNotFound
|
||||
case nil:
|
||||
return result, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (p sqlitePersistence) GetByGroupID(groupID state.GroupID) (result []state.PeerID, err error) {
|
||||
rows, err := p.db.Query(`SELECT peer_id FROM mvds_peers WHERE group_id = ?`, groupID[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var (
|
||||
peerIDBytes []byte
|
||||
peerID state.PeerID
|
||||
)
|
||||
if err := rows.Scan(&peerIDBytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
copy(peerID[:], peerIDBytes)
|
||||
result = append(result, peerID)
|
||||
}
|
||||
return
|
||||
}
|
||||
57
peers/persistence_sqlite_test.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package peers
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/vacp2p/mvds/peers/migrations"
|
||||
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestSQLitePersistence(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewSQLitePersistence(db)
|
||||
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
// Add the same again.
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
// Add another peer to the same group.
|
||||
err = p.Add(state.GroupID{0x01}, state.PeerID{0x02})
|
||||
require.NoError(t, err)
|
||||
// Create a new group.
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x02})
|
||||
require.NoError(t, err)
|
||||
err = p.Add(state.GroupID{0x02}, state.PeerID{0x03})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Validate group 0x01.
|
||||
peers, err := p.GetByGroupID(state.GroupID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []state.PeerID{state.PeerID{0x01}, state.PeerID{0x02}}, peers)
|
||||
// Validate group 0x02.
|
||||
peers, err = p.GetByGroupID(state.GroupID{0x02})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []state.PeerID{state.PeerID{0x01}, state.PeerID{0x02}, state.PeerID{0x03}}, peers)
|
||||
|
||||
// Validate existence method.
|
||||
exists, err := p.Exists(state.GroupID{0x01}, state.PeerID{0x01})
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists)
|
||||
exists, err = p.Exists(state.GroupID{0x01}, state.PeerID{0xFF})
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists)
|
||||
}
|
||||
70
persistenceutil/migrations.go
Normal file
@@ -0,0 +1,70 @@
|
||||
package persistenceutil
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"strings"
|
||||
|
||||
nodemigrations "github.com/vacp2p/mvds/node/migrations"
|
||||
peersmigrations "github.com/vacp2p/mvds/peers/migrations"
|
||||
statemigrations "github.com/vacp2p/mvds/state/migrations"
|
||||
storemigrations "github.com/vacp2p/mvds/store/migrations"
|
||||
)
|
||||
|
||||
type getter func(string) ([]byte, error)
|
||||
|
||||
type Migration struct {
|
||||
Names []string
|
||||
Getter func(name string) ([]byte, error)
|
||||
}
|
||||
|
||||
func prepareMigrations(migrations []Migration) ([]string, getter, error) {
|
||||
var allNames []string
|
||||
nameToGetter := make(map[string]getter)
|
||||
|
||||
for _, m := range migrations {
|
||||
for _, name := range m.Names {
|
||||
if !validateName(name) {
|
||||
continue
|
||||
}
|
||||
|
||||
if _, ok := nameToGetter[name]; ok {
|
||||
return nil, nil, errors.Errorf("migration with name %s already exists", name)
|
||||
}
|
||||
allNames = append(allNames, name)
|
||||
nameToGetter[name] = m.Getter
|
||||
}
|
||||
}
|
||||
|
||||
return allNames, func(name string) ([]byte, error) {
|
||||
getter, ok := nameToGetter[name]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("no migration for name %s", name)
|
||||
}
|
||||
return getter(name)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DefaultMigrations is a collection of all mvds components migrations.
|
||||
var DefaultMigrations = []Migration{
|
||||
{
|
||||
Names: nodemigrations.AssetNames(),
|
||||
Getter: nodemigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: peersmigrations.AssetNames(),
|
||||
Getter: peersmigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: statemigrations.AssetNames(),
|
||||
Getter: statemigrations.Asset,
|
||||
},
|
||||
{
|
||||
Names: storemigrations.AssetNames(),
|
||||
Getter: storemigrations.Asset,
|
||||
},
|
||||
}
|
||||
|
||||
// validateName verifies that only *.sql files are taken into consideration.
|
||||
func validateName(name string) bool {
|
||||
return strings.HasSuffix(name, ".sql")
|
||||
}
|
||||
124
persistenceutil/sqlite.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package persistenceutil
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/status-im/migrate/v4"
|
||||
"github.com/status-im/migrate/v4/database/sqlcipher"
|
||||
bindata "github.com/status-im/migrate/v4/source/go_bindata"
|
||||
)
|
||||
|
||||
// The reduced number of kdf iterations (for performance reasons) which is
|
||||
// currently used for derivation of the database key
|
||||
// https://github.com/status-im/status-go/pull/1343
|
||||
// https://notes.status.im/i8Y_l7ccTiOYq09HVgoFwA
|
||||
const reducedKdfIterationsNumber = 3200
|
||||
|
||||
// MigrationConfig is a struct that allows defining bindata migrations.
|
||||
type MigrationConfig struct {
|
||||
AssetNames []string
|
||||
AssetGetter func(name string) ([]byte, error)
|
||||
}
|
||||
|
||||
// Migrate migrates a provided sql.DB using the default mvds migrations.
|
||||
func Migrate(db *sql.DB) error {
|
||||
assetNames, assetGetter, err := prepareMigrations(DefaultMigrations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return ApplyMigrations(db, assetNames, assetGetter)
|
||||
|
||||
}
|
||||
|
||||
// Open opens or initializes a new database for a given file path.
|
||||
// MigrationConfig is optional, but if provided, migrations are applied automatically.
|
||||
func Open(path, key string, mc ...MigrationConfig) (*sql.DB, error) {
|
||||
return open(path, key, reducedKdfIterationsNumber, mc)
|
||||
}
|
||||
|
||||
// OpenWithIter allows opening a new database with a custom number of kdf iterations.
|
||||
// A higher kdf iteration count makes opening the database slower.
|
||||
func OpenWithIter(path, key string, kdfIter int, mc ...MigrationConfig) (*sql.DB, error) {
|
||||
return open(path, key, kdfIter, mc)
|
||||
}
|
||||
|
||||
func open(path string, key string, kdfIter int, configs []MigrationConfig) (*sql.DB, error) {
|
||||
_, err := os.OpenFile(path, os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := sql.Open("sqlite3", path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
keyString := fmt.Sprintf("PRAGMA key = '%s'", key)
|
||||
|
||||
// Disable concurrent access as not supported by the driver
|
||||
db.SetMaxOpenConns(1)
|
||||
|
||||
if _, err = db.Exec("PRAGMA foreign_keys=ON"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = db.Exec(keyString); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kdfString := fmt.Sprintf("PRAGMA kdf_iter = '%d'", kdfIter)
|
||||
|
||||
if _, err = db.Exec(kdfString); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply all provided migrations.
|
||||
for _, mc := range configs {
|
||||
if err := ApplyMigrations(db, mc.AssetNames, mc.AssetGetter); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// ApplyMigrations allows to apply bindata migrations on the current *sql.DB.
|
||||
// `assetNames` is a list of assets with migrations and `assetGetter` is responsible
|
||||
// for returning the content of the asset with a given name.
|
||||
func ApplyMigrations(db *sql.DB, assetNames []string, assetGetter func(name string) ([]byte, error)) error {
|
||||
resources := bindata.Resource(
|
||||
assetNames,
|
||||
assetGetter,
|
||||
)
|
||||
|
||||
source, err := bindata.WithInstance(resources)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create migration source")
|
||||
}
|
||||
|
||||
driver, err := sqlcipher.WithInstance(db, &sqlcipher.Config{
|
||||
MigrationsTable: "mvds_" + sqlcipher.DefaultMigrationsTable,
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create driver")
|
||||
}
|
||||
|
||||
m, err := migrate.NewWithInstance(
|
||||
"go-bindata",
|
||||
source,
|
||||
"sqlcipher",
|
||||
driver,
|
||||
)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to create migration instance")
|
||||
}
|
||||
|
||||
if err = m.Up(); err != migrate.ErrNoChange {
|
||||
return errors.Wrap(err, "failed to migrate")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
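The test in peers/persistence_sqlite_test.go already exercises this entry point; roughly the same pattern opens an encrypted store and applies one component's migrations. The path and key below are placeholders, and migrations stands for one of the generated migration packages (for example github.com/vacp2p/mvds/peers/migrations):

// openStore opens (or creates) the database at path with the given key and
// applies the component's bindata migrations.
func openStore(path, key string) (*sql.DB, error) {
	return persistenceutil.Open(path, key, persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
}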
||||
@@ -4,7 +4,7 @@ import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
// ID creates the MessageID for a Message
|
||||
|
||||
7
protobuf/payload.go
Normal file
@@ -0,0 +1,7 @@
|
||||
package protobuf
|
||||
|
||||
// IsValid checks whether any known fields are set in the protobuf
|
||||
// message
|
||||
func (m *Payload) IsValid() bool {
|
||||
return len(m.Messages)+len(m.Acks)+len(m.Offers)+len(m.Requests) != 0
|
||||
}
|
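In other words, a payload carrying only acknowledgements (or only offers, requests or messages) is valid, while an empty payload can be dropped. A quick illustration, with ackID as a placeholder message ID:

// payloadValidity returns false for an empty payload and true once any field is set.
func payloadValidity(ackID state.MessageID) (emptyValid, acksValid bool) {
	empty := &protobuf.Payload{}
	acksOnly := &protobuf.Payload{Acks: [][]byte{ackID[:]}}

	return empty.IsValid(), acksOnly.IsValid() // false, true
}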
||||
@@ -83,20 +83,68 @@ func (m *Payload) GetMessages() []*Message {
|
||||
return nil
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
GroupId []byte `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
|
||||
type Metadata struct {
|
||||
Parents [][]byte `protobuf:"bytes,1,rep,name=parents,proto3" json:"parents,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,2,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Metadata) Reset() { *m = Metadata{} }
|
||||
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metadata) ProtoMessage() {}
|
||||
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2dca527c092c79d7, []int{1}
|
||||
}
|
||||
|
||||
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Metadata.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Metadata) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metadata.Merge(m, src)
|
||||
}
|
||||
func (m *Metadata) XXX_Size() int {
|
||||
return xxx_messageInfo_Metadata.Size(m)
|
||||
}
|
||||
func (m *Metadata) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Metadata.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Metadata proto.InternalMessageInfo
|
||||
|
||||
func (m *Metadata) GetParents() [][]byte {
|
||||
if m != nil {
|
||||
return m.Parents
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Metadata) GetEphemeral() bool {
|
||||
if m != nil {
|
||||
return m.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
GroupId []byte `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
|
||||
Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2dca527c092c79d7, []int{1}
|
||||
return fileDescriptor_2dca527c092c79d7, []int{2}
|
||||
}
|
||||
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
@@ -138,27 +186,38 @@ func (m *Message) GetBody() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetMetadata() *Metadata {
|
||||
if m != nil {
|
||||
return m.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Payload)(nil), "mvds.Payload")
|
||||
proto.RegisterType((*Message)(nil), "mvds.Message")
|
||||
proto.RegisterType((*Payload)(nil), "vac.mvds.Payload")
|
||||
proto.RegisterType((*Metadata)(nil), "vac.mvds.Metadata")
|
||||
proto.RegisterType((*Message)(nil), "vac.mvds.Message")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("protobuf/sync.proto", fileDescriptor_2dca527c092c79d7) }
|
||||
|
||||
var fileDescriptor_2dca527c092c79d7 = []byte{
|
||||
// 212 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x8f, 0xb1, 0x4a, 0x04, 0x31,
|
||||
0x10, 0x86, 0xd9, 0xcb, 0x72, 0x1b, 0xc7, 0xb3, 0x19, 0x41, 0xa2, 0x58, 0x84, 0xab, 0x62, 0xb3,
|
||||
0x82, 0xbe, 0x81, 0x9d, 0x85, 0x20, 0x29, 0x2c, 0x6c, 0x24, 0x7b, 0xc9, 0x1e, 0x87, 0xc6, 0xac,
|
||||
0x99, 0xac, 0xb0, 0xe0, 0xc3, 0xcb, 0x8d, 0xe7, 0x5e, 0xf7, 0x7f, 0xdf, 0xcf, 0x30, 0x33, 0x70,
|
||||
0x3e, 0xe4, 0x54, 0x52, 0x37, 0xf6, 0xb7, 0x34, 0x7d, 0x6e, 0x5a, 0x26, 0xac, 0xe3, 0xb7, 0xa7,
|
||||
0xf5, 0x0f, 0x34, 0xcf, 0x6e, 0xfa, 0x48, 0xce, 0x23, 0x42, 0xed, 0x36, 0xef, 0xa4, 0x2a, 0x2d,
|
||||
0xcc, 0xca, 0x72, 0xc6, 0x0b, 0x58, 0xa6, 0xbe, 0x0f, 0x99, 0xd4, 0x82, 0xed, 0x81, 0xf0, 0x0a,
|
||||
0x64, 0x0e, 0x5f, 0x63, 0xa0, 0x42, 0x4a, 0x70, 0x33, 0x33, 0xde, 0x80, 0x8c, 0x81, 0xc8, 0x6d,
|
||||
0x03, 0xa9, 0x5a, 0x0b, 0x73, 0x7a, 0x77, 0xd6, 0xee, 0x77, 0xb5, 0x4f, 0x7f, 0xd6, 0xce, 0xf5,
|
||||
0xfa, 0x05, 0x9a, 0x83, 0xc4, 0x4b, 0x90, 0xdb, 0x9c, 0xc6, 0xe1, 0x6d, 0xe7, 0x55, 0xa5, 0x2b,
|
||||
0xb3, 0xb2, 0x0d, 0xf3, 0xa3, 0xc7, 0x6b, 0x38, 0x29, 0xbb, 0x18, 0xa8, 0xb8, 0x38, 0xa8, 0x85,
|
||||
0xae, 0x8c, 0xb0, 0x47, 0xb1, 0x3f, 0xbb, 0x4b, 0x7e, 0x52, 0x82, 0x87, 0x38, 0x3f, 0xc0, 0xab,
|
||||
0xfc, 0x7f, 0xb9, 0x5b, 0x72, 0xba, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x0a, 0x11, 0xee,
|
||||
0x05, 0x01, 0x00, 0x00,
|
||||
// 265 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x50, 0x3d, 0x4f, 0xc3, 0x30,
|
||||
0x10, 0x95, 0x9b, 0xaa, 0x35, 0x57, 0x16, 0x0e, 0x09, 0x19, 0xc4, 0x10, 0x65, 0xca, 0x42, 0x90,
|
||||
0xca, 0x3f, 0xe8, 0xc6, 0x50, 0x09, 0x65, 0x64, 0x41, 0x97, 0xf8, 0x52, 0x2a, 0x9a, 0x3a, 0xd8,
|
||||
0x4e, 0xa5, 0x6c, 0x4c, 0xfc, 0x6e, 0x14, 0xa7, 0x49, 0xd9, 0xde, 0xc7, 0xf9, 0xf9, 0xde, 0xc1,
|
||||
0x6d, 0x63, 0x8d, 0x37, 0x45, 0x5b, 0x3d, 0xbb, 0xee, 0x58, 0x66, 0x81, 0xa1, 0x3c, 0x51, 0x99,
|
||||
0xd5, 0x27, 0xed, 0x92, 0x1f, 0x01, 0xcb, 0x37, 0xea, 0x0e, 0x86, 0x34, 0x22, 0xcc, 0xa9, 0xfc,
|
||||
0x72, 0x4a, 0xc4, 0x51, 0x7a, 0x9d, 0x07, 0x8c, 0x77, 0xb0, 0x30, 0x55, 0xc5, 0xd6, 0xa9, 0x59,
|
||||
0x50, 0xcf, 0x0c, 0x1f, 0x40, 0x5a, 0xfe, 0x6e, 0xd9, 0x79, 0xa7, 0xa2, 0xe0, 0x4c, 0x1c, 0x9f,
|
||||
0x40, 0xd6, 0xec, 0x1c, 0xed, 0xd8, 0xa9, 0x79, 0x1c, 0xa5, 0xab, 0xf5, 0x4d, 0x36, 0x7e, 0x98,
|
||||
0x6d, 0x07, 0x27, 0x9f, 0x46, 0x92, 0x0d, 0xc8, 0x2d, 0x7b, 0xd2, 0xe4, 0x09, 0x15, 0x2c, 0x1b,
|
||||
0xb2, 0x7c, 0xf4, 0xe3, 0x16, 0x23, 0xc5, 0x47, 0xb8, 0xe2, 0xe6, 0x93, 0x6b, 0xb6, 0x74, 0x50,
|
||||
0xb3, 0x58, 0xa4, 0x32, 0xbf, 0x08, 0xc9, 0xaf, 0x80, 0xe5, 0x39, 0x19, 0xef, 0x41, 0xee, 0xac,
|
||||
0x69, 0x9b, 0x8f, 0xbd, 0x56, 0x22, 0x16, 0x7d, 0x48, 0xe0, 0xaf, 0xba, 0x0f, 0xf1, 0xfb, 0x9a,
|
||||
0x9d, 0xa7, 0xba, 0x09, 0x21, 0x51, 0x7e, 0x11, 0xfa, 0xfe, 0x85, 0xd1, 0x9d, 0x8a, 0xc2, 0xa3,
|
||||
0x80, 0x31, 0xeb, 0xbb, 0x0c, 0xcb, 0xa9, 0x79, 0x2c, 0xd2, 0xd5, 0x1a, 0xff, 0x77, 0x19, 0x9c,
|
||||
0x7c, 0x9a, 0xd9, 0xc0, 0xbb, 0x1c, 0x0f, 0x5e, 0x2c, 0x02, 0x7a, 0xf9, 0x0b, 0x00, 0x00, 0xff,
|
||||
0xff, 0x8f, 0x0f, 0x88, 0x6c, 0x83, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package mvds;
|
||||
package vac.mvds;
|
||||
option go_package = "protobuf";
|
||||
|
||||
message Payload {
|
||||
@@ -10,8 +10,14 @@ message Payload {
|
||||
repeated Message messages = 4;
|
||||
}
|
||||
|
||||
message Metadata {
|
||||
repeated bytes parents = 1;
|
||||
bool ephemeral = 2;
|
||||
}
|
||||
|
||||
message Message {
|
||||
bytes group_id = 1;
|
||||
int64 timestamp = 2;
|
||||
bytes body = 3;
|
||||
Metadata metadata = 4;
|
||||
}
|
||||
|
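The new Metadata field is how a message names its parents and marks itself ephemeral, which is exactly what the node_test.go cases above construct. A small helper sketch, with group, parent and body supplied by the caller:

// newChildMessage builds a message that declares a single parent, so receivers
// using consistent resolution will hold it back until the parent has been synced.
func newChildMessage(group state.GroupID, parent state.MessageID, body []byte) *protobuf.Message {
	return &protobuf.Message{
		GroupId:   group[:],
		Timestamp: time.Now().Unix(),
		Body:      body,
		Metadata: &protobuf.Metadata{
			Ephemeral: false,
			Parents:   [][]byte{parent[:]},
		},
	}
}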
||||
317
state/migrations/migrations.go
Normal file
@@ -0,0 +1,317 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565341329_initial_schema.down.sql (24B)
|
||||
// 1565341329_initial_schema.up.sql (294B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565341329_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xb7\x43\xc1\xc1\x18\x00\x00\x00")
|
||||
|
||||
func _1565341329_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565341329_initial_schemaDownSql,
|
||||
"1565341329_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565341329_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1565341329_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x20, 0x56, 0x1a, 0x0, 0xc5, 0x81, 0xb3, 0xeb, 0x2a, 0xae, 0xed, 0xbb, 0x68, 0x51, 0x68, 0xc7, 0xe3, 0x31, 0xe, 0x1, 0x3e, 0xd2, 0x85, 0x9e, 0x6d, 0x55, 0xad, 0x55, 0xd6, 0x2f, 0x29, 0xca}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8f\xc1\x8a\x83\x30\x14\x45\xf7\xf9\x8a\xbb\x54\xf0\x0f\x5c\xe9\x4c\x18\x64\xd2\x58\x42\x0a\x75\x15\xc4\x3c\xac\x0b\x35\x98\x58\xda\xbf\x2f\xb4\x15\xa5\x60\xb7\xf7\x1c\x1e\xef\xfc\x28\x9e\x69\x0e\x9d\xe5\x82\xa3\xbf\x5a\x6f\x7c\xa8\x03\x79\x44\x0c\x00\xc2\xdd\x11\x0a\xa9\xf9\x1f\x57\x90\xa5\x86\x3c\x09\x91\x3c\x91\xa7\xc1\x9a\x66\x9c\x87\xf0\x4d\x20\x37\x36\x97\x1d\xa1\x9d\xc6\xd9\x99\xce\x22\x17\x65\xfe\x9a\x1c\xd1\xb4\x2c\x1f\x76\x4f\xde\xd7\x2d\xed\xd0\xa3\x2a\x0e\x99\xaa\xf0\xcf\x2b\x44\xab\x9a\x2c\x17\x63\x16\xa7\x8c\xbd\x6b\x0b\xf9\xcb\xcf\xe8\xec\xcd\x6c\x7e\x2c\xe5\xb6\x3f\x5a\x49\x9c\xb2\x47\x00\x00\x00\xff\xff\x5e\xe5\x72\x74\x26\x01\x00\x00")
|
||||
|
||||
func _1565341329_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565341329_initial_schemaUpSql,
|
||||
"1565341329_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1565341329_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1565341329_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 294, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xa5, 0x37, 0x9d, 0x3f, 0xf3, 0xc9, 0xc8, 0x12, 0x74, 0x79, 0x74, 0xff, 0xfd, 0xb1, 0x5f, 0x13, 0xaf, 0xf2, 0x50, 0x14, 0x9f, 0xdf, 0xc8, 0xc5, 0xa7, 0xc3, 0xf5, 0xa4, 0x8e, 0x8a, 0xf6}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565341329_initial_schema.down.sql": _1565341329_initial_schemaDownSql,
|
||||
"1565341329_initial_schema.up.sql": _1565341329_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1565341329_initial_schema.down.sql": &bintree{_1565341329_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1565341329_initial_schema.up.sql": &bintree{_1565341329_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
DROP TABLE mvds_states;
|
||||
11
state/migrations/sqlite/1565341329_initial_schema.up.sql
Normal file
@@ -0,0 +1,11 @@
|
||||
CREATE TABLE mvds_states (
|
||||
type INTEGER NOT NULL,
|
||||
send_count INTEGER NOT NULL,
|
||||
send_epoch INTEGER NOT NULL,
|
||||
group_id BLOB,
|
||||
peer_id BLOB NOT NULL,
|
||||
message_id BLOB NOT NULL,
|
||||
PRIMARY KEY (message_id, peer_id)
|
||||
);
|
||||
|
||||
CREATE INDEX idx_send_epoch ON mvds_states(send_epoch);
|
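The idx_send_epoch index exists so the store can cheaply pick out records that are due for (re)sending. The real query lives in state/state_sqlite.go, which is only partially shown in this diff, so the following is an assumption-laden sketch of what such a lookup looks like:

// queryDueStates is a sketch only; the actual column handling and WHERE clause
// live in state/state_sqlite.go, which this diff truncates.
func queryDueStates(db *sql.DB, epoch int64) (*sql.Rows, error) {
	return db.Query(
		`SELECT type, send_count, send_epoch, group_id, peer_id, message_id
		   FROM mvds_states
		  WHERE send_epoch <= ?`, epoch)
}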
||||
9
state/migrations/sqlite/doc.go
Normal file
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
|
||||
// can't handle files starting with a prefix. At least that's the case
|
||||
// for go-bindata.
|
||||
// If go-bindata is called from the same directory, asset names
|
||||
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
@@ -1,3 +1,4 @@
|
||||
package state
|
||||
|
||||
// PeerID is the ID for a specific peer.
|
||||
type PeerID [65]byte
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
// Package state contains everything related to the synchronization state for MVDS.
|
||||
package state
|
||||
|
||||
// RecordType is the type for a specific record, either `OFFER`, `REQUEST` or `MESSAGE`.
|
||||
type RecordType int
|
||||
|
||||
const (
|
||||
@@ -9,15 +10,20 @@ const (
|
||||
MESSAGE
|
||||
)
|
||||
|
||||
// State is a struct used to store a record's [state](https://github.com/status-im/bigbrother-specs/blob/master/data_sync/mvds.md#state).
|
||||
type State struct {
|
||||
Type RecordType
|
||||
SendCount uint64
|
||||
SendEpoch int64
|
||||
// GroupID is optional, thus nullable
|
||||
GroupID *GroupID
|
||||
PeerID PeerID
|
||||
MessageID MessageID
|
||||
}
|
||||
|
||||
type SyncState interface {
|
||||
Get(group GroupID, id MessageID, peer PeerID) (State, error)
|
||||
Set(group GroupID, id MessageID, peer PeerID, newState State) error
|
||||
Remove(group GroupID, id MessageID, peer PeerID) error
|
||||
Map(epoch int64, process func(GroupID, MessageID, PeerID, State) State) error
|
||||
Add(newState State) error
|
||||
Remove(id MessageID, peer PeerID) error
|
||||
All(epoch int64) ([]State, error)
|
||||
Map(epoch int64, process func(State) State) error
|
||||
}
|
||||
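The interface above replaces the per-group Get/Set calls with epoch-driven Add/All/Map/Remove operations. A minimal sketch of how a caller might drive it, using the in-memory implementation below and hypothetical values:

func exampleSyncStateUsage() error {
    st := NewMemorySyncState()

    // Record an outgoing OFFER that becomes due at epoch 1.
    if err := st.Add(State{
        Type:      OFFER,
        SendEpoch: 1,
        PeerID:    PeerID{0x01},
        MessageID: MessageID{0xaa},
    }); err != nil {
        return err
    }

    // Fetch every state that is due at epoch 1 (SendEpoch <= 1).
    due, err := st.All(1)
    if err != nil {
        return err
    }
    _ = due

    // Forget the state once the peer has acknowledged the message.
    return st.Remove(MessageID{0xaa}, PeerID{0x01})
}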
|
||||
@@ -7,62 +7,55 @@ import (
|
||||
type memorySyncState struct {
|
||||
sync.Mutex
|
||||
|
||||
state map[GroupID]map[MessageID]map[PeerID]State
|
||||
state []State
|
||||
}
|
||||
|
||||
func NewSyncState() *memorySyncState {
|
||||
return &memorySyncState{
|
||||
state: make(map[GroupID]map[MessageID]map[PeerID]State),
|
||||
}
|
||||
func NewMemorySyncState() *memorySyncState {
|
||||
return &memorySyncState{}
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Get(group GroupID, id MessageID, peer PeerID) (State, error) {
|
||||
func (s *memorySyncState) Add(newState State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
state, _ := s.state[group][id][peer]
|
||||
return state, nil
|
||||
}
|
||||
s.state = append(s.state, newState)
|
||||
|
||||
func (s *memorySyncState) Set(group GroupID, id MessageID, peer PeerID, newState State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
if _, ok := s.state[group]; !ok {
|
||||
s.state[group] = make(map[MessageID]map[PeerID]State)
|
||||
}
|
||||
|
||||
if _, ok := s.state[group][id]; !ok {
|
||||
s.state[group][id] = make(map[PeerID]State)
|
||||
}
|
||||
|
||||
s.state[group][id][peer] = newState
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Remove(group GroupID, id MessageID, peer PeerID) error {
|
||||
func (s *memorySyncState) Remove(id MessageID, peer PeerID) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
var newState []State
|
||||
|
||||
delete(s.state[group][id], peer)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Map(epoch int64, process func(GroupID, MessageID, PeerID, State) State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for group, syncstate := range s.state {
|
||||
for id, peers := range syncstate {
|
||||
for peer, state := range peers {
|
||||
if state.SendEpoch < epoch {
|
||||
continue
|
||||
}
|
||||
|
||||
s.state[group][id][peer] = process(group, id, peer, state)
|
||||
}
|
||||
for _, state := range s.state {
|
||||
if state.MessageID != id || state.PeerID != peer {
|
||||
newState = append(newState, state)
|
||||
}
|
||||
}
|
||||
|
||||
s.state = newState
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) All(_ int64) ([]State, error) {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.state, nil
|
||||
}
|
||||
|
||||
func (s *memorySyncState) Map(epoch int64, process func(State) State) error {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for i, state := range s.state {
|
||||
if state.SendEpoch > epoch {
|
||||
continue
|
||||
}
|
||||
|
||||
s.state[i] = process(state)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
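A short sketch of how the epoch-based Map is likely meant to be driven by the node layer; the resend policy shown here is an assumption for illustration, not the library's actual retransmission logic:

// bumpDueStates increments the send count of every due state and pushes its
// next send one epoch into the future.
func bumpDueStates(st SyncState, currentEpoch int64) error {
    return st.Map(currentEpoch, func(s State) State {
        s.SendCount++
        s.SendEpoch = currentEpoch + 1
        return s
    })
}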
|
||||
166 state/state_sqlite.go (new file)
@@ -0,0 +1,166 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"log"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrStateNotFound = errors.New("state not found")
|
||||
)
|
||||
|
||||
// Verify that SyncState interface is implemented.
|
||||
var _ SyncState = (*sqliteSyncState)(nil)
|
||||
|
||||
type sqliteSyncState struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewPersistentSyncState(db *sql.DB) *sqliteSyncState {
|
||||
return &sqliteSyncState{db: db}
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Add(newState State) error {
|
||||
var groupIDBytes []byte
|
||||
if newState.GroupID != nil {
|
||||
groupIDBytes = newState.GroupID[:]
|
||||
}
|
||||
|
||||
_, err := p.db.Exec(`
|
||||
INSERT INTO mvds_states
|
||||
(type, send_count, send_epoch, group_id, peer_id, message_id)
|
||||
VALUES
|
||||
(?, ?, ?, ?, ?, ?)`,
|
||||
newState.Type,
|
||||
newState.SendCount,
|
||||
newState.SendEpoch,
|
||||
groupIDBytes,
|
||||
newState.PeerID[:],
|
||||
newState.MessageID[:],
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Remove(messageID MessageID, peerID PeerID) error {
|
||||
result, err := p.db.Exec(
|
||||
`DELETE FROM mvds_states WHERE message_id = ? AND peer_id = ?`,
|
||||
messageID[:],
|
||||
peerID[:],
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n, err := result.RowsAffected(); err != nil {
|
||||
return err
|
||||
} else if n == 0 {
|
||||
return ErrStateNotFound
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) All(epoch int64) ([]State, error) {
|
||||
var result []State
|
||||
|
||||
rows, err := p.db.Query(`
|
||||
SELECT
|
||||
type, send_count, send_epoch, group_id, peer_id, message_id
|
||||
FROM
|
||||
mvds_states
|
||||
WHERE
|
||||
send_epoch <= ?
|
||||
`, epoch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer rows.Close()
|
||||
for rows.Next() {
|
||||
var (
|
||||
state State
|
||||
groupID, peerID, messageID []byte
|
||||
)
|
||||
err := rows.Scan(
|
||||
&state.Type,
|
||||
&state.SendCount,
|
||||
&state.SendEpoch,
|
||||
&groupID,
|
||||
&peerID,
|
||||
&messageID,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(groupID) > 0 {
|
||||
val := GroupID{}
|
||||
copy(val[:], groupID)
|
||||
state.GroupID = &val
|
||||
}
|
||||
copy(state.PeerID[:], peerID)
|
||||
copy(state.MessageID[:], messageID)
|
||||
|
||||
result = append(result, state)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *sqliteSyncState) Map(epoch int64, process func(State) State) error {
|
||||
states, err := p.All(epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var updated []State
|
||||
|
||||
for _, state := range states {
|
||||
if err := invariant(state.SendEpoch <= epoch, "invalid state provided to process"); err != nil {
|
||||
log.Printf("%v", err)
|
||||
continue
|
||||
}
|
||||
newState := process(state)
|
||||
if newState != state {
|
||||
updated = append(updated, newState)
|
||||
}
|
||||
}
|
||||
|
||||
if len(updated) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
tx, err := p.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, state := range updated {
|
||||
if err := updateInTx(tx, state); err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func updateInTx(tx *sql.Tx, state State) error {
|
||||
_, err := tx.Exec(`
|
||||
UPDATE mvds_states
|
||||
SET
|
||||
send_count = ?,
|
||||
send_epoch = ?
|
||||
WHERE
|
||||
message_id = ? AND
|
||||
peer_id = ?
|
||||
`,
|
||||
state.SendCount,
|
||||
state.SendEpoch,
|
||||
state.MessageID[:],
|
||||
state.PeerID[:],
|
||||
)
|
||||
return err
|
||||
}
|
||||
|
||||
func invariant(cond bool, message string) error {
|
||||
if !cond {
|
||||
return errors.New(message)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
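A sketch of wiring the SQLite-backed state together, mirroring the test file that follows (persistenceutil.Open and the state/migrations bindata are used exactly as in state_sqlite_test.go; the helper name is made up):

// openPersistentSyncState opens (or creates) the SQLite file at path, applies
// the bundled migrations and returns a ready-to-use SyncState.
func openPersistentSyncState(path string) (SyncState, error) {
    db, err := persistenceutil.Open(path, "", persistenceutil.MigrationConfig{
        AssetNames:  migrations.AssetNames(),
        AssetGetter: migrations.Asset,
    })
    if err != nil {
        return nil, err
    }
    return NewPersistentSyncState(db), nil
}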
56 state/state_sqlite_test.go (new file)
@@ -0,0 +1,56 @@
|
||||
package state
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/state/migrations"
|
||||
)
|
||||
|
||||
func TestPersistentSyncState(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentSyncState(db)
|
||||
|
||||
stateWithoutGroupID := State{
|
||||
Type: OFFER,
|
||||
SendCount: 1,
|
||||
SendEpoch: 1,
|
||||
GroupID: nil,
|
||||
PeerID: PeerID{0x01},
|
||||
MessageID: MessageID{0xaa},
|
||||
}
|
||||
err = p.Add(stateWithoutGroupID)
|
||||
require.NoError(t, err)
|
||||
|
||||
stateWithGroupID := stateWithoutGroupID
|
||||
stateWithGroupID.GroupID = &GroupID{0x01}
|
||||
stateWithGroupID.MessageID = MessageID{0xbb}
|
||||
err = p.Add(stateWithGroupID)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Getting states for the old epoch.
|
||||
allStates, err := p.All(0)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, allStates)
|
||||
|
||||
// Getting states for the current epoch.
|
||||
allStates, err = p.All(1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []State{stateWithoutGroupID, stateWithGroupID}, allStates)
|
||||
require.Nil(t, allStates[0].GroupID)
|
||||
require.EqualValues(t, &GroupID{0x01}, allStates[1].GroupID)
|
||||
|
||||
err = p.Remove(stateWithoutGroupID.MessageID, stateWithoutGroupID.PeerID)
|
||||
require.NoError(t, err)
|
||||
// remove non-existing row
|
||||
err = p.Remove(MessageID{0xff}, PeerID{0xff})
|
||||
require.EqualError(t, err, "state not found")
|
||||
}
|
||||
@@ -2,3 +2,17 @@ package state
|
||||
|
||||
type MessageID [32]byte
|
||||
type GroupID [32]byte
|
||||
|
||||
// ToMessageID converts a byte array to a MessageID.
|
||||
func ToMessageID(b []byte) MessageID {
|
||||
var id MessageID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
// ToGroupID converts a byte array to a GroupID.
|
||||
func ToGroupID(b []byte) GroupID {
|
||||
var id GroupID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -2,12 +2,13 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type MessageStore interface {
|
||||
Has(id state.MessageID) bool
|
||||
Get(id state.MessageID) (protobuf.Message, error)
|
||||
Add(message protobuf.Message) error
|
||||
Has(id state.MessageID) (bool, error)
|
||||
Get(id state.MessageID) (*protobuf.Message, error)
|
||||
Add(message *protobuf.Message) error
|
||||
GetMessagesWithoutChildren(id state.GroupID) ([]state.MessageID, error)
|
||||
}
|
||||
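Has and Get now return errors, so callers need an extra check before fetching; a small sketch of the resulting flow (the helper name and its nil-on-unknown semantics are illustrative only):

// fetchIfKnown returns the stored message for id, or nil if the store has
// never seen it.
func fetchIfKnown(ms MessageStore, id state.MessageID) (*protobuf.Message, error) {
    ok, err := ms.Has(id)
    if err != nil || !ok {
        return nil, err
    }
    return ms.Get(id)
}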
|
||||
@@ -1,44 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
)
|
||||
|
||||
type DummyStore struct {
|
||||
sync.Mutex
|
||||
ms map[state.MessageID]protobuf.Message
|
||||
}
|
||||
|
||||
func NewDummyStore() DummyStore {
|
||||
return DummyStore{ms: make(map[state.MessageID]protobuf.Message)}
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Has(id state.MessageID) bool {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
_, ok := ds.ms[id]; return ok
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Get(id state.MessageID) (protobuf.Message, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
m, ok := ds.ms[id]
|
||||
if !ok {
|
||||
return protobuf.Message{}, errors.New("message does not exist")
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Add(message protobuf.Message) error {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
ds.ms[message.ID()] = message
|
||||
return nil
|
||||
}
|
||||
81 store/messagestore_memory.go (new file)
@@ -0,0 +1,81 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type memoryMessageStore struct {
|
||||
sync.Mutex
|
||||
ms map[state.MessageID]*protobuf.Message
|
||||
}
|
||||
|
||||
func NewMemoryMessageStore() *memoryMessageStore {
|
||||
return &memoryMessageStore{ms: make(map[state.MessageID]*protobuf.Message)}
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Has(id state.MessageID) (bool, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
_, ok := ds.ms[id]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
m, ok := ds.ms[id]
|
||||
if !ok {
|
||||
return nil, errors.New("message does not exist")
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Add(message *protobuf.Message) error {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
ds.ms[message.ID()] = message
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) GetMessagesWithoutChildren(group state.GroupID) ([]state.MessageID, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
hasChildren := make(map[state.MessageID]bool)
|
||||
|
||||
for id, msg := range ds.ms {
|
||||
if state.ToGroupID(msg.GroupId) != group {
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.Metadata != nil {
|
||||
for _, parent := range msg.Metadata.Parents {
|
||||
hasChildren[state.ToMessageID(parent)] = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasChildren[id] {
|
||||
continue
|
||||
}
|
||||
|
||||
hasChildren[id] = false
|
||||
}
|
||||
|
||||
msgs := make([]state.MessageID, 0)
|
||||
for id, hasChildren := range hasChildren {
|
||||
if hasChildren {
|
||||
continue
|
||||
}
|
||||
|
||||
msgs = append(msgs, id)
|
||||
}
|
||||
|
||||
return msgs, nil
|
||||
}
|
||||
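A minimal usage sketch of the in-memory store (hypothetical message values): add a message and list the leaves of its group, i.e. messages that no other message names as a parent.

func leavesExample() ([]state.MessageID, error) {
    ms := NewMemoryMessageStore()
    msg := &protobuf.Message{
        GroupId:   []byte{0x01},
        Timestamp: 1,
        Body:      []byte("hello"),
    }
    if err := ms.Add(msg); err != nil {
        return nil, err
    }
    // With no children recorded, the message itself is the only leaf.
    return ms.GetMessagesWithoutChildren(state.ToGroupID(msg.GroupId))
}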
160 store/messagestore_sqlite.go (new file)
@@ -0,0 +1,160 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrMessageNotFound = errors.New("message not found")
|
||||
)
|
||||
|
||||
type persistentMessageStore struct {
|
||||
db *sql.DB
|
||||
}
|
||||
|
||||
func NewPersistentMessageStore(db *sql.DB) *persistentMessageStore {
|
||||
return &persistentMessageStore{db: db}
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Add(message *protobuf.Message) error {
|
||||
id := message.ID()
|
||||
|
||||
tx, err := p.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tx.Exec(
|
||||
`INSERT INTO mvds_messages (id, group_id, timestamp, body)
|
||||
VALUES (?, ?, ?, ?)`,
|
||||
id[:],
|
||||
message.GroupId,
|
||||
message.Timestamp,
|
||||
message.Body,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if message.Metadata != nil && len(message.Metadata.Parents) > 0 {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("INSERT INTO mvds_parents(message_id, parent_id) VALUES ")
|
||||
var vals []interface{}
|
||||
|
||||
for _, row := range message.Metadata.Parents {
|
||||
sb.WriteString("(?, ?),")
|
||||
vals = append(vals, id[:], row[:])
|
||||
}
|
||||
|
||||
query := sb.String()
|
||||
stmt, err := tx.Prepare(query[0:len(query)-1])
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = stmt.Exec(vals...)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
var message protobuf.Message
|
||||
row := p.db.QueryRow(
|
||||
`SELECT group_id, timestamp, body FROM mvds_messages WHERE id = ?`,
|
||||
id[:],
|
||||
)
|
||||
if err := row.Scan(
|
||||
&message.GroupId,
|
||||
&message.Timestamp,
|
||||
&message.Body,
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
message.Metadata = &protobuf.Metadata{Ephemeral: false}
|
||||
|
||||
rows, err := p.db.Query(`SELECT parent_id FROM mvds_parents WHERE message_id = ?`, id[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var parent []byte
|
||||
err := rows.Scan(&parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
message.Metadata.Parents = append(message.Metadata.Parents, parent)
|
||||
}
|
||||
|
||||
err = rows.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &message, nil
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Has(id state.MessageID) (bool, error) {
|
||||
var result bool
|
||||
err := p.db.QueryRow(
|
||||
`SELECT EXISTS(SELECT 1 FROM mvds_messages WHERE id = ?)`,
|
||||
id[:],
|
||||
).Scan(&result)
|
||||
switch err {
|
||||
case sql.ErrNoRows:
|
||||
return false, ErrMessageNotFound
|
||||
case nil:
|
||||
return result, nil
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) GetMessagesWithoutChildren(id state.GroupID) ([]state.MessageID, error) {
|
||||
var result []state.MessageID
|
||||
rows, err := p.db.Query(
|
||||
`SELECT id FROM mvds_messages WHERE group_id = ? AND id NOT IN (SELECT parent_id FROM mvds_parents)`,
|
||||
id[:],
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var parent []byte
|
||||
err := rows.Scan(&parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, state.ToMessageID(parent))
|
||||
}
|
||||
|
||||
err = rows.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
104 store/messagestore_sqlite_test.go (new file)
@@ -0,0 +1,104 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/vacp2p/mvds/persistenceutil"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/store/migrations"
|
||||
)
|
||||
|
||||
func TestPersistentMessageStore(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentMessageStore(db)
|
||||
|
||||
now := time.Now().Unix()
|
||||
message := protobuf.Message{
|
||||
GroupId: []byte{0x01},
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xbb, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{{0xaa, 0xbb, 0xcc}}},
|
||||
}
|
||||
|
||||
err = p.Add(&message)
|
||||
require.NoError(t, err)
|
||||
// Adding the same message twice is not allowed.
|
||||
err = p.Add(&message)
|
||||
require.EqualError(t, err, "UNIQUE constraint failed: mvds_messages.id")
|
||||
// Verify if saved.
|
||||
exists, err := p.Has(message.ID())
|
||||
require.NoError(t, err)
|
||||
require.True(t, exists)
|
||||
recvMessage, err := p.Get(message.ID())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, message, *recvMessage)
|
||||
|
||||
// Verify methods against non existing message.
|
||||
recvMessage, err = p.Get(state.MessageID{0xff})
|
||||
require.EqualError(t, err, "sql: no rows in result set")
|
||||
require.Nil(t, recvMessage)
|
||||
exists, err = p.Has(state.MessageID{0xff})
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists)
|
||||
}
|
||||
|
||||
func TestPersistentMessageStore_GetMessagesWithoutChildren(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentMessageStore(db)
|
||||
|
||||
group := groupId()
|
||||
|
||||
now := time.Now().Unix()
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xbb, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{}},
|
||||
}
|
||||
|
||||
err = p.Add(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
id := msg.ID()
|
||||
|
||||
child := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{id[:]}},
|
||||
}
|
||||
|
||||
err = p.Add(child)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgs, err := p.GetMessagesWithoutChildren(group)
|
||||
require.NoError(t, err)
|
||||
|
||||
if msgs[0] != child.ID() {
t.Errorf("unexpected leaf message\nexpected: %v\nactual: %v", child.ID(), msgs[0])
}
|
||||
}
|
||||
|
||||
func groupId() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
317 store/migrations/migrations.go (new file)
@@ -0,0 +1,317 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1572372377_initial_schema.down.sql (55B)
|
||||
// 1572372377_initial_schema.up.sql (365B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1572372377_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x71\xf5\x71\x0d\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\xb6\xe6\xc2\x94\x2b\x48\x2c\x4a\xcd\x2b\x29\xb6\xe6\x02\x04\x00\x00\xff\xff\xa9\xdd\x32\x20\x37\x00\x00\x00")
|
||||
|
||||
func _1572372377_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1572372377_initial_schemaDownSql,
|
||||
"1572372377_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1572372377_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1572372377_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1572372377_initial_schema.down.sql", size: 55, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc1, 0x69, 0x72, 0x9f, 0x13, 0xdd, 0x23, 0x1b, 0xef, 0x2e, 0x95, 0x19, 0x42, 0xa3, 0x57, 0x8d, 0x77, 0x4, 0x73, 0xf6, 0x8a, 0xab, 0xad, 0xc1, 0xe6, 0xc7, 0x59, 0xbc, 0xee, 0x86, 0xce, 0x2f}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1572372377_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x90\xbd\x4e\xc4\x30\x10\x84\x7b\x3f\xc5\x94\xb1\xc4\x1b\x50\x25\xc7\xde\xc9\xc2\xd8\xc8\x18\x89\xab\xa2\x20\x5b\x27\x17\x26\x51\x1c\x10\xbc\x3d\x72\x7e\xc8\x0f\xdb\x7e\x3b\xb3\x33\x7b\x32\x54\x5a\x82\x2d\x2b\x49\x88\x5f\x2e\xd5\xd1\xa7\xd4\xdc\x7c\x42\xc1\x00\x20\x38\xcc\x53\x49\x5d\xe1\xd9\x88\xa7\xd2\x5c\xf1\x48\xd7\xbb\x91\xdf\xfa\xf6\xb3\xab\xf3\xd6\xc8\x01\x28\x6d\xa1\x5e\xa5\x9c\xf8\x10\xa2\x4f\x43\x13\x3b\x08\x65\xe9\x42\xe6\xc0\xdf\x5b\xf7\xb3\xf1\xdf\xe8\x19\xbf\x67\x6c\xce\x27\xd4\x03\xbd\x21\xb8\xef\xfa\xef\x9e\x56\xfb\xbc\xc5\x42\xb2\x8c\xfd\xef\xd5\x35\xbd\xff\x18\x96\x5a\xb3\x2a\x1b\x8d\x77\xf7\xa1\xa6\xdd\xb5\xd5\x9e\x9e\xb5\x21\x71\x51\xf9\x07\x28\x56\x23\x0e\x43\x67\x32\xa4\x4e\xf4\x72\x7c\x65\x70\x9c\x71\xf6\x1b\x00\x00\xff\xff\xed\x46\xeb\x1a\x6d\x01\x00\x00")
|
||||
|
||||
func _1572372377_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1572372377_initial_schemaUpSql,
|
||||
"1572372377_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1572372377_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1572372377_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1572372377_initial_schema.up.sql", size: 365, mode: os.FileMode(0644), modTime: time.Unix(1572895921, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9b, 0xb2, 0x13, 0xf6, 0x6c, 0xa8, 0xdb, 0xfb, 0xb5, 0x8d, 0xe3, 0xa9, 0x50, 0x67, 0xb, 0xe2, 0x53, 0x6b, 0x24, 0xde, 0x18, 0x19, 0xac, 0x39, 0x3f, 0x52, 0x3a, 0xe1, 0xb8, 0x91, 0x8d, 0xd0}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1572372377_initial_schema.down.sql": _1572372377_initial_schemaDownSql,
|
||||
"1572372377_initial_schema.up.sql": _1572372377_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1572372377_initial_schema.down.sql": &bintree{_1572372377_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1572372377_initial_schema.up.sql": &bintree{_1572372377_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1,2 @@
|
||||
DROP TABLE mvds_messages;
DROP TABLE mvds_parents;
|
||||
15 store/migrations/sqlite/1572372377_initial_schema.up.sql (new file)
@@ -0,0 +1,15 @@
|
||||
CREATE TABLE mvds_messages (
|
||||
id BLOB PRIMARY KEY,
|
||||
group_id BLOB NOT NULL,
|
||||
timestamp INTEGER NOT NULL,
|
||||
body BLOB NOT NULL
|
||||
);
|
||||
|
||||
CREATE INDEX idx_group_id ON mvds_messages(group_id);
|
||||
|
||||
|
||||
CREATE TABLE mvds_parents (
|
||||
message_id BLOB NOT NULL,
|
||||
parent_id BLOB NOT NULL,
|
||||
FOREIGN KEY (message_id) REFERENCES mvds_messages (id)
|
||||
)
|
||||
9 store/migrations/sqlite/doc.go (new file)
@@ -0,0 +1,9 @@
|
||||
// This file is necessary because "github.com/status-im/migrate/v4"
// can't handle asset names that carry a path prefix; at least that's
// the case for assets generated with go-bindata.
// If go-bindata is called from this directory, asset names
// have no prefix and "github.com/status-im/migrate/v4" works as expected.
|
||||
|
||||
package sqlite
|
||||
|
||||
//go:generate go-bindata -pkg migrations -o ../migrations.go .
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
// ChannelTransport implements a basic MVDS transport using channels for basic testing purposes.
|
||||
@@ -23,12 +23,12 @@ type ChannelTransport struct {
|
||||
func NewChannelTransport(offline int, in <-chan Packet) *ChannelTransport {
|
||||
return &ChannelTransport{
|
||||
offline: offline,
|
||||
in: in,
|
||||
out: make(map[state.PeerID]chan<- Packet),
|
||||
in: in,
|
||||
out: make(map[state.PeerID]chan<- Packet),
|
||||
}
|
||||
}
|
||||
|
||||
func (t *ChannelTransport) AddOutput(id state.PeerID, c chan<-Packet) {
|
||||
func (t *ChannelTransport) AddOutput(id state.PeerID, c chan<- Packet) {
|
||||
t.out[id] = c
|
||||
}
|
||||
|
||||
@@ -36,7 +36,7 @@ func (t *ChannelTransport) Watch() Packet {
|
||||
return <-t.in
|
||||
}
|
||||
|
||||
func (t *ChannelTransport) Send(group state.GroupID, sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error {
|
||||
func (t *ChannelTransport) Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error {
|
||||
// @todo improve this: move node online/offline handling into a goroutine that
// stops nodes for x seconds, outside of this type.
|
||||
math.Seed(time.Now().UnixNano())
|
||||
@@ -49,6 +49,6 @@ func (t *ChannelTransport) Send(group state.GroupID, sender state.PeerID, peer s
|
||||
return errors.New("peer unknown")
|
||||
}
|
||||
|
||||
c <- Packet{Group: group, Sender: sender, Payload: payload}
|
||||
c <- Packet{Sender: sender, Payload: payload}
|
||||
return nil
|
||||
}
|
||||
@@ -2,12 +2,11 @@
|
||||
package transport
|
||||
|
||||
import (
|
||||
"github.com/status-im/mvds/protobuf"
|
||||
"github.com/status-im/mvds/state"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type Packet struct {
|
||||
Group state.GroupID
|
||||
Sender state.PeerID
|
||||
Payload protobuf.Payload
|
||||
}
|
||||
@@ -15,5 +14,5 @@ type Packet struct {
|
||||
// Transport defines an interface allowing for agnostic transport implementations.
|
||||
type Transport interface {
|
||||
Watch() Packet
|
||||
Send(group state.GroupID, sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error
|
||||
Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error
|
||||
}
|
||||
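With the group argument dropped from Send (the packet now carries only sender and payload), fanning a payload out to several peers looks roughly like the sketch below; the helper and peer list are hypothetical.

// broadcast sends the same payload from sender to every peer in peers,
// stopping at the first transport error.
func broadcast(t Transport, sender state.PeerID, peers []state.PeerID, payload protobuf.Payload) error {
    for _, peer := range peers {
        if err := t.Send(sender, peer, payload); err != nil {
            return err
        }
    }
    return nil
}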
|
||||
15 vendor/github.com/davecgh/go-spew/LICENSE (generated, vendored, new file)
@@ -0,0 +1,15 @@
|
||||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
145 vendor/github.com/davecgh/go-spew/spew/bypass.go (generated, vendored, new file)
@@ -0,0 +1,145 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
type flag uintptr
|
||||
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
||||
38 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
|
||||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
||||
341 vendor/github.com/davecgh/go-spew/spew/common.go (generated, vendored, new file)
@@ -0,0 +1,341 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
func printHexPtr(w io.Writer, p uintptr) {
	// Null pointer.
	num := uint64(p)
	if num == 0 {
		w.Write(nilAngleBytes)
		return
	}

	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
	buf := make([]byte, 18)

	// It's simpler to construct the hex string right to left.
	base := uint64(16)
	i := len(buf) - 1
	for num >= base {
		buf[i] = hexDigits[num%base]
		num /= base
		i--
	}
	buf[i] = hexDigits[num]

	// Add '0x' prefix.
	i--
	buf[i] = 'x'
	i--
	buf[i] = '0'

	// Strip unused leading bytes.
	buf = buf[i:]
	w.Write(buf)
}

// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len as values
	cs      *ConfigState
}

// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	if vs.strings == nil && cs.SpewKeys {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}

// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
	// This switch parallels valueSortLess, except for the default case.
	switch kind {
	case reflect.Bool:
		return true
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return true
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return true
	case reflect.Float32, reflect.Float64:
		return true
	case reflect.String:
		return true
	case reflect.Uintptr:
		return true
	case reflect.Array:
		return true
	}
	return false
}

// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}

// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}

// valueSortLess returns whether the first value should sort before the second
// value. It is used by valuesSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
	switch a.Kind() {
	case reflect.Bool:
		return !a.Bool() && b.Bool()
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		return a.Int() < b.Int()
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		return a.Uint() < b.Uint()
	case reflect.Float32, reflect.Float64:
		return a.Float() < b.Float()
	case reflect.String:
		return a.String() < b.String()
	case reflect.Uintptr:
		return a.Uint() < b.Uint()
	case reflect.Array:
		// Compare the contents of both arrays.
		l := a.Len()
		for i := 0; i < l; i++ {
			av := a.Index(i)
			bv := b.Index(i)
			if av.Interface() == bv.Interface() {
				continue
			}
			return valueSortLess(av, bv)
		}
	}
	return a.String() < b.String()
}

// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}

// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) == 0 {
		return
	}
	sort.Sort(newValuesSorter(values, cs))
}
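The sorting code above is what backs the SortKeys option: map keys are ordered either by their primitive value or by a surrogate string key. A minimal illustrative sketch, not part of this diff and assuming the vendored import path github.com/davecgh/go-spew/spew, of how that makes map output deterministic:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// SortKeys routes map keys through sortValues/valuesSorter, so the
	// printed order is stable no matter how Go happens to iterate the map.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	fmt.Print(cfg.Sdump(m))
}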
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
@@ -0,0 +1,306 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use sets this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Fprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
||||
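ConfigState is most useful when a package wants its own settings instead of mutating the global spew.Config. A hedged sketch of a test-oriented configuration (the helper name and usage are illustrative, not taken from this repository):

package example

import "github.com/davecgh/go-spew/spew"

// testSpew is a hypothetical helper configuration: pointer addresses and
// capacities are suppressed so dumps of equal values compare equal in tests.
var testSpew = spew.ConfigState{
	Indent:                  " ",
	SortKeys:                true,
	DisablePointerAddresses: true,
	DisableCapacities:       true,
}

// dumpForDiff is an illustrative helper, not part of this repository.
func dumpForDiff(v interface{}) string {
	return testSpew.Sdump(v)
}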
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types is as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc. with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported, with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
||||
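The package documentation above covers both output styles. A small, self-contained sketch of the quick-start usage it describes (illustrative only; the type and values are made up, and the vendored import path is assumed):

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type pair struct {
	Name  string
	Count int
}

func main() {
	p := &pair{Name: "one", Count: 1}
	spew.Dump(p)                    // multi-line dump with types and pointer addresses
	spew.Fdump(os.Stderr, p)        // same output, but to an arbitrary io.Writer
	spew.Printf("inline: %+v\n", p) // compact Formatter-style output
}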
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
@@ -0,0 +1,509 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
||||
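Dump, Fdump, and Sdump all funnel into fdump above, and byte slices take the hexdump path in dumpSlice. An illustrative sketch of dumping into a buffer (not part of this diff, assuming the vendored import path):

package main

import (
	"bytes"
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	data := []byte("spew hexdumps byte slices")
	var buf bytes.Buffer
	// Byte slices go through dumpSlice and are rendered hexdump -C style,
	// with offsets, hex values, and an ASCII column.
	spew.Fdump(&buf, data)
	fmt.Print(buf.String())
}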
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
@@ -0,0 +1,419 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
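As a quick illustration of the documentation above, here is a minimal sketch of both styles of use; the `point` type and its values are assumptions made only for this example, not part of the package:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// point is a throwaway example type, assumed only for this sketch.
type point struct {
	X, Y int
}

func main() {
	p := &point{X: 1, Y: 2}

	// Wrap the value explicitly so the standard fmt verbs use spew's formatter.
	fmt.Printf("%+v\n", spew.NewFormatter(p))

	// Or let the convenience wrappers (Printf, Println, ...) do the wrapping.
	spew.Printf("%#v\n", p)
}
```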
|
||||
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
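The wrappers above all follow the same convertArgs pattern; a small hedged sketch of how they might be called (the `cfg` map is an assumed example value):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	cfg := map[string]int{"retries": 3}

	// Each argument is wrapped with the default Formatter before being
	// handed to the corresponding standard fmt function.
	spew.Println(cfg)
	fmt.Println(spew.Sprintf("config: %+v", cfg))

	// Errorf behaves like fmt.Errorf with the same wrapping applied.
	fmt.Println(spew.Errorf("invalid config: %#v", cfg))
}
```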
|
||||
13
vendor/github.com/golang-migrate/migrate/v4/.dockerignore
generated
vendored
Normal file
13
vendor/github.com/golang-migrate/migrate/v4/.dockerignore
generated
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# Project
|
||||
FAQ.md
|
||||
README.md
|
||||
LICENSE
|
||||
Makefile
|
||||
.gitignore
|
||||
.travis.yml
|
||||
CONTRIBUTING.md
|
||||
MIGRATIONS.md
|
||||
docker-deploy.sh
|
||||
|
||||
# Golang
|
||||
testing
|
||||
8
vendor/github.com/golang-migrate/migrate/v4/.gitignore
generated
vendored
Normal file
8
vendor/github.com/golang-migrate/migrate/v4/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
.DS_Store
|
||||
cli/build
|
||||
cli/cli
|
||||
cli/migrate
|
||||
.coverage
|
||||
.godoc.pid
|
||||
vendor/
|
||||
.vscode/
|
||||
27
vendor/github.com/golang-migrate/migrate/v4/.golangci.yml
generated
vendored
Normal file
27
vendor/github.com/golang-migrate/migrate/v4/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
run:
|
||||
# timeout for analysis, e.g. 30s, 5m, default is 1m
|
||||
deadline: 2m
|
||||
linters:
|
||||
enable:
|
||||
#- golint
|
||||
- interfacer
|
||||
- unconvert
|
||||
#- dupl
|
||||
- goconst
|
||||
- gofmt
|
||||
- misspell
|
||||
- maligned
|
||||
- unparam
|
||||
- nakedret
|
||||
- prealloc
|
||||
#- gosec
|
||||
linters-settings:
|
||||
misspell:
|
||||
locale: US
|
||||
issues:
|
||||
max-same: 0
|
||||
max-per-linter: 0
|
||||
exclude-use-default: false
|
||||
exclude:
|
||||
# gosec: Duplicated errcheck checks
|
||||
- G104
|
||||
135
vendor/github.com/golang-migrate/migrate/v4/.travis.yml
generated
vendored
Normal file
135
vendor/github.com/golang-migrate/migrate/v4/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
language: go
|
||||
sudo: required
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: master
|
||||
include:
|
||||
# Supported versions of Go: https://golang.org/dl/
|
||||
- go: "1.11.x"
|
||||
- go: "1.12.x"
|
||||
- go: master
|
||||
|
||||
go_import_path: github.com/golang-migrate/migrate
|
||||
|
||||
env:
|
||||
global:
|
||||
- GO111MODULE=on
|
||||
- MIGRATE_TEST_CONTAINER_BOOT_TIMEOUT=60
|
||||
- DOCKER_USERNAME=golangmigrate
|
||||
- secure: "oSOznzUrgr5h45qW4PONkREpisPAt40tnM+KFWtS/Ggu5UI2Ie0CmyYXWuBjbt7B97a4yN9Qzmn8FxJHJ7kk+ABOi3muhkxeIhr6esXbzHhX/Jhv0mj1xkzX7KoVN9oHBz3cOI/QeRyEAO68xjDHNE2kby4RTT9VBt6TQUakKVkqI5qkqLBTADepCjVC+9XhxVxUNyeWKU8ormaUfJBjoNVoDlwXekUPnJenfmfZqXxUInvBCfUyp7Pq+kurBORmg4yc6qOlRYuK67Xw+i5xpjbZouNlXPk0rq7pPy5zjhmZQ3kImoFPvNMeKViDcI6kSIJKtjdhms9/g/6MgXS9HlL5kFy8tYKbsyiHnHB1BsvaLAKXctbUZFDPstgMPADfnad2kZXPrNqIhfWKZrGRWidawCYJ1sKKwYxLMKrtA0umqgMoL90MmBOELhuGmvMV0cFJB+zo+K2YWjEiMGd8xRb5mC5aAy0ZcCehO46jGtpr217EJmMF8Ywr7cFqM2Shg5U2jev9qUpYiXwmPnJKDuoT2ZHuHmPgFIkYiWC5yeJnnmG5bed1sKBp93AFrJX+1Rx5oC4BpNegewmBZKpOSwls/D1uMAeQK3dPmQHLsT6o2VBLfeDGr+zY0R85ywwPZCv00vGol02zYoTqN7eFqr6Qhjr/qx5K1nnxJdFK3Ts="
|
||||
|
||||
services:
|
||||
- docker
|
||||
|
||||
cache:
|
||||
directories:
|
||||
- $GOPATH/pkg
|
||||
|
||||
|
||||
before_install:
|
||||
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
# Install golangci-lint
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1
|
||||
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
|
||||
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- golangci-lint run
|
||||
- make test COVERAGE_DIR=/tmp/coverage
|
||||
|
||||
after_success:
|
||||
- goveralls -service=travis-ci -coverprofile /tmp/coverage/combined.txt
|
||||
- make list-external-deps > dependency_tree.txt && cat dependency_tree.txt
|
||||
- make build-cli
|
||||
- gem install --no-document fpm
|
||||
- fpm -s dir -t deb -n migrate -v "$(git describe --tags 2>/dev/null | cut -c 2-)" --license MIT -m dhui@users.noreply.github.com --url https://github.com/golang-migrate/migrate --description='Database migrations' -a amd64 -p migrate.$(git describe --tags 2>/dev/null | cut -c 2-).deb --deb-no-default-config-files -f -C cli/build migrate.linux-amd64=/usr/local/bin/migrate
|
||||
|
||||
deploy:
|
||||
- provider: releases
|
||||
api_key:
|
||||
secure: hWH1HLPpzpfA8pXQ93T1qKQVFSpQp0as/JLQ7D91jHuJ8p+RxVeqblDrR6HQY/95R/nyiE9GJmvUolSuw5h449LSrGxPtVWhdh6EnkxlQHlen5XeMhVjRjFV0sE9qGe8v7uAkiTfRO61ktTWHrEAvw5qpyqnNISodmZS78XIasPODQbNlzwINhWhDTHIjXGb4FpizYaL3OGCanrxfR9fQyCaqKGGBjRq3Mfq8U6Yd4mApmsE+uJxgaZV8K5zBqpkSzQRWhcVGNL5DuLsU3gfSJOo7kZeA2G71SHffH577dBoqtCZ4VFv169CoUZehLWCb+7XKJZmHXVujCURATSySLGUOPc6EoLFAn3YtsCA04mS4bZVo5FZPWVwfhjmkhtDR4f6wscKp7r1HsFHSOgm59QfETQdrn4MnZ44H2Jd39axqndn5DvK9EcZVjPHynOPnueXP2u6mTuUgh2VyyWBCDO3CNo0fGlo7VJI69IkIWNSD87K9cHZWYMClyKZkUzS+PmRAhHRYbVd+9ZjKOmnU36kUHNDG/ft1D4ogsY+rhVtXB4lgWDM5adri+EIScYdYnB1/pQexLBigcJY9uE7nQTR0U6QgVNYvun7uRNs40E0c4voSfmPdFO0FlOD2y1oQhnaXfWLbu9nMcTcs4RFGrcC7NzkUN4/WjG8s285V6w=
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
file:
|
||||
- cli/build/migrate.linux-amd64.tar.gz
|
||||
- cli/build/migrate.darwin-amd64.tar.gz
|
||||
- cli/build/migrate.windows-amd64.exe.tar.gz
|
||||
- cli/build/sha256sum.txt
|
||||
- dependency_tree.txt
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/xenial
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/bionic
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: ubuntu/cosmic
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/stretch
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: packagecloud
|
||||
repository: migrate
|
||||
username: golang-migrate
|
||||
token:
|
||||
secure: aICwu3gJ1sJ1QVCD3elpg+Jxzt4P+Zj1uoh5f0sOwnjDNIZ4FwUT1cMrWloP8P2KD0iyCOawuZER27o/kQ21oX2OxHvQbYPReA2znLm7lHzCmypAAOHPxpgnQ4rMGHHJXd+OsxtdclGs67c+EbdBfoRRbK400Qz/vjPJEDeH4mh02ZHC2nw4Nk/wV4jjBIkIt9dGEx6NgOA17FCMa3MaPHlHeFIzU7IfTlDHbS0mCCYbg/wafWBWcbGqtZLWAYtJDmfjrAStmDLdAX5J5PsB7taGSGPZHmPmpGoVgrKt/tb9Xz1rFBGslTpGROOiO4CiMAvkEKFn8mxrBGjfSBqp7Dp3eeSalKXB1DJAbEXx2sEbMcvmnoR9o43meaAn+ZRts8lRL8S/skBloe6Nk8bx3NlJCGB9WPK1G56b7c/fZnJxQbrCw6hxDfbZwm8S2YPviFTo/z1BfZDhRsL74reKsN2kgnGo2W/k38vvzIpsssQ9DHN1b0TLCxolCNPtQ7oHcQ1ohcjP2UgYXk0FhqDoL+9LQva/DU4N9sKH0UbAaqsMVSErLeG8A4aauuFcVrWRBaDYyTag4dQqzTulEy7iru2kDDIBgSQ1gMW/yoBOIPK4oi6MtbTf1X39fzXFLS1cDd3LW61yAu3YrbjAetpfx2frIvrRAiL9TxWA1gnrs5o=
|
||||
dist: debian/buster
|
||||
package_glob: '*.deb'
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
- provider: script
|
||||
script: ./docker-deploy.sh
|
||||
skip_cleanup: true
|
||||
on:
|
||||
go: "1.12.x"
|
||||
repo: golang-migrate/migrate
|
||||
tags: true
|
||||
24
vendor/github.com/golang-migrate/migrate/v4/CONTRIBUTING.md
generated
vendored
Normal file
24
vendor/github.com/golang-migrate/migrate/v4/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
# Development, Testing and Contributing
|
||||
|
||||
1. Make sure you have a running Docker daemon
|
||||
(Install for [MacOS](https://docs.docker.com/docker-for-mac/))
|
||||
1. Use a version of Go that supports [modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) (e.g. Go 1.11+)
|
||||
1. Fork this repo and `git clone` somewhere to `$GOPATH/src/github.com/golang-migrate/migrate`
|
||||
* Ensure that [Go modules are enabled](https://golang.org/cmd/go/#hdr-Preliminary_module_support) (e.g. your repo path or the `GO111MODULE` environment variable are set correctly)
|
||||
1. Install [golangci-lint](https://github.com/golangci/golangci-lint#install)
|
||||
1. Run the linter: `golangci-lint run`
|
||||
1. Confirm tests are working: `make test-short`
|
||||
1. Write awesome code ...
|
||||
1. `make test` to run all tests against all database versions
|
||||
1. Push code and open Pull Request
|
||||
|
||||
Some more helpful commands:
|
||||
|
||||
* You can specify which database/ source tests to run:
|
||||
`make test-short SOURCE='file go_bindata' DATABASE='postgres cassandra'`
|
||||
* After `make test`, run `make html-coverage` which opens a shiny test coverage overview.
|
||||
* `make build-cli` builds the CLI in directory `cli/build/`.
|
||||
* `make list-external-deps` lists all external dependencies for each package
|
||||
* `make docs && make open-docs` opens godoc in your browser, `make kill-docs` kills the godoc server.
|
||||
Repeatedly call `make docs` to refresh the server.
|
||||
* Set the `DOCKER_API_VERSION` environment variable to the latest supported version if you get errors regarding the docker client API version being too new.
|
||||
23
vendor/github.com/golang-migrate/migrate/v4/Dockerfile
generated
vendored
Normal file
23
vendor/github.com/golang-migrate/migrate/v4/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM golang:1.12-alpine3.10 AS downloader
|
||||
ARG VERSION
|
||||
|
||||
RUN apk add --no-cache git gcc musl-dev
|
||||
|
||||
WORKDIR /go/src/github.com/golang-migrate/migrate
|
||||
|
||||
COPY . ./
|
||||
|
||||
ENV GO111MODULE=on
|
||||
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver"
|
||||
ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab"
|
||||
|
||||
RUN go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
|
||||
|
||||
FROM alpine:3.10
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
COPY --from=downloader /go/src/github.com/golang-migrate/migrate/build/migrate.linux-386 /migrate
|
||||
|
||||
ENTRYPOINT ["/migrate"]
|
||||
CMD ["--help"]
|
||||
76
vendor/github.com/golang-migrate/migrate/v4/FAQ.md
generated
vendored
Normal file
76
vendor/github.com/golang-migrate/migrate/v4/FAQ.md
generated
vendored
Normal file
@@ -0,0 +1,76 @@
|
||||
# FAQ
|
||||
|
||||
#### How is the code base structured?
|
||||
```
|
||||
/ package migrate (the heart of everything)
|
||||
/cli the CLI wrapper
|
||||
/database database driver and sub directories have the actual driver implementations
|
||||
/source source driver and sub directories have the actual driver implementations
|
||||
```
|
||||
|
||||
#### Why is there no `source/driver.go:Last()`?
|
||||
It's not needed. And unless the source has a "native" way to read a directory in reversed order,
|
||||
it might be expensive to do a full directory scan in order to get the last element.
|
||||
|
||||
#### What is a NilMigration? NilVersion?
|
||||
NilMigration defines a migration without a body. NilVersion is defined as const -1.
|
||||
|
||||
#### What is the difference between uint(version) and int(targetVersion)?
|
||||
version refers to an existing migration version coming from a source and therefore can never be negative.
|
||||
targetVersion can either be a version OR represent a NilVersion, which equals -1.
|
||||
|
||||
#### What's the difference between Next/Previous and Up/Down?
|
||||
```
|
||||
1_first_migration.up.extension next -> 2_second_migration.up.extension ...
|
||||
1_first_migration.down.extension <- previous 2_second_migration.down.extension ...
|
||||
```
|
||||
|
||||
#### Why two separate files (up and down) for a migration?
|
||||
It makes all of our lives easier. No new markup/syntax to learn for users
|
||||
and existing database utility tools continue to work as expected.
|
||||
|
||||
#### How many migrations can migrate handle?
|
||||
Whatever the maximum positive signed integer value is for your platform.
|
||||
For 32bit it would be 2,147,483,647 migrations. Migrate only keeps references to
|
||||
the currently run and pre-fetched migrations in memory. Please note that some
|
||||
source drivers need to build a full "directory" tree first, which puts some
|
||||
heat on the memory consumption.
|
||||
|
||||
#### Are the table tests in migrate_test.go bloated?
|
||||
Yes and no. There are duplicate test cases for sure but they don't hurt here. In fact
|
||||
the tests are very visual now and might help new users understand expected behaviors quickly.
|
||||
Migrate from version x to y and y is the last migration? Just check out the test for
|
||||
that particular case and know what's going on instantly.
|
||||
|
||||
#### What is Docker being used for?
|
||||
Only for testing. See [testing/docker.go](testing/docker.go)
|
||||
|
||||
#### Why not just use docker-compose?
|
||||
It doesn't give us enough runtime control for testing. We want to be able to bring up containers fast
|
||||
and whenever we want, not just once at the beginning of all tests.
|
||||
|
||||
#### Can I maintain my driver in my own repository?
|
||||
Yes, technically that's possible. We want to encourage you to contribute your driver to this repository, though.
|
||||
The driver's functionality is dictated by migrate's interfaces. That means there should really
|
||||
just be one driver for a database/ source. We want to prevent a future where several drivers doing the exact same thing,
|
||||
just implemented a bit differently, co-exist somewhere on Github. If users have to do research first to find the
|
||||
"best" available driver for a database in order to get started, we would have failed as an open source community.
|
||||
|
||||
#### Can I mix multiple sources during a batch of migrations?
|
||||
No.
|
||||
|
||||
#### What does "dirty" database mean?
|
||||
Before a migration runs, each database sets a dirty flag. Execution stops if a migration fails and the dirty state persists,
|
||||
which prevents attempts to run more migrations on top of a failed migration. You need to manually fix the error
|
||||
and then "force" the expected version.
|
||||
|
||||
#### What happens if two programs try and update the database at the same time?
|
||||
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations against
|
||||
the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
|
||||
the `pg_advisory_lock` function.
|
||||
|
||||
#### Do I need to create a table for tracking migration version used?
|
||||
No, it is done automatically.
|
||||
|
||||
#### Can I use migrate with a non-Go project?
|
||||
Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
|
||||
43
vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md
generated
vendored
Normal file
43
vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
# Getting started
|
||||
Before you start, you should understand the concept of forward/up and reverse/down database migrations.
|
||||
|
||||
Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases)
|
||||
|
||||
## Create migrations
|
||||
Create some migrations using migrate CLI. Here is an example:
|
||||
```
|
||||
migrate create -ext sql -dir db/migrations -seq create_users_table
|
||||
```
|
||||
Once you create your files, you should fill them.
|
||||
|
||||
**IMPORTANT:** In a project developed by more than one person there is a chance of migration inconsistency - e.g. two developers can create conflicting migrations, and the developer who created their migration later gets it merged to the repository first.
|
||||
Developers and Teams should keep an eye on such cases (especially during code review).
|
||||
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.
|
||||
|
||||
Consider making your migrations idempotent - the same SQL code can be run twice in a row with the same result. This makes migrations more robust, although it gives slightly less control over the database schema - e.g. say you forgot to drop a table in the down migration. You run the down migration - the table is still there. When you run the up migration again, `CREATE TABLE` would return an error, helping you find the issue in the down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.
|
||||
|
||||
In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
|
||||
This way, if one of the commands fails, the database remains unchanged.
|
||||
|
||||
## Run migrations
|
||||
Run your migrations through the CLI or your app and check if they applied expected changes.
|
||||
Just to give you an idea:
|
||||
```
|
||||
migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up
|
||||
```
|
||||
|
||||
Just add the code to your app and you're ready to go!
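For instance, a rough sketch of running all pending migrations from inside a Go program might look like the following; the connection URL and migrations path are placeholders, not values from this repository:

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Placeholder URL and path; replace with your own values.
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://localhost:5432/database?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	// Apply all pending up migrations.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```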
|
||||
|
||||
Before committing your migrations you should run them up, down, and then up again to see if the migrations work properly both ways.
|
||||
(e.g. if you created a table in a migration but reverse migration did not delete it, you will encounter an error when running the forward migration again)
|
||||
It's also worth checking your migrations in a separate, containerized environment. You can find some tools at the end of this document.
|
||||
|
||||
**IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.
|
||||
|
||||
## Further reading:
|
||||
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
|
||||
- [Best practices](MIGRATIONS.md)
|
||||
- [FAQ](FAQ.md)
|
||||
- Tools for testing your migrations in a container:
|
||||
- https://github.com/dhui/dktest
|
||||
- https://github.com/ory/dockertest
|
||||
28
vendor/github.com/golang-migrate/migrate/v4/LICENSE
generated
vendored
Normal file
28
vendor/github.com/golang-migrate/migrate/v4/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Original Work
|
||||
Copyright (c) 2016 Matthias Kadenbach
|
||||
https://github.com/mattes/migrate
|
||||
|
||||
Modified Work
|
||||
Copyright (c) 2018 Dale Hui
|
||||
https://github.com/golang-migrate/migrate
|
||||
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
86
vendor/github.com/golang-migrate/migrate/v4/MIGRATIONS.md
generated
vendored
Normal file
86
vendor/github.com/golang-migrate/migrate/v4/MIGRATIONS.md
generated
vendored
Normal file
@@ -0,0 +1,86 @@
|
||||
# Migrations
|
||||
|
||||
## Migration Filename Format
|
||||
|
||||
A single logical migration is represented as two separate migration files, one
|
||||
to migrate "up" to the specified version from the previous version, and a second
|
||||
to migrate back "down" to the previous version. These migrations can be provided
|
||||
by any one of the supported [migration sources](./README.md#migration-sources).
|
||||
|
||||
The ordering and direction of the migration files is determined by the filenames
|
||||
used for them. `migrate` expects the filenames of migrations to have the format:
|
||||
|
||||
{version}_{title}.up.{extension}
|
||||
{version}_{title}.down.{extension}
|
||||
|
||||
The `title` of each migration is unused, and is only for readability. Similarly,
|
||||
the `extension` of the migration files is not checked by the library, and should
|
||||
be an appropriate format for the database in use (`.sql` for SQL variants, for
|
||||
instance).
|
||||
|
||||
Versions of migrations may be represented as any 64 bit unsigned integer.
|
||||
All migrations are applied upward in order of increasing version number, and
|
||||
downward by decreasing version number.
|
||||
|
||||
Common versioning schemes include incrementing integers:
|
||||
|
||||
1_initialize_schema.down.sql
|
||||
1_initialize_schema.up.sql
|
||||
2_add_table.down.sql
|
||||
2_add_table.up.sql
|
||||
...
|
||||
|
||||
Or timestamps at an appropriate resolution:
|
||||
|
||||
1500360784_initialize_schema.down.sql
|
||||
1500360784_initialize_schema.up.sql
|
||||
1500445949_add_table.down.sql
|
||||
1500445949_add_table.up.sql
|
||||
...
|
||||
|
||||
But any scheme resulting in distinct, incrementing integers as versions is valid.
|
||||
|
||||
It is suggested that the version number of corresponding `up` and `down` migration
|
||||
files be equivalent for clarity, but they are allowed to differ so long as the
|
||||
relative ordering of the migrations is preserved.
|
||||
|
||||
The migration files are permitted to be "empty", in the event that a migration
|
||||
is a no-op or is irreversible. It is recommended to still include both migration
|
||||
files by making the whole migration file consist of a comment.
|
||||
If your database does not support comments, then deleting the migration file will also work.
|
||||
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
|
||||
will attempt to run an empty query. In this case, deleting the migration file will also work.
|
||||
For the rationale behind this behavior see:
|
||||
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)
|
||||
|
||||
## Migration Content Format
|
||||
|
||||
The format of the migration files themselves varies between database systems.
|
||||
Different databases have different semantics around schema changes and when and
|
||||
how they are allowed to occur
|
||||
(for instance, [if schema changes can occur within a transaction](https://wiki.postgresql.org/wiki/Transactional_DDL_in_PostgreSQL:_A_Competitive_Analysis)).
|
||||
|
||||
As such, the `migrate` library has little to no checking around the format of
|
||||
migration sources. The migration files are generally processed directly by the
|
||||
drivers as raw operations.
|
||||
|
||||
## Reversibility of Migrations
|
||||
|
||||
Best practice for writing schema migrations is that all migrations should be
|
||||
reversible. It should in theory be possible to run migrations down and back up
|
||||
through any and all versions with the state being fully cleaned and recreated
|
||||
by doing so.
|
||||
|
||||
By adhering to this recommended practice, development and deployment of new code
|
||||
is cleaner and easier (cleaning database state for a new feature should be as
|
||||
easy as migrating down to a prior version, and back up to the latest).
|
||||
|
||||
As opposed to some other migration libraries, `migrate` represents up and down
|
||||
migrations as separate files. This prevents any non-standard file syntax from
|
||||
being introduced which may result in unintended behavior or errors, depending
|
||||
on what database is processing the file.
|
||||
|
||||
While it is technically possible for an up or down migration to exist on its own
|
||||
without an equivalently versioned counterpart, it is strongly recommended to
|
||||
always include a down migration which cleans up the state of the corresponding
|
||||
up migration.
|
||||
105
vendor/github.com/golang-migrate/migrate/v4/Makefile
generated
vendored
Normal file
105
vendor/github.com/golang-migrate/migrate/v4/Makefile
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
SOURCE ?= file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab
|
||||
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver
|
||||
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
|
||||
TEST_FLAGS ?=
|
||||
REPO_OWNER ?= $(shell cd .. && basename "$$(pwd)")
|
||||
COVERAGE_DIR ?= .coverage
|
||||
|
||||
|
||||
build-cli: clean
|
||||
-mkdir ./cli/build
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o ../../cli/build/migrate.linux-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -a -o ../../cli/build/migrate.darwin-amd64 -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cmd/migrate && CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -a -o ../../cli/build/migrate.windows-amd64.exe -ldflags='-X main.Version=$(VERSION) -extldflags "-static"' -tags '$(DATABASE) $(SOURCE)' .
|
||||
cd ./cli/build && find . -name 'migrate*' | xargs -I{} tar czf {}.tar.gz {}
|
||||
cd ./cli/build && shasum -a 256 * > sha256sum.txt
|
||||
cat ./cli/build/sha256sum.txt
|
||||
|
||||
|
||||
clean:
|
||||
-rm -r ./cli/build
|
||||
|
||||
|
||||
test-short:
|
||||
make test-with-flags --ignore-errors TEST_FLAGS='-short'
|
||||
|
||||
|
||||
test:
|
||||
@-rm -r $(COVERAGE_DIR)
|
||||
@mkdir $(COVERAGE_DIR)
|
||||
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
|
||||
|
||||
|
||||
test-with-flags:
|
||||
@echo SOURCE: $(SOURCE)
|
||||
@echo DATABASE: $(DATABASE)
|
||||
|
||||
@go test $(TEST_FLAGS) ./...
|
||||
|
||||
|
||||
kill-orphaned-docker-containers:
|
||||
docker rm -f $(shell docker ps -aq --filter label=migrate_test)
|
||||
|
||||
|
||||
html-coverage:
|
||||
go tool cover -html=$(COVERAGE_DIR)/combined.txt
|
||||
|
||||
|
||||
list-external-deps:
|
||||
$(call external_deps,'.')
|
||||
$(call external_deps,'./cli/...')
|
||||
$(call external_deps,'./testing/...')
|
||||
|
||||
$(foreach v, $(SOURCE), $(call external_deps,'./source/$(v)/...'))
|
||||
$(call external_deps,'./source/testing/...')
|
||||
$(call external_deps,'./source/stub/...')
|
||||
|
||||
$(foreach v, $(DATABASE), $(call external_deps,'./database/$(v)/...'))
|
||||
$(call external_deps,'./database/testing/...')
|
||||
$(call external_deps,'./database/stub/...')
|
||||
|
||||
|
||||
restore-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/$(REPO_OWNER)/migrate%\"github.com/mattes/migrate%g '{}' \;
|
||||
|
||||
|
||||
rewrite-import-paths:
|
||||
find . -name '*.go' -type f -execdir sed -i '' s%\"github.com/mattes/migrate%\"github.com/$(REPO_OWNER)/migrate%g '{}' \;
|
||||
|
||||
|
||||
# example: fswatch -0 --exclude .godoc.pid --event Updated . | xargs -0 -n1 -I{} make docs
|
||||
docs:
|
||||
-make kill-docs
|
||||
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
|
||||
cat .godoc.pid
|
||||
|
||||
|
||||
kill-docs:
|
||||
@cat .godoc.pid
|
||||
kill -9 $$(cat .godoc.pid)
|
||||
rm .godoc.pid
|
||||
|
||||
|
||||
open-docs:
|
||||
open http://localhost:6064/pkg/github.com/$(REPO_OWNER)/migrate
|
||||
|
||||
|
||||
# example: make release V=0.0.0
|
||||
release:
|
||||
git tag v$(V)
|
||||
@read -p "Press enter to confirm and push to origin ..." && git push origin v$(V)
|
||||
|
||||
|
||||
define external_deps
|
||||
@echo '-- $(1)'; go list -f '{{join .Deps "\n"}}' $(1) | grep -v github.com/$(REPO_OWNER)/migrate | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}'
|
||||
|
||||
endef
|
||||
|
||||
|
||||
.PHONY: build-cli clean test-short test test-with-flags html-coverage \
|
||||
restore-import-paths rewrite-import-paths list-external-deps release \
|
||||
docs kill-docs open-docs kill-orphaned-docker-containers
|
||||
|
||||
SHELL = /bin/bash
|
||||
RAND = $(shell echo $$RANDOM)
|
||||
|
||||
181
vendor/github.com/golang-migrate/migrate/v4/README.md
generated
vendored
Normal file
181
vendor/github.com/golang-migrate/migrate/v4/README.md
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
[](https://travis-ci.com/golang-migrate/migrate)
|
||||
[](https://godoc.org/github.com/golang-migrate/migrate)
|
||||
[](https://coveralls.io/github/golang-migrate/migrate?branch=master)
|
||||
[](https://packagecloud.io/golang-migrate/migrate?filter=debs)
|
||||
[](https://hub.docker.com/r/migrate/migrate/)
|
||||

|
||||
[](https://github.com/golang-migrate/migrate/releases)
|
||||
[](https://goreportcard.com/report/github.com/golang-migrate/migrate)
|
||||
|
||||
# migrate
|
||||
|
||||
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
|
||||
|
||||
* Migrate reads migrations from [sources](#migration-sources)
|
||||
and applies them in correct order to a [database](#databases).
|
||||
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
|
||||
(Keeps the drivers lightweight, too.)
|
||||
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
|
||||
|
||||
Forked from [mattes/migrate](https://github.com/mattes/migrate)
|
||||
|
||||
## Databases
|
||||
|
||||
Database drivers run migrations. [Add a new database?](database/driver.go)
|
||||
|
||||
* [PostgreSQL](database/postgres)
|
||||
* [Redshift](database/redshift)
|
||||
* [Ql](database/ql)
|
||||
* [Cassandra](database/cassandra)
|
||||
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
|
||||
* [MySQL/ MariaDB](database/mysql)
|
||||
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
|
||||
* [MongoDB](database/mongodb)
|
||||
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
|
||||
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
|
||||
* [Google Cloud Spanner](database/spanner)
|
||||
* [CockroachDB](database/cockroachdb)
|
||||
* [ClickHouse](database/clickhouse)
|
||||
* [Firebird](database/firebird) ([todo #49](https://github.com/golang-migrate/migrate/issues/49))
|
||||
* [MS SQL Server](database/sqlserver)
|
||||
|
||||
### Database URLs
|
||||
|
||||
Database connection strings are specified via URLs. The URL format is driver dependent but generally has the form: `dbdriver://username:password@host:port/dbname?option1=true&option2=false`
|
||||
|
||||
Any [reserved URL characters](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_reserved_characters) need to be escaped. Note, the `%` character also [needs to be escaped](https://en.wikipedia.org/wiki/Percent-encoding#Percent-encoding_the_percent_character)
|
||||
|
||||
Explicitly, the following characters need to be escaped:
|
||||
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
|
||||
|
||||
It's easiest to always run the URL parts of your DB connection URL (e.g. username, password, etc.) through a URL encoder. See the example Python snippets below:
|
||||
|
||||
```bash
|
||||
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$ python2 -c 'import urllib; print urllib.quote(raw_input("String to encode: "), "")'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
FAKEpassword%21%23%24%25%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D
|
||||
$
|
||||
```
|
||||
|
||||
## Migration Sources
|
||||
|
||||
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
|
||||
|
||||
* [Filesystem](source/file) - read from filesystem
|
||||
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
|
||||
* [Github](source/github) - read from remote Github repositories
|
||||
* [Github Enterprise](source/github_ee) - read from remote Github Enterprise repositories
|
||||
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
|
||||
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
|
||||
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
|
||||
|
||||
## CLI usage
|
||||
|
||||
* Simple wrapper around this library.
|
||||
* Handles ctrl+c (SIGINT) gracefully.
|
||||
* No config search paths, no config files, no magic ENV var injections.
|
||||
|
||||
__[CLI Documentation](cmd/migrate)__
|
||||
|
||||
### Basic usage
|
||||
|
||||
```bash
|
||||
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
### Docker usage
|
||||
|
||||
```bash
|
||||
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate
|
||||
-path=/migrations/ -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
## Use in your Go project
|
||||
|
||||
* API is stable and frozen for this release (v3 & v4).
|
||||
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
|
||||
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
|
||||
* Bring your own logger.
|
||||
* Uses `io.Reader` streams internally for low memory overhead.
|
||||
* Thread-safe and no goroutine leaks.
|
||||
|
||||
__[Go Documentation](https://godoc.org/github.com/golang-migrate/migrate)__
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
_ "github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/github"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m, err := migrate.New(
|
||||
"github://mattes:personal-access-token@mattes/migrate_test",
|
||||
"postgres://localhost:5432/database?sslmode=enable")
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
Want to use an existing database client?
|
||||
|
||||
```go
|
||||
import (
|
||||
"database/sql"
|
||||
_ "github.com/lib/pq"
|
||||
"github.com/golang-migrate/migrate/v4"
|
||||
"github.com/golang-migrate/migrate/v4/database/postgres"
|
||||
_ "github.com/golang-migrate/migrate/v4/source/file"
|
||||
)
|
||||
|
||||
func main() {
|
||||
db, err := sql.Open("postgres", "postgres://localhost:5432/database?sslmode=enable")
|
||||
driver, err := postgres.WithInstance(db, &postgres.Config{})
|
||||
m, err := migrate.NewWithDatabaseInstance(
|
||||
"file:///migrations",
|
||||
"postgres", driver)
|
||||
m.Steps(2)
|
||||
}
|
||||
```
|
||||
|
||||
## Getting started
|
||||
|
||||
Go to [getting started](GETTING_STARTED.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
- [PostgreSQL](database/postgres/TUTORIAL.md)
|
||||
|
||||
(more tutorials to come)
|
||||
|
||||
## Migration files
|
||||
|
||||
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
|
||||
|
||||
```bash
|
||||
1481574547_create_users_table.up.sql
|
||||
1481574547_create_users_table.down.sql
|
||||
```
|
||||
|
||||
[Best practices: How to write migrations.](MIGRATIONS.md)
|
||||
|
||||
## Versions
|
||||
|
||||
Version | Supported? | Import | Notes
|
||||
--------|------------|--------|------
|
||||
**master** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | New features and bug fixes arrive here first |
|
||||
**v4** | :white_check_mark: | `import "github.com/golang-migrate/migrate/v4"` | Used for stable releases |
|
||||
**v3** | :x: | `import "github.com/golang-migrate/migrate"` (with package manager) or `import "gopkg.in/golang-migrate/migrate.v3"` (not recommended) | **DO NOT USE** - No longer supported |
|
||||
|
||||
## Development and Contributing
|
||||
|
||||
Yes, please! [`Makefile`](Makefile) is your friend,
|
||||
read the [development guide](CONTRIBUTING.md).
|
||||
|
||||
Also have a look at the [FAQ](FAQ.md).
|
||||
|
||||
---
|
||||
|
||||
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).
|
||||
122
vendor/github.com/golang-migrate/migrate/v4/database/driver.go
generated
vendored
Normal file
122
vendor/github.com/golang-migrate/migrate/v4/database/driver.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
// Package database provides the Database interface.
|
||||
// All database drivers must implement this interface, register themselves,
|
||||
// optionally provide a `WithInstance` function and pass the tests
|
||||
// in package database/testing.
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
iurl "github.com/golang-migrate/migrate/v4/internal/url"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrLocked = fmt.Errorf("can't acquire lock")
|
||||
)
|
||||
|
||||
const NilVersion int = -1
|
||||
|
||||
var driversMu sync.RWMutex
|
||||
var drivers = make(map[string]Driver)
|
||||
|
||||
// Driver is the interface every database driver must implement.
|
||||
//
|
||||
// How to implement a database driver?
|
||||
// 1. Implement this interface.
|
||||
// 2. Optionally, add a function named `WithInstance`.
|
||||
// This function should accept an existing DB instance and a Config{} struct
|
||||
// and return a driver instance.
|
||||
// 3. Add a test that calls database/testing.go:Test()
|
||||
// 4. Add own tests for Open(), WithInstance() (when provided) and Close().
|
||||
// All other functions are tested by tests in database/testing.
|
||||
// Saves you some time and makes sure all database drivers behave the same way.
|
||||
// 5. Call Register in init().
|
||||
// 6. Create a migrate/cli/build_<driver-name>.go file
|
||||
// 7. Add driver name in 'DATABASE' variable in Makefile
|
||||
//
|
||||
// Guidelines:
|
||||
// * Don't try to correct user input. Don't assume things.
|
||||
// When in doubt, return an error and explain the situation to the user.
|
||||
// * All configuration input must come from the URL string in func Open()
|
||||
// or the Config{} struct in WithInstance. Don't os.Getenv().
|
||||
type Driver interface {
|
||||
// Open returns a new driver instance configured with parameters
|
||||
// coming from the URL string. Migrate will call this function
|
||||
// only once per instance.
|
||||
Open(url string) (Driver, error)
|
||||
|
||||
// Close closes the underlying database instance managed by the driver.
|
||||
// Migrate will call this function only once per instance.
|
||||
Close() error
|
||||
|
||||
// Lock should acquire a database lock so that only one migration process
|
||||
// can run at a time. Migrate will call this function before Run is called.
|
||||
// If the implementation can't provide this functionality, return nil.
|
||||
// Return database.ErrLocked if database is already locked.
|
||||
Lock() error
|
||||
|
||||
// Unlock should release the lock. Migrate will call this function after
|
||||
// all migrations have been run.
|
||||
Unlock() error
|
||||
|
||||
// Run applies a migration to the database. migration is guaranteed to not be nil.
|
||||
Run(migration io.Reader) error
|
||||
|
||||
// SetVersion saves version and dirty state.
|
||||
// Migrate will call this function before and after each call to Run.
|
||||
// version must be >= -1. -1 means NilVersion.
|
||||
SetVersion(version int, dirty bool) error
|
||||
|
||||
// Version returns the currently active version and if the database is dirty.
|
||||
// When no migration has been applied, it must return version -1.
|
||||
// Dirty means, a previous migration failed and user interaction is required.
|
||||
Version() (version int, dirty bool, err error)
|
||||
|
||||
// Drop deletes everything in the database.
|
||||
// Note that this is a breaking action, a new call to Open() is necessary to
|
||||
// ensure subsequent calls work as expected.
|
||||
Drop() error
|
||||
}
|
||||
|
||||
// Open returns a new driver instance.
|
||||
func Open(url string) (Driver, error) {
|
||||
scheme, err := iurl.SchemeFromURL(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driversMu.RLock()
|
||||
d, ok := drivers[scheme]
|
||||
driversMu.RUnlock()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", scheme)
|
||||
}
|
||||
|
||||
return d.Open(url)
|
||||
}
|
||||
|
||||
// Register globally registers a driver.
|
||||
func Register(name string, driver Driver) {
|
||||
driversMu.Lock()
|
||||
defer driversMu.Unlock()
|
||||
if driver == nil {
|
||||
panic("Register driver is nil")
|
||||
}
|
||||
if _, dup := drivers[name]; dup {
|
||||
panic("Register called twice for driver " + name)
|
||||
}
|
||||
drivers[name] = driver
|
||||
}
|
||||
|
||||
// List lists the registered drivers
|
||||
func List() []string {
|
||||
driversMu.RLock()
|
||||
defer driversMu.RUnlock()
|
||||
names := make([]string, 0, len(drivers))
|
||||
for n := range drivers {
|
||||
names = append(names, n)
|
||||
}
|
||||
return names
|
||||
}
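To make the interface above concrete, here is a hedged sketch of a do-nothing driver; the `example` scheme, the `exampleDriver` type, and its fields are assumptions for illustration and not a driver shipped with migrate:

```go
package example

import (
	"io"
	"io/ioutil"

	"github.com/golang-migrate/migrate/v4/database"
)

func init() {
	// Register under an assumed scheme so Open("example://...") can find it.
	database.Register("example", &exampleDriver{version: database.NilVersion})
}

type exampleDriver struct {
	version int
	dirty   bool
	locked  bool
}

func (d *exampleDriver) Open(url string) (database.Driver, error) { return d, nil }

func (d *exampleDriver) Close() error { return nil }

func (d *exampleDriver) Lock() error {
	if d.locked {
		return database.ErrLocked
	}
	d.locked = true
	return nil
}

func (d *exampleDriver) Unlock() error {
	d.locked = false
	return nil
}

// Run would normally apply the migration to a real database; here it only
// drains the reader so the rest of the machinery can proceed.
func (d *exampleDriver) Run(migration io.Reader) error {
	_, err := ioutil.ReadAll(migration)
	return err
}

func (d *exampleDriver) SetVersion(version int, dirty bool) error {
	d.version, d.dirty = version, dirty
	return nil
}

func (d *exampleDriver) Version() (int, bool, error) {
	return d.version, d.dirty, nil
}

func (d *exampleDriver) Drop() error { return nil }
```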
|
||||
27
vendor/github.com/golang-migrate/migrate/v4/database/error.go
generated
vendored
Normal file
27
vendor/github.com/golang-migrate/migrate/v4/database/error.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error should be used for errors involving queries ran against the database
|
||||
type Error struct {
|
||||
// Optional: the line number
|
||||
Line uint
|
||||
|
||||
// Query is a query excerpt
|
||||
Query []byte
|
||||
|
||||
// Err is a useful/helping error message for humans
|
||||
Err string
|
||||
|
||||
// OrigErr is the underlying error
|
||||
OrigErr error
|
||||
}
|
||||
|
||||
func (e Error) Error() string {
|
||||
if len(e.Err) == 0 {
|
||||
return fmt.Sprintf("%v in line %v: %s", e.OrigErr, e.Line, e.Query)
|
||||
}
|
||||
return fmt.Sprintf("%v in line %v: %s (details: %v)", e.Err, e.Line, e.Query, e.OrigErr)
|
||||
}
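A brief hedged sketch of how a driver might surface this type; the query text and line number are invented for the example:

```go
package main

import (
	"fmt"
	"io"

	"github.com/golang-migrate/migrate/v4/database"
)

func main() {
	// Wrap an underlying error together with the query excerpt it came from.
	err := database.Error{
		Line:    3,
		Query:   []byte("CREATE TABLE users (...)"),
		OrigErr: io.ErrUnexpectedEOF,
	}
	// Prints: unexpected EOF in line 3: CREATE TABLE users (...)
	fmt.Println(err.Error())
}
```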
|
||||
19
vendor/github.com/golang-migrate/migrate/v4/database/util.go
generated
vendored
Normal file
19
vendor/github.com/golang-migrate/migrate/v4/database/util.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
package database
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const advisoryLockIDSalt uint = 1486364155
|
||||
|
||||
// GenerateAdvisoryLockId inspired by rails migrations, see https://goo.gl/8o9bCT
|
||||
func GenerateAdvisoryLockId(databaseName string, additionalNames ...string) (string, error) { // nolint: golint
|
||||
if len(additionalNames) > 0 {
|
||||
databaseName = strings.Join(append(additionalNames, databaseName), "\x00")
|
||||
}
|
||||
sum := crc32.ChecksumIEEE([]byte(databaseName))
|
||||
sum = sum * uint32(advisoryLockIDSalt)
|
||||
return fmt.Sprint(sum), nil
|
||||
}
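A small sketch of how a driver might call this helper; the database and schema names are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang-migrate/migrate/v4/database"
)

func main() {
	// The same inputs always yield the same numeric lock ID string, which a
	// driver such as postgres can pass to pg_advisory_lock.
	lockID, err := database.GenerateAdvisoryLockId("mydb", "myschema")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(lockID)
}
```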
|
||||
5
vendor/github.com/golang-migrate/migrate/v4/docker-deploy.sh
generated
vendored
Normal file
5
vendor/github.com/golang-migrate/migrate/v4/docker-deploy.sh
generated
vendored
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin && \
|
||||
docker build --build-arg VERSION="$TRAVIS_TAG" . -t migrate/migrate -t migrate/migrate:"$TRAVIS_TAG" && \
|
||||
docker push migrate/migrate:"$TRAVIS_TAG" && docker push migrate/migrate
|
||||
56 vendor/github.com/golang-migrate/migrate/v4/go.mod generated vendored Normal file
@@ -0,0 +1,56 @@
module github.com/golang-migrate/migrate/v4

require (
	cloud.google.com/go v0.37.4
	github.com/aws/aws-sdk-go v1.17.7
	github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
	github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
	github.com/cockroachdb/apd v1.1.0 // indirect
	github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
	github.com/containerd/containerd v1.2.7 // indirect
	github.com/cznic/ql v1.2.0
	github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3
	github.com/dhui/dktest v0.3.0
	github.com/docker/docker v0.7.3-0.20190817195342-4760db040282
	github.com/fsouza/fake-gcs-server v1.7.0
	github.com/go-sql-driver/mysql v1.4.1
	github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4
	github.com/gogo/protobuf v1.2.1 // indirect
	github.com/golang/protobuf v1.3.1 // indirect
	github.com/golang/snappy v0.0.1 // indirect
	github.com/google/go-github v17.0.0+incompatible
	github.com/hashicorp/go-multierror v1.0.0
	github.com/hashicorp/golang-lru v0.5.1 // indirect
	github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect
	github.com/jackc/pgx v3.2.0+incompatible // indirect
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect
	github.com/kr/pretty v0.1.0 // indirect
	github.com/kshvakov/clickhouse v1.3.5
	github.com/lib/pq v1.0.0
	github.com/mattn/go-sqlite3 v1.10.0
	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
	github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8
	github.com/pkg/errors v0.8.1 // indirect
	github.com/satori/go.uuid v1.2.0 // indirect
	github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect
	github.com/sirupsen/logrus v1.4.1 // indirect
	github.com/stretchr/testify v1.3.0
	github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 // indirect
	github.com/xanzy/go-gitlab v0.15.0
	github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
	github.com/xdg/stringprep v1.0.0 // indirect
	gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b // indirect
	go.mongodb.org/mongo-driver v1.1.0
	golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
	golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6
	golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
	golang.org/x/sys v0.0.0-20190426135247-a129542de9ae // indirect
	golang.org/x/text v0.3.2 // indirect
	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
	golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a
	google.golang.org/api v0.4.0
	google.golang.org/appengine v1.5.0 // indirect
	google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb
	google.golang.org/grpc v1.20.1 // indirect
)
308 vendor/github.com/golang-migrate/migrate/v4/go.sum generated vendored Normal file
@@ -0,0 +1,308 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
|
||||
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
|
||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw=
|
||||
github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/aws/aws-sdk-go v1.17.7 h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA=
|
||||
github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
|
||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f h1:7uSNgsgcarNk4oiN/nNkO0J7KAjlsF5Yv5Gf/tFdHas=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4 h1:CVAqftqbj+exlab+8KJQrE+kNIVlQfJt58j4GxCMF1s=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00 h1:FHpbUtp2K8X53/b4aFNj4my5n+i3x+CQCZWNuHWH/+E=
|
||||
github.com/cznic/internal v0.0.0-20180608152220-f44710a21d00/go.mod h1:olo7eAdKwJdXxb55TKGLiJ6xt1H0/tiiRCWKVLmtjY4=
|
||||
github.com/cznic/lldb v1.1.0 h1:AIA+ham6TSJ+XkMe8imQ/g8KPzMUVWAwqUQQdtuMsHs=
|
||||
github.com/cznic/lldb v1.1.0/go.mod h1:FIZVUmYUVhPwRiPzL8nD/mpFcJ/G7SSXjjXYG4uRI3A=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369 h1:XNT/Zf5l++1Pyg08/HV04ppB0gKxAqtZQBRYiYrUuYk=
|
||||
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
|
||||
github.com/cznic/ql v1.2.0 h1:lcKp95ZtdF0XkWhGnVIXGF8dVD2X+ClS08tglKtf+ak=
|
||||
github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65 h1:hxuZop6tSoOi0sxFzoGGYdRqNrPubyaIf9KoBG9tPiE=
|
||||
github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 h1:0rkFMAbn5KBKNpJyHQ6Prb95vIKanmAe62KxsrN+sqA=
|
||||
github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc h1:YKKpTb2BrXN2GYyGaygIdis1vXbE7SSAG9axGWIMClg=
|
||||
github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 h1:tkum0XDgfR0jcVVXuTsYv/erY2NnEDqwRojbxR1rBYA=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
|
||||
github.com/dhui/dktest v0.3.0 h1:kwX5a7EkLcjo7VpsPQSYJcKGbXBXdjI9FGjuUj1jn6I=
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
|
||||
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0 h1:Un0BXUXrRWYSmYyC1Rqm2e2WJfTPyDy/HGMz31emTi8=
|
||||
github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4 h1:vF83LI8tAakwEwvWZtrIEx7pOySacl2TOxx6eXk4ePo=
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU=
|
||||
github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
|
||||
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc=
|
||||
github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ=
|
||||
github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY=
|
||||
github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kshvakov/clickhouse v1.3.5 h1:PDTYk9VYgbjPAWry3AoDREeMgOVUFij6bh6IjlloHL0=
|
||||
github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE=
|
||||
github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
|
||||
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
|
||||
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
|
||||
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51 h1:BP2bjP495BBPaBcS5rmqviTfrOkN5rO5ceKAMRZCRFc=
|
||||
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/xanzy/go-gitlab v0.15.0 h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ=
|
||||
github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.mongodb.org/mongo-driver v1.1.0 h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo=
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6 h1:FP8hkuE6yUEaJnK7O2eTuejKWwW+Rhfj80dQ2JcKxCU=
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190102155601-82a175fd1598/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae h1:mQLHiymj/JXKnnjc62tb7nD5pZLs940/sXJu+Xp3DBA=
|
||||
golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a h1:jd4PGQGmrzmDZANUzIol3eClsCB/Jp5GmpGWMhi6hnY=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2 h1:iTp+3yyl/KOtxa/d1/JUE0GGSoR6FuW5udver22iwpw=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
25 vendor/github.com/golang-migrate/migrate/v4/internal/url/url.go generated vendored Normal file
@@ -0,0 +1,25 @@
package url

import (
	"errors"
	"strings"
)

var errNoScheme = errors.New("no scheme")
var errEmptyURL = errors.New("URL cannot be empty")

// SchemeFromURL returns the scheme from a URL string
func SchemeFromURL(url string) (string, error) {
	if url == "" {
		return "", errEmptyURL
	}

	i := strings.Index(url, ":")

	// No : or : is the first character.
	if i < 1 {
		return "", errNoScheme
	}

	return url[0:i], nil
}
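Since this is an internal package, outside callers never import it directly; a hypothetical table-driven test living next to this file (not part of this diff) illustrates the parsing behaviour:

package url

import "testing"

// TestSchemeFromURL illustrates SchemeFromURL: everything before the first ':'
// is the scheme, and an empty or scheme-less URL is an error.
func TestSchemeFromURL(t *testing.T) {
	cases := []struct {
		in      string
		scheme  string
		wantErr bool
	}{
		{"file://migrations", "file", false},
		{"postgres://localhost:5432/db", "postgres", false},
		{"", "", true},               // errEmptyURL
		{"no-scheme-here", "", true}, // errNoScheme
		{":memory", "", true},        // ':' as first character also means no scheme
	}
	for _, c := range cases {
		got, err := SchemeFromURL(c.in)
		if (err != nil) != c.wantErr {
			t.Fatalf("SchemeFromURL(%q) error = %v, wantErr %v", c.in, err, c.wantErr)
		}
		if got != c.scheme {
			t.Fatalf("SchemeFromURL(%q) = %q, want %q", c.in, got, c.scheme)
		}
	}
}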
12 vendor/github.com/golang-migrate/migrate/v4/log.go generated vendored Normal file
@@ -0,0 +1,12 @@
package migrate

// Logger is an interface so you can pass in your own
// logging implementation.
type Logger interface {

	// Printf is like fmt.Printf
	Printf(format string, v ...interface{})

	// Verbose should return true when verbose logging output is wanted
	Verbose() bool
}
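As an illustration of this interface, a minimal hypothetical adapter around the standard library logger; the package and type names are made up:

package logadapter

import (
	"log"
	"os"
)

// StdLogger adapts the standard library logger to migrate's Logger interface.
type StdLogger struct {
	L         *log.Logger
	IsVerbose bool
}

// Printf forwards formatted output to the wrapped *log.Logger.
func (s StdLogger) Printf(format string, v ...interface{}) {
	s.L.Printf(format, v...)
}

// Verbose reports whether verbose migration logging is wanted.
func (s StdLogger) Verbose() bool {
	return s.IsVerbose
}

// New returns a StdLogger writing to stderr with a "migrate: " prefix.
func New(verbose bool) StdLogger {
	return StdLogger{L: log.New(os.Stderr, "migrate: ", log.LstdFlags), IsVerbose: verbose}
}

An instance of this type would then be assigned to the Log field on the Migrate value defined below.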
980 vendor/github.com/golang-migrate/migrate/v4/migrate.go generated vendored Normal file
@@ -0,0 +1,980 @@
// Package migrate reads migrations from sources and runs them against databases.
// Sources are defined by the `source.Driver` and databases by the `database.Driver`
// interface. The driver interfaces are kept "dumb"; all migration logic is kept
// in this package.
package migrate

import (
	"errors"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/hashicorp/go-multierror"

	"github.com/golang-migrate/migrate/v4/database"
	iurl "github.com/golang-migrate/migrate/v4/internal/url"
	"github.com/golang-migrate/migrate/v4/source"
)

// DefaultPrefetchMigrations sets the number of migrations to pre-read
// from the source. This is helpful if the source is remote, but has little
// effect for a local source (i.e. file system).
// Please note that this setting has a major impact on the memory usage,
// since each pre-read migration is buffered in memory. See DefaultBufferSize.
var DefaultPrefetchMigrations = uint(10)

// DefaultLockTimeout sets the max time a database driver has to acquire a lock.
var DefaultLockTimeout = 15 * time.Second

var (
	ErrNoChange       = errors.New("no change")
	ErrNilVersion     = errors.New("no migration")
	ErrInvalidVersion = errors.New("version must be >= -1")
	ErrLocked         = errors.New("database locked")
	ErrLockTimeout    = errors.New("timeout: can't acquire database lock")
)

// ErrShortLimit is an error returned when not enough migrations
// can be returned by a source for a given limit.
type ErrShortLimit struct {
	Short uint
}

// Error implements the error interface.
func (e ErrShortLimit) Error() string {
	return fmt.Sprintf("limit %v short", e.Short)
}

// ErrDirty is returned when the database is in a dirty state, i.e. a previous
// migration failed part-way and the version has to be fixed and forced manually.
type ErrDirty struct {
	Version int
}

func (e ErrDirty) Error() string {
	return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version)
}

// Migrate bundles a migration source driver and a database driver
// together with the options for a migration run.
type Migrate struct {
	sourceName   string
	sourceDrv    source.Driver
	databaseName string
	databaseDrv  database.Driver

	// Log accepts a Logger interface
	Log Logger

	// GracefulStop accepts `true` and will stop executing migrations
	// as soon as possible at a safe break point, so that the database
	// is not corrupted.
	GracefulStop chan bool
	isLockedMu   *sync.Mutex

	isGracefulStop bool
	isLocked       bool

	// PrefetchMigrations defaults to DefaultPrefetchMigrations,
	// but can be set per Migrate instance.
	PrefetchMigrations uint

	// LockTimeout defaults to DefaultLockTimeout,
	// but can be set per Migrate instance.
	LockTimeout time.Duration
}

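A hypothetical caller-side sketch of the GracefulStop channel described above, forwarding SIGINT so the in-flight migration finishes at a safe break point; the helper package and function are assumptions, not part of this file:

package migrations

import (
	"log"
	"os"
	"os/signal"

	"github.com/golang-migrate/migrate/v4"
)

// upWithGracefulStop forwards SIGINT to GracefulStop so the current migration
// finishes at a safe break point before the run stops.
func upWithGracefulStop(m *migrate.Migrate) error {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt)

	go func() {
		<-sigs
		log.Println("interrupt received, stopping after the current migration")
		m.GracefulStop <- true
	}()

	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		return err
	}
	return nil
}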
// New returns a new Migrate instance from a source URL and a database URL.
|
||||
// The URL scheme is defined by each driver.
|
||||
func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseName = databaseName
|
||||
|
||||
sourceDrv, err := source.Open(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceDrv = sourceDrv
|
||||
|
||||
databaseDrv, err := database.Open(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseDrv = databaseDrv
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
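To ground the constructor above, a minimal hypothetical caller using the file source and postgres database drivers that ship with this module; the migrations path, DSN and target version are made up:

package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Both URL schemes are what SchemeFromURL extracts to pick the drivers.
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://user:pass@localhost:5432/app?sslmode=disable",
	)
	if err != nil {
		log.Fatal(err)
	}
	defer m.Close()

	// Move the schema to version 3, up or down, whichever direction is needed.
	if err := m.Migrate(3); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}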
// NewWithDatabaseInstance returns a new Migrate instance from a source URL
|
||||
// and an existing database instance. The source URL scheme is defined by each driver.
|
||||
// Use any string that can serve as an identifier during logging as databaseName.
|
||||
// You are responsible for closing the underlying database client if necessary.
|
||||
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
m.databaseName = databaseName
|
||||
|
||||
sourceDrv, err := source.Open(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceDrv = sourceDrv
|
||||
|
||||
m.databaseDrv = databaseInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewWithSourceInstance returns a new Migrate instance from an existing source instance
|
||||
// and a database URL. The database URL scheme is defined by each driver.
|
||||
// Use any string that can serve as an identifier during logging as sourceName.
|
||||
// You are responsible for closing the underlying source client if necessary.
|
||||
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseName = databaseName
|
||||
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseDrv, err := database.Open(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.databaseDrv = databaseDrv
|
||||
|
||||
m.sourceDrv = sourceInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// NewWithInstance returns a new Migrate instance from an existing source and
|
||||
// database instance. Use any string that can serve as an identifier during logging
|
||||
// as sourceName and databaseName. You are responsible for closing down
|
||||
// the underlying source and database client if necessary.
|
||||
func NewWithInstance(sourceName string, sourceInstance source.Driver, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
m.sourceName = sourceName
|
||||
m.databaseName = databaseName
|
||||
|
||||
m.sourceDrv = sourceInstance
|
||||
m.databaseDrv = databaseInstance
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func newCommon() *Migrate {
|
||||
return &Migrate{
|
||||
GracefulStop: make(chan bool, 1),
|
||||
PrefetchMigrations: DefaultPrefetchMigrations,
|
||||
LockTimeout: DefaultLockTimeout,
|
||||
isLockedMu: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the source and the database.
|
||||
func (m *Migrate) Close() (source error, database error) {
|
||||
databaseSrvClose := make(chan error)
|
||||
sourceSrvClose := make(chan error)
|
||||
|
||||
m.logVerbosePrintf("Closing source and database\n")
|
||||
|
||||
go func() {
|
||||
databaseSrvClose <- m.databaseDrv.Close()
|
||||
}()
|
||||
|
||||
go func() {
|
||||
sourceSrvClose <- m.sourceDrv.Close()
|
||||
}()
|
||||
|
||||
return <-sourceSrvClose, <-databaseSrvClose
|
||||
}
|
||||
|
||||
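Close returns two errors, one per driver. A small hypothetical helper (not part of this file) that logs both rather than silently dropping one:

package migrations

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
)

// closeMigrate logs both errors returned by Close: one for the source driver
// and one for the database driver.
func closeMigrate(m *migrate.Migrate) {
	srcErr, dbErr := m.Close()
	if srcErr != nil {
		log.Printf("closing migration source: %v", srcErr)
	}
	if dbErr != nil {
		log.Printf("closing migration database: %v", dbErr)
	}
}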
// Migrate looks at the currently active migration version,
|
||||
// then migrates either up or down to the specified version.
|
||||
func (m *Migrate) Migrate(version uint) error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
go m.read(curVersion, int(version), ret)
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Steps looks at the currently active migration version.
|
||||
// It will migrate up if n > 0, and down if n < 0.
|
||||
func (m *Migrate) Steps(n int) error {
|
||||
if n == 0 {
|
||||
return ErrNoChange
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
if n > 0 {
|
||||
go m.readUp(curVersion, n, ret)
|
||||
} else {
|
||||
go m.readDown(curVersion, -n, ret)
|
||||
}
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
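A hedged sketch of using Steps for a one-step rollback; the os.ErrNotExist handling assumes unlockErr passes the source error through unchanged, which the surrounding code suggests but this diff does not show:

package migrations

import (
	"os"

	"github.com/golang-migrate/migrate/v4"
)

// rollbackOne applies exactly one down migration from the current version;
// Steps(2) would instead apply the next two up migrations.
func rollbackOne(m *migrate.Migrate) error {
	if err := m.Steps(-1); err != nil {
		if err == os.ErrNotExist {
			// Already at the nil version: nothing left to undo.
			return nil
		}
		return err
	}
	return nil
}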
// Up looks at the currently active migration version
|
||||
// and will migrate all the way up (applying all up migrations).
|
||||
func (m *Migrate) Up() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
go m.readUp(curVersion, -1, ret)
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Down looks at the currently active migration version
|
||||
// and will migrate all the way down (applying all down migrations).
|
||||
func (m *Migrate) Down() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
go m.readDown(curVersion, -1, ret)
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Drop deletes everything in the database.
|
||||
func (m *Migrate) Drop() error {
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := m.databaseDrv.Drop(); err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
return m.unlock()
|
||||
}
|
||||
|
||||
// Run runs any migration provided by you against the database.
|
||||
// It does not check any currently active version in the database.
|
||||
// Usually you don't need this function at all. Use Migrate,
|
||||
// Steps, Up or Down instead.
|
||||
func (m *Migrate) Run(migration ...*Migration) error {
|
||||
if len(migration) == 0 {
|
||||
return ErrNoChange
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
curVersion, dirty, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
if dirty {
|
||||
return m.unlockErr(ErrDirty{curVersion})
|
||||
}
|
||||
|
||||
ret := make(chan interface{}, m.PrefetchMigrations)
|
||||
|
||||
go func() {
|
||||
defer close(ret)
|
||||
for _, migr := range migration {
|
||||
if m.PrefetchMigrations > 0 && migr.Body != nil {
|
||||
m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
|
||||
} else {
|
||||
m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func(migr *Migration) {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}(migr)
|
||||
}
|
||||
}()
|
||||
|
||||
return m.unlockErr(m.runMigrations(ret))
|
||||
}
|
||||
|
||||
// Force sets a migration version.
|
||||
// It does not check any currently active version in the database.
|
||||
// It resets the dirty state to false.
|
||||
func (m *Migrate) Force(version int) error {
|
||||
if version < -1 {
|
||||
return ErrInvalidVersion
|
||||
}
|
||||
|
||||
if err := m.lock(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := m.databaseDrv.SetVersion(version, false); err != nil {
|
||||
return m.unlockErr(err)
|
||||
}
|
||||
|
||||
return m.unlock()
|
||||
}
|
||||
|
||||
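A hedged sketch of how ErrDirty and Force fit together. Note that Up may wrap errors with go-multierror when unlocking also fails, so the plain type assertion here covers only the common case; whether to force the failed version or the previous one depends on how much of the failed migration actually ran:

package migrations

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
)

// upReportingDirty runs Up and, when the database is reported dirty, tells the
// operator which version to inspect. Force should only be called after checking
// the schema by hand; forcing blindly can hide a half-applied change.
func upReportingDirty(m *migrate.Migrate) error {
	err := m.Up()
	if dirty, ok := err.(migrate.ErrDirty); ok {
		log.Printf("database is dirty at version %d; fix the schema, then call "+
			"m.Force(%d) to clear the dirty flag and re-run", dirty.Version, dirty.Version)
	}
	return err
}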
// Version returns the currently active migration version.
|
||||
// If no migration has been applied yet, it will return ErrNilVersion.
|
||||
func (m *Migrate) Version() (version uint, dirty bool, err error) {
|
||||
v, d, err := m.databaseDrv.Version()
|
||||
if err != nil {
|
||||
return 0, false, err
|
||||
}
|
||||
|
||||
if v == database.NilVersion {
|
||||
return 0, false, ErrNilVersion
|
||||
}
|
||||
|
||||
return suint(v), d, nil
|
||||
}
|
||||
|
||||
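A small hypothetical helper showing the intended ErrNilVersion handling, since a fresh database with no applied migrations is not a failure for most callers:

package migrations

import (
	"fmt"

	"github.com/golang-migrate/migrate/v4"
)

// describeVersion renders the current schema version for logging.
func describeVersion(m *migrate.Migrate) (string, error) {
	v, dirty, err := m.Version()
	if err == migrate.ErrNilVersion {
		return "no migrations applied yet", nil
	}
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("version %d (dirty=%v)", v, dirty), nil
}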
// read reads either up or down migrations from source `from` to `to`.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once read is done reading it will close the ret channel.
|
||||
func (m *Migrate) read(from int, to int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// check if to version exists
|
||||
if to >= 0 {
|
||||
if err := m.versionExists(suint(to)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// no change?
|
||||
if from == to {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
if from < to {
|
||||
// it's going up
|
||||
// apply first migration if from is nil version
|
||||
if from == -1 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, int(firstVersion))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(firstVersion)
|
||||
}
|
||||
|
||||
// run until we reach target ...
|
||||
for from < to {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
next, err := m.sourceDrv.Next(suint(from))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(next, int(next))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(next)
|
||||
}
|
||||
|
||||
} else {
|
||||
// it's going down
|
||||
// run until we reach target ...
|
||||
for from > to && from >= 0 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
prev, err := m.sourceDrv.Prev(suint(from))
|
||||
if os.IsNotExist(err) && to == -1 {
|
||||
// apply nil migration
|
||||
migr, err := m.newMigration(suint(from), -1)
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
return
|
||||
|
||||
} else if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(suint(from), int(prev))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
|
||||
from = int(prev)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// readUp reads up migrations from `from` limited by `limit`.
|
||||
// limit can be -1, implying no limit and reading until there are no more migrations.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once readUp is done reading it will close the ret channel.
|
||||
func (m *Migrate) readUp(from int, limit int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
count := 0
|
||||
for count < limit || limit == -1 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
// apply first migration if from is nil version
|
||||
if from == -1 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, int(firstVersion))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(firstVersion)
|
||||
count++
|
||||
continue
|
||||
}
|
||||
|
||||
// apply next migration
|
||||
next, err := m.sourceDrv.Next(suint(from))
|
||||
if os.IsNotExist(err) {
|
||||
// no limit, but no migrations applied?
|
||||
if limit == -1 && count == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// no limit, reached end
|
||||
if limit == -1 {
|
||||
return
|
||||
}
|
||||
|
||||
// reached end, and didn't apply any migrations
|
||||
if limit > 0 && count == 0 {
|
||||
ret <- os.ErrNotExist
|
||||
return
|
||||
}
|
||||
|
||||
// applied fewer migrations than the limit?
|
||||
if count < limit {
|
||||
ret <- ErrShortLimit{suint(limit - count)}
|
||||
return
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(next, int(next))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(next)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
// readDown reads down migrations from `from` limited by `limit`.
|
||||
// limit can be -1, implying no limit and reading until there are no more migrations.
|
||||
// Each migration is then written to the ret channel.
|
||||
// If an error occurs during reading, that error is written to the ret channel, too.
|
||||
// Once readDown is done reading it will close the ret channel.
|
||||
func (m *Migrate) readDown(from int, limit int, ret chan<- interface{}) {
|
||||
defer close(ret)
|
||||
|
||||
// check if from version exists
|
||||
if from >= 0 {
|
||||
if err := m.versionExists(suint(from)); err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if limit == 0 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// no change if already at nil version
|
||||
if from == -1 && limit == -1 {
|
||||
ret <- ErrNoChange
|
||||
return
|
||||
}
|
||||
|
||||
// can't go over limit if already at nil version
|
||||
if from == -1 && limit > 0 {
|
||||
ret <- os.ErrNotExist
|
||||
return
|
||||
}
|
||||
|
||||
count := 0
|
||||
for count < limit || limit == -1 {
|
||||
if m.stop() {
|
||||
return
|
||||
}
|
||||
|
||||
prev, err := m.sourceDrv.Prev(suint(from))
|
||||
if os.IsNotExist(err) {
|
||||
// no limit or haven't reached limit, apply "first" migration
|
||||
if limit == -1 || limit-count > 0 {
|
||||
firstVersion, err := m.sourceDrv.First()
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(firstVersion, -1)
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
count++
|
||||
}
|
||||
|
||||
if count < limit {
|
||||
ret <- ErrShortLimit{suint(limit - count)}
|
||||
}
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
migr, err := m.newMigration(suint(from), int(prev))
|
||||
if err != nil {
|
||||
ret <- err
|
||||
return
|
||||
}
|
||||
|
||||
ret <- migr
|
||||
go func() {
|
||||
if err := migr.Buffer(); err != nil {
|
||||
m.logErr(err)
|
||||
}
|
||||
}()
|
||||
from = int(prev)
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
// runMigrations reads *Migration and error from a channel. Any other type
|
||||
// sent on this channel will result in a panic. Each migration is then
|
||||
// proxied to the database driver and run against the database.
|
||||
// Before running a newly received migration it will check if it's supposed
|
||||
// to stop execution because it might have received a stop signal on the
|
||||
// GracefulStop channel.
|
||||
func (m *Migrate) runMigrations(ret <-chan interface{}) error {
|
||||
for r := range ret {
|
||||
|
||||
if m.stop() {
|
||||
return nil
|
||||
}
|
||||
|
||||
switch r := r.(type) {
|
||||
case error:
|
||||
return r
|
||||
|
||||
case *Migration:
|
||||
migr := r
|
||||
|
||||
// set version with dirty state
|
||||
if err := m.databaseDrv.SetVersion(migr.TargetVersion, true); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if migr.Body != nil {
|
||||
m.logVerbosePrintf("Read and execute %v\n", migr.LogString())
|
||||
if err := m.databaseDrv.Run(migr.BufferedBody); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// set clean state
|
||||
if err := m.databaseDrv.SetVersion(migr.TargetVersion, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
endTime := time.Now()
|
||||
readTime := migr.FinishedReading.Sub(migr.StartedBuffering)
|
||||
runTime := endTime.Sub(migr.FinishedReading)
|
||||
|
||||
// log either verbose or normal
|
||||
if m.Log != nil {
|
||||
if m.Log.Verbose() {
|
||||
m.logPrintf("Finished %v (read %v, ran %v)\n", migr.LogString(), readTime, runTime)
|
||||
} else {
|
||||
m.logPrintf("%v (%v)\n", migr.LogString(), readTime+runTime)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown type: %T with value: %+v", r, r)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
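The split above is easier to see in isolation: a read* method owns a chan interface{}, sends either a migration value or an error on it, and closes it when done, while runMigrations type-switches on each element and stops at the first error. Below is a minimal, self-contained sketch of that channel protocol; the job type and the function names are illustrative, not part of the vendored package.

package main

import (
	"errors"
	"fmt"
)

type job struct{ version uint }

// produce owns the channel: it sends values or an error and closes it when done,
// mirroring readUp/readDown above.
func produce(ret chan<- interface{}) {
	defer close(ret)
	for v := uint(1); v <= 3; v++ {
		ret <- &job{version: v}
	}
	ret <- errors.New("source exhausted unexpectedly") // an error ends the stream
}

// consume mirrors runMigrations: type-switch on each element, stop at the first error.
func consume(ret <-chan interface{}) error {
	for r := range ret {
		switch r := r.(type) {
		case error:
			return r
		case *job:
			fmt.Println("running migration", r.version)
		default:
			return fmt.Errorf("unknown type: %T", r)
		}
	}
	return nil
}

func main() {
	ret := make(chan interface{}, 1)
	go produce(ret)
	fmt.Println(consume(ret))
}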

// versionExists checks the source if either the up or down migration for
// the specified migration version exists.
func (m *Migrate) versionExists(version uint) (result error) {
	// try up migration first
	up, _, err := m.sourceDrv.ReadUp(version)
	if err == nil {
		defer func() {
			if errClose := up.Close(); errClose != nil {
				result = multierror.Append(result, errClose)
			}
		}()
	}
	if os.IsExist(err) {
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}

	// then try down migration
	down, _, err := m.sourceDrv.ReadDown(version)
	if err == nil {
		defer func() {
			if errClose := down.Close(); errClose != nil {
				result = multierror.Append(result, errClose)
			}
		}()
	}
	if os.IsExist(err) {
		return nil
	} else if !os.IsNotExist(err) {
		return err
	}

	m.logErr(fmt.Errorf("no migration found for version %d", version))
	return os.ErrNotExist
}

// stop returns true if no more migrations should be run against the database
// because a stop signal was received on the GracefulStop channel.
// Calls are cheap and this function is not blocking.
func (m *Migrate) stop() bool {
	if m.isGracefulStop {
		return true
	}

	select {
	case <-m.GracefulStop:
		m.isGracefulStop = true
		return true

	default:
		return false
	}
}
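stop is only a check; something still has to feed the GracefulStop channel. A hedged sketch of how a caller might wire an interrupt signal to it follows; the helper name is made up, and it assumes GracefulStop is the exported, buffered channel that stop() reads above.

package main

import (
	"os"
	"os/signal"

	migrate "github.com/golang-migrate/migrate/v4"
)

// stopOnInterrupt (illustrative name) forwards SIGINT to m.GracefulStop.
// stop() then returns true on its next check and the remaining migrations
// are skipped. The send relies on GracefulStop being buffered, so the
// signal goroutine never blocks.
func stopOnInterrupt(m *migrate.Migrate) {
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, os.Interrupt)
	go func() {
		<-sigs
		m.GracefulStop <- true
	}()
}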

// newMigration is a helper func that returns a *Migration for the
// specified version and targetVersion.
func (m *Migrate) newMigration(version uint, targetVersion int) (*Migration, error) {
	var migr *Migration

	if targetVersion >= int(version) {
		r, identifier, err := m.sourceDrv.ReadUp(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion)
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from up source
			migr, err = NewMigration(r, identifier, version, targetVersion)
			if err != nil {
				return nil, err
			}
		}

	} else {
		r, identifier, err := m.sourceDrv.ReadDown(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion)
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from down source
			migr, err = NewMigration(r, identifier, version, targetVersion)
			if err != nil {
				return nil, err
			}
		}
	}

	if m.PrefetchMigrations > 0 && migr.Body != nil {
		m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
	} else {
		m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
	}

	return migr, nil
}

// lock is a thread safe helper function to lock the database.
// It should be called as late as possible when running migrations.
func (m *Migrate) lock() error {
	m.isLockedMu.Lock()
	defer m.isLockedMu.Unlock()

	if m.isLocked {
		return ErrLocked
	}

	// create done channel, used in the timeout goroutine
	done := make(chan bool, 1)
	defer func() {
		done <- true
	}()

	// use errchan to signal error back to this context
	errchan := make(chan error, 2)

	// start timeout goroutine
	timeout := time.After(m.LockTimeout)
	go func() {
		for {
			select {
			case <-done:
				return
			case <-timeout:
				errchan <- ErrLockTimeout
				return
			}
		}
	}()

	// now try to acquire the lock
	go func() {
		if err := m.databaseDrv.Lock(); err != nil {
			errchan <- err
		} else {
			errchan <- nil
		}
	}()

	// wait until we either receive ErrLockTimeout or error from Lock operation
	err := <-errchan
	if err == nil {
		m.isLocked = true
	}
	return err
}
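The body of lock is essentially a "race an operation against a timeout" pattern with a shared, buffered error channel so that neither goroutine leaks whichever side wins. A standalone sketch of the same shape follows; withTimeout and errTimeout are illustrative names, not part of this package.

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimeout = errors.New("timed out waiting for lock")

// withTimeout runs op and returns either its error or errTimeout,
// whichever arrives first. The channel is buffered with capacity 2
// so both goroutines can always complete their send.
func withTimeout(d time.Duration, op func() error) error {
	errchan := make(chan error, 2)
	done := make(chan struct{})
	defer close(done)

	go func() {
		select {
		case <-done: // the operation finished first
		case <-time.After(d):
			errchan <- errTimeout
		}
	}()

	go func() { errchan <- op() }()

	return <-errchan
}

func main() {
	err := withTimeout(50*time.Millisecond, func() error {
		time.Sleep(10 * time.Millisecond) // pretend to acquire a database lock
		return nil
	})
	fmt.Println(err) // <nil>
}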

// unlock is a thread safe helper function to unlock the database.
// It should be called as early as possible when no more migrations are
// expected to be executed.
func (m *Migrate) unlock() error {
	m.isLockedMu.Lock()
	defer m.isLockedMu.Unlock()

	if err := m.databaseDrv.Unlock(); err != nil {
		// BUG: Can potentially create a deadlock. Add a timeout.
		return err
	}

	m.isLocked = false
	return nil
}

// unlockErr calls unlock and returns a combined error
// if a prevErr is not nil.
func (m *Migrate) unlockErr(prevErr error) error {
	if err := m.unlock(); err != nil {
		return multierror.Append(prevErr, err)
	}
	return prevErr
}

// logPrintf writes to m.Log if not nil
func (m *Migrate) logPrintf(format string, v ...interface{}) {
	if m.Log != nil {
		m.Log.Printf(format, v...)
	}
}

// logVerbosePrintf writes to m.Log if not nil. Use for verbose logging output.
func (m *Migrate) logVerbosePrintf(format string, v ...interface{}) {
	if m.Log != nil && m.Log.Verbose() {
		m.Log.Printf(format, v...)
	}
}

// logErr writes error to m.Log if not nil
func (m *Migrate) logErr(err error) {
	if m.Log != nil {
		m.Log.Printf("error: %v", err)
	}
}
160
vendor/github.com/golang-migrate/migrate/v4/migration.go
generated
vendored
Normal file
@@ -0,0 +1,160 @@
package migrate

import (
	"bufio"
	"fmt"
	"io"
	"time"
)

// DefaultBufferSize sets the in memory buffer size (in Bytes) for every
// pre-read migration (see DefaultPrefetchMigrations).
var DefaultBufferSize = uint(100000)

// Migration holds information about a migration.
// It is initially created from data coming from the source and then
// used when run against the database.
type Migration struct {
	// Identifier can be any string to help identifying
	// the migration in the source.
	Identifier string

	// Version is the version of this migration.
	Version uint

	// TargetVersion is the migration version after this migration
	// has been applied to the database.
	// Can be -1, implying that this is a NilVersion.
	TargetVersion int

	// Body holds an io.ReadCloser to the source.
	Body io.ReadCloser

	// BufferedBody holds a buffered io.Reader to the underlying Body.
	BufferedBody io.Reader

	// BufferSize defaults to DefaultBufferSize
	BufferSize uint

	// bufferWriter holds an io.WriteCloser and pipes to BufferedBody.
	// It is the write end of an io.Pipe, used for flow control.
	bufferWriter io.WriteCloser

	// Scheduled is the time when the migration was scheduled/queued.
	Scheduled time.Time

	// StartedBuffering is the time when buffering of the migration source started.
	StartedBuffering time.Time

	// FinishedBuffering is the time when buffering of the migration source finished.
	FinishedBuffering time.Time

	// FinishedReading is the time when the migration source is fully read.
	FinishedReading time.Time

	// BytesRead holds the number of Bytes read from the migration source.
	BytesRead int64
}

// NewMigration returns a new Migration and sets the body, identifier,
// version and targetVersion. Body can be nil, which turns this migration
// into a "NilMigration". If no identifier is provided, it will default to "<empty>".
// targetVersion can be -1, implying it is a NilVersion.
//
// What is a NilMigration?
// Usually each migration version coming from source is expected to have an
// Up and Down migration. This is not a hard requirement though, leading to
// a situation where only the Up or Down migration is present. So let's say
// the user wants to migrate up to a version that doesn't have the actual Up
// migration, in that case we still want to apply the version, but with an empty
// body. We are calling that a NilMigration, a migration with an empty body.
//
// What is a NilVersion?
// NilVersion is a const(-1). When running down migrations and we are at the
// last down migration, there is no next down migration, the targetVersion should
// be nil. Nil in this case is represented by -1 (because type int).
func NewMigration(body io.ReadCloser, identifier string,
	version uint, targetVersion int) (*Migration, error) {
	tnow := time.Now()
	m := &Migration{
		Identifier:    identifier,
		Version:       version,
		TargetVersion: targetVersion,
		Scheduled:     tnow,
	}

	if body == nil {
		if len(identifier) == 0 {
			m.Identifier = "<empty>"
		}

		m.StartedBuffering = tnow
		m.FinishedBuffering = tnow
		m.FinishedReading = tnow
		return m, nil
	}

	br, bw := io.Pipe()
	m.Body = body // want to simulate low latency? newSlowReader(body)
	m.BufferSize = DefaultBufferSize
	m.BufferedBody = br
	m.bufferWriter = bw
	return m, nil
}

// String implements fmt.Stringer and is used in tests.
func (m *Migration) String() string {
	return fmt.Sprintf("%v [%v=>%v]", m.Identifier, m.Version, m.TargetVersion)
}

// LogString returns a string describing this migration to humans.
func (m *Migration) LogString() string {
	directionStr := "u"
	if m.TargetVersion < int(m.Version) {
		directionStr = "d"
	}
	return fmt.Sprintf("%v/%v %v", m.Version, directionStr, m.Identifier)
}

// Buffer buffers Body up to BufferSize.
// Calling this function blocks. Call with goroutine.
func (m *Migration) Buffer() error {
	if m.Body == nil {
		return nil
	}

	m.StartedBuffering = time.Now()

	b := bufio.NewReaderSize(m.Body, int(m.BufferSize))

	// start reading from body, peek won't move the read pointer though
	// poor man's solution?
	if _, err := b.Peek(int(m.BufferSize)); err != nil && err != io.EOF {
		return err
	}

	m.FinishedBuffering = time.Now()

	// write to bufferWriter, this will block until
	// something starts reading from m.BufferedBody
	n, err := b.WriteTo(m.bufferWriter)
	if err != nil {
		return err
	}

	m.FinishedReading = time.Now()
	m.BytesRead = n

	// close bufferWriter so the reader of BufferedBody knows that
	// there is no more data coming
	if err := m.bufferWriter.Close(); err != nil {
		return err
	}

	// it's safe to close the Body too
	if err := m.Body.Close(); err != nil {
		return err
	}

	return nil
}
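A small usage sketch of the API above: NewMigration from an in-memory body, Buffer running in a goroutine (it blocks until BufferedBody is drained), and a consumer reading BufferedBody, which is roughly what a database driver's Run would receive. The identifier and the SQL string are illustrative only.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	migrate "github.com/golang-migrate/migrate/v4"
)

func main() {
	body := ioutil.NopCloser(strings.NewReader("CREATE TABLE users (id INT);"))

	m, err := migrate.NewMigration(body, "create_users", 1, 1)
	if err != nil {
		panic(err)
	}

	// Buffer pre-reads up to BufferSize and then streams into BufferedBody,
	// blocking until the reader below has drained it.
	go func() {
		if err := m.Buffer(); err != nil {
			fmt.Println("buffer error:", err)
		}
	}()

	sql, _ := ioutil.ReadAll(m.BufferedBody) // what a driver's Run would consume
	fmt.Printf("%s -> %s\n", m.LogString(), sql)
}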
118
vendor/github.com/golang-migrate/migrate/v4/source/driver.go
generated
vendored
Normal file
@@ -0,0 +1,118 @@
// Package source provides the Source interface.
// All source drivers must implement this interface, register themselves,
// optionally provide a `WithInstance` function and pass the tests
// in package source/testing.
package source

import (
	"fmt"
	"io"
	nurl "net/url"
	"sync"
)

var driversMu sync.RWMutex
var drivers = make(map[string]Driver)

// Driver is the interface every source driver must implement.
//
// How to implement a source driver?
//   1. Implement this interface.
//   2. Optionally, add a function named `WithInstance`.
//      This function should accept an existing source instance and a Config{} struct
//      and return a driver instance.
//   3. Add a test that calls source/testing.go:Test()
//   4. Add own tests for Open(), WithInstance() (when provided) and Close().
//      All other functions are tested by tests in source/testing.
//      Saves you some time and makes sure all source drivers behave the same way.
//   5. Call Register in init().
//
// Guidelines:
//   * All configuration input must come from the URL string in func Open()
//     or the Config{} struct in WithInstance. Don't os.Getenv().
//   * Drivers are supposed to be read only.
//   * Ideally don't load any contents (into memory) in Open or WithInstance.
type Driver interface {
	// Open returns a new driver instance configured with parameters
	// coming from the URL string. Migrate will call this function
	// only once per instance.
	Open(url string) (Driver, error)

	// Close closes the underlying source instance managed by the driver.
	// Migrate will call this function only once per instance.
	Close() error

	// First returns the very first migration version available to the driver.
	// Migrate will call this function multiple times.
	// If there is no version available, it must return os.ErrNotExist.
	First() (version uint, err error)

	// Prev returns the previous version for a given version available to the driver.
	// Migrate will call this function multiple times.
	// If there is no previous version available, it must return os.ErrNotExist.
	Prev(version uint) (prevVersion uint, err error)

	// Next returns the next version for a given version available to the driver.
	// Migrate will call this function multiple times.
	// If there is no next version available, it must return os.ErrNotExist.
	Next(version uint) (nextVersion uint, err error)

	// ReadUp returns the UP migration body and an identifier that helps
	// finding this migration in the source for a given version.
	// If there is no up migration available for this version,
	// it must return os.ErrNotExist.
	// Do not start reading, just return the ReadCloser!
	ReadUp(version uint) (r io.ReadCloser, identifier string, err error)

	// ReadDown returns the DOWN migration body and an identifier that helps
	// finding this migration in the source for a given version.
	// If there is no down migration available for this version,
	// it must return os.ErrNotExist.
	// Do not start reading, just return the ReadCloser!
	ReadDown(version uint) (r io.ReadCloser, identifier string, err error)
}

// Open returns a new driver instance.
func Open(url string) (Driver, error) {
	u, err := nurl.Parse(url)
	if err != nil {
		return nil, err
	}

	if u.Scheme == "" {
		return nil, fmt.Errorf("source driver: invalid URL scheme")
	}

	driversMu.RLock()
	d, ok := drivers[u.Scheme]
	driversMu.RUnlock()
	if !ok {
		return nil, fmt.Errorf("source driver: unknown driver %v (forgotten import?)", u.Scheme)
	}

	return d.Open(url)
}

// Register globally registers a driver.
func Register(name string, driver Driver) {
	driversMu.Lock()
	defer driversMu.Unlock()
	if driver == nil {
		panic("Register driver is nil")
	}
	if _, dup := drivers[name]; dup {
		panic("Register called twice for driver " + name)
	}
	drivers[name] = driver
}

// List lists the registered drivers
func List() []string {
	driversMu.RLock()
	defer driversMu.RUnlock()
	names := make([]string, 0, len(drivers))
	for n := range drivers {
		names = append(names, n)
	}
	return names
}
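A hedged sketch of steps 1 and 5 from the interface comment above: the smallest type that satisfies source.Driver, registered under a made-up "mem" scheme in init(). A real driver would return actual versions and migration bodies; this one only shows the wiring of Register and Open.

package mem

import (
	"io"
	"os"

	"github.com/golang-migrate/migrate/v4/source"
)

func init() {
	// After a blank import of this package, source.Open("mem://") resolves here.
	source.Register("mem", &Mem{})
}

// Mem is an empty, illustrative source driver.
type Mem struct{}

func (m *Mem) Open(url string) (source.Driver, error) { return m, nil }
func (m *Mem) Close() error                           { return nil }

// The empty source reports no versions at all, as the interface requires.
func (m *Mem) First() (uint, error)            { return 0, os.ErrNotExist }
func (m *Mem) Prev(version uint) (uint, error) { return 0, os.ErrNotExist }
func (m *Mem) Next(version uint) (uint, error) { return 0, os.ErrNotExist }

func (m *Mem) ReadUp(version uint) (io.ReadCloser, string, error) {
	return nil, "", os.ErrNotExist
}

func (m *Mem) ReadDown(version uint) (io.ReadCloser, string, error) {
	return nil, "", os.ErrNotExist
}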
143
vendor/github.com/golang-migrate/migrate/v4/source/migration.go
generated
vendored
Normal file
@@ -0,0 +1,143 @@
package source

import (
	"sort"
)

// Direction is either up or down.
type Direction string

const (
	Down Direction = "down"
	Up   Direction = "up"
)

// Migration is a helper struct for source drivers that need to
// build the full directory tree in memory.
// Migration is fully independent from migrate.Migration.
type Migration struct {
	// Version is the version of this migration.
	Version uint

	// Identifier can be any string that helps identifying
	// this migration in the source.
	Identifier string

	// Direction is either Up or Down.
	Direction Direction

	// Raw holds the raw location path to this migration in source.
	// ReadUp and ReadDown will use this.
	Raw string
}

// Migrations wraps Migration and has an internal index
// to keep track of Migration order.
type Migrations struct {
	index      uintSlice
	migrations map[uint]map[Direction]*Migration
}

func NewMigrations() *Migrations {
	return &Migrations{
		index:      make(uintSlice, 0),
		migrations: make(map[uint]map[Direction]*Migration),
	}
}

func (i *Migrations) Append(m *Migration) (ok bool) {
	if m == nil {
		return false
	}

	if i.migrations[m.Version] == nil {
		i.migrations[m.Version] = make(map[Direction]*Migration)
	}

	// reject duplicate versions
	if _, dup := i.migrations[m.Version][m.Direction]; dup {
		return false
	}

	i.migrations[m.Version][m.Direction] = m
	i.buildIndex()

	return true
}

func (i *Migrations) buildIndex() {
	i.index = make(uintSlice, 0)
	for version := range i.migrations {
		i.index = append(i.index, version)
	}
	sort.Sort(i.index)
}

func (i *Migrations) First() (version uint, ok bool) {
	if len(i.index) == 0 {
		return 0, false
	}
	return i.index[0], true
}

func (i *Migrations) Prev(version uint) (prevVersion uint, ok bool) {
	pos := i.findPos(version)
	if pos >= 1 && len(i.index) > pos-1 {
		return i.index[pos-1], true
	}
	return 0, false
}

func (i *Migrations) Next(version uint) (nextVersion uint, ok bool) {
	pos := i.findPos(version)
	if pos >= 0 && len(i.index) > pos+1 {
		return i.index[pos+1], true
	}
	return 0, false
}

func (i *Migrations) Up(version uint) (m *Migration, ok bool) {
	if _, ok := i.migrations[version]; ok {
		if mx, ok := i.migrations[version][Up]; ok {
			return mx, true
		}
	}
	return nil, false
}

func (i *Migrations) Down(version uint) (m *Migration, ok bool) {
	if _, ok := i.migrations[version]; ok {
		if mx, ok := i.migrations[version][Down]; ok {
			return mx, true
		}
	}
	return nil, false
}

func (i *Migrations) findPos(version uint) int {
	if len(i.index) > 0 {
		ix := i.index.Search(version)
		if ix < len(i.index) && i.index[ix] == version {
			return ix
		}
	}
	return -1
}

type uintSlice []uint

func (s uintSlice) Len() int {
	return len(s)
}

func (s uintSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s uintSlice) Less(i, j int) bool {
	return s[i] < s[j]
}

func (s uintSlice) Search(x uint) int {
	return sort.Search(len(s), func(i int) bool { return s[i] >= x })
}
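A short sketch of how a file-based driver might use the Migrations helper: Append a few entries, then walk versions in ascending order with First/Next and look up the Up migration for each. The file names and identifiers are illustrative.

package main

import (
	"fmt"

	"github.com/golang-migrate/migrate/v4/source"
)

func main() {
	ms := source.NewMigrations()

	ms.Append(&source.Migration{Version: 1, Identifier: "init", Direction: source.Up, Raw: "1_init.up.sql"})
	ms.Append(&source.Migration{Version: 1, Identifier: "init", Direction: source.Down, Raw: "1_init.down.sql"})
	ms.Append(&source.Migration{Version: 2, Identifier: "add_index", Direction: source.Up, Raw: "2_add_index.up.sql"})

	// Walk versions in ascending order, the same way First/Next back a driver's
	// First and Next methods.
	v, ok := ms.First()
	for ok {
		if up, found := ms.Up(v); found {
			fmt.Printf("version %d up -> %s\n", v, up.Raw)
		}
		v, ok = ms.Next(v)
	}
}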
39
vendor/github.com/golang-migrate/migrate/v4/source/parse.go
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package source

import (
	"fmt"
	"regexp"
	"strconv"
)

var (
	ErrParse = fmt.Errorf("no match")
)

var (
	DefaultParse = Parse
	DefaultRegex = Regex
)

// Regex matches the following pattern:
//  123_name.up.ext
//  123_name.down.ext
var Regex = regexp.MustCompile(`^([0-9]+)_(.*)\.(` + string(Down) + `|` + string(Up) + `)\.(.*)$`)

// Parse returns Migration for matching Regex pattern.
func Parse(raw string) (*Migration, error) {
	m := Regex.FindStringSubmatch(raw)
	if len(m) == 5 {
		versionUint64, err := strconv.ParseUint(m[1], 10, 64)
		if err != nil {
			return nil, err
		}
		return &Migration{
			Version:    uint(versionUint64),
			Identifier: m[2],
			Direction:  Direction(m[3]),
			Raw:        raw,
		}, nil
	}
	return nil, ErrParse
}
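A small usage sketch of Parse against a conventionally named migration file; the values noted in the comments follow directly from the Regex capture groups above. The file names are illustrative.

package main

import (
	"fmt"

	"github.com/golang-migrate/migrate/v4/source"
)

func main() {
	m, err := source.Parse("42_create_users_table.up.sql")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Version, m.Identifier, m.Direction) // 42 create_users_table up

	// A name that doesn't match the pattern yields ErrParse.
	if _, err := source.Parse("README.md"); err == source.ErrParse {
		fmt.Println("not a migration file")
	}
}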
62
vendor/github.com/golang-migrate/migrate/v4/util.go
generated
vendored
Normal file
@@ -0,0 +1,62 @@
package migrate

import (
	"fmt"
	nurl "net/url"
	"strings"
)

// MultiError holds multiple errors.
//
// Deprecated: Use github.com/hashicorp/go-multierror instead
type MultiError struct {
	Errs []error
}

// NewMultiError returns an error type holding multiple errors.
//
// Deprecated: Use github.com/hashicorp/go-multierror instead
func NewMultiError(errs ...error) MultiError {
	compactErrs := make([]error, 0)
	for _, e := range errs {
		if e != nil {
			compactErrs = append(compactErrs, e)
		}
	}
	return MultiError{compactErrs}
}

// Error implements error. Multiple errors are concatenated with 'and's.
func (m MultiError) Error() string {
	var strs = make([]string, 0)
	for _, e := range m.Errs {
		if len(e.Error()) > 0 {
			strs = append(strs, e.Error())
		}
	}
	return strings.Join(strs, " and ")
}

// suint safely converts int to uint
// see https://goo.gl/wEcqof
// see https://goo.gl/pai7Dr
func suint(n int) uint {
	if n < 0 {
		panic(fmt.Sprintf("suint(%v) expects input >= 0", n))
	}
	return uint(n)
}

// FilterCustomQuery filters all query values starting with `x-`
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
	ux := *u
	vx := make(nurl.Values)
	for k, v := range ux.Query() {
		if len(k) <= 1 || (len(k) > 1 && k[0:2] != "x-") {
			vx[k] = v
		}
	}
	ux.RawQuery = vx.Encode()
	return &ux
}
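A usage sketch of FilterCustomQuery: `x-` prefixed parameters are dropped while ordinary ones are kept, so a URL can carry migrate-specific options without forwarding them to whatever consumes the filtered URL. The example URL and parameter names are illustrative.

package main

import (
	"fmt"
	nurl "net/url"

	migrate "github.com/golang-migrate/migrate/v4"
)

func main() {
	u, err := nurl.Parse("postgres://localhost:5432/db?sslmode=disable&x-migrations-table=schema_migrations")
	if err != nil {
		panic(err)
	}

	// The x-migrations-table parameter is removed; sslmode survives.
	fmt.Println(migrate.FilterCustomQuery(u).String())
	// postgres://localhost:5432/db?sslmode=disable
}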
12
vendor/github.com/golang/mock/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,12 @@
# This is the official list of GoMock authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.

# Names should be added to this file as
#	Name or Organization <email address>
# The email address is not required for organizations.

# Please keep the list sorted.

Alex Reece <awreece@gmail.com>
Google Inc.
37
vendor/github.com/golang/mock/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,37 @@
# This is the official list of people who can contribute (and typically
# have contributed) code to the gomock repository.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# The submission process automatically checks to make sure
# that people submitting code are listed in this file (by email address).
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
#     http://code.google.com/legal/individual-cla-v1.0.html
#     http://code.google.com/legal/corporate-cla-v1.0.html
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.

# Names should be added to this file like so:
#     Name <email address>
#
# An entry with two email addresses specifies that the
# first address should be used in the submit logs and
# that the second address should be recognized as the
# same person when interacting with Rietveld.

# Please keep the list sorted.

Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
Alex Reece <awreece@gmail.com>
David Symonds <dsymonds@golang.org>
Ryan Barrett <ryanb@google.com>
Some files were not shown because too many files have changed in this diff.