Mirror of https://github.com/vacp2p/mvds.git

Compare commits: v0.0.21...update-dep (15 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 864dcc5f27 | |
| | d6b27d3843 | |
| | daa5e8b8a2 | |
| | 748b61123f | |
| | d34db70222 | |
| | 9f50ecac54 | |
| | 2512e45906 | |
| | 945a249144 | |
| | 463f9a7f6c | |
| | 217a49bd54 | |
| | f0cc0e7841 | |
| | 75124ea56c | |
| | 46125cfe6c | |
| | 899aeeacb4 | |
| | 87839f9f3a | |
.gitignore (vendored) | 3
@@ -11,4 +11,7 @@
# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Ignore Intellij config
.idea

mvds
.travis.yml | 12
@@ -5,6 +5,18 @@ language: go

install: true

branches:
  only:
    - master

before_install:
  - |
    if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(.md)|(.html)|^(LICENSE)|^(docs)'
    then
      echo "Only docs were updated, not running the CI."
      exit 0
    fi

env:
  - GO111MODULE=on
LICENSE | 2
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2019 Status
Copyright (c) 2019 Vac

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Makefile | 9
@@ -23,6 +23,15 @@ install-linter:
	curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.17.1
.PHONY: install-linter

mock-install:
	go get -u github.com/golang/mock/mockgen
	go get -u github.com/golang/mock
.PHONY: mock-install

mock:
	mockgen -package=internal -destination=node/internal/syncstate_mock.go -source=state/state.go
.PHONY: mock

vendor:
	go mod tidy
	go mod vendor
README.md | 13
@@ -8,10 +8,21 @@ https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/6874
[](https://goreportcard.com/report/github.com/vacp2p/mvds)
[](https://travis-ci.com/vacp2p/mvds)

Experimental implementation of the [minimal viable data sync protocol specification](https://specs.vac.dev/mvds.html).
Experimental implementation of the [minimal viable data sync protocol specification](https://specs.vac.dev/specs/mvds.html) including the [metadata format specification](https://specs.vac.dev/specs/mdf.html).

## Usage

Listening to MVDS messages is fairly simple:

```go
sub := node.Subscribe()

for {
	msg := <-sub
	print(msg)
}
```

### Prerequisites

Ensure you have `protoc` (Protobuf) and Golang installed. Then run `make`.
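For orientation, the sketch below shows how the pieces touched in this compare view fit together when constructing a node. It is assembled from the constructor calls that appear later in this page (`node.NewNode`, `store.NewMemoryMessageStore`, `state.NewMemorySyncState`, `dependency.NewInMemoryTracker`, and the new `node.EventualMode`, as used in `main.go` and the test suites); the epoch function mirrors `main.go`, the literal peer and group IDs are illustrative, and starting the node or registering group peers is omitted because those calls are not shown in this excerpt.

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/node"
	"github.com/vacp2p/mvds/peers"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
	"github.com/vacp2p/mvds/transport"
	"go.uber.org/zap"
)

// Calc mirrors the epoch function used by main.go in this changeset:
// the next send epoch backs off linearly with the send count.
func Calc(count uint64, epoch int64) int64 {
	return epoch + int64(count*2)
}

func main() {
	// Channel-based transport, as wired up in the test suites of this diff.
	in := make(chan transport.Packet)
	t := transport.NewChannelTransport(0, in)

	// A fully in-memory node built from the constructors this changeset introduces or renames.
	n := node.NewNode(
		store.NewMemoryMessageStore(),
		t,
		state.NewMemorySyncState(),
		Calc,
		0,                    // starting epoch
		state.PeerID{0x01},   // illustrative peer ID
		node.InteractiveMode, // or node.BatchMode
		peers.NewMemoryPersistence(),
		dependency.NewInMemoryTracker(),
		node.EventualMode, // or node.ConsistentMode
		zap.NewNop(),
	)

	// Appending a message to a group that was never registered returns an error;
	// it is printed here only to keep the sketch self-contained.
	id, err := n.AppendMessage(state.GroupID{0x02}, []byte("hello"))
	fmt.Println(id, err)
}
```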
dependency/migrations/migrations.go (new file) | 317
@@ -0,0 +1,317 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1572614870_initial_schema.down.sql (30B)
|
||||
// 1572614870_initial_schema.up.sql (157B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func bindataRead(data []byte, name string) ([]byte, error) {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
_, err = io.Copy(&buf, gz)
|
||||
clErr := gz.Close()
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("read %q: %v", name, err)
|
||||
}
|
||||
if clErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
type asset struct {
|
||||
bytes []byte
|
||||
info os.FileInfo
|
||||
digest [sha256.Size]byte
|
||||
}
|
||||
|
||||
type bindataFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
}
|
||||
|
||||
func (fi bindataFileInfo) Name() string {
|
||||
return fi.name
|
||||
}
|
||||
func (fi bindataFileInfo) Size() int64 {
|
||||
return fi.size
|
||||
}
|
||||
func (fi bindataFileInfo) Mode() os.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
func (fi bindataFileInfo) ModTime() time.Time {
|
||||
return fi.modTime
|
||||
}
|
||||
func (fi bindataFileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1572614870_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x49\x2d\x48\xcd\x4b\x49\xcd\x4b\xce\x4c\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\x13\x64\x97\xf6\x1e\x00\x00\x00")
|
||||
|
||||
func _1572614870_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1572614870_initial_schemaDownSql,
|
||||
"1572614870_initial_schema.down.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1572614870_initial_schemaDownSql() (*asset, error) {
|
||||
bytes, err := _1572614870_initial_schemaDownSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1572614870_initial_schema.down.sql", size: 30, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xfc, 0xb3, 0xc3, 0x5e, 0x81, 0xae, 0x77, 0x21, 0x3b, 0xd0, 0xa0, 0x6a, 0xf4, 0x7b, 0xb2, 0x1c, 0x92, 0xd7, 0x5d, 0x4c, 0xa6, 0x4f, 0xe, 0xc6, 0x2d, 0xe4, 0x18, 0x5d, 0x56, 0xe, 0x18, 0x6a}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1572614870_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x4f\x49\x2d\x48\xcd\x4b\x49\xcd\x4b\xce\x4c\x2d\x56\xd0\xe0\x52\x50\x50\x50\xc8\x2d\x4e\x8f\xcf\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\x4b\xc2\xd5\x57\x42\x14\xf8\xf9\x87\x28\xf8\x85\xfa\xf8\x70\x69\x5a\x73\x71\x41\x8d\xf7\xf4\x73\x71\x8d\x50\xc8\x4c\xa9\x88\x47\x52\xed\xef\x87\x69\xa1\x06\x42\x5e\xd3\x9a\x0b\x10\x00\x00\xff\xff\x11\xfa\x7b\x28\x9d\x00\x00\x00")
|
||||
|
||||
func _1572614870_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1572614870_initial_schemaUpSql,
|
||||
"1572614870_initial_schema.up.sql",
|
||||
)
|
||||
}
|
||||
|
||||
func _1572614870_initial_schemaUpSql() (*asset, error) {
|
||||
bytes, err := _1572614870_initial_schemaUpSqlBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1572614870_initial_schema.up.sql", size: 157, mode: os.FileMode(0644), modTime: time.Unix(1572742027, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xad, 0x62, 0x2e, 0xb1, 0x1c, 0x65, 0x39, 0xb9, 0xe4, 0xae, 0xf6, 0x28, 0x6d, 0xbe, 0x62, 0xba, 0x93, 0x80, 0x6c, 0x47, 0x4f, 0x98, 0x5b, 0xf0, 0xfa, 0x16, 0x2, 0x7e, 0xaa, 0x15, 0x63, 0xf2}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var _docGo = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x84\x8f\xbb\x6e\xc3\x30\x0c\x45\x77\x7f\xc5\x45\x96\x2c\xb5\xb4\x74\xea\xd6\xb1\x7b\x7f\x80\x91\x68\x89\x88\x1e\xae\x48\xe7\xf1\xf7\x85\xd3\x02\xcd\xd6\xf5\x00\xe7\xf0\xd2\x7b\x7c\x66\x51\x2c\x52\x18\xa2\x68\x1c\x58\x95\xc6\x1d\x27\x0e\xb4\x29\xe3\x90\xc4\xf2\x76\x72\xa1\x57\xaf\x46\xb6\xe9\x2c\xd5\x57\x49\x83\x8c\xfd\xe5\xf5\x30\x79\x8f\x40\xed\x68\xc8\xd4\x62\xe1\x47\x4b\xa1\x46\xc3\xa4\x25\x5c\xc5\x32\x08\xeb\xe0\x45\x6e\x0e\xef\x86\xc2\xa4\x06\xcb\x64\x47\x85\x65\x46\x20\xe5\x3d\xb3\xf4\x81\xd4\xe7\x93\xb4\x48\x46\x6e\x47\x1f\xcb\x13\xd9\x17\x06\x2a\x85\x23\x96\xd1\xeb\xc3\x55\xaa\x8c\x28\x83\x83\xf5\x71\x7f\x01\xa9\xb2\xa1\x51\x65\xdd\xfd\x4c\x17\x46\xeb\xbf\xe7\x41\x2d\xfe\xff\x11\xae\x7d\x9c\x15\xa4\xe0\xdb\xca\xc1\x38\xba\x69\x5a\x29\x9c\x29\x31\xf4\xab\x88\xf1\x34\x79\x9f\xfa\x5b\xe2\xc6\xbb\xf5\xbc\x71\x5e\xcf\x09\x3f\x35\xe9\x4d\x31\x77\x38\xe7\xff\x80\x4b\x1d\x6e\xfa\x0e\x00\x00\xff\xff\x9d\x60\x3d\x88\x79\x01\x00\x00")
|
||||
|
||||
func docGoBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
_docGo,
|
||||
"doc.go",
|
||||
)
|
||||
}
|
||||
|
||||
func docGo() (*asset, error) {
|
||||
bytes, err := docGoBytes()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// Asset loads and returns the asset for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func Asset(name string) ([]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.bytes, nil
|
||||
}
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
|
||||
// AssetString returns the asset contents as a string (instead of a []byte).
|
||||
func AssetString(name string) (string, error) {
|
||||
data, err := Asset(name)
|
||||
return string(data), err
|
||||
}
|
||||
|
||||
// MustAsset is like Asset but panics when Asset would return an error.
|
||||
// It simplifies safe initialization of global variables.
|
||||
func MustAsset(name string) []byte {
|
||||
a, err := Asset(name)
|
||||
if err != nil {
|
||||
panic("asset: Asset(" + name + "): " + err.Error())
|
||||
}
|
||||
|
||||
return a
|
||||
}
|
||||
|
||||
// MustAssetString is like AssetString but panics when Asset would return an
|
||||
// error. It simplifies safe initialization of global variables.
|
||||
func MustAssetString(name string) string {
|
||||
return string(MustAsset(name))
|
||||
}
|
||||
|
||||
// AssetInfo loads and returns the asset info for the given name.
|
||||
// It returns an error if the asset could not be found or
|
||||
// could not be loaded.
|
||||
func AssetInfo(name string) (os.FileInfo, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.info, nil
|
||||
}
|
||||
return nil, fmt.Errorf("AssetInfo %s not found", name)
|
||||
}
|
||||
|
||||
// AssetDigest returns the digest of the file with the given name. It returns an
|
||||
// error if the asset could not be found or the digest could not be loaded.
|
||||
func AssetDigest(name string) ([sha256.Size]byte, error) {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
if f, ok := _bindata[canonicalName]; ok {
|
||||
a, err := f()
|
||||
if err != nil {
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
|
||||
}
|
||||
return a.digest, nil
|
||||
}
|
||||
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
|
||||
}
|
||||
|
||||
// Digests returns a map of all known files and their checksums.
|
||||
func Digests() (map[string][sha256.Size]byte, error) {
|
||||
mp := make(map[string][sha256.Size]byte, len(_bindata))
|
||||
for name := range _bindata {
|
||||
a, err := _bindata[name]()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mp[name] = a.digest
|
||||
}
|
||||
return mp, nil
|
||||
}
|
||||
|
||||
// AssetNames returns the names of the assets.
|
||||
func AssetNames() []string {
|
||||
names := make([]string, 0, len(_bindata))
|
||||
for name := range _bindata {
|
||||
names = append(names, name)
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1572614870_initial_schema.down.sql": _1572614870_initial_schemaDownSql,
|
||||
"1572614870_initial_schema.up.sql": _1572614870_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
// directory embedded in the file by go-bindata.
|
||||
// For example if you run go-bindata on data/... and data contains the
|
||||
// following hierarchy:
|
||||
// data/
|
||||
// foo.txt
|
||||
// img/
|
||||
// a.png
|
||||
// b.png
|
||||
// then AssetDir("data") would return []string{"foo.txt", "img"},
|
||||
// AssetDir("data/img") would return []string{"a.png", "b.png"},
|
||||
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
|
||||
// AssetDir("") will return []string{"data"}.
|
||||
func AssetDir(name string) ([]string, error) {
|
||||
node := _bintree
|
||||
if len(name) != 0 {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
pathList := strings.Split(canonicalName, "/")
|
||||
for _, p := range pathList {
|
||||
node = node.Children[p]
|
||||
if node == nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
if node.Func != nil {
|
||||
return nil, fmt.Errorf("Asset %s not found", name)
|
||||
}
|
||||
rv := make([]string, 0, len(node.Children))
|
||||
for childName := range node.Children {
|
||||
rv = append(rv, childName)
|
||||
}
|
||||
return rv, nil
|
||||
}
|
||||
|
||||
type bintree struct {
|
||||
Func func() (*asset, error)
|
||||
Children map[string]*bintree
|
||||
}
|
||||
|
||||
var _bintree = &bintree{nil, map[string]*bintree{
|
||||
"1572614870_initial_schema.down.sql": &bintree{_1572614870_initial_schemaDownSql, map[string]*bintree{}},
|
||||
"1572614870_initial_schema.up.sql": &bintree{_1572614870_initial_schemaUpSql, map[string]*bintree{}},
|
||||
"doc.go": &bintree{docGo, map[string]*bintree{}},
|
||||
}}
|
||||
|
||||
// RestoreAsset restores an asset under the given directory.
|
||||
func RestoreAsset(dir, name string) error {
|
||||
data, err := Asset(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
info, err := AssetInfo(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
|
||||
}
|
||||
|
||||
// RestoreAssets restores an asset under the given directory recursively.
|
||||
func RestoreAssets(dir, name string) error {
|
||||
children, err := AssetDir(name)
|
||||
// File
|
||||
if err != nil {
|
||||
return RestoreAsset(dir, name)
|
||||
}
|
||||
// Dir
|
||||
for _, child := range children {
|
||||
err = RestoreAssets(dir, filepath.Join(name, child))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func _filePath(dir, name string) string {
|
||||
canonicalName := strings.Replace(name, "\\", "/", -1)
|
||||
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
|
||||
}
|
||||
@@ -0,0 +1 @@
DROP TABLE mvds_dependencies;

@@ -0,0 +1,6 @@
CREATE TABLE mvds_dependencies (
  msg_id BLOB PRIMARY KEY,
  dependency BLOB NOT NULL
);

CREATE INDEX idx_dependency ON mvds_dependencies(dependency);
dependency/migrations/sqlite/doc.go (new file) | 9
@@ -0,0 +1,9 @@
// This file is necessary because "github.com/status-im/migrate/v4"
// can't handle files starting with a prefix. At least that's the case
// for go-bindata.
// If go-bindata is called from the same directory, asset names
// have no prefix and "github.com/status-im/migrate/v4" works as expected.

package sqlite

//go:generate go-bindata -pkg migrations -o ../migrations.go .
dependency/tracker.go (new file) | 12
@@ -0,0 +1,12 @@
package dependency

import (
	"github.com/vacp2p/mvds/state"
)

type Tracker interface {
	Add(msg, dependency state.MessageID) error
	Dependants(id state.MessageID) ([]state.MessageID, error)
	Resolve(msg state.MessageID, dependency state.MessageID) error
	IsResolved(id state.MessageID) (bool, error)
}
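To make the contract above concrete, here is a small hedged sketch of a caller driving the `Tracker` interface with the in-memory implementation that follows in this changeset; the message IDs are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/vacp2p/mvds/dependency"
	"github.com/vacp2p/mvds/state"
)

func main() {
	// The in-memory implementation added in this changeset; the SQLite-backed
	// NewPersistentTracker(db) from tracker_sqlite.go is interchangeable
	// behind the same Tracker interface.
	var tracker dependency.Tracker = dependency.NewInMemoryTracker()

	msg := state.MessageID{0x01}    // illustrative message ID
	parent := state.MessageID{0x02} // illustrative dependency of msg

	_ = tracker.Add(msg, parent) // msg now has one unresolved dependency

	resolved, _ := tracker.IsResolved(msg)
	fmt.Println(resolved) // false: parent has not been resolved yet

	_ = tracker.Resolve(msg, parent) // mark the dependency as satisfied

	resolved, _ = tracker.IsResolved(msg)
	fmt.Println(resolved) // true: no unresolved dependencies remain
}
```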
dependency/tracker_memory.go (new file) | 71
@@ -0,0 +1,71 @@
package dependency

import (
	"reflect"
	"sync"

	"github.com/vacp2p/mvds/state"
)

// Verify that Tracker interface is implemented.
var _ Tracker = (*inMemoryTracker)(nil)

type inMemoryTracker struct {
	sync.Mutex

	dependents   map[state.MessageID][]state.MessageID
	dependencies map[state.MessageID]int

}

func NewInMemoryTracker() *inMemoryTracker {
	return &inMemoryTracker{
		dependents:   make(map[state.MessageID][]state.MessageID),
		dependencies: make(map[state.MessageID]int),
	}
}

func (md *inMemoryTracker) Add(msg, dependency state.MessageID) error {
	md.Lock()
	defer md.Unlock()
	// @todo check it wasn't already added
	md.dependents[dependency] = append(md.dependents[dependency], msg)
	md.dependencies[msg] += 1
	return nil
}

func (md *inMemoryTracker) Dependants(id state.MessageID) ([]state.MessageID, error) {
	md.Lock()
	defer md.Unlock()

	return md.dependents[id], nil
}

func (md *inMemoryTracker) Resolve(msg state.MessageID, dependency state.MessageID) error {
	md.Lock()
	defer md.Unlock()

	for i, item := range md.dependents[dependency] {
		if !reflect.DeepEqual(msg[:], item[:]) {
			continue
		}

		md.dependents[dependency] = remove(md.dependents[dependency], i)
		md.dependencies[msg] -= 1
		return nil
	}

	return nil
}

func (md *inMemoryTracker) IsResolved(id state.MessageID) (bool, error) {
	md.Lock()
	defer md.Unlock()

	return md.dependencies[id] == 0, nil
}

func remove(s []state.MessageID, i int) []state.MessageID {
	s[len(s)-1], s[i] = s[i], s[len(s)-1]
	return s[:len(s)-1]
}
dependency/tracker_persistency_test.go (new file) | 42
@@ -0,0 +1,42 @@
package dependency

import (
	"io/ioutil"
	"testing"

	"github.com/stretchr/testify/require"
	"github.com/vacp2p/mvds/dependency/migrations"
	"github.com/vacp2p/mvds/persistenceutil"
	"github.com/vacp2p/mvds/state"
)

func TestTrackerSQLitePersistence(t *testing.T) {
	tmpFile, err := ioutil.TempFile("", "")
	require.NoError(t, err)
	db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
		AssetNames:  migrations.AssetNames(),
		AssetGetter: migrations.Asset,
	})
	require.NoError(t, err)
	d := NewPersistentTracker(db)

	msg := state.MessageID{0x01}
	dependency := state.MessageID{0x02}

	err = d.Add(msg, dependency)
	require.NoError(t, err)
	dependants, err := d.Dependants(dependency)
	require.NoError(t, err)
	require.Equal(t, msg, dependants[0])

	res, err := d.IsResolved(msg)
	require.NoError(t, err)
	require.False(t, res)

	err = d.Resolve(msg, dependency)
	require.NoError(t, err)

	res, err = d.IsResolved(msg)
	require.NoError(t, err)
	require.True(t, res)
}
dependency/tracker_sqlite.go (new file) | 71
@@ -0,0 +1,71 @@
package dependency

import (
	"database/sql"

	"github.com/vacp2p/mvds/state"
)

// Verify that Tracker interface is implemented.
var _ Tracker = (*sqliteTracker)(nil)

type sqliteTracker struct {
	db *sql.DB
}

func NewPersistentTracker(db *sql.DB) *sqliteTracker {
	return &sqliteTracker{db: db}
}

func (sd *sqliteTracker) Add(msg, dependency state.MessageID) error {
	_, err := sd.db.Exec(`INSERT INTO mvds_dependencies (msg_id, dependency) VALUES (?, ?)`, msg[:], dependency[:])
	if err != nil {
		return err
	}

	return nil
}

func (sd *sqliteTracker) Dependants(id state.MessageID) ([]state.MessageID, error) {
	rows, err := sd.db.Query(`SELECT msg_id FROM mvds_dependencies WHERE dependency = ?`, id[:])
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var msgs []state.MessageID

	for rows.Next() {
		var msg []byte
		err := rows.Scan(&msg)
		if err != nil {
			return nil, err
		}

		msgs = append(msgs, state.ToMessageID(msg))
	}

	return msgs, nil
}

func (sd *sqliteTracker) Resolve(msg state.MessageID, dependency state.MessageID) error {
	_, err := sd.db.Exec(
		`DELETE FROM mvds_dependencies WHERE msg_id = ? AND dependency = ?`,
		msg[:],
		dependency[:],
	)

	return err
}

func (sd *sqliteTracker) IsResolved(id state.MessageID) (bool, error) {
	result := sd.db.QueryRow(`SELECT COUNT(*) FROM mvds_dependencies WHERE msg_id = ?`, id[:])
	var num int64
	err := result.Scan(&num)
	if err != nil {
		return false, err
	}

	return num == 0, nil
}
go.mod | 8
@@ -3,14 +3,14 @@ module github.com/vacp2p/mvds
go 1.12

require (
	github.com/golang-migrate/migrate v3.5.4+incompatible // indirect
	github.com/golang-migrate/migrate/v4 v4.5.0 // indirect
	github.com/golang/protobuf v1.3.2
	github.com/golang-migrate/migrate/v4 v4.6.2 // indirect
	github.com/golang/mock v1.2.0
	github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f // indirect
	github.com/pkg/errors v0.8.1
	github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed
	github.com/status-im/migrate/v4 v4.6.2-status.2
	github.com/stretchr/testify v1.3.1-0.20190712000136-221dbe5ed467
	go.uber.org/atomic v1.4.0 // indirect
	go.uber.org/multierr v1.1.0 // indirect
	go.uber.org/zap v1.10.0
	google.golang.org/protobuf v1.26.0
)
go.sum | 27
@@ -17,6 +17,7 @@ github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dR
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
|
||||
github.com/cznic/golex v0.0.0-20170803123110-4ab7c5e190e4/go.mod h1:+bmmJDNmKlhWNG+gwWCkaBoTy39Fs+bzRxVBzoTQbIc=
|
||||
@@ -34,7 +35,7 @@ github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zA
|
||||
github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
@@ -51,22 +52,22 @@ github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhD
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/golang-migrate/migrate v3.5.4+incompatible h1:R7OzwvCJTCgwapPCiX6DyBiu2czIUMDCB118gFTKTUA=
|
||||
github.com/golang-migrate/migrate v3.5.4+incompatible/go.mod h1:IsVUlFN5puWOmXrqjgGUfIRIbU7mr8oNBE2tyERd9Wk=
|
||||
github.com/golang-migrate/migrate/v4 v4.5.0 h1:ucd2qJu1BAKTtmjh7QlWYiq01DwlE/xXYQkOPE0ZTsI=
|
||||
github.com/golang-migrate/migrate/v4 v4.5.0/go.mod h1:SzAcz2l+yDJVhQC7fwiF7T2MAFPMIkigJz98klRJ4OE=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2 h1:LDDOHo/q1W5UDj6PbkxdCv7lv9yunyZHXvxuwDkGo3k=
|
||||
github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
@@ -104,7 +105,7 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f h1:hd3r+uv9DNLScbOrnlj82rBldHQf3XWmCeXAWbw8euQ=
|
||||
github.com/mutecomm/go-sqlcipher v0.0.0-20190227152316-55dbde17881f/go.mod h1:MyUWrZlB1aI5bs7j9/pJ8ckLLZ4QcCYcNiSbsAW32D4=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@@ -133,8 +134,8 @@ github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdh
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed h1:K2iga8l8OQIHnk2bBq2QsZTO2Q38YWy04xIspdITCdM=
|
||||
github.com/status-im/migrate/v4 v4.0.0-20190821140204-a9d340ec8fb76af4afda06acf01740d45d2661ed/go.mod h1:r8HggRBZ/k7TRwByq/Hp3P/ubFppIna0nvyavVK0pjA=
|
||||
github.com/status-im/migrate/v4 v4.6.2-status.2 h1:SdC+sMDl/aI7vUlwD2qj2p7KsK4T60IS9z4/rYCCbI8=
|
||||
github.com/status-im/migrate/v4 v4.6.2-status.2/go.mod h1:c/kc90n47GZu/58nnz1OMLTf7uE4Da4gZP5qmU+A/v8=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
@@ -148,6 +149,7 @@ github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVT
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
@@ -210,6 +212,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425222832-ad9eeb80039a/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
@@ -224,6 +228,9 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
|
||||
main.go | 38
@@ -8,6 +8,7 @@ import (
|
||||
math "math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
@@ -25,18 +26,19 @@ var (
|
||||
interactive int
|
||||
)
|
||||
|
||||
func init() {
|
||||
func parseFlags() {
|
||||
flag.IntVar(&offline, "offline", 90, "percentage of time a node is offline")
|
||||
flag.IntVar(&nodeCount, "nodes", 3, "amount of nodes")
|
||||
flag.IntVar(&communicating, "communicating", 2, "amount of nodes sending messages")
|
||||
flag.IntVar(&sharing, "sharing", 2, "amount of nodes each node shares with")
|
||||
flag.Int64Var(&interval, "interval", 5, "seconds between messages")
|
||||
flag.IntVar(&interactive, "interactive", 3, "amount of nodes to use INTERACTIVE mode, the rest will be BATCH") // @todo should probably just be how many nodes are interactive
|
||||
flag.IntVar(&interactive, "interactive", 3, "amount of nodes to use InteractiveMode mode, the rest will be BatchMode") // @todo should probably just be how many nodes are interactive
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
func main() {
|
||||
|
||||
parseFlags()
|
||||
// @todo validate flags
|
||||
|
||||
transports := make([]*transport.ChannelTransport, 0)
|
||||
@@ -50,9 +52,9 @@ func main() {
|
||||
input = append(input, in)
|
||||
transports = append(transports, t)
|
||||
|
||||
mode := node.INTERACTIVE
|
||||
mode := node.InteractiveMode
|
||||
if i+1 >= interactive {
|
||||
mode = node.BATCH
|
||||
mode = node.BatchMode
|
||||
}
|
||||
|
||||
node, err := createNode(t, peerID(), mode)
|
||||
@@ -112,7 +114,7 @@ OUTER:
|
||||
}
|
||||
|
||||
func createNode(transport transport.Transport, id state.PeerID, mode node.Mode) (*node.Node, error) {
|
||||
ds := store.NewDummyStore()
|
||||
ds := store.NewMemoryMessageStore()
|
||||
logger, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -121,12 +123,14 @@ func createNode(transport transport.Transport, id state.PeerID, mode node.Mode)
|
||||
return node.NewNode(
|
||||
ds,
|
||||
transport,
|
||||
state.NewSyncState(),
|
||||
state.NewMemorySyncState(),
|
||||
Calc,
|
||||
0,
|
||||
id,
|
||||
mode,
|
||||
peers.NewMemoryPersistence(),
|
||||
dependency.NewInMemoryTracker(),
|
||||
node.EventualMode,
|
||||
logger,
|
||||
), nil
|
||||
}
|
||||
@@ -148,22 +152,16 @@ func Calc(count uint64, epoch int64) int64 {
|
||||
return epoch + int64(count*2)
|
||||
}
|
||||
|
||||
func peerID() state.PeerID {
|
||||
bytes := make([]byte, 65)
|
||||
_, _ = rand.Read(bytes)
|
||||
func peerID() (id state.PeerID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return
|
||||
}
|
||||
|
||||
id := state.PeerID{}
|
||||
copy(id[:], bytes)
|
||||
|
||||
|
||||
|
||||
func groupId() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func groupId() state.GroupID {
|
||||
bytes := make([]byte, 32)
|
||||
_, _ = rand.Read(bytes)
|
||||
|
||||
id := state.GroupID{}
|
||||
copy(id[:], bytes)
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
@@ -32,24 +33,23 @@ type MVDSBatchSuite struct {
|
||||
|
||||
func (s *MVDSBatchSuite) SetupTest() {
|
||||
|
||||
logger, err := zap.NewDevelopment()
|
||||
s.Require().NoError(err)
|
||||
logger := zap.NewNop()
|
||||
|
||||
in1 := make(chan transport.Packet)
|
||||
t1 := transport.NewChannelTransport(0, in1)
|
||||
s.ds1 = store.NewDummyStore()
|
||||
s.state1 = state.NewSyncState()
|
||||
s.ds1 = store.NewMemoryMessageStore()
|
||||
s.state1 = state.NewMemorySyncState()
|
||||
s.peers1 = peers.NewMemoryPersistence()
|
||||
p1 := [65]byte{0x01}
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.BATCH, s.peers1, logger)
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.BatchMode, s.peers1, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
in2 := make(chan transport.Packet)
|
||||
t2 := transport.NewChannelTransport(0, in2)
|
||||
s.ds2 = store.NewDummyStore()
|
||||
s.state2 = state.NewSyncState()
|
||||
s.ds2 = store.NewMemoryMessageStore()
|
||||
s.state2 = state.NewMemorySyncState()
|
||||
p2 := [65]byte{0x02}
|
||||
s.peers2 = peers.NewMemoryPersistence()
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.BATCH, s.peers2, logger)
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.BatchMode, s.peers2, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
t2.AddOutput(p1, in1)
|
||||
t1.AddOutput(p2, in2)
|
||||
@@ -81,14 +81,12 @@ func (s *MVDSBatchSuite) TestSendClient1ToClient2() {
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
// Check message is received
|
||||
s.Require().Eventually(func() bool {
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
return err == nil && message1Receiver != nil
|
||||
}, 1*time.Second, 10*time.Millisecond)
|
||||
|
||||
message := <-subscription
|
||||
s.Equal(message.Body, content)
|
||||
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TestSendClient2ToClient1() {
|
||||
@@ -103,17 +101,16 @@ func (s *MVDSBatchSuite) TestSendClient2ToClient1() {
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Sender)
|
||||
|
||||
// Check message is received
|
||||
s.Require().Eventually(func() bool {
|
||||
message1Receiver, err := s.ds1.Get(messageID)
|
||||
return err == nil && message1Receiver != nil
|
||||
}, 1*time.Second, 10*time.Millisecond)
|
||||
|
||||
message := <-subscription
|
||||
s.Equal(message.Body, content)
|
||||
|
||||
message1Receiver, err := s.ds1.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
}
|
||||
|
||||
func (s *MVDSBatchSuite) TestAcks() {
|
||||
subscription := s.client2.Subscribe()
|
||||
messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
@@ -127,11 +124,11 @@ func (s *MVDSBatchSuite) TestAcks() {
|
||||
s.Require().NoError(err)
|
||||
s.Require().Equal(1, len(states))
|
||||
|
||||
// Check message is received
|
||||
s.Require().Eventually(func() bool {
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
return err == nil && message1Receiver != nil
|
||||
}, 1*time.Second, 10*time.Millisecond)
|
||||
<-subscription
|
||||
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
|
||||
// Check state is removed
|
||||
s.Require().Eventually(func() bool {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/suite"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node"
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
@@ -32,24 +33,23 @@ type MVDSInteractiveSuite struct {
|
||||
|
||||
func (s *MVDSInteractiveSuite) SetupTest() {
|
||||
|
||||
logger, err := zap.NewDevelopment()
|
||||
s.Require().NoError(err)
|
||||
logger := zap.NewNop()
|
||||
|
||||
in1 := make(chan transport.Packet)
|
||||
t1 := transport.NewChannelTransport(0, in1)
|
||||
s.ds1 = store.NewDummyStore()
|
||||
s.state1 = state.NewSyncState()
|
||||
s.ds1 = store.NewMemoryMessageStore()
|
||||
s.state1 = state.NewMemorySyncState()
|
||||
s.peers1 = peers.NewMemoryPersistence()
|
||||
p1 := [65]byte{0x01}
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.INTERACTIVE, s.peers1, logger)
|
||||
s.client1 = node.NewNode(s.ds1, t1, s.state1, Calc, 0, p1, node.InteractiveMode, s.peers1, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
in2 := make(chan transport.Packet)
|
||||
t2 := transport.NewChannelTransport(0, in2)
|
||||
s.ds2 = store.NewDummyStore()
|
||||
s.state2 = state.NewSyncState()
|
||||
s.ds2 = store.NewMemoryMessageStore()
|
||||
s.state2 = state.NewMemorySyncState()
|
||||
p2 := [65]byte{0x02}
|
||||
s.peers2 = peers.NewMemoryPersistence()
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.INTERACTIVE, s.peers2, logger)
|
||||
s.client2 = node.NewNode(s.ds2, t2, s.state2, Calc, 0, p2, node.InteractiveMode, s.peers2, dependency.NewInMemoryTracker(), node.EventualMode, logger)
|
||||
|
||||
t2.AddOutput(p1, in1)
|
||||
t1.AddOutput(p2, in2)
|
||||
@@ -69,6 +69,7 @@ func (s *MVDSInteractiveSuite) TearDownTest() {
|
||||
}
|
||||
|
||||
func (s *MVDSInteractiveSuite) TestInteractiveMode() {
|
||||
subscription := s.client2.Subscribe()
|
||||
messageID, err := s.client1.AppendMessage(s.groupID, []byte("message 1"))
|
||||
s.Require().NoError(err)
|
||||
|
||||
@@ -88,11 +89,10 @@ func (s *MVDSInteractiveSuite) TestInteractiveMode() {
|
||||
return err == nil && len(states) == 1 && states[0].Type == state.REQUEST
|
||||
}, 1*time.Second, 10*time.Millisecond, "An request is stored in the state")
|
||||
|
||||
// Check we eventually get the message
|
||||
s.Require().Eventually(func() bool {
|
||||
message1Receiver, err := s.ds1.Get(messageID)
|
||||
return err == nil && message1Receiver != nil
|
||||
}, 1*time.Second, 10*time.Millisecond, "The message is eventually received")
|
||||
<-subscription
|
||||
message1Receiver, err := s.ds2.Get(messageID)
|
||||
s.Require().NoError(err)
|
||||
s.Require().NotNil(message1Receiver)
|
||||
|
||||
// Check state is removed
|
||||
s.Require().Eventually(func() bool {
|
||||
|
||||
node/internal/syncstate_mock.go (new file) | 91
@@ -0,0 +1,91 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: state/state.go
|
||||
|
||||
// Package internal is a generated GoMock package.
|
||||
package internal
|
||||
|
||||
import (
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
state "github.com/vacp2p/mvds/state"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// MockSyncState is a mock of SyncState interface
|
||||
type MockSyncState struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockSyncStateMockRecorder
|
||||
}
|
||||
|
||||
// MockSyncStateMockRecorder is the mock recorder for MockSyncState
|
||||
type MockSyncStateMockRecorder struct {
|
||||
mock *MockSyncState
|
||||
}
|
||||
|
||||
// NewMockSyncState creates a new mock instance
|
||||
func NewMockSyncState(ctrl *gomock.Controller) *MockSyncState {
|
||||
mock := &MockSyncState{ctrl: ctrl}
|
||||
mock.recorder = &MockSyncStateMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use
|
||||
func (m *MockSyncState) EXPECT() *MockSyncStateMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// Add mocks base method
|
||||
func (m *MockSyncState) Add(newState state.State) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Add", newState)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Add indicates an expected call of Add
|
||||
func (mr *MockSyncStateMockRecorder) Add(newState interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockSyncState)(nil).Add), newState)
|
||||
}
|
||||
|
||||
// Remove mocks base method
|
||||
func (m *MockSyncState) Remove(id state.MessageID, peer state.PeerID) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Remove", id, peer)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Remove indicates an expected call of Remove
|
||||
func (mr *MockSyncStateMockRecorder) Remove(id, peer interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockSyncState)(nil).Remove), id, peer)
|
||||
}
|
||||
|
||||
// All mocks base method
|
||||
func (m *MockSyncState) All(epoch int64) ([]state.State, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "All", epoch)
|
||||
ret0, _ := ret[0].([]state.State)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// All indicates an expected call of All
|
||||
func (mr *MockSyncStateMockRecorder) All(epoch interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "All", reflect.TypeOf((*MockSyncState)(nil).All), epoch)
|
||||
}
|
||||
|
||||
// Map mocks base method
|
||||
func (m *MockSyncState) Map(epoch int64, process func(state.State) state.State) error {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "Map", epoch, process)
|
||||
ret0, _ := ret[0].(error)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// Map indicates an expected call of Map
|
||||
func (mr *MockSyncStateMockRecorder) Map(epoch, process interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Map", reflect.TypeOf((*MockSyncState)(nil).Map), epoch, process)
|
||||
}
|
||||
@@ -86,7 +86,7 @@ func _1565345162_initial_schemaDownSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1565345447, 0)}
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x7c, 0x69, 0xd2, 0x3, 0xea, 0x82, 0x7c, 0xb3, 0x44, 0x6c, 0xef, 0x64, 0x2c, 0x99, 0x62, 0xa2, 0x8b, 0x6f, 0x96, 0x4f, 0x34, 0x41, 0x87, 0xd5, 0x4e, 0x3, 0x7f, 0x4a, 0xd1, 0x91, 0x9, 0x99}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func _1565345162_initial_schemaUpSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1565345708, 0)}
|
||||
info := bindataFileInfo{name: "1565345162_initial_schema.up.sql", size: 86, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x78, 0x7c, 0xdd, 0x67, 0x61, 0x3e, 0x7f, 0xd4, 0xce, 0xb0, 0x17, 0xbe, 0x5a, 0xa7, 0x9e, 0x93, 0x34, 0xe8, 0xbb, 0x44, 0xfb, 0x88, 0xd6, 0x18, 0x6d, 0x9f, 0xb4, 0x22, 0xda, 0xbc, 0x87, 0x94}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565345207, 0)}
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -223,10 +223,8 @@ func AssetNames() []string {
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565345162_initial_schema.down.sql": _1565345162_initial_schemaDownSql,
|
||||
|
||||
"1565345162_initial_schema.up.sql": _1565345162_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
"1565345162_initial_schema.up.sql": _1565345162_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
|
||||
node/node.go | 345
@@ -11,6 +11,7 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/vacp2p/mvds/peers"
|
||||
@@ -24,8 +25,18 @@ import (
|
||||
type Mode int
|
||||
|
||||
const (
|
||||
INTERACTIVE Mode = iota
|
||||
BATCH
|
||||
InteractiveMode Mode = iota + 1
|
||||
BatchMode
|
||||
)
|
||||
|
||||
// ResolutionMode defines how message dependencies should be resolved.
|
||||
type ResolutionMode int
|
||||
|
||||
const (
|
||||
// EventualMode is non-blocking and will return messages before dependencies are resolved.
|
||||
EventualMode ResolutionMode = iota + 1
|
||||
// ConsistentMode blocks and does not return messages until dependencies have been resolved.
|
||||
ConsistentMode
|
||||
)
|
||||
|
||||
// CalculateNextEpoch is a function used to calculate the next `SendEpoch` for a given message.
|
||||
@@ -47,12 +58,16 @@ type Node struct {
|
||||
|
||||
payloads payloads
|
||||
|
||||
dependencies dependency.Tracker
|
||||
|
||||
nextEpoch CalculateNextEpoch
|
||||
|
||||
ID state.PeerID
|
||||
|
||||
epochPersistence *epochSQLitePersistence
|
||||
mode Mode
|
||||
|
||||
mode Mode
|
||||
resolution ResolutionMode
|
||||
|
||||
subscription chan protobuf.Message
|
||||
|
||||
@@ -64,6 +79,7 @@ func NewPersistentNode(
|
||||
st transport.Transport,
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
resolution ResolutionMode,
|
||||
nextEpoch CalculateNextEpoch,
|
||||
logger *zap.Logger,
|
||||
) (*Node, error) {
|
||||
@@ -83,8 +99,10 @@ func NewPersistentNode(
|
||||
payloads: newPayloads(),
|
||||
epochPersistence: newEpochSQLitePersistence(db),
|
||||
nextEpoch: nextEpoch,
|
||||
dependencies: dependency.NewPersistentTracker(db),
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
resolution: resolution,
|
||||
}
|
||||
if currentEpoch, err := node.epochPersistence.Get(id); err != nil {
|
||||
return nil, err
|
||||
@@ -108,18 +126,19 @@ func NewEphemeralNode(
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewDummyStore(),
|
||||
transport: t,
|
||||
syncState: state.NewSyncState(),
|
||||
peers: peers.NewMemoryPersistence(),
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
ID: id,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
transport: t,
|
||||
syncState: state.NewMemorySyncState(),
|
||||
peers: peers.NewMemoryPersistence(),
|
||||
payloads: newPayloads(),
|
||||
dependencies: dependency.NewInMemoryTracker(),
|
||||
nextEpoch: nextEpoch,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -133,6 +152,8 @@ func NewNode(
|
||||
id state.PeerID,
|
||||
mode Mode,
|
||||
pp peers.Persistence,
|
||||
md dependency.Tracker,
|
||||
resolution ResolutionMode,
|
||||
logger *zap.Logger,
|
||||
) *Node {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@@ -141,18 +162,20 @@ func NewNode(
|
||||
}
|
||||
|
||||
return &Node{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: ms,
|
||||
transport: st,
|
||||
syncState: ss,
|
||||
peers: pp,
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
ID: id,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
store: ms,
|
||||
transport: st,
|
||||
syncState: ss,
|
||||
peers: pp,
|
||||
payloads: newPayloads(),
|
||||
nextEpoch: nextEpoch,
|
||||
ID: id,
|
||||
epoch: currentEpoch,
|
||||
logger: logger.With(zap.Namespace("mvds")),
|
||||
mode: mode,
|
||||
dependencies: md,
|
||||
resolution: resolution,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -223,38 +246,53 @@ func (n *Node) Unsubscribe() {
|
||||
|
||||
// AppendMessage sends a message to a given group.
|
||||
func (n *Node) AppendMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
|
||||
m := protobuf.Message{
|
||||
p, err := n.store.GetMessagesWithoutChildren(groupID)
|
||||
parents := make([][]byte, len(p))
|
||||
if err != nil {
|
||||
n.logger.Error("Failed to retrieve parents",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
|
||||
for i, id := range p {
|
||||
parents[i] = id[:]
|
||||
}
|
||||
|
||||
return n.AppendMessageWithMetadata(groupID, data, &protobuf.Metadata{Ephemeral: false, Parents: parents})
|
||||
}
|
||||
|
||||
// AppendEphemeralMessage sends a message to a given group that has the `no_ack_required` flag set to `true`.
|
||||
func (n *Node) AppendEphemeralMessage(groupID state.GroupID, data []byte) (state.MessageID, error) {
|
||||
return n.AppendMessageWithMetadata(groupID, data, &protobuf.Metadata{Ephemeral: true})
|
||||
}
|
||||
|
||||
// AppendMessageWithMetadata sends a message to a given group with metadata.
|
||||
func (n *Node) AppendMessageWithMetadata(groupID state.GroupID, data []byte, metadata *protobuf.Metadata) (state.MessageID, error) {
|
||||
m := &protobuf.Message{
|
||||
GroupId: groupID[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: data,
|
||||
Metadata: metadata,
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
|
||||
peers, err := n.peers.GetByGroupID(groupID)
|
||||
if err != nil {
|
||||
return state.MessageID{}, fmt.Errorf("trying to send to unknown group %x", groupID[:4])
|
||||
}
|
||||
|
||||
err = n.store.Add(&m)
|
||||
err := n.store.Add(m)
|
||||
if err != nil {
|
||||
return state.MessageID{}, err
|
||||
}
|
||||
|
||||
for _, p := range peers {
|
||||
t := state.OFFER
|
||||
if n.mode == BATCH {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
|
||||
n.insertSyncState(&groupID, id, p, t)
|
||||
err = n.broadcastToGroup(groupID, n.ID, m)
|
||||
if err != nil {
|
||||
return state.MessageID{}, err
|
||||
}
|
||||
|
||||
n.logger.Debug("Sending message",
|
||||
n.logger.Debug("Appending Message to Sync State",
|
||||
zap.String("node", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("id", hex.EncodeToString(id[:4])))
|
||||
// @todo think about a way to insta trigger send messages when send was selected, we don't wanna wait for ticks here
|
||||
// @todo think about a way to insta trigger pushToSub messages when pushToSub was selected, we don't wanna wait for ticks here
|
||||
|
||||
return id, nil
|
||||
}
|
||||
@@ -267,15 +305,6 @@ func (n *Node) RequestMessage(group state.GroupID, id state.MessageID) error {
|
||||
}
|
||||
|
||||
for _, p := range peers {
|
||||
exist, err := n.IsPeerInGroup(group, p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exist {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&group, id, p, state.REQUEST)
|
||||
}
|
||||
|
||||
@@ -293,6 +322,9 @@ func (n *Node) IsPeerInGroup(g state.GroupID, p state.PeerID) (bool, error) {
|
||||
}
|
||||
|
||||
func (n *Node) sendMessages() error {
|
||||
|
||||
var toRemove []state.State
|
||||
|
||||
err := n.syncState.Map(n.epoch, func(s state.State) state.State {
|
||||
m := s.MessageID
|
||||
p := s.PeerID
|
||||
@@ -309,7 +341,6 @@ func (n *Node) sendMessages() error {
|
||||
|
||||
case state.MESSAGE:
|
||||
g := *s.GroupID
|
||||
// TODO: Handle errors
|
||||
exist, err := n.IsPeerInGroup(g, p)
|
||||
if err != nil {
|
||||
return s
|
||||
@@ -337,6 +368,9 @@ func (n *Node) sendMessages() error {
|
||||
zap.String("messageID", hex.EncodeToString(m[:4])),
|
||||
)
|
||||
|
||||
if msg.Metadata != nil && msg.Metadata.Ephemeral {
|
||||
toRemove = append(toRemove, s)
|
||||
}
|
||||
}
|
||||
|
||||
return n.updateSendEpoch(s)
|
||||
@@ -347,7 +381,7 @@ func (n *Node) sendMessages() error {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.payloads.MapAndClear(func(peer state.PeerID, payload protobuf.Payload) error {
|
||||
return n.payloads.MapAndClear(func(peer state.PeerID, payload *protobuf.Payload) error {
|
||||
err := n.transport.Send(n.ID, peer, payload)
|
||||
if err != nil {
|
||||
n.logger.Error("error sending message", zap.Error(err))
|
||||
@@ -358,7 +392,7 @@ func (n *Node) sendMessages() error {
|
||||
|
||||
}
|
||||
|
||||
func (n *Node) onPayload(sender state.PeerID, payload protobuf.Payload) {
|
||||
func (n *Node) onPayload(sender state.PeerID, payload *protobuf.Payload) {
|
||||
// Acks, Requests and Offers are all arrays of bytes as protobuf doesn't allow type aliases otherwise arrays of messageIDs would be nicer.
|
||||
if err := n.onAck(sender, payload.Acks); err != nil {
|
||||
n.logger.Error("error processing acks", zap.Error(err))
|
||||
@@ -375,7 +409,7 @@ func (n *Node) onPayload(sender state.PeerID, payload protobuf.Payload) {
|
||||
|
||||
func (n *Node) onOffer(sender state.PeerID, offers [][]byte) error {
|
||||
for _, raw := range offers {
|
||||
id := toMessageID(raw)
|
||||
id := state.ToMessageID(raw)
|
||||
n.logger.Debug("OFFER received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
@@ -399,7 +433,7 @@ func (n *Node) onOffer(sender state.PeerID, offers [][]byte) error {
|
||||
|
||||
func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
|
||||
for _, raw := range requests {
|
||||
id := toMessageID(raw)
|
||||
id := state.ToMessageID(raw)
|
||||
n.logger.Debug("REQUEST received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
@@ -416,7 +450,7 @@ func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
|
||||
continue
|
||||
}
|
||||
|
||||
groupID := toGroupID(message.GroupId)
|
||||
groupID := state.ToGroupID(message.GroupId)
|
||||
|
||||
exist, err := n.IsPeerInGroup(groupID, sender)
|
||||
if err != nil {
|
||||
@@ -439,7 +473,7 @@ func (n *Node) onRequest(sender state.PeerID, requests [][]byte) error {
|
||||
|
||||
func (n *Node) onAck(sender state.PeerID, acks [][]byte) error {
|
||||
for _, ack := range acks {
|
||||
id := toMessageID(ack)
|
||||
id := state.ToMessageID(ack)
|
||||
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil {
|
||||
@@ -461,14 +495,25 @@ func (n *Node) onMessages(sender state.PeerID, messages []*protobuf.Message) [][
|
||||
a := make([][]byte, 0)
|
||||
|
||||
for _, m := range messages {
|
||||
groupID := toGroupID(m.GroupId)
|
||||
err := n.onMessage(sender, *m)
|
||||
groupID := state.ToGroupID(m.GroupId)
|
||||
err := n.onMessage(sender, m)
|
||||
if err != nil {
|
||||
n.logger.Error("Error processing message", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
id := m.ID()
|
||||
|
||||
if m.Metadata != nil && m.Metadata.Ephemeral {
|
||||
n.logger.Debug("not sending ACK",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
zap.String("", hex.EncodeToString(sender[:4])),
|
||||
zap.String("messageID", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
n.logger.Debug("sending ACK",
|
||||
zap.String("groupID", hex.EncodeToString(groupID[:4])),
|
||||
zap.String("from", hex.EncodeToString(n.ID[:4])),
|
||||
@@ -482,9 +527,10 @@ func (n *Node) onMessages(sender state.PeerID, messages []*protobuf.Message) [][
|
||||
return a
|
||||
}
|
||||
|
||||
func (n *Node) onMessage(sender state.PeerID, msg protobuf.Message) error {
|
||||
// @todo cleanup this function
|
||||
func (n *Node) onMessage(sender state.PeerID, msg *protobuf.Message) error {
|
||||
id := msg.ID()
|
||||
groupID := toGroupID(msg.GroupId)
|
||||
groupID := state.ToGroupID(msg.GroupId)
|
||||
n.logger.Debug("MESSAGE received",
|
||||
zap.String("from", hex.EncodeToString(sender[:4])),
|
||||
zap.String("to", hex.EncodeToString(n.ID[:4])),
|
||||
@@ -492,36 +538,173 @@ func (n *Node) onMessage(sender state.PeerID, msg protobuf.Message) error {
|
||||
)
|
||||
|
||||
err := n.syncState.Remove(id, sender)
|
||||
if err != nil && err != state.ErrStateNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
if msg.Metadata == nil || !msg.Metadata.Ephemeral {
|
||||
err = n.store.Add(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = n.broadcastToGroup(groupID, sender, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = n.store.Add(&msg)
|
||||
if err != nil {
|
||||
return err
|
||||
// @todo process, should this function ever even have an error?
|
||||
}
|
||||
n.resolve(sender, msg)
|
||||
|
||||
peers, err := n.peers.GetByGroupID(groupID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *Node) broadcastToGroup(group state.GroupID, sender state.PeerID, msg *protobuf.Message) error {
|
||||
p, err := n.peers.GetByGroupID(group)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, peer := range peers {
|
||||
id := msg.ID()
|
||||
|
||||
for _, peer := range p {
|
||||
if peer == sender {
|
||||
continue
|
||||
}
|
||||
|
||||
n.insertSyncState(&groupID, id, peer, state.OFFER)
|
||||
}
|
||||
t := state.OFFER
|
||||
if n.mode == BatchMode || (msg.Metadata == nil && !msg.Metadata.Ephemeral) {
|
||||
t = state.MESSAGE
|
||||
}
|
||||
|
||||
if n.subscription != nil {
|
||||
n.subscription <- msg
|
||||
n.insertSyncState(&group, id, peer, t)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// @todo I do not think this will work, this needs be some recrusive function
// @todo add method to select depth of how far we resolve dependencies

func (n *Node) resolve(sender state.PeerID, msg *protobuf.Message) {
if n.resolution == EventualMode {
n.resolveEventually(sender, msg)
return
}

n.resolveConsistently(sender, msg)
}

func (n *Node) resolveEventually(sender state.PeerID, msg *protobuf.Message) {
if msg.Metadata == nil || len(msg.Metadata.Parents) == 0 {
n.pushToSub(msg)
return
}

for _, parent := range msg.Metadata.Parents {
pid := state.ToMessageID(parent)

if has, _ := n.store.Has(pid); has {
continue
}

group := state.ToGroupID(msg.GroupId)
n.insertSyncState(&group, pid, sender, state.REQUEST)
}

n.pushToSub(msg)
}
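`resolveEventually` hands the message to the subscriber even when some of its parents are still unknown, and only queues `REQUEST` states for the missing ones, so in eventual mode a subscriber can see a child before its parents. An application that cares about causal order therefore has to reorder on its own; the sketch below is purely illustrative and application-level (the buffering strategy and the `handle` callback are hypothetical, not part of this package):

```go
package example

import (
	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
)

// consumeEventually drains a subscription channel and calls handle for each
// message only once all of its parents have been handled. Messages whose
// parents never arrive stay buffered; a real application would add a timeout.
func consumeEventually(sub <-chan protobuf.Message, handle func(protobuf.Message)) {
	seen := make(map[state.MessageID]bool)
	var pending []protobuf.Message

	deliverable := func(m protobuf.Message) bool {
		if m.Metadata == nil {
			return true
		}
		for _, p := range m.Metadata.Parents {
			if !seen[state.ToMessageID(p)] {
				return false
			}
		}
		return true
	}

	for m := range sub {
		pending = append(pending, m)
		// Keep draining while at least one buffered message became deliverable.
		for progress := true; progress; {
			progress = false
			rest := pending[:0]
			for _, msg := range pending {
				if deliverable(msg) {
					seen[msg.ID()] = true
					handle(msg)
					progress = true
				} else {
					rest = append(rest, msg)
				}
			}
			pending = rest
		}
	}
}
```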
|
||||
|
||||
func (n *Node) resolveConsistently(sender state.PeerID, msg *protobuf.Message) {
|
||||
id := msg.ID()
|
||||
|
||||
// We push any messages whose parents have now been resolved
|
||||
dependants, err := n.dependencies.Dependants(id)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting dependants",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
for _, dependant := range dependants {
|
||||
err := n.dependencies.Resolve(dependant, id)
|
||||
if err != nil {
|
||||
n.logger.Error("error marking resolved dependency",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(dependant[:4])),
|
||||
zap.String("dependency", hex.EncodeToString(id[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
resolved, err := n.dependencies.IsResolved(dependant)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting unresolved dependencies",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(dependant[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
if !resolved {
|
||||
continue
|
||||
}
|
||||
|
||||
dmsg, err := n.store.Get(dependant)
|
||||
if err != nil {
|
||||
n.logger.Error("error getting message",
|
||||
zap.Error(err),
|
||||
zap.String("messageID", hex.EncodeToString(dependant[:4])),
|
||||
)
|
||||
}
|
||||
|
||||
if dmsg != nil {
|
||||
n.pushToSub(dmsg)
|
||||
}
|
||||
}
|
||||
|
||||
// @todo add parent dependencies to child, then we can have multiple levels?
|
||||
if msg.Metadata == nil || len(msg.Metadata.Parents) == 0 {
|
||||
n.pushToSub(msg)
|
||||
return
|
||||
}
|
||||
|
||||
hasUnresolvedDependencies := false
|
||||
for _, parent := range msg.Metadata.Parents {
|
||||
pid := state.ToMessageID(parent)
|
||||
|
||||
if has, _ := n.store.Has(pid); has {
|
||||
continue
|
||||
}
|
||||
|
||||
group := state.ToGroupID(msg.GroupId)
|
||||
n.insertSyncState(&group, pid, sender, state.REQUEST)
|
||||
hasUnresolvedDependencies = true
|
||||
|
||||
err := n.dependencies.Add(id, pid)
|
||||
if err != nil {
|
||||
n.logger.Error("error adding dependency",
|
||||
zap.Error(err),
|
||||
zap.String("msg", hex.EncodeToString(id[:4])),
|
||||
zap.String("dependency", hex.EncodeToString(pid[:4])),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if hasUnresolvedDependencies {
|
||||
return
|
||||
}
|
||||
|
||||
n.pushToSub(msg)
|
||||
}
|
||||
|
||||
func (n *Node) pushToSub(msg *protobuf.Message) {
if n.subscription == nil {
return
}

n.subscription <- *msg
}
|
||||
|
||||
func (n *Node) insertSyncState(groupID *state.GroupID, messageID state.MessageID, peerID state.PeerID, t state.RecordType) {
|
||||
s := state.State{
|
||||
GroupID: groupID,
|
||||
@@ -545,18 +728,6 @@ func (n *Node) insertSyncState(groupID *state.GroupID, messageID state.MessageID
|
||||
|
||||
func (n *Node) updateSendEpoch(s state.State) state.State {
|
||||
s.SendCount += 1
|
||||
s.SendEpoch += n.nextEpoch(s.SendCount, n.epoch)
|
||||
s.SendEpoch = n.nextEpoch(s.SendCount, n.epoch)
|
||||
return s
|
||||
}
|
||||
|
||||
func toMessageID(b []byte) state.MessageID {
|
||||
var id state.MessageID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
func toGroupID(b []byte) state.GroupID {
|
||||
var id state.GroupID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
130
node/node_test.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package node
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/vacp2p/mvds/dependency"
|
||||
"github.com/vacp2p/mvds/node/internal"
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
"github.com/vacp2p/mvds/store"
|
||||
)
|
||||
|
||||
func TestNode_resolveEventually(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncstate := internal.NewMockSyncState(ctrl)
|
||||
|
||||
node := Node{
|
||||
syncState: syncstate,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
}
|
||||
|
||||
channel := node.Subscribe()
|
||||
|
||||
peer := peerID()
|
||||
group := groupID()
|
||||
parent := messageID()
|
||||
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x01},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{parent[:]}},
|
||||
}
|
||||
|
||||
expectedState := state.State{
|
||||
GroupID: &group,
|
||||
MessageID: parent,
|
||||
PeerID: peer,
|
||||
Type: state.REQUEST,
|
||||
SendEpoch: 1,
|
||||
}
|
||||
|
||||
syncstate.EXPECT().Add(expectedState).Return(nil)
|
||||
|
||||
go node.resolveEventually(peer, msg)
|
||||
|
||||
received := <-channel
|
||||
|
||||
if !reflect.DeepEqual(*msg, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNode_resolveConsistently(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
syncstate := internal.NewMockSyncState(ctrl)
|
||||
|
||||
node := Node{
|
||||
syncState: syncstate,
|
||||
store: store.NewMemoryMessageStore(),
|
||||
dependencies: dependency.NewInMemoryTracker(),
|
||||
}
|
||||
|
||||
channel := node.Subscribe()
|
||||
|
||||
peer := peerID()
|
||||
group := groupID()
|
||||
|
||||
parent := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x02},
|
||||
}
|
||||
|
||||
parentID := parent.ID()
|
||||
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: time.Now().Unix(),
|
||||
Body: []byte{0x01},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{parentID[:]}},
|
||||
}
|
||||
|
||||
// @todo we need to make sure to add the message cause we are going through a subset of the flow
|
||||
_ = node.store.Add(msg)
|
||||
|
||||
syncstate.EXPECT().Add(gomock.Any()).DoAndReturn(func(state.State) error {
|
||||
return nil
|
||||
})
|
||||
|
||||
node.resolveConsistently(peer, msg)
|
||||
|
||||
go node.resolveConsistently(peer, parent)
|
||||
|
||||
received := <-channel
|
||||
|
||||
if !reflect.DeepEqual(*msg, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
|
||||
received = <-channel
|
||||
|
||||
if !reflect.DeepEqual(*parent, received) {
|
||||
t.Error("expected message did not match received")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func peerID() (id state.PeerID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func groupID() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
func messageID() (id state.MessageID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
@@ -10,14 +10,14 @@ import (
|
||||
type payloads struct {
|
||||
sync.Mutex
|
||||
|
||||
payloads map[state.PeerID]protobuf.Payload
|
||||
payloads map[state.PeerID]*protobuf.Payload
|
||||
}
|
||||
|
||||
// @todo check in all the functions below that we aren't duplicating stuff
|
||||
|
||||
func newPayloads() payloads {
|
||||
return payloads{
|
||||
payloads: make(map[state.PeerID]protobuf.Payload),
|
||||
payloads: make(map[state.PeerID]*protobuf.Payload),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -67,7 +67,7 @@ func (p *payloads) AddMessages(peer state.PeerID, messages ...*protobuf.Message)
|
||||
p.set(peer, payload)
|
||||
}
|
||||
|
||||
func (p *payloads) MapAndClear(f func(state.PeerID, protobuf.Payload) error) error {
|
||||
func (p *payloads) MapAndClear(f func(state.PeerID, *protobuf.Payload) error) error {
|
||||
p.Lock()
|
||||
defer p.Unlock()
|
||||
|
||||
@@ -79,14 +79,19 @@ func (p *payloads) MapAndClear(f func(state.PeerID, protobuf.Payload) error) err
|
||||
}
|
||||
|
||||
// TODO: this should only be called upon confirmation that the message has been sent
|
||||
p.payloads = make(map[state.PeerID]protobuf.Payload)
|
||||
p.payloads = make(map[state.PeerID]*protobuf.Payload)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *payloads) get(peer state.PeerID) protobuf.Payload {
|
||||
return p.payloads[peer]
|
||||
func (p *payloads) get(peer state.PeerID) *protobuf.Payload {
|
||||
result := p.payloads[peer]
|
||||
if result == nil {
|
||||
result = &protobuf.Payload{}
|
||||
p.payloads[peer] = result
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (p *payloads) set(peer state.PeerID, payload protobuf.Payload) {
|
||||
func (p *payloads) set(peer state.PeerID, payload *protobuf.Payload) {
|
||||
p.payloads[peer] = payload
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ func _1565249278_initial_schemaDownSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1565249542, 0)}
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.down.sql", size: 23, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4, 0xfb, 0x5, 0x92, 0xf0, 0x93, 0xaa, 0x83, 0xb7, 0xdf, 0x66, 0xe2, 0x97, 0x53, 0x9d, 0x34, 0xd3, 0xca, 0x97, 0xd8, 0xe1, 0xed, 0xf0, 0x4a, 0x94, 0x1a, 0xb1, 0x8f, 0xcf, 0xc, 0xa4, 0x6}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func _1565249278_initial_schemaUpSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1565251147, 0)}
|
||||
info := bindataFileInfo{name: "1565249278_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8a, 0xbc, 0x3a, 0x87, 0x12, 0x93, 0xeb, 0xb4, 0xcc, 0x42, 0x6e, 0xb2, 0x7d, 0xfa, 0x9a, 0xa8, 0x3f, 0xb, 0x6b, 0xa8, 0x2d, 0x8b, 0xde, 0x67, 0x2a, 0xa8, 0xa5, 0x42, 0xad, 0x27, 0x15, 0x7e}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565250280, 0)}
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -223,10 +223,8 @@ func AssetNames() []string {
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565249278_initial_schema.down.sql": _1565249278_initial_schemaDownSql,
|
||||
|
||||
"1565249278_initial_schema.up.sql": _1565249278_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
"1565249278_initial_schema.up.sql": _1565249278_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
|
||||
@@ -2,22 +2,22 @@ package peers
|
||||
|
||||
import "github.com/vacp2p/mvds/state"
|
||||
|
||||
type MemoryPersistence struct {
|
||||
type memoryPersistence struct {
|
||||
peers map[state.GroupID][]state.PeerID
|
||||
}
|
||||
|
||||
func NewMemoryPersistence() *MemoryPersistence {
|
||||
return &MemoryPersistence{
|
||||
func NewMemoryPersistence() *memoryPersistence {
|
||||
return &memoryPersistence{
|
||||
peers: make(map[state.GroupID][]state.PeerID),
|
||||
}
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
func (p *memoryPersistence) Add(groupID state.GroupID, peerID state.PeerID) error {
|
||||
p.peers[groupID] = append(p.peers[groupID], peerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
func (p *memoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (bool, error) {
|
||||
for _, peer := range p.peers[groupID] {
|
||||
if peer == peerID {
|
||||
return true, nil
|
||||
@@ -26,7 +26,7 @@ func (p *MemoryPersistence) Exists(groupID state.GroupID, peerID state.PeerID) (
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *MemoryPersistence) GetByGroupID(groupID state.GroupID) ([]state.PeerID, error) {
|
||||
func (p *memoryPersistence) GetByGroupID(groupID state.GroupID) ([]state.PeerID, error) {
|
||||
return p.peers[groupID], nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,164 +1,338 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: protobuf/sync.proto
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v3.14.0
|
||||
// source: sync.proto
|
||||
|
||||
package protobuf
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
math "math"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Payload struct {
|
||||
Acks [][]byte `protobuf:"bytes,5001,rep,name=acks,proto3" json:"acks,omitempty"`
|
||||
Offers [][]byte `protobuf:"bytes,5002,rep,name=offers,proto3" json:"offers,omitempty"`
|
||||
Requests [][]byte `protobuf:"bytes,5003,rep,name=requests,proto3" json:"requests,omitempty"`
|
||||
Messages []*Message `protobuf:"bytes,5004,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Acks [][]byte `protobuf:"bytes,1,rep,name=acks,proto3" json:"acks,omitempty"`
|
||||
Offers [][]byte `protobuf:"bytes,2,rep,name=offers,proto3" json:"offers,omitempty"`
|
||||
Requests [][]byte `protobuf:"bytes,3,rep,name=requests,proto3" json:"requests,omitempty"`
|
||||
Messages []*Message `protobuf:"bytes,4,rep,name=messages,proto3" json:"messages,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Payload) Reset() { *m = Payload{} }
|
||||
func (m *Payload) String() string { return proto.CompactTextString(m) }
|
||||
func (*Payload) ProtoMessage() {}
|
||||
func (x *Payload) Reset() {
|
||||
*x = Payload{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sync_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Payload) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Payload) ProtoMessage() {}
|
||||
|
||||
func (x *Payload) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sync_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Payload.ProtoReflect.Descriptor instead.
|
||||
func (*Payload) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2dca527c092c79d7, []int{0}
|
||||
return file_sync_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *Payload) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Payload.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Payload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Payload.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Payload) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Payload.Merge(m, src)
|
||||
}
|
||||
func (m *Payload) XXX_Size() int {
|
||||
return xxx_messageInfo_Payload.Size(m)
|
||||
}
|
||||
func (m *Payload) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Payload.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Payload proto.InternalMessageInfo
|
||||
|
||||
func (m *Payload) GetAcks() [][]byte {
|
||||
if m != nil {
|
||||
return m.Acks
|
||||
func (x *Payload) GetAcks() [][]byte {
|
||||
if x != nil {
|
||||
return x.Acks
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Payload) GetOffers() [][]byte {
|
||||
if m != nil {
|
||||
return m.Offers
|
||||
func (x *Payload) GetOffers() [][]byte {
|
||||
if x != nil {
|
||||
return x.Offers
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Payload) GetRequests() [][]byte {
|
||||
if m != nil {
|
||||
return m.Requests
|
||||
func (x *Payload) GetRequests() [][]byte {
|
||||
if x != nil {
|
||||
return x.Requests
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Payload) GetMessages() []*Message {
|
||||
if m != nil {
|
||||
return m.Messages
|
||||
func (x *Payload) GetMessages() []*Message {
|
||||
if x != nil {
|
||||
return x.Messages
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Metadata struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Parents [][]byte `protobuf:"bytes,1,rep,name=parents,proto3" json:"parents,omitempty"`
|
||||
Ephemeral bool `protobuf:"varint,2,opt,name=ephemeral,proto3" json:"ephemeral,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Metadata) Reset() {
|
||||
*x = Metadata{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sync_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Metadata) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Metadata) ProtoMessage() {}
|
||||
|
||||
func (x *Metadata) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sync_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
|
||||
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||
return file_sync_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *Metadata) GetParents() [][]byte {
|
||||
if x != nil {
|
||||
return x.Parents
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Metadata) GetEphemeral() bool {
|
||||
if x != nil {
|
||||
return x.Ephemeral
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Message struct {
|
||||
GroupId []byte `protobuf:"bytes,6001,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,6002,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,6003,opt,name=body,proto3" json:"body,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
GroupId []byte `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
|
||||
Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
Body []byte `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
|
||||
Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (x *Message) Reset() {
|
||||
*x = Message{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_sync_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Message) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Message) ProtoMessage() {}
|
||||
|
||||
func (x *Message) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_sync_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Message.ProtoReflect.Descriptor instead.
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_2dca527c092c79d7, []int{1}
|
||||
return file_sync_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Message.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Message) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message.Merge(m, src)
|
||||
}
|
||||
func (m *Message) XXX_Size() int {
|
||||
return xxx_messageInfo_Message.Size(m)
|
||||
}
|
||||
func (m *Message) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message proto.InternalMessageInfo
|
||||
|
||||
func (m *Message) GetGroupId() []byte {
|
||||
if m != nil {
|
||||
return m.GroupId
|
||||
func (x *Message) GetGroupId() []byte {
|
||||
if x != nil {
|
||||
return x.GroupId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Message) GetTimestamp() int64 {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
func (x *Message) GetTimestamp() int64 {
|
||||
if x != nil {
|
||||
return x.Timestamp
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Message) GetBody() []byte {
|
||||
if m != nil {
|
||||
return m.Body
|
||||
func (x *Message) GetBody() []byte {
|
||||
if x != nil {
|
||||
return x.Body
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Payload)(nil), "mvds.Payload")
|
||||
proto.RegisterType((*Message)(nil), "mvds.Message")
|
||||
func (x *Message) GetMetadata() *Metadata {
|
||||
if x != nil {
|
||||
return x.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("protobuf/sync.proto", fileDescriptor_2dca527c092c79d7) }
|
||||
var File_sync_proto protoreflect.FileDescriptor
|
||||
|
||||
var fileDescriptor_2dca527c092c79d7 = []byte{
|
||||
// 215 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2e, 0x28, 0xca, 0x2f,
|
||||
0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xae, 0xcc, 0x4b, 0xd6, 0x03, 0xf3, 0x84, 0x58, 0x72, 0xcb,
|
||||
0x52, 0x8a, 0x95, 0x1a, 0x18, 0xb9, 0xd8, 0x03, 0x12, 0x2b, 0x73, 0xf2, 0x13, 0x53, 0x84, 0x84,
|
||||
0xb9, 0x58, 0x12, 0x93, 0xb3, 0x8b, 0x25, 0x3a, 0xd5, 0x15, 0x98, 0x35, 0x78, 0x82, 0xc0, 0x1c,
|
||||
0x21, 0x71, 0x2e, 0xb6, 0xfc, 0xb4, 0xb4, 0xd4, 0xa2, 0x62, 0x89, 0x2e, 0x88, 0x30, 0x94, 0x2b,
|
||||
0x24, 0xcd, 0xc5, 0x51, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x52, 0x2c, 0xd1, 0x0d, 0x91, 0x82,
|
||||
0x0b, 0x08, 0x69, 0x71, 0x71, 0xe4, 0xa6, 0x16, 0x17, 0x27, 0xa6, 0xa7, 0x16, 0x4b, 0xf4, 0x80,
|
||||
0x24, 0xb9, 0x8d, 0x78, 0xf5, 0x40, 0x16, 0xea, 0xf9, 0x42, 0x84, 0x83, 0xe0, 0xf2, 0x4a, 0x91,
|
||||
0x5c, 0xec, 0x50, 0x41, 0x21, 0x29, 0x2e, 0x8e, 0xf4, 0xa2, 0xfc, 0xd2, 0x82, 0xf8, 0xcc, 0x14,
|
||||
0x89, 0x8f, 0x7a, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0xec, 0x60, 0x01, 0xcf, 0x14, 0x21, 0x59, 0x2e,
|
||||
0xce, 0x92, 0xcc, 0xdc, 0xd4, 0xe2, 0x92, 0xc4, 0xdc, 0x02, 0x89, 0x4f, 0x20, 0x49, 0xe6, 0x20,
|
||||
0x84, 0x08, 0xc8, 0xf1, 0x49, 0xf9, 0x29, 0x95, 0x12, 0x9f, 0x21, 0xda, 0xc0, 0x1c, 0x27, 0xae,
|
||||
0x28, 0x0e, 0x98, 0xd7, 0x93, 0xd8, 0xc0, 0x2c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18,
|
||||
0xdd, 0xc7, 0x8d, 0x0d, 0x01, 0x00, 0x00,
|
||||
var file_sync_proto_rawDesc = []byte{
|
||||
0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x76, 0x61,
|
||||
0x63, 0x2e, 0x6d, 0x76, 0x64, 0x73, 0x22, 0x80, 0x01, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f,
|
||||
0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x52, 0x04, 0x61, 0x63, 0x6b, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x73,
|
||||
0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x65, 0x72, 0x73, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c,
|
||||
0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x6d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x76,
|
||||
0x61, 0x63, 0x2e, 0x6d, 0x76, 0x64, 0x73, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52,
|
||||
0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x42, 0x0a, 0x08, 0x4d, 0x65, 0x74,
|
||||
0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73,
|
||||
0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x12,
|
||||
0x1c, 0x0a, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x08, 0x52, 0x09, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x22, 0x86, 0x01,
|
||||
0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f,
|
||||
0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x67, 0x72, 0x6f,
|
||||
0x75, 0x70, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
|
||||
0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c,
|
||||
0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x2e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
|
||||
0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x76, 0x61, 0x63, 0x2e, 0x6d,
|
||||
0x76, 0x64, 0x73, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65,
|
||||
0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0d, 0x5a, 0x0b, 0x2e, 0x2f, 0x3b, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_sync_proto_rawDescOnce sync.Once
|
||||
file_sync_proto_rawDescData = file_sync_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_sync_proto_rawDescGZIP() []byte {
|
||||
file_sync_proto_rawDescOnce.Do(func() {
|
||||
file_sync_proto_rawDescData = protoimpl.X.CompressGZIP(file_sync_proto_rawDescData)
|
||||
})
|
||||
return file_sync_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_sync_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||
var file_sync_proto_goTypes = []interface{}{
|
||||
(*Payload)(nil), // 0: vac.mvds.Payload
|
||||
(*Metadata)(nil), // 1: vac.mvds.Metadata
|
||||
(*Message)(nil), // 2: vac.mvds.Message
|
||||
}
|
||||
var file_sync_proto_depIdxs = []int32{
|
||||
2, // 0: vac.mvds.Payload.messages:type_name -> vac.mvds.Message
|
||||
1, // 1: vac.mvds.Message.metadata:type_name -> vac.mvds.Metadata
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_sync_proto_init() }
|
||||
func file_sync_proto_init() {
|
||||
if File_sync_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_sync_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Payload); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_sync_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Metadata); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_sync_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Message); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_sync_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 3,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_sync_proto_goTypes,
|
||||
DependencyIndexes: file_sync_proto_depIdxs,
|
||||
MessageInfos: file_sync_proto_msgTypes,
|
||||
}.Build()
|
||||
File_sync_proto = out.File
|
||||
file_sync_proto_rawDesc = nil
|
||||
file_sync_proto_goTypes = nil
|
||||
file_sync_proto_depIdxs = nil
|
||||
}
|
||||
|
||||
@@ -1,17 +1,23 @@
syntax = "proto3";

package mvds;
option go_package = "protobuf";
package vac.mvds;
option go_package = "./;protobuf";

message Payload {
repeated bytes acks = 5001;
repeated bytes offers = 5002;
repeated bytes requests = 5003;
repeated Message messages = 5004;
repeated bytes acks = 1;
repeated bytes offers = 2;
repeated bytes requests = 3;
repeated Message messages = 4;
}

message Metadata {
repeated bytes parents = 1;
bool ephemeral = 2;
}

message Message {
bytes group_id = 6001;
int64 timestamp = 6002;
bytes body = 6003;
bytes group_id = 1;
int64 timestamp = 2;
bytes body = 3;
Metadata metadata = 4;
}
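The three messages above are the whole wire format: `Payload` is the envelope exchanged between peers, its `acks`, `offers` and `requests` fields carry raw 32-byte message IDs, and `messages` carries full `Message` records with their optional `Metadata`. A small construction sketch with the generated Go types (all values are placeholders):

```go
package main

import (
	"fmt"
	"time"

	"github.com/vacp2p/mvds/protobuf"
)

func main() {
	group := make([]byte, 32) // placeholder 32-byte group ID

	msg := &protobuf.Message{
		GroupId:   group,
		Timestamp: time.Now().Unix(),
		Body:      []byte("hello"),
		Metadata:  &protobuf.Metadata{Ephemeral: false},
	}
	id := msg.ID() // content-derived 32-byte message ID

	// Offer the message to a peer; an ACK or REQUEST would carry id[:] the
	// same way, while batch mode would ship the full record in Messages.
	payload := &protobuf.Payload{
		Offers:   [][]byte{id[:]},
		Messages: []*protobuf.Message{msg},
	}

	fmt.Printf("payload: %d offer(s), %d message(s), id %x\n",
		len(payload.Offers), len(payload.Messages), id[:4])
}
```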
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565341329_initial_schema.down.sql (24B)
|
||||
// 1565341329_initial_schema.up.sql (237B)
|
||||
// 1565341329_initial_schema.up.sql (294B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
@@ -86,12 +86,12 @@ func _1565341329_initial_schemaDownSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1565341770, 0)}
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.down.sql", size: 24, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x20, 0x56, 0x1a, 0x0, 0xc5, 0x81, 0xb3, 0xeb, 0x2a, 0xae, 0xed, 0xbb, 0x68, 0x51, 0x68, 0xc7, 0xe3, 0x31, 0xe, 0x1, 0x3e, 0xd2, 0x85, 0x9e, 0x6d, 0x55, 0xad, 0x55, 0xd6, 0x2f, 0x29, 0xca}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\x2f\x2e\x49\x2c\x49\x2d\x56\xd0\xe0\x52\x50\x50\x50\x28\xa9\x2c\x48\x55\xf0\xf4\x0b\x71\x75\x77\x0d\x52\xf0\xf3\x0f\x51\xf0\x0b\xf5\xf1\xd1\x01\x4b\x15\xa7\xe6\xa5\xc4\x27\xe7\x97\xe6\x95\xe0\x53\x90\x5a\x90\x9f\x9c\x81\x43\x41\x7a\x51\x7e\x69\x41\x7c\x66\x8a\x82\x93\x8f\xbf\x13\x44\xa8\x20\x35\xb5\x08\x26\x82\xa6\x3a\x37\xb5\xb8\x38\x31\x3d\x15\x87\x6c\x40\x90\xa7\xaf\x63\x50\xa4\x82\xb7\x6b\xa4\x82\x06\x42\xa9\x0e\xcc\x44\x4d\x2e\x4d\x6b\x2e\x40\x00\x00\x00\xff\xff\x8b\xf0\x0a\x3b\xed\x00\x00\x00")
|
||||
var __1565341329_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x7c\x8f\xc1\x8a\x83\x30\x14\x45\xf7\xf9\x8a\xbb\x54\xf0\x0f\x5c\xe9\x4c\x18\x64\xd2\x58\x42\x0a\x75\x15\xc4\x3c\xac\x0b\x35\x98\x58\xda\xbf\x2f\xb4\x15\xa5\x60\xb7\xf7\x1c\x1e\xef\xfc\x28\x9e\x69\x0e\x9d\xe5\x82\xa3\xbf\x5a\x6f\x7c\xa8\x03\x79\x44\x0c\x00\xc2\xdd\x11\x0a\xa9\xf9\x1f\x57\x90\xa5\x86\x3c\x09\x91\x3c\x91\xa7\xc1\x9a\x66\x9c\x87\xf0\x4d\x20\x37\x36\x97\x1d\xa1\x9d\xc6\xd9\x99\xce\x22\x17\x65\xfe\x9a\x1c\xd1\xb4\x2c\x1f\x76\x4f\xde\xd7\x2d\xed\xd0\xa3\x2a\x0e\x99\xaa\xf0\xcf\x2b\x44\xab\x9a\x2c\x17\x63\x16\xa7\x8c\xbd\x6b\x0b\xf9\xcb\xcf\xe8\xec\xcd\x6c\x7e\x2c\xe5\xb6\x3f\x5a\x49\x9c\xb2\x47\x00\x00\x00\xff\xff\x5e\xe5\x72\x74\x26\x01\x00\x00")
|
||||
|
||||
func _1565341329_initial_schemaUpSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
@@ -106,8 +106,8 @@ func _1565341329_initial_schemaUpSql() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 237, mode: os.FileMode(0644), modTime: time.Unix(1565342998, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x8f, 0xf4, 0x34, 0xb, 0x4e, 0x94, 0x3b, 0xe, 0xf2, 0xc2, 0x36, 0x31, 0x47, 0x2e, 0xf8, 0x85, 0xab, 0x1a, 0x82, 0x24, 0x74, 0x39, 0xf6, 0x83, 0xaf, 0xb6, 0xb1, 0x79, 0x9, 0xda, 0x59, 0xd0}}
|
||||
info := bindataFileInfo{name: "1565341329_initial_schema.up.sql", size: 294, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x3e, 0xa5, 0x37, 0x9d, 0x3f, 0xf3, 0xc9, 0xc8, 0x12, 0x74, 0x79, 0x74, 0xff, 0xfd, 0xb1, 0x5f, 0x13, 0xaf, 0xf2, 0x50, 0x14, 0x9f, 0xdf, 0xc8, 0xc5, 0xa7, 0xc3, 0xf5, 0xa4, 0x8e, 0x8a, 0xf6}}
|
||||
return a, nil
|
||||
}
|
||||
|
||||
@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565341287, 0)}
|
||||
info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
|
||||
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
|
||||
return a, nil
|
||||
}
|
||||
@@ -223,10 +223,8 @@ func AssetNames() []string {
|
||||
// _bindata is a table, holding each asset generator, mapped to its name.
|
||||
var _bindata = map[string]func() (*asset, error){
|
||||
"1565341329_initial_schema.down.sql": _1565341329_initial_schemaDownSql,
|
||||
|
||||
"1565341329_initial_schema.up.sql": _1565341329_initial_schemaUpSql,
|
||||
|
||||
"doc.go": docGo,
|
||||
"1565341329_initial_schema.up.sql": _1565341329_initial_schemaUpSql,
|
||||
"doc.go": docGo,
|
||||
}
|
||||
|
||||
// AssetDir returns the file names below a certain
|
||||
|
||||
@@ -10,7 +10,7 @@ type memorySyncState struct {
|
||||
state []State
|
||||
}
|
||||
|
||||
func NewSyncState() *memorySyncState {
|
||||
func NewMemorySyncState() *memorySyncState {
|
||||
return &memorySyncState{}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,3 +2,17 @@ package state
|
||||
|
||||
type MessageID [32]byte
|
||||
type GroupID [32]byte
|
||||
|
||||
// ToMessageID converts a byte array to a MessageID.
|
||||
func ToMessageID(b []byte) MessageID {
|
||||
var id MessageID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
// ToGroupID converts a byte array to a GroupID.
|
||||
func ToGroupID(b []byte) GroupID {
|
||||
var id GroupID
|
||||
copy(id[:], b)
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -10,4 +10,5 @@ type MessageStore interface {
|
||||
Has(id state.MessageID) (bool, error)
|
||||
Get(id state.MessageID) (*protobuf.Message, error)
|
||||
Add(message *protobuf.Message) error
|
||||
GetMessagesWithoutChildren(id state.GroupID) ([]state.MessageID, error)
|
||||
}
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type DummyStore struct {
|
||||
sync.Mutex
|
||||
ms map[state.MessageID]*protobuf.Message
|
||||
}
|
||||
|
||||
func NewDummyStore() *DummyStore {
|
||||
return &DummyStore{ms: make(map[state.MessageID]*protobuf.Message)}
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Has(id state.MessageID) (bool, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
_, ok := ds.ms[id]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
m, ok := ds.ms[id]
|
||||
if !ok {
|
||||
return nil, errors.New("message does not exist")
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (ds *DummyStore) Add(message *protobuf.Message) error {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
ds.ms[message.ID()] = message
|
||||
return nil
|
||||
}
|
||||
81
store/messagestore_memory.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/vacp2p/mvds/protobuf"
|
||||
"github.com/vacp2p/mvds/state"
|
||||
)
|
||||
|
||||
type memoryMessageStore struct {
|
||||
sync.Mutex
|
||||
ms map[state.MessageID]*protobuf.Message
|
||||
}
|
||||
|
||||
func NewMemoryMessageStore() *memoryMessageStore {
|
||||
return &memoryMessageStore{ms: make(map[state.MessageID]*protobuf.Message)}
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Has(id state.MessageID) (bool, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
_, ok := ds.ms[id]
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
m, ok := ds.ms[id]
|
||||
if !ok {
|
||||
return nil, errors.New("message does not exist")
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) Add(message *protobuf.Message) error {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
ds.ms[message.ID()] = message
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ds *memoryMessageStore) GetMessagesWithoutChildren(group state.GroupID) ([]state.MessageID, error) {
|
||||
ds.Lock()
|
||||
defer ds.Unlock()
|
||||
|
||||
hasChildren := make(map[state.MessageID]bool)
|
||||
|
||||
for id, msg := range ds.ms {
|
||||
if state.ToGroupID(msg.GroupId) != group {
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.Metadata != nil {
|
||||
for _, parent := range msg.Metadata.Parents {
|
||||
hasChildren[state.ToMessageID(parent)] = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasChildren[id] {
|
||||
continue
|
||||
}
|
||||
|
||||
hasChildren[id] = false
|
||||
}
|
||||
|
||||
msgs := make([]state.MessageID, 0)
|
||||
for id, hasChildren := range hasChildren {
|
||||
if hasChildren {
|
||||
continue
|
||||
}
|
||||
|
||||
msgs = append(msgs, id)
|
||||
}
|
||||
|
||||
return msgs, nil
|
||||
}
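`GetMessagesWithoutChildren` computes the current leaves of a group's DAG: every stored message whose ID is never listed in another stored message's `Metadata.Parents`. A quick illustrative check against the in-memory store (placeholder values):

```go
package main

import (
	"fmt"
	"time"

	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
	"github.com/vacp2p/mvds/store"
)

func main() {
	s := store.NewMemoryMessageStore()

	var group state.GroupID // zero value is fine for this example

	parent := &protobuf.Message{GroupId: group[:], Timestamp: time.Now().Unix(), Body: []byte("a")}
	_ = s.Add(parent)
	pid := parent.ID()

	child := &protobuf.Message{
		GroupId:   group[:],
		Timestamp: time.Now().Unix(),
		Body:      []byte("b"),
		Metadata:  &protobuf.Metadata{Parents: [][]byte{pid[:]}},
	}
	_ = s.Add(child)

	// Only the child is a leaf: the parent appears in child.Metadata.Parents.
	leaves, _ := s.GetMessagesWithoutChildren(group)
	fmt.Println(len(leaves) == 1 && leaves[0] == child.ID()) // true
}
```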
|
||||
@@ -3,6 +3,7 @@ package store
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/vacp2p/mvds/state"
|
||||
|
||||
@@ -23,7 +24,13 @@ func NewPersistentMessageStore(db *sql.DB) *persistentMessageStore {
|
||||
|
||||
func (p *persistentMessageStore) Add(message *protobuf.Message) error {
|
||||
id := message.ID()
|
||||
_, err := p.db.Exec(
|
||||
|
||||
tx, err := p.db.Begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = tx.Exec(
|
||||
`INSERT INTO mvds_messages (id, group_id, timestamp, body)
|
||||
VALUES (?, ?, ?, ?)`,
|
||||
id[:],
|
||||
@@ -31,7 +38,37 @@ func (p *persistentMessageStore) Add(message *protobuf.Message) error {
|
||||
message.Timestamp,
|
||||
message.Body,
|
||||
)
|
||||
return err
|
||||
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
if message.Metadata != nil && len(message.Metadata.Parents) > 0 {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("INSERT INTO mvds_parents(message_id, parent_id) VALUES ")
|
||||
var vals []interface{}
|
||||
|
||||
for _, row := range message.Metadata.Parents {
|
||||
sb.WriteString("(?, ?),")
|
||||
vals = append(vals, id[:], row[:])
|
||||
}
|
||||
|
||||
query := sb.String()
|
||||
stmt, err := tx.Prepare(query[0:len(query)-1])
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = stmt.Exec(vals...)
|
||||
if err != nil {
|
||||
_ = tx.Rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) Get(id state.MessageID) (*protobuf.Message, error) {
|
||||
@@ -47,6 +84,31 @@ func (p *persistentMessageStore) Get(id state.MessageID) (*protobuf.Message, err
|
||||
); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
message.Metadata = &protobuf.Metadata{Ephemeral: false}
|
||||
|
||||
rows, err := p.db.Query(`SELECT parent_id FROM mvds_parents WHERE message_id = ?`, id[:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var parent []byte
|
||||
err := rows.Scan(&parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
message.Metadata.Parents = append(message.Metadata.Parents, parent)
|
||||
}
|
||||
|
||||
err = rows.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &message, nil
|
||||
}
|
||||
|
||||
@@ -65,3 +127,34 @@ func (p *persistentMessageStore) Has(id state.MessageID) (bool, error) {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
|
||||
func (p *persistentMessageStore) GetMessagesWithoutChildren(id state.GroupID) ([]state.MessageID, error) {
|
||||
var result []state.MessageID
|
||||
rows, err := p.db.Query(
|
||||
`SELECT id FROM mvds_messages WHERE group_id = ? AND id NOT IN (SELECT parent_id FROM mvds_parents)`,
|
||||
id[:],
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
var parent []byte
|
||||
err := rows.Scan(&parent)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, state.ToMessageID(parent))
|
||||
}
|
||||
|
||||
err = rows.Err()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package store
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -28,6 +29,7 @@ func TestPersistentMessageStore(t *testing.T) {
|
||||
GroupId: []byte{0x01},
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xbb, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{{0xaa, 0xbb, 0xcc}}},
|
||||
}
|
||||
|
||||
err = p.Add(&message)
|
||||
@@ -51,3 +53,52 @@ func TestPersistentMessageStore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.False(t, exists)
|
||||
}
|
||||
|
||||
func TestPersistentMessageStore_GetMessagesWithoutChildren(t *testing.T) {
|
||||
tmpFile, err := ioutil.TempFile("", "")
|
||||
require.NoError(t, err)
|
||||
db, err := persistenceutil.Open(tmpFile.Name(), "", persistenceutil.MigrationConfig{
|
||||
AssetNames: migrations.AssetNames(),
|
||||
AssetGetter: migrations.Asset,
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
p := NewPersistentMessageStore(db)
|
||||
|
||||
group := groupId()
|
||||
|
||||
now := time.Now().Unix()
|
||||
msg := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xbb, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{}},
|
||||
}
|
||||
|
||||
err = p.Add(msg)
|
||||
require.NoError(t, err)
|
||||
|
||||
id := msg.ID()
|
||||
|
||||
child := &protobuf.Message{
|
||||
GroupId: group[:],
|
||||
Timestamp: now,
|
||||
Body: []byte{0xaa, 0xcc},
|
||||
Metadata: &protobuf.Metadata{Ephemeral: false, Parents: [][]byte{id[:]}},
|
||||
}
|
||||
|
||||
err = p.Add(child)
|
||||
require.NoError(t, err)
|
||||
|
||||
msgs, err := p.GetMessagesWithoutChildren(group)
|
||||
require.NoError(t, err)
|
||||
|
||||
if msgs[0] != child.ID() {
|
||||
t.Errorf("not same \n expected %v \n actual: %v", msgs[0], child.ID())
|
||||
}
|
||||
}
|
||||
|
||||
func groupId() (id state.GroupID) {
|
||||
_, _ = rand.Read(id[:])
|
||||
return id
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by go-bindata. DO NOT EDIT.
|
||||
// sources:
|
||||
// 1565447861_initial_schema.down.sql (28B)
|
||||
// 1565447861_initial_schema.up.sql (140B)
|
||||
// 1572372377_initial_schema.down.sql (55B)
|
||||
// 1572372377_initial_schema.up.sql (365B)
|
||||
// doc.go (377B)
|
||||
|
||||
package migrations
|
||||
@@ -71,43 +71,43 @@ func (fi bindataFileInfo) Sys() interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
var __1565447861_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x71\xf5\x71\x0d\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\xb6\xe6\x02\x04\x00\x00\xff\xff\xc2\x3a\x18\x9b\x1c\x00\x00\x00")
|
||||
var __1572372377_initial_schemaDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x71\xf5\x71\x0d\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\xb6\xe6\xc2\x94\x2b\x48\x2c\x4a\xcd\x2b\x29\xb6\xe6\x02\x04\x00\x00\xff\xff\xa9\xdd\x32\x20\x37\x00\x00\x00")
|
||||
|
||||
func _1565447861_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
func _1572372377_initial_schemaDownSqlBytes() ([]byte, error) {
|
||||
return bindataRead(
|
||||
__1565447861_initial_schemaDownSql,
|
||||
"1565447861_initial_schema.down.sql",
+		__1572372377_initial_schemaDownSql,
+		"1572372377_initial_schema.down.sql",
 	)
 }

-func _1565447861_initial_schemaDownSql() (*asset, error) {
-	bytes, err := _1565447861_initial_schemaDownSqlBytes()
+func _1572372377_initial_schemaDownSql() (*asset, error) {
+	bytes, err := _1572372377_initial_schemaDownSqlBytes()
 	if err != nil {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1565447861_initial_schema.down.sql", size: 28, mode: os.FileMode(0644), modTime: time.Unix(1565447902, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x92, 0x55, 0x8d, 0x3, 0x68, 0x1a, 0x9c, 0xd7, 0xc7, 0xb4, 0x5a, 0xb1, 0x27, 0x47, 0xf4, 0xc6, 0x8d, 0x85, 0xbb, 0xae, 0xb6, 0x69, 0xc5, 0xbc, 0x21, 0xba, 0xc0, 0xc6, 0x2a, 0xc8, 0xb2, 0xf7}}
+	info := bindataFileInfo{name: "1572372377_initial_schema.down.sql", size: 55, mode: os.FileMode(0644), modTime: time.Unix(1572706379, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc1, 0x69, 0x72, 0x9f, 0x13, 0xdd, 0x23, 0x1b, 0xef, 0x2e, 0x95, 0x19, 0x42, 0xa3, 0x57, 0x8d, 0x77, 0x4, 0x73, 0xf6, 0x8a, 0xab, 0xad, 0xc1, 0xe6, 0xc7, 0x59, 0xbc, 0xee, 0x86, 0xce, 0x2f}}
 	return a, nil
 }

-var __1565447861_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x0e\x72\x75\x0c\x71\x55\x08\x71\x74\xf2\x71\x55\xc8\x2d\x4b\x29\x8e\xcf\x4d\x2d\x2e\x4e\x4c\x4f\x2d\x56\xd0\xe0\x52\x50\x50\x50\xc8\x4c\x51\x70\xf2\xf1\x77\x52\x08\x08\xf2\xf4\x75\x0c\x8a\x54\xf0\x76\x8d\xd4\x01\x4b\xa4\x17\xe5\x97\x16\xc4\xc3\xa4\xfd\xfc\x43\x14\xfc\x42\x7d\x7c\x20\x72\x25\x99\xb9\xa9\xc5\x25\x89\xb9\x05\x0a\x9e\x7e\x21\xae\xee\xae\x41\x68\xf2\x49\xf9\x29\x95\xa8\xfa\xb8\x34\xad\xb9\x00\x01\x00\x00\xff\xff\xae\x9b\x57\x51\x8c\x00\x00\x00")
+var __1572372377_initial_schemaUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x90\xbd\x4e\xc4\x30\x10\x84\x7b\x3f\xc5\x94\xb1\xc4\x1b\x50\x25\xc7\xde\xc9\xc2\xd8\xc8\x18\x89\xab\xa2\x20\x5b\x27\x17\x26\x51\x1c\x10\xbc\x3d\x72\x7e\xc8\x0f\xdb\x7e\x3b\xb3\x33\x7b\x32\x54\x5a\x82\x2d\x2b\x49\x88\x5f\x2e\xd5\xd1\xa7\xd4\xdc\x7c\x42\xc1\x00\x20\x38\xcc\x53\x49\x5d\xe1\xd9\x88\xa7\xd2\x5c\xf1\x48\xd7\xbb\x91\xdf\xfa\xf6\xb3\xab\xf3\xd6\xc8\x01\x28\x6d\xa1\x5e\xa5\x9c\xf8\x10\xa2\x4f\x43\x13\x3b\x08\x65\xe9\x42\xe6\xc0\xdf\x5b\xf7\xb3\xf1\xdf\xe8\x19\xbf\x67\x6c\xce\x27\xd4\x03\xbd\x21\xb8\xef\xfa\xef\x9e\x56\xfb\xbc\xc5\x42\xb2\x8c\xfd\xef\xd5\x35\xbd\xff\x18\x96\x5a\xb3\x2a\x1b\x8d\x77\xf7\xa1\xa6\xdd\xb5\xd5\x9e\x9e\xb5\x21\x71\x51\xf9\x07\x28\x56\x23\x0e\x43\x67\x32\xa4\x4e\xf4\x72\x7c\x65\x70\x9c\x71\xf6\x1b\x00\x00\xff\xff\xed\x46\xeb\x1a\x6d\x01\x00\x00")

-func _1565447861_initial_schemaUpSqlBytes() ([]byte, error) {
+func _1572372377_initial_schemaUpSqlBytes() ([]byte, error) {
 	return bindataRead(
-		__1565447861_initial_schemaUpSql,
-		"1565447861_initial_schema.up.sql",
+		__1572372377_initial_schemaUpSql,
+		"1572372377_initial_schema.up.sql",
 	)
 }

-func _1565447861_initial_schemaUpSql() (*asset, error) {
-	bytes, err := _1565447861_initial_schemaUpSqlBytes()
+func _1572372377_initial_schemaUpSql() (*asset, error) {
+	bytes, err := _1572372377_initial_schemaUpSqlBytes()
 	if err != nil {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "1565447861_initial_schema.up.sql", size: 140, mode: os.FileMode(0644), modTime: time.Unix(1565448062, 0)}
-	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x21, 0x13, 0xbf, 0x64, 0x18, 0xf7, 0xe2, 0xd8, 0xb5, 0x7d, 0x8, 0xf1, 0x66, 0xb9, 0xb3, 0x49, 0x68, 0xe2, 0xa2, 0xea, 0x90, 0x11, 0x70, 0x9c, 0x15, 0x28, 0x3f, 0x3f, 0x90, 0x3c, 0x76, 0xf}}
+	info := bindataFileInfo{name: "1572372377_initial_schema.up.sql", size: 365, mode: os.FileMode(0644), modTime: time.Unix(1572895921, 0)}
+	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9b, 0xb2, 0x13, 0xf6, 0x6c, 0xa8, 0xdb, 0xfb, 0xb5, 0x8d, 0xe3, 0xa9, 0x50, 0x67, 0xb, 0xe2, 0x53, 0x6b, 0x24, 0xde, 0x18, 0x19, 0xac, 0x39, 0x3f, 0x52, 0x3a, 0xe1, 0xb8, 0x91, 0x8d, 0xd0}}
 	return a, nil
 }

@@ -126,7 +126,7 @@ func docGo() (*asset, error) {
 		return nil, err
 	}

-	info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1565447878, 0)}
+	info := bindataFileInfo{name: "doc.go", size: 377, mode: os.FileMode(0644), modTime: time.Unix(1569335635, 0)}
 	a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xef, 0xaf, 0xdf, 0xcf, 0x65, 0xae, 0x19, 0xfc, 0x9d, 0x29, 0xc1, 0x91, 0xaf, 0xb5, 0xd5, 0xb1, 0x56, 0xf3, 0xee, 0xa8, 0xba, 0x13, 0x65, 0xdb, 0xab, 0xcf, 0x4e, 0xac, 0x92, 0xe9, 0x60, 0xf1}}
 	return a, nil
 }
@@ -222,11 +222,9 @@ func AssetNames() []string {

 // _bindata is a table, holding each asset generator, mapped to its name.
 var _bindata = map[string]func() (*asset, error){
-	"1565447861_initial_schema.down.sql": _1565447861_initial_schemaDownSql,
-
-	"1565447861_initial_schema.up.sql": _1565447861_initial_schemaUpSql,
-
-	"doc.go": docGo,
+	"1572372377_initial_schema.down.sql": _1572372377_initial_schemaDownSql,
+	"1572372377_initial_schema.up.sql": _1572372377_initial_schemaUpSql,
+	"doc.go": docGo,
 }

 // AssetDir returns the file names below a certain

@@ -270,8 +268,8 @@ type bintree struct {
 }

 var _bintree = &bintree{nil, map[string]*bintree{
-	"1565447861_initial_schema.down.sql": &bintree{_1565447861_initial_schemaDownSql, map[string]*bintree{}},
-	"1565447861_initial_schema.up.sql": &bintree{_1565447861_initial_schemaUpSql, map[string]*bintree{}},
+	"1572372377_initial_schema.down.sql": &bintree{_1572372377_initial_schemaDownSql, map[string]*bintree{}},
+	"1572372377_initial_schema.up.sql": &bintree{_1572372377_initial_schemaUpSql, map[string]*bintree{}},
 	"doc.go": &bintree{docGo, map[string]*bintree{}},
 }}
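The regenerated assets above are the kind of input golang-migrate can consume through its go-bindata source driver. A minimal sketch of applying them, assuming the stock `source/go_bindata` driver; the generated package's import path and the SQLite URL are placeholders, not taken from this diff:

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/sqlite3"
	bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"

	migrations "example.com/your/module/migrations" // placeholder for the generated package shown above
)

func main() {
	// Wrap the generated AssetNames/Asset accessors as a migrate source.
	res := bindata.Resource(migrations.AssetNames(), migrations.Asset)

	src, err := bindata.WithInstance(res)
	if err != nil {
		log.Fatal(err)
	}

	// The database URL is illustrative; any supported driver works the same way.
	m, err := migrate.NewWithSourceInstance("go-bindata", src, "sqlite3://mvds.db")
	if err != nil {
		log.Fatal(err)
	}

	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```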
@@ -1,6 +0,0 @@
-CREATE TABLE mvds_messages (
-    id BLOB PRIMARY KEY,
-    group_id BLOB NOT NULL,
-    timestamp INTEGER NOT NULL,
-    body BLOB NOT NULL
-);

@@ -1 +1,2 @@
 DELETE TABLE mvds_messages;
+DELETE TABLE mvds_parents;

15
store/migrations/sqlite/1572372377_initial_schema.up.sql
Normal file
@@ -0,0 +1,15 @@
CREATE TABLE mvds_messages (
    id BLOB PRIMARY KEY,
    group_id BLOB NOT NULL,
    timestamp INTEGER NOT NULL,
    body BLOB NOT NULL
);

CREATE INDEX idx_group_id ON mvds_messages(group_id);


CREATE TABLE mvds_parents (
    message_id BLOB NOT NULL,
    parent_id BLOB NOT NULL,
    FOREIGN KEY (message_id) REFERENCES mvds_messages (id)
)
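For reference, a sketch of how rows in the two tables above relate through the foreign key; the helper and the mattn/go-sqlite3 driver import are illustrative, not code from this repository:

```go
package store

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3" // illustrative driver choice
)

// saveMessage is a hypothetical helper: it stores one mvds_messages row plus
// one mvds_parents row per parent, inside a single transaction.
func saveMessage(db *sql.DB, id, groupID, body []byte, timestamp int64, parents [][]byte) error {
	tx, err := db.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // no-op after a successful Commit

	if _, err := tx.Exec(
		`INSERT INTO mvds_messages (id, group_id, timestamp, body) VALUES (?, ?, ?, ?)`,
		id, groupID, timestamp, body,
	); err != nil {
		return err
	}

	for _, parent := range parents {
		if _, err := tx.Exec(
			`INSERT INTO mvds_parents (message_id, parent_id) VALUES (?, ?)`,
			id, parent,
		); err != nil {
			return err
		}
	}

	return tx.Commit()
}
```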
@@ -36,7 +36,7 @@ func (t *ChannelTransport) Watch() Packet {
 	return <-t.in
 }

-func (t *ChannelTransport) Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error {
+func (t *ChannelTransport) Send(sender state.PeerID, peer state.PeerID, payload *protobuf.Payload) error {
 	// @todo we can do this better, we put node onlineness into a goroutine where we just stop the nodes for x seconds
 	// outside of this class
 	math.Seed(time.Now().UnixNano())

@@ -8,11 +8,11 @@ import (

 type Packet struct {
 	Sender  state.PeerID
-	Payload protobuf.Payload
+	Payload *protobuf.Payload
 }

 // Transport defines an interface allowing for agnostic transport implementations.
 type Transport interface {
 	Watch() Packet
-	Send(sender state.PeerID, peer state.PeerID, payload protobuf.Payload) error
+	Send(sender state.PeerID, peer state.PeerID, payload *protobuf.Payload) error
 }
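With `Payload` now carried as a pointer, every `Transport` implementation has to adopt the new signature. A minimal channel-backed sketch against the updated interface; the package name and import paths are assumed from the repository layout, and the wiring is illustrative:

```go
package transport

import (
	"github.com/vacp2p/mvds/protobuf"
	"github.com/vacp2p/mvds/state"
)

// chanTransport is a toy Transport: each peer gets a send-only channel and
// Watch blocks on this node's inbound channel.
type chanTransport struct {
	in  chan Packet
	out map[state.PeerID]chan<- Packet
}

func (t *chanTransport) Watch() Packet {
	return <-t.in
}

func (t *chanTransport) Send(sender state.PeerID, peer state.PeerID, payload *protobuf.Payload) error {
	t.out[peer] <- Packet{Sender: sender, Payload: payload}
	return nil
}
```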
9
vendor/github.com/golang-migrate/migrate/v4/.travis.yml
generated
vendored
@@ -26,8 +26,15 @@ cache:
|
||||
directories:
|
||||
- $GOPATH/pkg
|
||||
|
||||
|
||||
before_install:
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.16.0
|
||||
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
# Install golangci-lint
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1
|
||||
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
|
||||
|
||||
install:
|
||||
|
||||
6
vendor/github.com/golang-migrate/migrate/v4/Dockerfile
generated
vendored
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.12-alpine3.9 AS downloader
|
||||
FROM golang:1.12-alpine3.10 AS downloader
|
||||
ARG VERSION
|
||||
|
||||
RUN apk add --no-cache git gcc musl-dev
|
||||
@@ -11,9 +11,9 @@ ENV GO111MODULE=on
|
||||
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver"
|
||||
ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab"
|
||||
|
||||
RUN go build -a -o build/migrate.linux-386 -ldflags="-X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
|
||||
RUN go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
|
||||
|
||||
FROM alpine:3.9
|
||||
FROM alpine:3.10
|
||||
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
|
||||
6
vendor/github.com/golang-migrate/migrate/v4/FAQ.md
generated
vendored
@@ -68,3 +68,9 @@
 Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations against the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses the `pg_advisory_lock` function.
+
+#### Do I need to create a table for tracking the migration version used?
+No, it is done automatically.
+
+#### Can I use migrate with a non-Go project?
+Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
43
vendor/github.com/golang-migrate/migrate/v4/GETTING_STARTED.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
# Getting started
Before you start, you should understand the concept of forward/up and reverse/down database migrations.

Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases)

## Create migrations
Create some migrations using the migrate CLI. Here is an example:
```
migrate create -ext sql -dir db/migrations -seq create_users_table
```
Once you create your files, you should fill them.

**IMPORTANT:** In a project developed by more than one person there is a chance of migration inconsistency - e.g. two developers can create conflicting migrations, and the developer who created their migration later gets it merged to the repository first.
Developers and teams should keep an eye on such cases (especially during code review).
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.

Consider making your migrations idempotent - we can run the same SQL code twice in a row with the same result. This makes our migrations more robust. On the other hand, it gives slightly less control over the database schema - e.g. let's say you forgot to drop the table in the down migration. You run the down migration - the table is still there. When you run the up migration again - `CREATE TABLE` would return an error, helping you find the issue in the down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.

In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
This way, if one of the commands fails, the database remains unchanged.

## Run migrations
Run your migrations through the CLI or your app and check whether they applied the expected changes.
Just to give you an idea:
```
migrate -database YOUR_DATABASE_URL -path PATH_TO_YOUR_MIGRATIONS up
```

Just add the code to your app and you're ready to go!

Before committing your migrations you should run them up, down, and then up again to see whether the migrations work properly both ways.
(e.g. if you created a table in a migration but the reverse migration did not delete it, you will encounter an error when running the forward migration again)
It's also worth checking your migrations in a separate, containerized environment. You can find some tools at the end of this document.

**IMPORTANT:** If you would like to run multiple instances of your app on different machines, be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.

## Further reading:
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
- [Best practices](MIGRATIONS.md)
- [FAQ](FAQ.md)
- Tools for testing your migrations in a container:
  - https://github.com/dhui/dktest
  - https://github.com/ory/dockertest
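The CLI call above has a programmatic equivalent; a sketch using the library's `New`/`Up` API, with placeholder source and database URLs:

```go
package main

import (
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
)

func main() {
	// Both URLs are placeholders; pick the source and database drivers you need.
	m, err := migrate.New(
		"file://db/migrations",
		"postgres://localhost:5432/example?sslmode=disable",
	)
	if err != nil {
		log.Fatal(err)
	}

	// ErrNoChange simply means the database is already up to date.
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
}
```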
11
vendor/github.com/golang-migrate/migrate/v4/README.md
generated
vendored
@@ -5,6 +5,7 @@
|
||||
[](https://hub.docker.com/r/migrate/migrate/)
|
||||

|
||||
[](https://github.com/golang-migrate/migrate/releases)
|
||||
[](https://goreportcard.com/report/github.com/golang-migrate/migrate)
|
||||
|
||||
# migrate
|
||||
|
||||
@@ -139,6 +140,16 @@ func main() {
 }
 ```

+## Getting started
+
+Go to [getting started](GETTING_STARTED.md)
+
+## Tutorials
+
+- [PostgreSQL](database/postgres/TUTORIAL.md)
+
+(more tutorials to come)
+
 ## Migration files

 Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
16
vendor/github.com/golang-migrate/migrate/v4/database/driver.go
generated
vendored
@@ -7,8 +7,9 @@ package database
 import (
 	"fmt"
 	"io"
-	nurl "net/url"
 	"sync"
+
+	iurl "github.com/golang-migrate/migrate/v4/internal/url"
 )

 var (

@@ -81,21 +82,16 @@ type Driver interface {

 // Open returns a new driver instance.
 func Open(url string) (Driver, error) {
-	u, err := nurl.Parse(url)
+	scheme, err := iurl.SchemeFromURL(url)
 	if err != nil {
-		return nil, fmt.Errorf("Unable to parse URL. Did you escape all reserved URL characters? "+
-			"See: https://github.com/golang-migrate/migrate#database-urls Error: %v", err)
-	}
-
-	if u.Scheme == "" {
-		return nil, fmt.Errorf("database driver: invalid URL scheme")
+		return nil, err
 	}

 	driversMu.RLock()
-	d, ok := drivers[u.Scheme]
+	d, ok := drivers[scheme]
 	driversMu.RUnlock()
 	if !ok {
-		return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", u.Scheme)
+		return nil, fmt.Errorf("database driver: unknown driver %v (forgotten import?)", scheme)
 	}

 	return d.Open(url)
6
vendor/github.com/golang-migrate/migrate/v4/go.mod
generated
vendored
@@ -7,10 +7,11 @@ require (
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/cockroachdb/apd v1.1.0 // indirect
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
|
||||
github.com/containerd/containerd v1.2.7 // indirect
|
||||
github.com/cznic/ql v1.2.0
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3
|
||||
github.com/dhui/dktest v0.3.0
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282
|
||||
github.com/fsouza/fake-gcs-server v1.7.0
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4
|
||||
@@ -28,7 +29,7 @@ require (
|
||||
github.com/kshvakov/clickhouse v1.3.5
|
||||
github.com/lib/pq v1.0.0
|
||||
github.com/mattn/go-sqlite3 v1.10.0
|
||||
github.com/mongodb/mongo-go-driver v0.3.0
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
@@ -40,6 +41,7 @@ require (
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
|
||||
github.com/xdg/stringprep v1.0.0 // indirect
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b // indirect
|
||||
go.mongodb.org/mongo-driver v1.1.0
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
|
||||
|
||||
12
vendor/github.com/golang-migrate/migrate/v4/go.sum
generated
vendored
@@ -26,6 +26,8 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f h1:7uSNgsgcarNk4oiN/nNkO0J7KAjlsF5Yv5Gf/tFdHas=
|
||||
@@ -56,8 +58,8 @@ github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c
|
||||
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf h1:2v/98rHzs3v6X0AHtoCH9u+e56SdnpogB1Z2fFe1KqQ=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
@@ -147,8 +149,8 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0 h1:00tKWMrabkVU1e57/TTP4ZBIfhn/wmjlSiRnIM9d0T8=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
@@ -196,6 +198,8 @@ github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.mongodb.org/mongo-driver v1.1.0 h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
|
||||
25
vendor/github.com/golang-migrate/migrate/v4/internal/url/url.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
package url

import (
	"errors"
	"strings"
)

var errNoScheme = errors.New("no scheme")
var errEmptyURL = errors.New("URL cannot be empty")

// schemeFromURL returns the scheme from a URL string
func SchemeFromURL(url string) (string, error) {
	if url == "" {
		return "", errEmptyURL
	}

	i := strings.Index(url, ":")

	// No : or : is the first character.
	if i < 1 {
		return "", errNoScheme
	}

	return url[0:i], nil
}
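For reference, the behaviour of the new helper; since the package is `internal` it can only be imported from inside the migrate module, so the snippet below is purely illustrative:

```go
package main

import (
	"fmt"

	iurl "github.com/golang-migrate/migrate/v4/internal/url"
)

func main() {
	// Everything before the first ":" is treated as the scheme.
	scheme, err := iurl.SchemeFromURL("postgres://localhost:5432/db")
	fmt.Println(scheme, err) // postgres <nil>

	// No colon at all (or a leading colon) yields the "no scheme" error.
	_, err = iurl.SchemeFromURL("not-a-url")
	fmt.Println(err) // no scheme

	// The empty string is rejected explicitly.
	_, err = iurl.SchemeFromURL("")
	fmt.Println(err) // URL cannot be empty
}
```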
13
vendor/github.com/golang-migrate/migrate/v4/migrate.go
generated
vendored
@@ -7,12 +7,14 @@ package migrate
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
iurl "github.com/golang-migrate/migrate/v4/internal/url"
|
||||
"github.com/golang-migrate/migrate/v4/source"
|
||||
)
|
||||
|
||||
@@ -85,13 +87,13 @@ type Migrate struct {
|
||||
func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := sourceSchemeFromURL(sourceURL)
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseName, err := databaseSchemeFromURL(databaseURL)
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -119,7 +121,7 @@ func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := schemeFromURL(sourceURL)
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -145,7 +147,7 @@ func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInst
|
||||
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
databaseName, err := schemeFromURL(databaseURL)
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -804,6 +806,7 @@ func (m *Migrate) versionExists(version uint) (result error) {
|
||||
return err
|
||||
}
|
||||
|
||||
m.logErr(fmt.Errorf("no migration found for version %d", version))
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
|
||||
37
vendor/github.com/golang-migrate/migrate/v4/util.go
generated
vendored
@@ -1,7 +1,6 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
nurl "net/url"
|
||||
"strings"
|
||||
@@ -49,42 +48,6 @@ func suint(n int) uint {
|
||||
return uint(n)
|
||||
}
|
||||
|
||||
var errNoScheme = errors.New("no scheme")
|
||||
var errEmptyURL = errors.New("URL cannot be empty")
|
||||
|
||||
func sourceSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("source: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func databaseSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("database: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// schemeFromURL returns the scheme from a URL string
|
||||
func schemeFromURL(url string) (string, error) {
|
||||
if url == "" {
|
||||
return "", errEmptyURL
|
||||
}
|
||||
|
||||
u, err := nurl.Parse(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(u.Scheme) == 0 {
|
||||
return "", errNoScheme
|
||||
}
|
||||
|
||||
return u.Scheme, nil
|
||||
}
|
||||
|
||||
// FilterCustomQuery filters all query values starting with `x-`
|
||||
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
|
||||
ux := *u
|
||||
|
||||
12
vendor/github.com/golang/mock/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# This is the official list of GoMock authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alex Reece <awreece@gmail.com>
|
||||
Google Inc.
|
||||
37
vendor/github.com/golang/mock/CONTRIBUTORS
generated
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
# This is the official list of people who can contribute (and typically
|
||||
# have contributed) code to the gomock repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# An entry with two email addresses specifies that the
|
||||
# first address should be used in the submit logs and
|
||||
# that the second address should be recognized as the
|
||||
# same person when interacting with Rietveld.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
|
||||
Alex Reece <awreece@gmail.com>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Ryan Barrett <ryanb@google.com>
|
||||
202
vendor/github.com/golang/mock/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
420
vendor/github.com/golang/mock/gomock/call.go
generated
vendored
Normal file
@@ -0,0 +1,420 @@
|
||||
// Copyright 2010 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gomock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Call represents an expected call to a mock.
|
||||
type Call struct {
|
||||
t TestHelper // for triggering test failures on invalid call setup
|
||||
|
||||
receiver interface{} // the receiver of the method call
|
||||
method string // the name of the method
|
||||
methodType reflect.Type // the type of the method
|
||||
args []Matcher // the args
|
||||
origin string // file and line number of call setup
|
||||
|
||||
preReqs []*Call // prerequisite calls
|
||||
|
||||
// Expectations
|
||||
minCalls, maxCalls int
|
||||
|
||||
numCalls int // actual number made
|
||||
|
||||
// actions are called when this Call is called. Each action gets the args and
|
||||
// can set the return values by returning a non-nil slice. Actions run in the
|
||||
// order they are created.
|
||||
actions []func([]interface{}) []interface{}
|
||||
}
|
||||
|
||||
// newCall creates a *Call. It requires the method type in order to support
|
||||
// unexported methods.
|
||||
func newCall(t TestHelper, receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
|
||||
t.Helper()
|
||||
|
||||
// TODO: check arity, types.
|
||||
margs := make([]Matcher, len(args))
|
||||
for i, arg := range args {
|
||||
if m, ok := arg.(Matcher); ok {
|
||||
margs[i] = m
|
||||
} else if arg == nil {
|
||||
// Handle nil specially so that passing a nil interface value
|
||||
// will match the typed nils of concrete args.
|
||||
margs[i] = Nil()
|
||||
} else {
|
||||
margs[i] = Eq(arg)
|
||||
}
|
||||
}
|
||||
|
||||
origin := callerInfo(3)
|
||||
actions := []func([]interface{}) []interface{}{func([]interface{}) []interface{} {
|
||||
// Synthesize the zero value for each of the return args' types.
|
||||
rets := make([]interface{}, methodType.NumOut())
|
||||
for i := 0; i < methodType.NumOut(); i++ {
|
||||
rets[i] = reflect.Zero(methodType.Out(i)).Interface()
|
||||
}
|
||||
return rets
|
||||
}}
|
||||
return &Call{t: t, receiver: receiver, method: method, methodType: methodType,
|
||||
args: margs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions}
|
||||
}
|
||||
|
||||
// AnyTimes allows the expectation to be called 0 or more times
|
||||
func (c *Call) AnyTimes() *Call {
|
||||
c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
|
||||
return c
|
||||
}
|
||||
|
||||
// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called, MinTimes also
|
||||
// sets the maximum number of calls to infinity.
|
||||
func (c *Call) MinTimes(n int) *Call {
|
||||
c.minCalls = n
|
||||
if c.maxCalls == 1 {
|
||||
c.maxCalls = 1e8
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called, MaxTimes also
|
||||
// sets the minimum number of calls to 0.
|
||||
func (c *Call) MaxTimes(n int) *Call {
|
||||
c.maxCalls = n
|
||||
if c.minCalls == 1 {
|
||||
c.minCalls = 0
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn declares the action to run when the call is matched.
|
||||
// The return values from this function are returned by the mocked function.
|
||||
// It takes an interface{} argument to support n-arity functions.
|
||||
func (c *Call) DoAndReturn(f interface{}) *Call {
|
||||
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []interface{}) []interface{} {
|
||||
vargs := make([]reflect.Value, len(args))
|
||||
ft := v.Type()
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vargs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vargs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
vrets := v.Call(vargs)
|
||||
rets := make([]interface{}, len(vrets))
|
||||
for i, ret := range vrets {
|
||||
rets[i] = ret.Interface()
|
||||
}
|
||||
return rets
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// Do declares the action to run when the call is matched. The function's
|
||||
// return values are ignored to retain backward compatibility. To use the
|
||||
// return values call DoAndReturn.
|
||||
// It takes an interface{} argument to support n-arity functions.
|
||||
func (c *Call) Do(f interface{}) *Call {
|
||||
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []interface{}) []interface{} {
|
||||
vargs := make([]reflect.Value, len(args))
|
||||
ft := v.Type()
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vargs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vargs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
v.Call(vargs)
|
||||
return nil
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// Return declares the values to be returned by the mocked function call.
|
||||
func (c *Call) Return(rets ...interface{}) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
mt := c.methodType
|
||||
if len(rets) != mt.NumOut() {
|
||||
c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
|
||||
}
|
||||
for i, ret := range rets {
|
||||
if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
|
||||
// Identical types; nothing to do.
|
||||
} else if got == nil {
|
||||
// Nil needs special handling.
|
||||
switch want.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
// ok
|
||||
default:
|
||||
c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
|
||||
i, c.receiver, c.method, want, c.origin)
|
||||
}
|
||||
} else if got.AssignableTo(want) {
|
||||
// Assignable type relation. Make the assignment now so that the generated code
|
||||
// can return the values with a type assertion.
|
||||
v := reflect.New(want).Elem()
|
||||
v.Set(reflect.ValueOf(ret))
|
||||
rets[i] = v.Interface()
|
||||
} else {
|
||||
c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
|
||||
i, c.receiver, c.method, got, want, c.origin)
|
||||
}
|
||||
}
|
||||
|
||||
c.addAction(func([]interface{}) []interface{} {
|
||||
return rets
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Times declares the exact number of times a function call is expected to be executed.
|
||||
func (c *Call) Times(n int) *Call {
|
||||
c.minCalls, c.maxCalls = n, n
|
||||
return c
|
||||
}
|
||||
|
||||
// SetArg declares an action that will set the nth argument's value,
|
||||
// indirected through a pointer. Or, in the case of a slice, SetArg
|
||||
// will copy value's elements into the nth argument.
|
||||
func (c *Call) SetArg(n int, value interface{}) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
mt := c.methodType
|
||||
// TODO: This will break on variadic methods.
|
||||
// We will need to check those at invocation time.
|
||||
if n < 0 || n >= mt.NumIn() {
|
||||
c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
|
||||
n, mt.NumIn(), c.origin)
|
||||
}
|
||||
// Permit setting argument through an interface.
|
||||
// In the interface case, we don't (nay, can't) check the type here.
|
||||
at := mt.In(n)
|
||||
switch at.Kind() {
|
||||
case reflect.Ptr:
|
||||
dt := at.Elem()
|
||||
if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
|
||||
c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
|
||||
n, vt, dt, c.origin)
|
||||
}
|
||||
case reflect.Interface:
|
||||
// nothing to do
|
||||
case reflect.Slice:
|
||||
// nothing to do
|
||||
default:
|
||||
c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice type %v [%s]",
|
||||
n, at, c.origin)
|
||||
}
|
||||
|
||||
c.addAction(func(args []interface{}) []interface{} {
|
||||
v := reflect.ValueOf(value)
|
||||
switch reflect.TypeOf(args[n]).Kind() {
|
||||
case reflect.Slice:
|
||||
setSlice(args[n], v)
|
||||
default:
|
||||
reflect.ValueOf(args[n]).Elem().Set(v)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// isPreReq returns true if other is a direct or indirect prerequisite to c.
|
||||
func (c *Call) isPreReq(other *Call) bool {
|
||||
for _, preReq := range c.preReqs {
|
||||
if other == preReq || preReq.isPreReq(other) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// After declares that the call may only match after preReq has been exhausted.
|
||||
func (c *Call) After(preReq *Call) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
if c == preReq {
|
||||
c.t.Fatalf("A call isn't allowed to be its own prerequisite")
|
||||
}
|
||||
if preReq.isPreReq(c) {
|
||||
c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
|
||||
}
|
||||
|
||||
c.preReqs = append(c.preReqs, preReq)
|
||||
return c
|
||||
}
|
||||
|
||||
// Returns true if the minimum number of calls have been made.
|
||||
func (c *Call) satisfied() bool {
|
||||
return c.numCalls >= c.minCalls
|
||||
}
|
||||
|
||||
// Returns true iff the maximum number of calls have been made.
|
||||
func (c *Call) exhausted() bool {
|
||||
return c.numCalls >= c.maxCalls
|
||||
}
|
||||
|
||||
func (c *Call) String() string {
|
||||
args := make([]string, len(c.args))
|
||||
for i, arg := range c.args {
|
||||
args[i] = arg.String()
|
||||
}
|
||||
arguments := strings.Join(args, ", ")
|
||||
return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
|
||||
}
|
||||
|
||||
// Tests if the given call matches the expected call.
|
||||
// If yes, returns nil. If no, returns error with message explaining why it does not match.
|
||||
func (c *Call) matches(args []interface{}) error {
|
||||
if !c.methodType.IsVariadic() {
|
||||
if len(args) != len(c.args) {
|
||||
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||
c.origin, len(args), len(c.args))
|
||||
}
|
||||
|
||||
for i, m := range c.args {
|
||||
if !m.Matches(args[i]) {
|
||||
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), args[i], m)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if len(c.args) < c.methodType.NumIn()-1 {
|
||||
return fmt.Errorf("Expected call at %s has the wrong number of matchers. Got: %d, want: %d",
|
||||
c.origin, len(c.args), c.methodType.NumIn()-1)
|
||||
}
|
||||
if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
|
||||
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||
c.origin, len(args), len(c.args))
|
||||
}
|
||||
if len(args) < len(c.args)-1 {
|
||||
return fmt.Errorf("Expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
|
||||
c.origin, len(args), len(c.args)-1)
|
||||
}
|
||||
|
||||
for i, m := range c.args {
|
||||
if i < c.methodType.NumIn()-1 {
|
||||
// Non-variadic args
|
||||
if !m.Matches(args[i]) {
|
||||
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), args[i], m)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// The last arg has a possibility of a variadic argument, so let it branch
|
||||
|
||||
// sample: Foo(a int, b int, c ...int)
|
||||
if i < len(c.args) && i < len(args) {
|
||||
if m.Matches(args[i]) {
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB)
|
||||
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// The number of actual args don't match the number of matchers,
|
||||
// or the last matcher is a slice and the last arg is not.
|
||||
// If this function still matches it is because the last matcher
|
||||
// matches all the remaining arguments or the lack of any.
|
||||
// Convert the remaining arguments, if any, into a slice of the
|
||||
// expected type.
|
||||
vargsType := c.methodType.In(c.methodType.NumIn() - 1)
|
||||
vargs := reflect.MakeSlice(vargsType, 0, len(args)-i)
|
||||
for _, arg := range args[i:] {
|
||||
vargs = reflect.Append(vargs, reflect.ValueOf(arg))
|
||||
}
|
||||
if m.Matches(vargs.Interface()) {
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
|
||||
break
|
||||
}
|
||||
// Wrong number of matchers or not match. Fail.
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
|
||||
return fmt.Errorf("Expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), args[i:], c.args[i])
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
// Check that all prerequisite calls have been satisfied.
|
||||
for _, preReqCall := range c.preReqs {
|
||||
if !preReqCall.satisfied() {
|
||||
return fmt.Errorf("Expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
|
||||
c.origin, preReqCall, c)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the call is not exhausted.
|
||||
if c.exhausted() {
|
||||
return fmt.Errorf("Expected call at %s has already been called the max number of times.", c.origin)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dropPrereqs tells the expected Call to not re-check prerequisite calls any
|
||||
// longer, and to return its current set.
|
||||
func (c *Call) dropPrereqs() (preReqs []*Call) {
|
||||
preReqs = c.preReqs
|
||||
c.preReqs = nil
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Call) call(args []interface{}) []func([]interface{}) []interface{} {
|
||||
c.numCalls++
|
||||
return c.actions
|
||||
}
|
||||
|
||||
// InOrder declares that the given calls should occur in order.
|
||||
func InOrder(calls ...*Call) {
|
||||
for i := 1; i < len(calls); i++ {
|
||||
calls[i].After(calls[i-1])
|
||||
}
|
||||
}
|
||||
|
||||
func setSlice(arg interface{}, v reflect.Value) {
|
||||
va := reflect.ValueOf(arg)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
va.Index(i).Set(v.Index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Call) addAction(action func([]interface{}) []interface{}) {
|
||||
c.actions = append(c.actions, action)
|
||||
}
|
||||
108
vendor/github.com/golang/mock/gomock/callset.go
generated
vendored
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2011 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gomock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// callSet represents a set of expected calls, indexed by receiver and method
|
||||
// name.
|
||||
type callSet struct {
|
||||
// Calls that are still expected.
|
||||
expected map[callSetKey][]*Call
|
||||
// Calls that have been exhausted.
|
||||
exhausted map[callSetKey][]*Call
|
||||
}
|
||||
|
||||
// callSetKey is the key in the maps in callSet
|
||||
type callSetKey struct {
|
||||
receiver interface{}
|
||||
fname string
|
||||
}
|
||||
|
||||
func newCallSet() *callSet {
|
||||
return &callSet{make(map[callSetKey][]*Call), make(map[callSetKey][]*Call)}
|
||||
}
|
||||
|
||||
// Add adds a new expected call.
|
||||
func (cs callSet) Add(call *Call) {
|
||||
key := callSetKey{call.receiver, call.method}
|
||||
m := cs.expected
|
||||
if call.exhausted() {
|
||||
m = cs.exhausted
|
||||
}
|
||||
m[key] = append(m[key], call)
|
||||
}
|
||||
|
||||
// Remove removes an expected call.
|
||||
func (cs callSet) Remove(call *Call) {
|
||||
key := callSetKey{call.receiver, call.method}
|
||||
calls := cs.expected[key]
|
||||
for i, c := range calls {
|
||||
if c == call {
|
||||
// maintain order for remaining calls
|
||||
cs.expected[key] = append(calls[:i], calls[i+1:]...)
|
||||
cs.exhausted[key] = append(cs.exhausted[key], call)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FindMatch searches for a matching call. Returns error with explanation message if no call matched.
|
||||
func (cs callSet) FindMatch(receiver interface{}, method string, args []interface{}) (*Call, error) {
|
||||
key := callSetKey{receiver, method}
|
||||
|
||||
// Search through the expected calls.
|
||||
expected := cs.expected[key]
|
||||
var callsErrors bytes.Buffer
|
||||
for _, call := range expected {
|
||||
err := call.matches(args)
|
||||
if err != nil {
|
||||
fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||
} else {
|
||||
return call, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If we haven't found a match then search through the exhausted calls so we
|
||||
// get useful error messages.
|
||||
exhausted := cs.exhausted[key]
|
||||
for _, call := range exhausted {
|
||||
if err := call.matches(args); err != nil {
|
||||
fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(expected)+len(exhausted) == 0 {
|
||||
fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf(callsErrors.String())
|
||||
}
|
||||
|
||||
// Failures returns the calls that are not satisfied.
|
||||
func (cs callSet) Failures() []*Call {
|
||||
failures := make([]*Call, 0, len(cs.expected))
|
||||
for _, calls := range cs.expected {
|
||||
for _, call := range calls {
|
||||
if !call.satisfied() {
|
||||
failures = append(failures, call)
|
||||
}
|
||||
}
|
||||
}
|
||||
return failures
|
||||
}
|
||||
235
vendor/github.com/golang/mock/gomock/controller.go
generated
vendored
Normal file
@@ -0,0 +1,235 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// GoMock - a mock framework for Go.
//
// Standard usage:
//   (1) Define an interface that you wish to mock.
//         type MyInterface interface {
//           SomeMethod(x int64, y string)
//         }
//   (2) Use mockgen to generate a mock from the interface.
//   (3) Use the mock in a test:
//         func TestMyThing(t *testing.T) {
//           mockCtrl := gomock.NewController(t)
//           defer mockCtrl.Finish()
//
//           mockObj := something.NewMockMyInterface(mockCtrl)
//           mockObj.EXPECT().SomeMethod(4, "blah")
//           // pass mockObj to a real object and play with it.
//         }
//
// By default, expected calls are not enforced to run in any particular order.
// Call order dependency can be enforced by use of InOrder and/or Call.After.
// Call.After can create more varied call order dependencies, but InOrder is
// often more convenient.
//
// The following examples create equivalent call order dependencies.
//
// Example of using Call.After to chain expected call order:
//
//     firstCall := mockObj.EXPECT().SomeMethod(1, "first")
//     secondCall := mockObj.EXPECT().SomeMethod(2, "second").After(firstCall)
//     mockObj.EXPECT().SomeMethod(3, "third").After(secondCall)
//
// Example of using InOrder to declare expected call order:
//
//     gomock.InOrder(
//         mockObj.EXPECT().SomeMethod(1, "first"),
//         mockObj.EXPECT().SomeMethod(2, "second"),
//         mockObj.EXPECT().SomeMethod(3, "third"),
//     )
//
// TODO:
//   - Handle different argument/return types (e.g. ..., chan, map, interface).
package gomock

import (
    "context"
    "fmt"
    "reflect"
    "runtime"
    "sync"
)

// A TestReporter is something that can be used to report test failures.
// It is satisfied by the standard library's *testing.T.
type TestReporter interface {
    Errorf(format string, args ...interface{})
    Fatalf(format string, args ...interface{})
}

// TestHelper is a TestReporter that has the Helper method. It is satisfied
// by the standard library's *testing.T.
type TestHelper interface {
    TestReporter
    Helper()
}

// A Controller represents the top-level control of a mock ecosystem.
// It defines the scope and lifetime of mock objects, as well as their expectations.
// It is safe to call Controller's methods from multiple goroutines.
type Controller struct {
    // T should only be called within a generated mock. It is not intended to
    // be used in user code and may be changed in future versions. T is the
    // TestReporter passed in when creating the Controller via NewController.
    // If the TestReporter does not implment a TestHelper it will be wrapped
    // with a nopTestHelper.
    T             TestHelper
    mu            sync.Mutex
    expectedCalls *callSet
    finished      bool
}

func NewController(t TestReporter) *Controller {
    h, ok := t.(TestHelper)
    if !ok {
        h = nopTestHelper{t}
    }

    return &Controller{
        T:             h,
        expectedCalls: newCallSet(),
    }
}

type cancelReporter struct {
    TestHelper
    cancel func()
}

func (r *cancelReporter) Errorf(format string, args ...interface{}) {
    r.TestHelper.Errorf(format, args...)
}
func (r *cancelReporter) Fatalf(format string, args ...interface{}) {
    defer r.cancel()
    r.TestHelper.Fatalf(format, args...)
}

// WithContext returns a new Controller and a Context, which is cancelled on any
// fatal failure.
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
    h, ok := t.(TestHelper)
    if !ok {
        h = nopTestHelper{t}
    }

    ctx, cancel := context.WithCancel(ctx)
    return NewController(&cancelReporter{h, cancel}), ctx
}

type nopTestHelper struct {
    TestReporter
}

func (h nopTestHelper) Helper() {}

func (ctrl *Controller) RecordCall(receiver interface{}, method string, args ...interface{}) *Call {
    ctrl.T.Helper()

    recv := reflect.ValueOf(receiver)
    for i := 0; i < recv.Type().NumMethod(); i++ {
        if recv.Type().Method(i).Name == method {
            return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
        }
    }
    ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
    panic("unreachable")
}

func (ctrl *Controller) RecordCallWithMethodType(receiver interface{}, method string, methodType reflect.Type, args ...interface{}) *Call {
    ctrl.T.Helper()

    call := newCall(ctrl.T, receiver, method, methodType, args...)

    ctrl.mu.Lock()
    defer ctrl.mu.Unlock()
    ctrl.expectedCalls.Add(call)

    return call
}

func (ctrl *Controller) Call(receiver interface{}, method string, args ...interface{}) []interface{} {
    ctrl.T.Helper()

    // Nest this code so we can use defer to make sure the lock is released.
    actions := func() []func([]interface{}) []interface{} {
        ctrl.T.Helper()
        ctrl.mu.Lock()
        defer ctrl.mu.Unlock()

        expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
        if err != nil {
            origin := callerInfo(2)
            ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, args, origin, err)
        }

        // Two things happen here:
        // * the matching call no longer needs to check prerequite calls,
        // * and the prerequite calls are no longer expected, so remove them.
        preReqCalls := expected.dropPrereqs()
        for _, preReqCall := range preReqCalls {
            ctrl.expectedCalls.Remove(preReqCall)
        }

        actions := expected.call(args)
        if expected.exhausted() {
            ctrl.expectedCalls.Remove(expected)
        }
        return actions
    }()

    var rets []interface{}
    for _, action := range actions {
        if r := action(args); r != nil {
            rets = r
        }
    }

    return rets
}

func (ctrl *Controller) Finish() {
    ctrl.T.Helper()

    ctrl.mu.Lock()
    defer ctrl.mu.Unlock()

    if ctrl.finished {
        ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
    }
    ctrl.finished = true

    // If we're currently panicking, probably because this is a deferred call,
    // pass through the panic.
    if err := recover(); err != nil {
        panic(err)
    }

    // Check that all remaining expected calls are satisfied.
    failures := ctrl.expectedCalls.Failures()
    for _, call := range failures {
        ctrl.T.Errorf("missing call(s) to %v", call)
    }
    if len(failures) != 0 {
        ctrl.T.Fatalf("aborting test due to missing call(s)")
    }
}

func callerInfo(skip int) string {
    if _, file, line, ok := runtime.Caller(skip + 1); ok {
        return fmt.Sprintf("%s:%d", file, line)
    }
    return "unknown file"
}
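For orientation, this is the shape of a test driven by the controller above. It is a minimal sketch, not part of the vendored file: the `mocks` import path, the `PeerStore` interface and the `NewMockPeerStore` constructor are hypothetical placeholders standing in for whatever mockgen generates in this repository.

```go
package example_test

import (
	"testing"

	"github.com/golang/mock/gomock"

	"example.com/project/mocks" // hypothetical package holding the generated mock
)

func TestAddPeer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish() // reports any expected calls that were never made

	store := mocks.NewMockPeerStore(ctrl) // hypothetical generated mock
	store.EXPECT().Add("peer-1").Return(nil)

	if err := store.Add("peer-1"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
```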
122
vendor/github.com/golang/mock/gomock/matchers.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
// Copyright 2010 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package gomock

import (
    "fmt"
    "reflect"
)

// A Matcher is a representation of a class of values.
// It is used to represent the valid or expected arguments to a mocked method.
type Matcher interface {
    // Matches returns whether x is a match.
    Matches(x interface{}) bool

    // String describes what the matcher matches.
    String() string
}

type anyMatcher struct{}

func (anyMatcher) Matches(x interface{}) bool {
    return true
}

func (anyMatcher) String() string {
    return "is anything"
}

type eqMatcher struct {
    x interface{}
}

func (e eqMatcher) Matches(x interface{}) bool {
    return reflect.DeepEqual(e.x, x)
}

func (e eqMatcher) String() string {
    return fmt.Sprintf("is equal to %v", e.x)
}

type nilMatcher struct{}

func (nilMatcher) Matches(x interface{}) bool {
    if x == nil {
        return true
    }

    v := reflect.ValueOf(x)
    switch v.Kind() {
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
        reflect.Ptr, reflect.Slice:
        return v.IsNil()
    }

    return false
}

func (nilMatcher) String() string {
    return "is nil"
}

type notMatcher struct {
    m Matcher
}

func (n notMatcher) Matches(x interface{}) bool {
    return !n.m.Matches(x)
}

func (n notMatcher) String() string {
    // TODO: Improve this if we add a NotString method to the Matcher interface.
    return "not(" + n.m.String() + ")"
}

type assignableToTypeOfMatcher struct {
    targetType reflect.Type
}

func (m assignableToTypeOfMatcher) Matches(x interface{}) bool {
    return reflect.TypeOf(x).AssignableTo(m.targetType)
}

func (m assignableToTypeOfMatcher) String() string {
    return "is assignable to " + m.targetType.Name()
}

// Constructors
func Any() Matcher             { return anyMatcher{} }
func Eq(x interface{}) Matcher { return eqMatcher{x} }
func Nil() Matcher             { return nilMatcher{} }
func Not(x interface{}) Matcher {
    if m, ok := x.(Matcher); ok {
        return notMatcher{m}
    }
    return notMatcher{Eq(x)}
}

// AssignableToTypeOf is a Matcher that matches if the parameter to the mock
// function is assignable to the type of the parameter to this function.
//
// Example usage:
//
//     dbMock.EXPECT().
//         Insert(gomock.AssignableToTypeOf(&EmployeeRecord{})).
//         Return(errors.New("DB error"))
//
func AssignableToTypeOf(x interface{}) Matcher {
    return assignableToTypeOfMatcher{reflect.TypeOf(x)}
}
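The constructors above are the values that `EXPECT()` argument positions accept. Since matchers are plain values, they can also be exercised on their own; a self-contained sketch using only the functions defined in this file:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/mock/gomock"
)

func main() {
	// Eq uses reflect.DeepEqual under the hood.
	fmt.Println(gomock.Eq(42).Matches(42)) // true

	// Nil reports nil channels, funcs, interfaces, maps, pointers and slices;
	// Not inverts any matcher.
	fmt.Println(gomock.Not(gomock.Nil()).Matches([]byte("hi"))) // true

	// AssignableToTypeOf matches on assignability of the argument's type.
	fmt.Println(gomock.AssignableToTypeOf(&bytes.Buffer{}).Matches(&bytes.Buffer{})) // true

	// Any matches everything, including nil.
	fmt.Println(gomock.Any().Matches(nil)) // true
}
```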
253
vendor/github.com/golang/protobuf/proto/clone.go
generated
vendored
@@ -1,253 +0,0 @@
Entire vendored file deleted: the upstream deep-copy and merge routines for protocol buffers (Clone, the Merger interface, Merge, mergeStruct, mergeAny, mergeExtension).
427
vendor/github.com/golang/protobuf/proto/decode.go
generated
vendored
@@ -1,427 +0,0 @@
Entire vendored file deleted: the upstream wire-format decoding routines (DecodeVarint, DecodeFixed64, DecodeFixed32, DecodeZigzag64, DecodeZigzag32, DecodeRawBytes, DecodeStringBytes, the Unmarshaler interface, Unmarshal, UnmarshalMerge, DecodeMessage, DecodeGroup and Buffer.Unmarshal).
63
vendor/github.com/golang/protobuf/proto/deprecated.go
generated
vendored
@@ -1,63 +0,0 @@
Entire vendored file deleted: deprecated upstream stubs (Stats, GetStats and the MessageSet marshal/unmarshal helpers, which only return "proto: not implemented", plus RegisterMessageSetType).
350
vendor/github.com/golang/protobuf/proto/discard.go
generated
vendored
@@ -1,350 +0,0 @@
Entire vendored file deleted: the upstream DiscardUnknown support for recursively dropping unknown fields from messages (discardInfo computation plus the reflection-based discardLegacy fallback).
203
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
203
vendor/github.com/golang/protobuf/proto/encode.go
generated
vendored
@@ -1,203 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
// errRepeatedHasNil is the error returned if Marshal is called with
|
||||
// a struct with a repeated field containing a nil element.
|
||||
errRepeatedHasNil = errors.New("proto: repeated field has nil element")
|
||||
|
||||
// errOneofHasNil is the error returned if Marshal is called with
|
||||
// a struct with a oneof field containing a nil element.
|
||||
errOneofHasNil = errors.New("proto: oneof field has nil value")
|
||||
|
||||
// ErrNil is the error returned if Marshal is called with nil.
|
||||
ErrNil = errors.New("proto: Marshal called with nil")
|
||||
|
||||
// ErrTooLarge is the error returned if Marshal is called with a
|
||||
// message that encodes to >2GB.
|
||||
ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
|
||||
)
|
||||
|
||||
// The fundamental encoders that put bytes on the wire.
|
||||
// Those that take integer types all accept uint64 and are
|
||||
// therefore of type valueEncoder.
|
||||
|
||||
const maxVarintBytes = 10 // maximum length of a varint
|
||||
|
||||
// EncodeVarint returns the varint encoding of x.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
// Not used by the package itself, but helpful to clients
|
||||
// wishing to use the same encoding.
|
||||
func EncodeVarint(x uint64) []byte {
|
||||
var buf [maxVarintBytes]byte
|
||||
var n int
|
||||
for n = 0; x > 127; n++ {
|
||||
buf[n] = 0x80 | uint8(x&0x7F)
|
||||
x >>= 7
|
||||
}
|
||||
buf[n] = uint8(x)
|
||||
n++
|
||||
return buf[0:n]
|
||||
}
|
||||
|
||||
// EncodeVarint writes a varint-encoded integer to the Buffer.
|
||||
// This is the format for the
|
||||
// int32, int64, uint32, uint64, bool, and enum
|
||||
// protocol buffer types.
|
||||
func (p *Buffer) EncodeVarint(x uint64) error {
|
||||
for x >= 1<<7 {
|
||||
p.buf = append(p.buf, uint8(x&0x7f|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
p.buf = append(p.buf, uint8(x))
|
||||
return nil
|
||||
}
|
||||
|
||||
// SizeVarint returns the varint encoding size of an integer.
|
||||
func SizeVarint(x uint64) int {
|
||||
switch {
|
||||
case x < 1<<7:
|
||||
return 1
|
||||
case x < 1<<14:
|
||||
return 2
|
||||
case x < 1<<21:
|
||||
return 3
|
||||
case x < 1<<28:
|
||||
return 4
|
||||
case x < 1<<35:
|
||||
return 5
|
||||
case x < 1<<42:
|
||||
return 6
|
||||
case x < 1<<49:
|
||||
return 7
|
||||
case x < 1<<56:
|
||||
return 8
|
||||
case x < 1<<63:
|
||||
return 9
|
||||
}
|
||||
return 10
|
||||
}
|
||||
|
||||
// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
    p.buf = append(p.buf,
        uint8(x),
        uint8(x>>8),
        uint8(x>>16),
        uint8(x>>24),
        uint8(x>>32),
        uint8(x>>40),
        uint8(x>>48),
        uint8(x>>56))
    return nil
}

// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
    p.buf = append(p.buf,
        uint8(x),
        uint8(x>>8),
        uint8(x>>16),
        uint8(x>>24))
    return nil
}

// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
    // use signed number to get arithmetic right shift.
    return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
    // use signed number to get arithmetic right shift.
    return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}

// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
    p.EncodeVarint(uint64(len(b)))
    p.buf = append(p.buf, b...)
    return nil
}

// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
    p.EncodeVarint(uint64(len(s)))
    p.buf = append(p.buf, s...)
    return nil
}

// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
    Marshal() ([]byte, error)
}

// EncodeMessage writes the protocol buffer to the Buffer,
// prefixed by a varint-encoded length.
func (p *Buffer) EncodeMessage(pb Message) error {
    siz := Size(pb)
    p.EncodeVarint(uint64(siz))
    return p.Marshal(pb)
}

// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
    switch v.Kind() {
    case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
        return v.IsNil()
    }
    return false
}
301
vendor/github.com/golang/protobuf/proto/equal.go
generated
vendored
@@ -1,301 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Protocol buffer comparison.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
Equal returns true iff protocol buffers a and b are equal.
The arguments must both be pointers to protocol buffer structs.

Equality is defined in this way:
  - Two messages are equal iff they are the same type,
    corresponding fields are equal, unknown field sets
    are equal, and extensions sets are equal.
  - Two set scalar fields are equal iff their values are equal.
    If the fields are of a floating-point type, remember that
    NaN != x for all x, including NaN. If the message is defined
    in a proto3 .proto file, fields are not "set"; specifically,
    zero length proto3 "bytes" fields are equal (nil == {}).
  - Two repeated fields are equal iff their lengths are the same,
    and their corresponding elements are equal. Note a "bytes" field,
    although represented by []byte, is not a repeated field and the
    rule for the scalar fields described above applies.
  - Two unset fields are equal.
  - Two unknown field sets are equal if their current
    encoded state is equal.
  - Two extension sets are equal iff they have corresponding
    elements that are pairwise equal.
  - Two map fields are equal iff their lengths are the same,
    and they contain the same set of elements. Zero-length map
    fields are equal.
  - Every other combination of things are not equal.

The return value is undefined if a and b are not protocol buffers.
*/
func Equal(a, b Message) bool {
|
||||
if a == nil || b == nil {
|
||||
return a == b
|
||||
}
|
||||
v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
|
||||
if v1.Type() != v2.Type() {
|
||||
return false
|
||||
}
|
||||
if v1.Kind() == reflect.Ptr {
|
||||
if v1.IsNil() {
|
||||
return v2.IsNil()
|
||||
}
|
||||
if v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
v1, v2 = v1.Elem(), v2.Elem()
|
||||
}
|
||||
if v1.Kind() != reflect.Struct {
|
||||
return false
|
||||
}
|
||||
return equalStruct(v1, v2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
func equalStruct(v1, v2 reflect.Value) bool {
|
||||
sprop := GetProperties(v1.Type())
|
||||
for i := 0; i < v1.NumField(); i++ {
|
||||
f := v1.Type().Field(i)
|
||||
if strings.HasPrefix(f.Name, "XXX_") {
|
||||
continue
|
||||
}
|
||||
f1, f2 := v1.Field(i), v2.Field(i)
|
||||
if f.Type.Kind() == reflect.Ptr {
|
||||
if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
|
||||
// both unset
|
||||
continue
|
||||
} else if n1 != n2 {
|
||||
// set/unset mismatch
|
||||
return false
|
||||
}
|
||||
f1, f2 = f1.Elem(), f2.Elem()
|
||||
}
|
||||
if !equalAny(f1, f2, sprop.Prop[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_InternalExtensions")
|
||||
if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
|
||||
em2 := v2.FieldByName("XXX_extensions")
|
||||
if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
uf := v1.FieldByName("XXX_unrecognized")
|
||||
if !uf.IsValid() {
|
||||
return true
|
||||
}
|
||||
|
||||
u1 := uf.Bytes()
|
||||
u2 := v2.FieldByName("XXX_unrecognized").Bytes()
|
||||
return bytes.Equal(u1, u2)
|
||||
}
|
||||
|
||||
// v1 and v2 are known to have the same type.
|
||||
// prop may be nil.
|
||||
func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
|
||||
if v1.Type() == protoMessageType {
|
||||
m1, _ := v1.Interface().(Message)
|
||||
m2, _ := v2.Interface().(Message)
|
||||
return Equal(m1, m2)
|
||||
}
|
||||
switch v1.Kind() {
|
||||
case reflect.Bool:
|
||||
return v1.Bool() == v2.Bool()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v1.Float() == v2.Float()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v1.Int() == v2.Int()
|
||||
case reflect.Interface:
|
||||
// Probably a oneof field; compare the inner values.
|
||||
n1, n2 := v1.IsNil(), v2.IsNil()
|
||||
if n1 || n2 {
|
||||
return n1 == n2
|
||||
}
|
||||
e1, e2 := v1.Elem(), v2.Elem()
|
||||
if e1.Type() != e2.Type() {
|
||||
return false
|
||||
}
|
||||
return equalAny(e1, e2, nil)
|
||||
case reflect.Map:
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for _, key := range v1.MapKeys() {
|
||||
val2 := v2.MapIndex(key)
|
||||
if !val2.IsValid() {
|
||||
// This key was not found in the second map.
|
||||
return false
|
||||
}
|
||||
if !equalAny(v1.MapIndex(key), val2, nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.Ptr:
|
||||
// Maps may have nil values in them, so check for nil.
|
||||
if v1.IsNil() && v2.IsNil() {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return equalAny(v1.Elem(), v2.Elem(), prop)
|
||||
case reflect.Slice:
|
||||
if v1.Type().Elem().Kind() == reflect.Uint8 {
|
||||
// short circuit: []byte
|
||||
|
||||
// Edge case: if this is in a proto3 message, a zero length
|
||||
// bytes field is considered the zero value.
|
||||
if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
|
||||
return true
|
||||
}
|
||||
if v1.IsNil() != v2.IsNil() {
|
||||
return false
|
||||
}
|
||||
return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
|
||||
}
|
||||
|
||||
if v1.Len() != v2.Len() {
|
||||
return false
|
||||
}
|
||||
for i := 0; i < v1.Len(); i++ {
|
||||
if !equalAny(v1.Index(i), v2.Index(i), prop) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case reflect.String:
|
||||
return v1.Interface().(string) == v2.Interface().(string)
|
||||
case reflect.Struct:
|
||||
return equalStruct(v1, v2)
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v1.Uint() == v2.Uint()
|
||||
}
|
||||
|
||||
// unknown type, so not a protocol buffer
|
||||
log.Printf("proto: don't know how to compare %v", v1)
|
||||
return false
|
||||
}
|
||||
|
||||
// base is the struct type that the extensions are based on.
|
||||
// x1 and x2 are InternalExtensions.
|
||||
func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
|
||||
em1, _ := x1.extensionsRead()
|
||||
em2, _ := x2.extensionsRead()
|
||||
return equalExtMap(base, em1, em2)
|
||||
}
|
||||
|
||||
func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
|
||||
if len(em1) != len(em2) {
|
||||
return false
|
||||
}
|
||||
|
||||
for extNum, e1 := range em1 {
|
||||
e2, ok := em2[extNum]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
m1 := extensionAsLegacyType(e1.value)
|
||||
m2 := extensionAsLegacyType(e2.value)
|
||||
|
||||
if m1 == nil && m2 == nil {
|
||||
// Both have only encoded form.
|
||||
if bytes.Equal(e1.enc, e2.enc) {
|
||||
continue
|
||||
}
|
||||
// The bytes are different, but the extensions might still be
|
||||
// equal. We need to decode them to compare.
|
||||
}
|
||||
|
||||
if m1 != nil && m2 != nil {
|
||||
// Both are unencoded.
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// At least one is encoded. To do a semantically correct comparison
|
||||
// we need to unmarshal them first.
|
||||
var desc *ExtensionDesc
|
||||
if m := extensionMaps[base]; m != nil {
|
||||
desc = m[extNum]
|
||||
}
|
||||
if desc == nil {
|
||||
// If both have only encoded form and the bytes are the same,
|
||||
// it is handled above. We get here when the bytes are different.
|
||||
// We don't know how to decode it, so just compare them as byte
|
||||
// slices.
|
||||
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
|
||||
return false
|
||||
}
|
||||
var err error
|
||||
if m1 == nil {
|
||||
m1, err = decodeExtension(e1.enc, desc)
|
||||
}
|
||||
if m2 == nil && err == nil {
|
||||
m2, err = decodeExtension(e2.enc, desc)
|
||||
}
|
||||
if err != nil {
|
||||
// The encoded form is invalid.
|
||||
log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
|
||||
return false
|
||||
}
|
||||
if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
607
vendor/github.com/golang/protobuf/proto/extensions.go
generated
vendored
@@ -1,607 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Types and routines for supporting protocol buffer extensions.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
|
||||
var ErrMissingExtension = errors.New("proto: missing extension")
|
||||
|
||||
// ExtensionRange represents a range of message extensions for a protocol buffer.
|
||||
// Used in code generated by the protocol compiler.
|
||||
type ExtensionRange struct {
|
||||
Start, End int32 // both inclusive
|
||||
}
|
||||
|
||||
// extendableProto is an interface implemented by any protocol buffer generated by the current
|
||||
// proto compiler that may be extended.
|
||||
type extendableProto interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
extensionsWrite() map[int32]Extension
|
||||
extensionsRead() (map[int32]Extension, sync.Locker)
|
||||
}
|
||||
|
||||
// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
|
||||
// version of the proto compiler that may be extended.
|
||||
type extendableProtoV1 interface {
|
||||
Message
|
||||
ExtensionRangeArray() []ExtensionRange
|
||||
ExtensionMap() map[int32]Extension
|
||||
}
|
||||
|
||||
// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
|
||||
type extensionAdapter struct {
|
||||
extendableProtoV1
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsWrite() map[int32]Extension {
|
||||
return e.ExtensionMap()
|
||||
}
|
||||
|
||||
func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
return e.ExtensionMap(), notLocker{}
|
||||
}
|
||||
|
||||
// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
|
||||
type notLocker struct{}
|
||||
|
||||
func (n notLocker) Lock() {}
|
||||
func (n notLocker) Unlock() {}
|
||||
|
||||
// extendable returns the extendableProto interface for the given generated proto message.
|
||||
// If the proto message has the old extension format, it returns a wrapper that implements
|
||||
// the extendableProto interface.
|
||||
func extendable(p interface{}) (extendableProto, error) {
|
||||
switch p := p.(type) {
|
||||
case extendableProto:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return p, nil
|
||||
case extendableProtoV1:
|
||||
if isNilPtr(p) {
|
||||
return nil, fmt.Errorf("proto: nil %T is not extendable", p)
|
||||
}
|
||||
return extensionAdapter{p}, nil
|
||||
}
|
||||
// Don't allocate a specific error containing %T:
|
||||
// this is the hot path for Clone and MarshalText.
|
||||
return nil, errNotExtendable
|
||||
}
|
||||
|
||||
var errNotExtendable = errors.New("proto: not an extendable proto.Message")
|
||||
|
||||
func isNilPtr(x interface{}) bool {
|
||||
v := reflect.ValueOf(x)
|
||||
return v.Kind() == reflect.Ptr && v.IsNil()
|
||||
}
|
||||
|
||||
// XXX_InternalExtensions is an internal representation of proto extensions.
|
||||
//
|
||||
// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
|
||||
// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
|
||||
//
|
||||
// The methods of XXX_InternalExtensions are not concurrency safe in general,
|
||||
// but calls to logically read-only methods such as has and get may be executed concurrently.
|
||||
type XXX_InternalExtensions struct {
|
||||
// The struct must be indirect so that if a user inadvertently copies a
|
||||
// generated message and its embedded XXX_InternalExtensions, they
|
||||
// avoid the mayhem of a copied mutex.
|
||||
//
|
||||
// The mutex serializes all logically read-only operations to p.extensionMap.
|
||||
// It is up to the client to ensure that write operations to p.extensionMap are
|
||||
// mutually exclusive with other accesses.
|
||||
p *struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
}
|
||||
}
|
||||
|
||||
// extensionsWrite returns the extension map, creating it on first use.
|
||||
func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
|
||||
if e.p == nil {
|
||||
e.p = new(struct {
|
||||
mu sync.Mutex
|
||||
extensionMap map[int32]Extension
|
||||
})
|
||||
e.p.extensionMap = make(map[int32]Extension)
|
||||
}
|
||||
return e.p.extensionMap
|
||||
}
|
||||
|
||||
// extensionsRead returns the extensions map for read-only use. It may be nil.
|
||||
// The caller must hold the returned mutex's lock when accessing Elements within the map.
|
||||
func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
|
||||
if e.p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return e.p.extensionMap, &e.p.mu
|
||||
}
|
||||
|
||||
// ExtensionDesc represents an extension specification.
|
||||
// Used in generated code from the protocol compiler.
|
||||
type ExtensionDesc struct {
|
||||
ExtendedType Message // nil pointer to the type that is being extended
|
||||
ExtensionType interface{} // nil pointer to the extension type
|
||||
Field int32 // field number
|
||||
Name string // fully-qualified name of extension, for text formatting
|
||||
Tag string // protobuf tag style
|
||||
Filename string // name of the file in which the extension is defined
|
||||
}
|
||||
|
||||
func (ed *ExtensionDesc) repeated() bool {
|
||||
t := reflect.TypeOf(ed.ExtensionType)
|
||||
return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
|
||||
}
|
||||
|
||||
// Extension represents an extension in a message.
|
||||
type Extension struct {
|
||||
// When an extension is stored in a message using SetExtension
|
||||
// only desc and value are set. When the message is marshaled
|
||||
// enc will be set to the encoded form of the message.
|
||||
//
|
||||
// When a message is unmarshaled and contains extensions, each
|
||||
// extension will have only enc set. When such an extension is
|
||||
// accessed using GetExtension (or GetExtensions) desc and value
|
||||
// will be set.
|
||||
desc *ExtensionDesc
|
||||
|
||||
// value is a concrete value for the extension field. Let the type of
|
||||
// desc.ExtensionType be the "API type" and the type of Extension.value
|
||||
// be the "storage type". The API type and storage type are the same except:
|
||||
// * For scalars (except []byte), the API type uses *T,
|
||||
// while the storage type uses T.
|
||||
// * For repeated fields, the API type uses []T, while the storage type
|
||||
// uses *[]T.
|
||||
//
|
||||
// The reason for the divergence is so that the storage type more naturally
|
||||
// matches what is expected of when retrieving the values through the
|
||||
// protobuf reflection APIs.
|
||||
//
|
||||
// The value may only be populated if desc is also populated.
|
||||
value interface{}
|
||||
|
||||
// enc is the raw bytes for the extension field.
|
||||
enc []byte
|
||||
}
|
||||
|
||||
// SetRawExtension is for testing only.
|
||||
func SetRawExtension(base Message, id int32, b []byte) {
|
||||
epb, err := extendable(base)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[id] = Extension{enc: b}
|
||||
}
|
||||
|
||||
// isExtensionField returns true iff the given field number is in an extension range.
|
||||
func isExtensionField(pb extendableProto, field int32) bool {
|
||||
for _, er := range pb.ExtensionRangeArray() {
|
||||
if er.Start <= field && field <= er.End {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// checkExtensionTypes checks that the given extension is valid for pb.
|
||||
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
|
||||
var pbi interface{} = pb
|
||||
// Check the extended type.
|
||||
if ea, ok := pbi.(extensionAdapter); ok {
|
||||
pbi = ea.extendableProtoV1
|
||||
}
|
||||
if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
|
||||
return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
|
||||
}
|
||||
// Check the range.
|
||||
if !isExtensionField(pb, extension.Field) {
|
||||
return errors.New("proto: bad extension number; not in declared ranges")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// extPropKey is sufficient to uniquely identify an extension.
|
||||
type extPropKey struct {
|
||||
base reflect.Type
|
||||
field int32
|
||||
}
|
||||
|
||||
var extProp = struct {
|
||||
sync.RWMutex
|
||||
m map[extPropKey]*Properties
|
||||
}{
|
||||
m: make(map[extPropKey]*Properties),
|
||||
}
|
||||
|
||||
func extensionProperties(ed *ExtensionDesc) *Properties {
|
||||
key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
|
||||
|
||||
extProp.RLock()
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
extProp.RUnlock()
|
||||
return prop
|
||||
}
|
||||
extProp.RUnlock()
|
||||
|
||||
extProp.Lock()
|
||||
defer extProp.Unlock()
|
||||
// Check again.
|
||||
if prop, ok := extProp.m[key]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(Properties)
|
||||
prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
|
||||
extProp.m[key] = prop
|
||||
return prop
|
||||
}
|
||||
|
||||
// HasExtension returns whether the given extension is present in pb.
|
||||
func HasExtension(pb Message, extension *ExtensionDesc) bool {
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
extmap, mu := epb.extensionsRead()
|
||||
if extmap == nil {
|
||||
return false
|
||||
}
|
||||
mu.Lock()
|
||||
_, ok := extmap[extension.Field]
|
||||
mu.Unlock()
|
||||
return ok
|
||||
}
|
||||
|
||||
// ClearExtension removes the given extension from pb.
|
||||
func ClearExtension(pb Message, extension *ExtensionDesc) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// TODO: Check types, field numbers, etc.?
|
||||
extmap := epb.extensionsWrite()
|
||||
delete(extmap, extension.Field)
|
||||
}
|
||||
|
||||
// GetExtension retrieves a proto2 extended field from pb.
|
||||
//
|
||||
// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
|
||||
// then GetExtension parses the encoded field and returns a Go value of the specified type.
|
||||
// If the field is not present, then the default value is returned (if one is specified),
|
||||
// otherwise ErrMissingExtension is reported.
|
||||
//
|
||||
// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
|
||||
// then GetExtension returns the raw encoded bytes of the field extension.
|
||||
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if extension.ExtendedType != nil {
|
||||
// can only check type if this is a complete descriptor
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
e, ok := emap[extension.Field]
|
||||
if !ok {
|
||||
// defaultExtensionValue returns the default value or
|
||||
// ErrMissingExtension if there is no default.
|
||||
return defaultExtensionValue(extension)
|
||||
}
|
||||
|
||||
if e.value != nil {
|
||||
// Already decoded. Check the descriptor, though.
|
||||
if e.desc != extension {
|
||||
// This shouldn't happen. If it does, it means that
|
||||
// GetExtension was called twice with two different
|
||||
// descriptors with the same field number.
|
||||
return nil, errors.New("proto: descriptor conflict")
|
||||
}
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor
|
||||
return e.enc, nil
|
||||
}
|
||||
|
||||
v, err := decodeExtension(e.enc, extension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Remember the decoded version and drop the encoded version.
|
||||
// That way it is safe to mutate what we return.
|
||||
e.value = extensionAsStorageType(v)
|
||||
e.desc = extension
|
||||
e.enc = nil
|
||||
emap[extension.Field] = e
|
||||
return extensionAsLegacyType(e.value), nil
|
||||
}
|
||||
|
||||
// defaultExtensionValue returns the default value for extension.
|
||||
// If no default for an extension is defined ErrMissingExtension is returned.
|
||||
func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
|
||||
if extension.ExtensionType == nil {
|
||||
// incomplete descriptor, so no default
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
props := extensionProperties(extension)
|
||||
|
||||
sf, _, err := fieldDefault(t, props)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sf == nil || sf.value == nil {
|
||||
// There is no default value.
|
||||
return nil, ErrMissingExtension
|
||||
}
|
||||
|
||||
if t.Kind() != reflect.Ptr {
|
||||
// We do not need to return a Ptr, we can directly return sf.value.
|
||||
return sf.value, nil
|
||||
}
|
||||
|
||||
// We need to return an interface{} that is a pointer to sf.value.
|
||||
value := reflect.New(t).Elem()
|
||||
value.Set(reflect.New(value.Type().Elem()))
|
||||
if sf.kind == reflect.Int32 {
|
||||
// We may have an int32 or an enum, but the underlying data is int32.
|
||||
// Since we can't set an int32 into a non int32 reflect.value directly
|
||||
// set it as a int32.
|
||||
value.Elem().SetInt(int64(sf.value.(int32)))
|
||||
} else {
|
||||
value.Elem().Set(reflect.ValueOf(sf.value))
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// decodeExtension decodes an extension encoded in b.
|
||||
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
|
||||
t := reflect.TypeOf(extension.ExtensionType)
|
||||
unmarshal := typeUnmarshaler(t, extension.Tag)
|
||||
|
||||
// t is a pointer to a struct, pointer to basic type or a slice.
|
||||
// Allocate space to store the pointer/slice.
|
||||
value := reflect.New(t).Elem()
|
||||
|
||||
var err error
|
||||
for {
|
||||
x, n := decodeVarint(b)
|
||||
if n == 0 {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
b = b[n:]
|
||||
wire := int(x) & 7
|
||||
|
||||
b, err = unmarshal(b, valToPointer(value.Addr()), wire)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return value.Interface(), nil
|
||||
}
|
||||
|
||||
// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
|
||||
// The returned slice has the same length as es; missing extensions will appear as nil elements.
|
||||
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
extensions = make([]interface{}, len(es))
|
||||
for i, e := range es {
|
||||
extensions[i], err = GetExtension(epb, e)
|
||||
if err == ErrMissingExtension {
|
||||
err = nil
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
|
||||
// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
|
||||
// just the Field field, which defines the extension's field number.
|
||||
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
registeredExtensions := RegisteredExtensions(pb)
|
||||
|
||||
emap, mu := epb.extensionsRead()
|
||||
if emap == nil {
|
||||
return nil, nil
|
||||
}
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
extensions := make([]*ExtensionDesc, 0, len(emap))
|
||||
for extid, e := range emap {
|
||||
desc := e.desc
|
||||
if desc == nil {
|
||||
desc = registeredExtensions[extid]
|
||||
if desc == nil {
|
||||
desc = &ExtensionDesc{Field: extid}
|
||||
}
|
||||
}
|
||||
|
||||
extensions = append(extensions, desc)
|
||||
}
|
||||
return extensions, nil
|
||||
}
|
||||
|
||||
// SetExtension sets the specified extension of pb to the specified value.
|
||||
func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkExtensionTypes(epb, extension); err != nil {
|
||||
return err
|
||||
}
|
||||
typ := reflect.TypeOf(extension.ExtensionType)
|
||||
if typ != reflect.TypeOf(value) {
|
||||
return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
|
||||
}
|
||||
// nil extension values need to be caught early, because the
|
||||
// encoder can't distinguish an ErrNil due to a nil extension
|
||||
// from an ErrNil due to a missing field. Extensions are
|
||||
// always optional, so the encoder would just swallow the error
|
||||
// and drop all the extensions from the encoded message.
|
||||
if reflect.ValueOf(value).IsNil() {
|
||||
return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
|
||||
}
|
||||
|
||||
extmap := epb.extensionsWrite()
|
||||
extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ClearAllExtensions clears all extensions from pb.
|
||||
func ClearAllExtensions(pb Message) {
|
||||
epb, err := extendable(pb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m := epb.extensionsWrite()
|
||||
for k := range m {
|
||||
delete(m, k)
|
||||
}
|
||||
}
|
||||
|
||||
// A global registry of extensions.
|
||||
// The generated code will register the generated descriptors by calling RegisterExtension.
|
||||
|
||||
var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
|
||||
|
||||
// RegisterExtension is called from the generated code.
|
||||
func RegisterExtension(desc *ExtensionDesc) {
|
||||
st := reflect.TypeOf(desc.ExtendedType).Elem()
|
||||
m := extensionMaps[st]
|
||||
if m == nil {
|
||||
m = make(map[int32]*ExtensionDesc)
|
||||
extensionMaps[st] = m
|
||||
}
|
||||
if _, ok := m[desc.Field]; ok {
|
||||
panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
|
||||
}
|
||||
m[desc.Field] = desc
|
||||
}
|
||||
|
||||
// RegisteredExtensions returns a map of the registered extensions of a
|
||||
// protocol buffer struct, indexed by the extension number.
|
||||
// The argument pb should be a nil pointer to the struct type.
|
||||
func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
|
||||
return extensionMaps[reflect.TypeOf(pb).Elem()]
|
||||
}
|
||||
|
||||
// extensionAsLegacyType converts an value in the storage type as the API type.
|
||||
// See Extension.value.
|
||||
func extensionAsLegacyType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
// Represent primitive types as a pointer to the value.
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Slice:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// extensionAsStorageType converts an value in the API type as the storage type.
|
||||
// See Extension.value.
|
||||
func extensionAsStorageType(v interface{}) interface{} {
|
||||
switch rv := reflect.ValueOf(v); rv.Kind() {
|
||||
case reflect.Ptr:
|
||||
// Represent slice types as the value itself.
|
||||
switch rv.Type().Elem().Kind() {
|
||||
case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
|
||||
if rv.IsNil() {
|
||||
v = reflect.Zero(rv.Type().Elem()).Interface()
|
||||
} else {
|
||||
v = rv.Elem().Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Represent slice types as a pointer to the value.
|
||||
if rv.Type().Elem().Kind() != reflect.Uint8 {
|
||||
rv2 := reflect.New(rv.Type())
|
||||
rv2.Elem().Set(rv)
|
||||
v = rv2.Interface()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
965
vendor/github.com/golang/protobuf/proto/lib.go
generated
vendored
@@ -1,965 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
/*
|
||||
Package proto converts data structures to and from the wire format of
|
||||
protocol buffers. It works in concert with the Go source code generated
|
||||
for .proto files by the protocol compiler.
|
||||
|
||||
A summary of the properties of the protocol buffer interface
|
||||
for a protocol buffer variable v:
|
||||
|
||||
- Names are turned from camel_case to CamelCase for export.
|
||||
- There are no methods on v to set fields; just treat
|
||||
them as structure fields.
|
||||
- There are getters that return a field's value if set,
|
||||
and return the field's default value if unset.
|
||||
The getters work even if the receiver is a nil message.
|
||||
- The zero value for a struct is its correct initialization state.
|
||||
All desired fields must be set before marshaling.
|
||||
- A Reset() method will restore a protobuf struct to its zero state.
|
||||
- Non-repeated fields are pointers to the values; nil means unset.
|
||||
That is, optional or required field int32 f becomes F *int32.
|
||||
- Repeated fields are slices.
|
||||
- Helper functions are available to aid the setting of fields.
|
||||
msg.Foo = proto.String("hello") // set field
|
||||
- Constants are defined to hold the default values of all fields that
|
||||
have them. They have the form Default_StructName_FieldName.
|
||||
Because the getter methods handle defaulted values,
|
||||
direct use of these constants should be rare.
|
||||
- Enums are given type names and maps from names to values.
|
||||
Enum values are prefixed by the enclosing message's name, or by the
|
||||
enum's type name if it is a top-level enum. Enum types have a String
|
||||
method, and a Enum method to assist in message construction.
|
||||
- Nested messages, groups and enums have type names prefixed with the name of
|
||||
the surrounding message type.
|
||||
- Extensions are given descriptor names that start with E_,
|
||||
followed by an underscore-delimited list of the nested messages
|
||||
that contain it (if any) followed by the CamelCased name of the
|
||||
extension field itself. HasExtension, ClearExtension, GetExtension
|
||||
and SetExtension are functions for manipulating extensions.
|
||||
- Oneof field sets are given a single field in their message,
|
||||
with distinguished wrapper types for each possible field value.
|
||||
- Marshal and Unmarshal are functions to encode and decode the wire format.
|
||||
|
||||
When the .proto file specifies `syntax="proto3"`, there are some differences:
|
||||
|
||||
- Non-repeated fields of non-message type are values instead of pointers.
|
||||
- Enum types do not get an Enum method.
|
||||
|
||||
The simplest way to describe this is to see an example.
|
||||
Given file test.proto, containing
|
||||
|
||||
package example;
|
||||
|
||||
enum FOO { X = 17; }
|
||||
|
||||
message Test {
|
||||
required string label = 1;
|
||||
optional int32 type = 2 [default=77];
|
||||
repeated int64 reps = 3;
|
||||
optional group OptionalGroup = 4 {
|
||||
required string RequiredField = 5;
|
||||
}
|
||||
oneof union {
|
||||
int32 number = 6;
|
||||
string name = 7;
|
||||
}
|
||||
}
|
||||
|
||||
The resulting file, test.pb.go, is:
|
||||
|
||||
package example
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import math "math"
|
||||
|
||||
type FOO int32
|
||||
const (
|
||||
FOO_X FOO = 17
|
||||
)
|
||||
var FOO_name = map[int32]string{
|
||||
17: "X",
|
||||
}
|
||||
var FOO_value = map[string]int32{
|
||||
"X": 17,
|
||||
}
|
||||
|
||||
func (x FOO) Enum() *FOO {
|
||||
p := new(FOO)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x FOO) String() string {
|
||||
return proto.EnumName(FOO_name, int32(x))
|
||||
}
|
||||
func (x *FOO) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(FOO_value, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = FOO(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test struct {
|
||||
Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
|
||||
Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
|
||||
Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
|
||||
Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
|
||||
// Types that are valid to be assigned to Union:
|
||||
// *Test_Number
|
||||
// *Test_Name
|
||||
Union isTest_Union `protobuf_oneof:"union"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
func (m *Test) Reset() { *m = Test{} }
|
||||
func (m *Test) String() string { return proto.CompactTextString(m) }
|
||||
func (*Test) ProtoMessage() {}
|
||||
|
||||
type isTest_Union interface {
|
||||
isTest_Union()
|
||||
}
|
||||
|
||||
type Test_Number struct {
|
||||
Number int32 `protobuf:"varint,6,opt,name=number"`
|
||||
}
|
||||
type Test_Name struct {
|
||||
Name string `protobuf:"bytes,7,opt,name=name"`
|
||||
}
|
||||
|
||||
func (*Test_Number) isTest_Union() {}
|
||||
func (*Test_Name) isTest_Union() {}
|
||||
|
||||
func (m *Test) GetUnion() isTest_Union {
|
||||
if m != nil {
|
||||
return m.Union
|
||||
}
|
||||
return nil
|
||||
}
|
||||
const Default_Test_Type int32 = 77
|
||||
|
||||
func (m *Test) GetLabel() string {
|
||||
if m != nil && m.Label != nil {
|
||||
return *m.Label
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetType() int32 {
|
||||
if m != nil && m.Type != nil {
|
||||
return *m.Type
|
||||
}
|
||||
return Default_Test_Type
|
||||
}
|
||||
|
||||
func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
|
||||
if m != nil {
|
||||
return m.Optionalgroup
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Test_OptionalGroup struct {
|
||||
RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
|
||||
}
|
||||
func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
|
||||
func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
|
||||
|
||||
func (m *Test_OptionalGroup) GetRequiredField() string {
|
||||
if m != nil && m.RequiredField != nil {
|
||||
return *m.RequiredField
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Test) GetNumber() int32 {
|
||||
if x, ok := m.GetUnion().(*Test_Number); ok {
|
||||
return x.Number
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Test) GetName() string {
|
||||
if x, ok := m.GetUnion().(*Test_Name); ok {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
|
||||
}
|
||||
|
||||
To create and play with a Test object:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "./example.pb"
|
||||
)
|
||||
|
||||
func main() {
|
||||
test := &pb.Test{
|
||||
Label: proto.String("hello"),
|
||||
Type: proto.Int32(17),
|
||||
Reps: []int64{1, 2, 3},
|
||||
Optionalgroup: &pb.Test_OptionalGroup{
|
||||
RequiredField: proto.String("good bye"),
|
||||
},
|
||||
Union: &pb.Test_Name{"fred"},
|
||||
}
|
||||
data, err := proto.Marshal(test)
|
||||
if err != nil {
|
||||
log.Fatal("marshaling error: ", err)
|
||||
}
|
||||
newTest := &pb.Test{}
|
||||
err = proto.Unmarshal(data, newTest)
|
||||
if err != nil {
|
||||
log.Fatal("unmarshaling error: ", err)
|
||||
}
|
||||
// Now test and newTest contain the same data.
|
||||
if test.GetLabel() != newTest.GetLabel() {
|
||||
log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
|
||||
}
|
||||
// Use a type switch to determine which oneof was set.
|
||||
switch u := test.Union.(type) {
|
||||
case *pb.Test_Number: // u.Number contains the number.
|
||||
case *pb.Test_Name: // u.Name contains the string.
|
||||
}
|
||||
// etc.
|
||||
}
|
||||
*/
|
||||
package proto
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
|
||||
// Marshal reports this when a required field is not initialized.
|
||||
// Unmarshal reports this when a required field is missing from the wire data.
|
||||
type RequiredNotSetError struct{ field string }
|
||||
|
||||
func (e *RequiredNotSetError) Error() string {
|
||||
if e.field == "" {
|
||||
return fmt.Sprintf("proto: required field not set")
|
||||
}
|
||||
return fmt.Sprintf("proto: required field %q not set", e.field)
|
||||
}
|
||||
func (e *RequiredNotSetError) RequiredNotSet() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type invalidUTF8Error struct{ field string }
|
||||
|
||||
func (e *invalidUTF8Error) Error() string {
|
||||
if e.field == "" {
|
||||
return "proto: invalid UTF-8 detected"
|
||||
}
|
||||
return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
|
||||
}
|
||||
func (e *invalidUTF8Error) InvalidUTF8() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
|
||||
// This error should not be exposed to the external API as such errors should
|
||||
// be recreated with the field information.
|
||||
var errInvalidUTF8 = &invalidUTF8Error{}
|
||||
|
||||
// isNonFatal reports whether the error is either a RequiredNotSet error
|
||||
// or a InvalidUTF8 error.
|
||||
func isNonFatal(err error) bool {
|
||||
if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
|
||||
return true
|
||||
}
|
||||
if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type nonFatal struct{ E error }
|
||||
|
||||
// Merge merges err into nf and reports whether it was successful.
|
||||
// Otherwise it returns false for any fatal non-nil errors.
|
||||
func (nf *nonFatal) Merge(err error) (ok bool) {
|
||||
if err == nil {
|
||||
return true // not an error
|
||||
}
|
||||
if !isNonFatal(err) {
|
||||
return false // fatal error
|
||||
}
|
||||
if nf.E == nil {
|
||||
nf.E = err // store first instance of non-fatal error
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Message is implemented by generated protocol buffer messages.
|
||||
type Message interface {
|
||||
Reset()
|
||||
String() string
|
||||
ProtoMessage()
|
||||
}
|
||||
|
||||
// A Buffer is a buffer manager for marshaling and unmarshaling
|
||||
// protocol buffers. It may be reused between invocations to
|
||||
// reduce memory usage. It is not necessary to use a Buffer;
|
||||
// the global functions Marshal and Unmarshal create a
|
||||
// temporary Buffer and are fine for most applications.
|
||||
type Buffer struct {
|
||||
buf []byte // encode/decode byte stream
|
||||
index int // read point
|
||||
|
||||
deterministic bool
|
||||
}
|
||||
|
||||
// NewBuffer allocates a new Buffer and initializes its internal data to
|
||||
// the contents of the argument slice.
|
||||
func NewBuffer(e []byte) *Buffer {
|
||||
return &Buffer{buf: e}
|
||||
}
|
||||
|
||||
// Reset resets the Buffer, ready for marshaling a new protocol buffer.
|
||||
func (p *Buffer) Reset() {
|
||||
p.buf = p.buf[0:0] // for reading/writing
|
||||
p.index = 0 // for reading
|
||||
}
|
||||
|
||||
// SetBuf replaces the internal buffer with the slice,
|
||||
// ready for unmarshaling the contents of the slice.
|
||||
func (p *Buffer) SetBuf(s []byte) {
|
||||
p.buf = s
|
||||
p.index = 0
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the Buffer.
|
||||
func (p *Buffer) Bytes() []byte { return p.buf }
|
||||
|
||||
// SetDeterministic sets whether to use deterministic serialization.
|
||||
//
|
||||
// Deterministic serialization guarantees that for a given binary, equal
|
||||
// messages will always be serialized to the same bytes. This implies:
|
||||
//
|
||||
// - Repeated serialization of a message will return the same bytes.
|
||||
// - Different processes of the same binary (which may be executing on
|
||||
// different machines) will serialize equal messages to the same bytes.
|
||||
//
|
||||
// Note that the deterministic serialization is NOT canonical across
|
||||
// languages. It is not guaranteed to remain stable over time. It is unstable
|
||||
// across different builds with schema changes due to unknown fields.
|
||||
// Users who need canonical serialization (e.g., persistent storage in a
|
||||
// canonical form, fingerprinting, etc.) should define their own
|
||||
// canonicalization specification and implement their own serializer rather
|
||||
// than relying on this API.
|
||||
//
|
||||
// If deterministic serialization is requested, map entries will be sorted
|
||||
// by keys in lexographical order. This is an implementation detail and
|
||||
// subject to change.
|
||||
func (p *Buffer) SetDeterministic(deterministic bool) {
|
||||
p.deterministic = deterministic
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Int is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it, but unlike Int32
|
||||
// its argument value is an int.
|
||||
func Int(v int) *int32 {
|
||||
p := new(int32)
|
||||
*p = int32(v)
|
||||
return p
|
||||
}
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float32 is a helper routine that allocates a new float32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float32(v float32) *float32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 {
|
||||
return &v
|
||||
}
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string {
|
||||
return &v
|
||||
}
|
||||
|
||||
// EnumName is a helper function to simplify printing protocol buffer enums
|
||||
// by name. Given an enum map and a value, it returns a useful string.
|
||||
func EnumName(m map[int32]string, v int32) string {
|
||||
s, ok := m[v]
|
||||
if ok {
|
||||
return s
|
||||
}
|
||||
return strconv.Itoa(int(v))
|
||||
}
|
||||
|
||||
// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
|
||||
// from their JSON-encoded representation. Given a map from the enum's symbolic
|
||||
// names to its int values, and a byte buffer containing the JSON-encoded
|
||||
// value, it returns an int32 that can be cast to the enum type by the caller.
|
||||
//
|
||||
// The function can deal with both JSON representations, numeric and symbolic.
|
||||
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
|
||||
if data[0] == '"' {
|
||||
// New style: enums are strings.
|
||||
var repr string
|
||||
if err := json.Unmarshal(data, &repr); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
val, ok := m[repr]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
// Old style: enums are ints.
|
||||
var val int32
|
||||
if err := json.Unmarshal(data, &val); err != nil {
|
||||
return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
|
||||
}
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// DebugPrint dumps the encoded data in b in a debugging format with a header
|
||||
// including the string s. Used in testing but made available for general debugging.
|
||||
func (p *Buffer) DebugPrint(s string, b []byte) {
|
||||
var u uint64
|
||||
|
||||
obuf := p.buf
|
||||
index := p.index
|
||||
p.buf = b
|
||||
p.index = 0
|
||||
depth := 0
|
||||
|
||||
fmt.Printf("\n--- %s ---\n", s)
|
||||
|
||||
out:
|
||||
for {
|
||||
for i := 0; i < depth; i++ {
|
||||
fmt.Print(" ")
|
||||
}
|
||||
|
||||
index := p.index
|
||||
if index == len(p.buf) {
|
||||
break
|
||||
}
|
||||
|
||||
op, err := p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: fetching op err %v\n", index, err)
|
||||
break out
|
||||
}
|
||||
tag := op >> 3
|
||||
wire := op & 7
|
||||
|
||||
switch wire {
|
||||
default:
|
||||
fmt.Printf("%3d: t=%3d unknown wire=%d\n",
|
||||
index, tag, wire)
|
||||
break out
|
||||
|
||||
case WireBytes:
|
||||
var r []byte
|
||||
|
||||
r, err = p.DecodeRawBytes(false)
|
||||
if err != nil {
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
|
||||
if len(r) <= 6 {
|
||||
for i := 0; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
} else {
|
||||
for i := 0; i < 3; i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
fmt.Printf(" ..")
|
||||
for i := len(r) - 3; i < len(r); i++ {
|
||||
fmt.Printf(" %.2x", r[i])
|
||||
}
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
case WireFixed32:
|
||||
u, err = p.DecodeFixed32()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
|
||||
|
||||
case WireFixed64:
|
||||
u, err = p.DecodeFixed64()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
|
||||
|
||||
case WireVarint:
|
||||
u, err = p.DecodeVarint()
|
||||
if err != nil {
|
||||
fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
|
||||
break out
|
||||
}
|
||||
fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
|
||||
|
||||
case WireStartGroup:
|
||||
fmt.Printf("%3d: t=%3d start\n", index, tag)
|
||||
depth++
|
||||
|
||||
case WireEndGroup:
|
||||
depth--
|
||||
fmt.Printf("%3d: t=%3d end\n", index, tag)
|
||||
}
|
||||
}
|
||||
|
||||
if depth != 0 {
|
||||
fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
|
||||
}
|
||||
fmt.Printf("\n")
|
||||
|
||||
p.buf = obuf
|
||||
p.index = index
|
||||
}
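
// Hypothetical sketch, not part of the vendored file: the key split used by
// DebugPrint above. A wire-format key packs the field tag and the wire type
// into a single varint as tag<<3 | wireType, so key 0x12 means field 2 with
// WireBytes (2).
func exampleKeySplit(key uint64) (tag, wire uint64) {
	return key >> 3, key & 7
}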
|
||||
|
||||
// SetDefaults sets unset protocol buffer fields to their default values.
|
||||
// It only modifies fields that are both unset and have defined defaults.
|
||||
// It recursively sets default values in any non-nil sub-messages.
|
||||
func SetDefaults(pb Message) {
|
||||
setDefaults(reflect.ValueOf(pb), true, false)
|
||||
}
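
// Hypothetical sketch, not part of the vendored file: a hand-written message
// type with a declared default, used to illustrate SetDefaults. Generated code
// normally provides such types; every name here is made up.
type exampleDefaultsMsg struct {
	Name             *string `protobuf:"bytes,1,opt,name=name,def=hello"`
	XXX_unrecognized []byte
}

func (m *exampleDefaultsMsg) Reset()         { *m = exampleDefaultsMsg{} }
func (m *exampleDefaultsMsg) String() string { return CompactTextString(m) }
func (*exampleDefaultsMsg) ProtoMessage()    {}

// exampleSetDefaults returns a message whose unset Name field has been filled
// with the declared default "hello".
func exampleSetDefaults() *exampleDefaultsMsg {
	m := &exampleDefaultsMsg{}
	SetDefaults(m)
	return m
}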
|
||||
|
||||
// v is a pointer to a struct.
|
||||
func setDefaults(v reflect.Value, recur, zeros bool) {
|
||||
v = v.Elem()
|
||||
|
||||
defaultMu.RLock()
|
||||
dm, ok := defaults[v.Type()]
|
||||
defaultMu.RUnlock()
|
||||
if !ok {
|
||||
dm = buildDefaultMessage(v.Type())
|
||||
defaultMu.Lock()
|
||||
defaults[v.Type()] = dm
|
||||
defaultMu.Unlock()
|
||||
}
|
||||
|
||||
for _, sf := range dm.scalars {
|
||||
f := v.Field(sf.index)
|
||||
if !f.IsNil() {
|
||||
// field already set
|
||||
continue
|
||||
}
|
||||
dv := sf.value
|
||||
if dv == nil && !zeros {
|
||||
// no explicit default, and don't want to set zeros
|
||||
continue
|
||||
}
|
||||
fptr := f.Addr().Interface() // **T
|
||||
// TODO: Consider batching the allocations we do here.
|
||||
switch sf.kind {
|
||||
case reflect.Bool:
|
||||
b := new(bool)
|
||||
if dv != nil {
|
||||
*b = dv.(bool)
|
||||
}
|
||||
*(fptr.(**bool)) = b
|
||||
case reflect.Float32:
|
||||
f := new(float32)
|
||||
if dv != nil {
|
||||
*f = dv.(float32)
|
||||
}
|
||||
*(fptr.(**float32)) = f
|
||||
case reflect.Float64:
|
||||
f := new(float64)
|
||||
if dv != nil {
|
||||
*f = dv.(float64)
|
||||
}
|
||||
*(fptr.(**float64)) = f
|
||||
case reflect.Int32:
|
||||
// might be an enum
|
||||
if ft := f.Type(); ft != int32PtrType {
|
||||
// enum
|
||||
f.Set(reflect.New(ft.Elem()))
|
||||
if dv != nil {
|
||||
f.Elem().SetInt(int64(dv.(int32)))
|
||||
}
|
||||
} else {
|
||||
// int32 field
|
||||
i := new(int32)
|
||||
if dv != nil {
|
||||
*i = dv.(int32)
|
||||
}
|
||||
*(fptr.(**int32)) = i
|
||||
}
|
||||
case reflect.Int64:
|
||||
i := new(int64)
|
||||
if dv != nil {
|
||||
*i = dv.(int64)
|
||||
}
|
||||
*(fptr.(**int64)) = i
|
||||
case reflect.String:
|
||||
s := new(string)
|
||||
if dv != nil {
|
||||
*s = dv.(string)
|
||||
}
|
||||
*(fptr.(**string)) = s
|
||||
case reflect.Uint8:
|
||||
// exceptional case: []byte
|
||||
var b []byte
|
||||
if dv != nil {
|
||||
db := dv.([]byte)
|
||||
b = make([]byte, len(db))
|
||||
copy(b, db)
|
||||
} else {
|
||||
b = []byte{}
|
||||
}
|
||||
*(fptr.(*[]byte)) = b
|
||||
case reflect.Uint32:
|
||||
u := new(uint32)
|
||||
if dv != nil {
|
||||
*u = dv.(uint32)
|
||||
}
|
||||
*(fptr.(**uint32)) = u
|
||||
case reflect.Uint64:
|
||||
u := new(uint64)
|
||||
if dv != nil {
|
||||
*u = dv.(uint64)
|
||||
}
|
||||
*(fptr.(**uint64)) = u
|
||||
default:
|
||||
log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ni := range dm.nested {
|
||||
f := v.Field(ni)
|
||||
// f is *T or []*T or map[T]*T
|
||||
switch f.Kind() {
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(f, recur, zeros)
|
||||
|
||||
case reflect.Slice:
|
||||
for i := 0; i < f.Len(); i++ {
|
||||
e := f.Index(i)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
for _, k := range f.MapKeys() {
|
||||
e := f.MapIndex(k)
|
||||
if e.IsNil() {
|
||||
continue
|
||||
}
|
||||
setDefaults(e, recur, zeros)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
// defaults maps a protocol buffer struct type to a slice of the fields,
|
||||
// with its scalar fields set to their proto-declared non-zero default values.
|
||||
defaultMu sync.RWMutex
|
||||
defaults = make(map[reflect.Type]defaultMessage)
|
||||
|
||||
int32PtrType = reflect.TypeOf((*int32)(nil))
|
||||
)
|
||||
|
||||
// defaultMessage represents information about the default values of a message.
|
||||
type defaultMessage struct {
|
||||
scalars []scalarField
|
||||
nested []int // struct field index of nested messages
|
||||
}
|
||||
|
||||
type scalarField struct {
|
||||
index int // struct field index
|
||||
kind reflect.Kind // element type (the T in *T or []T)
|
||||
value interface{} // the proto-declared default value, or nil
|
||||
}
|
||||
|
||||
// t is a struct type.
|
||||
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
|
||||
sprop := GetProperties(t)
|
||||
for _, prop := range sprop.Prop {
|
||||
fi, ok := sprop.decoderTags.get(prop.Tag)
|
||||
if !ok {
|
||||
// XXX_unrecognized
|
||||
continue
|
||||
}
|
||||
ft := t.Field(fi).Type
|
||||
|
||||
sf, nested, err := fieldDefault(ft, prop)
|
||||
switch {
|
||||
case err != nil:
|
||||
log.Print(err)
|
||||
case nested:
|
||||
dm.nested = append(dm.nested, fi)
|
||||
case sf != nil:
|
||||
sf.index = fi
|
||||
dm.scalars = append(dm.scalars, *sf)
|
||||
}
|
||||
}
|
||||
|
||||
return dm
|
||||
}
|
||||
|
||||
// fieldDefault returns the scalarField for field type ft.
|
||||
// sf will be nil if the field can not have a default.
|
||||
// nestedMessage will be true if this is a nested message.
|
||||
// Note that sf.index is not set on return.
|
||||
func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
|
||||
var canHaveDefault bool
|
||||
switch ft.Kind() {
|
||||
case reflect.Ptr:
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
nestedMessage = true
|
||||
} else {
|
||||
canHaveDefault = true // proto2 scalar field
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Ptr:
|
||||
nestedMessage = true // repeated message
|
||||
case reflect.Uint8:
|
||||
canHaveDefault = true // bytes field
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
if ft.Elem().Kind() == reflect.Ptr {
|
||||
nestedMessage = true // map with message values
|
||||
}
|
||||
}
|
||||
|
||||
if !canHaveDefault {
|
||||
if nestedMessage {
|
||||
return nil, true, nil
|
||||
}
|
||||
return nil, false, nil
|
||||
}
|
||||
|
||||
// We now know that ft is a pointer or slice.
|
||||
sf = &scalarField{kind: ft.Elem().Kind()}
|
||||
|
||||
// scalar fields without defaults
|
||||
if !prop.HasDefault {
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// a scalar field: either *T or []byte
|
||||
switch ft.Elem().Kind() {
|
||||
case reflect.Bool:
|
||||
x, err := strconv.ParseBool(prop.Default)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Float32:
|
||||
x, err := strconv.ParseFloat(prop.Default, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = float32(x)
|
||||
case reflect.Float64:
|
||||
x, err := strconv.ParseFloat(prop.Default, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.Int32:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = int32(x)
|
||||
case reflect.Int64:
|
||||
x, err := strconv.ParseInt(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
case reflect.String:
|
||||
sf.value = prop.Default
|
||||
case reflect.Uint8:
|
||||
// []byte (not *uint8)
|
||||
sf.value = []byte(prop.Default)
|
||||
case reflect.Uint32:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 32)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = uint32(x)
|
||||
case reflect.Uint64:
|
||||
x, err := strconv.ParseUint(prop.Default, 10, 64)
|
||||
if err != nil {
|
||||
return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
|
||||
}
|
||||
sf.value = x
|
||||
default:
|
||||
return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
|
||||
}
|
||||
|
||||
return sf, false, nil
|
||||
}
|
||||
|
||||
// mapKeys returns a sort.Interface to be used for sorting the map keys.
|
||||
// Map fields may have key types of non-float scalars, strings and enums.
|
||||
func mapKeys(vs []reflect.Value) sort.Interface {
|
||||
s := mapKeySorter{vs: vs}
|
||||
|
||||
// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
|
||||
if len(vs) == 0 {
|
||||
return s
|
||||
}
|
||||
switch vs[0].Kind() {
|
||||
case reflect.Int32, reflect.Int64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
|
||||
case reflect.Bool:
|
||||
s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
|
||||
case reflect.String:
|
||||
s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
|
||||
default:
|
||||
panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
type mapKeySorter struct {
|
||||
vs []reflect.Value
|
||||
less func(a, b reflect.Value) bool
|
||||
}
|
||||
|
||||
func (s mapKeySorter) Len() int { return len(s.vs) }
|
||||
func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
|
||||
func (s mapKeySorter) Less(i, j int) bool {
|
||||
return s.less(s.vs[i], s.vs[j])
|
||||
}
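
// Hypothetical sketch, not part of the vendored file: sorting the keys of a
// map[int32]string deterministically through mapKeys, as the encoder does
// before writing map entries.
func exampleSortedMapKeys(m map[int32]string) []reflect.Value {
	keys := reflect.ValueOf(m).MapKeys()
	sort.Sort(mapKeys(keys))
	return keys
}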
|
||||
|
||||
// isProto3Zero reports whether v is a zero proto3 value.
|
||||
func isProto3Zero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint32, reflect.Uint64:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.String:
|
||||
return v.String() == ""
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
const (
|
||||
// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion3 = true
|
||||
|
||||
// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion2 = true
|
||||
|
||||
// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the proto package.
|
||||
ProtoPackageIsVersion1 = true
|
||||
)
|
||||
|
||||
// InternalMessageInfo is a type used internally by generated .pb.go files.
|
||||
// This type is not intended to be used by non-generated code.
|
||||
// This type is not subject to any compatibility guarantee.
|
||||
type InternalMessageInfo struct {
|
||||
marshal *marshalInfo
|
||||
unmarshal *unmarshalInfo
|
||||
merge *mergeInfo
|
||||
discard *discardInfo
|
||||
}
|
||||
vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored, 181 deletions)
@@ -1,181 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Support for message sets.
|
||||
*/
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
|
||||
// A message type ID is required for storing a protocol buffer in a message set.
|
||||
var errNoMessageTypeID = errors.New("proto does not have a message type ID")
|
||||
|
||||
// The first two types (_MessageSet_Item and messageSet)
|
||||
// model what the protocol compiler produces for the following protocol message:
|
||||
// message MessageSet {
|
||||
// repeated group Item = 1 {
|
||||
// required int32 type_id = 2;
|
||||
// required string message = 3;
|
||||
// };
|
||||
// }
|
||||
// That is the MessageSet wire format. We can't use a proto to generate these
|
||||
// because that would introduce a circular dependency between it and this package.
|
||||
|
||||
type _MessageSet_Item struct {
|
||||
TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
|
||||
Message []byte `protobuf:"bytes,3,req,name=message"`
|
||||
}
|
||||
|
||||
type messageSet struct {
|
||||
Item []*_MessageSet_Item `protobuf:"group,1,rep"`
|
||||
XXX_unrecognized []byte
|
||||
// TODO: caching?
|
||||
}
|
||||
|
||||
// Make sure messageSet is a Message.
|
||||
var _ Message = (*messageSet)(nil)
|
||||
|
||||
// messageTypeIder is an interface satisfied by a protocol buffer type
|
||||
// that may be stored in a MessageSet.
|
||||
type messageTypeIder interface {
|
||||
MessageTypeId() int32
|
||||
}
|
||||
|
||||
func (ms *messageSet) find(pb Message) *_MessageSet_Item {
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
id := mti.MessageTypeId()
|
||||
for _, item := range ms.Item {
|
||||
if *item.TypeId == id {
|
||||
return item
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Has(pb Message) bool {
|
||||
return ms.find(pb) != nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Unmarshal(pb Message) error {
|
||||
if item := ms.find(pb); item != nil {
|
||||
return Unmarshal(item.Message, pb)
|
||||
}
|
||||
if _, ok := pb.(messageTypeIder); !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
return nil // TODO: return error instead?
|
||||
}
|
||||
|
||||
func (ms *messageSet) Marshal(pb Message) error {
|
||||
msg, err := Marshal(pb)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if item := ms.find(pb); item != nil {
|
||||
// reuse existing item
|
||||
item.Message = msg
|
||||
return nil
|
||||
}
|
||||
|
||||
mti, ok := pb.(messageTypeIder)
|
||||
if !ok {
|
||||
return errNoMessageTypeID
|
||||
}
|
||||
|
||||
mtid := mti.MessageTypeId()
|
||||
ms.Item = append(ms.Item, &_MessageSet_Item{
|
||||
TypeId: &mtid,
|
||||
Message: msg,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ms *messageSet) Reset() { *ms = messageSet{} }
|
||||
func (ms *messageSet) String() string { return CompactTextString(ms) }
|
||||
func (*messageSet) ProtoMessage() {}
|
||||
|
||||
// Support for the message_set_wire_format message option.
|
||||
|
||||
func skipVarint(buf []byte) []byte {
|
||||
i := 0
|
||||
for ; buf[i]&0x80 != 0; i++ {
|
||||
}
|
||||
return buf[i+1:]
|
||||
}
|
||||
|
||||
// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
|
||||
// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
|
||||
func unmarshalMessageSet(buf []byte, exts interface{}) error {
|
||||
var m map[int32]Extension
|
||||
switch exts := exts.(type) {
|
||||
case *XXX_InternalExtensions:
|
||||
m = exts.extensionsWrite()
|
||||
case map[int32]Extension:
|
||||
m = exts
|
||||
default:
|
||||
return errors.New("proto: not an extension map")
|
||||
}
|
||||
|
||||
ms := new(messageSet)
|
||||
if err := Unmarshal(buf, ms); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, item := range ms.Item {
|
||||
id := *item.TypeId
|
||||
msg := item.Message
|
||||
|
||||
// Restore wire type and field number varint, plus length varint.
|
||||
// Be careful to preserve duplicate items.
|
||||
b := EncodeVarint(uint64(id)<<3 | WireBytes)
|
||||
if ext, ok := m[id]; ok {
|
||||
// Existing data; rip off the tag and length varint
|
||||
// so we join the new data correctly.
|
||||
// We can assume that ext.enc is set because we are unmarshaling.
|
||||
o := ext.enc[len(b):] // skip wire type and field number
|
||||
_, n := DecodeVarint(o) // calculate length of length varint
|
||||
o = o[n:] // skip length varint
|
||||
msg = append(o, msg...) // join old data and new data
|
||||
}
|
||||
b = append(b, EncodeVarint(uint64(len(msg)))...)
|
||||
b = append(b, msg...)
|
||||
|
||||
m[id] = Extension{enc: b}
|
||||
}
|
||||
return nil
|
||||
}
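
// Hypothetical sketch, not part of the vendored file: the framing that
// unmarshalMessageSet rebuilds for each extension payload, namely a key varint
// (id<<3 | WireBytes) followed by a length varint and the message bytes.
func exampleMessageSetFraming(id int32, msg []byte) []byte {
	b := EncodeVarint(uint64(id)<<3 | WireBytes)
	b = append(b, EncodeVarint(uint64(len(msg)))...)
	return append(b, msg...)
}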
|
||||
vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored, 360 deletions)
@@ -1,360 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build purego appengine js
|
||||
|
||||
// This file contains an implementation of proto field accesses using package reflect.
|
||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||
// be used on App Engine.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const unsafeAllowed = false
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by the sequence of field indices
|
||||
// passed to reflect's FieldByIndex.
|
||||
type field []int
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return f.Index
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
var invalidField = field(nil)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
var zeroField = field([]int{})
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool { return f != nil }
|
||||
|
||||
// The pointer type is for the table-driven decoder.
|
||||
// The implementation here uses a reflect.Value of pointer type to
|
||||
// create a generic pointer. In pointer_unsafe.go we use unsafe
|
||||
// instead of reflect to implement the same (but faster) interface.
|
||||
type pointer struct {
|
||||
v reflect.Value
|
||||
}
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
return pointer{v: reflect.ValueOf(*i)}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
|
||||
v := reflect.ValueOf(*i)
|
||||
u := reflect.New(v.Type())
|
||||
u.Elem().Set(v)
|
||||
if deref {
|
||||
u = u.Elem()
|
||||
}
|
||||
return pointer{v: u}
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{v: v}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
|
||||
}
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.v.IsNil()
|
||||
}
|
||||
|
||||
// grow updates the slice s in place to make it one element longer.
|
||||
// s must be addressable.
|
||||
// Returns the (addressable) new element.
|
||||
func grow(s reflect.Value) reflect.Value {
|
||||
n, m := s.Len(), s.Cap()
|
||||
if n < m {
|
||||
s.SetLen(n + 1)
|
||||
} else {
|
||||
s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
|
||||
}
|
||||
return s.Index(n)
|
||||
}
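
// Hypothetical sketch, not part of the vendored file: appending one value to
// an addressable []int32 through grow, the way the reflect-based paths extend
// repeated fields.
func exampleGrow(s *[]int32, v int32) {
	grow(reflect.ValueOf(s).Elem()).SetInt(int64(v))
}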
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return p.v.Interface().(*int64)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return p.v.Interface().(**int64)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return p.v.Interface().(*[]int64)
|
||||
}
|
||||
|
||||
var int32ptr = reflect.TypeOf((*int32)(nil))
|
||||
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return p.v.Convert(int32ptr).Interface().(*int32)
|
||||
}
|
||||
|
||||
// The toInt32Ptr/Slice methods don't work because of enums.
|
||||
// Instead, we must use set/get methods for the int32ptr/slice case.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return p.v.Interface().(**int32)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return p.v.Interface().(*[]int32)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().(*int32)
|
||||
}
|
||||
// an enum
|
||||
return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
// Allocate value in a *int32. Possibly convert that to a *enum.
|
||||
// Then assign it to a **int32 or **enum.
|
||||
// Note: we can convert *int32 to *enum, but we can't convert
|
||||
// **int32 to **enum!
|
||||
p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
|
||||
}
|
||||
|
||||
// getInt32Slice copies []int32 from p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
return p.v.Elem().Interface().([]int32)
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []int32, then assign []enum's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := p.v.Elem()
|
||||
s := make([]int32, slice.Len())
|
||||
for i := 0; i < slice.Len(); i++ {
|
||||
s[i] = int32(slice.Index(i).Int())
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setInt32Slice copies []int32 into p as a new slice.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
|
||||
// raw int32 type
|
||||
p.v.Elem().Set(reflect.ValueOf(v))
|
||||
return
|
||||
}
|
||||
// an enum
|
||||
// Allocate a []enum, then assign []int32's values into it.
|
||||
// Note: we can't convert []enum to []int32.
|
||||
slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
|
||||
for i, x := range v {
|
||||
slice.Index(i).SetInt(int64(x))
|
||||
}
|
||||
p.v.Elem().Set(slice)
|
||||
}
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
grow(p.v.Elem()).SetInt(int64(v))
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return p.v.Interface().(*uint64)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return p.v.Interface().(**uint64)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return p.v.Interface().(*[]uint64)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return p.v.Interface().(*uint32)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return p.v.Interface().(**uint32)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return p.v.Interface().(*[]uint32)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return p.v.Interface().(*bool)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return p.v.Interface().(**bool)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return p.v.Interface().(*[]bool)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return p.v.Interface().(*float64)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return p.v.Interface().(**float64)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return p.v.Interface().(*[]float64)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return p.v.Interface().(*float32)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return p.v.Interface().(**float32)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return p.v.Interface().(*[]float32)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return p.v.Interface().(*string)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return p.v.Interface().(**string)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return p.v.Interface().(*[]string)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return p.v.Interface().(*[]byte)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return p.v.Interface().(*[][]byte)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return p.v.Interface().(*XXX_InternalExtensions)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return p.v.Interface().(*map[int32]Extension)
|
||||
}
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
p.v.Elem().Set(q.v)
|
||||
}
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
grow(p.v.Elem()).Set(q.v)
|
||||
}
|
||||
|
||||
// getPointerSlice copies []*T from p as a new []pointer.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
if p.v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
n := p.v.Elem().Len()
|
||||
s := make([]pointer, n)
|
||||
for i := 0; i < n; i++ {
|
||||
s[i] = pointer{v: p.v.Elem().Index(i)}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// setPointerSlice copies []pointer into p as a new []*T.
|
||||
// This behavior differs from the implementation in pointer_unsafe.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
if v == nil {
|
||||
p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
|
||||
return
|
||||
}
|
||||
s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
|
||||
for _, p := range v {
|
||||
s = reflect.Append(s, p.v)
|
||||
}
|
||||
p.v.Elem().Set(s)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
if p.v.Elem().IsNil() {
|
||||
return pointer{v: p.v.Elem()}
|
||||
}
|
||||
return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
|
||||
}
|
||||
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
// TODO: check that p.v.Type().Elem() == t?
|
||||
return p.v
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
return *p
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomicLock.Lock()
|
||||
defer atomicLock.Unlock()
|
||||
*p = v
|
||||
}
|
||||
|
||||
var atomicLock sync.Mutex
|
||||
vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (generated, vendored, 313 deletions)
@@ -1,313 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// +build !purego,!appengine,!js
|
||||
|
||||
// This file contains the implementation of the proto field accesses using package unsafe.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const unsafeAllowed = true
|
||||
|
||||
// A field identifies a field in a struct, accessible from a pointer.
|
||||
// In this implementation, a field is identified by its byte offset from the start of the struct.
|
||||
type field uintptr
|
||||
|
||||
// toField returns a field equivalent to the given reflect field.
|
||||
func toField(f *reflect.StructField) field {
|
||||
return field(f.Offset)
|
||||
}
|
||||
|
||||
// invalidField is an invalid field identifier.
|
||||
const invalidField = ^field(0)
|
||||
|
||||
// zeroField is a noop when calling pointer.offset.
|
||||
const zeroField = field(0)
|
||||
|
||||
// IsValid reports whether the field identifier is valid.
|
||||
func (f field) IsValid() bool {
|
||||
return f != invalidField
|
||||
}
|
||||
|
||||
// The pointer type below is for the new table-driven encoder/decoder.
|
||||
// The implementation here uses unsafe.Pointer to create a generic pointer.
|
||||
// In pointer_reflect.go we use reflect instead of unsafe to implement
|
||||
// the same (but slower) interface.
|
||||
type pointer struct {
|
||||
p unsafe.Pointer
|
||||
}
|
||||
|
||||
// size of pointer
|
||||
var ptrSize = unsafe.Sizeof(uintptr(0))
|
||||
|
||||
// toPointer converts an interface of pointer type to a pointer
|
||||
// that points to the same target.
|
||||
func toPointer(i *Message) pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
// Saves ~25ns over the equivalent:
|
||||
// return valToPointer(reflect.ValueOf(*i))
|
||||
return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
|
||||
// toAddrPointer converts an interface to a pointer that points to
|
||||
// the interface data.
|
||||
func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
|
||||
// Super-tricky - read or get the address of data word of interface value.
|
||||
if isptr {
|
||||
// The interface is of pointer type, thus it is a direct interface.
|
||||
// The data word is the pointer data itself. We take its address.
|
||||
p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
|
||||
} else {
|
||||
// The interface is not of pointer type. The data word is the pointer
|
||||
// to the data.
|
||||
p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
|
||||
}
|
||||
if deref {
|
||||
p.p = *(*unsafe.Pointer)(p.p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// valToPointer converts v to a pointer. v must be of pointer type.
|
||||
func valToPointer(v reflect.Value) pointer {
|
||||
return pointer{p: unsafe.Pointer(v.Pointer())}
|
||||
}
|
||||
|
||||
// offset converts from a pointer to a structure to a pointer to
|
||||
// one of its fields.
|
||||
func (p pointer) offset(f field) pointer {
|
||||
// For safety, we should panic if !f.IsValid, however calling panic causes
|
||||
// this to no longer be inlineable, which is a serious performance cost.
|
||||
/*
|
||||
if !f.IsValid() {
|
||||
panic("invalid field")
|
||||
}
|
||||
*/
|
||||
return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
|
||||
}
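
// Hypothetical sketch, not part of the vendored file: with the unsafe
// implementation a field is just a byte offset, so offset() reduces to pointer
// arithmetic. The struct below is made up.
func exampleUnsafeOffset() *int64 {
	type pair struct {
		A int64
		B int64
	}
	v := &pair{A: 1, B: 2}
	sf, _ := reflect.TypeOf(*v).FieldByName("B")
	p := pointer{p: unsafe.Pointer(v)}
	return (*int64)(p.offset(toField(&sf)).p) // points at v.B
}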
|
||||
|
||||
func (p pointer) isNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
func (p pointer) toInt64() *int64 {
|
||||
return (*int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Ptr() **int64 {
|
||||
return (**int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt64Slice() *[]int64 {
|
||||
return (*[]int64)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32() *int32 {
|
||||
return (*int32)(p.p)
|
||||
}
|
||||
|
||||
// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
|
||||
/*
|
||||
func (p pointer) toInt32Ptr() **int32 {
|
||||
return (**int32)(p.p)
|
||||
}
|
||||
func (p pointer) toInt32Slice() *[]int32 {
|
||||
return (*[]int32)(p.p)
|
||||
}
|
||||
*/
|
||||
func (p pointer) getInt32Ptr() *int32 {
|
||||
return *(**int32)(p.p)
|
||||
}
|
||||
func (p pointer) setInt32Ptr(v int32) {
|
||||
*(**int32)(p.p) = &v
|
||||
}
|
||||
|
||||
// getInt32Slice loads a []int32 from p.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getInt32Slice() []int32 {
|
||||
return *(*[]int32)(p.p)
|
||||
}
|
||||
|
||||
// setInt32Slice stores a []int32 to p.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setInt32Slice(v []int32) {
|
||||
*(*[]int32)(p.p) = v
|
||||
}
|
||||
|
||||
// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
|
||||
func (p pointer) appendInt32Slice(v int32) {
|
||||
s := (*[]int32)(p.p)
|
||||
*s = append(*s, v)
|
||||
}
|
||||
|
||||
func (p pointer) toUint64() *uint64 {
|
||||
return (*uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Ptr() **uint64 {
|
||||
return (**uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint64Slice() *[]uint64 {
|
||||
return (*[]uint64)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32() *uint32 {
|
||||
return (*uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Ptr() **uint32 {
|
||||
return (**uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toUint32Slice() *[]uint32 {
|
||||
return (*[]uint32)(p.p)
|
||||
}
|
||||
func (p pointer) toBool() *bool {
|
||||
return (*bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolPtr() **bool {
|
||||
return (**bool)(p.p)
|
||||
}
|
||||
func (p pointer) toBoolSlice() *[]bool {
|
||||
return (*[]bool)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64() *float64 {
|
||||
return (*float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Ptr() **float64 {
|
||||
return (**float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat64Slice() *[]float64 {
|
||||
return (*[]float64)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32() *float32 {
|
||||
return (*float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Ptr() **float32 {
|
||||
return (**float32)(p.p)
|
||||
}
|
||||
func (p pointer) toFloat32Slice() *[]float32 {
|
||||
return (*[]float32)(p.p)
|
||||
}
|
||||
func (p pointer) toString() *string {
|
||||
return (*string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringPtr() **string {
|
||||
return (**string)(p.p)
|
||||
}
|
||||
func (p pointer) toStringSlice() *[]string {
|
||||
return (*[]string)(p.p)
|
||||
}
|
||||
func (p pointer) toBytes() *[]byte {
|
||||
return (*[]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toBytesSlice() *[][]byte {
|
||||
return (*[][]byte)(p.p)
|
||||
}
|
||||
func (p pointer) toExtensions() *XXX_InternalExtensions {
|
||||
return (*XXX_InternalExtensions)(p.p)
|
||||
}
|
||||
func (p pointer) toOldExtensions() *map[int32]Extension {
|
||||
return (*map[int32]Extension)(p.p)
|
||||
}
|
||||
|
||||
// getPointerSlice loads []*T from p as a []pointer.
|
||||
// The value returned is aliased with the original slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) getPointerSlice() []pointer {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We load it as []pointer.
|
||||
return *(*[]pointer)(p.p)
|
||||
}
|
||||
|
||||
// setPointerSlice stores []pointer into p as a []*T.
|
||||
// The value set is aliased with the input slice.
|
||||
// This behavior differs from the implementation in pointer_reflect.go.
|
||||
func (p pointer) setPointerSlice(v []pointer) {
|
||||
// Super-tricky - p should point to a []*T where T is a
|
||||
// message type. We store it as []pointer.
|
||||
*(*[]pointer)(p.p) = v
|
||||
}
|
||||
|
||||
// getPointer loads the pointer at p and returns it.
|
||||
func (p pointer) getPointer() pointer {
|
||||
return pointer{p: *(*unsafe.Pointer)(p.p)}
|
||||
}
|
||||
|
||||
// setPointer stores the pointer q at p.
|
||||
func (p pointer) setPointer(q pointer) {
|
||||
*(*unsafe.Pointer)(p.p) = q.p
|
||||
}
|
||||
|
||||
// append q to the slice pointed to by p.
|
||||
func (p pointer) appendPointer(q pointer) {
|
||||
s := (*[]unsafe.Pointer)(p.p)
|
||||
*s = append(*s, q.p)
|
||||
}
|
||||
|
||||
// getInterfacePointer returns a pointer that points to the
|
||||
// interface data of the interface pointed by p.
|
||||
func (p pointer) getInterfacePointer() pointer {
|
||||
// Super-tricky - read pointer out of data word of interface value.
|
||||
return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
|
||||
}
|
||||
|
||||
// asPointerTo returns a reflect.Value that is a pointer to an
|
||||
// object of type t stored at p.
|
||||
func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
|
||||
return reflect.NewAt(t, p.p)
|
||||
}
|
||||
|
||||
func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
|
||||
return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
|
||||
return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
|
||||
return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
|
||||
return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
|
||||
}
|
||||
func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
|
||||
atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
|
||||
}
|
||||
vendor/github.com/golang/protobuf/proto/properties.go (generated, vendored, 544 deletions)
@@ -1,544 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
/*
|
||||
* Routines for encoding data into the wire format for protocol buffers.
|
||||
*/
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debug bool = false
|
||||
|
||||
// Constants that identify the encoding of a value on the wire.
|
||||
const (
|
||||
WireVarint = 0
|
||||
WireFixed64 = 1
|
||||
WireBytes = 2
|
||||
WireStartGroup = 3
|
||||
WireEndGroup = 4
|
||||
WireFixed32 = 5
|
||||
)
|
||||
|
||||
// tagMap is an optimization over map[int]int for typical protocol buffer
|
||||
// use-cases. Encoded protocol buffers are often in tag order with small tag
|
||||
// numbers.
|
||||
type tagMap struct {
|
||||
fastTags []int
|
||||
slowTags map[int]int
|
||||
}
|
||||
|
||||
// tagMapFastLimit is the upper bound on the tag number that will be stored in
|
||||
// the tagMap slice rather than its map.
|
||||
const tagMapFastLimit = 1024
|
||||
|
||||
func (p *tagMap) get(t int) (int, bool) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
if t >= len(p.fastTags) {
|
||||
return 0, false
|
||||
}
|
||||
fi := p.fastTags[t]
|
||||
return fi, fi >= 0
|
||||
}
|
||||
fi, ok := p.slowTags[t]
|
||||
return fi, ok
|
||||
}
|
||||
|
||||
func (p *tagMap) put(t int, fi int) {
|
||||
if t > 0 && t < tagMapFastLimit {
|
||||
for len(p.fastTags) < t+1 {
|
||||
p.fastTags = append(p.fastTags, -1)
|
||||
}
|
||||
p.fastTags[t] = fi
|
||||
return
|
||||
}
|
||||
if p.slowTags == nil {
|
||||
p.slowTags = make(map[int]int)
|
||||
}
|
||||
p.slowTags[t] = fi
|
||||
}
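
// Hypothetical sketch, not part of the vendored file: tags below
// tagMapFastLimit take the slice fast path, larger tags fall back to the map.
func exampleTagMap() (int, bool) {
	var tm tagMap
	tm.put(3, 7)    // stored in fastTags[3]
	tm.put(5000, 9) // 5000 >= tagMapFastLimit, stored in slowTags
	return tm.get(3) // 7, true
}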
|
||||
|
||||
// StructProperties represents properties for all the fields of a struct.
|
||||
// decoderTags and decoderOrigNames should only be used by the decoder.
|
||||
type StructProperties struct {
|
||||
Prop []*Properties // properties for each field
|
||||
reqCount int // required count
|
||||
decoderTags tagMap // map from proto tag to struct field number
|
||||
decoderOrigNames map[string]int // map from original name to struct field number
|
||||
order []int // list of struct field numbers in tag order
|
||||
|
||||
// OneofTypes contains information about the oneof fields in this message.
|
||||
// It is keyed by the original name of a field.
|
||||
OneofTypes map[string]*OneofProperties
|
||||
}
|
||||
|
||||
// OneofProperties represents information about a specific field in a oneof.
|
||||
type OneofProperties struct {
|
||||
Type reflect.Type // pointer to generated struct type for this oneof field
|
||||
Field int // struct field number of the containing oneof in the message
|
||||
Prop *Properties
|
||||
}
|
||||
|
||||
// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
|
||||
// See encode.go, (*Buffer).enc_struct.
|
||||
|
||||
func (sp *StructProperties) Len() int { return len(sp.order) }
|
||||
func (sp *StructProperties) Less(i, j int) bool {
|
||||
return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
|
||||
}
|
||||
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
|
||||
|
||||
// Properties represents the protocol-specific behavior of a single struct field.
|
||||
type Properties struct {
|
||||
Name string // name of the field, for error messages
|
||||
OrigName string // original name before protocol compiler (always set)
|
||||
JSONName string // name to use for JSON; determined by protoc
|
||||
Wire string
|
||||
WireType int
|
||||
Tag int
|
||||
Required bool
|
||||
Optional bool
|
||||
Repeated bool
|
||||
Packed bool // relevant for repeated primitives only
|
||||
Enum string // set for enum types only
|
||||
proto3 bool // whether this is known to be a proto3 field
|
||||
oneof bool // whether this is a oneof field
|
||||
|
||||
Default string // default value
|
||||
HasDefault bool // whether an explicit default was provided
|
||||
|
||||
stype reflect.Type // set for struct types only
|
||||
sprop *StructProperties // set for struct types only
|
||||
|
||||
mtype reflect.Type // set for map types only
|
||||
MapKeyProp *Properties // set for map types only
|
||||
MapValProp *Properties // set for map types only
|
||||
}
|
||||
|
||||
// String formats the properties in the protobuf struct field tag style.
|
||||
func (p *Properties) String() string {
|
||||
s := p.Wire
|
||||
s += ","
|
||||
s += strconv.Itoa(p.Tag)
|
||||
if p.Required {
|
||||
s += ",req"
|
||||
}
|
||||
if p.Optional {
|
||||
s += ",opt"
|
||||
}
|
||||
if p.Repeated {
|
||||
s += ",rep"
|
||||
}
|
||||
if p.Packed {
|
||||
s += ",packed"
|
||||
}
|
||||
s += ",name=" + p.OrigName
|
||||
if p.JSONName != p.OrigName {
|
||||
s += ",json=" + p.JSONName
|
||||
}
|
||||
if p.proto3 {
|
||||
s += ",proto3"
|
||||
}
|
||||
if p.oneof {
|
||||
s += ",oneof"
|
||||
}
|
||||
if len(p.Enum) > 0 {
|
||||
s += ",enum=" + p.Enum
|
||||
}
|
||||
if p.HasDefault {
|
||||
s += ",def=" + p.Default
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Parse populates p by parsing a string in the protobuf struct field tag style.
|
||||
func (p *Properties) Parse(s string) {
|
||||
// "bytes,49,opt,name=foo,def=hello!"
|
||||
fields := strings.Split(s, ",") // breaks def=, but handled below.
|
||||
if len(fields) < 2 {
|
||||
log.Printf("proto: tag has too few fields: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
p.Wire = fields[0]
|
||||
switch p.Wire {
|
||||
case "varint":
|
||||
p.WireType = WireVarint
|
||||
case "fixed32":
|
||||
p.WireType = WireFixed32
|
||||
case "fixed64":
|
||||
p.WireType = WireFixed64
|
||||
case "zigzag32":
|
||||
p.WireType = WireVarint
|
||||
case "zigzag64":
|
||||
p.WireType = WireVarint
|
||||
case "bytes", "group":
|
||||
p.WireType = WireBytes
|
||||
// no numeric converter for non-numeric types
|
||||
default:
|
||||
log.Printf("proto: tag has unknown wire type: %q", s)
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
p.Tag, err = strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
outer:
|
||||
for i := 2; i < len(fields); i++ {
|
||||
f := fields[i]
|
||||
switch {
|
||||
case f == "req":
|
||||
p.Required = true
|
||||
case f == "opt":
|
||||
p.Optional = true
|
||||
case f == "rep":
|
||||
p.Repeated = true
|
||||
case f == "packed":
|
||||
p.Packed = true
|
||||
case strings.HasPrefix(f, "name="):
|
||||
p.OrigName = f[5:]
|
||||
case strings.HasPrefix(f, "json="):
|
||||
p.JSONName = f[5:]
|
||||
case strings.HasPrefix(f, "enum="):
|
||||
p.Enum = f[5:]
|
||||
case f == "proto3":
|
||||
p.proto3 = true
|
||||
case f == "oneof":
|
||||
p.oneof = true
|
||||
case strings.HasPrefix(f, "def="):
|
||||
p.HasDefault = true
|
||||
p.Default = f[4:] // rest of string
|
||||
if i+1 < len(fields) {
|
||||
// Commas aren't escaped, and def is always last.
|
||||
p.Default += "," + strings.Join(fields[i+1:], ",")
|
||||
break outer
|
||||
}
|
||||
}
|
||||
}
|
||||
}
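
// Hypothetical sketch, not part of the vendored file: parsing the struct tag
// format shown in the comment above ("bytes,49,opt,name=foo,def=hello!").
func exampleParseTag() Properties {
	var p Properties
	p.Parse("bytes,49,opt,name=foo,def=hello!")
	// p.WireType == WireBytes, p.Tag == 49, p.Optional == true,
	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"
	return p
}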
|
||||
|
||||
var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
|
||||
|
||||
// setFieldProps initializes the field properties for submessages and maps.
|
||||
func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
|
||||
switch t1 := typ; t1.Kind() {
|
||||
case reflect.Ptr:
|
||||
if t1.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t1.Elem()
|
||||
}
|
||||
|
||||
case reflect.Slice:
|
||||
if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
|
||||
p.stype = t2.Elem()
|
||||
}
|
||||
|
||||
case reflect.Map:
|
||||
p.mtype = t1
|
||||
p.MapKeyProp = &Properties{}
|
||||
p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
|
||||
p.MapValProp = &Properties{}
|
||||
vtype := p.mtype.Elem()
|
||||
if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
|
||||
// The value type is not a message (*T) or bytes ([]byte),
|
||||
// so we need encoders for the pointer to this type.
|
||||
vtype = reflect.PtrTo(vtype)
|
||||
}
|
||||
p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
|
||||
}
|
||||
|
||||
if p.stype != nil {
|
||||
if lockGetProp {
|
||||
p.sprop = GetProperties(p.stype)
|
||||
} else {
|
||||
p.sprop = getPropertiesLocked(p.stype)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var (
|
||||
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
|
||||
)
|
||||
|
||||
// Init populates the properties from a protocol buffer struct tag.
|
||||
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
|
||||
p.init(typ, name, tag, f, true)
|
||||
}
|
||||
|
||||
func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
|
||||
// "bytes,49,opt,def=hello!"
|
||||
p.Name = name
|
||||
p.OrigName = name
|
||||
if tag == "" {
|
||||
return
|
||||
}
|
||||
p.Parse(tag)
|
||||
p.setFieldProps(typ, f, lockGetProp)
|
||||
}
|
||||
|
||||
var (
|
||||
propertiesMu sync.RWMutex
|
||||
propertiesMap = make(map[reflect.Type]*StructProperties)
|
||||
)
|
||||
|
||||
// GetProperties returns the list of properties for the type represented by t.
|
||||
// t must represent a generated struct type of a protocol message.
|
||||
func GetProperties(t reflect.Type) *StructProperties {
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic("proto: type must have kind struct")
|
||||
}
|
||||
|
||||
// Most calls to GetProperties in a long-running program will be
|
||||
// retrieving details for types we have seen before.
|
||||
propertiesMu.RLock()
|
||||
sprop, ok := propertiesMap[t]
|
||||
propertiesMu.RUnlock()
|
||||
if ok {
|
||||
return sprop
|
||||
}
|
||||
|
||||
propertiesMu.Lock()
|
||||
sprop = getPropertiesLocked(t)
|
||||
propertiesMu.Unlock()
|
||||
return sprop
|
||||
}
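
// Hypothetical sketch, not part of the vendored file: looking up (and caching)
// the properties of a hand-written message-like struct; the type and its tag
// are made up.
func exampleGetProperties() *StructProperties {
	type tinyMsg struct {
		Id *int64 `protobuf:"varint,1,opt,name=id"`
	}
	return GetProperties(reflect.TypeOf(tinyMsg{}))
}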
|
||||
|
||||
type (
|
||||
oneofFuncsIface interface {
|
||||
XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
|
||||
}
|
||||
oneofWrappersIface interface {
|
||||
XXX_OneofWrappers() []interface{}
|
||||
}
|
||||
)
|
||||
|
||||
// getPropertiesLocked requires that propertiesMu is held.
|
||||
func getPropertiesLocked(t reflect.Type) *StructProperties {
|
||||
if prop, ok := propertiesMap[t]; ok {
|
||||
return prop
|
||||
}
|
||||
|
||||
prop := new(StructProperties)
|
||||
// in case of recursive protos, fill this in now.
|
||||
propertiesMap[t] = prop
|
||||
|
||||
// build properties
|
||||
prop.Prop = make([]*Properties, t.NumField())
|
||||
prop.order = make([]int, t.NumField())
|
||||
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
p := new(Properties)
|
||||
name := f.Name
|
||||
p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
|
||||
|
||||
oneof := f.Tag.Get("protobuf_oneof") // special case
|
||||
if oneof != "" {
|
||||
// Oneof fields don't use the traditional protobuf tag.
|
||||
p.OrigName = oneof
|
||||
}
|
||||
prop.Prop[i] = p
|
||||
prop.order[i] = i
|
||||
if debug {
|
||||
print(i, " ", f.Name, " ", t.String(), " ")
|
||||
if p.Tag > 0 {
|
||||
print(p.String())
|
||||
}
|
||||
print("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// Re-order prop.order.
|
||||
sort.Sort(prop)
|
||||
|
||||
var oots []interface{}
|
||||
switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
|
||||
case oneofFuncsIface:
|
||||
_, _, _, oots = m.XXX_OneofFuncs()
|
||||
case oneofWrappersIface:
|
||||
oots = m.XXX_OneofWrappers()
|
||||
}
|
||||
if len(oots) > 0 {
|
||||
// Interpret oneof metadata.
|
||||
prop.OneofTypes = make(map[string]*OneofProperties)
|
||||
for _, oot := range oots {
|
||||
oop := &OneofProperties{
|
||||
Type: reflect.ValueOf(oot).Type(), // *T
|
||||
Prop: new(Properties),
|
||||
}
|
||||
sft := oop.Type.Elem().Field(0)
|
||||
oop.Prop.Name = sft.Name
|
||||
oop.Prop.Parse(sft.Tag.Get("protobuf"))
|
||||
// There will be exactly one interface field that
|
||||
// this new value is assignable to.
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if f.Type.Kind() != reflect.Interface {
|
||||
continue
|
||||
}
|
||||
if !oop.Type.AssignableTo(f.Type) {
|
||||
continue
|
||||
}
|
||||
oop.Field = i
|
||||
break
|
||||
}
|
||||
prop.OneofTypes[oop.Prop.OrigName] = oop
|
||||
}
|
||||
}
|
||||
|
||||
// build required counts
|
||||
// build tags
|
||||
reqCount := 0
|
||||
prop.decoderOrigNames = make(map[string]int)
|
||||
for i, p := range prop.Prop {
|
||||
if strings.HasPrefix(p.Name, "XXX_") {
|
||||
// Internal fields should not appear in tags/origNames maps.
|
||||
// They are handled specially when encoding and decoding.
|
||||
continue
|
||||
}
|
||||
if p.Required {
|
||||
reqCount++
|
||||
}
|
||||
prop.decoderTags.put(p.Tag, i)
|
||||
prop.decoderOrigNames[p.OrigName] = i
|
||||
}
|
||||
prop.reqCount = reqCount
|
||||
|
||||
return prop
|
||||
}
|
||||
|
||||
// A global registry of enum types.
|
||||
// The generated code will register the generated maps by calling RegisterEnum.
|
||||
|
||||
var enumValueMaps = make(map[string]map[string]int32)
|
||||
|
||||
// RegisterEnum is called from the generated code to install the enum descriptor
|
||||
// maps into the global table to aid parsing text format protocol buffers.
|
||||
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
|
||||
if _, ok := enumValueMaps[typeName]; ok {
|
||||
panic("proto: duplicate enum registered: " + typeName)
|
||||
}
|
||||
enumValueMaps[typeName] = valueMap
|
||||
}
|
||||
|
||||
// EnumValueMap returns the mapping from names to integers of the
|
||||
// enum type enumType, or a nil if not found.
|
||||
func EnumValueMap(enumType string) map[string]int32 {
|
||||
return enumValueMaps[enumType]
|
||||
}
|
||||
|
||||
// A registry of all linked message types.
|
||||
// The string is a fully-qualified proto name ("pkg.Message").
|
||||
var (
|
||||
protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers
|
||||
protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types
|
||||
revProtoTypes = make(map[reflect.Type]string)
|
||||
)
|
||||
|
||||
// RegisterType is called from generated code and maps from the fully qualified
|
||||
// proto name to the type (pointer to struct) of the protocol buffer.
|
||||
func RegisterType(x Message, name string) {
|
||||
if _, ok := protoTypedNils[name]; ok {
|
||||
// TODO: Some day, make this a panic.
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
|
||||
// Generated code always calls RegisterType with nil x.
|
||||
// This check is just for extra safety.
|
||||
protoTypedNils[name] = x
|
||||
} else {
|
||||
protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
|
||||
}
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// RegisterMapType is called from generated code and maps from the fully qualified
|
||||
// proto name to the native map type of the proto map definition.
|
||||
func RegisterMapType(x interface{}, name string) {
|
||||
if reflect.TypeOf(x).Kind() != reflect.Map {
|
||||
panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
|
||||
}
|
||||
if _, ok := protoMapTypes[name]; ok {
|
||||
log.Printf("proto: duplicate proto type registered: %s", name)
|
||||
return
|
||||
}
|
||||
t := reflect.TypeOf(x)
|
||||
protoMapTypes[name] = t
|
||||
revProtoTypes[t] = name
|
||||
}
|
||||
|
||||
// MessageName returns the fully-qualified proto name for the given message type.
|
||||
func MessageName(x Message) string {
|
||||
type xname interface {
|
||||
XXX_MessageName() string
|
||||
}
|
||||
if m, ok := x.(xname); ok {
|
||||
return m.XXX_MessageName()
|
||||
}
|
||||
return revProtoTypes[reflect.TypeOf(x)]
|
||||
}
|
||||
|
||||
// MessageType returns the message type (pointer to struct) for a named message.
|
||||
// The type is not guaranteed to implement proto.Message if the name refers to a
|
||||
// map entry.
|
||||
func MessageType(name string) reflect.Type {
|
||||
if t, ok := protoTypedNils[name]; ok {
|
||||
return reflect.TypeOf(t)
|
||||
}
|
||||
return protoMapTypes[name]
|
||||
}
|
||||
|
||||
// A registry of all linked proto files.
|
||||
var (
|
||||
protoFiles = make(map[string][]byte) // file name => fileDescriptor
|
||||
)
|
||||
|
||||
// RegisterFile is called from generated code and maps from the
|
||||
// full file name of a .proto file to its compressed FileDescriptorProto.
|
||||
func RegisterFile(filename string, fileDescriptor []byte) {
|
||||
protoFiles[filename] = fileDescriptor
|
||||
}
|
||||
|
||||
// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
|
||||
func FileDescriptor(filename string) []byte { return protoFiles[filename] }
|
||||
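The registry removed above is only reached through the public proto package API. As a rough sketch (the message name below is a placeholder; nothing from this repository is assumed), a fully-qualified proto name can be resolved back to its Go type:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// lookupType resolves a fully-qualified proto name against the registry
// that generated code fills via proto.RegisterType at init time.
func lookupType(name string) {
	if t := proto.MessageType(name); t != nil {
		fmt.Printf("%s -> %v\n", name, t)
	} else {
		fmt.Printf("%s is not linked into this binary\n", name)
	}
}

func main() {
	// "google.protobuf.Any" is only a placeholder name for the example.
	lookupType("google.protobuf.Any")
}
```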
2776 vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored
File diff suppressed because it is too large
Load Diff
654 vendor/github.com/golang/protobuf/proto/table_merge.go generated vendored
File diff suppressed (the entire 654-line file is deleted: InternalMessageInfo.Merge and its reflection-based mergeFieldInfo tables)
Load Diff
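For generated messages those merge tables sit underneath proto.Merge and proto.Clone. A minimal sketch of the calling side, with the concrete message types left to the caller:

```go
package example

import "github.com/golang/protobuf/proto"

// overlay copies every populated field of src into dst; both must be the
// same generated message type and non-nil, per Merge's contract.
func overlay(dst, src proto.Message) {
	proto.Merge(dst, src)
}

// snapshot returns a deep copy of m; proto.Clone runs the same machinery.
func snapshot(m proto.Message) proto.Message {
	return proto.Clone(m)
}
```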
2053 vendor/github.com/golang/protobuf/proto/table_unmarshal.go generated vendored
File diff suppressed because it is too large
Load Diff
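Together with table_marshal.go above, this file implemented the binary wire codec behind proto.Marshal and proto.Unmarshal. A hedged sketch of the usual round trip, generic over whatever generated messages a caller supplies:

```go
package example

import "github.com/golang/protobuf/proto"

// roundTrip encodes src to the binary wire format and decodes the bytes
// into dst. For a real round trip src and dst would be values of the same
// generated message type.
func roundTrip(src, dst proto.Message) error {
	raw, err := proto.Marshal(src)
	if err != nil {
		return err
	}
	return proto.Unmarshal(raw, dst)
}
```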
843 vendor/github.com/golang/protobuf/proto/text.go generated vendored
File diff suppressed (the entire 843-line file is deleted: the text-format marshaler, i.e. textWriter, TextMarshaler, MarshalText, MarshalTextString, CompactText and CompactTextString)
Load Diff
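text.go provided the human-readable debug format exposed as proto.MarshalTextString and proto.CompactText. A small usage sketch (any generated message works; none from this repository is assumed):

```go
package example

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
)

// dump prints m twice: first the indented multi-line text form, then the
// compact single-line form, both produced by the marshaler in text.go.
func dump(m proto.Message) {
	fmt.Println(proto.MarshalTextString(m))

	if err := proto.CompactText(os.Stdout, m); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```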
880 vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored
@@ -1,880 +0,0 @@
|
||||
// Go support for Protocol Buffers - Google's data interchange format
|
||||
//
|
||||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// https://github.com/golang/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package proto
|
||||
|
||||
// Functions for parsing the Text protocol buffer format.
|
||||
// TODO: message sets.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Error string emitted when deserializing Any and fields are already set
|
||||
const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
|
||||
|
||||
type ParseError struct {
|
||||
Message string
|
||||
Line int // 1-based line number
|
||||
Offset int // 0-based byte offset from start of input
|
||||
}
|
||||
|
||||
func (p *ParseError) Error() string {
|
||||
if p.Line == 1 {
|
||||
// show offset only for first line
|
||||
return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
|
||||
}
|
||||
return fmt.Sprintf("line %d: %v", p.Line, p.Message)
|
||||
}
|
||||
|
||||
type token struct {
|
||||
value string
|
||||
err *ParseError
|
||||
line int // line number
|
||||
offset int // byte number from start of input, not start of line
|
||||
unquoted string // the unquoted version of value, if it was a quoted string
|
||||
}
|
||||
|
||||
func (t *token) String() string {
|
||||
if t.err == nil {
|
||||
return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
|
||||
}
|
||||
return fmt.Sprintf("parse error: %v", t.err)
|
||||
}
|
||||
|
||||
type textParser struct {
|
||||
s string // remaining input
|
||||
done bool // whether the parsing is finished (success or error)
|
||||
backed bool // whether back() was called
|
||||
offset, line int
|
||||
cur token
|
||||
}
|
||||
|
||||
func newTextParser(s string) *textParser {
|
||||
p := new(textParser)
|
||||
p.s = s
|
||||
p.line = 1
|
||||
p.cur.line = 1
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
|
||||
pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
|
||||
p.cur.err = pe
|
||||
p.done = true
|
||||
return pe
|
||||
}
|
||||
|
||||
// Numbers and identifiers are matched by [-+._A-Za-z0-9]
|
||||
func isIdentOrNumberChar(c byte) bool {
|
||||
switch {
|
||||
case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
|
||||
return true
|
||||
case '0' <= c && c <= '9':
|
||||
return true
|
||||
}
|
||||
switch c {
|
||||
case '-', '+', '.', '_':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWhitespace(c byte) bool {
|
||||
switch c {
|
||||
case ' ', '\t', '\n', '\r':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isQuote(c byte) bool {
|
||||
switch c {
|
||||
case '"', '\'':
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *textParser) skipWhitespace() {
|
||||
i := 0
|
||||
for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
|
||||
if p.s[i] == '#' {
|
||||
// comment; skip to end of line or input
|
||||
for i < len(p.s) && p.s[i] != '\n' {
|
||||
i++
|
||||
}
|
||||
if i == len(p.s) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p.s[i] == '\n' {
|
||||
p.line++
|
||||
}
|
||||
i++
|
||||
}
|
||||
p.offset += i
|
||||
p.s = p.s[i:len(p.s)]
|
||||
if len(p.s) == 0 {
|
||||
p.done = true
|
||||
}
|
||||
}
|
||||
|
||||
func (p *textParser) advance() {
|
||||
// Skip whitespace
|
||||
p.skipWhitespace()
|
||||
if p.done {
|
||||
return
|
||||
}
|
||||
|
||||
// Start of non-whitespace
|
||||
p.cur.err = nil
|
||||
p.cur.offset, p.cur.line = p.offset, p.line
|
||||
p.cur.unquoted = ""
|
||||
switch p.s[0] {
|
||||
case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
|
||||
// Single symbol
|
||||
p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
|
||||
case '"', '\'':
|
||||
// Quoted string
|
||||
i := 1
|
||||
for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
|
||||
if p.s[i] == '\\' && i+1 < len(p.s) {
|
||||
// skip escaped char
|
||||
i++
|
||||
}
|
||||
i++
|
||||
}
|
||||
if i >= len(p.s) || p.s[i] != p.s[0] {
|
||||
p.errorf("unmatched quote")
|
||||
return
|
||||
}
|
||||
unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
|
||||
if err != nil {
|
||||
p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
|
||||
p.cur.unquoted = unq
|
||||
default:
|
||||
i := 0
|
||||
for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
|
||||
i++
|
||||
}
|
||||
if i == 0 {
|
||||
p.errorf("unexpected byte %#x", p.s[0])
|
||||
return
|
||||
}
|
||||
p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
|
||||
}
|
||||
p.offset += len(p.cur.value)
|
||||
}
|
||||
|
||||
var (
|
||||
errBadUTF8 = errors.New("proto: bad UTF-8")
|
||||
)
|
||||
|
||||
func unquoteC(s string, quote rune) (string, error) {
|
||||
// This is based on C++'s tokenizer.cc.
|
||||
// Despite its name, this is *not* parsing C syntax.
|
||||
// For instance, "\0" is an invalid quoted string.
|
||||
|
||||
// Avoid allocation in trivial cases.
|
||||
simple := true
|
||||
for _, r := range s {
|
||||
if r == '\\' || r == quote {
|
||||
simple = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if simple {
|
||||
return s, nil
|
||||
}
|
||||
|
||||
buf := make([]byte, 0, 3*len(s)/2)
|
||||
for len(s) > 0 {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
if r != '\\' {
|
||||
if r < utf8.RuneSelf {
|
||||
buf = append(buf, byte(r))
|
||||
} else {
|
||||
buf = append(buf, string(r)...)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
ch, tail, err := unescape(s)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
buf = append(buf, ch...)
|
||||
s = tail
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func unescape(s string) (ch string, tail string, err error) {
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
if r == utf8.RuneError && n == 1 {
|
||||
return "", "", errBadUTF8
|
||||
}
|
||||
s = s[n:]
|
||||
switch r {
|
||||
case 'a':
|
||||
return "\a", s, nil
|
||||
case 'b':
|
||||
return "\b", s, nil
|
||||
case 'f':
|
||||
return "\f", s, nil
|
||||
case 'n':
|
||||
return "\n", s, nil
|
||||
case 'r':
|
||||
return "\r", s, nil
|
||||
case 't':
|
||||
return "\t", s, nil
|
||||
case 'v':
|
||||
return "\v", s, nil
|
||||
case '?':
|
||||
return "?", s, nil // trigraph workaround
|
||||
case '\'', '"', '\\':
|
||||
return string(r), s, nil
|
||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
||||
if len(s) < 2 {
|
||||
return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
|
||||
}
|
||||
ss := string(r) + s[:2]
|
||||
s = s[2:]
|
||||
i, err := strconv.ParseUint(ss, 8, 8)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
|
||||
}
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
case 'x', 'X', 'u', 'U':
|
||||
var n int
|
||||
switch r {
|
||||
case 'x', 'X':
|
||||
n = 2
|
||||
case 'u':
|
||||
n = 4
|
||||
case 'U':
|
||||
n = 8
|
||||
}
|
||||
if len(s) < n {
|
||||
return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
|
||||
}
|
||||
ss := s[:n]
|
||||
s = s[n:]
|
||||
i, err := strconv.ParseUint(ss, 16, 64)
|
||||
if err != nil {
|
||||
return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
|
||||
}
|
||||
if r == 'x' || r == 'X' {
|
||||
return string([]byte{byte(i)}), s, nil
|
||||
}
|
||||
if i > utf8.MaxRune {
|
||||
return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
|
||||
}
|
||||
return string(i), s, nil
|
||||
}
|
||||
return "", "", fmt.Errorf(`unknown escape \%c`, r)
|
||||
}
|
||||
|
||||
// Back off the parser by one token. Can only be done between calls to next().
|
||||
// It makes the next advance() a no-op.
|
||||
func (p *textParser) back() { p.backed = true }
|
||||
|
||||
// Advances the parser and returns the new current token.
|
||||
func (p *textParser) next() *token {
|
||||
if p.backed || p.done {
|
||||
p.backed = false
|
||||
return &p.cur
|
||||
}
|
||||
p.advance()
|
||||
if p.done {
|
||||
p.cur.value = ""
|
||||
} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
|
||||
// Look for multiple quoted strings separated by whitespace,
|
||||
// and concatenate them.
|
||||
cat := p.cur
|
||||
for {
|
||||
p.skipWhitespace()
|
||||
if p.done || !isQuote(p.s[0]) {
|
||||
break
|
||||
}
|
||||
p.advance()
|
||||
if p.cur.err != nil {
|
||||
return &p.cur
|
||||
}
|
||||
cat.value += " " + p.cur.value
|
||||
cat.unquoted += p.cur.unquoted
|
||||
}
|
||||
p.done = false // parser may have seen EOF, but we want to return cat
|
||||
p.cur = cat
|
||||
}
|
||||
return &p.cur
|
||||
}
|
||||
|
||||
func (p *textParser) consumeToken(s string) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != s {
|
||||
p.back()
|
||||
return p.errorf("expected %q, found %q", s, tok.value)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Return a RequiredNotSetError indicating which required field was not set.
|
||||
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
for i := 0; i < st.NumField(); i++ {
|
||||
if !isNil(sv.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
props := sprops.Prop[i]
|
||||
if props.Required {
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
|
||||
}
|
||||
}
|
||||
return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
|
||||
}
|
||||
|
||||
// Returns the index in the struct for the named field, as well as the parsed tag properties.
|
||||
func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
|
||||
i, ok := sprops.decoderOrigNames[name]
|
||||
if ok {
|
||||
return i, sprops.Prop[i], true
|
||||
}
|
||||
return -1, nil, false
|
||||
}
|
||||
|
||||
// Consume a ':' from the input stream (if the next token is a colon),
|
||||
// returning an error if a colon is needed but not present.
|
||||
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ":" {
|
||||
// Colon is optional when the field is a group or message.
|
||||
needColon := true
|
||||
switch props.Wire {
|
||||
case "group":
|
||||
needColon = false
|
||||
case "bytes":
|
||||
// A "bytes" field is either a message, a string, or a repeated field;
|
||||
// those three become *T, *string and []T respectively, so we can check for
|
||||
// this field being a pointer to a non-string.
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
// *T or *string
|
||||
if typ.Elem().Kind() == reflect.String {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.Slice {
|
||||
// []T or []*T
|
||||
if typ.Elem().Kind() != reflect.Ptr {
|
||||
break
|
||||
}
|
||||
} else if typ.Kind() == reflect.String {
|
||||
// The proto3 exception is for a string field,
|
||||
// which requires a colon.
|
||||
break
|
||||
}
|
||||
needColon = false
|
||||
}
|
||||
if needColon {
|
||||
return p.errorf("expected ':', found %q", tok.value)
|
||||
}
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||
st := sv.Type()
|
||||
sprops := GetProperties(st)
|
||||
reqCount := sprops.reqCount
|
||||
var reqFieldErr error
|
||||
fieldSet := make(map[string]bool)
|
||||
// A struct is a sequence of "name: value", terminated by one of
|
||||
// '>' or '}', or the end of the input. A name may also be
|
||||
// "[extension]" or "[type/url]".
|
||||
//
|
||||
// The whole struct can also be an expanded Any message, like:
|
||||
// [type/url] < ... struct contents ... >
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
if tok.value == "[" {
|
||||
// Looks like an extension or an Any.
|
||||
//
|
||||
// TODO: Check whether we need to handle
|
||||
// namespace rooted names (e.g. ".something.Foo").
|
||||
extName, err := p.consumeExtName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s := strings.LastIndex(extName, "/"); s >= 0 {
|
||||
// If it contains a slash, it's an Any type URL.
|
||||
messageName := extName[s+1:]
|
||||
mt := MessageType(messageName)
|
||||
if mt == nil {
|
||||
return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
|
||||
}
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
// consume an optional colon
|
||||
if tok.value == ":" {
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
}
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
v := reflect.New(mt.Elem())
|
||||
if pe := p.readStruct(v.Elem(), terminator); pe != nil {
|
||||
return pe
|
||||
}
|
||||
b, err := Marshal(v.Interface().(Message))
|
||||
if err != nil {
|
||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||
}
|
||||
if fieldSet["type_url"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||
}
|
||||
if fieldSet["value"] {
|
||||
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||
}
|
||||
sv.FieldByName("TypeUrl").SetString(extName)
|
||||
sv.FieldByName("Value").SetBytes(b)
|
||||
fieldSet["type_url"] = true
|
||||
fieldSet["value"] = true
|
||||
continue
|
||||
}
|
||||
|
||||
var desc *ExtensionDesc
|
||||
// This could be faster, but it's functional.
|
||||
// TODO: Do something smarter than a linear scan.
|
||||
for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
|
||||
if d.Name == extName {
|
||||
desc = d
|
||||
break
|
||||
}
|
||||
}
|
||||
if desc == nil {
|
||||
return p.errorf("unrecognized extension %q", extName)
|
||||
}
|
||||
|
||||
props := &Properties{}
|
||||
props.Parse(desc.Tag)
|
||||
|
||||
typ := reflect.TypeOf(desc.ExtensionType)
|
||||
if err := p.checkForColon(props, typ); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rep := desc.repeated()
|
||||
|
||||
// Read the extension structure, and set it in
|
||||
// the value we're constructing.
|
||||
var ext reflect.Value
|
||||
if !rep {
|
||||
ext = reflect.New(typ).Elem()
|
||||
} else {
|
||||
ext = reflect.New(typ.Elem()).Elem()
|
||||
}
|
||||
if err := p.readAny(ext, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
ep := sv.Addr().Interface().(Message)
|
||||
if !rep {
|
||||
SetExtension(ep, desc, ext.Interface())
|
||||
} else {
|
||||
old, err := GetExtension(ep, desc)
|
||||
var sl reflect.Value
|
||||
if err == nil {
|
||||
sl = reflect.ValueOf(old) // existing slice
|
||||
} else {
|
||||
sl = reflect.MakeSlice(typ, 0, 1)
|
||||
}
|
||||
sl = reflect.Append(sl, ext)
|
||||
SetExtension(ep, desc, sl.Interface())
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// This is a normal, non-extension field.
|
||||
name := tok.value
|
||||
var dst reflect.Value
|
||||
fi, props, ok := structFieldByName(sprops, name)
|
||||
if ok {
|
||||
dst = sv.Field(fi)
|
||||
} else if oop, ok := sprops.OneofTypes[name]; ok {
|
||||
// It is a oneof.
|
||||
props = oop.Prop
|
||||
nv := reflect.New(oop.Type.Elem())
|
||||
dst = nv.Elem().Field(0)
|
||||
field := sv.Field(oop.Field)
|
||||
if !field.IsNil() {
|
||||
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||
}
|
||||
field.Set(nv)
|
||||
}
|
||||
if !dst.IsValid() {
|
||||
return p.errorf("unknown field name %q in %v", name, st)
|
||||
}
|
||||
|
||||
if dst.Kind() == reflect.Map {
|
||||
// Consume any colon.
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Construct the map if it doesn't already exist.
|
||||
if dst.IsNil() {
|
||||
dst.Set(reflect.MakeMap(dst.Type()))
|
||||
}
|
||||
key := reflect.New(dst.Type().Key()).Elem()
|
||||
val := reflect.New(dst.Type().Elem()).Elem()
|
||||
|
||||
// The map entry should be this sequence of tokens:
|
||||
// < key : KEY value : VALUE >
|
||||
// However, implementations may omit key or value, and technically
|
||||
// we should support them in any order. See b/28924776 for a time
|
||||
// this went wrong.
|
||||
|
||||
tok := p.next()
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "<":
|
||||
terminator = ">"
|
||||
case "{":
|
||||
terminator = "}"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
for {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == terminator {
|
||||
break
|
||||
}
|
||||
switch tok.value {
|
||||
case "key":
|
||||
if err := p.consumeToken(":"); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
case "value":
|
||||
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
p.back()
|
||||
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||
}
|
||||
}
|
||||
|
||||
dst.SetMapIndex(key, val)
|
||||
continue
|
||||
}
|
||||
|
||||
// Check that it's not already set if it's not a repeated field.
|
||||
if !props.Repeated && fieldSet[name] {
|
||||
return p.errorf("non-repeated field %q was repeated", name)
|
||||
}
|
||||
|
||||
if err := p.checkForColon(props, dst.Type()); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Parse into the field.
|
||||
fieldSet[name] = true
|
||||
if err := p.readAny(dst, props); err != nil {
|
||||
if _, ok := err.(*RequiredNotSetError); !ok {
|
||||
return err
|
||||
}
|
||||
reqFieldErr = err
|
||||
}
|
||||
if props.Required {
|
||||
reqCount--
|
||||
}
|
||||
|
||||
if err := p.consumeOptionalSeparator(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if reqCount > 0 {
|
||||
return p.missingRequiredFieldError(sv)
|
||||
}
|
||||
return reqFieldErr
|
||||
}
|
||||
|
||||
// consumeExtName consumes extension name or expanded Any type URL and the
|
||||
// following ']'. It returns the name or URL consumed.
|
||||
func (p *textParser) consumeExtName() (string, error) {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return "", tok.err
|
||||
}
|
||||
|
||||
// If extension name or type url is quoted, it's a single token.
|
||||
if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
|
||||
name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return name, p.consumeToken("]")
|
||||
}
|
||||
|
||||
// Consume everything up to "]"
|
||||
var parts []string
|
||||
for tok.value != "]" {
|
||||
parts = append(parts, tok.value)
|
||||
tok = p.next()
|
||||
if tok.err != nil {
|
||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||
}
|
||||
if p.done && tok.value != "]" {
|
||||
return "", p.errorf("unclosed type_url or extension name")
|
||||
}
|
||||
}
|
||||
return strings.Join(parts, ""), nil
|
||||
}
|
||||
|
||||
// consumeOptionalSeparator consumes an optional semicolon or comma.
|
||||
// It is used in readStruct to provide backward compatibility.
|
||||
func (p *textParser) consumeOptionalSeparator() error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value != ";" && tok.value != "," {
|
||||
p.back()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "" {
|
||||
return p.errorf("unexpected EOF")
|
||||
}
|
||||
|
||||
switch fv := v; fv.Kind() {
|
||||
case reflect.Slice:
|
||||
at := v.Type()
|
||||
if at.Elem().Kind() == reflect.Uint8 {
|
||||
// Special case for []byte
|
||||
if tok.value[0] != '"' && tok.value[0] != '\'' {
|
||||
// Deliberately written out here, as the error after
|
||||
// this switch statement would write "invalid []byte: ...",
|
||||
// which is not as user-friendly.
|
||||
return p.errorf("invalid string: %v", tok.value)
|
||||
}
|
||||
bytes := []byte(tok.unquoted)
|
||||
fv.Set(reflect.ValueOf(bytes))
|
||||
return nil
|
||||
}
|
||||
// Repeated field.
|
||||
if tok.value == "[" {
|
||||
// Repeated field with list notation, like [1,2,3].
|
||||
for {
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
err := p.readAny(fv.Index(fv.Len()-1), props)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tok := p.next()
|
||||
if tok.err != nil {
|
||||
return tok.err
|
||||
}
|
||||
if tok.value == "]" {
|
||||
break
|
||||
}
|
||||
if tok.value != "," {
|
||||
return p.errorf("Expected ']' or ',' found %q", tok.value)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// One value of the repeated field.
|
||||
p.back()
|
||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||
case reflect.Bool:
|
||||
// true/1/t/True or false/f/0/False.
|
||||
switch tok.value {
|
||||
case "true", "1", "t", "True":
|
||||
fv.SetBool(true)
|
||||
return nil
|
||||
case "false", "0", "f", "False":
|
||||
fv.SetBool(false)
|
||||
return nil
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
v := tok.value
|
||||
// Ignore 'f' for compatibility with output generated by C++, but don't
|
||||
// remove 'f' when the value is "-inf" or "inf".
|
||||
if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
|
||||
v = v[:len(v)-1]
|
||||
}
|
||||
if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
|
||||
fv.SetFloat(f)
|
||||
return nil
|
||||
}
|
||||
case reflect.Int32:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
if len(props.Enum) == 0 {
|
||||
break
|
||||
}
|
||||
m, ok := enumValueMaps[props.Enum]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
x, ok := m[tok.value]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
fv.SetInt(int64(x))
|
||||
return nil
|
||||
case reflect.Int64:
|
||||
if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
|
||||
fv.SetInt(x)
|
||||
return nil
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// A basic field (indirected through pointer), or a repeated message/group
|
||||
p.back()
|
||||
fv.Set(reflect.New(fv.Type().Elem()))
|
||||
return p.readAny(fv.Elem(), props)
|
||||
case reflect.String:
|
||||
if tok.value[0] == '"' || tok.value[0] == '\'' {
|
||||
fv.SetString(tok.unquoted)
|
||||
return nil
|
||||
}
|
||||
case reflect.Struct:
|
||||
var terminator string
|
||||
switch tok.value {
|
||||
case "{":
|
||||
terminator = "}"
|
||||
case "<":
|
||||
terminator = ">"
|
||||
default:
|
||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||
}
|
||||
// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
|
||||
return p.readStruct(fv, terminator)
|
||||
case reflect.Uint32:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
|
||||
fv.SetUint(uint64(x))
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint64:
|
||||
if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
|
||||
fv.SetUint(x)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return p.errorf("invalid %v: %v", v.Type(), tok.value)
|
||||
}
|
||||
|
||||
// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
|
||||
// before starting to unmarshal, so any existing data in pb is always removed.
|
||||
// If a required field is not set and no other error occurs,
|
||||
// UnmarshalText returns *RequiredNotSetError.
|
||||
func UnmarshalText(s string, pb Message) error {
|
||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||
return um.UnmarshalText([]byte(s))
|
||||
}
|
||||
pb.Reset()
|
||||
v := reflect.ValueOf(pb)
|
||||
return newTextParser(s).readStruct(v.Elem(), "")
|
||||
}
|
||||
2
vendor/github.com/status-im/migrate/v4/.golangci.yml
generated
vendored
@@ -1,6 +1,6 @@
run:
  # timeout for analysis, e.g. 30s, 5m, default is 1m
  deadline: 2m
  timeout: 2m
linters:
  enable:
    #- golint

11
vendor/github.com/status-im/migrate/v4/.travis.yml
generated
vendored
@@ -6,8 +6,8 @@ matrix:
|
||||
- go: master
|
||||
include:
|
||||
# Supported versions of Go: https://golang.org/dl/
|
||||
- go: "1.11.x"
|
||||
- go: "1.12.x"
|
||||
- go: "1.13.x"
|
||||
- go: master
|
||||
|
||||
go_import_path: github.com/golang-migrate/migrate
|
||||
@@ -26,8 +26,15 @@ cache:
|
||||
directories:
|
||||
- $GOPATH/pkg
|
||||
|
||||
|
||||
before_install:
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.16.0
|
||||
# Update docker to latest version: https://docs.travis-ci.com/user/docker/#installing-a-newer-docker-version
|
||||
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
|
||||
- sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
|
||||
- sudo apt-get update
|
||||
- sudo apt-get -y -o Dpkg::Options::="--force-confnew" install docker-ce
|
||||
# Install golangci-lint
|
||||
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.20.0
|
||||
- echo "TRAVIS_GO_VERSION=${TRAVIS_GO_VERSION}"
|
||||
|
||||
install:
|
||||
|
||||
8
vendor/github.com/status-im/migrate/v4/Dockerfile
generated
vendored
@@ -1,4 +1,4 @@
FROM golang:1.12-alpine3.9 AS downloader
FROM golang:1.12-alpine3.10 AS downloader
ARG VERSION

RUN apk add --no-cache git gcc musl-dev
@@ -9,11 +9,11 @@ COPY . ./

ENV GO111MODULE=on
ENV DATABASES="postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver"
ENV SOURCES="file go_bindata github aws_s3 google_cloud_storage godoc_vfs gitlab"
ENV SOURCES="file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab"

RUN go build -a -o build/migrate.linux-386 -ldflags="-X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate
RUN go build -a -o build/migrate.linux-386 -ldflags="-s -w -X main.Version=${VERSION}" -tags "$DATABASES $SOURCES" ./cmd/migrate

FROM alpine:3.9
FROM alpine:3.10

RUN apk add --no-cache ca-certificates

6
vendor/github.com/status-im/migrate/v4/FAQ.md
generated
vendored
@@ -68,3 +68,9 @@
Database-specific locking features are used by *some* database drivers to prevent multiple instances of migrate from running migrations at the same time
the same database at the same time. For example, the MySQL driver uses the `GET_LOCK` function, while the Postgres driver uses
the `pg_advisory_lock` function.

#### Do I need to create a table for tracking migration version used?
No, it is done automatically.

#### Can I use migrate with a non-Go project?
Yes, you can use the migrate CLI in a non-Go project, but there are probably other libraries/frameworks available that offer better test and deploy integrations in that language/framework.
43
vendor/github.com/status-im/migrate/v4/GETTING_STARTED.md
generated
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
# Getting started
|
||||
Before you start, you should understand the concept of forward/up and reverse/down database migrations.
|
||||
|
||||
Configure a database for your application. Make sure that your database driver is supported [here](README.md#databases)
|
||||
|
||||
## Create migrations
|
||||
Create some migrations using migrate CLI. Here is an example:
|
||||
```
|
||||
migrate create -ext sql -dir db/migrations -seq create_users_table
|
||||
```
|
||||
Once you create your files, you should fill them.
|
||||
|
||||
**IMPORTANT:** In a project developed by more than one person there is a chance of migrations inconsistency - e.g. two developers can create conflicting migrations, and the developer that created his migration later gets it merged to the repository first.
|
||||
Developers and Teams should keep an eye on such cases (especially during code review).
|
||||
[Here](https://github.com/golang-migrate/migrate/issues/179#issuecomment-475821264) is the issue summary if you would like to read more.
|
||||
|
||||
Consider making your migrations idempotent - we can run the same sql code twice in a row with the same result. This makes our migrations more robust. On the other hand, it causes slightly less control over database schema - e.g. let's say you forgot to drop the table in down migration. You run down migration - the table is still there. When you run up migration again - `CREATE TABLE` would return an error, helping you find an issue in down migration, while `CREATE TABLE IF NOT EXISTS` would not. Use those conditions wisely.
|
||||
|
||||
In case you would like to run several commands/queries in one migration, you should wrap them in a transaction (if your database supports it).
|
||||
This way if one of commands fails, our database will remain unchanged.
|
||||
|
||||
## Run migrations
|
||||
Run your migrations through the CLI or your app and check if they applied expected changes.
|
||||
Just to give you an idea:
|
||||
```
|
||||
migrate -database YOUR_DATBASE_URL -path PATH_TO_YOUR_MIGRATIONS up
|
||||
```
|
||||
|
||||
Just add the code to your app and you're ready to go!
|
||||
|
||||
Before commiting your migrations you should run your migrations up, down, and then up again to see if migrations are working properly both ways.
|
||||
(e.g. if you created a table in a migration but reverse migration did not delete it, you will encounter an error when running the forward migration again)
|
||||
It's also worth checking your migrations in a separate, containerized environment. You can find some tools in the end of this document.
|
||||
|
||||
**IMPORTANT:** If you would like to run multiple instances of your app on different machines be sure to use a database that supports locking when running migrations. Otherwise you may encounter issues.
|
||||
|
||||
## Further reading:
|
||||
- [PostgreSQL tutorial](database/postgres/TUTORIAL.md)
|
||||
- [Best practices](MIGRATIONS.md)
|
||||
- [FAQ](FAQ.md)
|
||||
- Tools for testing your migrations in a container:
|
||||
- https://github.com/dhui/dktest
|
||||
- https://github.com/ory/dockertest
|
||||
11
vendor/github.com/status-im/migrate/v4/MIGRATIONS.md
generated
vendored
@@ -44,9 +44,14 @@ It is suggested that the version number of corresponding `up` and `down` migrati
files be equivalent for clarity, but they are allowed to differ so long as the
relative ordering of the migrations is preserved.

The migration files are permitted to be empty, so in the event that a migration
is a no-op or is irreversible, it is recommended to still include both migration
files, and either leaving them empty or adding a comment as appropriate.
The migration files are permitted to be "empty", in the event that a migration
is a no-op or is irreversible. It is recommended to still include both migration
files by making the whole migration file consist of a comment.
If your database does not support comments, then deleting the migration file will also work.
Note, an actual empty file (e.g. a 0 byte file) may cause issues with your database since migrate
will attempt to run an empty query. In this case, deleting the migration file will also work.
For the rational of this behavior see:
[#244 (comment)](https://github.com/golang-migrate/migrate/issues/244#issuecomment-510758270)

## Migration Content Format

23
vendor/github.com/status-im/migrate/v4/Makefile
generated
vendored
@@ -1,4 +1,4 @@
|
||||
SOURCE ?= file go_bindata github aws_s3 google_cloud_storage godoc_vfs gitlab
|
||||
SOURCE ?= file go_bindata github github_ee aws_s3 google_cloud_storage godoc_vfs gitlab
|
||||
DATABASE ?= postgres mysql redshift cassandra spanner cockroachdb clickhouse mongodb sqlserver
|
||||
VERSION ?= $(shell git describe --tags 2>/dev/null | cut -c 2-)
|
||||
TEST_FLAGS ?=
|
||||
@@ -27,27 +27,14 @@ test-short:
|
||||
test:
|
||||
@-rm -r $(COVERAGE_DIR)
|
||||
@mkdir $(COVERAGE_DIR)
|
||||
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/_$$(RAND).txt -bench=. -benchmem -timeout 20m'
|
||||
@echo 'mode: atomic' > $(COVERAGE_DIR)/combined.txt
|
||||
@cat $(COVERAGE_DIR)/_*.txt | grep -v 'mode: atomic' >> $(COVERAGE_DIR)/combined.txt
|
||||
make test-with-flags TEST_FLAGS='-v -race -covermode atomic -coverprofile $$(COVERAGE_DIR)/combined.txt -bench=. -benchmem -timeout 20m'
|
||||
|
||||
|
||||
test-with-flags:
|
||||
@echo SOURCE: $(SOURCE)
|
||||
@echo SOURCE: $(SOURCE)
|
||||
@echo DATABASE: $(DATABASE)
|
||||
|
||||
@go test $(TEST_FLAGS) .
|
||||
@go test $(TEST_FLAGS) ./cli/...
|
||||
@go test $(TEST_FLAGS) ./database
|
||||
@go test $(TEST_FLAGS) ./testing/...
|
||||
|
||||
@echo -n '$(SOURCE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./source/{}
|
||||
@go test $(TEST_FLAGS) ./source/testing/...
|
||||
@go test $(TEST_FLAGS) ./source/stub/...
|
||||
|
||||
@echo -n '$(DATABASE)' | tr -s ' ' '\n' | xargs -I{} go test $(TEST_FLAGS) ./database/{}
|
||||
@go test $(TEST_FLAGS) ./database/testing/...
|
||||
@go test $(TEST_FLAGS) ./database/stub/...
|
||||
@go test $(TEST_FLAGS) ./...
|
||||
|
||||
|
||||
kill-orphaned-docker-containers:
|
||||
@@ -84,7 +71,7 @@ rewrite-import-paths:
|
||||
docs:
|
||||
-make kill-docs
|
||||
nohup godoc -play -http=127.0.0.1:6064 </dev/null >/dev/null 2>&1 & echo $$! > .godoc.pid
|
||||
cat .godoc.pid
|
||||
cat .godoc.pid
|
||||
|
||||
|
||||
kill-docs:
|
||||
|
||||
96
vendor/github.com/status-im/migrate/v4/README.md
generated
vendored
@@ -3,19 +3,19 @@
|
||||
[](https://coveralls.io/github/golang-migrate/migrate?branch=master)
|
||||
[](https://packagecloud.io/golang-migrate/migrate?filter=debs)
|
||||
[](https://hub.docker.com/r/migrate/migrate/)
|
||||

|
||||

|
||||
[](https://github.com/golang-migrate/migrate/releases)
|
||||
|
||||
[](https://goreportcard.com/report/github.com/golang-migrate/migrate)
|
||||
|
||||
# migrate
|
||||
|
||||
__Database migrations written in Go. Use as [CLI](#cli-usage) or import as [library](#use-in-your-go-project).__
|
||||
|
||||
* Migrate reads migrations from [sources](#migration-sources)
|
||||
* Migrate reads migrations from [sources](#migration-sources)
|
||||
and applies them in correct order to a [database](#databases).
|
||||
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
|
||||
* Drivers are "dumb", migrate glues everything together and makes sure the logic is bulletproof.
|
||||
(Keeps the drivers lightweight, too.)
|
||||
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
|
||||
* Database drivers don't assume things or try to correct user input. When in doubt, fail.
|
||||
|
||||
Forked from [mattes/migrate](https://github.com/mattes/migrate)
|
||||
|
||||
@@ -23,21 +23,21 @@ Forked from [mattes/migrate](https://github.com/mattes/migrate)
|
||||
|
||||
Database drivers run migrations. [Add a new database?](database/driver.go)
|
||||
|
||||
* [PostgreSQL](database/postgres)
|
||||
* [Redshift](database/redshift)
|
||||
* [Ql](database/ql)
|
||||
* [Cassandra](database/cassandra)
|
||||
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
|
||||
* [MySQL/ MariaDB](database/mysql)
|
||||
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
|
||||
* [MongoDB](database/mongodb)
|
||||
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
|
||||
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
|
||||
* [Google Cloud Spanner](database/spanner)
|
||||
* [CockroachDB](database/cockroachdb)
|
||||
* [ClickHouse](database/clickhouse)
|
||||
* [Firebird](database/firebird) ([todo #49](https://github.com/golang-migrate/migrate/issues/49))
|
||||
* [MS SQL Server](database/sqlserver)
|
||||
* [PostgreSQL](database/postgres)
|
||||
* [Redshift](database/redshift)
|
||||
* [Ql](database/ql)
|
||||
* [Cassandra](database/cassandra)
|
||||
* [SQLite](database/sqlite3) ([todo #165](https://github.com/mattes/migrate/issues/165))
|
||||
* [MySQL/ MariaDB](database/mysql)
|
||||
* [Neo4j](database/neo4j) ([todo #167](https://github.com/mattes/migrate/issues/167))
|
||||
* [MongoDB](database/mongodb)
|
||||
* [CrateDB](database/crate) ([todo #170](https://github.com/mattes/migrate/issues/170))
|
||||
* [Shell](database/shell) ([todo #171](https://github.com/mattes/migrate/issues/171))
|
||||
* [Google Cloud Spanner](database/spanner)
|
||||
* [CockroachDB](database/cockroachdb)
|
||||
* [ClickHouse](database/clickhouse)
|
||||
* [Firebird](database/firebird) ([todo #49](https://github.com/golang-migrate/migrate/issues/49))
|
||||
* [MS SQL Server](database/sqlserver)
|
||||
|
||||
### Database URLs
|
||||
|
||||
@@ -49,6 +49,7 @@ Explicitly, the following characters need to be escaped:
|
||||
`!`, `#`, `$`, `%`, `&`, `'`, `(`, `)`, `*`, `+`, `,`, `/`, `:`, `;`, `=`, `?`, `@`, `[`, `]`
|
||||
|
||||
It's easiest to always run the URL parts of your DB connection URL (e.g. username, password, etc) through an URL encoder. See the example Python snippets below:
|
||||
|
||||
```bash
|
||||
$ python3 -c 'import urllib.parse; print(urllib.parse.quote(input("String to encode: "), ""))'
|
||||
String to encode: FAKEpassword!#$%&'()*+,/:;=?@[]
|
||||
@@ -63,44 +64,43 @@ $
|
||||
|
||||
Source drivers read migrations from local or remote sources. [Add a new source?](source/driver.go)
|
||||
|
||||
* [Filesystem](source/file) - read from fileystem
|
||||
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
|
||||
* [Github](source/github) - read from remote Github repositories
|
||||
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
|
||||
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
|
||||
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
|
||||
|
||||
|
||||
* [Filesystem](source/file) - read from filesystem
|
||||
* [Go-Bindata](source/go_bindata) - read from embedded binary data ([jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata))
|
||||
* [Github](source/github) - read from remote Github repositories
|
||||
* [Github Enterprise](source/github_ee) - read from remote Github Enterprise repositories
|
||||
* [Gitlab](source/gitlab) - read from remote Gitlab repositories
|
||||
* [AWS S3](source/aws_s3) - read from Amazon Web Services S3
|
||||
* [Google Cloud Storage](source/google_cloud_storage) - read from Google Cloud Platform Storage
|
||||
|
||||
## CLI usage
|
||||
|
||||
* Simple wrapper around this library.
|
||||
* Handles ctrl+c (SIGINT) gracefully.
|
||||
* No config search paths, no config files, no magic ENV var injections.
|
||||
* Simple wrapper around this library.
|
||||
* Handles ctrl+c (SIGINT) gracefully.
|
||||
* No config search paths, no config files, no magic ENV var injections.
|
||||
|
||||
__[CLI Documentation](cli)__
|
||||
__[CLI Documentation](cmd/migrate)__
|
||||
|
||||
### Basic usage:
|
||||
### Basic usage
|
||||
|
||||
```
|
||||
```bash
|
||||
$ migrate -source file://path/to/migrations -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
### Docker usage
|
||||
|
||||
```
|
||||
```bash
|
||||
$ docker run -v {{ migration dir }}:/migrations --network host migrate/migrate
|
||||
-path=/migrations/ -database postgres://localhost:5432/database up 2
|
||||
```
|
||||
|
||||
## Use in your Go project
|
||||
|
||||
* API is stable and frozen for this release (v3 & v4).
|
||||
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
|
||||
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
|
||||
* Bring your own logger.
|
||||
* Uses `io.Reader` streams internally for low memory overhead.
|
||||
* Thread-safe and no goroutine leaks.
|
||||
* API is stable and frozen for this release (v3 & v4).
|
||||
* Uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies.
|
||||
* To help prevent database corruptions, it supports graceful stops via `GracefulStop chan bool`.
|
||||
* Bring your own logger.
|
||||
* Uses `io.Reader` streams internally for low memory overhead.
|
||||
* Thread-safe and no goroutine leaks.
|
||||
|
||||
__[Go Documentation](https://godoc.org/github.com/golang-migrate/migrate)__
|
||||
|
||||
@@ -140,11 +140,21 @@ func main() {
|
||||
}
|
||||
```
|
||||
|
||||
## Getting started
|
||||
|
||||
Go to [getting started](GETTING_STARTED.md)
|
||||
|
||||
## Tutorials
|
||||
|
||||
- [PostgreSQL](database/postgres/TUTORIAL.md)
|
||||
|
||||
(more tutorials to come)
|
||||
|
||||
## Migration files
|
||||
|
||||
Each migration has an up and down migration. [Why?](FAQ.md#why-two-separate-files-up-and-down-for-a-migration)
|
||||
|
||||
```
|
||||
```bash
|
||||
1481574547_create_users_table.up.sql
|
||||
1481574547_create_users_table.down.sql
|
||||
```
|
||||
@@ -166,8 +176,6 @@ read the [development guide](CONTRIBUTING.md).
|
||||
|
||||
Also have a look at the [FAQ](FAQ.md).
|
||||
|
||||
|
||||
|
||||
---
|
||||
|
||||
Looking for alternatives? [https://awesome-go.com/#database](https://awesome-go.com/#database).
|
||||
|
||||
8
vendor/github.com/status-im/migrate/v4/go.mod
generated
vendored
@@ -7,10 +7,11 @@ require (
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
|
||||
github.com/cockroachdb/apd v1.1.0 // indirect
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c
|
||||
github.com/containerd/containerd v1.2.7 // indirect
|
||||
github.com/cznic/ql v1.2.0
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3
|
||||
github.com/dhui/dktest v0.3.0
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282
|
||||
github.com/fsouza/fake-gcs-server v1.7.0
|
||||
github.com/go-sql-driver/mysql v1.4.1
|
||||
github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4
|
||||
@@ -28,7 +29,7 @@ require (
|
||||
github.com/kshvakov/clickhouse v1.3.5
|
||||
github.com/lib/pq v1.0.0
|
||||
github.com/mattn/go-sqlite3 v1.10.0
|
||||
github.com/mongodb/mongo-go-driver v0.3.0
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8
|
||||
github.com/pkg/errors v0.8.1 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
@@ -40,6 +41,7 @@ require (
|
||||
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c // indirect
|
||||
github.com/xdg/stringprep v1.0.0 // indirect
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b // indirect
|
||||
go.mongodb.org/mongo-driver v1.1.0
|
||||
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 // indirect
|
||||
golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6
|
||||
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a // indirect
|
||||
@@ -52,3 +54,5 @@ require (
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb
|
||||
google.golang.org/grpc v1.20.1 // indirect
|
||||
)
|
||||
|
||||
go 1.12
|
||||
|
||||
12
vendor/github.com/status-im/migrate/v4/go.sum
generated
vendored
@@ -26,6 +26,8 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0=
|
||||
github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk=
|
||||
github.com/containerd/containerd v1.2.7 h1:8lqLbl7u1j3MmiL9cJ/O275crSq7bfwUayvvatEupQk=
|
||||
github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07 h1:UHFGPvSxX4C4YBApSPvmUfL8tTvWLj2ryqvT9K4Jcuk=
|
||||
github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
|
||||
github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f h1:7uSNgsgcarNk4oiN/nNkO0J7KAjlsF5Yv5Gf/tFdHas=
|
||||
@@ -56,8 +58,8 @@ github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c
|
||||
github.com/docker/distribution v2.7.0+incompatible h1:neUDAlf3wX6Ml4HdqTrbcOHXtfRN0TFIwt6YFL7N9RU=
|
||||
github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf h1:2v/98rHzs3v6X0AHtoCH9u+e56SdnpogB1Z2fFe1KqQ=
|
||||
github.com/docker/docker v0.7.3-0.20190108045446-77df18c24acf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 h1:mzrx39dGtGq0VEnTHjnakmczd4uFbhx2cZU3BJDsLdc=
|
||||
github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=
|
||||
@@ -147,8 +149,8 @@ github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
|
||||
github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0 h1:00tKWMrabkVU1e57/TTP4ZBIfhn/wmjlSiRnIM9d0T8=
|
||||
github.com/mongodb/mongo-go-driver v0.3.0/go.mod h1:NK/HWDIIZkaYsnYa0hmtP443T5ELr0KDecmIioVuuyU=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=
|
||||
github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
|
||||
@@ -196,6 +198,8 @@ github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
|
||||
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=
|
||||
gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
|
||||
go.mongodb.org/mongo-driver v1.1.0 h1:aeOqSrhl9eDRAap/3T5pCfMBEBxZ0vuXBP+RMtp2KX8=
|
||||
go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.opencensus.io v0.20.1 h1:pMEjRZ1M4ebWGikflH7nQpV6+Zr88KBMA2XJD3sbijw=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
|
||||
|
||||
25
vendor/github.com/status-im/migrate/v4/internal/url/url.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
package url

import (
	"errors"
	"strings"
)

var errNoScheme = errors.New("no scheme")
var errEmptyURL = errors.New("URL cannot be empty")

// schemeFromURL returns the scheme from a URL string
func SchemeFromURL(url string) (string, error) {
	if url == "" {
		return "", errEmptyURL
	}

	i := strings.Index(url, ":")

	// No : or : is the first character.
	if i < 1 {
		return "", errNoScheme
	}

	return url[0:i], nil
}
15
vendor/github.com/status-im/migrate/v4/migrate.go
generated
vendored
@@ -7,12 +7,14 @@ package migrate
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-multierror"
|
||||
|
||||
"github.com/golang-migrate/migrate/v4/database"
|
||||
iurl "github.com/status-im/migrate/v4/internal/url"
|
||||
"github.com/golang-migrate/migrate/v4/source"
|
||||
)
|
||||
|
||||
@@ -85,13 +87,13 @@ type Migrate struct {
|
||||
func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := sourceSchemeFromURL(sourceURL)
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m.sourceName = sourceName
|
||||
|
||||
databaseName, err := databaseSchemeFromURL(databaseURL)
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -119,7 +121,7 @@ func New(sourceURL, databaseURL string) (*Migrate, error) {
|
||||
func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInstance database.Driver) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
sourceName, err := schemeFromURL(sourceURL)
|
||||
sourceName, err := iurl.SchemeFromURL(sourceURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -145,7 +147,7 @@ func NewWithDatabaseInstance(sourceURL string, databaseName string, databaseInst
|
||||
func NewWithSourceInstance(sourceName string, sourceInstance source.Driver, databaseURL string) (*Migrate, error) {
|
||||
m := newCommon()
|
||||
|
||||
databaseName, err := schemeFromURL(databaseURL)
|
||||
databaseName, err := iurl.SchemeFromURL(databaseURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -804,6 +806,7 @@ func (m *Migrate) versionExists(version uint) (result error) {
|
||||
return err
|
||||
}
|
||||
|
||||
m.logErr(fmt.Errorf("no migration found for version %d", version))
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
@@ -950,7 +953,7 @@ func (m *Migrate) unlock() error {
|
||||
// if a prevErr is not nil.
|
||||
func (m *Migrate) unlockErr(prevErr error) error {
|
||||
if err := m.unlock(); err != nil {
|
||||
return NewMultiError(prevErr, err)
|
||||
return multierror.Append(prevErr, err)
|
||||
}
|
||||
return prevErr
|
||||
}
|
||||
|
||||
42
vendor/github.com/status-im/migrate/v4/util.go
generated
vendored
@@ -1,18 +1,22 @@
|
||||
package migrate
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
nurl "net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MultiError holds multiple errors.
|
||||
//
|
||||
// Deprecated: Use github.com/hashicorp/go-multierror instead
|
||||
type MultiError struct {
|
||||
Errs []error
|
||||
}
|
||||
|
||||
// NewMultiError returns an error type holding multiple errors.
|
||||
//
|
||||
// Deprecated: Use github.com/hashicorp/go-multierror instead
|
||||
//
|
||||
func NewMultiError(errs ...error) MultiError {
|
||||
compactErrs := make([]error, 0)
|
||||
for _, e := range errs {
|
||||
@@ -44,42 +48,6 @@ func suint(n int) uint {
|
||||
return uint(n)
|
||||
}
|
||||
|
||||
var errNoScheme = errors.New("no scheme")
|
||||
var errEmptyURL = errors.New("URL cannot be empty")
|
||||
|
||||
func sourceSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("source: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
func databaseSchemeFromURL(url string) (string, error) {
|
||||
u, err := schemeFromURL(url)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("database: %v", err)
|
||||
}
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// schemeFromURL returns the scheme from a URL string
|
||||
func schemeFromURL(url string) (string, error) {
|
||||
if url == "" {
|
||||
return "", errEmptyURL
|
||||
}
|
||||
|
||||
u, err := nurl.Parse(url)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(u.Scheme) == 0 {
|
||||
return "", errNoScheme
|
||||
}
|
||||
|
||||
return u.Scheme, nil
|
||||
}
|
||||
|
||||
// FilterCustomQuery filters all query values starting with `x-`
|
||||
func FilterCustomQuery(u *nurl.URL) *nurl.URL {
|
||||
ux := *u
|
||||
|
||||
@@ -1,3 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.
# visible at https://tip.golang.org/AUTHORS.
@@ -1,3 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.
# visible at https://tip.golang.org/CONTRIBUTORS.
@@ -1,16 +1,16 @@
|
||||
Copyright 2010 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2018 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
@@ -25,4 +25,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
22
vendor/google.golang.org/protobuf/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
773
vendor/google.golang.org/protobuf/encoding/prototext/decode.go
generated
vendored
Normal file
@@ -0,0 +1,773 @@
7
vendor/google.golang.org/protobuf/encoding/prototext/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package prototext marshals and unmarshals protocol buffer messages as the
// textproto format.
package prototext
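The package comment above summarizes what the vendored prototext package provides. As a minimal usage sketch (not part of this diff; the well-known durationpb type is used purely to keep the example self-contained, any generated message works the same way), application code calls it roughly like this:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Any generated proto.Message works here; durationpb is only for illustration.
	msg := durationpb.New(90 * time.Second)

	// Marshal to the textproto format (Multiline gives indented output).
	out, err := prototext.MarshalOptions{Multiline: true}.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", out)

	// Round-trip: parse the textproto back into a fresh message.
	parsed := &durationpb.Duration{}
	if err := prototext.Unmarshal(out, parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.AsDuration()) // 1m30s
}
```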
371
vendor/google.golang.org/protobuf/encoding/prototext/encode.go
generated
vendored
Normal file
@@ -0,0 +1,371 @@
538
vendor/google.golang.org/protobuf/encoding/protowire/wire.go
generated
vendored
Normal file
@@ -0,0 +1,538 @@
318
vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
generated
vendored
Normal file
@@ -0,0 +1,318 @@
|
||||
}
|
||||
default:
|
||||
rs.Append(rv, descriptorAccessors[rt]...)
|
||||
}
|
||||
if rv.MethodByName("GoType").IsValid() {
|
||||
rs.Append(rv, "GoType")
|
||||
}
|
||||
}
|
||||
return start + rs.Join() + end
|
||||
}
|
||||
|
||||
type records struct {
|
||||
recs [][2]string
|
||||
allowMulti bool
|
||||
}
|
||||
|
||||
func (rs *records) Append(v reflect.Value, accessors ...string) {
|
||||
for _, a := range accessors {
|
||||
var rv reflect.Value
|
||||
if m := v.MethodByName(a); m.IsValid() {
|
||||
rv = m.Call(nil)[0]
|
||||
}
|
||||
if v.Kind() == reflect.Struct && !rv.IsValid() {
|
||||
rv = v.FieldByName(a)
|
||||
}
|
||||
if !rv.IsValid() {
|
||||
panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a))
|
||||
}
|
||||
if _, ok := rv.Interface().(pref.Value); ok {
|
||||
rv = rv.MethodByName("Interface").Call(nil)[0]
|
||||
if !rv.IsNil() {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore zero values.
|
||||
var isZero bool
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Slice:
|
||||
isZero = rv.IsNil()
|
||||
case reflect.Bool:
|
||||
isZero = rv.Bool() == false
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
isZero = rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
isZero = rv.Uint() == 0
|
||||
case reflect.String:
|
||||
isZero = rv.String() == ""
|
||||
}
|
||||
if n, ok := rv.Interface().(list); ok {
|
||||
isZero = n.Len() == 0
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
|
||||
// Format the value.
|
||||
var s string
|
||||
v := rv.Interface()
|
||||
switch v := v.(type) {
|
||||
case list:
|
||||
s = formatListOpt(v, false, rs.allowMulti)
|
||||
case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor:
|
||||
s = string(v.(pref.Descriptor).Name())
|
||||
case pref.Descriptor:
|
||||
s = string(v.FullName())
|
||||
case string:
|
||||
s = strconv.Quote(v)
|
||||
case []byte:
|
||||
s = fmt.Sprintf("%q", v)
|
||||
default:
|
||||
s = fmt.Sprint(v)
|
||||
}
|
||||
rs.recs = append(rs.recs, [2]string{a, s})
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *records) Join() string {
|
||||
var ss []string
|
||||
|
||||
// In single line mode, simply join all records with commas.
|
||||
if !rs.allowMulti {
|
||||
for _, r := range rs.recs {
|
||||
ss = append(ss, r[0]+formatColon(0)+r[1])
|
||||
}
|
||||
return joinStrings(ss, false)
|
||||
}
|
||||
|
||||
// In allowMulti line mode, align single line records for more readable output.
|
||||
var maxLen int
|
||||
flush := func(i int) {
|
||||
for _, r := range rs.recs[len(ss):i] {
|
||||
ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1])
|
||||
}
|
||||
maxLen = 0
|
||||
}
|
||||
for i, r := range rs.recs {
|
||||
if isMulti := strings.Contains(r[1], "\n"); isMulti {
|
||||
flush(i)
|
||||
ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t"))
|
||||
} else if maxLen < len(r[0]) {
|
||||
maxLen = len(r[0])
|
||||
}
|
||||
}
|
||||
flush(len(rs.recs))
|
||||
return joinStrings(ss, true)
|
||||
}
|
||||
|
||||
func formatColon(padding int) string {
|
||||
// Deliberately introduce instability into the debug output to
|
||||
// discourage users from performing string comparisons.
|
||||
// This provides us flexibility to change the output in the future.
|
||||
if detrand.Bool() {
|
||||
return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0)
|
||||
} else {
|
||||
return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020)
|
||||
}
|
||||
}
|
||||
|
||||
func joinStrings(ss []string, isMulti bool) string {
|
||||
if len(ss) == 0 {
|
||||
return ""
|
||||
}
|
||||
if isMulti {
|
||||
return "\n\t" + strings.Join(ss, "\n\t") + "\n"
|
||||
}
|
||||
return strings.Join(ss, ", ")
|
||||
}
|
||||
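`FormatList` and `FormatDesc` above take a `fmt.State` and a verb so that descriptor types inside the protobuf module can implement `fmt.Formatter` and simply delegate, which is how `%v` versus `%+v`/`%#v` switches between single-line and multi-line output. Since `descfmt` is an internal package, the snippet below is only a hedged, self-contained sketch of that delegation pattern with made-up names (`record`, `formatOpt`), not code from this diff:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// record mimics a type that renders itself differently for %v and %+v,
// the same fmt.Formatter pattern the descfmt helpers are built around.
type record struct {
	Name   string
	Fields []string
}

// Format delegates to a helper with the (value, allowMulti) shape used above.
func (r record) Format(s fmt.State, verb rune) {
	allowMulti := verb == 'v' && (s.Flag('+') || s.Flag('#'))
	io.WriteString(s, formatOpt(r, allowMulti))
}

func formatOpt(r record, allowMulti bool) string {
	if !allowMulti {
		return r.Name + "{" + strings.Join(r.Fields, ", ") + "}"
	}
	return r.Name + "{\n\t" + strings.Join(r.Fields, "\n\t") + "\n}"
}

func main() {
	r := record{Name: "Message", Fields: []string{"Fields: [id, name]", "Syntax: proto3"}}
	fmt.Printf("%v\n", r)  // single-line form
	fmt.Printf("%+v\n", r) // multi-line form
}
```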
29
vendor/google.golang.org/protobuf/internal/descopts/options.go
generated
vendored
Normal file
@@ -0,0 +1,29 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package descopts contains the nil pointers to concrete descriptor options.
//
// This package exists as a form of reverse dependency injection so that certain
// packages (e.g., internal/filedesc and internal/filetype can avoid a direct
// dependency on the descriptor proto package).
package descopts

import pref "google.golang.org/protobuf/reflect/protoreflect"

// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
// is linked in, these variables will be populated.
//
// Each variable is populated with a nil pointer to the options struct.
var (
	File           pref.ProtoMessage
	Enum           pref.ProtoMessage
	EnumValue      pref.ProtoMessage
	Message        pref.ProtoMessage
	Field          pref.ProtoMessage
	Oneof          pref.ProtoMessage
	ExtensionRange pref.ProtoMessage
	Service        pref.ProtoMessage
	Method         pref.ProtoMessage
)
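descopts is a var-only package: lower-level packages read these `ProtoMessage` variables instead of importing the descriptor proto package directly, and the generated descriptor package fills them in from its `init` with typed nil pointers. A minimal, hedged sketch of that reverse-dependency-injection shape, collapsed into one file with hypothetical names (`Options`, `FileOptions`), might look like this:

```go
package main

import "fmt"

// Options is the interface the low-level code depends on (a stand-in for
// protoreflect.ProtoMessage); it never imports the concrete type.
type Options interface{ Describe() string }

// File is a nil-by-default hook, analogous to descopts.File: it stays nil
// unless the concrete options type is linked in and registers itself.
var File Options

// FileOptions stands in for the concrete generated options struct.
type FileOptions struct{}

func (*FileOptions) Describe() string { return "FileOptions" }

// In the real layout this init lives in the generated descriptor package,
// so merely linking that package populates the hook with a typed nil pointer.
func init() {
	File = (*FileOptions)(nil)
}

func main() {
	if File != nil {
		fmt.Println("options type available:", File.Describe())
	}
}
```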
69
vendor/google.golang.org/protobuf/internal/detrand/rand.go
generated
vendored
Normal file
@@ -0,0 +1,69 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package detrand provides deterministically random functionality.
//
// The pseudo-randomness of these functions is seeded by the program binary
// itself and guarantees that the output does not change within a program,
// while ensuring that the output is unstable across different builds.
package detrand

import (
	"encoding/binary"
	"hash/fnv"
	"os"
)

// Disable disables detrand such that all functions returns the zero value.
// This function is not concurrent-safe and must be called during program init.
func Disable() {
	randSeed = 0
}

// Bool returns a deterministically random boolean.
func Bool() bool {
	return randSeed%2 == 1
}

// Intn returns a deterministically random integer between 0 and n-1, inclusive.
func Intn(n int) int {
	if n <= 0 {
		panic("must be positive")
	}
	return int(randSeed % uint64(n))
}

// randSeed is a best-effort at an approximate hash of the Go binary.
var randSeed = binaryHash()

func binaryHash() uint64 {
	// Open the Go binary.
	s, err := os.Executable()
	if err != nil {
		return 0
	}
	f, err := os.Open(s)
	if err != nil {
		return 0
	}
	defer f.Close()

	// Hash the size and several samples of the Go binary.
	const numSamples = 8
	var buf [64]byte
	h := fnv.New64()
	fi, err := f.Stat()
	if err != nil {
		return 0
	}
	binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size()))
	h.Write(buf[:8])
	for i := int64(0); i < numSamples; i++ {
		if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil {
			return 0
		}
		h.Write(buf[:])
	}
	return h.Sum64()
}
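detrand's seed is fixed for the lifetime of a process but shifts whenever the binary changes, which is what formatColon above relies on to keep debug output unsuitable for string comparison. The standalone sketch below is a hedged analogue, not the vendored package: it uses hypothetical names and hashes only the executable's size with FNV, yet shows the same stable-within-a-run, unstable-across-builds behaviour.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
	"os"
)

// binarySeed is a simplified analogue of detrand's binaryHash: it hashes
// only the executable's size, so it is stable within one run of a binary
// but is likely to change when the binary is rebuilt.
func binarySeed() uint64 {
	path, err := os.Executable()
	if err != nil {
		return 0
	}
	fi, err := os.Stat(path)
	if err != nil {
		return 0
	}
	var buf [8]byte
	binary.LittleEndian.PutUint64(buf[:], uint64(fi.Size()))
	h := fnv.New64()
	h.Write(buf[:])
	return h.Sum64()
}

func main() {
	seed := binarySeed()
	// Every call in this process sees the same seed, so the "random"
	// boolean never flips mid-run; a rebuilt binary may flip it.
	fmt.Println("bool:", seed%2 == 1, "intn(10):", seed%10)
}
```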
Some files were not shown because too many files have changed in this diff.