.
m := makeMapFromTestFunc(runTest)
- if err := readJsonFile(path, m.Addr().Interface()); err != nil {
+ if err := readJSONFile(path, m.Addr().Interface()); err != nil {
t.Fatal(err)
}
diff --git a/tests/state_test.go b/tests/state_test.go
index 9ca5f18303..adec4feb2b 100644
--- a/tests/state_test.go
+++ b/tests/state_test.go
@@ -36,14 +36,7 @@ func TestState(t *testing.T) {
st.skipLoad(`^stTransactionTest/zeroSigTransa[^/]*\.json`) // EIP-86 is not supported yet
// Expected failures:
st.fails(`^stRevertTest/RevertPrecompiledTouch\.json/EIP158`, "bug in test")
- st.fails(`^stRevertTest/RevertPrefoundEmptyOOG\.json/EIP158`, "bug in test")
st.fails(`^stRevertTest/RevertPrecompiledTouch\.json/Byzantium`, "bug in test")
- st.fails(`^stRevertTest/RevertPrefoundEmptyOOG\.json/Byzantium`, "bug in test")
- st.fails(`^stRandom2/randomStatetest64[45]\.json/(EIP150|Frontier|Homestead)/.*`, "known bug #15119")
- st.fails(`^stCreateTest/TransactionCollisionToEmpty\.json/EIP158/2`, "known bug ")
- st.fails(`^stCreateTest/TransactionCollisionToEmpty\.json/EIP158/3`, "known bug ")
- st.fails(`^stCreateTest/TransactionCollisionToEmpty\.json/Byzantium/2`, "known bug ")
- st.fails(`^stCreateTest/TransactionCollisionToEmpty\.json/Byzantium/3`, "known bug ")
st.walk(t, stateTestDir, func(t *testing.T, name string, test *StateTest) {
for _, subtest := range test.Subtests() {
diff --git a/tests/state_test_util.go b/tests/state_test_util.go
index 3b761bd771..84581fae18 100644
--- a/tests/state_test_util.go
+++ b/tests/state_test_util.go
@@ -126,8 +126,7 @@ func (t *StateTest) Run(subtest StateSubtest, vmconfig vm.Config) (*state.StateD
return nil, UnsupportedForkError{subtest.Fork}
}
block := t.genesis(config).ToBlock(nil)
- db, _ := ethdb.NewMemDatabase()
- statedb := MakePreState(db, t.json.Pre)
+ statedb := MakePreState(ethdb.NewMemDatabase(), t.json.Pre)
post := t.json.Post[subtest.Fork][subtest.Index]
msg, err := t.json.Tx.toMessage(post)
diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go
index 2028d2a278..8c3dac088c 100644
--- a/tests/transaction_test_util.go
+++ b/tests/transaction_test_util.go
@@ -72,9 +72,8 @@ func (tt *TransactionTest) Run(config *params.ChainConfig) error {
if err := rlp.DecodeBytes(tt.json.RLP, tx); err != nil {
if tt.json.Transaction == nil {
return nil
- } else {
- return fmt.Errorf("RLP decoding failed: %v", err)
}
+ return fmt.Errorf("RLP decoding failed: %v", err)
}
// Check sender derivation.
signer := types.MakeSigner(config, new(big.Int).SetUint64(uint64(tt.json.BlockNumber)))
diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go
index b365167a69..cb81c5b94e 100644
--- a/tests/vm_test_util.go
+++ b/tests/vm_test_util.go
@@ -79,8 +79,7 @@ type vmExecMarshaling struct {
}
func (t *VMTest) Run(vmconfig vm.Config) error {
- db, _ := ethdb.NewMemDatabase()
- statedb := MakePreState(db, t.json.Pre)
+ statedb := MakePreState(ethdb.NewMemDatabase(), t.json.Pre)
ret, gasRemaining, err := t.exec(statedb, vmconfig)
if t.json.GasRemaining == nil {
diff --git a/trie/iterator.go b/trie/iterator.go
index 76146c0d64..3bae8e186b 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -303,7 +303,7 @@ func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []
it.path = path
it.stack = append(it.stack, state)
if parentIndex != nil {
- *parentIndex += 1
+ *parentIndex++
}
}
@@ -380,7 +380,7 @@ func (it *differenceIterator) Next(bool) bool {
if !it.b.Next(true) {
return false
}
- it.count += 1
+ it.count++
if it.eof {
// a has reached eof, so we just return all elements from b
@@ -395,7 +395,7 @@ func (it *differenceIterator) Next(bool) bool {
it.eof = true
return true
}
- it.count += 1
+ it.count++
case 1:
// b is before a
return true
@@ -405,12 +405,12 @@ func (it *differenceIterator) Next(bool) bool {
if !it.b.Next(hasHash) {
return false
}
- it.count += 1
+ it.count++
if !it.a.Next(hasHash) {
it.eof = true
return true
}
- it.count += 1
+ it.count++
}
}
}
@@ -504,14 +504,14 @@ func (it *unionIterator) Next(descend bool) bool {
skipped := heap.Pop(it.items).(NodeIterator)
// Skip the whole subtree if the nodes have hashes; otherwise just skip this node
if skipped.Next(skipped.Hash() == common.Hash{}) {
- it.count += 1
+ it.count++
// If there are more elements, push the iterator back on the heap
heap.Push(it.items, skipped)
}
}
if least.Next(descend) {
- it.count += 1
+ it.count++
heap.Push(it.items, least)
}
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index dce1c78b5d..2a510b1c2d 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -289,7 +289,7 @@ func TestIteratorContinueAfterErrorDisk(t *testing.T) { testIteratorContinueA
func TestIteratorContinueAfterErrorMemonly(t *testing.T) { testIteratorContinueAfterError(t, true) }
func testIteratorContinueAfterError(t *testing.T, memonly bool) {
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
tr, _ := New(common.Hash{}, triedb)
@@ -376,7 +376,7 @@ func TestIteratorContinueAfterSeekErrorMemonly(t *testing.T) {
func testIteratorContinueAfterSeekError(t *testing.T, memonly bool) {
// Commit test trie to db, then remove the node containing "bars".
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
ctr, _ := New(common.Hash{}, triedb)
diff --git a/trie/node.go b/trie/node.go
index a7697fc0c6..02815042c6 100644
--- a/trie/node.go
+++ b/trie/node.go
@@ -123,17 +123,17 @@ func decodeNode(hash, buf []byte, cachegen uint16) (node, error) {
}
switch c, _ := rlp.CountValues(elems); c {
case 2:
- n, err := decodeShort(hash, buf, elems, cachegen)
+ n, err := decodeShort(hash, elems, cachegen)
return n, wrapError(err, "short")
case 17:
- n, err := decodeFull(hash, buf, elems, cachegen)
+ n, err := decodeFull(hash, elems, cachegen)
return n, wrapError(err, "full")
default:
return nil, fmt.Errorf("invalid number of list elements: %v", c)
}
}
-func decodeShort(hash, buf, elems []byte, cachegen uint16) (node, error) {
+func decodeShort(hash, elems []byte, cachegen uint16) (node, error) {
kbuf, rest, err := rlp.SplitString(elems)
if err != nil {
return nil, err
@@ -155,7 +155,7 @@ func decodeShort(hash, buf, elems []byte, cachegen uint16) (node, error) {
return &shortNode{key, r, flag}, nil
}
-func decodeFull(hash, buf, elems []byte, cachegen uint16) (*fullNode, error) {
+func decodeFull(hash, elems []byte, cachegen uint16) (*fullNode, error) {
n := &fullNode{flags: nodeFlag{hash: hash, gen: cachegen}}
for i := 0; i < 16; i++ {
cld, rest, err := decodeRef(elems, cachegen)
diff --git a/trie/proof_test.go b/trie/proof_test.go
index fff313d7fd..a3537787cc 100644
--- a/trie/proof_test.go
+++ b/trie/proof_test.go
@@ -36,7 +36,7 @@ func TestProof(t *testing.T) {
trie, vals := randomTrie(500)
root := trie.Hash()
for _, kv := range vals {
- proofs, _ := ethdb.NewMemDatabase()
+ proofs := ethdb.NewMemDatabase()
if trie.Prove(kv.k, 0, proofs) != nil {
t.Fatalf("missing key %x while constructing proof", kv.k)
}
@@ -53,7 +53,7 @@ func TestProof(t *testing.T) {
func TestOneElementProof(t *testing.T) {
trie := new(Trie)
updateString(trie, "k", "v")
- proofs, _ := ethdb.NewMemDatabase()
+ proofs := ethdb.NewMemDatabase()
trie.Prove([]byte("k"), 0, proofs)
if len(proofs.Keys()) != 1 {
t.Error("proof should have one element")
@@ -71,7 +71,7 @@ func TestVerifyBadProof(t *testing.T) {
trie, vals := randomTrie(800)
root := trie.Hash()
for _, kv := range vals {
- proofs, _ := ethdb.NewMemDatabase()
+ proofs := ethdb.NewMemDatabase()
trie.Prove(kv.k, 0, proofs)
if len(proofs.Keys()) == 0 {
t.Fatal("zero length proof")
@@ -109,7 +109,7 @@ func BenchmarkProve(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
kv := vals[keys[i%len(keys)]]
- proofs, _ := ethdb.NewMemDatabase()
+ proofs := ethdb.NewMemDatabase()
if trie.Prove(kv.k, 0, proofs); len(proofs.Keys()) == 0 {
b.Fatalf("zero length proof for %x", kv.k)
}
@@ -123,7 +123,7 @@ func BenchmarkVerifyProof(b *testing.B) {
var proofs []*ethdb.MemDatabase
for k := range vals {
keys = append(keys, k)
- proof, _ := ethdb.NewMemDatabase()
+ proof := ethdb.NewMemDatabase()
trie.Prove([]byte(k), 0, proof)
proofs = append(proofs, proof)
}
diff --git a/trie/secure_trie_test.go b/trie/secure_trie_test.go
index aedf5a1cde..d16d999684 100644
--- a/trie/secure_trie_test.go
+++ b/trie/secure_trie_test.go
@@ -28,18 +28,14 @@ import (
)
func newEmptySecure() *SecureTrie {
- diskdb, _ := ethdb.NewMemDatabase()
- triedb := NewDatabase(diskdb)
-
- trie, _ := NewSecure(common.Hash{}, triedb, 0)
+ trie, _ := NewSecure(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()), 0)
return trie
}
// makeTestSecureTrie creates a large enough secure trie for testing.
func makeTestSecureTrie() (*Database, *SecureTrie, map[string][]byte) {
// Create an empty trie
- diskdb, _ := ethdb.NewMemDatabase()
- triedb := NewDatabase(diskdb)
+ triedb := NewDatabase(ethdb.NewMemDatabase())
trie, _ := NewSecure(common.Hash{}, triedb, 0)
diff --git a/trie/sync.go b/trie/sync.go
index b573a9f732..4ae975d042 100644
--- a/trie/sync.go
+++ b/trie/sync.go
@@ -212,7 +212,7 @@ func (s *TrieSync) Process(results []SyncResult) (bool, int, error) {
}
// Commit flushes the data stored in the internal membatch out to persistent
-// storage, returning th enumber of items written and any occurred error.
+// storage, returning the number of items written and any occurred error.
func (s *TrieSync) Commit(dbw ethdb.Putter) (int, error) {
// Dump the membatch into a database dbw
for i, key := range s.membatch.order {
diff --git a/trie/sync_test.go b/trie/sync_test.go
index 4a720612b6..142a6f5b1a 100644
--- a/trie/sync_test.go
+++ b/trie/sync_test.go
@@ -27,8 +27,7 @@ import (
// makeTestTrie create a sample test trie to test node-wise reconstruction.
func makeTestTrie() (*Database, *Trie, map[string][]byte) {
// Create an empty trie
- diskdb, _ := ethdb.NewMemDatabase()
- triedb := NewDatabase(diskdb)
+ triedb := NewDatabase(ethdb.NewMemDatabase())
trie, _ := New(common.Hash{}, triedb)
// Fill it with some arbitrary data
@@ -89,18 +88,13 @@ func checkTrieConsistency(db *Database, root common.Hash) error {
// Tests that an empty trie is not scheduled for syncing.
func TestEmptyTrieSync(t *testing.T) {
- diskdbA, _ := ethdb.NewMemDatabase()
- triedbA := NewDatabase(diskdbA)
-
- diskdbB, _ := ethdb.NewMemDatabase()
- triedbB := NewDatabase(diskdbB)
-
- emptyA, _ := New(common.Hash{}, triedbA)
- emptyB, _ := New(emptyRoot, triedbB)
+ dbA := NewDatabase(ethdb.NewMemDatabase())
+ dbB := NewDatabase(ethdb.NewMemDatabase())
+ emptyA, _ := New(common.Hash{}, dbA)
+ emptyB, _ := New(emptyRoot, dbB)
for i, trie := range []*Trie{emptyA, emptyB} {
- diskdb, _ := ethdb.NewMemDatabase()
- if req := NewTrieSync(trie.Hash(), diskdb, nil).Missing(1); len(req) != 0 {
+ if req := NewTrieSync(trie.Hash(), ethdb.NewMemDatabase(), nil).Missing(1); len(req) != 0 {
t.Errorf("test %d: content requested for empty trie: %v", i, req)
}
}
@@ -116,7 +110,7 @@ func testIterativeTrieSync(t *testing.T, batch int) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
@@ -149,7 +143,7 @@ func TestIterativeDelayedTrieSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
@@ -187,7 +181,7 @@ func testIterativeRandomTrieSync(t *testing.T, batch int) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
@@ -228,7 +222,7 @@ func TestIterativeRandomDelayedTrieSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
@@ -275,7 +269,7 @@ func TestDuplicateAvoidanceTrieSync(t *testing.T) {
srcDb, srcTrie, srcData := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
@@ -315,7 +309,7 @@ func TestIncompleteTrieSync(t *testing.T) {
srcDb, srcTrie, _ := makeTestTrie()
// Create a destination trie and sync with the scheduler
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
sched := NewTrieSync(srcTrie.Hash(), diskdb, nil)
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 9972226288..f8e5fd12a1 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -43,8 +43,7 @@ func init() {
// Used for testing
func newEmpty() *Trie {
- diskdb, _ := ethdb.NewMemDatabase()
- trie, _ := New(common.Hash{}, NewDatabase(diskdb))
+ trie, _ := New(common.Hash{}, NewDatabase(ethdb.NewMemDatabase()))
return trie
}
@@ -68,8 +67,7 @@ func TestNull(t *testing.T) {
}
func TestMissingRoot(t *testing.T) {
- diskdb, _ := ethdb.NewMemDatabase()
- trie, err := New(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(diskdb))
+ trie, err := New(common.HexToHash("0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33"), NewDatabase(ethdb.NewMemDatabase()))
if trie != nil {
t.Error("New returned non-nil trie for invalid root")
}
@@ -82,7 +80,7 @@ func TestMissingNodeDisk(t *testing.T) { testMissingNode(t, false) }
func TestMissingNodeMemonly(t *testing.T) { testMissingNode(t, true) }
func testMissingNode(t *testing.T, memonly bool) {
- diskdb, _ := ethdb.NewMemDatabase()
+ diskdb := ethdb.NewMemDatabase()
triedb := NewDatabase(diskdb)
trie, _ := New(common.Hash{}, triedb)
@@ -413,8 +411,7 @@ func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
}
func runRandTest(rt randTest) bool {
- diskdb, _ := ethdb.NewMemDatabase()
- triedb := NewDatabase(diskdb)
+ triedb := NewDatabase(ethdb.NewMemDatabase())
tr, _ := New(common.Hash{}, triedb)
values := make(map[string]string) // tracks content of the trie
diff --git a/vendor/github.com/elastic/gosigar/CHANGELOG.md b/vendor/github.com/elastic/gosigar/CHANGELOG.md
index 12695e10ef..45262e7b8d 100644
--- a/vendor/github.com/elastic/gosigar/CHANGELOG.md
+++ b/vendor/github.com/elastic/gosigar/CHANGELOG.md
@@ -8,10 +8,21 @@ This project adheres to [Semantic Versioning](http://semver.org/).
### Fixed
+- Added missing runtime import for FreeBSD. #104
+
### Changed
### Deprecated
+## [0.9.0]
+
+### Added
+- Added support for huge TLB pages on Linux #97
+- Added support for big endian platform #100
+
+### Fixed
+- Add missing method for OpenBSD #99
+
## [0.8.0]
### Added
diff --git a/vendor/github.com/elastic/gosigar/README.md b/vendor/github.com/elastic/gosigar/README.md
index 2482620a83..ecdfc1c3c5 100644
--- a/vendor/github.com/elastic/gosigar/README.md
+++ b/vendor/github.com/elastic/gosigar/README.md
@@ -26,6 +26,7 @@ The features vary by operating system.
| FDUsage | X | | | | X |
| FileSystemList | X | X | X | X | X |
| FileSystemUsage | X | X | X | X | X |
+| HugeTLBPages | X | | | | |
| LoadAverage | X | X | | X | X |
| Mem | X | X | X | X | X |
| ProcArgs | X | X | X | | X |
diff --git a/vendor/github.com/elastic/gosigar/concrete_sigar.go b/vendor/github.com/elastic/gosigar/concrete_sigar.go
index 685aa6dedd..e3ee80a980 100644
--- a/vendor/github.com/elastic/gosigar/concrete_sigar.go
+++ b/vendor/github.com/elastic/gosigar/concrete_sigar.go
@@ -62,6 +62,12 @@ func (c *ConcreteSigar) GetSwap() (Swap, error) {
return s, err
}
+func (c *ConcreteSigar) GetHugeTLBPages() (HugeTLBPages, error) {
+ p := HugeTLBPages{}
+ err := p.Get()
+ return p, err
+}
+
func (c *ConcreteSigar) GetFileSystemUsage(path string) (FileSystemUsage, error) {
f := FileSystemUsage{}
err := f.Get(path)
diff --git a/vendor/github.com/elastic/gosigar/sigar_darwin.go b/vendor/github.com/elastic/gosigar/sigar_darwin.go
index f989f51608..a90b998c2e 100644
--- a/vendor/github.com/elastic/gosigar/sigar_darwin.go
+++ b/vendor/github.com/elastic/gosigar/sigar_darwin.go
@@ -91,6 +91,10 @@ func (self *Swap) Get() error {
return nil
}
+func (self *HugeTLBPages) Get() error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func (self *Cpu) Get() error {
var count C.mach_msg_type_number_t = C.HOST_CPU_LOAD_INFO_COUNT
var cpuload C.host_cpu_load_info_data_t
diff --git a/vendor/github.com/elastic/gosigar/sigar_freebsd.go b/vendor/github.com/elastic/gosigar/sigar_freebsd.go
index 602b4a0aad..9b2af639b6 100644
--- a/vendor/github.com/elastic/gosigar/sigar_freebsd.go
+++ b/vendor/github.com/elastic/gosigar/sigar_freebsd.go
@@ -4,6 +4,7 @@ package gosigar
import (
"io/ioutil"
+ "runtime"
"strconv"
"strings"
"unsafe"
@@ -97,6 +98,10 @@ func (self *ProcFDUsage) Get(pid int) error {
return nil
}
+func (self *HugeTLBPages) Get() error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func parseCpuStat(self *Cpu, line string) error {
fields := strings.Fields(line)
diff --git a/vendor/github.com/elastic/gosigar/sigar_interface.go b/vendor/github.com/elastic/gosigar/sigar_interface.go
index a956af604a..df79ae08d2 100644
--- a/vendor/github.com/elastic/gosigar/sigar_interface.go
+++ b/vendor/github.com/elastic/gosigar/sigar_interface.go
@@ -26,6 +26,7 @@ type Sigar interface {
GetLoadAverage() (LoadAverage, error)
GetMem() (Mem, error)
GetSwap() (Swap, error)
+	GetHugeTLBPages() (HugeTLBPages, error)
GetFileSystemUsage(string) (FileSystemUsage, error)
GetFDUsage() (FDUsage, error)
GetRusage(who int) (Rusage, error)
@@ -82,6 +83,15 @@ type Swap struct {
Free uint64
}
+type HugeTLBPages struct {
+ Total uint64
+ Free uint64
+ Reserved uint64
+ Surplus uint64
+ DefaultSize uint64
+ TotalAllocatedSize uint64
+}
+
type CpuList struct {
List []Cpu
}
diff --git a/vendor/github.com/elastic/gosigar/sigar_linux.go b/vendor/github.com/elastic/gosigar/sigar_linux.go
index cb1d3525b5..09f2e30b2f 100644
--- a/vendor/github.com/elastic/gosigar/sigar_linux.go
+++ b/vendor/github.com/elastic/gosigar/sigar_linux.go
@@ -45,6 +45,30 @@ func (self *FDUsage) Get() error {
})
}
+func (self *HugeTLBPages) Get() error {
+ table, err := parseMeminfo()
+ if err != nil {
+ return err
+ }
+
+ self.Total, _ = table["HugePages_Total"]
+ self.Free, _ = table["HugePages_Free"]
+ self.Reserved, _ = table["HugePages_Rsvd"]
+ self.Surplus, _ = table["HugePages_Surp"]
+ self.DefaultSize, _ = table["Hugepagesize"]
+
+ if totalSize, found := table["Hugetlb"]; found {
+ self.TotalAllocatedSize = totalSize
+ } else {
+		// If Hugetlb is not present, or huge pages of different sizes
+		// are used, this figure can be inaccurate.
+ // TODO (jsoriano): Extract information from /sys/kernel/mm/hugepages too
+ self.TotalAllocatedSize = (self.Total - self.Free + self.Reserved) * self.DefaultSize
+ }
+
+ return nil
+}
+
func (self *ProcFDUsage) Get(pid int) error {
err := readFile(procFileName(pid, "limits"), func(line string) bool {
if strings.HasPrefix(line, "Max open files") {
diff --git a/vendor/github.com/elastic/gosigar/sigar_linux_common.go b/vendor/github.com/elastic/gosigar/sigar_linux_common.go
index 8e5e7856f6..7ca6497622 100644
--- a/vendor/github.com/elastic/gosigar/sigar_linux_common.go
+++ b/vendor/github.com/elastic/gosigar/sigar_linux_common.go
@@ -379,12 +379,16 @@ func parseMeminfo() (map[string]uint64, error) {
return true // skip on errors
}
- num := strings.TrimLeft(fields[1], " ")
- val, err := strtoull(strings.Fields(num)[0])
+ valueUnit := strings.Fields(fields[1])
+ value, err := strtoull(valueUnit[0])
if err != nil {
return true // skip on errors
}
- table[fields[0]] = val * 1024 //in bytes
+
+ if len(valueUnit) > 1 && valueUnit[1] == "kB" {
+ value *= 1024
+ }
+ table[fields[0]] = value
return true
})
@@ -420,8 +424,18 @@ func procFileName(pid int, name string) string {
return Procd + "/" + strconv.Itoa(pid) + "/" + name
}
-func readProcFile(pid int, name string) ([]byte, error) {
+func readProcFile(pid int, name string) (content []byte, err error) {
path := procFileName(pid, name)
+
+ // Panics have been reported when reading proc files, let's recover and
+ // report the path if this happens
+ // See https://github.com/elastic/beats/issues/6692
+ defer func() {
+ if r := recover(); r != nil {
+ content = nil
+ err = fmt.Errorf("recovered panic when reading proc file '%s': %v", path, r)
+ }
+ }()
contents, err := ioutil.ReadFile(path)
if err != nil {
diff --git a/vendor/github.com/elastic/gosigar/sigar_openbsd.go b/vendor/github.com/elastic/gosigar/sigar_openbsd.go
index 4f1383a6ba..e4371b8b68 100644
--- a/vendor/github.com/elastic/gosigar/sigar_openbsd.go
+++ b/vendor/github.com/elastic/gosigar/sigar_openbsd.go
@@ -294,6 +294,10 @@ func (self *Swap) Get() error {
return nil
}
+func (self *HugeTLBPages) Get() error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func (self *Cpu) Get() error {
load := [C.CPUSTATES]C.long{C.CP_USER, C.CP_NICE, C.CP_SYS, C.CP_INTR, C.CP_IDLE}
@@ -381,6 +385,10 @@ func (self *ProcFDUsage) Get(pid int) error {
return ErrNotImplemented{runtime.GOOS}
}
+func (self *Rusage) Get(pid int) error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func fillCpu(cpu *Cpu, load [C.CPUSTATES]C.long) {
cpu.User = uint64(load[0])
cpu.Nice = uint64(load[1])
diff --git a/vendor/github.com/elastic/gosigar/sigar_stub.go b/vendor/github.com/elastic/gosigar/sigar_stub.go
index 0b858f1c0c..de9565aec4 100644
--- a/vendor/github.com/elastic/gosigar/sigar_stub.go
+++ b/vendor/github.com/elastic/gosigar/sigar_stub.go
@@ -22,6 +22,10 @@ func (s *Swap) Get() error {
return ErrNotImplemented{runtime.GOOS}
}
+func (s *HugeTLBPages) Get() error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func (f *FDUsage) Get() error {
return ErrNotImplemented{runtime.GOOS}
}
diff --git a/vendor/github.com/elastic/gosigar/sigar_windows.go b/vendor/github.com/elastic/gosigar/sigar_windows.go
index 0cdf928d16..c2b54d8d7f 100644
--- a/vendor/github.com/elastic/gosigar/sigar_windows.go
+++ b/vendor/github.com/elastic/gosigar/sigar_windows.go
@@ -120,6 +120,10 @@ func (self *Swap) Get() error {
return nil
}
+func (self *HugeTLBPages) Get() error {
+ return ErrNotImplemented{runtime.GOOS}
+}
+
func (self *Cpu) Get() error {
idle, kernel, user, err := windows.GetSystemTimes()
if err != nil {
diff --git a/vendor/github.com/fjl/memsize/LICENSE b/vendor/github.com/fjl/memsize/LICENSE
new file mode 100644
index 0000000000..8b80456419
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018 Felix Lange
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/fjl/memsize/bitmap.go b/vendor/github.com/fjl/memsize/bitmap.go
new file mode 100644
index 0000000000..47799ea8d3
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/bitmap.go
@@ -0,0 +1,119 @@
+package memsize
+
+import (
+ "math/bits"
+)
+
+const (
+ uintptrBits = 32 << (uint64(^uintptr(0)) >> 63)
+ uintptrBytes = uintptrBits / 8
+ bmBlockRange = 1 * 1024 * 1024 // bytes covered by bmBlock
+ bmBlockWords = bmBlockRange / uintptrBits
+)
+
+// bitmap is a sparse bitmap.
+type bitmap struct {
+ blocks map[uintptr]*bmBlock
+}
+
+func newBitmap() *bitmap {
+ return &bitmap{make(map[uintptr]*bmBlock)}
+}
+
+// markRange sets n consecutive bits starting at addr.
+func (b *bitmap) markRange(addr, n uintptr) {
+ for end := addr + n; addr < end; {
+ block, baddr := b.block(addr)
+ for i := baddr; i < bmBlockRange && addr < end; i++ {
+ block.mark(i)
+ addr++
+ }
+ }
+}
+
+// isMarked returns the value of the bit at the given address.
+func (b *bitmap) isMarked(addr uintptr) bool {
+ block, baddr := b.block(addr)
+ return block.isMarked(baddr)
+}
+
+// countRange returns the number of set bits in the range (addr,addr+n).
+func (b *bitmap) countRange(addr, n uintptr) uintptr {
+ c := uintptr(0)
+ for end := addr + n; addr < end; {
+ block, baddr := b.block(addr)
+ bend := uintptr(bmBlockRange - 1)
+ if baddr+(end-addr) < bmBlockRange {
+ bend = baddr + (end - addr)
+ }
+ c += uintptr(block.count(baddr, bend))
+ // Move addr to next block.
+ addr += bmBlockRange - baddr
+ }
+ return c
+}
+
+// block finds the block corresponding to the given memory address.
+// It also returns the block's starting address.
+func (b *bitmap) block(addr uintptr) (*bmBlock, uintptr) {
+ index := addr / bmBlockRange
+ block := b.blocks[index]
+ if block == nil {
+ block = new(bmBlock)
+ b.blocks[index] = block
+ }
+ return block, addr % bmBlockRange
+}
+
+// size returns the sum of the byte sizes of all blocks.
+func (b *bitmap) size() uintptr {
+ return uintptr(len(b.blocks)) * bmBlockWords * uintptrBytes
+}
+
+// utilization returns the mean percentage of one bits across all blocks.
+func (b *bitmap) utilization() float32 {
+ var avg float32
+ for _, block := range b.blocks {
+ avg += float32(block.count(0, bmBlockRange-1)) / float32(bmBlockRange)
+ }
+ return avg / float32(len(b.blocks))
+}
+
+// bmBlock is a bitmap block.
+type bmBlock [bmBlockWords]uintptr
+
+// mark sets the i'th bit to one.
+func (b *bmBlock) mark(i uintptr) {
+ b[i/uintptrBits] |= 1 << (i % uintptrBits)
+}
+
+// isMarked returns the value of the i'th bit.
+func (b *bmBlock) isMarked(i uintptr) bool {
+ return (b[i/uintptrBits] & (1 << (i % uintptrBits))) != 0
+}
+
+// count returns the number of set bits in the range (start,end).
+func (b *bmBlock) count(start, end uintptr) (count int) {
+ br := b[start/uintptrBits : end/uintptrBits+1]
+ for i, w := range br {
+ if i == 0 {
+ w &= blockmask(start)
+ }
+ if i == len(br)-1 {
+ w &^= blockmask(end)
+ }
+ count += onesCountPtr(w)
+ }
+ return count
+}
+
+func blockmask(x uintptr) uintptr {
+ return ^uintptr(0) << (x % uintptrBits)
+}
+
+func onesCountPtr(x uintptr) int {
+ if uintptrBits == 64 {
+ return bits.OnesCount64(uint64(x))
+ }
+ return bits.OnesCount32(uint32(x))
+}
diff --git a/vendor/github.com/fjl/memsize/doc.go b/vendor/github.com/fjl/memsize/doc.go
new file mode 100644
index 0000000000..640cfba5eb
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/doc.go
@@ -0,0 +1,16 @@
+/*
+Package memsize computes the size of your object graph.
+
+So you made a spiffy algorithm and it works really well, but geez it's using
+way too much memory. Where did it all go? memsize to the rescue!
+
+To get started, find a value that references all your objects and scan it.
+This traverses the graph, counting sizes per type.
+
+ sizes := memsize.Scan(myValue)
+ fmt.Println(sizes.Total)
+
+memsize can handle cycles just fine and tracks both private and public struct fields.
+Unfortunately function closures cannot be inspected in any way.
+*/
+package memsize
diff --git a/vendor/github.com/fjl/memsize/memsize.go b/vendor/github.com/fjl/memsize/memsize.go
new file mode 100644
index 0000000000..2664e87c46
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/memsize.go
@@ -0,0 +1,243 @@
+package memsize
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "sort"
+ "strings"
+ "text/tabwriter"
+ "unsafe"
+)
+
+// Scan traverses all objects reachable from v and counts how much memory
+// is used per type. The value must be a non-nil pointer to any value.
+func Scan(v interface{}) Sizes {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ panic("value to scan must be non-nil pointer")
+ }
+
+ stopTheWorld("memsize scan")
+ defer startTheWorld()
+
+ ctx := newContext()
+ ctx.scan(invalidAddr, rv, false)
+ ctx.s.BitmapSize = ctx.seen.size()
+ ctx.s.BitmapUtilization = ctx.seen.utilization()
+ return *ctx.s
+}
+
+// Sizes is the result of a scan.
+type Sizes struct {
+ Total uintptr
+ ByType map[reflect.Type]*TypeSize
+ // Internal stats (for debugging)
+ BitmapSize uintptr
+ BitmapUtilization float32
+}
+
+type TypeSize struct {
+ Total uintptr
+ Count uintptr
+}
+
+func newSizes() *Sizes {
+ return &Sizes{ByType: make(map[reflect.Type]*TypeSize)}
+}
+
+// Report returns a human-readable report.
+func (s Sizes) Report() string {
+ type typLine struct {
+ name string
+ count uintptr
+ total uintptr
+ }
+ tab := []typLine{{"ALL", 0, s.Total}}
+ for _, typ := range s.ByType {
+ tab[0].count += typ.Count
+ }
+ maxname := 0
+ for typ, s := range s.ByType {
+ line := typLine{typ.String(), s.Count, s.Total}
+ tab = append(tab, line)
+ if len(line.name) > maxname {
+ maxname = len(line.name)
+ }
+ }
+ sort.Slice(tab, func(i, j int) bool { return tab[i].total > tab[j].total })
+
+ buf := new(bytes.Buffer)
+ w := tabwriter.NewWriter(buf, 0, 0, 0, ' ', tabwriter.AlignRight)
+ for _, line := range tab {
+ namespace := strings.Repeat(" ", maxname-len(line.name))
+ fmt.Fprintf(w, "%s%s\t %v\t %s\t\n", line.name, namespace, line.count, HumanSize(line.total))
+ }
+ w.Flush()
+ return buf.String()
+}
+
+// addValue is called during scan and adds the memory of given object.
+func (s *Sizes) addValue(v reflect.Value, size uintptr) {
+ s.Total += size
+ rs := s.ByType[v.Type()]
+ if rs == nil {
+ rs = new(TypeSize)
+ s.ByType[v.Type()] = rs
+ }
+ rs.Total += size
+ rs.Count++
+}
+
+type context struct {
+ // We track previously scanned objects to prevent infinite loops
+ // when scanning cycles and to prevent counting objects more than once.
+ seen *bitmap
+ tc typCache
+ s *Sizes
+}
+
+func newContext() *context {
+ return &context{seen: newBitmap(), tc: make(typCache), s: newSizes()}
+}
+
+// scan walks all objects below v, determining their size. All scan* functions return the
+// amount of 'extra' memory (e.g. slice data) that is referenced by the object.
+func (c *context) scan(addr address, v reflect.Value, add bool) (extraSize uintptr) {
+ size := v.Type().Size()
+ var marked uintptr
+ if addr.valid() {
+ marked = c.seen.countRange(uintptr(addr), size)
+ if marked == size {
+ return 0 // Skip if we have already seen the whole object.
+ }
+ c.seen.markRange(uintptr(addr), size)
+ }
+ // fmt.Printf("%v: %v ⮑ (marked %d)\n", addr, v.Type(), marked)
+ if c.tc.needScan(v.Type()) {
+ extraSize = c.scanContent(addr, v)
+ }
+ // fmt.Printf("%v: %v %d (add %v, size %d, marked %d, extra %d)\n", addr, v.Type(), size+extraSize, add, v.Type().Size(), marked, extraSize)
+ if add {
+ size -= marked
+ size += extraSize
+ c.s.addValue(v, size)
+ }
+ return extraSize
+}
+
+func (c *context) scanContent(addr address, v reflect.Value) uintptr {
+ switch v.Kind() {
+ case reflect.Array:
+ return c.scanArray(addr, v)
+ case reflect.Chan:
+ return c.scanChan(v)
+ case reflect.Func:
+ // can't do anything here
+ return 0
+ case reflect.Interface:
+ return c.scanInterface(v)
+ case reflect.Map:
+ return c.scanMap(v)
+ case reflect.Ptr:
+ if !v.IsNil() {
+ c.scan(address(v.Pointer()), v.Elem(), true)
+ }
+ return 0
+ case reflect.Slice:
+ return c.scanSlice(v)
+ case reflect.String:
+ return uintptr(v.Len())
+ case reflect.Struct:
+ return c.scanStruct(addr, v)
+ default:
+ unhandledKind(v.Kind())
+ return 0
+ }
+}
+
+func (c *context) scanChan(v reflect.Value) uintptr {
+ etyp := v.Type().Elem()
+ extra := uintptr(0)
+ if c.tc.needScan(etyp) {
+ // Scan the channel buffer. This is unsafe but doesn't race because
+ // the world is stopped during scan.
+ hchan := unsafe.Pointer(v.Pointer())
+ for i := uint(0); i < uint(v.Cap()); i++ {
+ addr := chanbuf(hchan, i)
+ elem := reflect.NewAt(etyp, addr).Elem()
+ extra += c.scanContent(address(addr), elem)
+ }
+ }
+ return uintptr(v.Cap())*etyp.Size() + extra
+}
+
+func (c *context) scanStruct(base address, v reflect.Value) uintptr {
+ extra := uintptr(0)
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Type().Field(i)
+ if c.tc.needScan(f.Type) {
+ addr := base.addOffset(f.Offset)
+ extra += c.scanContent(addr, v.Field(i))
+ }
+ }
+ return extra
+}
+
+func (c *context) scanArray(addr address, v reflect.Value) uintptr {
+ esize := v.Type().Elem().Size()
+ extra := uintptr(0)
+ for i := 0; i < v.Len(); i++ {
+ extra += c.scanContent(addr, v.Index(i))
+ addr = addr.addOffset(esize)
+ }
+ return extra
+}
+
+func (c *context) scanSlice(v reflect.Value) uintptr {
+ slice := v.Slice(0, v.Cap())
+ esize := slice.Type().Elem().Size()
+ base := slice.Pointer()
+ // Add size of the unscanned portion of the backing array to extra.
+ blen := uintptr(slice.Len()) * esize
+ marked := c.seen.countRange(base, blen)
+ extra := blen - marked
+ c.seen.markRange(uintptr(base), blen)
+ if c.tc.needScan(slice.Type().Elem()) {
+ // Elements may contain pointers, scan them individually.
+ addr := address(base)
+ for i := 0; i < slice.Len(); i++ {
+ extra += c.scanContent(addr, slice.Index(i))
+ addr = addr.addOffset(esize)
+ }
+ }
+ return extra
+}
+
+func (c *context) scanMap(v reflect.Value) uintptr {
+ var (
+ typ = v.Type()
+ len = uintptr(v.Len())
+ extra = uintptr(0)
+ )
+ if c.tc.needScan(typ.Key()) || c.tc.needScan(typ.Elem()) {
+ for _, k := range v.MapKeys() {
+ extra += c.scan(invalidAddr, k, false)
+ extra += c.scan(invalidAddr, v.MapIndex(k), false)
+ }
+ }
+ return len*typ.Key().Size() + len*typ.Elem().Size() + extra
+}
+
+func (c *context) scanInterface(v reflect.Value) uintptr {
+ elem := v.Elem()
+ if !elem.IsValid() {
+ return 0 // nil interface
+ }
+ c.scan(invalidAddr, elem, false)
+ if !c.tc.isPointer(elem.Type()) {
+ // Account for non-pointer size of the value.
+ return elem.Type().Size()
+ }
+ return 0
+}
diff --git a/vendor/github.com/fjl/memsize/memsizeui/template.go b/vendor/github.com/fjl/memsize/memsizeui/template.go
new file mode 100644
index 0000000000..b60fe6ba54
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/memsizeui/template.go
@@ -0,0 +1,106 @@
+package memsizeui
+
+import (
+ "html/template"
+ "strconv"
+ "sync"
+
+ "github.com/fjl/memsize"
+)
+
+var (
+ base *template.Template // the "base" template
+ baseInitOnce sync.Once
+)
+
+func baseInit() {
+ base = template.Must(template.New("base").Parse(`
+
+
+
+ memsize
+
+
+
+ {{template "content" .}}
+
+`))
+
+ base.Funcs(template.FuncMap{
+ "quote": strconv.Quote,
+ "humansize": memsize.HumanSize,
+ })
+
+ template.Must(base.New("rootbuttons").Parse(`
+Overview
+{{- range $root := .Roots -}}
+
+{{- end -}}`))
+}
+
+func contentTemplate(source string) *template.Template {
+ baseInitOnce.Do(baseInit)
+ t := template.Must(base.Clone())
+ template.Must(t.New("content").Parse(source))
+ return t
+}
+
+var rootTemplate = contentTemplate(`
+Memsize
+{{template "rootbuttons" .}}
+
+Reports
+
+`)
+
+var notFoundTemplate = contentTemplate(`
+{{.Data}}
+{{template "rootbuttons" .}}
+`)
+
+var reportTemplate = contentTemplate(`
+{{- $report := .Data -}}
+Memsize Report {{$report.ID}}
+
+
+Root: {{quote $report.RootName}}
+Date: {{$report.Date}}
+Duration: {{$report.Duration}}
+Bitmap Size: {{$report.Sizes.BitmapSize | humansize}}
+Bitmap Utilization: {{$report.Sizes.BitmapUtilization}}
+
+
+
+{{$report.Sizes.Report}}
+
+`)
diff --git a/vendor/github.com/fjl/memsize/memsizeui/ui.go b/vendor/github.com/fjl/memsize/memsizeui/ui.go
new file mode 100644
index 0000000000..c48fc53f7f
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/memsizeui/ui.go
@@ -0,0 +1,153 @@
+package memsizeui
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "net/http"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/fjl/memsize"
+)
+
+type Handler struct {
+ init sync.Once
+ mux http.ServeMux
+ mu sync.Mutex
+ reports map[int]Report
+ roots map[string]interface{}
+ reportID int
+}
+
+type Report struct {
+ ID int
+ Date time.Time
+ Duration time.Duration
+ RootName string
+ Sizes memsize.Sizes
+}
+
+type templateInfo struct {
+ Roots []string
+ Reports map[int]Report
+ PathDepth int
+ Data interface{}
+}
+
+func (ti *templateInfo) Link(path ...string) string {
+ prefix := strings.Repeat("../", ti.PathDepth)
+ return prefix + strings.Join(path, "")
+}
+
+func (h *Handler) Add(name string, v interface{}) {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ panic("root must be non-nil pointer")
+ }
+ h.mu.Lock()
+ if h.roots == nil {
+ h.roots = make(map[string]interface{})
+ }
+ h.roots[name] = v
+ h.mu.Unlock()
+}
+
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ h.init.Do(func() {
+ h.reports = make(map[int]Report)
+ h.mux.HandleFunc("/", h.handleRoot)
+ h.mux.HandleFunc("/scan", h.handleScan)
+ h.mux.HandleFunc("/report/", h.handleReport)
+ })
+ h.mux.ServeHTTP(w, r)
+}
+
+func (h *Handler) templateInfo(r *http.Request, data interface{}) *templateInfo {
+ h.mu.Lock()
+ roots := make([]string, 0, len(h.roots))
+ for name := range h.roots {
+ roots = append(roots, name)
+ }
+ h.mu.Unlock()
+ sort.Strings(roots)
+
+ return &templateInfo{
+ Roots: roots,
+ Reports: h.reports,
+ PathDepth: strings.Count(r.URL.Path, "/") - 1,
+ Data: data,
+ }
+}
+
+func (h *Handler) handleRoot(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+ serveHTML(w, rootTemplate, http.StatusOK, h.templateInfo(r, nil))
+}
+
+func (h *Handler) handleScan(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "invalid HTTP method, want POST", http.StatusMethodNotAllowed)
+ return
+ }
+ ti := h.templateInfo(r, "Unknown root")
+ id, ok := h.scan(r.URL.Query().Get("root"))
+ if !ok {
+ serveHTML(w, notFoundTemplate, http.StatusNotFound, ti)
+ return
+ }
+ w.Header().Add("Location", ti.Link(fmt.Sprintf("report/%d", id)))
+ w.WriteHeader(http.StatusSeeOther)
+}
+
+func (h *Handler) handleReport(w http.ResponseWriter, r *http.Request) {
+ var id int
+ fmt.Sscan(strings.TrimPrefix(r.URL.Path, "/report/"), &id)
+ h.mu.Lock()
+ report, ok := h.reports[id]
+ h.mu.Unlock()
+
+ if !ok {
+ serveHTML(w, notFoundTemplate, http.StatusNotFound, h.templateInfo(r, "Report not found"))
+ } else {
+ serveHTML(w, reportTemplate, http.StatusOK, h.templateInfo(r, report))
+ }
+}
+
+func (h *Handler) scan(root string) (int, bool) {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ val, ok := h.roots[root]
+ if !ok {
+ return 0, false
+ }
+ id := h.reportID
+ start := time.Now()
+ sizes := memsize.Scan(val)
+ h.reports[id] = Report{
+ ID: id,
+ RootName: root,
+ Date: start.Truncate(1 * time.Second),
+ Duration: time.Since(start),
+ Sizes: sizes,
+ }
+ h.reportID++
+ return id, true
+}
+
+func serveHTML(w http.ResponseWriter, tpl *template.Template, status int, ti *templateInfo) {
+ w.Header().Set("content-type", "text/html")
+ var buf bytes.Buffer
+ if err := tpl.Execute(&buf, ti); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ buf.WriteTo(w)
+}
diff --git a/vendor/github.com/fjl/memsize/runtimefunc.go b/vendor/github.com/fjl/memsize/runtimefunc.go
new file mode 100644
index 0000000000..912a3e768d
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/runtimefunc.go
@@ -0,0 +1,14 @@
+package memsize
+
+import "unsafe"
+
+var _ = unsafe.Pointer(nil)
+
+//go:linkname stopTheWorld runtime.stopTheWorld
+func stopTheWorld(reason string)
+
+//go:linkname startTheWorld runtime.startTheWorld
+func startTheWorld()
+
+//go:linkname chanbuf runtime.chanbuf
+func chanbuf(ch unsafe.Pointer, i uint) unsafe.Pointer
diff --git a/vendor/github.com/fjl/memsize/runtimefunc.s b/vendor/github.com/fjl/memsize/runtimefunc.s
new file mode 100644
index 0000000000..a091e2fa72
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/runtimefunc.s
@@ -0,0 +1 @@
+// This file is required to make stub function declarations work.
diff --git a/vendor/github.com/fjl/memsize/type.go b/vendor/github.com/fjl/memsize/type.go
new file mode 100644
index 0000000000..5d6f59e9ff
--- /dev/null
+++ b/vendor/github.com/fjl/memsize/type.go
@@ -0,0 +1,119 @@
+package memsize
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// address is a memory location.
+//
+// Code dealing with uintptr is oblivious to the zero address.
+// Code dealing with address is not: it treats the zero address
+// as invalid. Offsetting an invalid address doesn't do anything.
+//
+// This distinction is useful because there are objects that we can't
+// get the pointer to.
+type address uintptr
+
+const invalidAddr = address(0)
+
+func (a address) valid() bool {
+ return a != 0
+}
+
+func (a address) addOffset(off uintptr) address {
+ if !a.valid() {
+ return invalidAddr
+ }
+ return a + address(off)
+}
+
+func (a address) String() string {
+ if uintptrBits == 32 {
+ return fmt.Sprintf("%#0.8x", uintptr(a))
+ }
+ return fmt.Sprintf("%#0.16x", uintptr(a))
+}
+
+type typCache map[reflect.Type]typInfo
+
+type typInfo struct {
+ isPointer bool
+ needScan bool
+}
+
+// isPointer returns true for pointer-ish values. The notion of
+// pointer includes everything but plain values, i.e. slices, maps
+// channels, interfaces are 'pointer', too.
+func (tc *typCache) isPointer(typ reflect.Type) bool {
+ return tc.info(typ).isPointer
+}
+
+// needScan reports whether a value of the type needs to be scanned
+// recursively because it may contain pointers.
+func (tc *typCache) needScan(typ reflect.Type) bool {
+ return tc.info(typ).needScan
+}
+
+func (tc *typCache) info(typ reflect.Type) typInfo {
+ info, found := (*tc)[typ]
+ switch {
+ case found:
+ return info
+ case isPointer(typ):
+ info = typInfo{true, true}
+ default:
+ info = typInfo{false, tc.checkNeedScan(typ)}
+ }
+ (*tc)[typ] = info
+ return info
+}
+
+func (tc *typCache) checkNeedScan(typ reflect.Type) bool {
+ switch k := typ.Kind(); k {
+ case reflect.Struct:
+ // Structs don't need scan if none of their fields need it.
+ for i := 0; i < typ.NumField(); i++ {
+ if tc.needScan(typ.Field(i).Type) {
+ return true
+ }
+ }
+ case reflect.Array:
+ // Arrays don't need scan if their element type doesn't.
+ return tc.needScan(typ.Elem())
+ }
+ return false
+}
+
+func isPointer(typ reflect.Type) bool {
+ k := typ.Kind()
+ switch {
+ case k <= reflect.Complex128:
+ return false
+ case k == reflect.Array:
+ return false
+ case k >= reflect.Chan && k <= reflect.String:
+ return true
+ case k == reflect.Struct || k == reflect.UnsafePointer:
+ return false
+ default:
+ unhandledKind(k)
+ return false
+ }
+}
+
+func unhandledKind(k reflect.Kind) {
+ panic("unhandled kind " + k.String())
+}
+
+// HumanSize formats the given number of bytes as a readable string.
+func HumanSize(bytes uintptr) string {
+ switch {
+ case bytes < 1024:
+ return fmt.Sprintf("%d B", bytes)
+ case bytes < 1024*1024:
+ return fmt.Sprintf("%.3f KB", float64(bytes)/1024)
+ default:
+ return fmt.Sprintf("%.3f MB", float64(bytes)/1024/1024)
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
index 9b0421f035..838f1bee1b 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -12,7 +12,11 @@ import (
"sync"
)
-const typeShift = 3
+const typeShift = 4
+
+// Verify at compile-time that typeShift is large enough to cover all FileType
+// values by confirming that 0 == 0.
+var _ [0]struct{} = [TypeAll >> typeShift]struct{}{}
type memStorageLock struct {
ms *memStorage
@@ -143,7 +147,7 @@ func (ms *memStorage) Remove(fd FileDesc) error {
}
func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
- if FileDescOk(oldfd) || FileDescOk(newfd) {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
return ErrInvalidFile
}
if oldfd == newfd {
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
index e572a329e9..0e2b519e5c 100644
--- a/vendor/github.com/syndtr/goleveldb/leveldb/util.go
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -20,7 +20,7 @@ func shorten(str string) string {
return str[:3] + ".." + str[len(str)-3:]
}
-var bunits = [...]string{"", "Ki", "Mi", "Gi"}
+var bunits = [...]string{"", "Ki", "Mi", "Gi", "Ti"}
func shortenb(bytes int) string {
i := 0
diff --git a/vendor/vendor.json b/vendor/vendor.json
index bf55bc437f..fdc7789364 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -93,10 +93,10 @@
"revisionTime": "2016-05-12T03:30:02Z"
},
{
- "checksumSHA1": "Fc8BCxCoQ7ZmghDT6X1cASR10Ec=",
+ "checksumSHA1": "jElNoLEe7m/iaoF1vYIHyNaS2SE=",
"path": "github.com/elastic/gosigar",
- "revision": "a3814ce5008e612a0c6d027608b54e1d0d9a5613",
- "revisionTime": "2018-01-22T22:25:45Z"
+ "revision": "37f05ff46ffa7a825d1b24cf2b62d4a4c1a9d2e8",
+ "revisionTime": "2018-03-30T10:04:40Z"
},
{
"checksumSHA1": "qDsgp2kAeI9nhj565HUScaUyjU4=",
@@ -110,6 +110,18 @@
"revision": "5ec5d9d3c2cf82e9688b34e9bc27a94d616a7193",
"revisionTime": "2017-02-09T08:00:14Z"
},
+ {
+ "checksumSHA1": "Jq1rrHSGPfh689nA2hL1QVb62zE=",
+ "path": "github.com/fjl/memsize",
+ "revision": "ca190fb6ffbc076ff49197b7168a760f30182d2e",
+ "revisionTime": "2018-04-18T12:24:29Z"
+ },
+ {
+ "checksumSHA1": "Z13QAYTqeW4cTiglkc2F05gWLu4=",
+ "path": "github.com/fjl/memsize/memsizeui",
+ "revision": "ca190fb6ffbc076ff49197b7168a760f30182d2e",
+ "revisionTime": "2018-04-18T12:24:29Z"
+ },
{
"checksumSHA1": "0orwvPL96wFckVJyPl39fz2QsgA=",
"path": "github.com/gizak/termui",
@@ -406,76 +418,76 @@
"revisionTime": "2017-07-05T02:17:15Z"
},
{
- "checksumSHA1": "3QsnhPTXGytTbW3uDvQLgSo9s9M=",
+ "checksumSHA1": "k13cCuMJO7+KhR8ZXx5oUqDKGQA=",
"path": "github.com/syndtr/goleveldb/leveldb",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "EKIow7XkgNdWvR/982ffIZxKG8Y=",
"path": "github.com/syndtr/goleveldb/leveldb/cache",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=",
"path": "github.com/syndtr/goleveldb/leveldb/comparer",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "1DRAxdlWzS4U0xKN/yQ/fdNN7f0=",
"path": "github.com/syndtr/goleveldb/leveldb/errors",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=",
"path": "github.com/syndtr/goleveldb/leveldb/filter",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "weSsccMav4BCerDpSLzh3mMxAYo=",
"path": "github.com/syndtr/goleveldb/leveldb/iterator",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "gJY7bRpELtO0PJpZXgPQ2BYFJ88=",
"path": "github.com/syndtr/goleveldb/leveldb/journal",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "MtYY1b2234y/MlS+djL8tXVAcQs=",
"path": "github.com/syndtr/goleveldb/leveldb/memdb",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "UmQeotV+m8/FduKEfLOhjdp18rs=",
"path": "github.com/syndtr/goleveldb/leveldb/opt",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
- "checksumSHA1": "QCSae2ub87f8awH+PKMpd8ZYOtg=",
+ "checksumSHA1": "7H3fa12T7WoMAeXq1+qG5O7LD0w=",
"path": "github.com/syndtr/goleveldb/leveldb/storage",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "gWFPMz8OQeul0t54RM66yMTX49g=",
"path": "github.com/syndtr/goleveldb/leveldb/table",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "V/Dh7NV0/fy/5jX1KaAjmGcNbzI=",
"path": "github.com/syndtr/goleveldb/leveldb/util",
- "revision": "169b1b37be738edb2813dab48c97a549bcf99bb5",
- "revisionTime": "2018-03-07T11:33:52Z"
+ "revision": "ae970a0732be3a1f5311da86118d37b9f4bd2a5a",
+ "revisionTime": "2018-05-02T07:23:49Z"
},
{
"checksumSHA1": "TT1rac6kpQp2vz24m5yDGUNQ/QQ=",
diff --git a/whisper/mailserver/mailserver.go b/whisper/mailserver/mailserver.go
index 57e6505ad1..d32eaddec3 100644
--- a/whisper/mailserver/mailserver.go
+++ b/whisper/mailserver/mailserver.go
@@ -20,7 +20,6 @@ import (
"encoding/binary"
"fmt"
- "github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/log"
@@ -54,19 +53,19 @@ func NewDbKey(t uint32, h common.Hash) *DBKey {
return &k
}
-func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) {
+func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, pow float64) error {
var err error
if len(path) == 0 {
- utils.Fatalf("DB file is not specified")
+ return fmt.Errorf("DB file is not specified")
}
if len(password) == 0 {
- utils.Fatalf("Password is not specified for MailServer")
+ return fmt.Errorf("password is not specified")
}
s.db, err = leveldb.OpenFile(path, nil)
if err != nil {
- utils.Fatalf("Failed to open DB file: %s", err)
+ return fmt.Errorf("open DB file: %s", err)
}
s.w = shh
@@ -74,12 +73,13 @@ func (s *WMailServer) Init(shh *whisper.Whisper, path string, password string, p
MailServerKeyID, err := s.w.AddSymKeyFromPassword(password)
if err != nil {
- utils.Fatalf("Failed to create symmetric key for MailServer: %s", err)
+ return fmt.Errorf("create symmetric key: %s", err)
}
s.key, err = s.w.GetSymKey(MailServerKeyID)
if err != nil {
- utils.Fatalf("Failed to save symmetric key for MailServer")
+ return fmt.Errorf("save symmetric key: %s", err)
}
+ return nil
}
func (s *WMailServer) Close() {
diff --git a/whisper/mailserver/server_test.go b/whisper/mailserver/server_test.go
index d5b993afb9..edb817cc75 100644
--- a/whisper/mailserver/server_test.go
+++ b/whisper/mailserver/server_test.go
@@ -92,7 +92,10 @@ func TestMailServer(t *testing.T) {
shh = whisper.New(&whisper.DefaultConfig)
shh.RegisterServer(&server)
- server.Init(shh, dir, password, powRequirement)
+ err = server.Init(shh, dir, password, powRequirement)
+ if err != nil {
+ t.Fatal(err)
+ }
defer server.Close()
keyID, err = shh.AddSymKeyFromPassword(password)
diff --git a/whisper/shhclient/client.go b/whisper/shhclient/client.go
index bbe694baaa..8e7085a0a6 100644
--- a/whisper/shhclient/client.go
+++ b/whisper/shhclient/client.go
@@ -67,7 +67,6 @@ func (sc *Client) SetMaxMessageSize(ctx context.Context, size uint32) error {
}
// SetMinimumPoW (experimental) sets the minimal PoW required by this node.
-
// This experimental function was introduced for the future dynamic adjustment of
// PoW requirement. If the node is overwhelmed with messages, it should raise the
// PoW requirement and notify the peers. The new value should be set relative to
@@ -77,7 +76,7 @@ func (sc *Client) SetMinimumPoW(ctx context.Context, pow float64) error {
return sc.c.CallContext(ctx, &ignored, "shh_setMinPoW", pow)
}
-// Marks specific peer trusted, which will allow it to send historic (expired) messages.
+// MarkTrustedPeer marks specific peer trusted, which will allow it to send historic (expired) messages.
// Note This function is not adding new nodes, the node needs to exists as a peer.
func (sc *Client) MarkTrustedPeer(ctx context.Context, enode string) error {
var ignored bool
@@ -136,9 +135,9 @@ func (sc *Client) AddSymmetricKey(ctx context.Context, key []byte) (string, erro
}
// GenerateSymmetricKeyFromPassword generates the key from password, stores it, and returns its identifier.
-func (sc *Client) GenerateSymmetricKeyFromPassword(ctx context.Context, passwd []byte) (string, error) {
+func (sc *Client) GenerateSymmetricKeyFromPassword(ctx context.Context, passwd string) (string, error) {
var id string
- return id, sc.c.CallContext(ctx, &id, "shh_generateSymKeyFromPassword", hexutil.Bytes(passwd))
+ return id, sc.c.CallContext(ctx, &id, "shh_generateSymKeyFromPassword", passwd)
}
// HasSymmetricKey returns an indication if the key associated with the given id is stored in the node.
diff --git a/whisper/whisperv5/api.go b/whisper/whisperv5/api.go
index ee566625c9..c56d139499 100644
--- a/whisper/whisperv5/api.go
+++ b/whisper/whisperv5/api.go
@@ -32,10 +32,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
-const (
- filterTimeout = 300 // filters are considered timeout out after filterTimeout seconds
-)
-
var (
ErrSymAsym = errors.New("specify either a symmetric or an asymmetric key")
ErrInvalidSymmetricKey = errors.New("invalid symmetric key")
@@ -93,7 +89,7 @@ func (api *PublicWhisperAPI) SetMaxMessageSize(ctx context.Context, size uint32)
return true, api.w.SetMaxMessageSize(size)
}
-// SetMinPow sets the minimum PoW for a message before it is accepted.
+// SetMinPoW sets the minimum PoW for a message before it is accepted.
func (api *PublicWhisperAPI) SetMinPoW(ctx context.Context, pow float64) (bool, error) {
return true, api.w.SetMinimumPoW(pow)
}
@@ -146,7 +142,7 @@ func (api *PublicWhisperAPI) GetPublicKey(ctx context.Context, id string) (hexut
return crypto.FromECDSAPub(&key.PublicKey), nil
}
-// GetPublicKey returns the private key associated with the given key. The key is the hex
+// GetPrivateKey returns the private key associated with the given key. The key is the hex
// encoded representation of a key in the form specified in section 4.3.6 of ANSI X9.62.
func (api *PublicWhisperAPI) GetPrivateKey(ctx context.Context, id string) (hexutil.Bytes, error) {
key, err := api.w.GetPrivateKey(id)
diff --git a/whisper/whisperv5/doc.go b/whisper/whisperv5/doc.go
index 7a57488bd7..8161db8ed6 100644
--- a/whisper/whisperv5/doc.go
+++ b/whisper/whisperv5/doc.go
@@ -15,7 +15,7 @@
// along with the go-ethereum library. If not, see .
/*
-Package whisper implements the Whisper protocol (version 5).
+Package whisperv5 implements the Whisper protocol (version 5).
Whisper combines aspects of both DHTs and datagram messaging systems (e.g. UDP).
As such it may be likened and compared to both, not dissimilar to the
diff --git a/whisper/whisperv5/message.go b/whisper/whisperv5/message.go
index 34ce52e64f..35711d724b 100644
--- a/whisper/whisperv5/message.go
+++ b/whisper/whisperv5/message.go
@@ -33,7 +33,7 @@ import (
"github.com/ethereum/go-ethereum/log"
)
-// Options specifies the exact way a message should be wrapped into an Envelope.
+// MessageParams specifies the exact way a message should be wrapped into an Envelope.
type MessageParams struct {
TTL uint32
Src *ecdsa.PrivateKey
@@ -86,7 +86,7 @@ func (msg *ReceivedMessage) isAsymmetricEncryption() bool {
return msg.Dst != nil
}
-// NewMessage creates and initializes a non-signed, non-encrypted Whisper message.
+// NewSentMessage creates and initializes a non-signed, non-encrypted Whisper message.
func NewSentMessage(params *MessageParams) (*sentMessage, error) {
msg := sentMessage{}
msg.Raw = make([]byte, 1, len(params.Payload)+len(params.Padding)+signatureLength+padSizeLimit)
@@ -330,7 +330,7 @@ func (msg *ReceivedMessage) extractPadding(end int) (int, bool) {
return paddingSize, true
}
-// Recover retrieves the public key of the message signer.
+// SigToPubKey retrieves the public key of the message signer.
func (msg *ReceivedMessage) SigToPubKey() *ecdsa.PublicKey {
defer func() { recover() }() // in case of invalid signature
diff --git a/whisper/whisperv5/peer.go b/whisper/whisperv5/peer.go
index 179c931795..da07631992 100644
--- a/whisper/whisperv5/peer.go
+++ b/whisper/whisperv5/peer.go
@@ -27,7 +27,7 @@ import (
set "gopkg.in/fatih/set.v0"
)
-// peer represents a whisper protocol peer connection.
+// Peer represents a whisper protocol peer connection.
type Peer struct {
host *Whisper
peer *p2p.Peer
@@ -53,51 +53,51 @@ func newPeer(host *Whisper, remote *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
// start initiates the peer updater, periodically broadcasting the whisper packets
// into the network.
-func (p *Peer) start() {
- go p.update()
- log.Trace("start", "peer", p.ID())
+func (peer *Peer) start() {
+ go peer.update()
+ log.Trace("start", "peer", peer.ID())
}
// stop terminates the peer updater, stopping message forwarding to it.
-func (p *Peer) stop() {
- close(p.quit)
- log.Trace("stop", "peer", p.ID())
+func (peer *Peer) stop() {
+ close(peer.quit)
+ log.Trace("stop", "peer", peer.ID())
}
// handshake sends the protocol initiation status message to the remote peer and
// verifies the remote status too.
-func (p *Peer) handshake() error {
+func (peer *Peer) handshake() error {
// Send the handshake status message asynchronously
errc := make(chan error, 1)
go func() {
- errc <- p2p.Send(p.ws, statusCode, ProtocolVersion)
+ errc <- p2p.Send(peer.ws, statusCode, ProtocolVersion)
}()
// Fetch the remote status packet and verify protocol match
- packet, err := p.ws.ReadMsg()
+ packet, err := peer.ws.ReadMsg()
if err != nil {
return err
}
if packet.Code != statusCode {
- return fmt.Errorf("peer [%x] sent packet %x before status packet", p.ID(), packet.Code)
+ return fmt.Errorf("peer [%x] sent packet %x before status packet", peer.ID(), packet.Code)
}
s := rlp.NewStream(packet.Payload, uint64(packet.Size))
peerVersion, err := s.Uint()
if err != nil {
- return fmt.Errorf("peer [%x] sent bad status message: %v", p.ID(), err)
+ return fmt.Errorf("peer [%x] sent bad status message: %v", peer.ID(), err)
}
if peerVersion != ProtocolVersion {
- return fmt.Errorf("peer [%x]: protocol version mismatch %d != %d", p.ID(), peerVersion, ProtocolVersion)
+ return fmt.Errorf("peer [%x]: protocol version mismatch %d != %d", peer.ID(), peerVersion, ProtocolVersion)
}
// Wait until out own status is consumed too
if err := <-errc; err != nil {
- return fmt.Errorf("peer [%x] failed to send status packet: %v", p.ID(), err)
+ return fmt.Errorf("peer [%x] failed to send status packet: %v", peer.ID(), err)
}
return nil
}
// update executes periodic operations on the peer, including message transmission
// and expiration.
-func (p *Peer) update() {
+func (peer *Peer) update() {
// Start the tickers for the updates
expire := time.NewTicker(expirationCycle)
transmit := time.NewTicker(transmissionCycle)
@@ -106,15 +106,15 @@ func (p *Peer) update() {
for {
select {
case <-expire.C:
- p.expire()
+ peer.expire()
case <-transmit.C:
- if err := p.broadcast(); err != nil {
- log.Trace("broadcast failed", "reason", err, "peer", p.ID())
+ if err := peer.broadcast(); err != nil {
+ log.Trace("broadcast failed", "reason", err, "peer", peer.ID())
return
}
- case <-p.quit:
+ case <-peer.quit:
return
}
}
@@ -148,16 +148,16 @@ func (peer *Peer) expire() {
// broadcast iterates over the collection of envelopes and transmits yet unknown
// ones over the network.
-func (p *Peer) broadcast() error {
+func (peer *Peer) broadcast() error {
var cnt int
- envelopes := p.host.Envelopes()
+ envelopes := peer.host.Envelopes()
for _, envelope := range envelopes {
- if !p.marked(envelope) {
- err := p2p.Send(p.ws, messagesCode, envelope)
+ if !peer.marked(envelope) {
+ err := p2p.Send(peer.ws, messagesCode, envelope)
if err != nil {
return err
} else {
- p.mark(envelope)
+ peer.mark(envelope)
cnt++
}
}
@@ -168,7 +168,7 @@ func (p *Peer) broadcast() error {
return nil
}
-func (p *Peer) ID() []byte {
- id := p.peer.ID()
+func (peer *Peer) ID() []byte {
+ id := peer.peer.ID()
return id[:]
}
diff --git a/whisper/whisperv5/peer_test.go b/whisper/whisperv5/peer_test.go
index bae2adb6f5..051b52dcf8 100644
--- a/whisper/whisperv5/peer_test.go
+++ b/whisper/whisperv5/peer_test.go
@@ -32,7 +32,7 @@ import (
"github.com/ethereum/go-ethereum/p2p/nat"
)
-var keys []string = []string{
+var keys = []string{
"d49dcf37238dc8a7aac57dc61b9fee68f0a97f062968978b9fafa7d1033d03a9",
"73fd6143c48e80ed3c56ea159fe7494a0b6b393a392227b422f4c3e8f1b54f98",
"119dd32adb1daa7a4c7bf77f847fb28730785aa92947edf42fdd997b54de40dc",
@@ -84,9 +84,9 @@ type TestNode struct {
var result TestData
var nodes [NumNodes]*TestNode
-var sharedKey []byte = []byte("some arbitrary data here")
+var sharedKey = []byte("some arbitrary data here")
var sharedTopic TopicType = TopicType{0xF, 0x1, 0x2, 0}
-var expectedMessage []byte = []byte("per rectum ad astra")
+var expectedMessage = []byte("per rectum ad astra")
// This test does the following:
// 1. creates a chain of whisper nodes,
diff --git a/whisper/whisperv5/topic.go b/whisper/whisperv5/topic.go
index c4ea67eefa..c4eda1db42 100644
--- a/whisper/whisperv5/topic.go
+++ b/whisper/whisperv5/topic.go
@@ -23,7 +23,7 @@ import (
"github.com/ethereum/go-ethereum/common/hexutil"
)
-// Topic represents a cryptographically secure, probabilistic partial
+// TopicType represents a cryptographically secure, probabilistic partial
// classifications of a message, determined as the first (left) 4 bytes of the
// SHA3 hash of some arbitrary data given by the original author of the message.
type TopicType [TopicLength]byte
diff --git a/whisper/whisperv5/whisper.go b/whisper/whisperv5/whisper.go
index 85849ccce4..62bd1ce179 100644
--- a/whisper/whisperv5/whisper.go
+++ b/whisper/whisperv5/whisper.go
@@ -469,18 +469,18 @@ func (w *Whisper) Stop() error {
// HandlePeer is called by the underlying P2P layer when the whisper sub-protocol
// connection is negotiated.
-func (wh *Whisper) HandlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
+func (w *Whisper) HandlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
// Create the new peer and start tracking it
- whisperPeer := newPeer(wh, peer, rw)
+ whisperPeer := newPeer(w, peer, rw)
- wh.peerMu.Lock()
- wh.peers[whisperPeer] = struct{}{}
- wh.peerMu.Unlock()
+ w.peerMu.Lock()
+ w.peers[whisperPeer] = struct{}{}
+ w.peerMu.Unlock()
defer func() {
- wh.peerMu.Lock()
- delete(wh.peers, whisperPeer)
- wh.peerMu.Unlock()
+ w.peerMu.Lock()
+ delete(w.peers, whisperPeer)
+ w.peerMu.Unlock()
}()
// Run the peer handshake and state updates
@@ -490,11 +490,11 @@ func (wh *Whisper) HandlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
whisperPeer.start()
defer whisperPeer.stop()
- return wh.runMessageLoop(whisperPeer, rw)
+ return w.runMessageLoop(whisperPeer, rw)
}
// runMessageLoop reads and processes inbound messages directly to merge into client-global state.
-func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
+func (w *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
for {
// fetch the next packet
packet, err := rw.ReadMsg()
@@ -502,7 +502,7 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
log.Warn("message loop", "peer", p.peer.ID(), "err", err)
return err
}
- if packet.Size > wh.MaxMessageSize() {
+ if packet.Size > w.MaxMessageSize() {
log.Warn("oversized message received", "peer", p.peer.ID())
return errors.New("oversized message received")
}
@@ -518,7 +518,7 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
log.Warn("failed to decode envelope, peer will be disconnected", "peer", p.peer.ID(), "err", err)
return errors.New("invalid envelope")
}
- cached, err := wh.add(&envelope)
+ cached, err := w.add(&envelope)
if err != nil {
log.Warn("bad envelope received, peer will be disconnected", "peer", p.peer.ID(), "err", err)
return errors.New("invalid envelope")
@@ -537,17 +537,17 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
log.Warn("failed to decode direct message, peer will be disconnected", "peer", p.peer.ID(), "err", err)
return errors.New("invalid direct message")
}
- wh.postEvent(&envelope, true)
+ w.postEvent(&envelope, true)
}
case p2pRequestCode:
// Must be processed if mail server is implemented. Otherwise ignore.
- if wh.mailServer != nil {
+ if w.mailServer != nil {
var request Envelope
if err := packet.Decode(&request); err != nil {
log.Warn("failed to decode p2p request message, peer will be disconnected", "peer", p.peer.ID(), "err", err)
return errors.New("invalid p2p request")
}
- wh.mailServer.DeliverMail(p, &request)
+ w.mailServer.DeliverMail(p, &request)
}
default:
// New message types might be implemented in the future versions of Whisper.
@@ -561,29 +561,27 @@ func (wh *Whisper) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
// add inserts a new envelope into the message pool to be distributed within the
// whisper network. It also inserts the envelope into the expiration pool at the
// appropriate time-stamp. In case of error, connection should be dropped.
-func (wh *Whisper) add(envelope *Envelope) (bool, error) {
+func (w *Whisper) add(envelope *Envelope) (bool, error) {
now := uint32(time.Now().Unix())
sent := envelope.Expiry - envelope.TTL
if sent > now {
if sent-SynchAllowance > now {
return false, fmt.Errorf("envelope created in the future [%x]", envelope.Hash())
- } else {
- // recalculate PoW, adjusted for the time difference, plus one second for latency
- envelope.calculatePoW(sent - now + 1)
}
+ // recalculate PoW, adjusted for the time difference, plus one second for latency
+ envelope.calculatePoW(sent - now + 1)
}
if envelope.Expiry < now {
if envelope.Expiry+SynchAllowance*2 < now {
return false, fmt.Errorf("very old message")
- } else {
- log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex())
- return false, nil // drop envelope without error
}
+ log.Debug("expired envelope dropped", "hash", envelope.Hash().Hex())
+ return false, nil // drop envelope without error
}
- if uint32(envelope.size()) > wh.MaxMessageSize() {
+ if uint32(envelope.size()) > w.MaxMessageSize() {
return false, fmt.Errorf("huge messages are not allowed [%x]", envelope.Hash())
}
@@ -598,36 +596,36 @@ func (wh *Whisper) add(envelope *Envelope) (bool, error) {
return false, fmt.Errorf("wrong size of AESNonce: %d bytes [env: %x]", aesNonceSize, envelope.Hash())
}
- if envelope.PoW() < wh.MinPow() {
+ if envelope.PoW() < w.MinPow() {
log.Debug("envelope with low PoW dropped", "PoW", envelope.PoW(), "hash", envelope.Hash().Hex())
return false, nil // drop envelope without error
}
hash := envelope.Hash()
- wh.poolMu.Lock()
- _, alreadyCached := wh.envelopes[hash]
+ w.poolMu.Lock()
+ _, alreadyCached := w.envelopes[hash]
if !alreadyCached {
- wh.envelopes[hash] = envelope
- if wh.expirations[envelope.Expiry] == nil {
- wh.expirations[envelope.Expiry] = set.NewNonTS()
+ w.envelopes[hash] = envelope
+ if w.expirations[envelope.Expiry] == nil {
+ w.expirations[envelope.Expiry] = set.NewNonTS()
}
- if !wh.expirations[envelope.Expiry].Has(hash) {
- wh.expirations[envelope.Expiry].Add(hash)
+ if !w.expirations[envelope.Expiry].Has(hash) {
+ w.expirations[envelope.Expiry].Add(hash)
}
}
- wh.poolMu.Unlock()
+ w.poolMu.Unlock()
if alreadyCached {
log.Trace("whisper envelope already cached", "hash", envelope.Hash().Hex())
} else {
log.Trace("cached whisper envelope", "hash", envelope.Hash().Hex())
- wh.statsMu.Lock()
- wh.stats.memoryUsed += envelope.size()
- wh.statsMu.Unlock()
- wh.postEvent(envelope, false) // notify the local node about the new message
- if wh.mailServer != nil {
- wh.mailServer.Archive(envelope)
+ w.statsMu.Lock()
+ w.stats.memoryUsed += envelope.size()
+ w.statsMu.Unlock()
+ w.postEvent(envelope, false) // notify the local node about the new message
+ if w.mailServer != nil {
+ w.mailServer.Archive(envelope)
}
}
return true, nil
@@ -838,9 +836,8 @@ func deriveKeyMaterial(key []byte, version uint64) (derivedKey []byte, err error
// because it's a once in a session experience
derivedKey := pbkdf2.Key(key, nil, 65356, aesKeyLength, sha256.New)
return derivedKey, nil
- } else {
- return nil, unknownVersionError(version)
}
+ return nil, unknownVersionError(version)
}
// GenerateRandomID generates a random string, which is then returned to be used as a key id
diff --git a/whisper/whisperv6/api.go b/whisper/whisperv6/api.go
index 3f3a082afe..c60bc46a13 100644
--- a/whisper/whisperv6/api.go
+++ b/whisper/whisperv6/api.go
@@ -32,10 +32,6 @@ import (
"github.com/ethereum/go-ethereum/rpc"
)
-const (
- filterTimeout = 300 // filters are considered timeout out after filterTimeout seconds
-)
-
// List of errors
var (
ErrSymAsym = errors.New("specify either a symmetric or an asymmetric key")
@@ -231,8 +227,9 @@ type newMessageOverride struct {
Padding hexutil.Bytes
}
-// Post a message on the Whisper network.
-func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, error) {
+// Post posts a message on the Whisper network.
+// returns the hash of the message in case of success.
+func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (hexutil.Bytes, error) {
var (
symKeyGiven = len(req.SymKeyID) > 0
pubKeyGiven = len(req.PublicKey) > 0
@@ -241,7 +238,7 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// user must specify either a symmetric or an asymmetric key
if (symKeyGiven && pubKeyGiven) || (!symKeyGiven && !pubKeyGiven) {
- return false, ErrSymAsym
+ return nil, ErrSymAsym
}
params := &MessageParams{
@@ -256,20 +253,20 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
// Set key that is used to sign the message
if len(req.Sig) > 0 {
if params.Src, err = api.w.GetPrivateKey(req.Sig); err != nil {
- return false, err
+ return nil, err
}
}
// Set symmetric key that is used to encrypt the message
if symKeyGiven {
if params.Topic == (TopicType{}) { // topics are mandatory with symmetric encryption
- return false, ErrNoTopics
+ return nil, ErrNoTopics
}
if params.KeySym, err = api.w.GetSymKey(req.SymKeyID); err != nil {
- return false, err
+ return nil, err
}
if !validateDataIntegrity(params.KeySym, aesKeyLength) {
- return false, ErrInvalidSymmetricKey
+ return nil, ErrInvalidSymmetricKey
}
}
@@ -277,36 +274,47 @@ func (api *PublicWhisperAPI) Post(ctx context.Context, req NewMessage) (bool, er
if pubKeyGiven {
params.Dst = crypto.ToECDSAPub(req.PublicKey)
if !ValidatePublicKey(params.Dst) {
- return false, ErrInvalidPublicKey
+ return nil, ErrInvalidPublicKey
}
}
// encrypt and sent message
whisperMsg, err := NewSentMessage(params)
if err != nil {
- return false, err
+ return nil, err
}
+ var result []byte
env, err := whisperMsg.Wrap(params)
if err != nil {
- return false, err
+ return nil, err
}
// send to specific node (skip PoW check)
if len(req.TargetPeer) > 0 {
n, err := discover.ParseNode(req.TargetPeer)
if err != nil {
- return false, fmt.Errorf("failed to parse target peer: %s", err)
+ return nil, fmt.Errorf("failed to parse target peer: %s", err)
}
- return true, api.w.SendP2PMessage(n.ID[:], env)
+ err = api.w.SendP2PMessage(n.ID[:], env)
+ if err == nil {
+ hash := env.Hash()
+ result = hash[:]
+ }
+ return result, err
}
// ensure that the message PoW meets the node's minimum accepted PoW
if req.PowTarget < api.w.MinPow() {
- return false, ErrTooLowPoW
+ return nil, ErrTooLowPoW
}
- return true, api.w.Send(env)
+ err = api.w.Send(env)
+ if err == nil {
+ hash := env.Hash()
+ result = hash[:]
+ }
+ return result, err
}
//go:generate gencodec -type Criteria -field-override criteriaOverride -out gen_criteria_json.go