diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 79c7a53014..0a09baef7d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,12 +1,32 @@ # Lines starting with '#' are comments. # Each line is a file pattern followed by one or more owners. -accounts/usbwallet @karalabe -consensus @karalabe -core/ @karalabe @holiman -eth/ @karalabe -les/ @zsfelfoldi -light/ @zsfelfoldi -mobile/ @karalabe -p2p/ @fjl @zsfelfoldi -whisper/ @gballet @gluk256 +accounts/usbwallet @karalabe +consensus @karalabe +core/ @karalabe @holiman +eth/ @karalabe +les/ @zsfelfoldi +light/ @zsfelfoldi +mobile/ @karalabe +p2p/ @fjl @zsfelfoldi +swarm/bmt @zelig +swarm/dev @lmars +swarm/fuse @jmozah @holisticode +swarm/grafana_dashboards @nonsense +swarm/metrics @nonsense @holisticode +swarm/multihash @nolash +swarm/network/bitvector @zelig @janos @gbalint +swarm/network/priorityqueue @zelig @janos @gbalint +swarm/network/simulations @zelig +swarm/network/stream @janos @zelig @gbalint @holisticode @justelad +swarm/network/stream/intervals @janos +swarm/network/stream/testing @zelig +swarm/pot @zelig +swarm/pss @nolash @zelig @nonsense +swarm/services @zelig +swarm/state @justelad +swarm/storage/encryption @gbalint @zelig @nagydani +swarm/storage/mock @janos +swarm/storage/mru @nolash +swarm/testutil @lmars +whisper/ @gballet @gluk256 diff --git a/VERSION b/VERSION index 53ed4ba51e..7d2424c90b 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.8.9 +1.8.12 diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index 7b605e1c1e..fd69538d53 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -454,7 +454,7 @@ func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*ty return logs, nil } -func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription { +func (fb *filterBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return event.NewSubscription(func(quit <-chan struct{}) error { <-quit return nil diff --git a/accounts/abi/numbers.go b/accounts/abi/numbers.go index 0cd97cc66f..4d706846da 100644 --- a/accounts/abi/numbers.go +++ b/accounts/abi/numbers.go @@ -31,29 +31,14 @@ var ( uint16T = reflect.TypeOf(uint16(0)) uint32T = reflect.TypeOf(uint32(0)) uint64T = reflect.TypeOf(uint64(0)) - intT = reflect.TypeOf(int(0)) int8T = reflect.TypeOf(int8(0)) int16T = reflect.TypeOf(int16(0)) int32T = reflect.TypeOf(int32(0)) int64T = reflect.TypeOf(int64(0)) addressT = reflect.TypeOf(common.Address{}) - intTS = reflect.TypeOf([]int(nil)) - int8TS = reflect.TypeOf([]int8(nil)) - int16TS = reflect.TypeOf([]int16(nil)) - int32TS = reflect.TypeOf([]int32(nil)) - int64TS = reflect.TypeOf([]int64(nil)) ) // U256 converts a big Int into a 256bit EVM number. func U256(n *big.Int) []byte { return math.PaddedBigBytes(math.U256(n), 32) } - -// checks whether the given reflect value is signed. 
This also works for slices with a number type -func isSigned(v reflect.Value) bool { - switch v.Type() { - case intTS, int8TS, int16TS, int32TS, int64TS, intT, int8T, int16T, int32T, int64T: - return true - } - return false -} diff --git a/accounts/abi/numbers_test.go b/accounts/abi/numbers_test.go index b9ff5aef17..d25a5abcb5 100644 --- a/accounts/abi/numbers_test.go +++ b/accounts/abi/numbers_test.go @@ -19,7 +19,6 @@ package abi import ( "bytes" "math/big" - "reflect" "testing" ) @@ -32,13 +31,3 @@ func TestNumberTypes(t *testing.T) { t.Errorf("expected %x got %x", ubytes, unsigned) } } - -func TestSigned(t *testing.T) { - if isSigned(reflect.ValueOf(uint(10))) { - t.Error("signed") - } - - if !isSigned(reflect.ValueOf(int(10))) { - t.Error("not signed") - } -} diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 80ccd37419..6b04acd05f 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -50,7 +50,7 @@ var ( var KeyStoreType = reflect.TypeOf(&KeyStore{}) // KeyStoreScheme is the protocol scheme prefixing account and wallet URLs. -var KeyStoreScheme = "keystore" +const KeyStoreScheme = "keystore" // Maximum time between wallet refreshes (if filesystem notifications don't work). const walletRefreshCycle = 3 * time.Second diff --git a/accounts/usbwallet/internal/trezor/trezor.go b/accounts/usbwallet/internal/trezor/trezor.go index 8ae9e726e4..80cc75efc4 100644 --- a/accounts/usbwallet/internal/trezor/trezor.go +++ b/accounts/usbwallet/internal/trezor/trezor.go @@ -36,7 +36,7 @@ func Type(msg proto.Message) uint16 { } // Name returns the friendly message type name of a specific protocol buffer -// type numbers. +// type number. func Name(kind uint16) string { name := MessageType_name[int32(kind)] if len(name) < 12 { diff --git a/accounts/usbwallet/ledger.go b/accounts/usbwallet/ledger.go index 7ad32dd1e8..2583fbc4da 100644 --- a/accounts/usbwallet/ledger.go +++ b/accounts/usbwallet/ledger.go @@ -302,7 +302,7 @@ func (w *ledgerDriver) ledgerSign(derivationPath []uint32, tx *types.Transaction for i, component := range derivationPath { binary.BigEndian.PutUint32(path[1+4*i:], component) } - // Create the transaction RLP based on whether legacy or EIP155 signing was requeste + // Create the transaction RLP based on whether legacy or EIP155 signing was requested var ( txrlp []byte err error diff --git a/appveyor.yml b/appveyor.yml index 141ef16ff8..05ff92cf36 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -23,8 +23,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.1.windows-%GETH_ARCH%.zip - - 7z x go1.10.1.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.3.windows-%GETH_ARCH%.zip + - 7z x go1.10.3.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/bmt/bmt.go b/bmt/bmt.go deleted file mode 100644 index c290223452..0000000000 --- a/bmt/bmt.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// Package bmt provides a binary merkle tree implementation -package bmt - -import ( - "fmt" - "hash" - "io" - "strings" - "sync" - "sync/atomic" -) - -/* -Binary Merkle Tree Hash is a hash function over arbitrary datachunks of limited size -It is defined as the root hash of the binary merkle tree built over fixed size segments -of the underlying chunk using any base hash function (e.g keccak 256 SHA3) - -It is used as the chunk hash function in swarm which in turn is the basis for the -128 branching swarm hash http://swarm-guide.readthedocs.io/en/latest/architecture.html#swarm-hash - -The BMT is optimal for providing compact inclusion proofs, i.e. prove that a -segment is a substring of a chunk starting at a particular offset -The size of the underlying segments is fixed at 32 bytes (called the resolution -of the BMT hash), the EVM word size to optimize for on-chain BMT verification -as well as the hash size optimal for inclusion proofs in the merkle tree of the swarm hash. - -Two implementations are provided: - -* RefHasher is optimized for code simplicity and meant as a reference implementation -* Hasher is optimized for speed taking advantage of concurrency with minimalistic - control structure to coordinate the concurrent routines - It implements the ChunkHash interface as well as the go standard hash.Hash interface - -*/ - -const ( - // DefaultSegmentCount is the maximum number of segments of the underlying chunk - DefaultSegmentCount = 128 // Should be equal to storage.DefaultBranches - // DefaultPoolSize is the maximum number of bmt trees used by the hashers, i.e, - // the maximum number of concurrent BMT hashing operations performed by the same hasher - DefaultPoolSize = 8 -) - -// BaseHasher is a hash.Hash constructor function used for the base hash of the BMT.
-type BaseHasher func() hash.Hash - -// Hasher a reusable hasher for fixed maximum size chunks representing a BMT -// implements the hash.Hash interface -// reuse pool of Tree-s for amortised memory allocation and resource control -// supports order-agnostic concurrent segment writes -// as well as sequential read and write -// can not be called concurrently on more than one chunk -// can be further appended after Sum -// Reset gives back the Tree to the pool and guaranteed to leave -// the tree and itself in a state reusable for hashing a new chunk -type Hasher struct { - pool *TreePool // BMT resource pool - bmt *Tree // prebuilt BMT resource for flowcontrol and proofs - blocksize int // segment size (size of hash) also for hash.Hash - count int // segment count - size int // for hash.Hash same as hashsize - cur int // cursor position for rightmost currently open chunk - segment []byte // the rightmost open segment (not complete) - depth int // index of last level - result chan []byte // result channel - hash []byte // to record the result - max int32 // max segments for SegmentWriter interface - blockLength []byte // The block length that needes to be added in Sum -} - -// New creates a reusable Hasher -// implements the hash.Hash interface -// pulls a new Tree from a resource pool for hashing each chunk -func New(p *TreePool) *Hasher { - return &Hasher{ - pool: p, - depth: depth(p.SegmentCount), - size: p.SegmentSize, - blocksize: p.SegmentSize, - count: p.SegmentCount, - result: make(chan []byte), - } -} - -// Node is a reuseable segment hasher representing a node in a BMT -// it allows for continued writes after a Sum -// and is left in completely reusable state after Reset -type Node struct { - level, index int // position of node for information/logging only - initial bool // first and last node - root bool // whether the node is root to a smaller BMT - isLeft bool // whether it is left side of the parent double segment - unbalanced bool // indicates if a node has only the left segment - parent *Node // BMT connections - state int32 // atomic increment impl concurrent boolean toggle - left, right []byte -} - -// NewNode constructor for segment hasher nodes in the BMT -func NewNode(level, index int, parent *Node) *Node { - return &Node{ - parent: parent, - level: level, - index: index, - initial: index == 0, - isLeft: index%2 == 0, - } -} - -// TreePool provides a pool of Trees used as resources by Hasher -// a Tree popped from the pool is guaranteed to have clean state -// for hashing a new chunk -// Hasher Reset releases the Tree to the pool -type TreePool struct { - lock sync.Mutex - c chan *Tree - hasher BaseHasher - SegmentSize int - SegmentCount int - Capacity int - count int -} - -// NewTreePool creates a Tree pool with hasher, segment size, segment count and capacity -// on GetTree it reuses free Trees or creates a new one if size is not reached -func NewTreePool(hasher BaseHasher, segmentCount, capacity int) *TreePool { - return &TreePool{ - c: make(chan *Tree, capacity), - hasher: hasher, - SegmentSize: hasher().Size(), - SegmentCount: segmentCount, - Capacity: capacity, - } -} - -// Drain drains the pool until it has no more than n resources -func (p *TreePool) Drain(n int) { - p.lock.Lock() - defer p.lock.Unlock() - for len(p.c) > n { - <-p.c - p.count-- - } -} - -// Reserve is blocking until it returns an available Tree -// it reuses free Trees or creates a new one if size is not reached -func (p *TreePool) Reserve() *Tree { - p.lock.Lock() - defer p.lock.Unlock() - var 
t *Tree - if p.count == p.Capacity { - return <-p.c - } - select { - case t = <-p.c: - default: - t = NewTree(p.hasher, p.SegmentSize, p.SegmentCount) - p.count++ - } - return t -} - -// Release gives back a Tree to the pool. -// This Tree is guaranteed to be in reusable state -// does not need locking -func (p *TreePool) Release(t *Tree) { - p.c <- t // can never fail but... -} - -// Tree is a reusable control structure representing a BMT -// organised in a binary tree -// Hasher uses a TreePool to pick one for each chunk hash -// the Tree is 'locked' while not in the pool -type Tree struct { - leaves []*Node -} - -// Draw draws the BMT (badly) -func (t *Tree) Draw(hash []byte, d int) string { - var left, right []string - var anc []*Node - for i, n := range t.leaves { - left = append(left, fmt.Sprintf("%v", hashstr(n.left))) - if i%2 == 0 { - anc = append(anc, n.parent) - } - right = append(right, fmt.Sprintf("%v", hashstr(n.right))) - } - anc = t.leaves - var hashes [][]string - for l := 0; len(anc) > 0; l++ { - var nodes []*Node - hash := []string{""} - for i, n := range anc { - hash = append(hash, fmt.Sprintf("%v|%v", hashstr(n.left), hashstr(n.right))) - if i%2 == 0 && n.parent != nil { - nodes = append(nodes, n.parent) - } - } - hash = append(hash, "") - hashes = append(hashes, hash) - anc = nodes - } - hashes = append(hashes, []string{"", fmt.Sprintf("%v", hashstr(hash)), ""}) - total := 60 - del := " " - var rows []string - for i := len(hashes) - 1; i >= 0; i-- { - var textlen int - hash := hashes[i] - for _, s := range hash { - textlen += len(s) - } - if total < textlen { - total = textlen + len(hash) - } - delsize := (total - textlen) / (len(hash) - 1) - if delsize > len(del) { - delsize = len(del) - } - row := fmt.Sprintf("%v: %v", len(hashes)-i-1, strings.Join(hash, del[:delsize])) - rows = append(rows, row) - - } - rows = append(rows, strings.Join(left, " ")) - rows = append(rows, strings.Join(right, " ")) - return strings.Join(rows, "\n") + "\n" -} - -// NewTree initialises the Tree by building up the nodes of a BMT -// segment size is stipulated to be the size of the hash -// segmentCount needs to be positive integer and does not need to be -// a power of two and can even be an odd number -// segmentSize * segmentCount determines the maximum chunk size -// hashed using the tree -func NewTree(hasher BaseHasher, segmentSize, segmentCount int) *Tree { - n := NewNode(0, 0, nil) - n.root = true - prevlevel := []*Node{n} - // iterate over levels and creates 2^level nodes - level := 1 - count := 2 - for d := 1; d <= depth(segmentCount); d++ { - nodes := make([]*Node, count) - for i := 0; i < len(nodes); i++ { - parent := prevlevel[i/2] - t := NewNode(level, i, parent) - nodes[i] = t - } - prevlevel = nodes - level++ - count *= 2 - } - // the datanode level is the nodes on the last level where - return &Tree{ - leaves: prevlevel, - } -} - -// methods needed by hash.Hash - -// Size returns the size -func (h *Hasher) Size() int { - return h.size -} - -// BlockSize returns the block size -func (h *Hasher) BlockSize() int { - return h.blocksize -} - -// Sum returns the hash of the buffer -// hash.Hash interface Sum method appends the byte slice to the underlying -// data before it calculates and returns the hash of the chunk -func (h *Hasher) Sum(b []byte) (r []byte) { - t := h.bmt - i := h.cur - n := t.leaves[i] - j := i - // must run strictly before all nodes calculate - // datanodes are guaranteed to have a parent - if len(h.segment) > h.size && i > 0 && n.parent != nil { - n = 
n.parent - } else { - i *= 2 - } - d := h.finalise(n, i) - h.writeSegment(j, h.segment, d) - c := <-h.result - h.releaseTree() - - // sha3(length + BMT(pure_chunk)) - if h.blockLength == nil { - return c - } - res := h.pool.hasher() - res.Reset() - res.Write(h.blockLength) - res.Write(c) - return res.Sum(nil) -} - -// Hasher implements the SwarmHash interface - -// Hash waits for the hasher result and returns it -// caller must call this on a BMT Hasher being written to -func (h *Hasher) Hash() []byte { - return <-h.result -} - -// Hasher implements the io.Writer interface - -// Write fills the buffer to hash -// with every full segment complete launches a hasher go routine -// that shoots up the BMT -func (h *Hasher) Write(b []byte) (int, error) { - l := len(b) - if l <= 0 { - return 0, nil - } - s := h.segment - i := h.cur - count := (h.count + 1) / 2 - need := h.count*h.size - h.cur*2*h.size - size := h.size - if need > size { - size *= 2 - } - if l < need { - need = l - } - // calculate missing bit to complete current open segment - rest := size - len(s) - if need < rest { - rest = need - } - s = append(s, b[:rest]...) - need -= rest - // read full segments and the last possibly partial segment - for need > 0 && i < count-1 { - // push all finished chunks we read - h.writeSegment(i, s, h.depth) - need -= size - if need < 0 { - size += need - } - s = b[rest : rest+size] - rest += size - i++ - } - h.segment = s - h.cur = i - // otherwise, we can assume len(s) == 0, so all buffer is read and chunk is not yet full - return l, nil -} - -// Hasher implements the io.ReaderFrom interface - -// ReadFrom reads from io.Reader and appends to the data to hash using Write -// it reads so that chunk to hash is maximum length or reader reaches EOF -// caller must Reset the hasher prior to call -func (h *Hasher) ReadFrom(r io.Reader) (m int64, err error) { - bufsize := h.size*h.count - h.size*h.cur - len(h.segment) - buf := make([]byte, bufsize) - var read int - for { - var n int - n, err = r.Read(buf) - read += n - if err == io.EOF || read == len(buf) { - hash := h.Sum(buf[:n]) - if read == len(buf) { - err = NewEOC(hash) - } - break - } - if err != nil { - break - } - n, err = h.Write(buf[:n]) - if err != nil { - break - } - } - return int64(read), err -} - -// Reset needs to be called before writing to the hasher -func (h *Hasher) Reset() { - h.getTree() - h.blockLength = nil -} - -// Hasher implements the SwarmHash interface - -// ResetWithLength needs to be called before writing to the hasher -// the argument is supposed to be the byte slice binary representation of -// the length of the data subsumed under the hash -func (h *Hasher) ResetWithLength(l []byte) { - h.Reset() - h.blockLength = l -} - -// Release gives back the Tree to the pool whereby it unlocks -// it resets tree, segment and index -func (h *Hasher) releaseTree() { - if h.bmt != nil { - n := h.bmt.leaves[h.cur] - for ; n != nil; n = n.parent { - n.unbalanced = false - if n.parent != nil { - n.root = false - } - } - h.pool.Release(h.bmt) - h.bmt = nil - - } - h.cur = 0 - h.segment = nil -} - -func (h *Hasher) writeSegment(i int, s []byte, d int) { - hash := h.pool.hasher() - n := h.bmt.leaves[i] - - if len(s) > h.size && n.parent != nil { - go func() { - hash.Reset() - hash.Write(s) - s = hash.Sum(nil) - - if n.root { - h.result <- s - return - } - h.run(n.parent, hash, d, n.index, s) - }() - return - } - go h.run(n, hash, d, i*2, s) -} - -func (h *Hasher) run(n *Node, hash hash.Hash, d int, i int, s []byte) { - isLeft := i%2 == 0 - 
for { - if isLeft { - n.left = s - } else { - n.right = s - } - if !n.unbalanced && n.toggle() { - return - } - if !n.unbalanced || !isLeft || i == 0 && d == 0 { - hash.Reset() - hash.Write(n.left) - hash.Write(n.right) - s = hash.Sum(nil) - - } else { - s = append(n.left, n.right...) - } - - h.hash = s - if n.root { - h.result <- s - return - } - - isLeft = n.isLeft - n = n.parent - i++ - } -} - -// getTree obtains a BMT resource by reserving one from the pool -func (h *Hasher) getTree() *Tree { - if h.bmt != nil { - return h.bmt - } - t := h.pool.Reserve() - h.bmt = t - return t -} - -// atomic bool toggle implementing a concurrent reusable 2-state object - // atomic addint with %2 implements atomic bool toggle -// it returns true if the toggler just put it in the active/waiting state -func (n *Node) toggle() bool { - return atomic.AddInt32(&n.state, 1)%2 == 1 -} - -func hashstr(b []byte) string { - end := len(b) - if end > 4 { - end = 4 - } - return fmt.Sprintf("%x", b[:end]) -} - -func depth(n int) (d int) { - for l := (n - 1) / 2; l > 0; l /= 2 { - d++ - } - return d -} - -// finalise is following the zigzags on the tree belonging -// to the final datasegment -func (h *Hasher) finalise(n *Node, i int) (d int) { - isLeft := i%2 == 0 - for { - // when the final segment's path is going via left segments - // the incoming data is pushed to the parent upon pulling the left - // we do not need toggle the state since this condition is - // detectable - n.unbalanced = isLeft - n.right = nil - if n.initial { - n.root = true - return d - } - isLeft = n.isLeft - n = n.parent - d++ - } -} - -// EOC (end of chunk) implements the error interface -type EOC struct { - Hash []byte // read the hash of the chunk off the error -} - -// Error returns the error string -func (e *EOC) Error() string { - return fmt.Sprintf("hasher limit reached, chunk hash: %x", e.Hash) -} - -// NewEOC creates new end of chunk error with the hash -func NewEOC(hash []byte) *EOC { - return &EOC{hash} -} diff --git a/bmt/bmt_r.go b/bmt/bmt_r.go deleted file mode 100644 index 649093ee3a..0000000000 --- a/bmt/bmt_r.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. - -// simple nonconcurrent reference implementation for hashsize segment based -// Binary Merkle tree hash on arbitrary but fixed maximum chunksize -// -// This implementation does not take advantage of any paralellisms and uses -// far more memory than necessary, but it is easy to see that it is correct. -// It can be used for generating test cases for optimized implementations.
-// see testBMTHasherCorrectness function in bmt_test.go -package bmt - -import ( - "hash" -) - -// RefHasher is the non-optimized easy to read reference implementation of BMT -type RefHasher struct { - span int - section int - cap int - h hash.Hash -} - -// NewRefHasher returns a new RefHasher -func NewRefHasher(hasher BaseHasher, count int) *RefHasher { - h := hasher() - hashsize := h.Size() - maxsize := hashsize * count - c := 2 - for ; c < count; c *= 2 { - } - if c > 2 { - c /= 2 - } - return &RefHasher{ - section: 2 * hashsize, - span: c * hashsize, - cap: maxsize, - h: h, - } -} - -// Hash returns the BMT hash of the byte slice -// implements the SwarmHash interface -func (rh *RefHasher) Hash(d []byte) []byte { - if len(d) > rh.cap { - d = d[:rh.cap] - } - - return rh.hash(d, rh.span) -} - -func (rh *RefHasher) hash(d []byte, s int) []byte { - l := len(d) - left := d - var right []byte - if l > rh.section { - for ; s >= l; s /= 2 { - } - left = rh.hash(d[:s], s) - right = d[s:] - if l-s > rh.section/2 { - right = rh.hash(right, s) - } - } - defer rh.h.Reset() - rh.h.Write(left) - rh.h.Write(right) - h := rh.h.Sum(nil) - return h -} diff --git a/bmt/bmt_test.go b/bmt/bmt_test.go deleted file mode 100644 index 57df83060a..0000000000 --- a/bmt/bmt_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
- -package bmt - -import ( - "bytes" - crand "crypto/rand" - "fmt" - "hash" - "io" - "math/rand" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/crypto/sha3" -) - -const ( - maxproccnt = 8 -) - -// TestRefHasher tests that the RefHasher computes the expected BMT hash for -// all data lengths between 0 and 256 bytes -func TestRefHasher(t *testing.T) { - hashFunc := sha3.NewKeccak256 - - sha3 := func(data ...[]byte) []byte { - h := hashFunc() - for _, v := range data { - h.Write(v) - } - return h.Sum(nil) - } - - // the test struct is used to specify the expected BMT hash for data - // lengths between "from" and "to" - type test struct { - from int64 - to int64 - expected func([]byte) []byte - } - - var tests []*test - - // all lengths in [0,64] should be: - // - // sha3(data) - // - tests = append(tests, &test{ - from: 0, - to: 64, - expected: func(data []byte) []byte { - return sha3(data) - }, - }) - - // all lengths in [65,96] should be: - // - // sha3( - // sha3(data[:64]) - // data[64:] - // ) - // - tests = append(tests, &test{ - from: 65, - to: 96, - expected: func(data []byte) []byte { - return sha3(sha3(data[:64]), data[64:]) - }, - }) - - // all lengths in [97,128] should be: - // - // sha3( - // sha3(data[:64]) - // sha3(data[64:]) - // ) - // - tests = append(tests, &test{ - from: 97, - to: 128, - expected: func(data []byte) []byte { - return sha3(sha3(data[:64]), sha3(data[64:])) - }, - }) - - // all lengths in [129,160] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // data[128:] - // ) - // - tests = append(tests, &test{ - from: 129, - to: 160, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), data[128:]) - }, - }) - - // all lengths in [161,192] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3(data[128:]) - // ) - // - tests = append(tests, &test{ - from: 161, - to: 192, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(data[128:])) - }, - }) - - // all lengths in [193,224] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3( - // sha3(data[128:192]) - // data[192:] - // ) - // ) - // - tests = append(tests, &test{ - from: 193, - to: 224, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), data[192:])) - }, - }) - - // all lengths in [225,256] should be: - // - // sha3( - // sha3( - // sha3(data[:64]) - // sha3(data[64:128]) - // ) - // sha3( - // sha3(data[128:192]) - // sha3(data[192:]) - // ) - // ) - // - tests = append(tests, &test{ - from: 225, - to: 256, - expected: func(data []byte) []byte { - return sha3(sha3(sha3(data[:64]), sha3(data[64:128])), sha3(sha3(data[128:192]), sha3(data[192:]))) - }, - }) - - // run the tests - for _, x := range tests { - for length := x.from; length <= x.to; length++ { - t.Run(fmt.Sprintf("%d_bytes", length), func(t *testing.T) { - data := make([]byte, length) - if _, err := io.ReadFull(crand.Reader, data); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := x.expected(data) - actual := NewRefHasher(hashFunc, 128).Hash(data) - if !bytes.Equal(actual, expected) { - t.Fatalf("expected %x, got %x", expected, actual) - } - }) - } - } -} - -func testDataReader(l int) (r io.Reader) { - return io.LimitReader(crand.Reader, int64(l)) -} - -func TestHasherCorrectness(t *testing.T) { - err := 
testHasher(testBaseHasher) - if err != nil { - t.Fatal(err) - } -} - -func testHasher(f func(BaseHasher, []byte, int, int) error) error { - tdata := testDataReader(4128) - data := make([]byte, 4128) - tdata.Read(data) - hasher := sha3.NewKeccak256 - size := hasher().Size() - counts := []int{1, 2, 3, 4, 5, 8, 16, 32, 64, 128} - - var err error - for _, count := range counts { - max := count * size - incr := 1 - for n := 0; n <= max+incr; n += incr { - err = f(hasher, data, n, count) - if err != nil { - return err - } - } - } - return nil -} - -func TestHasherReuseWithoutRelease(t *testing.T) { - testHasherReuse(1, t) -} - -func TestHasherReuseWithRelease(t *testing.T) { - testHasherReuse(maxproccnt, t) -} - -func testHasherReuse(i int, t *testing.T) { - hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, 128, i) - defer pool.Drain(0) - bmt := New(pool) - - for i := 0; i < 500; i++ { - n := rand.Intn(4096) - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - err := testHasherCorrectness(bmt, hasher, data, n, 128) - if err != nil { - t.Fatal(err) - } - } -} - -func TestHasherConcurrency(t *testing.T) { - hasher := sha3.NewKeccak256 - pool := NewTreePool(hasher, 128, maxproccnt) - defer pool.Drain(0) - wg := sync.WaitGroup{} - cycles := 100 - wg.Add(maxproccnt * cycles) - errc := make(chan error) - - for p := 0; p < maxproccnt; p++ { - for i := 0; i < cycles; i++ { - go func() { - bmt := New(pool) - n := rand.Intn(4096) - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - err := testHasherCorrectness(bmt, hasher, data, n, 128) - wg.Done() - if err != nil { - errc <- err - } - }() - } - } - go func() { - wg.Wait() - close(errc) - }() - var err error - select { - case <-time.NewTimer(5 * time.Second).C: - err = fmt.Errorf("timed out") - case err = <-errc: - } - if err != nil { - t.Fatal(err) - } -} - -func testBaseHasher(hasher BaseHasher, d []byte, n, count int) error { - pool := NewTreePool(hasher, count, 1) - defer pool.Drain(0) - bmt := New(pool) - return testHasherCorrectness(bmt, hasher, d, n, count) -} - -func testHasherCorrectness(bmt hash.Hash, hasher BaseHasher, d []byte, n, count int) (err error) { - data := d[:n] - rbmt := NewRefHasher(hasher, count) - exp := rbmt.Hash(data) - timeout := time.NewTimer(time.Second) - c := make(chan error) - - go func() { - bmt.Reset() - bmt.Write(data) - got := bmt.Sum(nil) - if !bytes.Equal(got, exp) { - c <- fmt.Errorf("wrong hash: expected %x, got %x", exp, got) - } - close(c) - }() - select { - case <-timeout.C: - err = fmt.Errorf("BMT hash calculation timed out") - case err = <-c: - } - return err -} - -func BenchmarkSHA3_4k(t *testing.B) { benchmarkSHA3(4096, t) } -func BenchmarkSHA3_2k(t *testing.B) { benchmarkSHA3(4096/2, t) } -func BenchmarkSHA3_1k(t *testing.B) { benchmarkSHA3(4096/4, t) } -func BenchmarkSHA3_512b(t *testing.B) { benchmarkSHA3(4096/8, t) } -func BenchmarkSHA3_256b(t *testing.B) { benchmarkSHA3(4096/16, t) } -func BenchmarkSHA3_128b(t *testing.B) { benchmarkSHA3(4096/32, t) } - -func BenchmarkBMTBaseline_4k(t *testing.B) { benchmarkBMTBaseline(4096, t) } -func BenchmarkBMTBaseline_2k(t *testing.B) { benchmarkBMTBaseline(4096/2, t) } -func BenchmarkBMTBaseline_1k(t *testing.B) { benchmarkBMTBaseline(4096/4, t) } -func BenchmarkBMTBaseline_512b(t *testing.B) { benchmarkBMTBaseline(4096/8, t) } -func BenchmarkBMTBaseline_256b(t *testing.B) { benchmarkBMTBaseline(4096/16, t) } -func BenchmarkBMTBaseline_128b(t *testing.B) { benchmarkBMTBaseline(4096/32, t) } - -func 
BenchmarkRefHasher_4k(t *testing.B) { benchmarkRefHasher(4096, t) } -func BenchmarkRefHasher_2k(t *testing.B) { benchmarkRefHasher(4096/2, t) } -func BenchmarkRefHasher_1k(t *testing.B) { benchmarkRefHasher(4096/4, t) } -func BenchmarkRefHasher_512b(t *testing.B) { benchmarkRefHasher(4096/8, t) } -func BenchmarkRefHasher_256b(t *testing.B) { benchmarkRefHasher(4096/16, t) } -func BenchmarkRefHasher_128b(t *testing.B) { benchmarkRefHasher(4096/32, t) } - -func BenchmarkHasher_4k(t *testing.B) { benchmarkHasher(4096, t) } -func BenchmarkHasher_2k(t *testing.B) { benchmarkHasher(4096/2, t) } -func BenchmarkHasher_1k(t *testing.B) { benchmarkHasher(4096/4, t) } -func BenchmarkHasher_512b(t *testing.B) { benchmarkHasher(4096/8, t) } -func BenchmarkHasher_256b(t *testing.B) { benchmarkHasher(4096/16, t) } -func BenchmarkHasher_128b(t *testing.B) { benchmarkHasher(4096/32, t) } - -func BenchmarkHasherNoReuse_4k(t *testing.B) { benchmarkHasherReuse(1, 4096, t) } -func BenchmarkHasherNoReuse_2k(t *testing.B) { benchmarkHasherReuse(1, 4096/2, t) } -func BenchmarkHasherNoReuse_1k(t *testing.B) { benchmarkHasherReuse(1, 4096/4, t) } -func BenchmarkHasherNoReuse_512b(t *testing.B) { benchmarkHasherReuse(1, 4096/8, t) } -func BenchmarkHasherNoReuse_256b(t *testing.B) { benchmarkHasherReuse(1, 4096/16, t) } -func BenchmarkHasherNoReuse_128b(t *testing.B) { benchmarkHasherReuse(1, 4096/32, t) } - -func BenchmarkHasherReuse_4k(t *testing.B) { benchmarkHasherReuse(16, 4096, t) } -func BenchmarkHasherReuse_2k(t *testing.B) { benchmarkHasherReuse(16, 4096/2, t) } -func BenchmarkHasherReuse_1k(t *testing.B) { benchmarkHasherReuse(16, 4096/4, t) } -func BenchmarkHasherReuse_512b(t *testing.B) { benchmarkHasherReuse(16, 4096/8, t) } -func BenchmarkHasherReuse_256b(t *testing.B) { benchmarkHasherReuse(16, 4096/16, t) } -func BenchmarkHasherReuse_128b(t *testing.B) { benchmarkHasherReuse(16, 4096/32, t) } - -// benchmarks the minimum hashing time for a balanced (for simplicity) BMT -// by doing count/segmentsize parallel hashings of 2*segmentsize bytes -// doing it on n maxproccnt each reusing the base hasher -// the premise is that this is the minimum computation needed for a BMT -// therefore this serves as a theoretical optimum for concurrent implementations -func benchmarkBMTBaseline(n int, t *testing.B) { - tdata := testDataReader(64) - data := make([]byte, 64) - tdata.Read(data) - hasher := sha3.NewKeccak256 - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - count := int32((n-1)/hasher().Size() + 1) - wg := sync.WaitGroup{} - wg.Add(maxproccnt) - var i int32 - for j := 0; j < maxproccnt; j++ { - go func() { - defer wg.Done() - h := hasher() - for atomic.AddInt32(&i, 1) < count { - h.Reset() - h.Write(data) - h.Sum(nil) - } - }() - } - wg.Wait() - } -} - -func benchmarkHasher(n int, t *testing.B) { - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - size := 1 - hasher := sha3.NewKeccak256 - segmentCount := 128 - pool := NewTreePool(hasher, segmentCount, size) - bmt := New(pool) - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - bmt.Reset() - bmt.Write(data) - bmt.Sum(nil) - } -} - -func benchmarkHasherReuse(poolsize, n int, t *testing.B) { - tdata := testDataReader(n) - data := make([]byte, n) - tdata.Read(data) - - hasher := sha3.NewKeccak256 - segmentCount := 128 - pool := NewTreePool(hasher, segmentCount, poolsize) - cycles := 200 - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - wg := sync.WaitGroup{} - wg.Add(cycles) - for j 
:= 0; j < cycles; j++ { - bmt := New(pool) - go func() { - defer wg.Done() - bmt.Reset() - bmt.Write(data) - bmt.Sum(nil) - }() - } - wg.Wait() - } -} - -func benchmarkSHA3(n int, t *testing.B) { - data := make([]byte, n) - tdata := testDataReader(n) - tdata.Read(data) - hasher := sha3.NewKeccak256 - h := hasher() - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - h.Reset() - h.Write(data) - h.Sum(nil) - } -} - -func benchmarkRefHasher(n int, t *testing.B) { - data := make([]byte, n) - tdata := testDataReader(n) - tdata.Read(data) - hasher := sha3.NewKeccak256 - rbmt := NewRefHasher(hasher, 128) - - t.ReportAllocs() - t.ResetTimer() - for i := 0; i < t.N; i++ { - rbmt.Hash(data) - } -} diff --git a/build/ci.go b/build/ci.go index e721934f30..5939d91e96 100644 --- a/build/ci.go +++ b/build/ci.go @@ -330,6 +330,7 @@ func doLint(cmdline []string) { configs := []string{ "--vendor", "--tests", + "--deadline=2m", "--disable-all", "--enable=goimports", "--enable=varcheck", @@ -338,7 +339,6 @@ func doLint(cmdline []string) { "--enable=misspell", "--enable=goconst", "--min-occurrences=6", // for goconst - "--deadline=10m", } build.MustRunCommand(filepath.Join(GOBIN, "gometalinter.v2"), append(configs, packages...)...) diff --git a/build/goimports.sh b/build/goimports.sh index 6d67ef1f0f..1fcace6a4b 100755 --- a/build/goimports.sh +++ b/build/goimports.sh @@ -1,18 +1,18 @@ -#!/usr/bin/env bash +#!/bin/sh find_files() { - find . -not \( \ + find . ! \( \ \( \ - -wholename '.github' \ - -o -wholename './build/_workspace' \ - -o -wholename './build/bin' \ - -o -wholename './crypto/bn256' \ - -o -wholename '*/vendor/*' \ + -path '.github' \ + -o -path './build/_workspace' \ + -o -path './build/bin' \ + -o -path './crypto/bn256' \ + -o -path '*/vendor/*' \ \) -prune \ \) -name '*.go' } -GOFMT="gofmt -s -w"; -GOIMPORTS="goimports -w"; -find_files | xargs $GOFMT; -find_files | xargs $GOIMPORTS; \ No newline at end of file +GOFMT="gofmt -s -w" +GOIMPORTS="goimports -w" +find_files | xargs $GOFMT +find_files | xargs $GOIMPORTS diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index 3a1ae6f4c3..3ac37ba7ad 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -29,7 +29,7 @@ import ( ) var ( - abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind") + abiFlag = flag.String("abi", "", "Path to the Ethereum contract ABI json to bind, - for STDIN") binFlag = flag.String("bin", "", "Path to the Ethereum contract bytecode (generate deploy method)") typFlag = flag.String("type", "", "Struct name for the binding (default = package name)") @@ -75,16 +75,27 @@ func main() { bins []string types []string ) - if *solFlag != "" { + if *solFlag != "" || *abiFlag == "-" { // Generate the list of types to exclude from binding exclude := make(map[string]bool) for _, kind := range strings.Split(*excFlag, ",") { exclude[strings.ToLower(kind)] = true } - contracts, err := compiler.CompileSolidity(*solcFlag, *solFlag) - if err != nil { - fmt.Printf("Failed to build Solidity contract: %v\n", err) - os.Exit(-1) + + var contracts map[string]*compiler.Contract + var err error + if *solFlag != "" { + contracts, err = compiler.CompileSolidity(*solcFlag, *solFlag) + if err != nil { + fmt.Printf("Failed to build Solidity contract: %v\n", err) + os.Exit(-1) + } + } else { + contracts, err = contractsFromStdin() + if err != nil { + fmt.Printf("Failed to read input ABIs from STDIN: %v\n", err) + os.Exit(-1) + } } // Gather all non-excluded contract for binding for name, contract := range 
contracts { @@ -138,3 +149,12 @@ func main() { os.Exit(-1) } } + +func contractsFromStdin() (map[string]*compiler.Contract, error) { + bytes, err := ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, err + } + + return compiler.ParseCombinedJSON(bytes, "", "", "", "") +} diff --git a/cmd/ethkey/changepassphrase.go b/cmd/ethkey/changepassphrase.go new file mode 100644 index 0000000000..d1ae2ae0d8 --- /dev/null +++ b/cmd/ethkey/changepassphrase.go @@ -0,0 +1,72 @@ +package main + +import ( + "fmt" + "io/ioutil" + "strings" + + "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/cmd/utils" + "gopkg.in/urfave/cli.v1" +) + +var newPassphraseFlag = cli.StringFlag{ + Name: "newpasswordfile", + Usage: "the file that contains the new passphrase for the keyfile", +} + +var commandChangePassphrase = cli.Command{ + Name: "changepassphrase", + Usage: "change the passphrase on a keyfile", + ArgsUsage: "<keyfile>", + Description: ` +Change the passphrase of a keyfile.`, + Flags: []cli.Flag{ + passphraseFlag, + newPassphraseFlag, + }, + Action: func(ctx *cli.Context) error { + keyfilepath := ctx.Args().First() + + // Read key from file. + keyjson, err := ioutil.ReadFile(keyfilepath) + if err != nil { + utils.Fatalf("Failed to read the keyfile at '%s': %v", keyfilepath, err) + } + + // Decrypt key with passphrase. + passphrase := getPassphrase(ctx) + key, err := keystore.DecryptKey(keyjson, passphrase) + if err != nil { + utils.Fatalf("Error decrypting key: %v", err) + } + + // Get a new passphrase. + fmt.Println("Please provide a new passphrase") + var newPhrase string + if passFile := ctx.String(newPassphraseFlag.Name); passFile != "" { + content, err := ioutil.ReadFile(passFile) + if err != nil { + utils.Fatalf("Failed to read new passphrase file '%s': %v", passFile, err) + } + newPhrase = strings.TrimRight(string(content), "\r\n") + } else { + newPhrase = promptPassphrase(true) + } + + // Encrypt the key with the new passphrase. + newJson, err := keystore.EncryptKey(key, newPhrase, keystore.StandardScryptN, keystore.StandardScryptP) + if err != nil { + utils.Fatalf("Error encrypting with new passphrase: %v", err) + } + + // Then write the new keyfile in place of the old one. + if err := ioutil.WriteFile(keyfilepath, newJson, 0600); err != nil { + utils.Fatalf("Error writing new keyfile to disk: %v", err) + } + + // Don't print anything. Just return successfully, + // producing a positive exit code. + return nil + }, +} diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go index 6d57d17fb4..fe9a0c1519 100644 --- a/cmd/ethkey/generate.go +++ b/cmd/ethkey/generate.go @@ -90,7 +90,7 @@ If you want to encrypt an existing private key, it can be specified by setting } // Encrypt key with passphrase. - passphrase := getPassPhrase(ctx, true) + passphrase := promptPassphrase(true) keyjson, err := keystore.EncryptKey(key, passphrase, keystore.StandardScryptN, keystore.StandardScryptP) if err != nil { utils.Fatalf("Error encrypting key: %v", err) } diff --git a/cmd/ethkey/inspect.go b/cmd/ethkey/inspect.go index dbf5afc0ce..ba03d4d936 100644 --- a/cmd/ethkey/inspect.go +++ b/cmd/ethkey/inspect.go @@ -60,7 +60,7 @@ make sure to use this feature with great caution!`, } // Decrypt key with passphrase.
- passphrase := getPassPhrase(ctx, false) + passphrase := getPassphrase(ctx) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) } diff --git a/cmd/ethkey/main.go b/cmd/ethkey/main.go index 4127f5566f..c434da0c05 100644 --- a/cmd/ethkey/main.go +++ b/cmd/ethkey/main.go @@ -38,6 +38,7 @@ func init() { app.Commands = []cli.Command{ commandGenerate, commandInspect, + commandChangePassphrase, commandSignMessage, commandVerifyMessage, } diff --git a/cmd/ethkey/message.go b/cmd/ethkey/message.go index 531a931c82..5caea69ff6 100644 --- a/cmd/ethkey/message.go +++ b/cmd/ethkey/message.go @@ -62,7 +62,7 @@ To sign a message contained in a file, use the --msgfile flag. } // Decrypt key with passphrase. - passphrase := getPassPhrase(ctx, false) + passphrase := getPassphrase(ctx) key, err := keystore.DecryptKey(keyjson, passphrase) if err != nil { utils.Fatalf("Error decrypting key: %v", err) } diff --git a/cmd/ethkey/utils.go b/cmd/ethkey/utils.go index 0e563bf922..6f60ebaf1b 100644 --- a/cmd/ethkey/utils.go +++ b/cmd/ethkey/utils.go @@ -28,11 +28,32 @@ import ( "gopkg.in/urfave/cli.v1" ) -// getPassPhrase obtains a passphrase given by the user. It first checks the -// --passphrase command line flag and ultimately prompts the user for a +// promptPassphrase prompts the user for a passphrase. Set confirmation to true +// to require the user to confirm the passphrase. +func promptPassphrase(confirmation bool) string { + passphrase, err := console.Stdin.PromptPassword("Passphrase: ") + if err != nil { + utils.Fatalf("Failed to read passphrase: %v", err) + } + + if confirmation { + confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ") + if err != nil { + utils.Fatalf("Failed to read passphrase confirmation: %v", err) + } + if passphrase != confirm { + utils.Fatalf("Passphrases do not match") + } + } + + return passphrase +} + +// getPassphrase obtains a passphrase given by the user. It first checks the +// --passwordfile command line flag and ultimately prompts the user for a // passphrase. -func getPassPhrase(ctx *cli.Context, confirmation bool) string { - // Look for the --passphrase flag. +func getPassphrase(ctx *cli.Context) string { + // Look for the --passwordfile flag. passphraseFile := ctx.String(passphraseFlag.Name) if passphraseFile != "" { content, err := ioutil.ReadFile(passphraseFile) @@ -44,20 +65,7 @@ func getPassPhrase(ctx *cli.Context, confirmation bool) string { } // Otherwise prompt the user for the passphrase.
- passphrase, err := console.Stdin.PromptPassword("Passphrase: ") - if err != nil { - utils.Fatalf("Failed to read passphrase: %v", err) - } - if confirmation { - confirm, err := console.Stdin.PromptPassword("Repeat passphrase: ") - if err != nil { - utils.Fatalf("Failed to read passphrase confirmation: %v", err) - } - if passphrase != confirm { - utils.Fatalf("Passphrases do not match") - } - } - return passphrase + return promptPassphrase(false) } // signHash is a helper function that calculates a hash for the given message diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go index 5bad09bbd5..70fd6d1ab6 100644 --- a/cmd/faucet/faucet.go +++ b/cmd/faucet/faucet.go @@ -77,9 +77,6 @@ var ( accJSONFlag = flag.String("account.json", "", "Key json file to fund user requests with") accPassFlag = flag.String("account.pass", "", "Decryption password to access faucet funds") - githubUser = flag.String("github.user", "", "GitHub user to authenticate with for Gist access") - githubToken = flag.String("github.token", "", "GitHub personal token to access Gists with") - captchaToken = flag.String("captcha.token", "", "Recaptcha site key to authenticate client side") captchaSecret = flag.String("captcha.secret", "", "Recaptcha secret key to authenticate server side") @@ -474,7 +471,7 @@ func (f *faucet) apiHandler(conn *websocket.Conn) { amount = new(big.Int).Div(amount, new(big.Int).Exp(big.NewInt(2), big.NewInt(int64(msg.Tier)), nil)) tx := types.NewTransaction(f.nonce+uint64(len(f.reqs)), address, amount, 21000, f.price, nil) - signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainId) + signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) if err != nil { f.lock.Unlock() if err = sendError(conn, err); err != nil { @@ -638,59 +635,6 @@ func sendSuccess(conn *websocket.Conn, msg string) error { return send(conn, map[string]string{"success": msg}, time.Second) } -// authGitHub tries to authenticate a faucet request using GitHub gists, returning -// the username, avatar URL and Ethereum address to fund on success. 
-func authGitHub(url string) (string, string, common.Address, error) { - // Retrieve the gist from the GitHub Gist APIs - parts := strings.Split(url, "/") - req, _ := http.NewRequest("GET", "https://api.github.com/gists/"+parts[len(parts)-1], nil) - if *githubUser != "" { - req.SetBasicAuth(*githubUser, *githubToken) - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return "", "", common.Address{}, err - } - var gist struct { - Owner struct { - Login string `json:"login"` - } `json:"owner"` - Files map[string]struct { - Content string `json:"content"` - } `json:"files"` - } - err = json.NewDecoder(res.Body).Decode(&gist) - res.Body.Close() - if err != nil { - return "", "", common.Address{}, err - } - if gist.Owner.Login == "" { - return "", "", common.Address{}, errors.New("Anonymous Gists not allowed") - } - // Iterate over all the files and look for Ethereum addresses - var address common.Address - for _, file := range gist.Files { - content := strings.TrimSpace(file.Content) - if len(content) == 2+common.AddressLength*2 { - address = common.HexToAddress(content) - } - } - if address == (common.Address{}) { - return "", "", common.Address{}, errors.New("No Ethereum address found to fund") - } - // Validate the user's existence since the API is unhelpful here - if res, err = http.Head("https://github.com/" + gist.Owner.Login); err != nil { - return "", "", common.Address{}, err - } - res.Body.Close() - - if res.StatusCode != 200 { - return "", "", common.Address{}, errors.New("Invalid user... boom!") - } - // Everything passed validation, return the gathered infos - return gist.Owner.Login + "@github", fmt.Sprintf("https://github.com/%s.png?size=64", gist.Owner.Login), address, nil -} - // authTwitter tries to authenticate a faucet request using Twitter posts, returning // the username, avatar URL and Ethereum address to fund on success. func authTwitter(url string) (string, string, common.Address, error) { diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index a00ae00c19..e75b542cbb 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -77,7 +77,7 @@ var customGenesisTests = []struct { "homesteadBlock" : 314, "daoForkBlock" : 141, "daoForkSupport" : true - }, + } }`, query: "eth.getBlock(0).nonce", result: "0x0000000000000042", diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 37f9bd43f4..e1a9b9d04c 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -19,12 +19,16 @@ package main import ( "fmt" + "math" "os" "runtime" + godebug "runtime/debug" "sort" + "strconv" "strings" "time" + "github.com/elastic/gosigar" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" @@ -143,6 +147,15 @@ var ( utils.WhisperMaxMessageSizeFlag, utils.WhisperMinPOWFlag, } + + metricsFlags = []cli.Flag{ + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBHostTagFlag, + } ) func init() { @@ -187,12 +200,32 @@ func init() { app.Flags = append(app.Flags, consoleFlags...) app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, whisperFlags...) + app.Flags = append(app.Flags, metricsFlags...) 
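The app.Before hook just below caps the --cache allowance at a third of total system RAM and then derives Go's garbage-collection trigger percentage from the cache size, clamping the result to the range [20, 100]. A minimal standalone Go sketch of that arithmetic follows; the sample cache sizes are illustrative and not part of this patch:

package main

import (
	"fmt"
	"math"
)

// gcPercent mirrors the clamped formula from app.Before below: a 1024 MB
// cache keeps Go's default 100% trigger, larger caches scale it down
// proportionally, and the result never drops below 20%.
func gcPercent(cacheMB int) int {
	return int(math.Max(20, math.Min(100, 100/(float64(cacheMB)/1024))))
}

func main() {
	// Illustrative sizes only: 2048 MB halves the trigger to 50%,
	// 4096 MB lowers it to 25%, and very large caches clamp at 20%.
	for _, cache := range []int{512, 1024, 2048, 4096, 16384} {
		fmt.Printf("cache %5d MB -> GOGC %d%%\n", cache, gcPercent(cache))
	}
}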
app.Before = func(ctx *cli.Context) error { runtime.GOMAXPROCS(runtime.NumCPU()) if err := debug.Setup(ctx); err != nil { return err } + // Cap the cache allowance and tune the garbage collector + var mem gosigar.Mem + if err := mem.Get(); err == nil { + allowance := int(mem.Total / 1024 / 1024 / 3) + if cache := ctx.GlobalInt(utils.CacheFlag.Name); cache > allowance { + log.Warn("Sanitizing cache to Go's GC limits", "provided", cache, "updated", allowance) + ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(allowance)) + } + } + // Ensure Go's GC ignores the database cache for trigger percentage + cache := ctx.GlobalInt(utils.CacheFlag.Name) + gogc := math.Max(20, math.Min(100, 100/(float64(cache)/1024))) + + log.Debug("Sanitizing Go's GC trigger", "percent", int(gogc)) + godebug.SetGCPercent(int(gogc)) + + // Start metrics export if enabled + utils.SetupMetrics(ctx) + // Start system runtime metrics collection go metrics.CollectProcessMetrics(3 * time.Second) diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index bafc3d48c6..08ca5196f4 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -213,11 +213,22 @@ var AppHelpFlagGroups = []flagGroup{ { Name: "LOGGING AND DEBUGGING", Flags: append([]cli.Flag{ - utils.MetricsEnabledFlag, utils.FakePoWFlag, utils.NoCompactionFlag, }, debug.Flags...), }, + { + Name: "METRICS AND STATS", + Flags: []cli.Flag{ + utils.MetricsEnabledFlag, + utils.MetricsEnableInfluxDBFlag, + utils.MetricsInfluxDBEndpointFlag, + utils.MetricsInfluxDBDatabaseFlag, + utils.MetricsInfluxDBUsernameFlag, + utils.MetricsInfluxDBPasswordFlag, + utils.MetricsInfluxDBHostTagFlag, + }, + }, { Name: "WHISPER (EXPERIMENTAL)", Flags: whisperFlags, diff --git a/cmd/p2psim/main.go b/cmd/p2psim/main.go index 0c8ed038d5..d32c298631 100644 --- a/cmd/p2psim/main.go +++ b/cmd/p2psim/main.go @@ -275,9 +275,8 @@ func createNode(ctx *cli.Context) error { if len(ctx.Args()) != 0 { return cli.ShowCommandHelp(ctx, ctx.Command.Name) } - config := &adapters.NodeConfig{ - Name: ctx.String("name"), - } + config := adapters.RandomNodeConfig() + config.Name = ctx.String("name") if key := ctx.String("key"); key != "" { privKey, err := crypto.HexToECDSA(key) if err != nil { diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go index 1974a94aa2..5f39a889d1 100644 --- a/cmd/puppeth/genesis.go +++ b/cmd/puppeth/genesis.go @@ -103,8 +103,8 @@ func newCppEthereumGenesisSpec(network string, genesis *core.Genesis) (*cppEther spec.Params.ByzantiumForkBlock = (hexutil.Uint64)(genesis.Config.ByzantiumBlock.Uint64()) spec.Params.ConstantinopleForkBlock = (hexutil.Uint64)(math.MaxUint64) - spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64()) - spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64()) + spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) + spec.Params.ChainID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64()) spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit) @@ -284,7 +284,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize) spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit) spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor) - spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64()) + spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainID.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64() spec.Params.EIP98Transition = math.MaxUint64 diff --git a/cmd/puppeth/module_dashboard.go b/cmd/puppeth/module_dashboard.go index 4f9e88899a..a517623381 100644 --- a/cmd/puppeth/module_dashboard.go +++ b/cmd/puppeth/module_dashboard.go @@ -609,7 +609,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da } template.Must(template.New("").Parse(dashboardContent)).Execute(indexfile, map[string]interface{}{ "Network": network, - "NetworkID": conf.Genesis.Config.ChainId, + "NetworkID": conf.Genesis.Config.ChainID, "NetworkTitle": strings.Title(network), "EthstatsPage": config.ethstats, "ExplorerPage": config.explorer, diff --git a/cmd/puppeth/wizard_faucet.go b/cmd/puppeth/wizard_faucet.go index 9a429bc96d..6f08408947 100644 --- a/cmd/puppeth/wizard_faucet.go +++ b/cmd/puppeth/wizard_faucet.go @@ -49,7 +49,7 @@ func (w *wizard) deployFaucet() { existed := err == nil infos.node.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ") - infos.node.network = w.conf.Genesis.Config.ChainId.Int64() + infos.node.network = w.conf.Genesis.Config.ChainID.Int64() // Figure out which port to listen on fmt.Println() diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go index f255ef6e65..6c4cd571fb 100644 --- a/cmd/puppeth/wizard_genesis.go +++ b/cmd/puppeth/wizard_genesis.go @@ -121,7 +121,7 @@ func (w *wizard) makeGenesis() { // Query the user for some custom extras fmt.Println() fmt.Println("Specify your chain/network ID if you want an explicit one (default = random)") - genesis.Config.ChainId = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536)))) + genesis.Config.ChainID = new(big.Int).SetUint64(uint64(w.readDefaultInt(rand.Intn(65536)))) // All done, store the genesis and flush to disk log.Info("Configured new genesis block") diff --git a/cmd/puppeth/wizard_netstats.go b/cmd/puppeth/wizard_netstats.go index 90bf7ae3c8..a307c5ee3a 100644 --- a/cmd/puppeth/wizard_netstats.go +++ b/cmd/puppeth/wizard_netstats.go @@ -276,13 +276,3 @@ func (stats serverStats) render() { } table.Render() } - -// protips contains a collection of network infos to report pro-tips -// based on. -type protips struct { - genesis string - network int64 - bootFull []string - bootLight []string - ethstats string -} diff --git a/cmd/puppeth/wizard_node.go b/cmd/puppeth/wizard_node.go index a60948bc67..9b22da4460 100644 --- a/cmd/puppeth/wizard_node.go +++ b/cmd/puppeth/wizard_node.go @@ -56,7 +56,7 @@ func (w *wizard) deployNode(boot bool) { existed := err == nil infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ") - infos.network = w.conf.Genesis.Config.ChainId.Int64() + infos.network = w.conf.Genesis.Config.ChainID.Int64() // Figure out where the user wants to store the persistent data fmt.Println() @@ -107,7 +107,7 @@ func (w *wizard) deployNode(boot bool) { // Ethash based miners only need an etherbase to mine against fmt.Println() if infos.etherbase == "" { - fmt.Printf("What address should the miner user?\n") + fmt.Printf("What address should the miner use?\n") for { if address := w.readAddress(); address != nil { infos.etherbase = address.Hex() @@ -115,7 +115,7 @@ func (w *wizard) deployNode(boot bool) { } } } else { - fmt.Printf("What address should the miner user? (default = %s)\n", infos.etherbase) + fmt.Printf("What address should the miner use? 
(default = %s)\n", infos.etherbase) infos.etherbase = w.readDefaultAddress(common.HexToAddress(infos.etherbase)).Hex() } } else if w.conf.Genesis.Config.Clique != nil { diff --git a/cmd/puppeth/wizard_wallet.go b/cmd/puppeth/wizard_wallet.go index 933cd9ae59..7624d11e20 100644 --- a/cmd/puppeth/wizard_wallet.go +++ b/cmd/puppeth/wizard_wallet.go @@ -52,7 +52,7 @@ func (w *wizard) deployWallet() { existed := err == nil infos.genesis, _ = json.MarshalIndent(w.conf.Genesis, "", " ") - infos.network = w.conf.Genesis.Config.ChainId.Int64() + infos.network = w.conf.Genesis.Config.ChainID.Int64() // Figure out which port to listen on fmt.Println() diff --git a/cmd/swarm/config.go b/cmd/swarm/config.go index adac772bab..64c37a0b5e 100644 --- a/cmd/swarm/config.go +++ b/cmd/swarm/config.go @@ -24,6 +24,7 @@ import ( "reflect" "strconv" "strings" + "time" "unicode" cli "gopkg.in/urfave/cli.v1" @@ -37,6 +38,8 @@ import ( bzzapi "github.com/ethereum/go-ethereum/swarm/api" ) +const SWARM_VERSION = "0.3" + var ( //flag definition for the dumpconfig command DumpConfigCommand = cli.Command{ @@ -58,19 +61,25 @@ var ( //constants for environment variables const ( - SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" - SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" - SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" - SWARM_ENV_PORT = "SWARM_PORT" - SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" - SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" - SWARM_ENV_SWAP_API = "SWARM_SWAP_API" - SWARM_ENV_SYNC_ENABLE = "SWARM_SYNC_ENABLE" - SWARM_ENV_ENS_API = "SWARM_ENS_API" - SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" - SWARM_ENV_CORS = "SWARM_CORS" - SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" - GETH_ENV_DATADIR = "GETH_DATADIR" + SWARM_ENV_CHEQUEBOOK_ADDR = "SWARM_CHEQUEBOOK_ADDR" + SWARM_ENV_ACCOUNT = "SWARM_ACCOUNT" + SWARM_ENV_LISTEN_ADDR = "SWARM_LISTEN_ADDR" + SWARM_ENV_PORT = "SWARM_PORT" + SWARM_ENV_NETWORK_ID = "SWARM_NETWORK_ID" + SWARM_ENV_SWAP_ENABLE = "SWARM_SWAP_ENABLE" + SWARM_ENV_SWAP_API = "SWARM_SWAP_API" + SWARM_ENV_SYNC_DISABLE = "SWARM_SYNC_DISABLE" + SWARM_ENV_SYNC_UPDATE_DELAY = "SWARM_ENV_SYNC_UPDATE_DELAY" + SWARM_ENV_DELIVERY_SKIP_CHECK = "SWARM_DELIVERY_SKIP_CHECK" + SWARM_ENV_ENS_API = "SWARM_ENS_API" + SWARM_ENV_ENS_ADDR = "SWARM_ENS_ADDR" + SWARM_ENV_CORS = "SWARM_CORS" + SWARM_ENV_BOOTNODES = "SWARM_BOOTNODES" + SWARM_ENV_PSS_ENABLE = "SWARM_PSS_ENABLE" + SWARM_ENV_STORE_PATH = "SWARM_STORE_PATH" + SWARM_ENV_STORE_CAPACITY = "SWARM_STORE_CAPACITY" + SWARM_ENV_STORE_CACHE_CAPACITY = "SWARM_STORE_CACHE_CAPACITY" + GETH_ENV_DATADIR = "GETH_DATADIR" ) // These settings ensure that TOML keys use the same names as Go struct fields. 
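The new sync and delivery knobs above arrive from the environment as plain strings, so the override code that follows has to parse them with strconv.ParseBool and time.ParseDuration, applying the value only when parsing succeeds. A minimal, self-contained sketch of that pattern (illustrative only, not part of the patch; note that the update-delay variable's value really is "SWARM_ENV_SYNC_UPDATE_DELAY", ENV_ prefix included, per the constant block above):

    package main

    import (
        "fmt"
        "os"
        "strconv"
        "time"
    )

    func main() {
        // A boolean toggle: anything strconv.ParseBool accepts ("1", "t", "true", ...)
        // counts; the parsed value is only usable when err == nil.
        if v := os.Getenv("SWARM_SYNC_DISABLE"); v != "" {
            if disable, err := strconv.ParseBool(v); err == nil {
                fmt.Println("syncing enabled:", !disable)
            }
        }
        // A duration setting: parsed with Go's duration syntax ("15s", "2m30s", ...).
        if v := os.Getenv("SWARM_ENV_SYNC_UPDATE_DELAY"); v != "" {
            if d, err := time.ParseDuration(v); err == nil {
                fmt.Println("sync update delay:", d)
            }
        }
    }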
@@ -92,10 +101,8 @@ var tomlSettings = toml.Config{ //before booting the swarm node, build the configuration func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) { - //check for deprecated flags - checkDeprecated(ctx) //start by creating a default config - config = bzzapi.NewDefaultConfig() + config = bzzapi.NewConfig() //first load settings from config file (if provided) config, err = configFileOverride(config, ctx) if err != nil { @@ -168,7 +175,7 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con if networkid := ctx.GlobalString(SwarmNetworkIdFlag.Name); networkid != "" { if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) + currentConfig.NetworkID = uint64(id) } } @@ -191,12 +198,20 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.SwapEnabled = true } - if ctx.GlobalIsSet(SwarmSyncEnabledFlag.Name) { - currentConfig.SyncEnabled = true + if ctx.GlobalIsSet(SwarmSyncDisabledFlag.Name) { + currentConfig.SyncEnabled = false } - currentConfig.SwapApi = ctx.GlobalString(SwarmSwapAPIFlag.Name) - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { + if d := ctx.GlobalDuration(SwarmSyncUpdateDelay.Name); d > 0 { + currentConfig.SyncUpdateDelay = d + } + + if ctx.GlobalIsSet(SwarmDeliverySkipCheckFlag.Name) { + currentConfig.DeliverySkipCheck = true + } + + currentConfig.SwapAPI = ctx.GlobalString(SwarmSwapAPIFlag.Name) + if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" { utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) } @@ -209,10 +224,6 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.EnsAPIs = ensAPIs } - if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" { - currentConfig.EnsRoot = common.HexToAddress(ensaddr) - } - if cors := ctx.GlobalString(CorsStringFlag.Name); cors != "" { currentConfig.Cors = cors } @@ -221,6 +232,18 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con currentConfig.BootNodes = ctx.GlobalString(utils.BootnodesFlag.Name) } + if storePath := ctx.GlobalString(SwarmStorePath.Name); storePath != "" { + currentConfig.LocalStoreParams.ChunkDbPath = storePath + } + + if storeCapacity := ctx.GlobalUint64(SwarmStoreCapacity.Name); storeCapacity != 0 { + currentConfig.LocalStoreParams.DbCapacity = storeCapacity + } + + if storeCacheCapacity := ctx.GlobalUint(SwarmStoreCacheCapacity.Name); storeCacheCapacity != 0 { + currentConfig.LocalStoreParams.CacheCapacity = storeCacheCapacity + } + return currentConfig } @@ -239,7 +262,7 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { if networkid := os.Getenv(SWARM_ENV_NETWORK_ID); networkid != "" { if id, _ := strconv.Atoi(networkid); id != 0 { - currentConfig.NetworkId = uint64(id) + currentConfig.NetworkID = uint64(id) } } @@ -262,17 +285,29 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) { } } - if syncenable := os.Getenv(SWARM_ENV_SYNC_ENABLE); syncenable != "" { - if sync, err := strconv.ParseBool(syncenable); err != nil { - currentConfig.SyncEnabled = sync + if syncdisable := os.Getenv(SWARM_ENV_SYNC_DISABLE); syncdisable != "" { + if sync, err := strconv.ParseBool(syncdisable); err == nil { + currentConfig.SyncEnabled = !sync + } + } + + if v := os.Getenv(SWARM_ENV_DELIVERY_SKIP_CHECK); v != "" { + if skipCheck, err := strconv.ParseBool(v); err == nil { + currentConfig.DeliverySkipCheck = skipCheck + } + } + + if v := 
os.Getenv(SWARM_ENV_SYNC_UPDATE_DELAY); v != "" { + if d, err := time.ParseDuration(v); err == nil { + currentConfig.SyncUpdateDelay = d + } } if swapapi := os.Getenv(SWARM_ENV_SWAP_API); swapapi != "" { - currentConfig.SwapApi = swapapi + currentConfig.SwapAPI = swapapi } - if currentConfig.SwapEnabled && currentConfig.SwapApi == "" { + if currentConfig.SwapEnabled && currentConfig.SwapAPI == "" { utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API) } @@ -312,18 +347,6 @@ func dumpConfig(ctx *cli.Context) error { return nil } -//deprecated flags checked here -func checkDeprecated(ctx *cli.Context) { - // exit if the deprecated --ethapi flag is set - if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" { - utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.") - } - // warn if --ens-api flag is set - if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" { - log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.") - } -} - //validate configuration parameters func validateConfig(cfg *bzzapi.Config) (err error) { for _, ensAPI := range cfg.EnsAPIs { diff --git a/cmd/swarm/config_test.go b/cmd/swarm/config_test.go index 9bf584f50c..d5011e3a70 100644 --- a/cmd/swarm/config_test.go +++ b/cmd/swarm/config_test.go @@ -34,7 +34,7 @@ import ( func TestDumpConfig(t *testing.T) { swarm := runSwarm(t, "dumpconfig") - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() out, err := tomlSettings.Marshal(&defaultConf) if err != nil { t.Fatal(err) @@ -43,7 +43,7 @@ func TestDumpConfig(t *testing.T) { swarm.ExpectExit() } -func TestFailsSwapEnabledNoSwapApi(t *testing.T) { +func TestConfigFailsSwapEnabledNoSwapApi(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", @@ -55,7 +55,7 @@ func TestFailsSwapEnabledNoSwapApi(t *testing.T) { swarm.ExpectExit() } -func TestFailsNoBzzAccount(t *testing.T) { +func TestConfigFailsNoBzzAccount(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), "54545", @@ -66,7 +66,7 @@ func TestFailsNoBzzAccount(t *testing.T) { swarm.ExpectExit() } -func TestCmdLineOverrides(t *testing.T) { +func TestConfigCmdLineOverrides(t *testing.T) { dir, err := ioutil.TempDir("", "bzztest") if err != nil { t.Fatal(err) } @@ -85,9 +85,10 @@ func TestCmdLineOverrides(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "42", fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), + fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), fmt.Sprintf("--%s", CorsStringFlag.Name), "*", fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), + fmt.Sprintf("--%s", SwarmDeliverySkipCheckFlag.Name), fmt.Sprintf("--%s", EnsAPIFlag.Name), "", "--datadir", dir, "--ipcpath", conf.IPCPath, @@ -120,12 +121,16 @@ func TestCmdLineOverrides(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 42 { - t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkId) + if info.NetworkID != 42 { + t.Fatalf("Expected network ID to be %d, got %d", 42, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") + } + + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but it is 
not") } if info.Cors != "*" { @@ -135,7 +140,7 @@ func TestCmdLineOverrides(t *testing.T) { node.Shutdown() } -func TestFileOverrides(t *testing.T) { +func TestConfigFileOverrides(t *testing.T) { // assign ports httpPort, err := assignTCPPort() @@ -145,16 +150,16 @@ func TestFileOverrides(t *testing.T) { //create a config file //first, create a default conf - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() //change some values in order to test if they have been loaded - defaultConf.SyncEnabled = true - defaultConf.NetworkId = 54 + defaultConf.SyncEnabled = false + defaultConf.DeliverySkipCheck = true + defaultConf.NetworkID = 54 defaultConf.Port = httpPort - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 + defaultConf.DbCapacity = 9000000 + defaultConf.HiveParams.KeepAliveInterval = 6000000000 defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 + //defaultConf.SyncParams.KeyBufferSize = 512 //create a TOML string out, err := tomlSettings.Marshal(&defaultConf) if err != nil { @@ -215,38 +220,38 @@ func TestFileOverrides(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 54 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if info.NetworkID != 54 { + t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") } - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") } - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) + if info.DbCapacity != 9000000 { + t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkID) } - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) + if info.HiveParams.KeepAliveInterval != 6000000000 { + t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) } if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) } - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } + // if info.SyncParams.KeyBufferSize != 512 { + // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) + // } node.Shutdown() } -func TestEnvVars(t *testing.T) { +func TestConfigEnvVars(t *testing.T) { // assign ports httpPort, err := assignTCPPort() if err != nil { @@ -257,7 +262,8 @@ func TestEnvVars(t *testing.T) { envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmPortFlag.EnvVar, httpPort)) envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmNetworkIdFlag.EnvVar, "999")) envVars = append(envVars, fmt.Sprintf("%s=%s", CorsStringFlag.EnvVar, "*")) - envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmSyncEnabledFlag.EnvVar, "true")) + envVars = append(envVars, 
fmt.Sprintf("%s=%s", SwarmSyncDisabledFlag.EnvVar, "true")) + envVars = append(envVars, fmt.Sprintf("%s=%s", SwarmDeliverySkipCheckFlag.EnvVar, "true")) dir, err := ioutil.TempDir("", "bzztest") if err != nil { @@ -326,23 +332,27 @@ func TestEnvVars(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != 999 { - t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkId) + if info.NetworkID != 999 { + t.Fatalf("Expected network ID to be %d, got %d", 999, info.NetworkID) } if info.Cors != "*" { t.Fatalf("Expected Cors flag to be set to %s, got %s", "*", info.Cors) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") + } + + if !info.DeliverySkipCheck { + t.Fatal("Expected DeliverySkipCheck to be enabled, but it is not") } node.Shutdown() cmd.Process.Kill() } -func TestCmdLineOverridesFile(t *testing.T) { +func TestConfigCmdLineOverridesFile(t *testing.T) { // assign ports httpPort, err := assignTCPPort() @@ -352,26 +362,27 @@ func TestCmdLineOverridesFile(t *testing.T) { //create a config file //first, create a default conf - defaultConf := api.NewDefaultConfig() + defaultConf := api.NewConfig() //change some values in order to test if they have been loaded - defaultConf.SyncEnabled = false - defaultConf.NetworkId = 54 + defaultConf.SyncEnabled = true + defaultConf.NetworkID = 54 defaultConf.Port = "8588" - defaultConf.StoreParams.DbCapacity = 9000000 - defaultConf.ChunkerParams.Branches = 64 - defaultConf.HiveParams.CallInterval = 6000000000 + defaultConf.DbCapacity = 9000000 + defaultConf.HiveParams.KeepAliveInterval = 6000000000 defaultConf.Swap.Params.Strategy.AutoCashInterval = 600 * time.Second - defaultConf.SyncParams.KeyBufferSize = 512 + //defaultConf.SyncParams.KeyBufferSize = 512 //create a TOML file out, err := tomlSettings.Marshal(&defaultConf) if err != nil { t.Fatalf("Error creating TOML file in TestFileOverride: %v", err) } //write file - f, err := ioutil.TempFile("", "testconfig.toml") + fname := "testconfig.toml" + f, err := ioutil.TempFile("", fname) if err != nil { t.Fatalf("Error writing TOML file in TestFileOverride: %v", err) } + defer os.Remove(fname) //write file _, err = f.WriteString(string(out)) if err != nil { @@ -392,7 +403,7 @@ func TestCmdLineOverridesFile(t *testing.T) { flags := []string{ fmt.Sprintf("--%s", SwarmNetworkIdFlag.Name), "77", fmt.Sprintf("--%s", SwarmPortFlag.Name), httpPort, - fmt.Sprintf("--%s", SwarmSyncEnabledFlag.Name), + fmt.Sprintf("--%s", SwarmSyncDisabledFlag.Name), fmt.Sprintf("--%s", SwarmTomlConfigPathFlag.Name), f.Name(), fmt.Sprintf("--%s", SwarmAccountFlag.Name), account.Address.String(), "--ens-api", "", @@ -427,33 +438,29 @@ func TestCmdLineOverridesFile(t *testing.T) { t.Fatalf("Expected port to be %s, got %s", httpPort, info.Port) } - if info.NetworkId != expectNetworkId { - t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkId) + if info.NetworkID != expectNetworkId { + t.Fatalf("Expected network ID to be %d, got %d", expectNetworkId, info.NetworkID) } - if !info.SyncEnabled { - t.Fatal("Expected Sync to be enabled, but is false") + if info.SyncEnabled { + t.Fatal("Expected Sync to be disabled, but is true") } - if info.StoreParams.DbCapacity != 9000000 { - t.Fatalf("Expected network ID to be %d, got %d", 54, info.NetworkId) + if info.LocalStoreParams.DbCapacity != 9000000 { + t.Fatalf("Expected Capacity to be %d, got %d", 9000000, 
info.LocalStoreParams.DbCapacity) } - if info.ChunkerParams.Branches != 64 { - t.Fatalf("Expected chunker params branches to be %d, got %d", 64, info.ChunkerParams.Branches) - } - - if info.HiveParams.CallInterval != 6000000000 { - t.Fatalf("Expected HiveParams CallInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.CallInterval)) + if info.HiveParams.KeepAliveInterval != 6000000000 { + t.Fatalf("Expected HiveParams KeepAliveInterval to be %d, got %d", uint64(6000000000), uint64(info.HiveParams.KeepAliveInterval)) } if info.Swap.Params.Strategy.AutoCashInterval != 600*time.Second { t.Fatalf("Expected SwapParams AutoCashInterval to be %ds, got %d", 600, info.Swap.Params.Strategy.AutoCashInterval) } - if info.SyncParams.KeyBufferSize != 512 { - t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) - } + // if info.SyncParams.KeyBufferSize != 512 { + // t.Fatalf("Expected info.SyncParams.KeyBufferSize to be %d, got %d", 512, info.SyncParams.KeyBufferSize) + // } node.Shutdown() } diff --git a/cmd/swarm/db.go b/cmd/swarm/db.go index dfd2d069b9..fe03f2d160 100644 --- a/cmd/swarm/db.go +++ b/cmd/swarm/db.go @@ -23,6 +23,7 @@ import ( "path/filepath" "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/swarm/storage" "gopkg.in/urfave/cli.v1" @@ -30,11 +31,11 @@ import ( func dbExport(ctx *cli.Context) { args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to write the tar archive to, - for stdout)") + if len(args) != 3 { + utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to write the tar archive to, - for stdout) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -62,11 +63,11 @@ func dbExport(ctx *cli.Context) { func dbImport(ctx *cli.Context) { args := ctx.Args() - if len(args) != 2 { - utils.Fatalf("invalid arguments, please specify both (path to a local chunk database) and (path to read the tar archive from, - for stdin)") + if len(args) != 3 { + utils.Fatalf("invalid arguments, please specify both (path to a local chunk database), (path to read the tar archive from, - for stdin) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[2])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -94,11 +95,11 @@ func dbImport(ctx *cli.Context) { func dbClean(ctx *cli.Context) { args := ctx.Args() - if len(args) != 1 { - utils.Fatalf("invalid arguments, please specify (path to a local chunk database)") + if len(args) != 2 { + utils.Fatalf("invalid arguments, please specify (path to a local chunk database) and the base key") } - store, err := openDbStore(args[0]) + store, err := openLDBStore(args[0], common.Hex2Bytes(args[1])) if err != nil { utils.Fatalf("error opening local chunk database: %s", err) } @@ -107,10 +108,13 @@ func dbClean(ctx *cli.Context) { store.Cleanup() } -func openDbStore(path string) (*storage.DbStore, error) { +func openLDBStore(path string, basekey []byte) (*storage.LDBStore, error) { if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil { return nil, fmt.Errorf("invalid chunkdb path: %s", err) } - hash := 
storage.MakeHashFunc("SHA3") - return storage.NewDbStore(path, hash, 10000000, 0) + + storeparams := storage.NewDefaultStoreParams() + ldbparams := storage.NewLDBStoreParams(storeparams, path) + ldbparams.BaseKey = basekey + return storage.NewLDBStore(ldbparams) } diff --git a/cmd/swarm/download.go b/cmd/swarm/download.go new file mode 100644 index 0000000000..c2418f744c --- /dev/null +++ b/cmd/swarm/download.go @@ -0,0 +1,85 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/api" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" + "gopkg.in/urfave/cli.v1" +) + +func download(ctx *cli.Context) { + log.Debug("downloading content using swarm down") + args := ctx.Args() + dest := "." + + switch len(args) { + case 0: + utils.Fatalf("Usage: swarm down [options] <bzz locator> [<destination path>]") + case 1: + log.Trace(fmt.Sprintf("swarm down: no destination path - assuming working dir")) + default: + log.Trace(fmt.Sprintf("destination path arg: %s", args[1])) + if absDest, err := filepath.Abs(args[1]); err == nil { + dest = absDest + } else { + utils.Fatalf("could not get download path: %v", err) + } + } + + var ( + bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") + isRecursive = ctx.Bool(SwarmRecursiveFlag.Name) + client = swarm.NewClient(bzzapi) + ) + + if fi, err := os.Stat(dest); err == nil { + if isRecursive && !fi.Mode().IsDir() { + utils.Fatalf("destination path is not a directory!") + } + } else { + if !os.IsNotExist(err) { + utils.Fatalf("could not stat path: %v", err) + } + } + + uri, err := api.Parse(args[0]) + if err != nil { + utils.Fatalf("could not parse uri argument: %v", err) + } + + // assume behaviour according to --recursive switch + if isRecursive { + if err := client.DownloadDirectory(uri.Addr, uri.Path, dest); err != nil { + utils.Fatalf("encountered an error while downloading directory: %v", err) + } + } else { + // we are downloading a file + log.Debug(fmt.Sprintf("downloading file/path from a manifest. hash: %s, path:%s", uri.Addr, uri.Path)) + + err := client.DownloadFile(uri.Addr, uri.Path, dest) + if err != nil { + utils.Fatalf("could not download %s from given address: %s. error: %v", uri.Path, uri.Addr, err) + } + } +} diff --git a/cmd/swarm/export_test.go b/cmd/swarm/export_test.go new file mode 100644 index 0000000000..525538ad75 --- /dev/null +++ b/cmd/swarm/export_test.go @@ -0,0 +1,139 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum.
+// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. + +package main + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "io" + "io/ioutil" + "net/http" + "os" + "strings" + "testing" + + "github.com/ethereum/go-ethereum/swarm" +) + +// TestCLISwarmExportImport performs the following test: +// 1. runs swarm node +// 2. uploads a random file +// 3. runs an export of the local datastore +// 4. runs a second swarm node +// 5. imports the exported datastore +// 6. fetches the uploaded random file from the second node +func TestCLISwarmExportImport(t *testing.T) { + cluster := newTestCluster(t, 1) + + // generate random 10mb file + f, cleanup := generateRandomFile(t, 10000000) + defer cleanup() + + // upload the file with 'swarm up' and expect a hash + up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", f.Name()) + _, matches := up.ExpectRegexp(`[a-f\d]{64}`) + up.ExpectExit() + hash := matches[0] + + var info swarm.Info + if err := cluster.Nodes[0].Client.Call(&info, "bzz_info"); err != nil { + t.Fatal(err) + } + + cluster.Stop() + defer cluster.Cleanup() + + // generate an export.tar + exportCmd := runSwarm(t, "db", "export", info.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info.BzzKey, "0x")) + exportCmd.ExpectExit() + + // start second cluster + cluster2 := newTestCluster(t, 1) + + var info2 swarm.Info + if err := cluster2.Nodes[0].Client.Call(&info2, "bzz_info"); err != nil { + t.Fatal(err) + } + + // stop second cluster, so that we close LevelDB + cluster2.Stop() + defer cluster2.Cleanup() + + // import the export.tar + importCmd := runSwarm(t, "db", "import", info2.Path+"/chunks", info.Path+"/export.tar", strings.TrimPrefix(info2.BzzKey, "0x")) + importCmd.ExpectExit() + + // spin second cluster back up + cluster2.StartExistingNodes(t, 1, strings.TrimPrefix(info2.BzzAccount, "0x")) + + // try to fetch imported file + res, err := http.Get(cluster2.Nodes[0].URL + "/bzz:/" + hash) + if err != nil { + t.Fatal(err) + } + + if res.StatusCode != 200 { + t.Fatalf("expected HTTP status %d, got %s", 200, res.Status) + } + + // compare downloaded file with the generated random file + mustEqualFiles(t, f, res.Body) +} + +func mustEqualFiles(t *testing.T, up io.Reader, down io.Reader) { + h := md5.New() + upLen, err := io.Copy(h, up) + if err != nil { + t.Fatal(err) + } + upHash := h.Sum(nil) + h.Reset() + downLen, err := io.Copy(h, down) + if err != nil { + t.Fatal(err) + } + downHash := h.Sum(nil) + + if !bytes.Equal(upHash, downHash) || upLen != downLen { + t.Fatalf("downloaded imported file md5=%x (length %v) is not the same as the generated one md5=%x (length %v)", downHash, downLen, upHash, upLen) + } +} + +func generateRandomFile(t *testing.T, size int) (f *os.File, teardown func()) { + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + t.Fatal(err) + } + + // callback for tmp file cleanup + teardown = func() { + tmp.Close() + os.Remove(tmp.Name()) + } + + // write 
size bytes of random data to the file + buf := make([]byte, size) + _, err = rand.Read(buf) + if err != nil { + t.Fatal(err) + } + ioutil.WriteFile(tmp.Name(), buf, 0755) + + return tmp, teardown +} diff --git a/cmd/swarm/fs.go b/cmd/swarm/fs.go new file mode 100644 index 0000000000..0124586cfe --- /dev/null +++ b/cmd/swarm/fs.go @@ -0,0 +1,127 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. + +package main + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/swarm/fuse" + "gopkg.in/urfave/cli.v1" +) + +func mount(cliContext *cli.Context) { + args := cliContext.Args() + if len(args) < 2 { + utils.Fatalf("Usage: swarm fs mount --ipcpath <path to bzzd.ipc> <manifest hash> <mount point>") + } + + client, err := dialRPC(cliContext) + if err != nil { + utils.Fatalf("had an error dialing to RPC endpoint: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + mf := &fuse.MountInfo{} + mountPoint, err := filepath.Abs(filepath.Clean(args[1])) + if err != nil { + utils.Fatalf("error expanding path for mount point: %v", err) + } + err = client.CallContext(ctx, mf, "swarmfs_mount", args[0], mountPoint) + if err != nil { + utils.Fatalf("had an error calling the RPC endpoint while mounting: %v", err) + } +} + +func unmount(cliContext *cli.Context) { + args := cliContext.Args() + + if len(args) < 1 { + utils.Fatalf("Usage: swarm fs unmount --ipcpath <path to bzzd.ipc> <mount point>") + } + client, err := dialRPC(cliContext) + if err != nil { + utils.Fatalf("had an error dialing to RPC endpoint: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + mf := fuse.MountInfo{} + err = client.CallContext(ctx, &mf, "swarmfs_unmount", args[0]) + if err != nil { + utils.Fatalf("encountered an error calling the RPC endpoint while unmounting: %v", err) + } + fmt.Printf("%s\n", mf.LatestManifest) //print the latest manifest hash for user reference +} + +func listMounts(cliContext *cli.Context) { + client, err := dialRPC(cliContext) + if err != nil { + utils.Fatalf("had an error dialing to RPC endpoint: %v", err) + } + defer client.Close() + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + + mf := []fuse.MountInfo{} + err = client.CallContext(ctx, &mf, "swarmfs_listmounts") + if err != nil { + utils.Fatalf("encountered an error calling the RPC endpoint while listing mounts: %v", err) + } + if len(mf) == 0 { + fmt.Print("Could not find any swarmfs mounts. 
Please make sure you've specified the correct RPC endpoint\n") + } else { + fmt.Printf("Found %d swarmfs mount(s):\n", len(mf)) + for i, mountInfo := range mf { + fmt.Printf("%d:\n", i) + fmt.Printf("\tMount point: %s\n", mountInfo.MountPoint) + fmt.Printf("\tLatest Manifest: %s\n", mountInfo.LatestManifest) + fmt.Printf("\tStart Manifest: %s\n", mountInfo.StartManifest) + } + } +} + +func dialRPC(ctx *cli.Context) (*rpc.Client, error) { + var endpoint string + + if ctx.IsSet(utils.IPCPathFlag.Name) { + endpoint = ctx.String(utils.IPCPathFlag.Name) + } else { + utils.Fatalf("swarm ipc endpoint not specified") + } + + if endpoint == "" { + endpoint = node.DefaultIPCEndpoint(clientIdentifier) + } else if strings.HasPrefix(endpoint, "rpc:") || strings.HasPrefix(endpoint, "ipc:") { + // Backwards compatibility with geth < 1.5 which required + // these prefixes. + endpoint = endpoint[4:] + } + return rpc.Dial(endpoint) +} diff --git a/cmd/swarm/fs_test.go b/cmd/swarm/fs_test.go new file mode 100644 index 0000000000..25705c0a49 --- /dev/null +++ b/cmd/swarm/fs_test.go @@ -0,0 +1,234 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. + +package main + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + colorable "github.com/mattn/go-colorable" +) + +func init() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) +} + +type testFile struct { + filePath string + content string +} + +// TestCLISwarmFs is a high-level test of swarmfs +func TestCLISwarmFs(t *testing.T) { + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + // create a tmp dir + mountPoint, err := ioutil.TempDir("", "swarm-test") + log.Debug("swarmfs cli test", "1st mount", mountPoint) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(mountPoint) + + handlingNode := cluster.Nodes[0] + mhash := doUploadEmptyDir(t, handlingNode) + log.Debug("swarmfs cli test: mounting first run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + mount := runSwarm(t, []string{ + "fs", + "mount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + mhash, + mountPoint, + }...)
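+ // runSwarm drives the swarm CLI binary through cmdtest; the ExpectExit call below blocks until the process exits, i.e. until the synchronous swarmfs_mount RPC issued by the 'fs mount' command has returned.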
+ mount.ExpectExit() + + filesToAssert := []*testFile{} + + dirPath, err := createDirInDir(mountPoint, "testSubDir") + if err != nil { + t.Fatal(err) + } + dirPath2, err := createDirInDir(dirPath, "AnotherTestSubDir") + + dummyContent := "somerandomtestcontentthatshouldbeasserted" + dirs := []string{ + mountPoint, + dirPath, + dirPath2, + } + files := []string{"f1.tmp", "f2.tmp"} + for _, d := range dirs { + for _, entry := range files { + tFile, err := createTestFileInPath(d, entry, dummyContent) + if err != nil { + t.Fatal(err) + } + filesToAssert = append(filesToAssert, tFile) + } + } + if len(filesToAssert) != len(dirs)*len(files) { + t.Fatalf("should have %d files to assert now, got %d", len(dirs)*len(files), len(filesToAssert)) + } + hashRegexp := `[a-f\d]{64}` + log.Debug("swarmfs cli test: unmounting first run...", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + unmount := runSwarm(t, []string{ + "fs", + "unmount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + mountPoint, + }...) + _, matches := unmount.ExpectRegexp(hashRegexp) + unmount.ExpectExit() + + hash := matches[0] + if hash == mhash { + t.Fatal("this should not be equal") + } + log.Debug("swarmfs cli test: asserting no files in mount point") + + //check that there's nothing in the mount folder + filesInDir, err := ioutil.ReadDir(mountPoint) + if err != nil { + t.Fatalf("had an error reading the directory: %v", err) + } + + if len(filesInDir) != 0 { + t.Fatal("there shouldn't be anything here") + } + + secondMountPoint, err := ioutil.TempDir("", "swarm-test") + log.Debug("swarmfs cli test", "2nd mount point at", secondMountPoint) + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(secondMountPoint) + + log.Debug("swarmfs cli test: remounting at second mount point", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + //remount, check files + newMount := runSwarm(t, []string{ + "fs", + "mount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + hash, // the latest hash + secondMountPoint, + }...) + + newMount.ExpectExit() + time.Sleep(1 * time.Second) + + filesInDir, err = ioutil.ReadDir(secondMountPoint) + if err != nil { + t.Fatal(err) + } + + if len(filesInDir) == 0 { + t.Fatal("there should be something here") + } + + log.Debug("swarmfs cli test: traversing file tree to see it matches previous mount") + + for _, file := range filesToAssert { + file.filePath = strings.Replace(file.filePath, mountPoint, secondMountPoint, -1) + fileBytes, err := ioutil.ReadFile(file.filePath) + + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(fileBytes, bytes.NewBufferString(file.content).Bytes()) { + t.Fatal("this should be equal") + } + } + + log.Debug("swarmfs cli test: unmounting second run", "ipc path", filepath.Join(handlingNode.Dir, handlingNode.IpcPath)) + + unmountSec := runSwarm(t, []string{ + "fs", + "unmount", + "--ipcpath", filepath.Join(handlingNode.Dir, handlingNode.IpcPath), + secondMountPoint, + }...) 
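+ // 'fs unmount' prints the latest manifest hash on stdout (see fs.go above); since nothing was written to the second mount, it should equal the hash captured after the first unmount, which the check below asserts.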
+ + _, matches = unmountSec.ExpectRegexp(hashRegexp) + unmountSec.ExpectExit() + + if matches[0] != hash { + t.Fatal("these should be equal - no changes made") + } +} + +func doUploadEmptyDir(t *testing.T, node *testNode) string { + // create a tmp dir + tmpDir, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDir) + + hashRegexp := `[a-f\d]{64}` + + flags := []string{ + "--bzzapi", node.URL, + "--recursive", + "up", + tmpDir} + + log.Info("swarmfs cli test: uploading dir with 'swarm up'") + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + log.Info("swarmfs cli test: dir uploaded", "hash", hash) + return hash +} + +func createDirInDir(createInDir string, dirToCreate string) (string, error) { + fullpath := filepath.Join(createInDir, dirToCreate) + err := os.MkdirAll(fullpath, 0777) + if err != nil { + return "", err + } + return fullpath, nil +} + +func createTestFileInPath(dir, filename, content string) (*testFile, error) { + tFile := &testFile{} + filePath := filepath.Join(dir, filename) + if file, err := os.Create(filePath); err == nil { + tFile.content = content + tFile.filePath = filePath + + _, err = io.WriteString(file, content) + if err != nil { + return nil, err + } + file.Close() + } + + return tFile, nil +} diff --git a/cmd/swarm/hash.go b/cmd/swarm/hash.go index 792e8d0d7a..c82456b3cd 100644 --- a/cmd/swarm/hash.go +++ b/cmd/swarm/hash.go @@ -38,11 +38,11 @@ func hash(ctx *cli.Context) { defer f.Close() stat, _ := f.Stat() - chunker := storage.NewTreeChunker(storage.NewChunkerParams()) - key, err := chunker.Split(f, stat.Size(), nil, nil, nil) + fileStore := storage.NewFileStore(storage.NewMapChunkStore(), storage.NewFileStoreParams()) + addr, _, err := fileStore.Store(f, stat.Size(), false) if err != nil { utils.Fatalf("%v\n", err) } else { - fmt.Printf("%v\n", key) + fmt.Printf("%v\n", addr) } } diff --git a/cmd/swarm/main.go b/cmd/swarm/main.go index 360020b77b..9877e9150d 100644 --- a/cmd/swarm/main.go +++ b/cmd/swarm/main.go @@ -34,7 +34,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/console" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -49,6 +48,22 @@ import ( ) const clientIdentifier = "swarm" +const helpTemplate = `NAME: +{{.HelpName}} - {{.Usage}} + +USAGE: +{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}{{if .Category}} + +CATEGORY: +{{.Category}}{{end}}{{if .Description}} + +DESCRIPTION: +{{.Description}}{{end}}{{if .VisibleFlags}} + +OPTIONS: +{{range .VisibleFlags}}{{.}} +{{end}}{{end}} +` var ( gitCommit string // Git SHA1 commit hash of the release (set via linker flags) @@ -87,10 +102,6 @@ var ( Usage: "Network identifier (integer, default 3=swarm testnet)", EnvVar: SWARM_ENV_NETWORK_ID, } - SwarmConfigPathFlag = cli.StringFlag{ - Name: "bzzconfig", - Usage: "DEPRECATED: please use --config path/to/TOML-file", - } SwarmSwapEnabledFlag = cli.BoolFlag{ Name: "swap", Usage: "Swarm SWAP enabled (default false)", @@ -101,10 +112,20 @@ var ( Usage: "URL of the Ethereum API provider to use to settle SWAP payments", EnvVar: SWARM_ENV_SWAP_API, } - SwarmSyncEnabledFlag = cli.BoolTFlag{ - Name: "sync", - Usage: "Swarm Syncing enabled 
(default true)", - EnvVar: SWARM_ENV_SYNC_ENABLE, + SwarmSyncDisabledFlag = cli.BoolTFlag{ + Name: "nosync", + Usage: "Disable swarm syncing", + EnvVar: SWARM_ENV_SYNC_DISABLE, + } + SwarmSyncUpdateDelay = cli.DurationFlag{ + Name: "sync-update-delay", + Usage: "Duration for sync subscriptions update after no new peers are added (default 15s)", + EnvVar: SWARM_ENV_SYNC_UPDATE_DELAY, + } + SwarmDeliverySkipCheckFlag = cli.BoolFlag{ + Name: "delivery-skip-check", + Usage: "Skip chunk delivery check (default false)", + EnvVar: SWARM_ENV_DELIVERY_SKIP_CHECK, } EnsAPIFlag = cli.StringSliceFlag{ Name: "ens-api", @@ -116,7 +137,7 @@ var ( Usage: "Swarm HTTP endpoint", Value: "http://127.0.0.1:8500", } - SwarmRecursiveUploadFlag = cli.BoolFlag{ + SwarmRecursiveFlag = cli.BoolFlag{ Name: "recursive", Usage: "Upload directories recursively", } @@ -136,20 +157,29 @@ var ( Name: "mime", Usage: "force mime type", } + SwarmEncryptedFlag = cli.BoolFlag{ + Name: "encrypt", + Usage: "use encrypted upload", + } CorsStringFlag = cli.StringFlag{ Name: "corsdomain", Usage: "Domain on which to send Access-Control-Allow-Origin header (multiple domains can be supplied separated by a ',')", EnvVar: SWARM_ENV_CORS, } - - // the following flags are deprecated and should be removed in the future - DeprecatedEthAPIFlag = cli.StringFlag{ - Name: "ethapi", - Usage: "DEPRECATED: please use --ens-api and --swap-api", + SwarmStorePath = cli.StringFlag{ + Name: "store.path", + Usage: "Path to leveldb chunk DB (default <$GETH_ENV_DIR>/swarm/bzz-<$BZZ_KEY>/chunks)", + EnvVar: SWARM_ENV_STORE_PATH, } - DeprecatedEnsAddrFlag = cli.StringFlag{ - Name: "ens-addr", - Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format", + SwarmStoreCapacity = cli.Uint64Flag{ + Name: "store.size", + Usage: "Number of chunks (5M is roughly 20-25GB) (default 5000000)", + EnvVar: SWARM_ENV_STORE_CAPACITY, + } + SwarmStoreCacheCapacity = cli.UintFlag{ + Name: "store.cache.size", + Usage: "Number of recent chunks cached in memory (default 5000)", + EnvVar: SWARM_ENV_STORE_CACHE_CAPACITY, } ) @@ -180,91 +210,130 @@ func init() { app.Copyright = "Copyright 2013-2016 The go-ethereum Authors" app.Commands = []cli.Command{ { - Action: version, - Name: "version", - Usage: "Print version numbers", - ArgsUsage: " ", - Description: ` -The output of this command is supposed to be machine-readable. -`, + Action: version, + CustomHelpTemplate: helpTemplate, + Name: "version", + Usage: "Print version numbers", + Description: "The output of this command is supposed to be machine-readable", }, { - Action: upload, - Name: "up", - Usage: "upload a file or directory to swarm using the HTTP API", - ArgsUsage: " ", - Description: ` -"upload a file or directory to swarm using the HTTP API and prints the root hash", -`, + Action: upload, + CustomHelpTemplate: helpTemplate, + Name: "up", + Usage: "uploads a file or directory to swarm using the HTTP API", + ArgsUsage: "", + Flags: []cli.Flag{SwarmEncryptedFlag}, + Description: "uploads a file or directory to swarm using the HTTP API and prints the root hash", }, { - Action: list, - Name: "ls", - Usage: "list files and directories contained in a manifest", - ArgsUsage: " []", - Description: ` -Lists files and directories contained in a manifest. 
-`, + Action: list, + CustomHelpTemplate: helpTemplate, + Name: "ls", + Usage: "list files and directories contained in a manifest", + ArgsUsage: " []", + Description: "Lists files and directories contained in a manifest", }, { - Action: hash, - Name: "hash", - Usage: "print the swarm hash of a file or directory", - ArgsUsage: " ", - Description: ` -Prints the swarm hash of file or directory. -`, + Action: hash, + CustomHelpTemplate: helpTemplate, + Name: "hash", + Usage: "print the swarm hash of a file or directory", + ArgsUsage: "", + Description: "Prints the swarm hash of file or directory", }, { - Name: "manifest", - Usage: "update a MANIFEST", - ArgsUsage: "manifest COMMAND", + Action: download, + Name: "down", + Flags: []cli.Flag{SwarmRecursiveFlag}, + Usage: "downloads a swarm manifest or a file inside a manifest", + ArgsUsage: " []", Description: ` -Updates a MANIFEST by adding/removing/updating the hash of a path. +Downloads a swarm bzz uri to the given dir. When no dir is provided, working directory is assumed. --recursive flag is expected when downloading a manifest with multiple entries. `, + }, + + { + Name: "manifest", + CustomHelpTemplate: helpTemplate, + Usage: "perform operations on swarm manifests", + ArgsUsage: "COMMAND", + Description: "Updates a MANIFEST by adding/removing/updating the hash of a path.\nCOMMAND could be: add, update, remove", Subcommands: []cli.Command{ { - Action: add, - Name: "add", - Usage: "add a new path to the manifest", - ArgsUsage: " []", - Description: ` -Adds a new path to the manifest -`, + Action: add, + CustomHelpTemplate: helpTemplate, + Name: "add", + Usage: "add a new path to the manifest", + ArgsUsage: " []", + Description: "Adds a new path to the manifest", }, { - Action: update, - Name: "update", - Usage: "update the hash for an already existing path in the manifest", - ArgsUsage: " []", - Description: ` -Update the hash for an already existing path in the manifest -`, + Action: update, + CustomHelpTemplate: helpTemplate, + Name: "update", + Usage: "update the hash for an already existing path in the manifest", + ArgsUsage: " []", + Description: "Update the hash for an already existing path in the manifest", }, { - Action: remove, - Name: "remove", - Usage: "removes a path from the manifest", - ArgsUsage: " ", - Description: ` -Removes a path from the manifest -`, + Action: remove, + CustomHelpTemplate: helpTemplate, + Name: "remove", + Usage: "removes a path from the manifest", + ArgsUsage: " ", + Description: "Removes a path from the manifest", }, }, }, { - Name: "db", - Usage: "manage the local chunk database", - ArgsUsage: "db COMMAND", - Description: ` -Manage the local chunk database. -`, + Name: "fs", + CustomHelpTemplate: helpTemplate, + Usage: "perform FUSE operations", + ArgsUsage: "fs COMMAND", + Description: "Performs FUSE operations by mounting/unmounting/listing mount points. This assumes you already have a Swarm node running locally. For all operation you must reference the correct path to bzzd.ipc in order to communicate with the node", Subcommands: []cli.Command{ { - Action: dbExport, - Name: "export", - Usage: "export a local chunk database as a tar archive (use - to send to stdout)", - ArgsUsage: " ", + Action: mount, + CustomHelpTemplate: helpTemplate, + Name: "mount", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "mount a swarm hash to a mount point", + ArgsUsage: "swarm fs mount --ipcpath ", + Description: "Mounts a Swarm manifest hash to a given mount point. 
This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: unmount, + CustomHelpTemplate: helpTemplate, + Name: "unmount", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "unmount a swarmfs mount", + ArgsUsage: "swarm fs unmount --ipcpath ", + Description: "Unmounts a swarmfs mount residing at . This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + { + Action: listMounts, + CustomHelpTemplate: helpTemplate, + Name: "list", + Flags: []cli.Flag{utils.IPCPathFlag}, + Usage: "list swarmfs mounts", + ArgsUsage: "swarm fs list --ipcpath ", + Description: "Lists all mounted swarmfs volumes. This assumes you already have a Swarm node running locally. You must reference the correct path to your bzzd.ipc file", + }, + }, + }, + { + Name: "db", + CustomHelpTemplate: helpTemplate, + Usage: "manage the local chunk database", + ArgsUsage: "db COMMAND", + Description: "Manage the local chunk database", + Subcommands: []cli.Command{ + { + Action: dbExport, + CustomHelpTemplate: helpTemplate, + Name: "export", + Usage: "export a local chunk database as a tar archive (use - to send to stdout)", + ArgsUsage: " ", Description: ` Export a local chunk database as a tar archive (use - to send to stdout). @@ -277,10 +346,11 @@ pv(1) tool to get a progress bar: `, }, { - Action: dbImport, - Name: "import", - Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", - ArgsUsage: " ", + Action: dbImport, + CustomHelpTemplate: helpTemplate, + Name: "import", + Usage: "import chunks from a tar archive into a local chunk database (use - to read from stdin)", + ArgsUsage: " ", Description: ` Import chunks from a tar archive into a local chunk database (use - to read from stdin). @@ -293,27 +363,16 @@ pv(1) tool to get a progress bar: `, }, { - Action: dbClean, - Name: "clean", - Usage: "remove corrupt entries from a local chunk database", - ArgsUsage: "", - Description: ` -Remove corrupt entries from a local chunk database. -`, + Action: dbClean, + CustomHelpTemplate: helpTemplate, + Name: "clean", + Usage: "remove corrupt entries from a local chunk database", + ArgsUsage: "", + Description: "Remove corrupt entries from a local chunk database", }, }, }, - { - Action: func(ctx *cli.Context) { - utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.") - }, - Name: "cleandb", - Usage: "DEPRECATED: use 'swarm db clean'", - ArgsUsage: " ", - Description: ` -DEPRECATED: use 'swarm db clean'. -`, - }, + // See config.go DumpConfigCommand, } @@ -339,10 +398,11 @@ DEPRECATED: use 'swarm db clean'. CorsStringFlag, EnsAPIFlag, SwarmTomlConfigPathFlag, - SwarmConfigPathFlag, SwarmSwapEnabledFlag, SwarmSwapAPIFlag, - SwarmSyncEnabledFlag, + SwarmSyncDisabledFlag, + SwarmSyncUpdateDelay, + SwarmDeliverySkipCheckFlag, SwarmListenAddrFlag, SwarmPortFlag, SwarmAccountFlag, @@ -350,15 +410,24 @@ DEPRECATED: use 'swarm db clean'. 
ChequebookAddrFlag, // upload flags SwarmApiFlag, - SwarmRecursiveUploadFlag, + SwarmRecursiveFlag, SwarmWantManifestFlag, SwarmUploadDefaultPath, SwarmUpFromStdinFlag, SwarmUploadMimeType, - //deprecated flags - DeprecatedEthAPIFlag, - DeprecatedEnsAddrFlag, + // storage flags + SwarmStorePath, + SwarmStoreCapacity, + SwarmStoreCacheCapacity, } + rpcFlags := []cli.Flag{ + utils.WSEnabledFlag, + utils.WSListenAddrFlag, + utils.WSPortFlag, + utils.WSApiFlag, + utils.WSAllowedOriginsFlag, + } + app.Flags = append(app.Flags, rpcFlags...) app.Flags = append(app.Flags, debug.Flags...) app.Flags = append(app.Flags, swarmmetrics.Flags...) app.Before = func(ctx *cli.Context) error { @@ -383,16 +452,12 @@ func main() { } func version(ctx *cli.Context) error { - fmt.Println(strings.Title(clientIdentifier)) - fmt.Println("Version:", params.Version) + fmt.Println("Version:", SWARM_VERSION) if gitCommit != "" { fmt.Println("Git Commit:", gitCommit) } - fmt.Println("Network Id:", ctx.GlobalInt(utils.NetworkIdFlag.Name)) fmt.Println("Go Version:", runtime.Version()) fmt.Println("OS:", runtime.GOOS) - fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) - fmt.Printf("GOROOT=%s\n", runtime.GOROOT()) return nil } @@ -405,6 +470,10 @@ func bzzd(ctx *cli.Context) error { } cfg := defaultNodeConfig + + //pss operates on ws + cfg.WSModules = append(cfg.WSModules, "pss") + //geth only supports --datadir via command line //in order to be consistent within swarm, if we pass --datadir via environment variable //or via config file, we get the same directory for geth and swarm @@ -421,7 +490,7 @@ func bzzd(ctx *cli.Context) error { //due to overriding behavior initSwarmNode(bzzconfig, stack, ctx) //register BZZ as node.Service in the ethereum node - registerBzzService(bzzconfig, ctx, stack) + registerBzzService(bzzconfig, stack) //start the node utils.StartNode(stack) @@ -439,7 +508,7 @@ func bzzd(ctx *cli.Context) error { bootnodes := strings.Split(bzzconfig.BootNodes, ",") injectBootnodes(stack.Server(), bootnodes) } else { - if bzzconfig.NetworkId == 3 { + if bzzconfig.NetworkID == 3 { injectBootnodes(stack.Server(), testbetBootNodes) } } @@ -448,21 +517,11 @@ func bzzd(ctx *cli.Context) error { return nil } -func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) { - +func registerBzzService(bzzconfig *bzzapi.Config, stack *node.Node) { //define the swarm service boot function - boot := func(ctx *node.ServiceContext) (node.Service, error) { - var swapClient *ethclient.Client - var err error - if bzzconfig.SwapApi != "" { - log.Info("connecting to SWAP API", "url", bzzconfig.SwapApi) - swapClient, err = ethclient.Dial(bzzconfig.SwapApi) - if err != nil { - return nil, fmt.Errorf("error connecting to SWAP API %s: %s", bzzconfig.SwapApi, err) - } - } - - return swarm.NewSwarm(ctx, swapClient, bzzconfig) + boot := func(_ *node.ServiceContext) (node.Service, error) { + // In production, mockStore must be always nil. 
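+ // (The nil second argument is the mockStore hook referred to above: tests can inject a mock chunk store there, while production code passes nil and gets the real on-disk store.)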
+ return swarm.NewSwarm(bzzconfig, nil) } //register within the ethereum node if err := stack.Register(boot); err != nil { diff --git a/cmd/swarm/manifest.go b/cmd/swarm/manifest.go index 41a69a5d05..82166edf6c 100644 --- a/cmd/swarm/manifest.go +++ b/cmd/swarm/manifest.go @@ -131,13 +131,13 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } //TODO: check if the "hash" to add is valid and present in swarm - _, err = client.DownloadManifest(hash) + _, _, err = client.DownloadManifest(hash) if err != nil { utils.Fatalf("Hash to add is not present: %v", err) } @@ -180,7 +180,7 @@ func addEntryToManifest(ctx *cli.Context, mhash, path, hash, ctype string) strin mroot.Entries = append(mroot.Entries, newEntry) } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } @@ -197,7 +197,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } @@ -257,7 +257,7 @@ func updateEntryInManifest(ctx *cli.Context, mhash, path, hash, ctype string) st mroot = newMRoot } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } @@ -273,7 +273,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { longestPathEntry = api.ManifestEntry{} ) - mroot, err := client.DownloadManifest(mhash) + mroot, isEncrypted, err := client.DownloadManifest(mhash) if err != nil { utils.Fatalf("Manifest download failed: %v", err) } @@ -323,7 +323,7 @@ func removeEntryFromManifest(ctx *cli.Context, mhash, path string) string { mroot = newMRoot } - newManifestHash, err := client.UploadManifest(mroot) + newManifestHash, err := client.UploadManifest(mroot, isEncrypted) if err != nil { utils.Fatalf("Manifest upload failed: %v", err) } diff --git a/cmd/swarm/run_test.go b/cmd/swarm/run_test.go index 594cfa55cb..a70c4686dd 100644 --- a/cmd/swarm/run_test.go +++ b/cmd/swarm/run_test.go @@ -81,6 +81,7 @@ type testCluster struct { // // When starting more than one node, they are connected together using the // admin SetPeer RPC method. 
+ func newTestCluster(t *testing.T, size int) *testCluster { cluster := &testCluster{} defer func() { @@ -96,18 +97,7 @@ func newTestCluster(t *testing.T, size int) *testCluster { cluster.TmpDir = tmpdir // start the nodes - cluster.Nodes = make([]*testNode, 0, size) - for i := 0; i < size; i++ { - dir := filepath.Join(cluster.TmpDir, fmt.Sprintf("swarm%02d", i)) - if err := os.Mkdir(dir, 0700); err != nil { - t.Fatal(err) - } - - node := newTestNode(t, dir) - node.Name = fmt.Sprintf("swarm%02d", i) - - cluster.Nodes = append(cluster.Nodes, node) - } + cluster.StartNewNodes(t, size) if size == 1 { return cluster @@ -145,14 +135,51 @@ func (c *testCluster) Shutdown() { os.RemoveAll(c.TmpDir) } +func (c *testCluster) Stop() { + for _, node := range c.Nodes { + node.Shutdown() + } +} + +func (c *testCluster) StartNewNodes(t *testing.T, size int) { + c.Nodes = make([]*testNode, 0, size) + for i := 0; i < size; i++ { + dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) + if err := os.Mkdir(dir, 0700); err != nil { + t.Fatal(err) + } + + node := newTestNode(t, dir) + node.Name = fmt.Sprintf("swarm%02d", i) + + c.Nodes = append(c.Nodes, node) + } +} + +func (c *testCluster) StartExistingNodes(t *testing.T, size int, bzzaccount string) { + c.Nodes = make([]*testNode, 0, size) + for i := 0; i < size; i++ { + dir := filepath.Join(c.TmpDir, fmt.Sprintf("swarm%02d", i)) + node := existingTestNode(t, dir, bzzaccount) + node.Name = fmt.Sprintf("swarm%02d", i) + + c.Nodes = append(c.Nodes, node) + } +} + +func (c *testCluster) Cleanup() { + os.RemoveAll(c.TmpDir) +} + type testNode struct { - Name string - Addr string - URL string - Enode string - Dir string - Client *rpc.Client - Cmd *cmdtest.TestCmd + Name string + Addr string + URL string + Enode string + Dir string + IpcPath string + Client *rpc.Client + Cmd *cmdtest.TestCmd } const testPassphrase = "swarm-test-passphrase" @@ -181,6 +208,72 @@ func getTestAccount(t *testing.T, dir string) (conf *node.Config, account accoun return conf, account } +func existingTestNode(t *testing.T, dir string, bzzaccount string) *testNode { + conf, _ := getTestAccount(t, dir) + node := &testNode{Dir: dir} + + // use a unique IPCPath when running tests on Windows + if runtime.GOOS == "windows" { + conf.IPCPath = fmt.Sprintf("bzzd-%s.ipc", bzzaccount) + } + + // assign ports + httpPort, err := assignTCPPort() + if err != nil { + t.Fatal(err) + } + p2pPort, err := assignTCPPort() + if err != nil { + t.Fatal(err) + } + + // start the node + node.Cmd = runSwarm(t, + "--port", p2pPort, + "--nodiscover", + "--datadir", dir, + "--ipcpath", conf.IPCPath, + "--ens-api", "", + "--bzzaccount", bzzaccount, + "--bzznetworkid", "321", + "--bzzport", httpPort, + "--verbosity", "6", + ) + node.Cmd.InputLine(testPassphrase) + defer func() { + if t.Failed() { + node.Shutdown() + } + }() + + // wait for the node to start + for start := time.Now(); time.Since(start) < 10*time.Second; time.Sleep(50 * time.Millisecond) { + node.Client, err = rpc.Dial(conf.IPCEndpoint()) + if err == nil { + break + } + } + if node.Client == nil { + t.Fatal(err) + } + + // load info + var info swarm.Info + if err := node.Client.Call(&info, "bzz_info"); err != nil { + t.Fatal(err) + } + node.Addr = net.JoinHostPort("127.0.0.1", info.Port) + node.URL = "http://" + node.Addr + + var nodeInfo p2p.NodeInfo + if err := node.Client.Call(&nodeInfo, "admin_nodeInfo"); err != nil { + t.Fatal(err) + } + node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) + + return node +} + func 
newTestNode(t *testing.T, dir string) *testNode { conf, account := getTestAccount(t, dir) @@ -239,6 +332,7 @@ func newTestNode(t *testing.T, dir string) *testNode { t.Fatal(err) } node.Enode = fmt.Sprintf("enode://%s@127.0.0.1:%s", nodeInfo.ID, p2pPort) + node.IpcPath = conf.IPCPath return node } diff --git a/cmd/swarm/swarm-smoke/main.go b/cmd/swarm/swarm-smoke/main.go new file mode 100644 index 0000000000..87bc39816d --- /dev/null +++ b/cmd/swarm/swarm-smoke/main.go @@ -0,0 +1,101 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "os" + "sort" + + "github.com/ethereum/go-ethereum/log" + colorable "github.com/mattn/go-colorable" + + cli "gopkg.in/urfave/cli.v1" +) + +var ( + endpoints []string + includeLocalhost bool + cluster string + scheme string + filesize int + from int + to int +) + +func main() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) + + app := cli.NewApp() + app.Name = "smoke-test" + app.Usage = "" + + app.Flags = []cli.Flag{ + cli.StringFlag{ + Name: "cluster-endpoint", + Value: "testing", + Usage: "cluster to point to (open, or testing)", + Destination: &cluster, + }, + cli.IntFlag{ + Name: "cluster-from", + Value: 8501, + Usage: "swarm node (from)", + Destination: &from, + }, + cli.IntFlag{ + Name: "cluster-to", + Value: 8512, + Usage: "swarm node (to)", + Destination: &to, + }, + cli.StringFlag{ + Name: "cluster-scheme", + Value: "http", + Usage: "http or https", + Destination: &scheme, + }, + cli.BoolFlag{ + Name: "include-localhost", + Usage: "whether to include localhost:8500 as an endpoint", + Destination: &includeLocalhost, + }, + cli.IntFlag{ + Name: "filesize", + Value: 1, + Usage: "file size for generated random file in MB", + Destination: &filesize, + }, + } + + app.Commands = []cli.Command{ + { + Name: "upload_and_sync", + Aliases: []string{"c"}, + Usage: "upload and sync", + Action: cliUploadAndSync, + }, + } + + sort.Sort(cli.FlagsByName(app.Flags)) + sort.Sort(cli.CommandsByName(app.Commands)) + + err := app.Run(os.Args) + if err != nil { + log.Error(err.Error()) + } +} diff --git a/cmd/swarm/swarm-smoke/upload_and_sync.go b/cmd/swarm/swarm-smoke/upload_and_sync.go new file mode 100644 index 0000000000..7f9051e7fe --- /dev/null +++ b/cmd/swarm/swarm-smoke/upload_and_sync.go @@ -0,0 +1,184 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "bytes" + "crypto/md5" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "os/exec" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/log" + "github.com/pborman/uuid" + + cli "gopkg.in/urfave/cli.v1" +) + +func generateEndpoints(scheme string, cluster string, from int, to int) { + for port := from; port <= to; port++ { + endpoints = append(endpoints, fmt.Sprintf("%s://%v.%s.swarm-gateways.net", scheme, port, cluster)) + } + + if includeLocalhost { + endpoints = append(endpoints, "http://localhost:8500") + } +} + +func cliUploadAndSync(c *cli.Context) error { + defer func(now time.Time) { log.Info("total time", "time", time.Since(now), "size", filesize) }(time.Now()) + + generateEndpoints(scheme, cluster, from, to) + + log.Info("uploading to " + endpoints[0] + " and syncing") + + f, cleanup := generateRandomFile(filesize * 1000000) + defer cleanup() + + hash, err := upload(f, endpoints[0]) + if err != nil { + log.Error(err.Error()) + return err + } + + fhash, err := digest(f) + if err != nil { + log.Error(err.Error()) + return err + } + + log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) + + if filesize < 10 { + time.Sleep(15 * time.Second) + } else { + time.Sleep(2 * time.Duration(filesize) * time.Second) + } + + wg := sync.WaitGroup{} + for _, endpoint := range endpoints { + endpoint := endpoint + ruid := uuid.New()[:8] + wg.Add(1) + go func(endpoint string, ruid string) { + for { + err := fetch(hash, endpoint, fhash, ruid) + if err != nil { + continue + } + + wg.Done() + return + } + }(endpoint, ruid) + } + wg.Wait() + log.Info("all endpoints synced random file successfully") + + return nil +} + +// fetch gets the requested `hash` from the `endpoint` and compares it with the `original` file +func fetch(hash string, endpoint string, original []byte, ruid string) error { + log.Trace("sleeping", "ruid", ruid) + time.Sleep(1 * time.Second) + + log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) + res, err := http.Get(endpoint + "/bzz:/" + hash + "/") + if err != nil { + log.Warn(err.Error(), "ruid", ruid) + return err + } + log.Trace("http get response", "ruid", ruid, "api", endpoint, "hash", hash, "code", res.StatusCode, "len", res.ContentLength) + + if res.StatusCode != 200 { + err := fmt.Errorf("expected status code %d, got %v", 200, res.StatusCode) + log.Warn(err.Error(), "ruid", ruid) + return err + } + + defer res.Body.Close() + + rdigest, err := digest(res.Body) + if err != nil { + log.Warn(err.Error(), "ruid", ruid) + return err + } + + if !bytes.Equal(rdigest, original) { + err := fmt.Errorf("downloaded imported file md5=%x is not the same as the generated one=%x", rdigest, original) + log.Warn(err.Error(), "ruid", ruid) + return err + } + + log.Trace("downloaded file matches random file", "ruid", ruid, "len", res.ContentLength) + + return nil +} + +// upload uploads a file `f` to `endpoint` via the `swarm up` cmd +func upload(f *os.File, endpoint string) (string, error) { + var out bytes.Buffer + cmd := exec.Command("swarm", "--bzzapi", endpoint, "up", f.Name()) + cmd.Stdout = &out + err :=
cmd.Run() if err != nil { + return "", err + } + hash := strings.TrimRight(out.String(), "\r\n") + return hash, nil +} + +func digest(r io.Reader) ([]byte, error) { + h := md5.New() + _, err := io.Copy(h, r) + if err != nil { + return nil, err + } + return h.Sum(nil), nil +} + +// generateRandomFile creates a temporary file with the requested byte size +func generateRandomFile(size int) (f *os.File, teardown func()) { + // create a tmp file + tmp, err := ioutil.TempFile("", "swarm-test") + if err != nil { + panic(err) + } + + // callback for tmp file cleanup + teardown = func() { + tmp.Close() + os.Remove(tmp.Name()) + } + + buf := make([]byte, size) + _, err = rand.Read(buf) + if err != nil { + panic(err) + } + if err := ioutil.WriteFile(tmp.Name(), buf, 0755); err != nil { + panic(err) + } + + return tmp, teardown +} diff --git a/cmd/swarm/upload.go b/cmd/swarm/upload.go index 9f4c525bb9..8ba0e7c5f0 100644 --- a/cmd/swarm/upload.go +++ b/cmd/swarm/upload.go @@ -40,12 +40,13 @@ func upload(ctx *cli.Context) { args := ctx.Args() var ( bzzapi = strings.TrimRight(ctx.GlobalString(SwarmApiFlag.Name), "/") - recursive = ctx.GlobalBool(SwarmRecursiveUploadFlag.Name) + recursive = ctx.GlobalBool(SwarmRecursiveFlag.Name) wantManifest = ctx.GlobalBoolT(SwarmWantManifestFlag.Name) defaultPath = ctx.GlobalString(SwarmUploadDefaultPath.Name) fromStdin = ctx.GlobalBool(SwarmUpFromStdinFlag.Name) mimeType = ctx.GlobalString(SwarmUploadMimeType.Name) client = swarm.NewClient(bzzapi) + toEncrypt = ctx.Bool(SwarmEncryptedFlag.Name) file string ) @@ -76,7 +77,7 @@ func upload(ctx *cli.Context) { utils.Fatalf("Error opening file: %s", err) } defer f.Close() - hash, err := client.UploadRaw(f, f.Size) + hash, err := client.UploadRaw(f, f.Size, toEncrypt) if err != nil { utils.Fatalf("Upload failed: %s", err) } @@ -97,7 +98,7 @@ func upload(ctx *cli.Context) { if !recursive { return "", errors.New("Argument is a directory and recursive upload is disabled") } - return client.UploadDirectory(file, defaultPath, "") + return client.UploadDirectory(file, defaultPath, "", toEncrypt) } } else { doUpload = func() (string, error) { @@ -110,7 +111,7 @@ func upload(ctx *cli.Context) { mimeType = detectMimeType(file) } f.ContentType = mimeType - return client.Upload(f, "") + return client.Upload(f, "", toEncrypt) } } hash, err := doUpload() diff --git a/cmd/swarm/upload_test.go b/cmd/swarm/upload_test.go index df7fc216af..2afc9b3a11 100644 --- a/cmd/swarm/upload_test.go +++ b/cmd/swarm/upload_test.go @@ -17,60 +17,259 @@ package main import ( + "bytes" + "flag" + "fmt" "io" "io/ioutil" "net/http" "os" + "path" + "path/filepath" + "strings" "testing" + "time" + + "github.com/ethereum/go-ethereum/log" + swarm "github.com/ethereum/go-ethereum/swarm/api/client" + colorable "github.com/mattn/go-colorable" ) +var loglevel = flag.Int("loglevel", 3, "verbosity of logs") + +func init() { + log.PrintOrigins(true) + log.Root().SetHandler(log.LvlFilterHandler(log.Lvl(*loglevel), log.StreamHandler(colorable.NewColorableStderr(), log.TerminalFormat(true)))) +} + // TestCLISwarmUp tests that running 'swarm up' makes the resulting file // available from all nodes via the HTTP API func TestCLISwarmUp(t *testing.T) { - // start 3 node cluster - t.Log("starting 3 node cluster") + testCLISwarmUp(false, t) +} +func TestCLISwarmUpRecursive(t *testing.T) { + testCLISwarmUpRecursive(false, t) +} + +// TestCLISwarmUpEncrypted tests that running 'swarm up --encrypt' makes the resulting file +// available from all nodes via the HTTP API +func TestCLISwarmUpEncrypted(t *testing.T) {
+ testCLISwarmUp(true, t) +} +func TestCLISwarmUpEncryptedRecursive(t *testing.T) { + testCLISwarmUpRecursive(true, t) +} + +func testCLISwarmUp(toEncrypt bool, t *testing.T) { + log.Info("starting 3 node cluster") cluster := newTestCluster(t, 3) defer cluster.Shutdown() // create a tmp file tmp, err := ioutil.TempFile("", "swarm-test") - assertNil(t, err) - defer tmp.Close() - defer os.Remove(tmp.Name()) - _, err = io.WriteString(tmp, "data") - assertNil(t, err) - - // upload the file with 'swarm up' and expect a hash - t.Log("uploading file with 'swarm up'") - up := runSwarm(t, "--bzzapi", cluster.Nodes[0].URL, "up", tmp.Name()) - _, matches := up.ExpectRegexp(`[a-f\d]{64}`) - up.ExpectExit() - hash := matches[0] - t.Logf("file uploaded with hash %s", hash) - - // get the file from the HTTP API of each node - for _, node := range cluster.Nodes { - t.Logf("getting file from %s", node.Name) - res, err := http.Get(node.URL + "/bzz:/" + hash) - assertNil(t, err) - assertHTTPResponse(t, res, http.StatusOK, "data") - } -} - -func assertNil(t *testing.T, err error) { if err != nil { t.Fatal(err) } + defer tmp.Close() + defer os.Remove(tmp.Name()) + + // write data to file + data := "notsorandomdata" + _, err = io.WriteString(tmp, data) + if err != nil { + t.Fatal(err) + } + + hashRegexp := `[a-f\d]{64}` + flags := []string{ + "--bzzapi", cluster.Nodes[0].URL, + "up", + tmp.Name()} + if toEncrypt { + hashRegexp = `[a-f\d]{128}` + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "up", + "--encrypt", + tmp.Name()} + } + // upload the file with 'swarm up' and expect a hash + log.Info("uploading file with 'swarm up'") + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + log.Info("file uploaded", "hash", hash) + + // get the file from the HTTP API of each node + for _, node := range cluster.Nodes { + log.Info("getting file from node", "node", node.Name) + + res, err := http.Get(node.URL + "/bzz:/" + hash) + if err != nil { + t.Fatal(err) + } + defer res.Body.Close() + + reply, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatal(err) + } + if res.StatusCode != 200 { + t.Fatalf("expected HTTP status 200, got %s", res.Status) + } + if string(reply) != data { + t.Fatalf("expected HTTP body %q, got %q", data, reply) + } + log.Debug("verifying uploaded file using `swarm down`") + // try to get the content with `swarm down` + tmpDownload, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + tmpDownload = path.Join(tmpDownload, "tmpfile.tmp") + defer os.RemoveAll(tmpDownload) + + bzzLocator := "bzz:/" + hash + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "down", + bzzLocator, + tmpDownload, + } + + down := runSwarm(t, flags...)
+ down.ExpectExit() + + fi, err := os.Stat(tmpDownload) + if err != nil { + t.Fatalf("could not stat path: %v", err) + } + + switch mode := fi.Mode(); { + case mode.IsRegular(): + downloadedBytes, err := ioutil.ReadFile(tmpDownload) + if err != nil { + t.Fatalf("had an error reading the downloaded file: %v", err) + } + if !bytes.Equal(downloadedBytes, bytes.NewBufferString(data).Bytes()) { + t.Fatalf("retrieved data and posted data not equal!") + } + + default: + t.Fatalf("expected to download regular file, got %s", fi.Mode()) + } + } + + timeout := 2 * time.Second + httpClient := http.Client{ + Timeout: timeout, + } + + // try to squeeze a timeout by getting a non-existent hash from each node + for _, node := range cluster.Nodes { + _, err := httpClient.Get(node.URL + "/bzz:/1023e8bae0f70be7d7b5f74343088ba408a218254391490c85ae16278e230340") + // we're speeding up the timeout here since netstore has a 60 second timeout on a request + if err != nil && !strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") { + t.Fatal(err) + } + // this is disabled since it takes 60s due to netstore timeout + // if res.StatusCode != 404 { + // t.Fatalf("expected HTTP status 404, got %s", res.Status) + // } + } } -func assertHTTPResponse(t *testing.T, res *http.Response, expectedStatus int, expectedBody string) { - defer res.Body.Close() - if res.StatusCode != expectedStatus { - t.Fatalf("expected HTTP status %d, got %s", expectedStatus, res.Status) +func testCLISwarmUpRecursive(toEncrypt bool, t *testing.T) { + fmt.Println("starting 3 node cluster") + cluster := newTestCluster(t, 3) + defer cluster.Shutdown() + + tmpUploadDir, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) } - data, err := ioutil.ReadAll(res.Body) - assertNil(t, err) - if string(data) != expectedBody { - t.Fatalf("expected HTTP body %q, got %q", expectedBody, data) + defer os.RemoveAll(tmpUploadDir) + // create tmp files + data := "notsorandomdata" + for _, path := range []string{"tmp1", "tmp2"} { + if err := ioutil.WriteFile(filepath.Join(tmpUploadDir, path), bytes.NewBufferString(data).Bytes(), 0644); err != nil { + t.Fatal(err) + } + } + + hashRegexp := `[a-f\d]{64}` + flags := []string{ + "--bzzapi", cluster.Nodes[0].URL, + "--recursive", + "up", + tmpUploadDir} + if toEncrypt { + hashRegexp = `[a-f\d]{128}` + flags = []string{ + "--bzzapi", cluster.Nodes[0].URL, + "--recursive", + "up", + "--encrypt", + tmpUploadDir} + } + // upload the file with 'swarm up' and expect a hash + log.Info("uploading file with 'swarm up'") + up := runSwarm(t, flags...) + _, matches := up.ExpectRegexp(hashRegexp) + up.ExpectExit() + hash := matches[0] + log.Info("dir uploaded", "hash", hash) + + // get the file from the HTTP API of each node + for _, node := range cluster.Nodes { + log.Info("getting file from node", "node", node.Name) + // try to get the content with `swarm down` + tmpDownload, err := ioutil.TempDir("", "swarm-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpDownload) + bzzLocator := "bzz:/" + hash + flagss := []string{ + "--bzzapi", cluster.Nodes[0].URL, + "down", + "--recursive", + bzzLocator, + tmpDownload, + } + + fmt.Println("downloading from swarm with recursive") + down := runSwarm(t, flagss...)
+ down.ExpectExit() + + files, err := ioutil.ReadDir(tmpDownload) + if err != nil { + t.Fatalf("could not list files: %v", err) + } + for _, v := range files { + fi, err := os.Stat(path.Join(tmpDownload, v.Name())) + if err != nil { + t.Fatalf("got an error: %v", err) + } + + switch mode := fi.Mode(); { + case mode.IsRegular(): + if file, err := swarm.Open(path.Join(tmpDownload, v.Name())); err != nil { + t.Fatalf("encountered an error opening the file returned from the CLI: %v", err) + } else { + ff := make([]byte, len(data)) + io.ReadFull(file, ff) + buf := bytes.NewBufferString(data) + + if !bytes.Equal(ff, buf.Bytes()) { + t.Fatalf("retrieved data and posted data not equal!") + } + } + default: + t.Fatalf("this shouldn't happen") + } + } } } diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 0fedecb2ac..3a4b7f7620 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -27,6 +27,7 @@ import ( "runtime" "strconv" "strings" + "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/keystore" @@ -48,6 +49,7 @@ import ( "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/metrics/influxdb" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" @@ -361,10 +363,6 @@ var ( Name: "ethstats", Usage: "Reporting URL of a ethstats service (nodename:secret@host:port)", } - MetricsEnabledFlag = cli.BoolFlag{ - Name: metrics.MetricsEnabledFlag, - Usage: "Enable metrics collection and reporting", - } FakePoWFlag = cli.BoolFlag{ Name: "fakepow", Usage: "Disables proof-of-work verification", @@ -546,6 +544,44 @@ var ( Name: "shardid", Usage: `use the --shardid to determine which shard to start p2p server, listen for incoming transactions and perform proposer/observer duties`, } + // Metrics flags + MetricsEnabledFlag = cli.BoolFlag{ + Name: metrics.MetricsEnabledFlag, + Usage: "Enable metrics collection and reporting", + } + MetricsEnableInfluxDBFlag = cli.BoolFlag{ + Name: "metrics.influxdb", + Usage: "Enable metrics export/push to an external InfluxDB database", + } + MetricsInfluxDBEndpointFlag = cli.StringFlag{ + Name: "metrics.influxdb.endpoint", + Usage: "InfluxDB API endpoint to report metrics to", + Value: "http://localhost:8086", + } + MetricsInfluxDBDatabaseFlag = cli.StringFlag{ + Name: "metrics.influxdb.database", + Usage: "InfluxDB database name to push reported metrics to", + Value: "geth", + } + MetricsInfluxDBUsernameFlag = cli.StringFlag{ + Name: "metrics.influxdb.username", + Usage: "Username to authorize access to the database", + Value: "test", + } + MetricsInfluxDBPasswordFlag = cli.StringFlag{ + Name: "metrics.influxdb.password", + Usage: "Password to authorize access to the database", + Value: "test", + } + // The `host` tag is part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB. + // It is used so that we can group all nodes and average a measurement across all of them, but also so + // that we can select a specific node and inspect its measurements.
+ // https://docs.influxdata.com/influxdb/v1.4/concepts/key_concepts/#tag-key + MetricsInfluxDBHostTagFlag = cli.StringFlag{ + Name: "metrics.influxdb.host.tag", + Usage: "InfluxDB `host` tag attached to all measurements", + Value: "localhost", + } ) // MakeDataDir retrieves the currently requested data directory, terminating @@ -1098,6 +1134,9 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { } cfg.Genesis = core.DefaultRinkebyGenesisBlock() case ctx.GlobalBool(DeveloperFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 1337 + } // Create new developer account or reuse existing one var ( developer accounts.Account @@ -1195,6 +1234,27 @@ func SetupNetwork(ctx *cli.Context) { params.TargetGasLimit = ctx.GlobalUint64(TargetGasLimitFlag.Name) } +func SetupMetrics(ctx *cli.Context) { + if metrics.Enabled { + log.Info("Enabling metrics collection") + var ( + enableExport = ctx.GlobalBool(MetricsEnableInfluxDBFlag.Name) + endpoint = ctx.GlobalString(MetricsInfluxDBEndpointFlag.Name) + database = ctx.GlobalString(MetricsInfluxDBDatabaseFlag.Name) + username = ctx.GlobalString(MetricsInfluxDBUsernameFlag.Name) + password = ctx.GlobalString(MetricsInfluxDBPasswordFlag.Name) + hosttag = ctx.GlobalString(MetricsInfluxDBHostTagFlag.Name) + ) + + if enableExport { + log.Info("Enabling metrics export to InfluxDB") + go influxdb.InfluxDBWithTags(metrics.DefaultRegistry, 10*time.Second, endpoint, database, username, password, "geth.", map[string]string{ + "host": hosttag, + }) + } + } +} + // MakeChainDatabase open an LevelDB using the flags passed to the client and will hard crash if it fails. func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { var ( diff --git a/cmd/wnode/main.go b/cmd/wnode/main.go index 5031a088cb..31b65c1da9 100644 --- a/cmd/wnode/main.go +++ b/cmd/wnode/main.go @@ -140,8 +140,8 @@ func processArgs() { } if *asymmetricMode && len(*argPub) > 0 { - pub = crypto.ToECDSAPub(common.FromHex(*argPub)) - if !isKeyValid(pub) { + var err error + if pub, err = crypto.UnmarshalPubkey(common.FromHex(*argPub)); err != nil { utils.Fatalf("invalid public key") } } @@ -321,10 +321,6 @@ func startServer() error { return nil } -func isKeyValid(k *ecdsa.PublicKey) bool { - return k.X != nil && k.Y != nil -} - func configureNode() { var err error var p2pAccept bool @@ -340,9 +336,8 @@ func configureNode() { if b == nil { utils.Fatalf("Error: can not convert hexadecimal string") } - pub = crypto.ToECDSAPub(b) - if !isKeyValid(pub) { - utils.Fatalf("Error: invalid public key") + if pub, err = crypto.UnmarshalPubkey(b); err != nil { + utils.Fatalf("Error: invalid peer public key") } } } diff --git a/common/bytes.go b/common/bytes.go index e2adc80059..cbab2c3fa9 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -19,15 +19,20 @@ package common import "encoding/hex" +// ToHex returns the hex representation of b, prefixed with '0x'. +// For empty slices, the return value is "0x0". +// +// Deprecated: use hexutil.Encode instead. func ToHex(b []byte) string { hex := Bytes2Hex(b) - // Prefer output of "0x0" instead of "0x" if len(hex) == 0 { hex = "0" } return "0x" + hex } +// FromHex returns the bytes represented by the hexadecimal string s. +// s may be prefixed with "0x". 
func FromHex(s string) []byte { if len(s) > 1 { if s[0:2] == "0x" || s[0:2] == "0X" { @@ -40,9 +45,7 @@ func FromHex(s string) []byte { return Hex2Bytes(s) } -// Copy bytes -// -// Returns an exact copy of the provided bytes +// CopyBytes returns an exact copy of the provided bytes. func CopyBytes(b []byte) (copiedBytes []byte) { if b == nil { return nil @@ -53,14 +56,17 @@ func CopyBytes(b []byte) (copiedBytes []byte) { return } +// hasHexPrefix reports whether str begins with '0x' or '0X'. func hasHexPrefix(str string) bool { return len(str) >= 2 && str[0] == '0' && (str[1] == 'x' || str[1] == 'X') } +// isHexCharacter reports whether c is a valid hexadecimal character. func isHexCharacter(c byte) bool { return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') || ('A' <= c && c <= 'F') } +// isHex reports whether str is a valid hexadecimal string. func isHex(str string) bool { if len(str)%2 != 0 { return false @@ -73,16 +79,18 @@ func isHex(str string) bool { return true } +// Bytes2Hex returns the hexadecimal encoding of d. func Bytes2Hex(d []byte) string { return hex.EncodeToString(d) } +// Hex2Bytes returns the bytes represented by the hexadecimal string str. func Hex2Bytes(str string) []byte { h, _ := hex.DecodeString(str) - return h } +// Hex2BytesFixed returns the bytes of the hexadecimal string str, cropped or left-padded to the fixed length flen. func Hex2BytesFixed(str string, flen int) []byte { h, _ := hex.DecodeString(str) if len(h) == flen { @@ -96,6 +104,7 @@ func Hex2BytesFixed(str string, flen int) []byte { return hh } +// RightPadBytes zero-pads slice to the right up to length l. func RightPadBytes(slice []byte, l int) []byte { if l <= len(slice) { return slice @@ -107,6 +116,7 @@ func RightPadBytes(slice []byte, l int) []byte { return padded } +// LeftPadBytes zero-pads slice to the left up to length l. func LeftPadBytes(slice []byte, l int) []byte { if l <= len(slice) { return slice diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go index 234714a2b9..f6e8d2e42a 100644 --- a/common/compiler/solidity.go +++ b/common/compiler/solidity.go @@ -31,11 +31,17 @@ import ( var versionRegexp = regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`) +// Contract contains information about a compiled contract, alongside its code. type Contract struct { Code string `json:"code"` Info ContractInfo `json:"info"` } +// ContractInfo contains information about a compiled contract, including access +// to the ABI definition, user and developer docs, and metadata. +// +// Depending on the source, the language version, compiler version, and +// compiler options fields provide information about how the contract was compiled. type ContractInfo struct { Source string `json:"source"` Language string `json:"language"` @@ -142,8 +148,22 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro if err := cmd.Run(); err != nil { return nil, fmt.Errorf("solc: %v\n%s", err, stderr.Bytes()) } + + return ParseCombinedJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " ")) +} + +// ParseCombinedJSON takes the direct output of a solc --combined-json run and +// parses it into a map of string contract name to Contract structs. The +// provided source, language and compiler version, and compiler options are all +// passed through into the Contract structs. +// +// The solc output is expected to contain ABI, user docs, and dev docs. +// +// It returns an error if the JSON is malformed or missing data, or if the +// JSON strings embedded within it are malformed.
+func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) { var output solcOutput - if err := json.Unmarshal(stdout.Bytes(), &output); err != nil { + if err := json.Unmarshal(combinedJSON, &output); err != nil { return nil, err } @@ -168,9 +188,9 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro Info: ContractInfo{ Source: source, Language: "Solidity", - LanguageVersion: s.Version, - CompilerVersion: s.Version, - CompilerOptions: strings.Join(s.makeArgs(), " "), + LanguageVersion: languageVersion, + CompilerVersion: compilerVersion, + CompilerOptions: compilerOptions, AbiDefinition: abi, UserDoc: userdoc, DeveloperDoc: devdoc, diff --git a/common/hexutil/hexutil.go b/common/hexutil/hexutil.go index 02c488a3f1..46223a2815 100644 --- a/common/hexutil/hexutil.go +++ b/common/hexutil/hexutil.go @@ -39,6 +39,7 @@ import ( const uintBits = 32 << (uint64(^uint(0)) >> 63) +// Errors var ( ErrEmptyString = &decError{"empty hex string"} ErrSyntax = &decError{"invalid hex string"} diff --git a/common/math/big.go b/common/math/big.go index 7872786503..9d2e7946d1 100644 --- a/common/math/big.go +++ b/common/math/big.go @@ -22,12 +22,13 @@ import ( "math/big" ) +// Various big integer limit values. var ( tt255 = BigPow(2, 255) tt256 = BigPow(2, 256) tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1)) - MaxBig256 = new(big.Int).Set(tt256m1) tt63 = BigPow(2, 63) + MaxBig256 = new(big.Int).Set(tt256m1) MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1)) ) @@ -78,7 +79,7 @@ func ParseBig256(s string) (*big.Int, bool) { return bigint, ok } -// MustParseBig parses s as a 256 bit big integer and panics if the string is invalid. +// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid. func MustParseBig256(s string) *big.Int { v, ok := ParseBig256(s) if !ok { @@ -186,9 +187,8 @@ func U256(x *big.Int) *big.Int { func S256(x *big.Int) *big.Int { if x.Cmp(tt255) < 0 { return x - } else { - return new(big.Int).Sub(x, tt256) } + return new(big.Int).Sub(x, tt256) } // Exp implements exponentiation by squaring. diff --git a/common/math/integer.go b/common/math/integer.go index 7eff4d3b00..93b1d036dd 100644 --- a/common/math/integer.go +++ b/common/math/integer.go @@ -21,8 +21,8 @@ import ( "strconv" ) +// Integer limit values. const ( - // Integer limit values. MaxInt8 = 1<<7 - 1 MinInt8 = -1 << 7 MaxInt16 = 1<<15 - 1 diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go index 92005252eb..02608d17b0 100644 --- a/common/mclock/mclock.go +++ b/common/mclock/mclock.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// package mclock is a wrapper for a monotonic clock source +// Package mclock is a wrapper for a monotonic clock source package mclock import ( @@ -23,8 +23,10 @@ import ( "github.com/aristanetworks/goarista/monotime" ) -type AbsTime time.Duration // absolute monotonic time +// AbsTime represents absolute monotonic time. +type AbsTime time.Duration +// Now returns the current absolute monotonic time. func Now() AbsTime { return AbsTime(monotime.Now()) } diff --git a/common/number/int.go b/common/number/int.go deleted file mode 100644 index 6dab2436de..0000000000 --- a/common/number/int.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package number - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -var tt256 = new(big.Int).Lsh(big.NewInt(1), 256) -var tt256m1 = new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1)) -var tt255 = new(big.Int).Lsh(big.NewInt(1), 255) - -func limitUnsigned256(x *Number) *Number { - x.num.And(x.num, tt256m1) - return x -} - -func limitSigned256(x *Number) *Number { - if x.num.Cmp(tt255) < 0 { - return x - } else { - x.num.Sub(x.num, tt256) - return x - } -} - -// Number function -type Initialiser func(n int64) *Number - -// A Number represents a generic integer with a bounding function limiter. Limit is called after each operations -// to give "fake" bounded integers. New types of Number can be created through NewInitialiser returning a lambda -// with the new Initialiser. -type Number struct { - num *big.Int - limit func(n *Number) *Number -} - -// Returns a new initialiser for a new *Number without having to expose certain fields -func NewInitialiser(limiter func(*Number) *Number) Initialiser { - return func(n int64) *Number { - return &Number{big.NewInt(n), limiter} - } -} - -// Return a Number with a UNSIGNED limiter up to 256 bits -func Uint256(n int64) *Number { - return &Number{big.NewInt(n), limitUnsigned256} -} - -// Return a Number with a SIGNED limiter up to 256 bits -func Int256(n int64) *Number { - return &Number{big.NewInt(n), limitSigned256} -} - -// Returns a Number with a SIGNED unlimited size -func Big(n int64) *Number { - return &Number{big.NewInt(n), func(x *Number) *Number { return x }} -} - -// Sets i to sum of x+y -func (i *Number) Add(x, y *Number) *Number { - i.num.Add(x.num, y.num) - return i.limit(i) -} - -// Sets i to difference of x-y -func (i *Number) Sub(x, y *Number) *Number { - i.num.Sub(x.num, y.num) - return i.limit(i) -} - -// Sets i to product of x*y -func (i *Number) Mul(x, y *Number) *Number { - i.num.Mul(x.num, y.num) - return i.limit(i) -} - -// Sets i to the quotient prodject of x/y -func (i *Number) Div(x, y *Number) *Number { - i.num.Div(x.num, y.num) - return i.limit(i) -} - -// Sets i to x % y -func (i *Number) Mod(x, y *Number) *Number { - i.num.Mod(x.num, y.num) - return i.limit(i) -} - -// Sets i to x << s -func (i *Number) Lsh(x *Number, s uint) *Number { - i.num.Lsh(x.num, s) - return i.limit(i) -} - -// Sets i to x^y -func (i *Number) Pow(x, y *Number) *Number { - i.num.Exp(x.num, y.num, big.NewInt(0)) - return i.limit(i) -} - -// Setters - -// Set x to i -func (i *Number) Set(x *Number) *Number { - i.num.Set(x.num) - return i.limit(i) -} - -// Set x bytes to i -func (i *Number) SetBytes(x []byte) *Number { - i.num.SetBytes(x) - return i.limit(i) -} - -// Cmp compares x and y and returns: -// -// -1 if x < y -// 0 if x == y -// +1 if x > y -func (i *Number) Cmp(x *Number) int { - return i.num.Cmp(x.num) -} - -// Getters - -// 
Returns the string representation of i -func (i *Number) String() string { - return i.num.String() -} - -// Returns the byte representation of i -func (i *Number) Bytes() []byte { - return i.num.Bytes() -} - -// Uint64 returns the Uint64 representation of x. If x cannot be represented in an int64, the result is undefined. -func (i *Number) Uint64() uint64 { - return i.num.Uint64() -} - -// Int64 returns the int64 representation of x. If x cannot be represented in an int64, the result is undefined. -func (i *Number) Int64() int64 { - return i.num.Int64() -} - -// Returns the signed version of i -func (i *Number) Int256() *Number { - return Int(0).Set(i) -} - -// Returns the unsigned version of i -func (i *Number) Uint256() *Number { - return Uint(0).Set(i) -} - -// Returns the index of the first bit that's set to 1 -func (i *Number) FirstBitSet() int { - for j := 0; j < i.num.BitLen(); j++ { - if i.num.Bit(j) > 0 { - return j - } - } - - return i.num.BitLen() -} - -// Variables - -var ( - Zero = Uint(0) - One = Uint(1) - Two = Uint(2) - MaxUint256 = Uint(0).SetBytes(common.Hex2Bytes("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) - - MinOne = Int(-1) - - // "typedefs" - Uint = Uint256 - Int = Int256 -) diff --git a/common/number/uint_test.go b/common/number/uint_test.go deleted file mode 100644 index 3ab9e4c344..0000000000 --- a/common/number/uint_test.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package number - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" -) - -func TestSet(t *testing.T) { - a := Uint(0) - b := Uint(10) - a.Set(b) - if a.num.Cmp(b.num) != 0 { - t.Error("didn't compare", a, b) - } - - c := Uint(0).SetBytes(common.Hex2Bytes("0a")) - if c.num.Cmp(big.NewInt(10)) != 0 { - t.Error("c set bytes failed.") - } -} - -func TestInitialiser(t *testing.T) { - check := false - init := NewInitialiser(func(x *Number) *Number { - check = true - return x - }) - a := init(0).Add(init(1), init(2)) - if a.Cmp(init(3)) != 0 { - t.Error("expected 3. got", a) - } - if !check { - t.Error("expected limiter to be called") - } -} - -func TestGet(t *testing.T) { - a := Uint(10) - if a.Uint64() != 10 { - t.Error("expected to get 10. got", a.Uint64()) - } - - a = Uint(10) - if a.Int64() != 10 { - t.Error("expected to get 10. 
got", a.Int64()) - } -} - -func TestCmp(t *testing.T) { - a := Uint(10) - b := Uint(10) - c := Uint(11) - - if a.Cmp(b) != 0 { - t.Error("a b == 0 failed", a, b) - } - - if a.Cmp(c) >= 0 { - t.Error("a c < 0 failed", a, c) - } - - if c.Cmp(b) <= 0 { - t.Error("c b > 0 failed", c, b) - } -} - -func TestMaxArith(t *testing.T) { - a := Uint(0).Add(MaxUint256, One) - if a.Cmp(Zero) != 0 { - t.Error("expected max256 + 1 = 0 got", a) - } - - a = Uint(0).Sub(Uint(0), One) - if a.Cmp(MaxUint256) != 0 { - t.Error("expected 0 - 1 = max256 got", a) - } - - a = Int(0).Sub(Int(0), One) - if a.Cmp(MinOne) != 0 { - t.Error("expected 0 - 1 = -1 got", a) - } -} - -func TestConversion(t *testing.T) { - a := Int(-1) - b := a.Uint256() - if b.Cmp(MaxUint256) != 0 { - t.Error("expected -1 => unsigned to return max. got", b) - } -} diff --git a/common/path.go b/common/path.go index bd8da86e74..69820cfe5d 100644 --- a/common/path.go +++ b/common/path.go @@ -30,6 +30,7 @@ func MakeName(name, version string) string { return fmt.Sprintf("%s/v%s/%s/%s", name, version, runtime.GOOS, runtime.Version()) } +// FileExist checks if a file exists at filePath. func FileExist(filePath string) bool { _, err := os.Stat(filePath) if err != nil && os.IsNotExist(err) { @@ -39,9 +40,10 @@ func FileExist(filePath string) bool { return true } -func AbsolutePath(Datadir string, filename string) string { +// AbsolutePath returns datadir + filename, or filename if it is absolute. +func AbsolutePath(datadir string, filename string) string { if filepath.IsAbs(filename) { return filename } - return filepath.Join(Datadir, filename) + return filepath.Join(datadir, filename) } diff --git a/common/types.go b/common/types.go index 0b94fb2c25..4d374ad246 100644 --- a/common/types.go +++ b/common/types.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/sha3" ) +// Lengths of hashes and addresses in bytes. const ( HashLength = 32 AddressLength = 20 @@ -42,19 +43,30 @@ var ( // Hash represents the 32 byte Keccak256 hash of arbitrary data. type Hash [HashLength]byte +// BytesToHash sets b to hash. +// If b is larger than len(h), b will be cropped from the left. func BytesToHash(b []byte) Hash { var h Hash h.SetBytes(b) return h } -func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } -func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } -// Get the string representation of the underlying hash -func (h Hash) Str() string { return string(h[:]) } +// BigToHash sets byte representation of b to hash. +// If b is larger than len(h), b will be cropped from the left. +func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } + +// HexToHash sets byte representation of s to hash. +// If b is larger than len(h), b will be cropped from the left. +func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } + +// Bytes gets the byte representation of the underlying hash. func (h Hash) Bytes() []byte { return h[:] } + +// Big converts a hash to a big integer. func (h Hash) Big() *big.Int { return new(big.Int).SetBytes(h[:]) } -func (h Hash) Hex() string { return hexutil.Encode(h[:]) } + +// Hex converts a hash to a hex string. +func (h Hash) Hex() string { return hexutil.Encode(h[:]) } // TerminalString implements log.TerminalStringer, formatting a string for console // output during logging. @@ -89,7 +101,8 @@ func (h Hash) MarshalText() ([]byte, error) { return hexutil.Bytes(h[:]).MarshalText() } -// Sets the hash to the value of b. If b is larger than len(h), 'b' will be cropped (from the left). 
+// SetBytes sets the hash to the value of b. +// If b is larger than len(h), b will be cropped from the left. func (h *Hash) SetBytes(b []byte) { if len(b) > len(h) { b = b[len(b)-HashLength:] @@ -98,16 +111,6 @@ func (h *Hash) SetBytes(b []byte) { copy(h[HashLength-len(b):], b) } -// Set string `s` to h. If s is larger than len(h) s will be cropped (from left) to fit. -func (h *Hash) SetString(s string) { h.SetBytes([]byte(s)) } - -// Sets h to other -func (h *Hash) Set(other Hash) { - for i, v := range other { - h[i] = v - } -} - // Generate implements testing/quick.Generator. func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value { m := rand.Intn(len(h)) @@ -117,10 +120,6 @@ func (h Hash) Generate(rand *rand.Rand, size int) reflect.Value { return reflect.ValueOf(h) } -func EmptyHash(h Hash) bool { - return h == Hash{} -} - // UnprefixedHash allows marshaling a Hash without 0x prefix. type UnprefixedHash Hash @@ -139,13 +138,21 @@ func (h UnprefixedHash) MarshalText() ([]byte, error) { // Address represents the 20 byte address of an Ethereum account. type Address [AddressLength]byte +// BytesToAddress returns Address with value b. +// If b is larger than len(a), b will be cropped from the left. func BytesToAddress(b []byte) Address { var a Address a.SetBytes(b) return a } + +// BigToAddress returns Address with byte values of b. +// If b is larger than len(a), b will be cropped from the left. func BigToAddress(b *big.Int) Address { return BytesToAddress(b.Bytes()) } -func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) } + +// HexToAddress returns Address with byte values of s. +// If s is larger than len(a), s will be cropped from the left. +func HexToAddress(s string) Address { return BytesToAddress(FromHex(s)) } // IsHexAddress verifies whether a string can represent a valid hex-encoded // Ethereum address or not. @@ -156,11 +163,14 @@ func IsHexAddress(s string) bool { return len(s) == 2*AddressLength && isHex(s) } -// Get the string representation of the underlying address -func (a Address) Str() string { return string(a[:]) } +// Bytes gets the byte representation of the underlying address. func (a Address) Bytes() []byte { return a[:] } + +// Big converts an address to a big integer. func (a Address) Big() *big.Int { return new(big.Int).SetBytes(a[:]) } -func (a Address) Hash() Hash { return BytesToHash(a[:]) } + +// Hash converts an address to a hash by left-padding it with zeros. +func (a Address) Hash() Hash { return BytesToHash(a[:]) } // Hex returns an EIP55-compliant hex string representation of the address. func (a Address) Hex() string { @@ -184,7 +194,7 @@ func (a Address) Hex() string { return "0x" + string(result) } -// String implements the stringer interface and is used also by the logger. +// String implements fmt.Stringer. func (a Address) String() string { return a.Hex() } @@ -195,7 +205,8 @@ func (a Address) Format(s fmt.State, c rune) { fmt.Fprintf(s, "%"+string(c), a[:]) } -// Sets the address to the value of b. If b is larger than len(a) it will panic +// SetBytes sets the address to the value of b. +// If b is larger than len(a), b will be cropped from the left. func (a *Address) SetBytes(b []byte) { if len(b) > len(a) { b = b[len(b)-AddressLength:] @@ -203,16 +214,6 @@ func (a *Address) SetBytes(b []byte) { copy(a[AddressLength-len(b):], b) } -// Set string `s` to a.
If s is larger than len(a) it will panic -func (a *Address) SetString(s string) { a.SetBytes([]byte(s)) } - -// Sets a to other -func (a *Address) Set(other Address) { - for i, v := range other { - a[i] = v - } -} - // MarshalText returns the hex representation of a. func (a Address) MarshalText() ([]byte, error) { return hexutil.Bytes(a[:]).MarshalText() @@ -228,7 +229,7 @@ func (a *Address) UnmarshalJSON(input []byte) error { return hexutil.UnmarshalFixedJSON(addressT, input, a[:]) } -// UnprefixedHash allows marshaling an Address without 0x prefix. +// UnprefixedAddress allows marshaling an Address without 0x prefix. type UnprefixedAddress Address // UnmarshalText decodes the address from hex. The 0x prefix is optional. diff --git a/common/types_template.go b/common/types_template.go deleted file mode 100644 index 9a8f29977b..0000000000 --- a/common/types_template.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// +build none -//sed -e 's/_N_/Hash/g' -e 's/_S_/32/g' -e '1d' types_template.go | gofmt -w hash.go - -package common - -import "math/big" - -type _N_ [_S_]byte - -func BytesTo_N_(b []byte) _N_ { - var h _N_ - h.SetBytes(b) - return h -} -func StringTo_N_(s string) _N_ { return BytesTo_N_([]byte(s)) } -func BigTo_N_(b *big.Int) _N_ { return BytesTo_N_(b.Bytes()) } -func HexTo_N_(s string) _N_ { return BytesTo_N_(FromHex(s)) } - -// Don't use the default 'String' method in case we want to overwrite - -// Get the string representation of the underlying hash -func (h _N_) Str() string { return string(h[:]) } -func (h _N_) Bytes() []byte { return h[:] } -func (h _N_) Big() *big.Int { return new(big.Int).SetBytes(h[:]) } -func (h _N_) Hex() string { return "0x" + Bytes2Hex(h[:]) } - -// Sets the hash to the value of b. If b is larger than len(h) it will panic -func (h *_N_) SetBytes(b []byte) { - // Use the right most bytes - if len(b) > len(h) { - b = b[len(b)-_S_:] - } - - // Reverse the loop - for i := len(b) - 1; i >= 0; i-- { - h[_S_-len(b)+i] = b[i] - } -} - -// Set string `s` to h. 
If s is larger than len(h) it will panic -func (h *_N_) SetString(s string) { h.SetBytes([]byte(s)) } - -// Sets h to other -func (h *_N_) Set(other _N_) { - for i, v := range other { - h[i] = v - } -} diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index 2bdad9092a..01df4d5c78 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -383,7 +383,7 @@ func (c *Clique) snapshot(chain consensus.ChainReader, number uint64, hash commo // If an on-disk checkpoint snapshot can be found, use that if number%checkpointInterval == 0 { if s, err := loadSnapshot(c.config, c.signatures, c.db, hash); err == nil { - log.Trace("Loaded voting snapshot form disk", "number", number, "hash", hash) + log.Trace("Loaded voting snapshot from disk", "number", number, "hash", hash) snap = s break } diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 905a7b1ea7..fa1c2c8246 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -94,14 +94,25 @@ func calcDatasetSize(epoch int) uint64 { // reused between hash runs instead of requiring new ones to be created. type hasher func(dest []byte, data []byte) -// makeHasher creates a repetitive hasher, allowing the same hash data structures -// to be reused between hash runs instead of requiring new ones to be created. -// The returned function is not thread safe! +// makeHasher creates a repetitive hasher, allowing the same hash data structures to +// be reused between hash runs instead of requiring new ones to be created. The returned +// function is not thread safe! func makeHasher(h hash.Hash) hasher { + // sha3.state supports Read to get the sum, use it to avoid the overhead of Sum. + // Read alters the state but we reset the hash before every operation. + type readerHash interface { + hash.Hash + Read([]byte) (int, error) + } + rh, ok := h.(readerHash) + if !ok { + panic("can't find Read method on hash") + } + outputLen := rh.Size() return func(dest []byte, data []byte) { - h.Write(data) - h.Sum(dest[:0]) - h.Reset() + rh.Reset() + rh.Write(data) + rh.Read(dest[:outputLen]) } } diff --git a/console/bridge.go b/console/bridge.go index f2120351c0..b0b4d37985 100644 --- a/console/bridge.go +++ b/console/bridge.go @@ -271,7 +271,7 @@ func (b *bridge) SleepBlocks(call otto.FunctionCall) (response otto.Value) { } type jsonrpcCall struct { - Id int64 + ID int64 Method string Params []interface{} } @@ -304,7 +304,7 @@ func (b *bridge) Send(call otto.FunctionCall) (response otto.Value) { resps, _ := call.Otto.Object("new Array()") for _, req := range reqs { resp, _ := call.Otto.Object(`({"jsonrpc":"2.0"})`) - resp.Set("id", req.Id) + resp.Set("id", req.ID) var result json.RawMessage err = b.client.Call(&result, req.Method, req.Params...) switch err := err.(type) { diff --git a/console/console.go b/console/console.go index b280d4e65d..56e03837ac 100644 --- a/console/console.go +++ b/console/console.go @@ -60,7 +60,7 @@ type Config struct { Preload []string // Absolute paths to JavaScript files to preload } -// Console is a JavaScript interpreted runtime environment. It is a fully fleged +// Console is a JavaScript interpreted runtime environment. It is a fully fledged // JavaScript console attached to a running node via an external or in-process RPC // client. 
type Console struct { @@ -73,6 +73,8 @@ type Console struct { printer io.Writer // Output writer to serialize any display strings to } +// New initializes a JavaScript interpreted runtime environment and sets defaults +// with the config struct. func New(config Config) (*Console, error) { // Handle unset config values gracefully if config.Prompter == nil { diff --git a/contracts/ens/ens.go b/contracts/ens/ens.go index 06045a5cd8..75d9d0e4b1 100644 --- a/contracts/ens/ens.go +++ b/contracts/ens/ens.go @@ -95,7 +95,7 @@ func ensParentNode(name string) (common.Hash, common.Hash) { } } -func ensNode(name string) common.Hash { +func EnsNode(name string) common.Hash { parentNode, parentLabel := ensParentNode(name) return crypto.Keccak256Hash(parentNode[:], parentLabel[:]) } @@ -136,7 +136,7 @@ func (self *ENS) getRegistrar(node [32]byte) (*contract.FIFSRegistrarSession, er // Resolve is a non-transactional call that returns the content hash associated with a name. func (self *ENS) Resolve(name string) (common.Hash, error) { - node := ensNode(name) + node := EnsNode(name) resolver, err := self.getResolver(node) if err != nil { @@ -165,7 +165,7 @@ func (self *ENS) Register(name string) (*types.Transaction, error) { // SetContentHash sets the content hash associated with a name. Only works if the caller // owns the name, and the associated resolver implements a `setContent` function. func (self *ENS) SetContentHash(name string, hash common.Hash) (*types.Transaction, error) { - node := ensNode(name) + node := EnsNode(name) resolver, err := self.getResolver(node) if err != nil { diff --git a/contracts/ens/ens_test.go b/contracts/ens/ens_test.go index 0016f47dbf..6ad8447082 100644 --- a/contracts/ens/ens_test.go +++ b/contracts/ens/ens_test.go @@ -55,7 +55,7 @@ func TestENS(t *testing.T) { if err != nil { t.Fatalf("can't deploy resolver: %v", err) } - if _, err := ens.SetResolver(ensNode(name), resolverAddr); err != nil { + if _, err := ens.SetResolver(EnsNode(name), resolverAddr); err != nil { t.Fatalf("can't set resolver: %v", err) } contractBackend.Commit() diff --git a/core/asm/compiler.go b/core/asm/compiler.go index c273e7c51b..c7a5440701 100644 --- a/core/asm/compiler.go +++ b/core/asm/compiler.go @@ -51,7 +51,7 @@ func NewCompiler(debug bool) *Compiler { // the compiler. // // feed is the first pass in the compile stage as it -// collect the used labels in the program and keeps a +// collects the used labels in the program and keeps a // program counter which is used to determine the locations // of the jump dests. The labels can than be used in the // second stage to push labels and determine the right @@ -120,7 +120,7 @@ func (c *Compiler) next() token { return token } -// compile line compiles a single line instruction e.g. +// compileLine compiles a single line instruction e.g. // "push 1", "jump @label". func (c *Compiler) compileLine() error { n := c.next() diff --git a/core/asm/lexer.go b/core/asm/lexer.go index 4d62159e55..91caeb27bc 100644 --- a/core/asm/lexer.go +++ b/core/asm/lexer.go @@ -242,7 +242,7 @@ func lexLabel(l *lexer) stateFn { } // lexInsideString lexes the inside of a string until -// until the state function finds the closing quote. +// the state function finds the closing quote. // It returns the lex text state function. 
func lexInsideString(l *lexer) stateFn { if l.acceptRunUntil('"') { diff --git a/core/blockchain.go b/core/blockchain.go index f74a0f5b27..2f1e78423e 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -269,8 +269,8 @@ func (bc *BlockChain) SetHead(head uint64) error { defer bc.mu.Unlock() // Rewind the header chain, deleting all block bodies until then - delFn := func(hash common.Hash, num uint64) { - rawdb.DeleteBody(bc.db, hash, num) + delFn := func(db rawdb.DatabaseDeleter, hash common.Hash, num uint64) { + rawdb.DeleteBody(db, hash, num) } bc.hc.SetHead(head, delFn) currentHeader := bc.hc.CurrentHeader() @@ -672,9 +672,9 @@ func (bc *BlockChain) Stop() { } } for !bc.triegc.Empty() { - triedb.Dereference(bc.triegc.PopItem().(common.Hash), common.Hash{}) + triedb.Dereference(bc.triegc.PopItem().(common.Hash)) } - if size := triedb.Size(); size != 0 { + if size, _ := triedb.Size(); size != 0 { log.Error("Dangling trie nodes after full cleanup") } } @@ -916,33 +916,29 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types. bc.triegc.Push(root, -float32(block.NumberU64())) if current := block.NumberU64(); current > triesInMemory { + // If we exceeded our memory allowance, flush matured singleton nodes to disk + var ( + nodes, imgs = triedb.Size() + limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 + ) + if nodes > limit || imgs > 4*1024*1024 { + triedb.Cap(limit - ethdb.IdealBatchSize) + } // Find the next state trie we need to commit header := bc.GetHeaderByNumber(current - triesInMemory) chosen := header.Number.Uint64() - // Only write to disk if we exceeded our memory allowance *and* also have at - // least a given number of tries gapped. - var ( - size = triedb.Size() - limit = common.StorageSize(bc.cacheConfig.TrieNodeLimit) * 1024 * 1024 - ) - if size > limit || bc.gcproc > bc.cacheConfig.TrieTimeLimit { + // If we exceeded our time allowance, flush an entire trie to disk + if bc.gcproc > bc.cacheConfig.TrieTimeLimit { // If we're exceeding limits but haven't reached a large enough memory gap, // warn the user that the system is becoming unstable. - if chosen < lastWrite+triesInMemory { - switch { - case size >= 2*limit: - log.Warn("State memory usage too high, committing", "size", size, "limit", limit, "optimum", float64(chosen-lastWrite)/triesInMemory) - case bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit: - log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) - } - } - // If optimum or critical limits reached, write to disk - if chosen >= lastWrite+triesInMemory || size >= 2*limit || bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { - triedb.Commit(header.Root, true) - lastWrite = chosen - bc.gcproc = 0 + if chosen < lastWrite+triesInMemory && bc.gcproc >= 2*bc.cacheConfig.TrieTimeLimit { + log.Info("State in memory for too long, committing", "time", bc.gcproc, "allowance", bc.cacheConfig.TrieTimeLimit, "optimum", float64(chosen-lastWrite)/triesInMemory) } + // Flush an entire trie and restart the counters + triedb.Commit(header.Root, true) + lastWrite = chosen + bc.gcproc = 0 } // Garbage collect anything below our required write retention for !bc.triegc.Empty() { @@ -951,7 +947,7 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
bc.triegc.Push(root, number) break } - triedb.Dereference(root.(common.Hash), common.Hash{}) + triedb.Dereference(root.(common.Hash)) } } } @@ -1009,6 +1005,10 @@ func (bc *BlockChain) InsertChain(chain types.Blocks) (int, error) { // only reason this method exists as a separate one is to make locking cleaner // with deferred statements. func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*types.Log, error) { + // Sanity check that we have something meaningful to import + if len(chain) == 0 { + return 0, nil, nil, nil + } // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { if chain[i].NumberU64() != chain[i-1].NumberU64()+1 || chain[i].ParentHash() != chain[i-1].Hash() { @@ -1047,6 +1047,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty abort, results := bc.engine.VerifyHeaders(bc, headers, seals) defer close(abort) + // Start a parallel signature recovery (signer will fluke on fork transition, minimal perf loss) + senderCacher.recoverFromBlocks(types.MakeSigner(bc.chainConfig, chain[0].Number()), chain) + // Iterate over the blocks and insert when the verifier permits for i, block := range chain { // If the chain is terminating, stop processing blocks @@ -1181,7 +1184,9 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty } stats.processed++ stats.usedGas += usedGas - stats.report(chain, i, bc.stateCache.TrieDB().Size()) + + cache, _ := bc.stateCache.TrieDB().Size() + stats.report(chain, i, cache) } // Append a single chain head event if we've progressed the chain if lastCanon != nil && bc.CurrentBlock().Hash() == lastCanon.Hash() { @@ -1335,9 +1340,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { diff := types.TxDifference(deletedTxs, addedTxs) // When transactions get deleted from the database that means the // receipts that were created in the fork must also be deleted + batch := bc.db.NewBatch() for _, tx := range diff { - rawdb.DeleteTxLookupEntry(bc.db, tx.Hash()) + rawdb.DeleteTxLookupEntry(batch, tx.Hash()) } + batch.Write() + if len(deletedLogs) > 0 { go bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) } @@ -1387,27 +1395,21 @@ func (bc *BlockChain) update() { } } -// BadBlockArgs represents the entries in the list returned when bad blocks are queried. -type BadBlockArgs struct { - Hash common.Hash `json:"hash"` - Header *types.Header `json:"header"` -} - // BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network -func (bc *BlockChain) BadBlocks() ([]BadBlockArgs, error) { - headers := make([]BadBlockArgs, 0, bc.badBlocks.Len()) +func (bc *BlockChain) BadBlocks() []*types.Block { + blocks := make([]*types.Block, 0, bc.badBlocks.Len()) for _, hash := range bc.badBlocks.Keys() { - if hdr, exist := bc.badBlocks.Peek(hash); exist { - header := hdr.(*types.Header) - headers = append(headers, BadBlockArgs{header.Hash(), header}) + if blk, exist := bc.badBlocks.Peek(hash); exist { + block := blk.(*types.Block) + blocks = append(blocks, block) } } - return headers, nil + return blocks } // addBadBlock adds a bad block to the bad-block LRU cache func (bc *BlockChain) addBadBlock(block *types.Block) { - bc.badBlocks.Add(block.Header().Hash(), block.Header()) + bc.badBlocks.Add(block.Hash(), block) } // reportBlock logs a bad block error. 
@@ -1525,6 +1527,18 @@ func (bc *BlockChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []com return bc.hc.GetBlockHashesFromHash(hash, max) } +// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or +// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the +// number of blocks to be individually checked before we reach the canonical chain. +// +// Note: ancestor == 0 returns the same block, 1 returns its parent and so on. +func (bc *BlockChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { + bc.chainmu.Lock() + defer bc.chainmu.Unlock() + + return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) +} + // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. func (bc *BlockChain) GetHeaderByNumber(number uint64) *types.Header { diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 89c071174f..687209bfae 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -25,6 +25,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -35,6 +36,39 @@ import ( "github.com/ethereum/go-ethereum/params" ) +// So we can deterministically seed different blockchains +var ( + canonicalSeed = 1 + forkSeed = 2 +) + +// newCanonical creates a chain database, and injects a deterministic canonical +// chain. Depending on the full flag, it creates either a full block chain or a +// header only chain. +func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) { + var ( + db = ethdb.NewMemDatabase() + genesis = new(Genesis).MustCommit(db) + ) + + // Initialize a fresh chain with only a genesis block + blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}) + // Create and inject the requested chain + if n == 0 { + return db, blockchain, nil + } + if full { + // Full block-chain requested + blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed) + _, err := blockchain.InsertChain(blocks) + return db, blockchain, err + } + // Header-only chain requested + headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed) + _, err := blockchain.InsertHeaderChain(headers, 1) + return db, blockchain, err +} + // Test fork of length N starting from block i func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) { // Copy old chain up to #i into a new db @@ -578,7 +612,7 @@ func TestFastVsFullChains(t *testing.T) { Alloc: GenesisAlloc{address: {Balance: funds}}, } genesis = gspec.MustCommit(gendb) - signer = types.NewEIP155Signer(gspec.Config.ChainId) + signer = types.NewEIP155Signer(gspec.Config.ChainID) ) blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *BlockGen) { block.SetCoinbase(common.Address{0x00}) @@ -753,7 +787,7 @@ func TestChainTxReorgs(t *testing.T) { }, } genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainId) + signer = types.NewEIP155Signer(gspec.Config.ChainID) ) // Create two transactions shared between the chains: @@ -859,7 +893,7 @@ func TestLogReorgs(t *testing.T) { code =
common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainId) + signer = types.NewEIP155Signer(gspec.Config.ChainID) ) blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}) @@ -906,7 +940,7 @@ func TestReorgSideEvent(t *testing.T) { Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}, } genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainId) + signer = types.NewEIP155Signer(gspec.Config.ChainID) ) blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}) @@ -1032,7 +1066,7 @@ func TestEIP155Transition(t *testing.T) { funds = big.NewInt(1000000000) deleteAddr = common.Address{1} gspec = &Genesis{ - Config: &params.ChainConfig{ChainId: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}, + Config: &params.ChainConfig{ChainID: big.NewInt(1), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{address: {Balance: funds}, deleteAddr: {Balance: new(big.Int)}}, } genesis = gspec.MustCommit(db) @@ -1063,7 +1097,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) - tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId)) + tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID)) if err != nil { t.Fatal(err) } @@ -1075,7 +1109,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) - tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainId)) + tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID)) if err != nil { t.Fatal(err) } @@ -1103,7 +1137,7 @@ func TestEIP155Transition(t *testing.T) { } // generate an invalid chain id transaction - config := &params.ChainConfig{ChainId: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)} + config := &params.ChainConfig{ChainID: big.NewInt(2), EIP155Block: big.NewInt(2), HomesteadBlock: new(big.Int)} blocks, _ = GenerateChain(config, blocks[len(blocks)-1], ethash.NewFaker(), db, 4, func(i int, block *BlockGen) { var ( tx *types.Transaction @@ -1137,7 +1171,7 @@ func TestEIP161AccountRemoval(t *testing.T) { theAddr = common.Address{1} gspec = &Genesis{ Config: &params.ChainConfig{ - ChainId: big.NewInt(1), + ChainID: big.NewInt(1), HomesteadBlock: new(big.Int), EIP155Block: new(big.Int), EIP158Block: big.NewInt(2), @@ -1153,7 +1187,7 @@ func TestEIP161AccountRemoval(t *testing.T) { var ( tx *types.Transaction err error - signer = types.NewEIP155Signer(gspec.Config.ChainId) + signer = types.NewEIP155Signer(gspec.Config.ChainID) ) switch i { case 0: @@ -1279,8 +1313,8 @@ func TestTrieForkGC(t *testing.T) { } // Dereference all the recent tries and ensure no past trie is left in for i := 0; i < triesInMemory; i++ { - chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root(), common.Hash{}) - chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root(), common.Hash{}) + chain.stateCache.TrieDB().Dereference(blocks[len(blocks)-1-i].Root()) + chain.stateCache.TrieDB().Dereference(forks[len(blocks)-1-i].Root()) } if len(chain.stateCache.TrieDB().Nodes()) > 0 { t.Fatalf("stale tries still alive after garbage collection") diff --git a/core/chain_makers.go b/core/chain_makers.go index fcba90bb87..c1e4b4264a 100644 --- a/core/chain_makers.go +++
b/core/chain_makers.go @@ -30,12 +30,6 @@ import ( "github.com/ethereum/go-ethereum/params" ) -// So we can deterministically seed different blockchains -var ( - canonicalSeed = 1 - forkSeed = 2 -) - // BlockGen creates blocks for testing. // See GenerateChain for a detailed explanation. type BlockGen struct { @@ -252,33 +246,6 @@ func makeHeader(chain consensus.ChainReader, parent *types.Block, state *state.S } } -// newCanonical creates a chain database, and injects a deterministic canonical -// chain. Depending on the full flag, if creates either a full block chain or a -// header only chain. -func newCanonical(engine consensus.Engine, n int, full bool) (ethdb.Database, *BlockChain, error) { - var ( - db = ethdb.NewMemDatabase() - genesis = new(Genesis).MustCommit(db) - ) - - // Initialize a fresh chain with only a genesis block - blockchain, _ := NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}) - // Create and inject the requested chain - if n == 0 { - return db, blockchain, nil - } - if full { - // Full block-chain requested - blocks := makeBlockChain(genesis, n, engine, db, canonicalSeed) - _, err := blockchain.InsertChain(blocks) - return db, blockchain, err - } - // Header-only chain requested - headers := makeHeaderChain(genesis.Header(), n, engine, db, canonicalSeed) - _, err := blockchain.InsertHeaderChain(headers, 1) - return db, blockchain, err -} - // makeHeaderChain creates a deterministic chain of headers rooted at parent. func makeHeaderChain(parent *types.Header, n int, engine consensus.Engine, db ethdb.Database, seed int) []*types.Header { blocks := makeBlockChain(types.NewBlockWithHeader(parent), n, engine, db, seed) diff --git a/core/events.go b/core/events.go index 6f404f612b..8d200f2a29 100644 --- a/core/events.go +++ b/core/events.go @@ -21,8 +21,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) -// TxPreEvent is posted when a transaction enters the transaction pool. -type TxPreEvent struct{ Tx *types.Transaction } +// NewTxsEvent is posted when a batch of transactions enter the transaction pool. +type NewTxsEvent struct{ Txs []*types.Transaction } // PendingLogsEvent is posted pre mining and notifies of pending logs. type PendingLogsEvent struct { @@ -35,9 +35,6 @@ type PendingStateEvent struct{} // NewMinedBlockEvent is posted when a block has been imported. 
type NewMinedBlockEvent struct{ Block *types.Block } -// RemovedTransactionEvent is posted when a reorg happens -type RemovedTransactionEvent struct{ Txs types.Transactions } - // RemovedLogsEvent is posted when a reorg happens type RemovedLogsEvent struct{ Logs []*types.Log } diff --git a/core/headerchain.go b/core/headerchain.go index 2ac0cccc72..da6716d679 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -156,13 +156,16 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf if externTd.Cmp(localTd) > 0 || (externTd.Cmp(localTd) == 0 && mrand.Float64() < 0.5) { // Delete any canonical number assignments above the new head + batch := hc.chainDb.NewBatch() for i := number + 1; ; i++ { hash := rawdb.ReadCanonicalHash(hc.chainDb, i) if hash == (common.Hash{}) { break } - rawdb.DeleteCanonicalHash(hc.chainDb, i) + rawdb.DeleteCanonicalHash(batch, i) } + batch.Write() + // Overwrite any stale canonical number assignments var ( headHash = header.ParentHash @@ -307,6 +310,43 @@ func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []co return chain } +// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or +// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the +// number of blocks to be individually checked before we reach the canonical chain. +// +// Note: ancestor == 0 returns the same block, 1 returns its parent and so on. +func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { + if ancestor > number { + return common.Hash{}, 0 + } + if ancestor == 1 { + // in this case it is cheaper to just read the header + if header := hc.GetHeader(hash, number); header != nil { + return header.ParentHash, number - 1 + } else { + return common.Hash{}, 0 + } + } + for ancestor != 0 { + if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash { + number -= ancestor + return rawdb.ReadCanonicalHash(hc.chainDb, number), number + } + if *maxNonCanonical == 0 { + return common.Hash{}, 0 + } + *maxNonCanonical-- + ancestor-- + header := hc.GetHeader(hash, number) + if header == nil { + return common.Hash{}, 0 + } + hash = header.ParentHash + number-- + } + return hash, number +} + // GetTd retrieves a block's total difficulty in the canonical chain from the // database by hash and number, caching it if found. func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int { @@ -401,7 +441,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) { // DeleteCallback is a callback function that is called by SetHead before // each header is deleted. -type DeleteCallback func(common.Hash, uint64) +type DeleteCallback func(rawdb.DatabaseDeleter, common.Hash, uint64) // SetHead rewinds the local chain to a new head. Everything above the new head // will be deleted and the new one set. 
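// GetAncestor above only pays the hash-by-hash cost while the lookup is off
// the canonical chain: each non-canonical step spends the *maxNonCanonical
// budget, and as soon as a canonical block is hit it jumps straight down to
// the target height by number. A compact model of that strategy, with toy
// maps standing in for the header chain (none of these names are go-ethereum
// APIs, and the real code additionally special-cases a distance of one):

package main

import "fmt"

type block struct {
	hash, parent string
	number       uint64
}

// ancestor mimics HeaderChain.GetAncestor: dist is how many blocks to walk back.
func ancestor(byHash map[string]block, canonical map[uint64]string, hash string, number, dist uint64, maxNonCanonical *uint64) (string, uint64) {
	if dist > number {
		return "", 0
	}
	for dist != 0 {
		// On the canonical chain the ancestor can be indexed by height directly.
		if canonical[number] == hash {
			number -= dist
			return canonical[number], number
		}
		// Off-chain: step one parent at a time, bounded by the budget.
		if *maxNonCanonical == 0 {
			return "", 0
		}
		*maxNonCanonical--
		dist--
		b, ok := byHash[hash]
		if !ok {
			return "", 0
		}
		hash, number = b.parent, number-1
	}
	return hash, number
}

func main() {
	byHash := map[string]block{
		"a1": {"a1", "g", 1}, "a2": {"a2", "a1", 2}, // canonical blocks
		"b2": {"b2", "a1", 2}, "b3": {"b3", "b2", 3}, // fork blocks
	}
	canonical := map[uint64]string{0: "g", 1: "a1", 2: "a2"}
	budget := uint64(8)
	hash, number := ancestor(byHash, canonical, "b3", 3, 3, &budget)
	fmt.Println(hash, number) // g 0: two fork steps, then one canonical jump
}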
@@ -411,22 +451,24 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) { if hdr := hc.CurrentHeader(); hdr != nil { height = hdr.Number.Uint64() } - + batch := hc.chainDb.NewBatch() for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() { hash := hdr.Hash() num := hdr.Number.Uint64() if delFn != nil { - delFn(hash, num) + delFn(batch, hash, num) } - rawdb.DeleteHeader(hc.chainDb, hash, num) - rawdb.DeleteTd(hc.chainDb, hash, num) + rawdb.DeleteHeader(batch, hash, num) + rawdb.DeleteTd(batch, hash, num) hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1)) } // Roll back the canonical chain numbering for i := height; i > head; i-- { - rawdb.DeleteCanonicalHash(hc.chainDb, i) + rawdb.DeleteCanonicalHash(batch, i) } + batch.Write() + // Clear out any stale content from the caches hc.headerCache.Purge() hc.tdCache.Purge() diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index a26a42ba7c..da54328326 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -29,7 +29,7 @@ import ( // ReadCanonicalHash retrieves the hash assigned to a canonical block number. func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash { - data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)) + data, _ := db.Get(headerHashKey(number)) if len(data) == 0 { return common.Hash{} } @@ -38,22 +38,21 @@ func ReadCanonicalHash(db DatabaseReader, number uint64) common.Hash { // WriteCanonicalHash stores the hash assigned to a canonical block number. func WriteCanonicalHash(db DatabaseWriter, hash common.Hash, number uint64) { - key := append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...) - if err := db.Put(key, hash.Bytes()); err != nil { + if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil { log.Crit("Failed to store number to hash mapping", "err", err) } } // DeleteCanonicalHash removes the number to hash canonical mapping. func DeleteCanonicalHash(db DatabaseDeleter, number uint64) { - if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...)); err != nil { + if err := db.Delete(headerHashKey(number)); err != nil { log.Crit("Failed to delete number to hash mapping", "err", err) } } // ReadHeaderNumber returns the header number assigned to a hash. func ReadHeaderNumber(db DatabaseReader, hash common.Hash) *uint64 { - data, _ := db.Get(append(headerNumberPrefix, hash.Bytes()...)) + data, _ := db.Get(headerNumberKey(hash)) if len(data) != 8 { return nil } @@ -129,14 +128,13 @@ func WriteFastTrieProgress(db DatabaseWriter, count uint64) { // ReadHeaderRLP retrieves a block header in its raw RLP database encoding. func ReadHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { - data, _ := db.Get(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) + data, _ := db.Get(headerKey(number, hash)) return data } // HasHeader verifies the existence of a block header corresponding to the hash. 
func HasHeader(db DatabaseReader, hash common.Hash, number uint64) bool { - key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) - if has, err := db.Has(key); !has || err != nil { + if has, err := db.Has(headerKey(number, hash)); !has || err != nil { return false } return true @@ -161,11 +159,11 @@ func ReadHeader(db DatabaseReader, hash common.Hash, number uint64) *types.Heade func WriteHeader(db DatabaseWriter, header *types.Header) { // Write the hash -> number mapping var ( - hash = header.Hash().Bytes() + hash = header.Hash() number = header.Number.Uint64() encoded = encodeBlockNumber(number) ) - key := append(headerNumberPrefix, hash...) + key := headerNumberKey(hash) if err := db.Put(key, encoded); err != nil { log.Crit("Failed to store hash to number mapping", "err", err) } @@ -174,7 +172,7 @@ func WriteHeader(db DatabaseWriter, header *types.Header) { if err != nil { log.Crit("Failed to RLP encode header", "err", err) } - key = append(append(headerPrefix, encoded...), hash...) + key = headerKey(number, hash) if err := db.Put(key, data); err != nil { log.Crit("Failed to store header", "err", err) } @@ -182,32 +180,30 @@ func WriteHeader(db DatabaseWriter, header *types.Header) { // DeleteHeader removes all block header data associated with a hash. func DeleteHeader(db DatabaseDeleter, hash common.Hash, number uint64) { - if err := db.Delete(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + if err := db.Delete(headerKey(number, hash)); err != nil { log.Crit("Failed to delete header", "err", err) } - if err := db.Delete(append(headerNumberPrefix, hash.Bytes()...)); err != nil { + if err := db.Delete(headerNumberKey(hash)); err != nil { log.Crit("Failed to delete hash to number mapping", "err", err) } } // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding. func ReadBodyRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue { - data, _ := db.Get(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)) + data, _ := db.Get(blockBodyKey(number, hash)) return data } // WriteBodyRLP stores an RLP encoded block body into the database. func WriteBodyRLP(db DatabaseWriter, hash common.Hash, number uint64, rlp rlp.RawValue) { - key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, rlp); err != nil { + if err := db.Put(blockBodyKey(number, hash), rlp); err != nil { log.Crit("Failed to store block body", "err", err) } } // HasBody verifies the existence of a block body corresponding to the hash. func HasBody(db DatabaseReader, hash common.Hash, number uint64) bool { - key := append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if has, err := db.Has(key); !has || err != nil { + if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil { return false } return true @@ -238,14 +234,14 @@ func WriteBody(db DatabaseWriter, hash common.Hash, number uint64, body *types.B // DeleteBody removes all block body data associated with a hash. func DeleteBody(db DatabaseDeleter, hash common.Hash, number uint64) { - if err := db.Delete(append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + if err := db.Delete(blockBodyKey(number, hash)); err != nil { log.Crit("Failed to delete block body", "err", err) } } // ReadTd retrieves a block's total difficulty corresponding to the hash. 
func ReadTd(db DatabaseReader, hash common.Hash, number uint64) *big.Int { - data, _ := db.Get(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash[:]...), headerTDSuffix...)) + data, _ := db.Get(headerTDKey(number, hash)) if len(data) == 0 { return nil } @@ -263,15 +259,14 @@ func WriteTd(db DatabaseWriter, hash common.Hash, number uint64, td *big.Int) { if err != nil { log.Crit("Failed to RLP encode block total difficulty", "err", err) } - key := append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...) - if err := db.Put(key, data); err != nil { + if err := db.Put(headerTDKey(number, hash), data); err != nil { log.Crit("Failed to store block total difficulty", "err", err) } } // DeleteTd removes all block total difficulty data associated with a hash. func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { - if err := db.Delete(append(append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...), headerTDSuffix...)); err != nil { + if err := db.Delete(headerTDKey(number, hash)); err != nil { log.Crit("Failed to delete block total difficulty", "err", err) } } @@ -279,7 +274,7 @@ func DeleteTd(db DatabaseDeleter, hash common.Hash, number uint64) { // ReadReceipts retrieves all the transaction receipts belonging to a block. func ReadReceipts(db DatabaseReader, hash common.Hash, number uint64) types.Receipts { // Retrieve the flattened receipt slice - data, _ := db.Get(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash[:]...)) + data, _ := db.Get(blockReceiptsKey(number, hash)) if len(data) == 0 { return nil } @@ -308,15 +303,14 @@ func WriteReceipts(db DatabaseWriter, hash common.Hash, number uint64, receipts log.Crit("Failed to encode block receipts", "err", err) } // Store the flattened receipt slice - key := append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) - if err := db.Put(key, bytes); err != nil { + if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil { log.Crit("Failed to store block receipts", "err", err) } } // DeleteReceipts removes all receipt data associated with a block hash. func DeleteReceipts(db DatabaseDeleter, hash common.Hash, number uint64) { - if err := db.Delete(append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...)); err != nil { + if err := db.Delete(blockReceiptsKey(number, hash)); err != nil { log.Crit("Failed to delete block receipts", "err", err) } } diff --git a/core/rawdb/accessors_indexes.go b/core/rawdb/accessors_indexes.go index 9abad14e0c..4ff7e5bd35 100644 --- a/core/rawdb/accessors_indexes.go +++ b/core/rawdb/accessors_indexes.go @@ -17,8 +17,6 @@ package rawdb import ( - "encoding/binary" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" @@ -28,7 +26,7 @@ import ( // ReadTxLookupEntry retrieves the positional metadata associated with a transaction // hash to allow retrieving the transaction or receipt by hash. 
func ReadTxLookupEntry(db DatabaseReader, hash common.Hash) (common.Hash, uint64, uint64) { - data, _ := db.Get(append(txLookupPrefix, hash.Bytes()...)) + data, _ := db.Get(txLookupKey(hash)) if len(data) == 0 { return common.Hash{}, 0, 0 } @@ -53,7 +51,7 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) { if err != nil { log.Crit("Failed to encode transaction lookup entry", "err", err) } - if err := db.Put(append(txLookupPrefix, tx.Hash().Bytes()...), data); err != nil { + if err := db.Put(txLookupKey(tx.Hash()), data); err != nil { log.Crit("Failed to store transaction lookup entry", "err", err) } } @@ -61,7 +59,7 @@ func WriteTxLookupEntries(db DatabaseWriter, block *types.Block) { // DeleteTxLookupEntry removes all transaction data associated with a hash. func DeleteTxLookupEntry(db DatabaseDeleter, hash common.Hash) { - db.Delete(append(txLookupPrefix, hash.Bytes()...)) + db.Delete(txLookupKey(hash)) } // ReadTransaction retrieves a specific transaction from the database, along with @@ -97,23 +95,13 @@ func ReadReceipt(db DatabaseReader, hash common.Hash) (*types.Receipt, common.Ha // ReadBloomBits retrieves the compressed bloom bit vector belonging to the given // section and bit index from the database. func ReadBloomBits(db DatabaseReader, bit uint, section uint64, head common.Hash) ([]byte, error) { - key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) - - binary.BigEndian.PutUint16(key[1:], uint16(bit)) - binary.BigEndian.PutUint64(key[3:], section) - - return db.Get(key) + return db.Get(bloomBitsKey(bit, section, head)) } // WriteBloomBits stores the compressed bloom bits vector belonging to the given // section and bit index. func WriteBloomBits(db DatabaseWriter, bit uint, section uint64, head common.Hash, bits []byte) { - key := append(append(bloomBitsPrefix, make([]byte, 10)...), head.Bytes()...) - - binary.BigEndian.PutUint16(key[1:], uint16(bit)) - binary.BigEndian.PutUint64(key[3:], section) - - if err := db.Put(key, bits); err != nil { + if err := db.Put(bloomBitsKey(bit, section, head), bits); err != nil { log.Crit("Failed to store bloom bits", "err", err) } } diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 73ab983f2b..514328e87b 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -45,7 +45,7 @@ func WriteDatabaseVersion(db DatabaseWriter, version int) { // ReadChainConfig retrieves the consensus settings based on the given genesis hash. func ReadChainConfig(db DatabaseReader, hash common.Hash) *params.ChainConfig { - data, _ := db.Get(append(configPrefix, hash[:]...)) + data, _ := db.Get(configKey(hash)) if len(data) == 0 { return nil } @@ -66,14 +66,14 @@ func WriteChainConfig(db DatabaseWriter, hash common.Hash, cfg *params.ChainConf if err != nil { log.Crit("Failed to JSON encode chain config", "err", err) } - if err := db.Put(append(configPrefix, hash[:]...), data); err != nil { + if err := db.Put(configKey(hash), data); err != nil { log.Crit("Failed to store chain config", "err", err) } } // ReadPreimage retrieves a single preimage of the provided hash. func ReadPreimage(db DatabaseReader, hash common.Hash) []byte { - data, _ := db.Get(append(preimagePrefix, hash.Bytes()...)) + data, _ := db.Get(preimageKey(hash)) return data } @@ -81,7 +81,7 @@ func ReadPreimage(db DatabaseReader, hash common.Hash) []byte { // current block number, and is used for debug messages only.
func WritePreimages(db DatabaseWriter, number uint64, preimages map[common.Hash][]byte) { for hash, preimage := range preimages { - if err := db.Put(append(preimagePrefix, hash.Bytes()...), preimage); err != nil { + if err := db.Put(preimageKey(hash), preimage); err != nil { log.Crit("Failed to store trie preimage", "err", err) } } diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index a4b1596fde..ef597ef309 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -77,3 +77,58 @@ func encodeBlockNumber(number uint64) []byte { binary.BigEndian.PutUint64(enc, number) return enc } + +// headerKey = headerPrefix + num (uint64 big endian) + hash +func headerKey(number uint64, hash common.Hash) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// headerTDKey = headerPrefix + num (uint64 big endian) + hash + headerTDSuffix +func headerTDKey(number uint64, hash common.Hash) []byte { + return append(headerKey(number, hash), headerTDSuffix...) +} + +// headerHashKey = headerPrefix + num (uint64 big endian) + headerHashSuffix +func headerHashKey(number uint64) []byte { + return append(append(headerPrefix, encodeBlockNumber(number)...), headerHashSuffix...) +} + +// headerNumberKey = headerNumberPrefix + hash +func headerNumberKey(hash common.Hash) []byte { + return append(headerNumberPrefix, hash.Bytes()...) +} + +// blockBodyKey = blockBodyPrefix + num (uint64 big endian) + hash +func blockBodyKey(number uint64, hash common.Hash) []byte { + return append(append(blockBodyPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// blockReceiptsKey = blockReceiptsPrefix + num (uint64 big endian) + hash +func blockReceiptsKey(number uint64, hash common.Hash) []byte { + return append(append(blockReceiptsPrefix, encodeBlockNumber(number)...), hash.Bytes()...) +} + +// txLookupKey = txLookupPrefix + hash +func txLookupKey(hash common.Hash) []byte { + return append(txLookupPrefix, hash.Bytes()...) +} + +// bloomBitsKey = bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash +func bloomBitsKey(bit uint, section uint64, hash common.Hash) []byte { + key := append(append(bloomBitsPrefix, make([]byte, 10)...), hash.Bytes()...) + + binary.BigEndian.PutUint16(key[1:], uint16(bit)) + binary.BigEndian.PutUint64(key[3:], section) + + return key +} + +// preimageKey = preimagePrefix + hash +func preimageKey(hash common.Hash) []byte { + return append(preimagePrefix, hash.Bytes()...) +} + +// configKey = configPrefix + hash +func configKey(hash common.Hash) []byte { + return append(configPrefix, hash.Bytes()...) +} diff --git a/core/state/state_object.go b/core/state/state_object.go index 5d203fddd8..091d24184a 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -219,7 +219,7 @@ func (self *stateObject) updateRoot(db Database) { self.data.Root = self.trie.Hash() } -// CommitTrie the storage trie of the object to dwb. +// CommitTrie the storage trie of the object to db. // This updates the trie root. 
func (self *stateObject) CommitTrie(db Database) error { self.updateTrie(db) diff --git a/core/state/state_test.go b/core/state/state_test.go index 12778f6f12..123559ea9b 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -99,7 +99,7 @@ func (s *StateSuite) TestNull(c *checker.C) { s.state.SetState(address, common.Hash{}, value) s.state.Commit(false) value = s.state.GetState(address, common.Hash{}) - if !common.EmptyHash(value) { + if value != (common.Hash{}) { c.Errorf("expected empty hash. got %x", value) } } diff --git a/core/state/statedb.go b/core/state/statedb.go index a952027d6d..92d394ae32 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -358,7 +358,7 @@ func (self *StateDB) deleteStateObject(stateObject *stateObject) { self.setError(self.trie.TryDelete(addr[:])) } -// Retrieve a state object given my the address. Returns nil if not found. +// Retrieve a state object given by the address. Returns nil if not found. func (self *StateDB) getStateObject(addr common.Address) (stateObject *stateObject) { // Prefer 'live' objects. if obj := self.stateObjects[addr]; obj != nil { @@ -596,7 +596,7 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (root common.Hash, err error) case isDirty: // Write any contract code associated with the state object if stateObject.code != nil && stateObject.dirtyCode { - s.db.TrieDB().Insert(common.BytesToHash(stateObject.CodeHash()), stateObject.code) + s.db.TrieDB().InsertBlob(common.BytesToHash(stateObject.CodeHash()), stateObject.code) stateObject.dirtyCode = false } // Write any storage changes in the state object to its storage trie. diff --git a/core/state/sync.go b/core/state/sync.go index 28fcf6ae05..c566e79073 100644 --- a/core/state/sync.go +++ b/core/state/sync.go @@ -25,8 +25,8 @@ import ( ) // NewStateSync creates a new state trie download scheduler. -func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync { - var syncer *trie.TrieSync +func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.Sync { + var syncer *trie.Sync callback := func(leaf []byte, parent common.Hash) error { var obj Account if err := rlp.Decode(bytes.NewReader(leaf), &obj); err != nil { @@ -36,6 +36,6 @@ func NewStateSync(root common.Hash, database trie.DatabaseReader) *trie.TrieSync syncer.AddRawEntry(common.BytesToHash(obj.CodeHash), 64, parent) return nil } - syncer = trie.NewTrieSync(root, database, callback) + syncer = trie.NewSync(root, database, callback) return syncer } diff --git a/core/state_processor.go b/core/state_processor.go index 4dc58b9dea..8e238ce1f0 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -85,7 +85,7 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg // and uses the input parameters for its environment. It returns the receipt // for the transaction, gas used and an error if the transaction failed, // indicating the block was invalid.
-func ApplyTransaction(config *params.ChainConfig, bc *BlockChain, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) { +func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, uint64, error) { msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) if err != nil { return nil, 0, err } diff --git a/core/tx_cacher.go b/core/tx_cacher.go new file mode 100644 index 0000000000..6d989c83d9 --- /dev/null +++ b/core/tx_cacher.go @@ -0,0 +1,105 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "runtime" + + "github.com/ethereum/go-ethereum/core/types" +) + +// senderCacher is a concurrent transaction sender recoverer and cacher. +var senderCacher = newTxSenderCacher(runtime.NumCPU()) + +// txSenderCacherRequest is a request for recovering transaction senders with a +// specific signature scheme and caching it into the transactions themselves. +// +// The inc field defines the number of transactions to skip after each recovery, +// which is used to feed the same underlying input array to different threads but +// ensure they process the early transactions fast. +type txSenderCacherRequest struct { + signer types.Signer + txs []*types.Transaction + inc int +} + +// txSenderCacher is a helper structure to concurrently ecrecover transaction +// senders from digital signatures on background threads. +type txSenderCacher struct { + threads int + tasks chan *txSenderCacherRequest +} + +// newTxSenderCacher creates a new transaction sender background cacher and starts +// as many processing goroutines as allowed by the GOMAXPROCS on construction. +func newTxSenderCacher(threads int) *txSenderCacher { + cacher := &txSenderCacher{ + tasks: make(chan *txSenderCacherRequest, threads), + threads: threads, + } + for i := 0; i < threads; i++ { + go cacher.cache() + } + return cacher +} + +// cache is an infinite loop, caching transaction senders from various forms of +// data structures. +func (cacher *txSenderCacher) cache() { + for task := range cacher.tasks { + for i := 0; i < len(task.txs); i += task.inc { + types.Sender(task.signer, task.txs[i]) + } + } +} + +// recover recovers the senders from a batch of transactions and caches them +// back into the same data structures. There is no validation being done, nor +// any reaction to invalid signatures. That is up to calling code later.
+func (cacher *txSenderCacher) recover(signer types.Signer, txs []*types.Transaction) { + // If there's nothing to recover, abort + if len(txs) == 0 { + return + } + // Ensure we have meaningful task sizes and schedule the recoveries + tasks := cacher.threads + if len(txs) < tasks*4 { + tasks = (len(txs) + 3) / 4 + } + for i := 0; i < tasks; i++ { + cacher.tasks <- &txSenderCacherRequest{ + signer: signer, + txs: txs[i:], + inc: tasks, + } + } +} + +// recoverFromBlocks recovers the senders from a batch of blocks and caches them +// back into the same data structures. There is no validation being done, nor +// any reaction to invalid signatures. That is up to calling code later. +func (cacher *txSenderCacher) recoverFromBlocks(signer types.Signer, blocks []*types.Block) { + count := 0 + for _, block := range blocks { + count += len(block.Transactions()) + } + txs := make([]*types.Transaction, 0, count) + for _, block := range blocks { + txs = append(txs, block.Transactions()...) + } + cacher.recover(signer, txs) +} diff --git a/core/tx_journal.go b/core/tx_journal.go index e872d7b530..1397e9fd34 100644 --- a/core/tx_journal.go +++ b/core/tx_journal.go @@ -56,7 +56,7 @@ func newTxJournal(path string) *txJournal { // load parses a transaction journal dump from disk, loading its contents into // the specified pool. -func (journal *txJournal) load(add func(*types.Transaction) error) error { +func (journal *txJournal) load(add func([]*types.Transaction) []error) error { // Skip the parsing if the journal file doesn't exist at all if _, err := os.Stat(journal.path); os.IsNotExist(err) { return nil @@ -76,7 +76,21 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error { stream := rlp.NewStream(input, 0) total, dropped := 0, 0 - var failure error + // Create a method to load a limited batch of transactions and bump the + // appropriate progress counters. Then use this method to load all the + // journalled transactions in small-ish batches. + loadBatch := func(txs types.Transactions) { + for _, err := range add(txs) { + if err != nil { + log.Debug("Failed to add journaled transaction", "err", err) + dropped++ + } + } + } + var ( + failure error + batch types.Transactions + ) for { // Parse the next transaction and terminate on error tx := new(types.Transaction) @@ -84,14 +98,17 @@ func (journal *txJournal) load(add func(*types.Transaction) error) error { if err != io.EOF { failure = err } + if batch.Len() > 0 { + loadBatch(batch) + } break } - // Import the transaction and bump the appropriate progress counters + // New transaction parsed, queue up for later, import if threshold is reached total++ + + if batch = append(batch, tx); batch.Len() > 1024 { + loadBatch(batch) + batch = batch[:0] + } } log.Info("Loaded local transaction journal", "transactions", total, "dropped", dropped) diff --git a/core/tx_list.go b/core/tx_list.go index ea6ee7019f..287dda4c33 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -397,13 +397,13 @@ func (h *priceHeap) Pop() interface{} { // txPricedList is a price-sorted heap to allow operating on transactions pool // contents in a price-incrementing way.
type txPricedList struct { - all *map[common.Hash]*types.Transaction // Pointer to the map of all transactions - items *priceHeap // Heap of prices of all the stored transactions - stales int // Number of stale price points to (re-heap trigger) + all *txLookup // Pointer to the map of all transactions + items *priceHeap // Heap of prices of all the stored transactions + stales int // Number of stale price points to (re-heap trigger) } // newTxPricedList creates a new price-sorted transaction heap. -func newTxPricedList(all *map[common.Hash]*types.Transaction) *txPricedList { +func newTxPricedList(all *txLookup) *txPricedList { return &txPricedList{ all: all, items: new(priceHeap), @@ -425,12 +425,13 @@ func (l *txPricedList) Removed() { return } // Seems we've reached a critical number of stale transactions, reheap - reheap := make(priceHeap, 0, len(*l.all)) + reheap := make(priceHeap, 0, l.all.Count()) l.stales, l.items = 0, &reheap - for _, tx := range *l.all { + l.all.Range(func(hash common.Hash, tx *types.Transaction) bool { *l.items = append(*l.items, tx) - } + return true + }) heap.Init(l.items) } @@ -443,7 +444,7 @@ func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transact for len(*l.items) > 0 { // Discard stale transactions if found during cleanup tx := heap.Pop(l.items).(*types.Transaction) - if _, ok := (*l.all)[tx.Hash()]; !ok { + if l.all.Get(tx.Hash()) == nil { l.stales-- continue } @@ -475,7 +476,7 @@ func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) boo // Discard stale price points if found at the heap start for len(*l.items) > 0 { head := []*types.Transaction(*l.items)[0] - if _, ok := (*l.all)[head.Hash()]; !ok { + if l.all.Get(head.Hash()) == nil { l.stales-- heap.Pop(l.items) continue @@ -500,7 +501,7 @@ func (l *txPricedList) Discard(count int, local *accountSet) types.Transactions for len(*l.items) > 0 && count > 0 { // Discard stale transactions if found during cleanup tx := heap.Pop(l.items).(*types.Transaction) - if _, ok := (*l.all)[tx.Hash()]; !ok { + if l.all.Get(tx.Hash()) == nil { l.stales-- continue } diff --git a/core/tx_pool.go b/core/tx_pool.go index 388b40058f..9c958e3b6f 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -200,11 +200,11 @@ type TxPool struct { locals *accountSet // Set of local transaction to exempt from eviction rules journal *txJournal // Journal of local transaction to back up to disk - pending map[common.Address]*txList // All currently processable transactions - queue map[common.Address]*txList // Queued but non-processable transactions - beats map[common.Address]time.Time // Last heartbeat from each known account - all map[common.Hash]*types.Transaction // All transactions to allow lookups - priced *txPricedList // All transactions sorted by price + pending map[common.Address]*txList // All currently processable transactions + queue map[common.Address]*txList // Queued but non-processable transactions + beats map[common.Address]time.Time // Last heartbeat from each known account + all *txLookup // All transactions to allow lookups + priced *txPricedList // All transactions sorted by price wg sync.WaitGroup // for shutdown sync @@ -222,23 +222,23 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block config: config, chainconfig: chainconfig, chain: chain, - signer: types.NewEIP155Signer(chainconfig.ChainId), + signer: types.NewEIP155Signer(chainconfig.ChainID), pending: make(map[common.Address]*txList), queue: make(map[common.Address]*txList), beats: 
make(map[common.Address]time.Time), - all: make(map[common.Hash]*types.Transaction), + all: newTxLookup(), chainHeadCh: make(chan ChainHeadEvent, chainHeadChanSize), gasPrice: new(big.Int).SetUint64(config.PriceLimit), } pool.locals = newAccountSet(pool.signer) - pool.priced = newTxPricedList(&pool.all) + pool.priced = newTxPricedList(pool.all) pool.reset(nil, chain.CurrentBlock().Header()) // If local transactions and journaling is enabled, load from disk if !config.NoLocals && config.Journal != "" { pool.journal = newTxJournal(config.Journal) - if err := pool.journal.load(pool.AddLocal); err != nil { + if err := pool.journal.load(pool.AddLocals); err != nil { log.Warn("Failed to load transaction journal", "err", err) } if err := pool.journal.rotate(pool.local()); err != nil { @@ -411,6 +411,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // Inject any transactions discarded due to reorgs log.Debug("Reinjecting stale transactions", "count", len(reinject)) + senderCacher.recover(pool.signer, reinject) pool.addTxsLocked(reinject, false) // validate the pool of pending transactions, this will remove @@ -444,9 +445,9 @@ func (pool *TxPool) Stop() { log.Info("Transaction pool stopped") } -// SubscribeTxPreEvent registers a subscription of TxPreEvent and +// SubscribeNewTxsEvent registers a subscription of NewTxsEvent and // starts sending event to the given channel. -func (pool *TxPool) SubscribeTxPreEvent(ch chan<- TxPreEvent) event.Subscription { +func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- NewTxsEvent) event.Subscription { return pool.scope.Track(pool.txFeed.Subscribe(ch)) } @@ -605,7 +606,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { // If the transaction is already known, discard it hash := tx.Hash() - if pool.all[hash] != nil { + if pool.all.Get(hash) != nil { log.Trace("Discarding already known transaction", "hash", hash) return false, fmt.Errorf("known transaction: %x", hash) } @@ -616,7 +617,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { return false, err } // If the transaction pool is full, discard underpriced transactions - if uint64(len(pool.all)) >= pool.config.GlobalSlots+pool.config.GlobalQueue { + if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it if !local && pool.priced.Underpriced(tx, pool.locals) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) @@ -624,7 +625,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { return false, ErrUnderpriced } // New transaction is better than our worse ones, make room for it - drop := pool.priced.Discard(len(pool.all)-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) + drop := pool.priced.Discard(pool.all.Count()-int(pool.config.GlobalSlots+pool.config.GlobalQueue-1), pool.locals) for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) underpricedTxCounter.Inc(1) @@ -642,18 +643,18 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (bool, error) { } // New transaction is better, replace old one if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() pendingReplaceCounter.Inc(1) } - pool.all[tx.Hash()] = tx + pool.all.Add(tx) pool.priced.Put(tx) pool.journalTx(from, tx) log.Trace("Pooled 
new executable transaction", "hash", hash, "from", from, "to", tx.To()) // We've directly injected a replacement transaction, notify subsystems - go pool.txFeed.Send(TxPreEvent{tx}) + go pool.txFeed.Send(NewTxsEvent{types.Transactions{tx}}) return old != nil, nil } @@ -689,12 +690,12 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er } // Discard any previous transaction and mark this if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() queuedReplaceCounter.Inc(1) } - if pool.all[hash] == nil { - pool.all[hash] = tx + if pool.all.Get(hash) == nil { + pool.all.Add(tx) pool.priced.Put(tx) } return old != nil, nil @@ -712,10 +713,11 @@ func (pool *TxPool) journalTx(from common.Address, tx *types.Transaction) { } } -// promoteTx adds a transaction to the pending (processable) list of transactions. +// promoteTx adds a transaction to the pending (processable) list of transactions +// and returns whether it was inserted or an older was better. // // Note, this method assumes the pool lock is held! -func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) { +func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.Transaction) bool { // Try to insert the transaction into the pending queue if pool.pending[addr] == nil { pool.pending[addr] = newTxList(true) @@ -725,29 +727,29 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T inserted, old := list.Add(tx, pool.config.PriceBump) if !inserted { // An older transaction was better, discard this - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() pendingDiscardCounter.Inc(1) - return + return false } // Otherwise discard any previous transaction and mark this if old != nil { - delete(pool.all, old.Hash()) + pool.all.Remove(old.Hash()) pool.priced.Removed() pendingReplaceCounter.Inc(1) } // Failsafe to work around direct pending inserts (tests) - if pool.all[hash] == nil { - pool.all[hash] = tx + if pool.all.Get(hash) == nil { + pool.all.Add(tx) pool.priced.Put(tx) } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.beats[addr] = time.Now() pool.pendingState.SetNonce(addr, tx.Nonce()+1) - go pool.txFeed.Send(TxPreEvent{tx}) + return true } // AddLocal enqueues a single transaction into the pool if it is valid, marking @@ -813,11 +815,9 @@ func (pool *TxPool) addTxsLocked(txs []*types.Transaction, local bool) []error { for i, tx := range txs { var replace bool - if replace, errs[i] = pool.add(tx, local); errs[i] == nil { - if !replace { - from, _ := types.Sender(pool.signer, tx) // already validated - dirty[from] = struct{}{} - } + if replace, errs[i] = pool.add(tx, local); errs[i] == nil && !replace { + from, _ := types.Sender(pool.signer, tx) // already validated + dirty[from] = struct{}{} } } // Only reprocess the internal state if something was actually added @@ -839,7 +839,7 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { status := make([]TxStatus, len(hashes)) for i, hash := range hashes { - if tx := pool.all[hash]; tx != nil { + if tx := pool.all.Get(hash); tx != nil { from, _ := types.Sender(pool.signer, tx) // already validated if pool.pending[from] != nil && pool.pending[from].txs.items[tx.Nonce()] != nil { status[i] = TxStatusPending @@ -854,24 +854,21 @@ func (pool *TxPool) Status(hashes []common.Hash) []TxStatus { // Get returns a transaction if it is contained in the pool // and nil otherwise. 
func (pool *TxPool) Get(hash common.Hash) *types.Transaction { - pool.mu.RLock() - defer pool.mu.RUnlock() - - return pool.all[hash] + return pool.all.Get(hash) } // removeTx removes a single transaction from the queue, moving all subsequent // transactions back to the future queue. func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // Fetch the transaction we wish to delete - tx, ok := pool.all[hash] - if !ok { + tx := pool.all.Get(hash) + if tx == nil { return } addr, _ := types.Sender(pool.signer, tx) // already validated during insertion // Remove it from the list of known transactions - delete(pool.all, hash) + pool.all.Remove(hash) if outofbound { pool.priced.Removed() } @@ -907,6 +904,9 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { // future queue to the set of pending transactions. During this process, all // invalidated transactions (low nonce, low balance) are deleted. func (pool *TxPool) promoteExecutables(accounts []common.Address) { + // Track the promoted transactions to broadcast them at once + var promoted []*types.Transaction + // Gather all the accounts potentially needing updates if accounts == nil { accounts = make([]common.Address, 0, len(pool.queue)) @@ -924,7 +924,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Forward(pool.currentState.GetNonce(addr)) { hash := tx.Hash() log.Trace("Removed old queued transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() } // Drop all transactions that are too costly (low balance or out of gas) @@ -932,21 +932,23 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable queued transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() queuedNofundsCounter.Inc(1) } // Gather all executable transactions and promote them for _, tx := range list.Ready(pool.pendingState.GetNonce(addr)) { hash := tx.Hash() - log.Trace("Promoting queued transaction", "hash", hash) - pool.promoteTx(addr, hash, tx) + if pool.promoteTx(addr, hash, tx) { + log.Trace("Promoting queued transaction", "hash", hash) + promoted = append(promoted, tx) + } } // Drop all transactions over the allowed limit if !pool.locals.contains(addr) { for _, tx := range list.Cap(int(pool.config.AccountQueue)) { hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() queuedRateLimitCounter.Inc(1) log.Trace("Removed cap-exceeding queued transaction", "hash", hash) @@ -957,6 +959,10 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { delete(pool.queue, addr) } } + // Notify subsystem for new promoted transactions. 
+ if len(promoted) > 0 { + go pool.txFeed.Send(NewTxsEvent{promoted}) + } // If the pending limit is overflown, start equalizing allowances pending := uint64(0) for _, list := range pool.pending { @@ -991,7 +997,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Cap(list.Len() - 1) { // Drop the transaction from the global pools too hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() // Update the account nonce to the dropped transaction @@ -1013,7 +1019,7 @@ func (pool *TxPool) promoteExecutables(accounts []common.Address) { for _, tx := range list.Cap(list.Len() - 1) { // Drop the transaction from the global pools too hash := tx.Hash() - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() // Update the account nonce to the dropped transaction @@ -1082,7 +1088,7 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range list.Forward(nonce) { hash := tx.Hash() log.Trace("Removed old pending transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() } // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later @@ -1090,7 +1096,7 @@ for _, tx := range drops { hash := tx.Hash() log.Trace("Removed unpayable pending transaction", "hash", hash) - delete(pool.all, hash) + pool.all.Remove(hash) pool.priced.Removed() pendingNofundsCounter.Inc(1) } @@ -1099,7 +1105,7 @@ log.Trace("Demoting pending transaction", "hash", hash) pool.enqueueTx(hash, tx) } - // If there's a gap in front, warn (should never happen) and postpone all transactions + // If there's a gap in front, alert (should never happen) and postpone all transactions if list.Len() > 0 && list.txs.Get(nonce) == nil { for _, tx := range list.Cap(0) { hash := tx.Hash() @@ -1162,3 +1168,68 @@ func (as *accountSet) containsTx(tx *types.Transaction) bool { func (as *accountSet) add(addr common.Address) { as.accounts[addr] = struct{}{} } + +// txLookup is used internally by TxPool to track transactions while allowing lookup without +// mutex contention. +// +// Note, although this type is properly protected against concurrent access, it +// is **not** a type that should ever be mutated or even exposed outside of the +// transaction pool, since its internal state is tightly coupled with the pool's +// internal mechanisms. The sole purpose of the type is to permit out-of-bound +// peeking into the pool in TxPool.Get without having to acquire the widely scoped +// TxPool.mu mutex. +type txLookup struct { + all map[common.Hash]*types.Transaction + lock sync.RWMutex +} + +// newTxLookup returns a new txLookup structure. +func newTxLookup() *txLookup { + return &txLookup{ + all: make(map[common.Hash]*types.Transaction), + } +} + +// Range calls f on each key and value present in the map. +func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { + t.lock.RLock() + defer t.lock.RUnlock() + + for key, value := range t.all { + if !f(key, value) { + break + } + } +} + +// Get returns a transaction if it exists in the lookup, or nil if not found. +func (t *txLookup) Get(hash common.Hash) *types.Transaction { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.all[hash] +} + +// Count returns the current number of items in the lookup.
+func (t *txLookup) Count() int { + t.lock.RLock() + defer t.lock.RUnlock() + + return len(t.all) +} + +// Add adds a transaction to the lookup. +func (t *txLookup) Add(tx *types.Transaction) { + t.lock.Lock() + defer t.lock.Unlock() + + t.all[tx.Hash()] = tx +} + +// Remove removes a transaction from the lookup. +func (t *txLookup) Remove(hash common.Hash) { + t.lock.Lock() + defer t.lock.Unlock() + + delete(t.all, hash) +} diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index e7f52075e5..5a59205442 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -94,7 +94,7 @@ func validateTxPoolInternals(pool *TxPool) error { // Ensure the total transaction set is consistent with pending + queued pending, queued := pool.stats() - if total := len(pool.all); total != pending+queued { + if total := pool.all.Count(); total != pending+queued { return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) } if priced := pool.priced.items.Len() - pool.priced.stales; priced != pending+queued { @@ -118,21 +118,27 @@ func validateTxPoolInternals(pool *TxPool) error { // validateEvents checks that the correct number of transaction addition events // were fired on the pool's event feed. -func validateEvents(events chan TxPreEvent, count int) error { - for i := 0; i < count; i++ { +func validateEvents(events chan NewTxsEvent, count int) error { + var received []*types.Transaction + + for len(received) < count { select { - case <-events: + case ev := <-events: + received = append(received, ev.Txs...) case <-time.After(time.Second): - return fmt.Errorf("event #%d not fired", i) + return fmt.Errorf("event #%d not fired", len(received)) } } + if len(received) > count { + return fmt.Errorf("more than %d events fired: %v", count, received[count:]) + } select { - case tx := <-events: - return fmt.Errorf("more than %d events fired: %v", count, tx.Tx) + case ev := <-events: + return fmt.Errorf("more than %d events fired: %v", count, ev.Txs) + case <-time.After(50 * time.Millisecond): // This branch should be "default", but it's a data race between goroutines, - // reading the event channel and pushng into it, so better wait a bit ensuring + // reading the event channel and pushing into it, so better wait a bit ensuring // really nothing gets injected.
 	}
 	return nil
@@ -395,8 +401,8 @@ func TestTransactionDoubleNonce(t *testing.T) {
 		t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
 	}
 	// Ensure the total transaction count is correct
-	if len(pool.all) != 1 {
-		t.Error("expected 1 total transactions, got", len(pool.all))
+	if pool.all.Count() != 1 {
+		t.Error("expected 1 total transactions, got", pool.all.Count())
 	}
 }
@@ -418,8 +424,8 @@ func TestTransactionMissingNonce(t *testing.T) {
 	if pool.queue[addr].Len() != 1 {
 		t.Error("expected 1 queued transaction, got", pool.queue[addr].Len())
 	}
-	if len(pool.all) != 1 {
-		t.Error("expected 1 total transactions, got", len(pool.all))
+	if pool.all.Count() != 1 {
+		t.Error("expected 1 total transactions, got", pool.all.Count())
 	}
 }
@@ -482,8 +488,8 @@ func TestTransactionDropping(t *testing.T) {
 	if pool.queue[account].Len() != 3 {
 		t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
 	}
-	if len(pool.all) != 6 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6)
+	if pool.all.Count() != 6 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
 	}
 	pool.lockedReset(nil, nil)
 	if pool.pending[account].Len() != 3 {
@@ -492,8 +498,8 @@ func TestTransactionDropping(t *testing.T) {
 	if pool.queue[account].Len() != 3 {
 		t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
 	}
-	if len(pool.all) != 6 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 6)
+	if pool.all.Count() != 6 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
 	}
 	// Reduce the balance of the account, and check that invalidated transactions are dropped
 	pool.currentState.AddBalance(account, big.NewInt(-650))
@@ -517,8 +523,8 @@ func TestTransactionDropping(t *testing.T) {
 	if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
 		t.Errorf("out-of-fund queued transaction present: %v", tx11)
 	}
-	if len(pool.all) != 4 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 4)
+	if pool.all.Count() != 4 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
 	}
 	// Reduce the block gas limit, check that invalidated transactions are dropped
 	pool.chain.(*testBlockChain).gasLimit = 100
@@ -536,8 +542,8 @@ func TestTransactionDropping(t *testing.T) {
 	if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
 		t.Errorf("over-gased queued transaction present: %v", tx11)
 	}
-	if len(pool.all) != 2 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), 2)
+	if pool.all.Count() != 2 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2)
 	}
 }
@@ -590,8 +596,8 @@ func TestTransactionPostponing(t *testing.T) {
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
-	if len(pool.all) != len(txs) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
+	if pool.all.Count() != len(txs) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
 	pool.lockedReset(nil, nil)
 	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
@@ -600,8 +606,8 @@ func TestTransactionPostponing(t *testing.T) {
 	if len(pool.queue) != 0 {
 		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
 	}
-	if len(pool.all) != len(txs) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
+	if pool.all.Count() != len(txs) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
 	}
 	// Reduce the balance of the account, and check that transactions are reorganised
 	for _, addr := range accs {
@@ -650,8 +656,8 @@ func TestTransactionPostponing(t *testing.T) {
 			}
 		}
 	}
-	if len(pool.all) != len(txs)/2 {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)/2)
+	if pool.all.Count() != len(txs)/2 {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2)
 	}
 }
@@ -669,7 +675,7 @@ func TestTransactionGapFilling(t *testing.T) {
 	pool.currentState.AddBalance(account, big.NewInt(1000000))

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+	events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
@@ -742,8 +748,8 @@ func TestTransactionQueueAccountLimiting(t *testing.T) {
 			}
 		}
 	}
-	if len(pool.all) != int(testTxPoolConfig.AccountQueue) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue)
+	if pool.all.Count() != int(testTxPoolConfig.AccountQueue) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue)
 	}
 }
@@ -920,7 +926,7 @@ func TestTransactionPendingLimiting(t *testing.T) {
 	pool.currentState.AddBalance(account, big.NewInt(1000000))

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, testTxPoolConfig.AccountQueue+5)
+	events := make(chan NewTxsEvent, testTxPoolConfig.AccountQueue+5)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
@@ -936,8 +942,8 @@ func TestTransactionPendingLimiting(t *testing.T) {
 			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
 		}
 	}
-	if len(pool.all) != int(testTxPoolConfig.AccountQueue+5) {
-		t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), testTxPoolConfig.AccountQueue+5)
+	if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) {
+		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5)
 	}
 	if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil {
 		t.Fatalf("event firing failed: %v", err)
@@ -987,8 +993,8 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
 	if len(pool1.queue) != len(pool2.queue) {
 		t.Errorf("queued transaction count mismatch: one-by-one algo: %d, batch algo: %d", len(pool1.queue), len(pool2.queue))
 	}
-	if len(pool1.all) != len(pool2.all) {
-		t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", len(pool1.all), len(pool2.all))
+	if pool1.all.Count() != pool2.all.Count() {
+		t.Errorf("total transaction count mismatch: one-by-one algo %d, batch algo %d", pool1.all.Count(), pool2.all.Count())
 	}
 	if err := validateTxPoolInternals(pool1); err != nil {
 		t.Errorf("pool 1 internal state corrupted: %v", err)
@@ -1140,7 +1146,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
@@ -1327,7 +1333,7 @@ func TestTransactionPoolUnderpricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
@@ -1433,7 +1439,7 @@ func TestTransactionPoolStableUnderpricing(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
@@ -1495,7 +1501,7 @@ func TestTransactionReplacement(t *testing.T) {
 	defer pool.Stop()

 	// Keep track of transaction events to ensure all executables get announced
-	events := make(chan TxPreEvent, 32)
+	events := make(chan NewTxsEvent, 32)
 	sub := pool.txFeed.Subscribe(events)
 	defer sub.Unsubscribe()
diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go
index c297adebb6..5c807a4ccb 100644
--- a/core/types/gen_receipt_json.go
+++ b/core/types/gen_receipt_json.go
@@ -12,10 +12,11 @@ import (
 var _ = (*receiptMarshaling)(nil)

+// MarshalJSON marshals as JSON.
 func (r Receipt) MarshalJSON() ([]byte, error) {
 	type Receipt struct {
 		PostState         hexutil.Bytes  `json:"root"`
-		Status            hexutil.Uint   `json:"status"`
+		Status            hexutil.Uint64 `json:"status"`
 		CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 		Bloom             Bloom          `json:"logsBloom" gencodec:"required"`
 		Logs              []*Log         `json:"logs" gencodec:"required"`
@@ -25,7 +26,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
 	}
 	var enc Receipt
 	enc.PostState = r.PostState
-	enc.Status = hexutil.Uint(r.Status)
+	enc.Status = hexutil.Uint64(r.Status)
 	enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed)
 	enc.Bloom = r.Bloom
 	enc.Logs = r.Logs
@@ -35,10 +36,11 @@ func (r Receipt) MarshalJSON() ([]byte, error) {
 	return json.Marshal(&enc)
 }

+// UnmarshalJSON unmarshals from JSON.
 func (r *Receipt) UnmarshalJSON(input []byte) error {
 	type Receipt struct {
 		PostState         *hexutil.Bytes  `json:"root"`
-		Status            *hexutil.Uint   `json:"status"`
+		Status            *hexutil.Uint64 `json:"status"`
 		CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 		Bloom             *Bloom          `json:"logsBloom" gencodec:"required"`
 		Logs              []*Log          `json:"logs" gencodec:"required"`
@@ -54,7 +56,7 @@ func (r *Receipt) UnmarshalJSON(input []byte) error {
 		r.PostState = *dec.PostState
 	}
 	if dec.Status != nil {
-		r.Status = uint(*dec.Status)
+		r.Status = uint64(*dec.Status)
 	}
 	if dec.CumulativeGasUsed == nil {
 		return errors.New("missing required field 'cumulativeGasUsed' for Receipt")
diff --git a/core/types/receipt.go b/core/types/receipt.go
index 613f03d50f..3d1fc95aab 100644
--- a/core/types/receipt.go
+++ b/core/types/receipt.go
@@ -36,17 +36,17 @@ var (
 const (
 	// ReceiptStatusFailed is the status code of a transaction if execution failed.
-	ReceiptStatusFailed = uint(0)
+	ReceiptStatusFailed = uint64(0)

 	// ReceiptStatusSuccessful is the status code of a transaction if execution succeeded.
-	ReceiptStatusSuccessful = uint(1)
+	ReceiptStatusSuccessful = uint64(1)
 )

 // Receipt represents the results of a transaction.
 type Receipt struct {
 	// Consensus fields
 	PostState         []byte `json:"root"`
-	Status            uint   `json:"status"`
+	Status            uint64 `json:"status"`
 	CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"`
 	Bloom             Bloom  `json:"logsBloom" gencodec:"required"`
 	Logs              []*Log `json:"logs" gencodec:"required"`
@@ -59,7 +59,7 @@ type Receipt struct {
 type receiptMarshaling struct {
 	PostState         hexutil.Bytes
-	Status            hexutil.Uint
+	Status            hexutil.Uint64
 	CumulativeGasUsed hexutil.Uint64
 	GasUsed           hexutil.Uint64
 }
diff --git a/core/types/transaction.go b/core/types/transaction.go
index c1cb7a043a..b824a77f61 100644
--- a/core/types/transaction.go
+++ b/core/types/transaction.go
@@ -35,15 +35,6 @@ var (
 	ErrInvalidSig = errors.New("invalid transaction v, r, s values")
 )

-// deriveSigner makes a *best* guess about which signer to use.
-func deriveSigner(V *big.Int) Signer {
-	if V.Sign() != 0 && isProtectedV(V) {
-		return NewEIP155Signer(deriveChainId(V))
-	} else {
-		return HomesteadSigner{}
-	}
-}
-
 type Transaction struct {
 	data txdata
 	// caches
diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go
index dfc84fdac7..c195d23a32 100644
--- a/core/types/transaction_signing.go
+++ b/core/types/transaction_signing.go
@@ -43,7 +43,7 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer {
 	var signer Signer
 	switch {
 	case config.IsEIP155(blockNumber):
-		signer = NewEIP155Signer(config.ChainId)
+		signer = NewEIP155Signer(config.ChainID)
 	case config.IsHomestead(blockNumber):
 		signer = HomesteadSigner{}
 	default:
diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go
index d1861b14cd..b390f45c67 100644
--- a/core/types/transaction_test.go
+++ b/core/types/transaction_test.go
@@ -165,28 +165,13 @@ func TestTransactionPriceNonceSort(t *testing.T) {
 				t.Errorf("invalid nonce ordering: tx #%d (A=%x N=%v) < tx #%d (A=%x N=%v)", i, fromi[:4], txi.Nonce(), i+j, fromj[:4], txj.Nonce())
 			}
 		}
-		// Find the previous and next nonce of this account
-		prev, next := i-1, i+1
-		for j := i - 1; j >= 0; j-- {
-			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
-				prev = j
-				break
-			}
-		}
-		for j := i + 1; j < len(txs); j++ {
-			if fromj, _ := Sender(signer, txs[j]); fromi == fromj {
-				next = j
-				break
-			}
-		}
-		// Make sure that in between the neighbor nonces, the transaction is correctly positioned price wise
-		for j := prev + 1; j < next; j++ {
-			fromj, _ := Sender(signer, txs[j])
-			if j < i && txs[j].GasPrice().Cmp(txi.GasPrice()) < 0 {
-				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
-			}
-			if j > i && txs[j].GasPrice().Cmp(txi.GasPrice()) > 0 {
-				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) > tx #%d (A=%x P=%v)", j, fromj[:4], txs[j].GasPrice(), i, fromi[:4], txi.GasPrice())
+
+		// If the next transaction has a different sender, its gas price must be no higher than the current one
+		if i+1 < len(txs) {
+			next := txs[i+1]
+			fromNext, _ := Sender(signer, next)
+			if fromi != fromNext && txi.GasPrice().Cmp(next.GasPrice()) < 0 {
+				t.Errorf("invalid gasprice ordering: tx #%d (A=%x P=%v) < tx #%d (A=%x P=%v)", i, fromi[:4], txi.GasPrice(), i+1, fromNext[:4], next.GasPrice())
 			}
 		}
 	}
diff --git a/core/vm/errors.go b/core/vm/errors.go
index b19366be0e..ea33f13f33 100644
--- a/core/vm/errors.go
+++ b/core/vm/errors.go
@@ -18,6 +18,7 @@ package vm

 import "errors"

+// List execution errors
 var (
 	ErrOutOfGas          = errors.New("out of gas")
 	ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas")
diff --git a/core/vm/evm.go b/core/vm/evm.go
index ea46209742..69c8ec4787 100644
--- a/core/vm/evm.go
+++ b/core/vm/evm.go
@@ -31,8 +31,10 @@ import (
 var emptyCodeHash = crypto.Keccak256Hash(nil)

 type (
+	// CanTransferFunc is the signature of a transfer guard function
 	CanTransferFunc func(StateDB, common.Address, *big.Int) bool
-	TransferFunc    func(StateDB, common.Address, common.Address, *big.Int)
+	// TransferFunc is the signature of a transfer function
+	TransferFunc func(StateDB, common.Address, common.Address, *big.Int)
 	// GetHashFunc returns the nth block hash in the blockchain
 	// and is used by the BLOCKHASH EVM op code.
 	GetHashFunc func(uint64) common.Hash
diff --git a/core/vm/gas.go b/core/vm/gas.go
index dd64d5f178..bba7058c7e 100644
--- a/core/vm/gas.go
+++ b/core/vm/gas.go
@@ -22,6 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/params"
 )

+// Gas costs
 const (
 	GasQuickStep   uint64 = 2
 	GasFastestStep uint64 = 3
diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go
index 83adba428e..0764c67a4d 100644
--- a/core/vm/gas_table.go
+++ b/core/vm/gas_table.go
@@ -124,12 +124,12 @@ func gasSStore(gt params.GasTable, evm *EVM, contract *Contract, stack *Stack, m
 	// 1. From a zero-value address to a non-zero value         (NEW VALUE)
 	// 2. From a non-zero value address to a zero-value address (DELETE)
 	// 3. From a non-zero to a non-zero                         (CHANGE)
-	if common.EmptyHash(val) && !common.EmptyHash(common.BigToHash(y)) {
+	if val == (common.Hash{}) && y.Sign() != 0 {
 		// 0 => non 0
 		return params.SstoreSetGas, nil
-	} else if !common.EmptyHash(val) && common.EmptyHash(common.BigToHash(y)) {
+	} else if val != (common.Hash{}) && y.Sign() == 0 {
+		// non 0 => 0
 		evm.StateDB.AddRefund(params.SstoreRefundGas)
-
 		return params.SstoreClearGas, nil
 	} else {
 		// non 0 => non 0 (or 0 => 0)
diff --git a/core/vm/instructions.go b/core/vm/instructions.go
index 0689ee39cf..1ec13ba35f 100644
--- a/core/vm/instructions.go
+++ b/core/vm/instructions.go
@@ -556,7 +556,7 @@ func opMload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *St
 func opMstore(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
 	// pop value of the stack
 	mStart, val := stack.pop(), stack.pop()
-	memory.Set(mStart.Uint64(), 32, math.PaddedBigBytes(val, 32))
+	memory.Set32(mStart.Uint64(), val)

 	evm.interpreter.intPool.put(mStart, val)
 	return nil, nil
@@ -570,9 +570,9 @@ func opMstore8(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *
 }

 func opSload(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
-	loc := common.BigToHash(stack.pop())
-	val := evm.StateDB.GetState(contract.Address(), loc).Big()
-	stack.push(val)
+	loc := stack.peek()
+	val := evm.StateDB.GetState(contract.Address(), common.BigToHash(loc))
+	loc.SetBytes(val.Bytes())
 	return nil, nil
 }
@@ -850,7 +850,7 @@ func makePush(size uint64, pushByteSize int) executionFunc {
 	}
 }

-// make push instruction function
+// make dup instruction function
 func makeDup(size int64) executionFunc {
 	return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
 		stack.dup(evm.interpreter.intPool, int(size))
@@ -861,7 +861,7 @@ func makeDup(size int64) executionFunc {
 // make swap instruction function
 func makeSwap(size int64) executionFunc {
 	// switch n + 1 otherwise n would be swapped with n
-	size += 1
+	size++
 	return func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
 		stack.swap(int(size))
 		return nil, nil
diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go
index 0de558612c..f51e6363f6 100644
--- a/core/vm/instructions_test.go
+++ b/core/vm/instructions_test.go
@@ -425,3 +425,42 @@ func BenchmarkOpIsZero(b *testing.B) {
 	x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
 	opBenchmark(b, opIszero, x)
 }
+
+func TestOpMstore(t *testing.T) {
+	var (
+		env   = NewEVM(Context{}, nil, params.TestChainConfig, Config{})
+		stack = newstack()
+		mem   = NewMemory()
+	)
+	mem.Resize(64)
+	pc := uint64(0)
+	v := "abcdef00000000000000abba000000000deaf000000c0de00100000000133700"
+	stack.pushN(new(big.Int).SetBytes(common.Hex2Bytes(v)), big.NewInt(0))
+	opMstore(&pc, env, nil, mem, stack)
+	if got := common.Bytes2Hex(mem.Get(0, 32)); got != v {
+		t.Fatalf("Mstore fail, got %v, expected %v", got, v)
+	}
+	stack.pushN(big.NewInt(0x1), big.NewInt(0))
+	opMstore(&pc, env, nil, mem, stack)
+	if common.Bytes2Hex(mem.Get(0, 32)) != "0000000000000000000000000000000000000000000000000000000000000001" {
+		t.Fatalf("Mstore failed to overwrite previous value")
+	}
+}
+
+func BenchmarkOpMstore(bench *testing.B) {
+	var (
+		env   = NewEVM(Context{}, nil, params.TestChainConfig, Config{})
+		stack = newstack()
+		mem   = NewMemory()
+	)
+	mem.Resize(64)
+	pc := uint64(0)
+	memStart := big.NewInt(0)
+	value := big.NewInt(0x1337)
+
+	bench.ResetTimer()
+	for i := 0; i < bench.N; i++ {
+		stack.pushN(value, memStart)
+		opMstore(&pc, env, nil, mem, stack)
+	}
+}
diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go
index 3389941353..111a9b7981 100644
--- a/core/vm/jump_table.go
+++ b/core/vm/jump_table.go
@@ -33,7 +33,7 @@ type (
 var errGasUintOverflow = errors.New("gas uint64 overflow")

 type operation struct {
-	// op is the operation function
+	// execute is the operation function
 	execute executionFunc
 	// gasCost is the gas function and returns the gas required for execution
 	gasCost gasFunc
@@ -51,17 +51,17 @@ type operation struct {
 }

 var (
-	frontierInstructionSet       = NewFrontierInstructionSet()
-	homesteadInstructionSet      = NewHomesteadInstructionSet()
-	byzantiumInstructionSet      = NewByzantiumInstructionSet()
-	constantinopleInstructionSet = NewConstantinopleInstructionSet()
+	frontierInstructionSet       = newFrontierInstructionSet()
+	homesteadInstructionSet      = newHomesteadInstructionSet()
+	byzantiumInstructionSet      = newByzantiumInstructionSet()
+	constantinopleInstructionSet = newConstantinopleInstructionSet()
 )

 // NewConstantinopleInstructionSet returns the frontier, homestead
 // byzantium and constantinople instructions.
-func NewConstantinopleInstructionSet() [256]operation {
+func newConstantinopleInstructionSet() [256]operation {
 	// instructions that can be executed during the byzantium phase.
-	instructionSet := NewByzantiumInstructionSet()
+	instructionSet := newByzantiumInstructionSet()
 	instructionSet[SHL] = operation{
 		execute: opSHL,
 		gasCost: constGasFunc(GasFastestStep),
@@ -85,9 +85,9 @@ func NewConstantinopleInstructionSet() [256]operation {

 // NewByzantiumInstructionSet returns the frontier, homestead and
 // byzantium instructions.
-func NewByzantiumInstructionSet() [256]operation {
+func newByzantiumInstructionSet() [256]operation {
 	// instructions that can be executed during the homestead phase.
-	instructionSet := NewHomesteadInstructionSet()
+	instructionSet := newHomesteadInstructionSet()
 	instructionSet[STATICCALL] = operation{
 		execute: opStaticCall,
 		gasCost: gasStaticCall,
@@ -123,8 +123,8 @@ func NewByzantiumInstructionSet() [256]operation {

 // NewHomesteadInstructionSet returns the frontier and homestead
 // instructions that can be executed during the homestead phase.
-func NewHomesteadInstructionSet() [256]operation {
-	instructionSet := NewFrontierInstructionSet()
+func newHomesteadInstructionSet() [256]operation {
+	instructionSet := newFrontierInstructionSet()
 	instructionSet[DELEGATECALL] = operation{
 		execute: opDelegateCall,
 		gasCost: gasDelegateCall,
@@ -138,7 +138,7 @@ func NewHomesteadInstructionSet() [256]operation {

 // NewFrontierInstructionSet returns the frontier instructions
 // that can be executed during the frontier phase.
-func NewFrontierInstructionSet() [256]operation {
+func newFrontierInstructionSet() [256]operation {
 	return [256]operation{
 		STOP: {
 			execute: opStop,
diff --git a/core/vm/logger.go b/core/vm/logger.go
index c32a7b4044..85acb8d6d3 100644
--- a/core/vm/logger.go
+++ b/core/vm/logger.go
@@ -29,8 +29,10 @@ import (
 	"github.com/ethereum/go-ethereum/core/types"
 )

+// Storage represents a contract's storage.
 type Storage map[common.Hash]common.Hash

+// Copy duplicates the current storage.
 func (s Storage) Copy() Storage {
 	cpy := make(Storage)
 	for key, value := range s {
@@ -76,10 +78,12 @@ type structLogMarshaling struct {
 	ErrorString string `json:"error"` // adds call to ErrorString() in MarshalJSON
 }

+// OpName formats the operand name in a human-readable format.
 func (s *StructLog) OpName() string {
 	return s.Op.String()
 }

+// ErrorString formats the log's error as a string.
 func (s *StructLog) ErrorString() string {
 	if s.Err != nil {
 		return s.Err.Error()
@@ -124,6 +128,7 @@ func NewStructLogger(cfg *LogConfig) *StructLogger {
 	return logger
 }

+// CaptureStart implements the Tracer interface to initialize the tracing operation.
 func (l *StructLogger) CaptureStart(from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) error {
 	return nil
 }
@@ -178,10 +183,13 @@ func (l *StructLogger) CaptureState(env *EVM, pc uint64, op OpCode, gas, cost ui
 	return nil
 }

+// CaptureFault implements the Tracer interface to trace an execution fault
+// while running an opcode.
 func (l *StructLogger) CaptureFault(env *EVM, pc uint64, op OpCode, gas, cost uint64, memory *Memory, stack *Stack, contract *Contract, depth int, err error) error {
 	return nil
 }

+// CaptureEnd is called after the call finishes to finalize the tracing.
 func (l *StructLogger) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error {
 	l.output = output
 	l.err = err
diff --git a/core/vm/memory.go b/core/vm/memory.go
index d5cc7870bc..722862b1de 100644
--- a/core/vm/memory.go
+++ b/core/vm/memory.go
@@ -16,7 +16,12 @@
 package vm

-import "fmt"
+import (
+	"fmt"
+	"math/big"
+
+	"github.com/ethereum/go-ethereum/common/math"
+)

 // Memory implements a simple memory model for the ethereum virtual machine.
 type Memory struct {
@@ -24,25 +29,39 @@ type Memory struct {
 	lastGasCost uint64
 }

+// NewMemory returns a new memory model.
 func NewMemory() *Memory {
 	return &Memory{}
 }

 // Set sets offset + size to value
 func (m *Memory) Set(offset, size uint64, value []byte) {
-	// length of store may never be less than offset + size.
-	// The store should be resized PRIOR to setting the memory
-	if size > uint64(len(m.store)) {
-		panic("INVALID memory: store empty")
-	}
-
 	// It's possible the offset is greater than 0 and size equals 0. This is because
 	// the calcMemSize (common.go) could potentially return 0 when size is zero (NO-OP)
 	if size > 0 {
+		// length of store may never be less than offset + size.
+		// The store should be resized PRIOR to setting the memory
+		if offset+size > uint64(len(m.store)) {
+			panic("invalid memory: store empty")
+		}
 		copy(m.store[offset:offset+size], value)
 	}
 }

+// Set32 sets the 32 bytes starting at offset to the value of val, left-padded with zeroes to
+// 32 bytes.
+func (m *Memory) Set32(offset uint64, val *big.Int) {
+	// length of store may never be less than offset + size.
+	// The store should be resized PRIOR to setting the memory
+	if offset+32 > uint64(len(m.store)) {
+		panic("invalid memory: store empty")
+	}
+	// Zero the memory area
+	copy(m.store[offset:offset+32], []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
+	// Fill in relevant bits
+	math.ReadBits(val, m.store[offset:offset+32])
+}
+
 // Resize resizes the memory to size
 func (m *Memory) Resize(size uint64) {
 	if uint64(m.Len()) < size {
@@ -89,6 +108,7 @@ func (m *Memory) Data() []byte {
 	return m.store
 }

+// Print dumps the content of the memory.
 func (m *Memory) Print() {
 	fmt.Printf("### mem %d bytes ###\n", len(m.store))
 	if len(m.store) > 0 {
diff --git a/core/vm/memory_table.go b/core/vm/memory_table.go
index bec0235bcc..ab49ebb38b 100644
--- a/core/vm/memory_table.go
+++ b/core/vm/memory_table.go
@@ -65,12 +65,6 @@ func memoryCall(stack *Stack) *big.Int {
 	return math.BigMax(x, y)
 }
-func memoryCallCode(stack *Stack) *big.Int {
-	x := calcMemSize(stack.Back(5), stack.Back(6))
-	y := calcMemSize(stack.Back(3), stack.Back(4))
-
-	return math.BigMax(x, y)
-}

 func memoryDelegateCall(stack *Stack) *big.Int {
 	x := calcMemSize(stack.Back(4), stack.Back(5))
 	y := calcMemSize(stack.Back(2), stack.Back(3))
diff --git a/core/vm/opcodes.go b/core/vm/opcodes.go
index e3568eb000..6c12c50e51 100644
--- a/core/vm/opcodes.go
+++ b/core/vm/opcodes.go
@@ -23,6 +23,7 @@ import (
 // OpCode is an EVM opcode
 type OpCode byte

+// IsPush specifies if an opcode is a PUSH opcode.
 func (op OpCode) IsPush() bool {
 	switch op {
 	case PUSH1, PUSH2, PUSH3, PUSH4, PUSH5, PUSH6, PUSH7, PUSH8, PUSH9, PUSH10, PUSH11, PUSH12, PUSH13, PUSH14, PUSH15, PUSH16, PUSH17, PUSH18, PUSH19, PUSH20, PUSH21, PUSH22, PUSH23, PUSH24, PUSH25, PUSH26, PUSH27, PUSH28, PUSH29, PUSH30, PUSH31, PUSH32:
@@ -31,12 +32,13 @@ func (op OpCode) IsPush() bool {
 	return false
 }

+// IsStaticJump specifies if an opcode is JUMP.
 func (op OpCode) IsStaticJump() bool {
 	return op == JUMP
 }

+// 0x0 range - arithmetic ops.
 const (
-	// 0x0 range - arithmetic ops
 	STOP OpCode = iota
 	ADD
 	MUL
@@ -51,6 +53,7 @@ const (
 	SIGNEXTEND
 )

+// 0x10 range - comparison ops.
 const (
 	LT OpCode = iota + 0x10
 	GT
@@ -70,8 +73,8 @@ const (
 	SHA3 = 0x20
 )

+// 0x30 range - closure state.
 const (
-	// 0x30 range - closure state
 	ADDRESS OpCode = 0x30 + iota
 	BALANCE
 	ORIGIN
@@ -89,8 +92,8 @@ const (
 	RETURNDATACOPY
 )

+// 0x40 range - block operations.
 const (
-	// 0x40 range - block operations
 	BLOCKHASH OpCode = 0x40 + iota
 	COINBASE
 	TIMESTAMP
@@ -99,8 +102,8 @@ const (
 	GASLIMIT
 )

+// 0x50 range - 'storage' and execution.
 const (
-	// 0x50 range - 'storage' and execution
 	POP OpCode = 0x50 + iota
 	MLOAD
 	MSTORE
@@ -115,8 +118,8 @@ const (
 	JUMPDEST
 )

+// 0x60 range.
 const (
-	// 0x60 range
 	PUSH1 OpCode = 0x60 + iota
 	PUSH2
 	PUSH3
@@ -183,6 +186,7 @@ const (
 	SWAP16
 )

+// 0xa0 range - logging ops.
 const (
 	LOG0 OpCode = 0xa0 + iota
 	LOG1
@@ -191,15 +195,15 @@ const (
 	LOG4
 )

-// unofficial opcodes used for parsing
+// unofficial opcodes used for parsing.
 const (
 	PUSH OpCode = 0xb0 + iota
 	DUP
 	SWAP
 )

+// 0xf0 range - closures.
 const (
-	// 0xf0 range - closures
 	CREATE OpCode = 0xf0 + iota
 	CALL
 	CALLCODE
@@ -211,9 +215,9 @@ const (
 	SELFDESTRUCT = 0xff
 )

-// Since the opcodes aren't all in order we can't use a regular slice
+// Since the opcodes aren't all in order we can't use a regular slice.
 var opCodeToString = map[OpCode]string{
-	// 0x0 range - arithmetic ops
+	// 0x0 range - arithmetic ops.
 	STOP:       "STOP",
 	ADD:        "ADD",
 	MUL:        "MUL",
@@ -232,7 +236,7 @@ var opCodeToString = map[OpCode]string{
 	ISZERO:     "ISZERO",
 	SIGNEXTEND: "SIGNEXTEND",

-	// 0x10 range - bit ops
+	// 0x10 range - bit ops.
 	AND:    "AND",
 	OR:     "OR",
 	XOR:    "XOR",
@@ -243,10 +247,10 @@ var opCodeToString = map[OpCode]string{
 	ADDMOD: "ADDMOD",
 	MULMOD: "MULMOD",

-	// 0x20 range - crypto
+	// 0x20 range - crypto.
 	SHA3: "SHA3",

-	// 0x30 range - closure state
+	// 0x30 range - closure state.
 	ADDRESS:        "ADDRESS",
 	BALANCE:        "BALANCE",
 	ORIGIN:         "ORIGIN",
@@ -263,7 +267,7 @@ var opCodeToString = map[OpCode]string{
 	RETURNDATASIZE: "RETURNDATASIZE",
 	RETURNDATACOPY: "RETURNDATACOPY",

-	// 0x40 range - block operations
+	// 0x40 range - block operations.
 	BLOCKHASH: "BLOCKHASH",
 	COINBASE:  "COINBASE",
 	TIMESTAMP: "TIMESTAMP",
@@ -271,7 +275,7 @@ var opCodeToString = map[OpCode]string{
 	DIFFICULTY: "DIFFICULTY",
 	GASLIMIT:   "GASLIMIT",

-	// 0x50 range - 'storage' and execution
+	// 0x50 range - 'storage' and execution.
 	POP: "POP",
 	//DUP:     "DUP",
 	//SWAP:    "SWAP",
@@ -287,7 +291,7 @@ var opCodeToString = map[OpCode]string{
 	GAS:      "GAS",
 	JUMPDEST: "JUMPDEST",

-	// 0x60 range - push
+	// 0x60 range - push.
 	PUSH1:  "PUSH1",
 	PUSH2:  "PUSH2",
 	PUSH3:  "PUSH3",
@@ -360,7 +364,7 @@ var opCodeToString = map[OpCode]string{
 	LOG3: "LOG3",
 	LOG4: "LOG4",

-	// 0xf0 range
+	// 0xf0 range.
 	CREATE: "CREATE",
 	CALL:   "CALL",
 	RETURN: "RETURN",
@@ -524,6 +528,7 @@ var stringToOp = map[string]OpCode{
 	"SELFDESTRUCT": SELFDESTRUCT,
 }

+// StringToOp finds the opcode whose name is stored in `str`.
 func StringToOp(str string) OpCode {
 	return stringToOp[str]
 }
diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go
index c8c5d34d9d..cda49a34b4 100644
--- a/core/vm/runtime/runtime.go
+++ b/core/vm/runtime/runtime.go
@@ -52,7 +52,7 @@ type Config struct {
 func setDefaults(cfg *Config) {
 	if cfg.ChainConfig == nil {
 		cfg.ChainConfig = &params.ChainConfig{
-			ChainId:        big.NewInt(1),
+			ChainID:        big.NewInt(1),
 			HomesteadBlock: new(big.Int),
 			DAOForkBlock:   new(big.Int),
 			DAOForkSupport: false,
diff --git a/core/vm/stack.go b/core/vm/stack.go
index 9c10d50ad1..4c1b9e8037 100644
--- a/core/vm/stack.go
+++ b/core/vm/stack.go
@@ -21,7 +21,7 @@ import (
 	"math/big"
 )

-// stack is an object for basic stack operations. Items popped to the stack are
+// Stack is an object for basic stack operations. Items popped to the stack are
 // expected to be changed and modified. stack does not take care of adding newly
 // initialised objects.
 type Stack struct {
@@ -32,6 +32,7 @@ func newstack() *Stack {
 	return &Stack{data: make([]*big.Int, 0, 1024)}
 }

+// Data returns the underlying big.Int array.
 func (st *Stack) Data() []*big.Int {
 	return st.data
 }
@@ -80,6 +81,7 @@ func (st *Stack) require(n int) error {
 	return nil
 }

+// Print dumps the content of the stack
 func (st *Stack) Print() {
 	fmt.Println("### stack ###")
 	if len(st.data) > 0 {
diff --git a/crypto/crypto.go b/crypto/crypto.go
index 76d1ffaf64..619440e817 100644
--- a/crypto/crypto.go
+++ b/crypto/crypto.go
@@ -39,6 +39,8 @@ var (
 	secp256k1halfN = new(big.Int).Div(secp256k1N, big.NewInt(2))
 )

+var errInvalidPubkey = errors.New("invalid secp256k1 public key")
+
 // Keccak256 calculates and returns the Keccak256 hash of the input data.
 func Keccak256(data ...[]byte) []byte {
 	d := sha3.NewKeccak256()
@@ -122,12 +124,13 @@ func FromECDSA(priv *ecdsa.PrivateKey) []byte {
 	return math.PaddedBigBytes(priv.D, priv.Params().BitSize/8)
 }

-func ToECDSAPub(pub []byte) *ecdsa.PublicKey {
-	if len(pub) == 0 {
-		return nil
-	}
+// UnmarshalPubkey converts bytes to a secp256k1 public key.
+func UnmarshalPubkey(pub []byte) (*ecdsa.PublicKey, error) {
 	x, y := elliptic.Unmarshal(S256(), pub)
-	return &ecdsa.PublicKey{Curve: S256(), X: x, Y: y}
+	if x == nil {
+		return nil, errInvalidPubkey
+	}
+	return &ecdsa.PublicKey{Curve: S256(), X: x, Y: y}, nil
 }

 func FromECDSAPub(pub *ecdsa.PublicKey) []byte {
diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go
index 804de3fe22..177c19c0cf 100644
--- a/crypto/crypto_test.go
+++ b/crypto/crypto_test.go
@@ -23,9 +23,11 @@ import (
 	"io/ioutil"
 	"math/big"
 	"os"
+	"reflect"
 	"testing"

 	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/common/hexutil"
 )

 var testAddrHex = "970e8128ab834e8eac17ab8e3812f010678cf791"
@@ -56,6 +58,33 @@ func BenchmarkSha3(b *testing.B) {
 	}
 }

+func TestUnmarshalPubkey(t *testing.T) {
+	key, err := UnmarshalPubkey(nil)
+	if err != errInvalidPubkey || key != nil {
+		t.Fatalf("expected error, got %v, %v", err, key)
+	}
+	key, err = UnmarshalPubkey([]byte{1, 2, 3})
+	if err != errInvalidPubkey || key != nil {
+		t.Fatalf("expected error, got %v, %v", err, key)
+	}
+
+	var (
+		enc, _ = hex.DecodeString("04760c4460e5336ac9bbd87952a3c7ec4363fc0a97bd31c86430806e287b437fd1b01abc6e1db640cf3106b520344af1d58b00b57823db3e1407cbc433e1b6d04d")
+		dec    = &ecdsa.PublicKey{
+			Curve: S256(),
+			X:     hexutil.MustDecodeBig("0x760c4460e5336ac9bbd87952a3c7ec4363fc0a97bd31c86430806e287b437fd1"),
+			Y:     hexutil.MustDecodeBig("0xb01abc6e1db640cf3106b520344af1d58b00b57823db3e1407cbc433e1b6d04d"),
+		}
+	)
+	key, err = UnmarshalPubkey(enc)
+	if err != nil {
+		t.Fatalf("expected no error, got %v", err)
+	}
+	if !reflect.DeepEqual(key, dec) {
+		t.Fatal("wrong result")
+	}
+}
+
 func TestSign(t *testing.T) {
 	key, _ := HexToECDSA(testPrivHex)
 	addr := common.HexToAddress(testAddrHex)
@@ -69,7 +98,7 @@ func TestSign(t *testing.T) {
 	if err != nil {
 		t.Errorf("ECRecover error: %s", err)
 	}
-	pubKey := ToECDSAPub(recoveredPub)
+	pubKey, _ := UnmarshalPubkey(recoveredPub)
 	recoveredAddr := PubkeyToAddress(*pubKey)
 	if addr != recoveredAddr {
 		t.Errorf("Address mismatch: want: %x have: %x", addr, recoveredAddr)
diff --git a/eth/api.go b/eth/api.go
index 247ca7485c..0b6da456f6 100644
--- a/eth/api.go
+++ b/eth/api.go
@@ -32,6 +32,7 @@ import (
 	"github.com/ethereum/go-ethereum/core/rawdb"
 	"github.com/ethereum/go-ethereum/core/state"
 	"github.com/ethereum/go-ethereum/core/types"
+	"github.com/ethereum/go-ethereum/internal/ethapi"
 	"github.com/ethereum/go-ethereum/log"
 	"github.com/ethereum/go-ethereum/miner"
 	"github.com/ethereum/go-ethereum/params"
@@ -351,10 +352,34 @@ func (api *PrivateDebugAPI) Preimage(ctx context.Context, hash common.Hash) (hex
 	return nil, errors.New("unknown preimage")
 }

-// GetBadBLocks returns a list of the last 'bad blocks' that the client has seen on the network
+// BadBlockArgs represents the entries in the list returned when bad blocks are queried.
+type BadBlockArgs struct {
+	Hash  common.Hash            `json:"hash"`
+	Block map[string]interface{} `json:"block"`
+	RLP   string                 `json:"rlp"`
+}
+
+// GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network
 // and returns them as a JSON list of block-hashes
-func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]core.BadBlockArgs, error) {
-	return api.eth.BlockChain().BadBlocks()
+func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) {
+	blocks := api.eth.BlockChain().BadBlocks()
+	results := make([]*BadBlockArgs, len(blocks))
+
+	var err error
+	for i, block := range blocks {
+		results[i] = &BadBlockArgs{
+			Hash: block.Hash(),
+		}
+		if rlpBytes, err := rlp.EncodeToBytes(block); err != nil {
+			results[i].RLP = err.Error() // Hacky, but hey, it works
+		} else {
+			results[i].RLP = fmt.Sprintf("0x%x", rlpBytes)
+		}
+		if results[i].Block, err = ethapi.RPCMarshalBlock(block, true, true); err != nil {
+			results[i].Block = map[string]interface{}{"error": err.Error()}
+		}
+	}
+	return results, nil
 }

 // StorageRangeResult is the result of a debug_storageRangeAt API call.
@@ -406,7 +431,7 @@ func storageRangeAt(st state.Trie, start []byte, maxResult int) (StorageRangeRes
 	return result, nil
 }

-// GetModifiedAccountsByumber returns all accounts that have changed between the
+// GetModifiedAccountsByNumber returns all accounts that have changed between the
 // two blocks specified. A change is defined as a difference in nonce, balance,
 // code hash, or storage hash.
 //
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 4ace9b5945..016087dfe8 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -43,6 +43,7 @@ type EthAPIBackend struct {
 	gpo *gasprice.Oracle
 }

+// ChainConfig returns the active chain configuration.
 func (b *EthAPIBackend) ChainConfig() *params.ChainConfig {
 	return b.eth.chainConfig
 }
@@ -188,8 +189,8 @@ func (b *EthAPIBackend) TxPoolContent() (map[common.Address]types.Transactions, 
 	return b.eth.TxPool().Content()
 }

-func (b *EthAPIBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
-	return b.eth.TxPool().SubscribeTxPreEvent(ch)
+func (b *EthAPIBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
+	return b.eth.TxPool().SubscribeNewTxsEvent(ch)
 }

 func (b *EthAPIBackend) Downloader() *downloader.Downloader {
diff --git a/eth/api_tracer.go b/eth/api_tracer.go
index 45a819022a..623e5ed1bd 100644
--- a/eth/api_tracer.go
+++ b/eth/api_tracer.go
@@ -251,7 +251,8 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
 			// Print progress logs if long enough time elapsed
 			if time.Since(logged) > 8*time.Second {
 				if number > origin {
-					log.Info("Tracing chain segment", "start", origin, "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin), "memory", database.TrieDB().Size())
+					nodes, imgs := database.TrieDB().Size()
+					log.Info("Tracing chain segment", "start", origin, "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin), "memory", nodes+imgs)
 				} else {
 					log.Info("Preparing state for chain trace", "block", number, "start", origin, "elapsed", time.Since(begin))
 				}
@@ -296,8 +297,10 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
 				database.TrieDB().Reference(root, common.Hash{})
 			}
 			// Dereference all past tries we ourselves are done working with
-			database.TrieDB().Dereference(proot, common.Hash{})
+			database.TrieDB().Dereference(proot)
 			proot = root
+
+			// TODO(karalabe): Do we need the preimages? Won't they accumulate too much?
 		}
 	}()
@@ -317,7 +320,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl
 			done[uint64(result.Block)] = result

 			// Dereference any parent tries held in memory by this task
-			database.TrieDB().Dereference(res.rootref, common.Hash{})
+			database.TrieDB().Dereference(res.rootref)

 			// Stream completed traces to the user, aborting on the first error
 			for result, ok := done[next]; ok; result, ok = done[next] {
@@ -523,10 +526,11 @@ func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*
 			return nil, err
 		}
 		database.TrieDB().Reference(root, common.Hash{})
-		database.TrieDB().Dereference(proot, common.Hash{})
+		database.TrieDB().Dereference(proot)
 		proot = root
 	}
-	log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "size", database.TrieDB().Size())
+	nodes, imgs := database.TrieDB().Size()
+	log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs)
 	return statedb, nil
 }
diff --git a/eth/backend.go b/eth/backend.go
index ea70e3826c..a18abdfb56 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -88,7 +88,7 @@ type Ethereum struct {
 	gasPrice  *big.Int
 	etherbase common.Address

-	networkId     uint64
+	networkID     uint64
 	netRPCService *ethapi.PublicNetAPI

 	lock sync.RWMutex // Protects the variadic fields (e.g. gas price and etherbase)
@@ -126,7 +126,7 @@ func New(ctx *node.ServiceContext, config *Config) (*Ethereum, error) {
 		accountManager: ctx.AccountManager,
 		engine:         CreateConsensusEngine(ctx, &config.Ethash, chainConfig, chainDb),
 		shutdownChan:   make(chan bool),
-		networkId:      config.NetworkId,
+		networkID:      config.NetworkId,
 		gasPrice:       config.GasPrice,
 		etherbase:      config.Etherbase,
 		bloomRequests:  make(chan chan *bloombits.Retrieval),
@@ -215,14 +215,14 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
 		return clique.New(chainConfig.Clique, db)
 	}
 	// Otherwise assume proof-of-work
-	switch {
-	case config.PowMode == ethash.ModeFake:
+	switch config.PowMode {
+	case ethash.ModeFake:
 		log.Warn("Ethash used in fake mode")
 		return ethash.NewFaker()
-	case config.PowMode == ethash.ModeTest:
+	case ethash.ModeTest:
 		log.Warn("Ethash used in test mode")
 		return ethash.NewTester()
-	case config.PowMode == ethash.ModeShared:
+	case ethash.ModeShared:
 		log.Warn("Ethash used in shared mode")
 		return ethash.NewShared()
 	default:
@@ -239,7 +239,7 @@ func CreateConsensusEngine(ctx *node.ServiceContext, config *ethash.Config, chai
 	}
 }

-// APIs returns the collection of RPC services the ethereum package offers.
+// APIs return the collection of RPC services the ethereum package offers.
 // NOTE, some of these services probably need to be moved to somewhere else.
 func (s *Ethereum) APIs() []rpc.API {
 	apis := ethapi.GetAPIs(s.APIBackend)
@@ -369,7 +369,7 @@ func (s *Ethereum) Engine() consensus.Engine { return s.engine }
 func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb }
 func (s *Ethereum) IsListening() bool { return true } // Always listening
 func (s *Ethereum) EthVersion() int { return int(s.protocolManager.SubProtocols[0].Version) }
-func (s *Ethereum) NetVersion() uint64 { return s.networkId }
+func (s *Ethereum) NetVersion() uint64 { return s.networkID }
 func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader }

 // Protocols implements node.Service, returning all the currently configured
diff --git a/eth/config.go b/eth/config.go
index dd7f42c7d9..426d2bf1ef 100644
--- a/eth/config.go
+++ b/eth/config.go
@@ -47,7 +47,7 @@ var DefaultConfig = Config{
 	LightPeers:    100,
 	DatabaseCache: 768,
 	TrieCache:     256,
-	TrieTimeout:   5 * time.Minute,
+	TrieTimeout:   60 * time.Minute,
 	GasPrice:      big.NewInt(18 * params.Shannon),

 	TxPool: core.DefaultTxPoolConfig,
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index dc23354929..fbde9c6caa 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -680,7 +680,7 @@ func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, err
 		}
 	}
 	// If the head fetch already found an ancestor, return
-	if !common.EmptyHash(hash) {
+	if hash != (common.Hash{}) {
 		if int64(number) <= floor {
 			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
 			return 0, errInvalidAncestor
@@ -900,7 +900,7 @@ func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) (
 	var (
 		deliver = func(packet dataPack) (int, error) {
 			pack := packet.(*headerPack)
-			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
+			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
 		}
 		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
 		throttle = func() bool { return false }
@@ -930,7 +930,7 @@ func (d *Downloader) fetchBodies(from uint64) error {
 	var (
 		deliver = func(packet dataPack) (int, error) {
 			pack := packet.(*bodyPack)
-			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
+			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
 		}
 		expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
 		fetch  = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
@@ -954,7 +954,7 @@ func (d *Downloader) fetchReceipts(from uint64) error {
 	var (
 		deliver = func(packet dataPack) (int, error) {
 			pack := packet.(*receiptPack)
-			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
+			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
 		}
 		expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
 		fetch  = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go
index bbe0aed5dc..984dd13d68 100644
--- a/eth/downloader/queue.go
+++ b/eth/downloader/queue.go
@@ -596,21 +596,21 @@ func (q *queue) cancel(request *fetchRequest, taskQueue *prque.Prque, pendPool m
 // Revoke cancels all pending requests belonging to a given peer. This method is
 // meant to be called during a peer drop to quickly reassign owned data fetches
 // to remaining nodes.
-func (q *queue) Revoke(peerId string) {
+func (q *queue) Revoke(peerID string) {
 	q.lock.Lock()
 	defer q.lock.Unlock()

-	if request, ok := q.blockPendPool[peerId]; ok {
+	if request, ok := q.blockPendPool[peerID]; ok {
 		for _, header := range request.Headers {
 			q.blockTaskQueue.Push(header, -float32(header.Number.Uint64()))
 		}
-		delete(q.blockPendPool, peerId)
+		delete(q.blockPendPool, peerID)
 	}
-	if request, ok := q.receiptPendPool[peerId]; ok {
+	if request, ok := q.receiptPendPool[peerID]; ok {
 		for _, header := range request.Headers {
 			q.receiptTaskQueue.Push(header, -float32(header.Number.Uint64()))
 		}
-		delete(q.receiptPendPool, peerId)
+		delete(q.receiptPendPool, peerID)
 	}
 }
diff --git a/eth/downloader/statesync.go b/eth/downloader/statesync.go
index 5b4b9ba1b7..8d33dfec74 100644
--- a/eth/downloader/statesync.go
+++ b/eth/downloader/statesync.go
@@ -214,7 +214,7 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync {
 type stateSync struct {
 	d *Downloader // Downloader instance to access and manage current peerset

-	sched  *trie.TrieSync             // State trie sync scheduler defining the tasks
+	sched  *trie.Sync                 // State trie sync scheduler defining the tasks
 	keccak hash.Hash                  // Keccak256 hasher to verify deliveries with
 	tasks  map[common.Hash]*stateTask // Set of tasks currently queued for retrieval
diff --git a/eth/downloader/types.go b/eth/downloader/types.go
index 3f30ea9dd1..ff70bfa0e3 100644
--- a/eth/downloader/types.go
+++ b/eth/downloader/types.go
@@ -34,22 +34,22 @@ type dataPack interface {
 // headerPack is a batch of block headers returned by a peer.
 type headerPack struct {
-	peerId  string
+	peerID  string
 	headers []*types.Header
 }

-func (p *headerPack) PeerId() string { return p.peerId }
+func (p *headerPack) PeerId() string { return p.peerID }
 func (p *headerPack) Items() int     { return len(p.headers) }
 func (p *headerPack) Stats() string  { return fmt.Sprintf("%d", len(p.headers)) }

 // bodyPack is a batch of block bodies returned by a peer.
 type bodyPack struct {
-	peerId       string
+	peerID       string
 	transactions [][]*types.Transaction
 	uncles       [][]*types.Header
 }

-func (p *bodyPack) PeerId() string { return p.peerId }
+func (p *bodyPack) PeerId() string { return p.peerID }
 func (p *bodyPack) Items() int {
 	if len(p.transactions) <= len(p.uncles) {
 		return len(p.transactions)
@@ -60,20 +60,20 @@ func (p *bodyPack) Stats() string { return fmt.Sprintf("%d:%d", len(p.transactio
 // receiptPack is a batch of receipts returned by a peer.
 type receiptPack struct {
-	peerId   string
+	peerID   string
 	receipts [][]*types.Receipt
 }

-func (p *receiptPack) PeerId() string { return p.peerId }
+func (p *receiptPack) PeerId() string { return p.peerID }
 func (p *receiptPack) Items() int     { return len(p.receipts) }
 func (p *receiptPack) Stats() string  { return fmt.Sprintf("%d", len(p.receipts)) }

 // statePack is a batch of states returned by a peer.
 type statePack struct {
-	peerId string
+	peerID string
 	states [][]byte
 }

-func (p *statePack) PeerId() string { return p.peerId }
+func (p *statePack) PeerId() string { return p.peerID }
 func (p *statePack) Items() int     { return len(p.states) }
 func (p *statePack) Stats() string  { return fmt.Sprintf("%d", len(p.states)) }
diff --git a/eth/fetcher/fetcher.go b/eth/fetcher/fetcher.go
index 0c679cec3a..6383596dce 100644
--- a/eth/fetcher/fetcher.go
+++ b/eth/fetcher/fetcher.go
@@ -88,7 +88,7 @@ type headerFilterTask struct {
 	time    time.Time       // Arrival time of the headers
 }

-// headerFilterTask represents a batch of block bodies (transactions and uncles)
+// bodyFilterTask represents a batch of block bodies (transactions and uncles)
 // needing fetcher filtering.
 type bodyFilterTask struct {
 	peer string // The source peer of block bodies
@@ -292,20 +292,20 @@ func (f *Fetcher) loop() {
 		height := f.chainHeight()
 		for !f.queue.Empty() {
 			op := f.queue.PopItem().(*inject)
+			hash := op.block.Hash()
 			if f.queueChangeHook != nil {
-				f.queueChangeHook(op.block.Hash(), false)
+				f.queueChangeHook(hash, false)
 			}
 			// If too high up the chain or phase, continue later
 			number := op.block.NumberU64()
 			if number > height+1 {
-				f.queue.Push(op, -float32(op.block.NumberU64()))
+				f.queue.Push(op, -float32(number))
 				if f.queueChangeHook != nil {
-					f.queueChangeHook(op.block.Hash(), true)
+					f.queueChangeHook(hash, true)
 				}
 				break
 			}
 			// Otherwise if fresh and still unknown, try and import
-			hash := op.block.Hash()
 			if number+maxUncleDist < height || f.getBlock(hash) != nil {
 				f.forgetBlock(hash)
 				continue
diff --git a/eth/filters/api.go b/eth/filters/api.go
index 1297b74788..592ad3b82f 100644
--- a/eth/filters/api.go
+++ b/eth/filters/api.go
@@ -104,8 +104,8 @@ func (api *PublicFilterAPI) timeoutLoop() {
 // https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
 func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
 	var (
-		pendingTxs   = make(chan common.Hash)
-		pendingTxSub = api.events.SubscribePendingTxEvents(pendingTxs)
+		pendingTxs   = make(chan []common.Hash)
+		pendingTxSub = api.events.SubscribePendingTxs(pendingTxs)
 	)

 	api.filtersMu.Lock()
@@ -118,7 +118,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID {
 			case ph := <-pendingTxs:
 				api.filtersMu.Lock()
 				if f, found := api.filters[pendingTxSub.ID]; found {
-					f.hashes = append(f.hashes, ph)
+					f.hashes = append(f.hashes, ph...)
 				}
 				api.filtersMu.Unlock()
 			case <-pendingTxSub.Err():
@@ -144,13 +144,17 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su
 	rpcSub := notifier.CreateSubscription()

 	go func() {
-		txHashes := make(chan common.Hash)
-		pendingTxSub := api.events.SubscribePendingTxEvents(txHashes)
+		txHashes := make(chan []common.Hash, 128)
+		pendingTxSub := api.events.SubscribePendingTxs(txHashes)

 		for {
 			select {
-			case h := <-txHashes:
-				notifier.Notify(rpcSub.ID, h)
+			case hashes := <-txHashes:
+				// To keep the original behaviour, send a single tx hash in one notification.
+				// TODO(rjl493456442) Send a batch of tx hashes in one notification
+				for _, h := range hashes {
+					notifier.Notify(rpcSub.ID, h)
+				}
 			case <-rpcSub.Err():
 				pendingTxSub.Unsubscribe()
 				return
diff --git a/eth/filters/filter.go b/eth/filters/filter.go
index 5dfe60e772..7000d74fa4 100644
--- a/eth/filters/filter.go
+++ b/eth/filters/filter.go
@@ -36,7 +36,7 @@ type Backend interface {
 	GetReceipts(ctx context.Context, blockHash common.Hash) (types.Receipts, error)
 	GetLogs(ctx context.Context, blockHash common.Hash) ([][]*types.Log, error)

-	SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription
+	SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription
 	SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription
 	SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription
 	SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription
@@ -258,9 +258,9 @@ Logs:
 			if len(topics) > len(log.Topics) {
 				continue Logs
 			}
-			for i, topics := range topics {
-				match := len(topics) == 0 // empty rule set == wildcard
-				for _, topic := range topics {
+			for i, sub := range topics {
+				match := len(sub) == 0 // empty rule set == wildcard
+				for _, topic := range sub {
 					if log.Topics[i] == topic {
 						match = true
 						break
diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go
index bb11734a76..4e999cda85 100644
--- a/eth/filters/filter_system.go
+++ b/eth/filters/filter_system.go
@@ -59,7 +59,7 @@ const (
 const (
-	// txChanSize is the size of channel listening to TxPreEvent.
+	// txChanSize is the size of channel listening to NewTxsEvent.
 	// The number is referenced from the size of tx pool.
 	txChanSize = 4096
 	// rmLogsChanSize is the size of channel listening to RemovedLogsEvent.
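A compact illustration of the batched delivery these hunks switch to may help here. The sketch below is an editor-supplied example, not part of the patch: it assumes only the event.Feed API and the core.NewTxsEvent type already used throughout this diff, and collectHashes is a hypothetical helper mirroring what the broadcast path now does for pending-transaction filters.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/event"
)

// collectHashes flattens one batched event into a hash slice, the way the
// pending-transaction filter path now appends ph... instead of a single hash.
func collectHashes(ev core.NewTxsEvent) []common.Hash {
	hashes := make([]common.Hash, 0, len(ev.Txs))
	for _, tx := range ev.Txs {
		hashes = append(hashes, tx.Hash())
	}
	return hashes
}

func main() {
	var feed event.Feed
	// Buffered like the subscription channels in this patch (txChanSize = 4096).
	ch := make(chan core.NewTxsEvent, 4096)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// A single Send now carries a whole batch of transactions.
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 21000, big.NewInt(1), nil)
	feed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}})

	fmt.Println("tx hashes delivered in one event:", len(collectHashes(<-ch)))
}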
@@ -80,7 +80,7 @@ type subscription struct {
 	created   time.Time
 	logsCrit  ethereum.FilterQuery
 	logs      chan []*types.Log
-	hashes    chan common.Hash
+	hashes    chan []common.Hash
 	headers   chan *types.Header
 	installed chan struct{} // closed when the filter is installed
 	err       chan error    // closed when the filter is uninstalled
@@ -95,7 +95,7 @@ type EventSystem struct {
 	lastHead *types.Header

 	// Subscriptions
-	txSub     event.Subscription // Subscription for new transaction event
+	txsSub    event.Subscription // Subscription for new transaction event
 	logsSub   event.Subscription // Subscription for new log event
 	rmLogsSub event.Subscription // Subscription for removed log event
 	chainSub  event.Subscription // Subscription for new chain event
@@ -104,7 +104,7 @@ type EventSystem struct {
 	// Channels
 	install   chan *subscription         // install filter for event notification
 	uninstall chan *subscription         // remove filter for event notification
-	txCh      chan core.TxPreEvent       // Channel to receive new transaction event
+	txsCh     chan core.NewTxsEvent      // Channel to receive new transactions event
 	logsCh    chan []*types.Log          // Channel to receive new log event
 	rmLogsCh  chan core.RemovedLogsEvent // Channel to receive removed log event
 	chainCh   chan core.ChainEvent       // Channel to receive new chain event
@@ -123,14 +123,14 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
 		lightMode: lightMode,
 		install:   make(chan *subscription),
 		uninstall: make(chan *subscription),
-		txCh:      make(chan core.TxPreEvent, txChanSize),
+		txsCh:     make(chan core.NewTxsEvent, txChanSize),
 		logsCh:    make(chan []*types.Log, logsChanSize),
 		rmLogsCh:  make(chan core.RemovedLogsEvent, rmLogsChanSize),
 		chainCh:   make(chan core.ChainEvent, chainEvChanSize),
 	}

 	// Subscribe events
-	m.txSub = m.backend.SubscribeTxPreEvent(m.txCh)
+	m.txsSub = m.backend.SubscribeNewTxsEvent(m.txsCh)
 	m.logsSub = m.backend.SubscribeLogsEvent(m.logsCh)
 	m.rmLogsSub = m.backend.SubscribeRemovedLogsEvent(m.rmLogsCh)
 	m.chainSub = m.backend.SubscribeChainEvent(m.chainCh)
@@ -138,7 +138,7 @@ func NewEventSystem(mux *event.TypeMux, backend Backend, lightMode bool) *EventS
 	m.pendingLogSub = m.mux.Subscribe(core.PendingLogsEvent{})

 	// Make sure none of the subscriptions are empty
-	if m.txSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
+	if m.txsSub == nil || m.logsSub == nil || m.rmLogsSub == nil || m.chainSub == nil ||
 		m.pendingLogSub.Closed() {
 		log.Crit("Subscribe for event system failed")
 	}
@@ -240,7 +240,7 @@ func (es *EventSystem) subscribeMinedPendingLogs(crit ethereum.FilterQuery, logs
 		logsCrit:  crit,
 		created:   time.Now(),
 		logs:      logs,
-		hashes:    make(chan common.Hash),
+		hashes:    make(chan []common.Hash),
 		headers:   make(chan *types.Header),
 		installed: make(chan struct{}),
 		err:       make(chan error),
@@ -257,7 +257,7 @@ func (es *EventSystem) subscribeLogs(crit ethereum.FilterQuery, logs chan []*typ
 		logsCrit:  crit,
 		created:   time.Now(),
 		logs:      logs,
-		hashes:    make(chan common.Hash),
+		hashes:    make(chan []common.Hash),
 		headers:   make(chan *types.Header),
 		installed: make(chan struct{}),
 		err:       make(chan error),
@@ -274,7 +274,7 @@ func (es *EventSystem) subscribePendingLogs(crit ethereum.FilterQuery, logs chan
 		logsCrit:  crit,
 		created:   time.Now(),
 		logs:      logs,
-		hashes:    make(chan common.Hash),
+		hashes:    make(chan []common.Hash),
 		headers:   make(chan *types.Header),
 		installed: make(chan struct{}),
 		err:       make(chan error),
@@ -290,7 +290,7 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
 		typ:       BlocksSubscription,
 		created:   time.Now(),
 		logs:      make(chan []*types.Log),
-		hashes:    make(chan common.Hash),
+		hashes:    make(chan []common.Hash),
 		headers:   headers,
 		installed: make(chan struct{}),
 		err:       make(chan error),
@@ -298,9 +298,9 @@ func (es *EventSystem) SubscribeNewHeads(headers chan *types.Header) *Subscripti
 	return es.subscribe(sub)
 }

-// SubscribePendingTxEvents creates a subscription that writes transaction hashes for
+// SubscribePendingTxs creates a subscription that writes transaction hashes for
 // transactions that enter the transaction pool.
-func (es *EventSystem) SubscribePendingTxEvents(hashes chan common.Hash) *Subscription {
+func (es *EventSystem) SubscribePendingTxs(hashes chan []common.Hash) *Subscription {
 	sub := &subscription{
 		id:  rpc.NewID(),
 		typ: PendingTransactionsSubscription,
@@ -348,9 +348,13 @@ func (es *EventSystem) broadcast(filters filterIndex, ev interface{}) {
 			}
 		}
 	}
-	case core.TxPreEvent:
+	case core.NewTxsEvent:
+		hashes := make([]common.Hash, 0, len(e.Txs))
+		for _, tx := range e.Txs {
+			hashes = append(hashes, tx.Hash())
+		}
 		for _, f := range filters[PendingTransactionsSubscription] {
-			f.hashes <- e.Tx.Hash()
+			f.hashes <- hashes
 		}
 	case core.ChainEvent:
 		for _, f := range filters[BlocksSubscription] {
@@ -446,7 +450,7 @@ func (es *EventSystem) eventLoop() {
 	// Ensure all subscriptions get cleaned up
 	defer func() {
 		es.pendingLogSub.Unsubscribe()
-		es.txSub.Unsubscribe()
+		es.txsSub.Unsubscribe()
 		es.logsSub.Unsubscribe()
 		es.rmLogsSub.Unsubscribe()
 		es.chainSub.Unsubscribe()
@@ -460,7 +464,7 @@ func (es *EventSystem) eventLoop() {
 	for {
 		select {
 		// Handle subscribed events
-		case ev := <-es.txCh:
+		case ev := <-es.txsCh:
 			es.broadcast(index, ev)
 		case ev := <-es.logsCh:
 			es.broadcast(index, ev)
@@ -495,7 +499,7 @@ func (es *EventSystem) eventLoop() {
 			close(f.err)

 		// System stopped
-		case <-es.txSub.Err():
+		case <-es.txsSub.Err():
 			return
 		case <-es.logsSub.Err():
 			return
diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go
index b4df24b471..ff1af85a80 100644
--- a/eth/filters/filter_system_test.go
+++ b/eth/filters/filter_system_test.go
@@ -96,7 +96,7 @@ func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types
 	return logs, nil
 }

-func (b *testBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
+func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
 	return b.txFeed.Subscribe(ch)
 }
@@ -232,10 +232,7 @@ func TestPendingTxFilter(t *testing.T) {
 	fid0 := api.NewPendingTransactionFilter()

 	time.Sleep(1 * time.Second)
-	for _, tx := range transactions {
-		ev := core.TxPreEvent{Tx: tx}
-		txFeed.Send(ev)
-	}
+	txFeed.Send(core.NewTxsEvent{Txs: transactions})

 	timeout := time.Now().Add(1 * time.Second)
 	for {
diff --git a/eth/handler.go b/eth/handler.go
index c8f7e13f16..f2d2eaf1c7 100644
--- a/eth/handler.go
+++ b/eth/handler.go
@@ -46,7 +46,7 @@ const (
 	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
 	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header

-	// txChanSize is the size of channel listening to TxPreEvent.
+	// txChanSize is the size of channel listening to NewTxsEvent.
 	// The number is referenced from the size of tx pool.
txChanSize = 4096 ) @@ -64,7 +64,7 @@ func errResp(code errCode, format string, v ...interface{}) error { } type ProtocolManager struct { - networkId uint64 + networkID uint64 fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) @@ -81,8 +81,8 @@ type ProtocolManager struct { SubProtocols []p2p.Protocol eventMux *event.TypeMux - txCh chan core.TxPreEvent - txSub event.Subscription + txsCh chan core.NewTxsEvent + txsSub event.Subscription minedBlockSub *event.TypeMuxSubscription // channels for fetcher, syncer, txsyncLoop @@ -98,10 +98,10 @@ type ProtocolManager struct { // NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable of // communicating over the Ethereum network. -func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { +func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ - networkId: networkId, + networkID: networkID, eventMux: mux, txpool: txpool, blockchain: blockchain, @@ -204,8 +204,8 @@ func (pm *ProtocolManager) Start(maxPeers int) { pm.maxPeers = maxPeers // broadcast transactions - pm.txCh = make(chan core.TxPreEvent, txChanSize) - pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh) + pm.txsCh = make(chan core.NewTxsEvent, txChanSize) + pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh) go pm.txBroadcastLoop() // broadcast mined blocks @@ -220,7 +220,7 @@ func (pm *ProtocolManager) Stop() { log.Info("Stopping Ethereum protocol") - pm.txSub.Unsubscribe() // quits txBroadcastLoop + pm.txsSub.Unsubscribe() // quits txBroadcastLoop pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop // Quit the sync loop.
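The rename from TxPreEvent to NewTxsEvent changes the granularity of the feed: one event now carries a whole batch of transactions instead of a single one. A minimal sketch of how a consumer wires up against the new SubscribeNewTxsEvent contract (demoPool is a hypothetical stand-in for anything satisfying the txPool interface; core.NewTxsEvent and event.Feed are the real types):

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/event"
)

// demoPool is illustrative only; the real tx pool exposes the same method
// backed by an event.Feed, as the testTxPool hunk later in this diff shows.
type demoPool struct{ txFeed event.Feed }

func (p *demoPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
	return p.txFeed.Subscribe(ch)
}

func main() {
	p := new(demoPool)
	txsCh := make(chan core.NewTxsEvent, 4096) // mirrors txChanSize above
	sub := p.SubscribeNewTxsEvent(txsCh)
	defer sub.Unsubscribe()

	go func() {
		// A producer now sends one event per batch, not one per transaction.
		p.txFeed.Send(core.NewTxsEvent{Txs: nil}) // empty placeholder batch
	}()
	select {
	case ev := <-txsCh:
		log.Printf("received a batch of %d transactions", len(ev.Txs))
	case err := <-sub.Err():
		log.Fatal(err)
	}
}

The filter-system hunk above then fans a single NewTxsEvent out into one hashes slice per pending-transaction subscriber, which is why the subscription channel type changed from common.Hash to []common.Hash.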
@@ -263,7 +263,7 @@ func (pm *ProtocolManager) handle(p *peer) error { number = head.Number.Uint64() td = pm.blockchain.GetTd(hash, number) ) - if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil { + if err := p.Handshake(pm.networkID, td, hash, genesis.Hash()); err != nil { p.Log().Debug("Ethereum handshake failed", "err", err) return err } @@ -340,6 +340,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { return errResp(ErrDecode, "%v: %v", msg, err) } hashMode := query.Origin.Hash != (common.Hash{}) + first := true + maxNonCanonical := uint64(100) // Gather headers until the fetch or network limits are reached var ( @@ -351,31 +353,36 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { // Retrieve the next header satisfying the query var origin *types.Header if hashMode { - origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) + if first { + first = false + origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) + if origin != nil { + query.Origin.Number = origin.Number.Uint64() + } + } else { + origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number) + } } else { origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break } - number := origin.Number.Uint64() headers = append(headers, origin) bytes += estHeaderRlpSize // Advance to the next header of the query switch { - case query.Origin.Hash != (common.Hash{}) && query.Reverse: + case hashMode && query.Reverse: // Hash based traversal towards the genesis block - for i := 0; i < int(query.Skip)+1; i++ { - if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil { - query.Origin.Hash = header.ParentHash - number-- - } else { - unknown = true - break - } + ancestor := query.Skip + 1 + if ancestor == 0 { + unknown = true + } else { + query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) + unknown = (query.Origin.Hash == common.Hash{}) } - case query.Origin.Hash != (common.Hash{}) && !query.Reverse: + case hashMode && !query.Reverse: // Hash based traversal towards the leaf block var ( current = origin.Number.Uint64() @@ -387,8 +394,10 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { unknown = true } else { if header := pm.blockchain.GetHeaderByNumber(next); header != nil { - if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { - query.Origin.Hash = header.Hash() + nextHash := header.Hash() + expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) + if expOldHash == query.Origin.Hash { + query.Origin.Hash, query.Origin.Number = nextHash, next } else { unknown = true } @@ -698,7 +707,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { // Send the block to a subset of our peers transfer := peers[:int(math.Sqrt(float64(len(peers))))] for _, peer := range transfer { - peer.SendNewBlock(block, td) + peer.AsyncSendNewBlock(block, td) } log.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) return @@ -706,22 +715,29 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { // Otherwise if the block is indeed in our own chain, announce it if pm.blockchain.HasBlock(hash, block.NumberU64()) { for _, peer := range peers { - peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()}) + peer.AsyncSendNewBlockHash(block) }
log.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt))) } } -// BroadcastTx will propagate a transaction to all peers which are not known to +// BroadcastTxs will propagate a batch of transactions to all peers which are not known to // already have the given transaction. -func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) { - // Broadcast transaction to a batch of peers not knowing about it - peers := pm.peers.PeersWithoutTx(hash) - //FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))] - for _, peer := range peers { - peer.SendTransactions(types.Transactions{tx}) +func (pm *ProtocolManager) BroadcastTxs(txs types.Transactions) { + var txset = make(map[*peer]types.Transactions) + + // Broadcast transactions to a batch of peers not knowing about it + for _, tx := range txs { + peers := pm.peers.PeersWithoutTx(tx.Hash()) + for _, peer := range peers { + txset[peer] = append(txset[peer], tx) + } + log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers)) + } + // FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))] + for peer, txs := range txset { + peer.AsyncSendTransactions(txs) } - log.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers)) } // Mined broadcast loop @@ -739,11 +755,11 @@ func (pm *ProtocolManager) minedBroadcastLoop() { func (pm *ProtocolManager) txBroadcastLoop() { for { select { - case event := <-pm.txCh: - pm.BroadcastTx(event.Tx.Hash(), event.Tx) + case event := <-pm.txsCh: + pm.BroadcastTxs(event.Txs) // Err() channel will be closed when unsubscribing. - case <-pm.txSub.Err(): + case <-pm.txsSub.Err(): return } } @@ -763,7 +779,7 @@ type NodeInfo struct { func (pm *ProtocolManager) NodeInfo() *NodeInfo { currentBlock := pm.blockchain.CurrentBlock() return &NodeInfo{ - Network: pm.networkId, + Network: pm.networkID, Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()), Genesis: pm.blockchain.Genesis().Hash(), Config: pm.blockchain.Config(), diff --git a/eth/helper_test.go b/eth/helper_test.go index 8a0260fc95..3d2ab0aba1 100644 --- a/eth/helper_test.go +++ b/eth/helper_test.go @@ -124,7 +124,7 @@ func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) { return batches, nil } -func (p *testTxPool) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription { +func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return p.txFeed.Subscribe(ch) } diff --git a/eth/peer.go b/eth/peer.go index 42ead53965..953aca17b9 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -37,8 +37,24 @@ var ( ) const ( - maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS) - maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) + maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS) + maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) + + // maxQueuedTxs is the maximum number of transaction lists to queue up before + // dropping broadcasts. This is a sensitive number as a transaction list might + // contain a single transaction, or thousands. + maxQueuedTxs = 128 + + // maxQueuedProps is the maximum number of block propagations to queue up before + // dropping broadcasts. There's not much point in queueing stale blocks, so a few + // that might cover uncles should be enough. 
+ maxQueuedProps = 4 + + // maxQueuedAnns is the maximum number of block announcements to queue up before + // dropping broadcasts. Similarly to block propagations, there's no point in queueing + // above some healthy uncle limit, so use that. + maxQueuedAnns = 4 + handshakeTimeout = 5 * time.Second ) @@ -50,6 +66,12 @@ type PeerInfo struct { Head string `json:"head"` // SHA3 hash of the peer's best owned block } +// propEvent is a block propagation, waiting for its turn in the broadcast queue. +type propEvent struct { + block *types.Block + td *big.Int +} + type peer struct { id string @@ -63,23 +85,64 @@ type peer struct { td *big.Int lock sync.RWMutex - knownTxs *set.Set // Set of transaction hashes known to be known by this peer - knownBlocks *set.Set // Set of block hashes known to be known by this peer + knownTxs *set.Set // Set of transaction hashes known to be known by this peer + knownBlocks *set.Set // Set of block hashes known to be known by this peer + queuedTxs chan []*types.Transaction // Queue of transactions to broadcast to the peer + queuedProps chan *propEvent // Queue of blocks to broadcast to the peer + queuedAnns chan *types.Block // Queue of blocks to announce to the peer + term chan struct{} // Termination channel to stop the broadcaster } func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer { - id := p.ID() - return &peer{ Peer: p, rw: rw, version: version, - id: fmt.Sprintf("%x", id[:8]), + id: fmt.Sprintf("%x", p.ID().Bytes()[:8]), knownTxs: set.New(), knownBlocks: set.New(), + queuedTxs: make(chan []*types.Transaction, maxQueuedTxs), + queuedProps: make(chan *propEvent, maxQueuedProps), + queuedAnns: make(chan *types.Block, maxQueuedAnns), + term: make(chan struct{}), } } +// broadcast is a write loop that multiplexes block propagations, announcements +// and transaction broadcasts into the remote peer. The goal is to have an async +// writer that does not lock up node internals. +func (p *peer) broadcast() { + for { + select { + case txs := <-p.queuedTxs: + if err := p.SendTransactions(txs); err != nil { + return + } + p.Log().Trace("Broadcast transactions", "count", len(txs)) + + case prop := <-p.queuedProps: + if err := p.SendNewBlock(prop.block, prop.td); err != nil { + return + } + p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td) + + case block := <-p.queuedAnns: + if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil { + return + } + p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash()) + + case <-p.term: + return + } + } +} + +// close signals the broadcast goroutine to terminate. +func (p *peer) close() { + close(p.term) +} + // Info gathers and returns a collection of metadata known about a peer. func (p *peer) Info() *PeerInfo { hash, td := p.Head() @@ -139,6 +202,19 @@ func (p *peer) SendTransactions(txs types.Transactions) error { return p2p.Send(p.rw, TxMsg, txs) } +// AsyncSendTransactions queues a list of transactions for propagation to a remote +// peer. If the peer's broadcast queue is full, the event is silently dropped. +func (p *peer) AsyncSendTransactions(txs []*types.Transaction) { + select { + case p.queuedTxs <- txs: + for _, tx := range txs { + p.knownTxs.Add(tx.Hash()) + } + default: + p.Log().Debug("Dropping transaction propagation", "count", len(txs)) + } +} + // SendNewBlockHashes announces the availability of a number of blocks through // a hash notification.
func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error { @@ -153,12 +229,35 @@ func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error return p2p.Send(p.rw, NewBlockHashesMsg, request) } +// AsyncSendNewBlockHash queues the availability of a block for propagation to a +// remote peer. If the peer's broadcast queue is full, the event is silently +// dropped. +func (p *peer) AsyncSendNewBlockHash(block *types.Block) { + select { + case p.queuedAnns <- block: + p.knownBlocks.Add(block.Hash()) + default: + p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash()) + } +} + // SendNewBlock propagates an entire block to a remote peer. func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error { p.knownBlocks.Add(block.Hash()) return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td}) } +// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If +// the peer's broadcast queue is full, the event is silently dropped. +func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { + select { + case p.queuedProps <- &propEvent{block: block, td: td}: + p.knownBlocks.Add(block.Hash()) + default: + p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash()) + } +} + // SendBlockHeaders sends a batch of block headers to the remote peer. func (p *peer) SendBlockHeaders(headers []*types.Header) error { return p2p.Send(p.rw, BlockHeadersMsg, headers) @@ -313,7 +412,8 @@ func newPeerSet() *peerSet { } // Register injects a new peer into the working set, or returns an error if the -// peer is already known. +// peer is already known. If a new peer is registered, its broadcast loop is also +// started. func (ps *peerSet) Register(p *peer) error { ps.lock.Lock() defer ps.lock.Unlock() @@ -325,6 +425,8 @@ func (ps *peerSet) Register(p *peer) error { return errAlreadyRegistered } ps.peers[p.id] = p + go p.broadcast() + return nil } @@ -334,10 +436,13 @@ func (ps *peerSet) Unregister(id string) error { ps.lock.Lock() defer ps.lock.Unlock() - if _, ok := ps.peers[id]; !ok { + p, ok := ps.peers[id] + if !ok { return errNotRegistered } delete(ps.peers, id) + p.close() + return nil } diff --git a/eth/protocol.go b/eth/protocol.go index 328d5b9939..0e90e6a2ef 100644 --- a/eth/protocol.go +++ b/eth/protocol.go @@ -103,9 +103,9 @@ type txPool interface { // The slice should be modifiable by the caller. Pending() (map[common.Address]types.Transactions, error) - // SubscribeTxPreEvent should return an event subscription of - // TxPreEvent and send events to the given channel. - SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription + // SubscribeNewTxsEvent should return an event subscription of + // NewTxsEvent and send events to the given channel. + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription } // statusData is the network packet for the status message.
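All of the Async* senders introduced above share one pattern: a bounded channel per peer plus a select with a default branch, so a slow peer sheds its own broadcasts instead of blocking the protocol manager. The pattern in isolation, stripped of the eth types (a sketch only; the queue type and its buffer size are illustrative, just the channel discipline matches the code above):

package main

import "fmt"

// queue mimics one of the peer's broadcast queues (cf. maxQueuedTxs above).
type queue struct {
	ch   chan string
	term chan struct{} // closed to stop the writer, cf. peer.close
}

// asyncSend never blocks: if the buffer is full the item is dropped,
// the same behaviour as AsyncSendTransactions/AsyncSendNewBlock.
func (q *queue) asyncSend(item string) {
	select {
	case q.ch <- item:
	default:
		fmt.Println("dropping", item) // peer too slow, shed load
	}
}

// writer is the per-peer write loop, analogous to peer.broadcast.
func (q *queue) writer(done chan<- struct{}) {
	defer close(done)
	for {
		select {
		case item := <-q.ch:
			fmt.Println("sending", item)
		case <-q.term:
			return
		}
	}
}

func main() {
	q := &queue{ch: make(chan string, 2), term: make(chan struct{})}
	done := make(chan struct{})
	go q.writer(done)
	for i := 0; i < 5; i++ {
		q.asyncSend(fmt.Sprintf("block #%d", i)) // some will be dropped
	}
	close(q.term)
	<-done
}

Tying the writer's lifetime to peer-set membership is the design choice that makes this safe: Register starts the goroutine (go p.broadcast() above) and Unregister closes term, so no queue outlives its peer.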
diff --git a/eth/protocol_test.go b/eth/protocol_test.go index b2f93d8dd1..aa43dfa920 100644 --- a/eth/protocol_test.go +++ b/eth/protocol_test.go @@ -116,7 +116,7 @@ func testRecvTransactions(t *testing.T, protocol int) { t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash()) } case <-time.After(2 * time.Second): - t.Errorf("no TxPreEvent received within 2 seconds") + t.Errorf("no NewTxsEvent received within 2 seconds") } } diff --git a/eth/tracers/internal/tracers/4byte_tracer.js b/eth/tracers/internal/tracers/4byte_tracer.js index 0a6b088cca..462b4ad4cb 100644 --- a/eth/tracers/internal/tracers/4byte_tracer.js +++ b/eth/tracers/internal/tracers/4byte_tracer.js @@ -60,7 +60,7 @@ return; } // Skip any pre-compile invocations, those are just fancy opcodes - if (isPrecompiled(toAddress(log.stack.peek(1)))) { + if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) { return; } // Gather internal call details @@ -78,7 +78,7 @@ // the final result of the tracing. result: function(ctx) { // Save the outer calldata also - if (ctx.input.length > 4) { + if (ctx.input.length >= 4) { this.store(slice(ctx.input, 0, 4), ctx.input.length-4) } return this.ids; diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index 1912f74edd..a3963b53b8 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -1,17 +1,21 @@ // Code generated by go-bindata. DO NOT EDIT. // sources: // 4byte_tracer.js +// bigram_tracer.js // call_tracer.js // evmdis_tracer.js // noop_tracer.js // opcount_tracer.js // prestate_tracer.js +// trigram_tracer.js +// unigram_tracer.js package tracers import ( "bytes" "compress/gzip" + "crypto/sha256" "fmt" "io" "io/ioutil" @@ -42,8 +46,9 @@ func bindataRead(data []byte, name string) ([]byte, error) { } type asset struct { - bytes []byte - info os.FileInfo + bytes []byte + info os.FileInfo + digest [sha256.Size]byte } type bindataFileInfo struct { @@ -72,7 +77,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var __4byte_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2
c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x3a\x9d\x9f\xf1\xf8\x2c\xc2\xba\x7f\x15\x6b\xbd\xb7\xaa\x90\xb5\x59\x7c\x87\x0f\xf0\x06\x54\x12\x17\xaa\x13\x87\xf6\xbc\x4d\xda\xcf\x41\x87\xeb\x1f\x3f\xc0\xa8\x72\x59\x42\xdc\x26\xc9\x8f\x30\xde\xd8\x97\xca\x08\x22\x0b\x41\xb0\xce\xdd\x21\xf6\xbc\xa9\xda\x01\x24\xaa\xb0\xde\xc3\xa8\x13\x05\x6a\xdd\x51\xa7\x8a\xa7\x56\x4b\x22\x8a\x8c\x8e\xe5\xb2\x4b\xab\x4f\x02\x21\xa9\x10\x59\xa5\x10\xfe\xbc\xb1\x09\x08\x53\x8b\x28\x29\x97\x75\x23\xd8\xff\x50\x36\x50\xbb\x70\xe8\x7f\xe4\x83\x93\xc7\x7e\x6a\x3d\x85\x35\xbf\x42\xee\x29\x42\x27\xf8\x3b\xc7\x6e\xab\xae\xaa\xe6\x64\x80\x2b\xc7\x1f\xe7\xbf\x02\xae\x76\x15\x2f\x8c\xb0\x47\x1b\xe5\xf9\x11\x29\x49\xfb\x17\x1d\xd7\xfd\x6b\x0b\x1e\x99\x5c\x43\xee\x59\x10\x99\xb7\x55\x55\x24\xed\x63\x6d\xf2\x82\xe2\x0c\xcd\x9a\x52\xf8\xf8\x5c\xa0\xa3\x9c\x97\x89\x7e\xbe\x1b\xc1\x49\x14\xf2\xfc\xd6\xba\x3b\xea\xbc\x9e\x2b\x75\x07\x97\x3d\xfb\xd4\xfc\x6f\x00\x00\x00\xff\xff\x08\x1e\xd9\xac\x67\x0b\x00\x00") +var __4byte_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x94\x56\x5b\x6f\xdb\x4a\x0e\x7e\xb6\x7f\x05\xd7\x2f\xb5\x51\x59\x8e\x2f\x89\x2f\xd9\x16\xf0\xe6\xa4\x6d\x80\x9c\x24\x88\xdd\x3d\x28\x16\xfb\x30\x9e\xa1\xac\xd9\xc8\x33\xc2\x0c\xe5\x4b\x73\xf2\xdf\x17\x1c\x49\x89\x93\xd3\x62\xbb\x4f\x96\x47\xc3\x8f\x1f\xc9\x8f\xa4\x7a\x3d\xb8\xb0\xf9\xc1\xe9\x75\x4a\x30\x38\xe9\x8f\x61\x99\x22\xac\x6d\x17\x29\x45\x87\xc5\x06\xe6\x05\xa5\xd6\xf9\x66\xaf\x07\xcb\x54\x7b\x48\x74\x86\xa0\x3d\xe4\xc2\x11\xd8\x04\xe8\xcd\xfd\x4c\xaf\x9c\x70\x87\xb8\xd9\xeb\x95\x36\x3f\x7c\xcd\x08\x89\x43\x04\x6f\x13\xda\x09\x87\x33\x38\xd8\x02\xa4\x30\xe0\x50\x69\x4f\x4e\xaf\x0a\x42\xd0\x04\xc2\xa8\x9e\x75\xb0\xb1\x4a\x27\x07\x86\xd4\x04\x85\x51\xe8\x82\x6b\x42\xb7\xf1\x35\x8f\xcf\x37\x5f\xe1\x1a\xbd\x47\x07\x9f\xd1\xa0\x13\x19\xdc\x15\xab\x4c\x4b\xb8\xd6\x12\x8d\x47\x10\x1e\x72\x3e\xf1\x29\x2a\x58\x05\x38\x36\xfc\xc4\x54\x16\x15\x15\xf8\x64\x0b\xa3\x04\x69\x6b\x22\x40\xcd\xcc\x61\x8b\xce\x6b\x6b\x60\x58\xbb\xaa\x00\x23\xb0\x8e\x41\xda\x82\x38\x00\x07\x36\x67\xbb\x0e\x08\x73\x80\x4c\xd0\x8b\xe9\x2f\x24\xe4\x25\x6e\x05\xda\x04\x37\xa9\xcd\x11\x28\x15\xc4\x51\xef\x74\x96\xc1\x0a\xa1\xf0\x98\x14\x59\xc4\x68\xab\x82\xe0\x8f\xab\xe5\x97\xdb\xaf\x4b\x98\xdf\x7c\x83\x3f\xe6\xf7\xf7\xf3\x9b\xe5\xb7\x73\xd8\x69\x4a\x6d\x41\x80\x5b\x2c\xa1\xf4\x26\xcf\x34\x2a\xd8\x09\xe7\x84\xa1\x03\xd8\x84\x11\x7e\xbf\xbc\xbf\xf8\x32\xbf\x59\xce\xff\x71\x75\x7d\xb5\xfc\x06\xd6\xc1\xa7\xab\xe5\xcd\xe5\x62\x01\x9f\x6e\xef\x61\x0e\x77\xf3\xfb\xe5\xd5\xc5\xd7\xeb\xf9\x3d\xdc\x7d\xbd\xbf\xbb\x5d\x5c\xc6\xb0\x40\x66\x85\x6c\xff\xbf\x73\x9e\x84\xea\x39\x04\x85\x24\x74\xe6\xeb\x4c\x7c\xb3\x05\xf8\xd4\x16\x99\x82\x54\x6c\x11\x1c\x4a\xd4\x5b\x54\x20\x40\xda\xfc\xf0\xcb\x45\x65\x2c\x91\x59\xb3\x0e\x31\xff\x54\x90\x70\x95\x80\xb1\x14\x81\x47\x84\xbf\xa7\x44\xf9\xac\xd7\xdb\xed\x76\xf1\xda\x14\xb1\x75\xeb\x5e\x56\xc2\xf9\xde\xc7\xb8\xc9\x98\xa3\xd5\x81\x70\xe9\x84\x44\x07\x1e\x85\x93\x29\xfa\x10\x4c\x78\xd1\xd5\x0a\x0d\xe9\x44\xa3\xf3\x11\x8b\x14\xa4\xcd\x32\x94\xe4\x99\xc1\x26\x5c\xcc\xad\xa7\x6e\xee\xac\x44\xef\xb5\x59\x73\xe0\x70\x45\xaf\x2e\xc2\x06\x29\xb5\xca\xc3\x11\xdc\xdb\x68\xbc\xfe\x8e\x75\x36\x7c\x91\x97\x65\x54\x82\x44\x04\xde\x86\xe8\xc1\x21\xcb\x0c\x15\x78\xbd\x36\x82\x0a\x87\xa1\x97\x56\x08\x1b\x41\x92\xc5\x2e\xd6\x42\x1b\x4f\x7f\x01\x64\x9c\xba\x22\x97\x7b\xb1\xc9\x33\x9c\xf1\x33\xc0\x47\x50\xb8\x2a\xd6\x31\x71\x0a\x96\x4e\x18\x2f\x24\x8b\xbb\x0d\xad\x93\xfd\xa0\x3f\xc2\xd3\xe9\x18\x87\xa7\x4a\x9c\x4c\x86\x67\xd3\x41\x72\x3a\x9c\x9c\xf5\x47\x7d\x3c\x9b\x26\xa3\x31\x4e\xc7\xc3\xd5\x40\x9e\x9e\xe1\x58\x4c\x4e\xc6\xc3\x55\x1f\xc5\xc9\x24\x51\xe3\xd3\x71\x1f\xa7\x0a\x5b\x11\x3c\x06\x60\x37\x83\xd6\x51\xa6\x5b\x4f\x9d\xd2\xfb\x63\xf9\x03\x70\xb2\x1f\x8c\x95\x1c\x4c\xc7\xd8\xed\x0f\x26\x33\xe8\x47\x2f\x6f\x86\x13\x29\x47\x93\x61\xbf\x7b\x32\x83\xc1\xd1\xf9\xe9\x60\x94\x0c\x27\x93\x69\x77\x7a\xf6\xda\x40\xa8\xe4\x74\x9a\x4c\xa7\xdd\xc1\xe4\x0d\x94\x1c\x4c\xfa\xaa\x3f\x45\x86\xea\x97\xc7\x4f\xcd\xc7\x66\x83\x07\x8e\xf2\x20\xd6\x6b\x87\x6b\x41\x58\x56\x2d\x30\x0e\x2f\x12\x1e\x16\x71\xb3\xc1\xcf\x33\x78\x7c\x8a\x9a\xc1\x46\x8a\x2c\x5b\x1e\x72\x56\x35\x15\xce\x78\x78\x97\x88\xcc\xe3\xbb\xa0\x0b\x63\x4d\x97\x2f\x78\x1e\x1f\x01\x2f\x47\x7c\xe8\x6a\xa3\x70\x1f\x2e\xf0\x51\xa2\x9d\x27\x1e\xb3\x62\x13\x10\x45\xc2\xd3\xe4\xdd\x56\x64\x05\xbe\x8b\x40\xc7\x18\xc3\x06\x37\x5c\x54\xe1\x28\x6e\x36\x6a\x97\x33\x48\x0a\x53\x56\xca\xe6\x9e\x5c\xe7\xb1\xd9\x68\xf8\x9d\x26\x99\x1e\x1d\x48\xe1\x11\x5a\x17\xf3\xeb\xeb\xd6\x0c\x5e\xfe\x5c\xdc\xfe\x76\xd9\x9a\x35\x1b\x0d\x76\xb9\x16\x2
c\x6d\xa5\x5c\x04\x5b\x91\x45\xa5\xbb\xea\xc7\x7f\x0f\x0f\xb6\xa0\xfa\xd7\x7f\x67\xb3\x32\x5e\x18\x9e\x43\xaf\x07\x9e\x84\x7c\x80\x9c\x1c\x90\x2d\xcd\x9a\xcf\xae\x7f\xbb\xbc\xbe\xfc\x3c\x5f\x5e\xbe\xa2\xb0\x58\xce\x97\x57\x17\xe5\xd1\x5f\x49\xfc\x1f\xfe\x07\x3f\xf3\xdf\x68\x3c\x35\x9f\x6f\x85\x9a\x9c\x37\x1b\x75\xd5\x3c\xf1\x9c\xf2\x3c\x8d\xc2\x18\xd1\x3c\x3c\xb9\x2c\x55\x6b\x86\x3e\xe7\x8e\xe1\x0e\x8a\x9b\x8d\x70\xff\x28\xdf\x5a\x45\xa1\xb9\x42\x86\xb7\xc2\xc1\x03\x1e\xe0\x03\xb4\x5a\xf0\x1e\xc8\x7e\xc1\x7d\x5b\xab\x0e\xbc\x87\x56\x97\x4f\xf8\xe6\x79\xb3\xd1\xa0\x54\xfb\x58\x2b\xff\xaf\x07\x3c\xfc\x1b\x3e\xc0\xeb\xff\xef\xa1\x0f\x7f\xfe\x09\xfd\x57\x34\x31\xe7\x85\xa1\xcd\xd6\x3e\xa0\x0a\x92\xe1\x01\x70\x00\x9b\x4b\xab\xaa\x8d\xc1\x11\xfc\xf3\x77\xc0\x3d\xca\x82\xd0\x07\xba\x98\x1f\xb1\xcd\xec\x3a\x02\xb5\xea\x00\xb3\xed\xf5\x60\xf1\xa0\xf3\xb0\xb8\x4a\x14\x5f\xc2\xf0\x46\x34\x96\x40\x1b\x42\x67\x44\x16\xa4\xed\xab\xf8\x24\xd5\x7c\x6b\xf5\x31\x6a\x6c\xf3\x98\xec\x82\x9c\x36\xeb\x76\xa7\xc3\x31\xea\x04\xda\x7f\x93\x54\xfa\xaa\xd2\x7f\x5e\x15\xe3\xd8\x75\xee\xb0\x2b\xed\x26\x0f\x5f\x19\x66\x6b\x65\xd8\xc3\x3e\x02\x4a\x2d\xef\x6f\x87\xf0\x9f\xc2\x13\x24\xc2\xc8\x67\xa2\x15\xbe\xf6\x77\x0e\x2b\x63\xd5\x26\x3b\x57\xca\xa1\xf7\x81\x51\x50\x42\xcc\x6d\xd6\xee\x77\x5e\xc8\xf5\xcf\x3a\x9d\xce\xcf\x48\x7d\x16\x61\xf7\xbf\x0a\xbc\x5e\x62\x55\xfc\xda\x2c\xbe\xc3\x07\x78\xe3\x41\x12\x57\xad\x13\x87\x5e\xbd\x4d\xda\xcf\x19\x08\xd7\x3f\x7e\x80\x51\xe5\xb2\x84\xb8\x4d\x92\x1f\x61\xbc\xb1\x2f\x65\x12\x14\x17\x22\x62\xd1\xbb\x43\xec\x79\x6d\xb5\x03\x48\x54\x61\xbd\x87\x51\x27\x0a\xd4\xba\xa3\x4e\x15\x4f\x2d\x9d\x44\x14\x19\x1d\x6b\x67\x97\x56\xdf\x07\x42\x52\x21\xb2\x4a\x2e\xfc\xad\x63\x13\x10\xa6\x56\x54\x52\x6e\xee\x46\xb0\xff\xa1\x86\xa0\x76\xe1\xd0\xff\xc8\x07\x27\x8f\xfd\xd4\xe2\x0a\x3b\x7f\x85\xdc\x60\x84\x4e\xf0\x47\x8f\xdd\x56\x2d\x56\x0d\xcd\x00\x57\xce\x42\xce\x7f\x05\x5c\x2d\x2e\xde\x1e\x61\xa9\x36\xca\xf3\x23\x52\x92\xf6\x2f\xa2\xae\x9b\xd9\x16\x3c\x3f\xb9\x86\xdc\xc0\x20\x32\x6f\xab\xaa\x48\xda\xc7\xda\xe4\x05\xc5\x19\x9a\x35\xa5\xc7\x15\x3a\x4a\x7a\x99\xe9\xe7\xcb\x11\x9c\x44\x21\xd1\x6f\xcd\xbb\xa3\xce\xeb\x29\x53\xf7\x73\xd9\xc1\x4f\xcd\xff\x06\x00\x00\xff\xff\x8e\xc8\x27\x72\x75\x0b\x00\x00") func _4byte_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -88,7 +93,27 @@ func _4byte_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "4byte_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xb4, 0xc5, 0x48, 0x2d, 0xd9, 0x43, 0x95, 0x93, 0x3b, 0x93, 0x2c, 0x47, 0x8c, 0x84, 0x32, 0x3c, 0x8b, 0x2e, 0xf3, 0x72, 0xc4, 0x57, 0xe6, 0x3a, 0xb3, 0xdf, 0x1d, 0xbf, 0x45, 0x3, 0xfc, 0xa}} + return a, nil +} + +var _bigram_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x5b\x6f\xdb\x36\x14\x7e\xf7\xaf\xf8\xde\x92\x20\xae\xd4\x6e\x2f\x83\x33\x0f\xd0\xb2\xa4\x35\x90\xda\x81\xad\xac\x30\x86\x3d\x50\xd2\x91\x44\x84\x26\x05\xf2\xd0\xae\x50\xe4\xbf\x17\x94\x2c\x5f\x8a\x14\x8d\x9e\x64\xf3\xbb\x9d\x0b\x15\xc7\xb8\x35\x4d\x6b\x65\x55\x33\x7e\x7b\xff\xe1\x0f\xa4\x35\xa1\x32\xef\x88\x6b\xb2\xe4\x37\x48\x3c\xd7\xc6\xba\x51\x1c\x23\xad\xa5\x43\x29\x15\x41\x3a\x34\xc2\x32\x4c\x09\xfe\x01\xaf\x64\x66\x85\x6d\xa3\x51\x1c\xf7\x9c\x57\x8f\x83\x42\x69\x89\xe0\x4c\xc9\x3b\x61\x69\x82\xd6\x78\xe4\x42\xc3\x52\x21\x1d\x5b\x99\x79\x26\x48\x86\xd0\x45\x6c\x2c\x36\xa6\x90\x65\x1b\x24\x25\xc3\xeb\x82\x6c\x67\xcd\x64\x37\x6e\xc8\xf1\x71\xfe\x84\x07\x72\x8e\x2c\x3e\x92\x26\x2b\x14\x1e\x7d\xa6\x64\x8e\x07\x99\x93\x76\x04\xe1\xd0\x84\x7f\x5c\x4d\x05\xb2\x4e\x2e\x10\xef\x43\x94\xd5\x3e\x0a\xee\x8d\xd7\x85\x60\x69\xf4\x18\x24\x43\x72\x6c\xc9\x3a\x69\x34\x7e\x1f\xac\xf6\x82\x63\x18\x1b\x44\x2e\x05\x87\x02\x2c\x4c\x13\x78\x57\x10\xba\x85\x12\x7c\xa4\xbe\xa1\x21\xc7\xba\x0b\x48\xdd\xd9\xd4\xa6\x21\x70\x2d\x38\x54\xbd\x93\x4a\x21\x23\x78\x47\xa5\x57\xe3\xa0\x96\x79\xc6\x97\x59\xfa\x69\xf1\x94\x22\x99\xaf\xf1\x25\x59\x2e\x93\x79\xba\xbe\xc1\x4e\x72\x6d\x3c\x83\xb6\xd4\x4b\xc9\x4d\xa3\x24\x15\xd8\x09\x6b\x85\xe6\x16\xa6\x0c\x0a\x9f\xef\x96\xb7\x9f\x92\x79\x9a\xfc\x3d\x7b\x98\xa5\x6b\x18\x8b\xfb\x59\x3a\xbf\x5b\xad\x70\xbf\x58\x22\xc1\x63\xb2\x4c\x67\xb7\x4f\x0f\xc9\x12\x8f\x4f\xcb\xc7\xc5\xea\x2e\xc2\x8a\x42\x2a\x0a\xfc\x5f\xf7\xbc\xec\xa6\x67\x09\x05\xb1\x90\xca\x0d\x9d\x58\x1b\x0f\x57\x1b\xaf\x0a\xd4\x62\x4b\xb0\x94\x93\xdc\x52\x01\x81\xdc\x34\xed\x9b\x87\x1a\xb4\x84\x32\xba\xea\x6a\xfe\xe9\x42\x62\x56\x42\x1b\x1e\xc3\x11\xe1\xcf\x9a\xb9\x99\xc4\xf1\x6e\xb7\x8b\x2a\xed\x23\x63\xab\x58\xf5\x72\x2e\xfe\x2b\x1a\x8d\xbe\x8d\x00\x20\x8e\x51\x4b\xc7\x61\x38\x41\x36\x37\x5e\x33\xd9\x6e\xdf\x4c\x93\x9b\x82\x90\xc9\xca\x8a\x8d\xeb\xd0\x01\x3a\xc1\xb7\x97\xf1\xc0\x55\xc2\xf1\xa2\x09\xec\xf0\x06\xd3\x90\xed\xd6\xaa\x3b\xef\x0f\x27\xb8\xb8\x38\xe0\xe9\x2b\xe5\x3e\x00\x50\x50\xc3\x75\xb0\xd9\x13\x0f\x8c\x7f\xc2\xc1\x04\xef\x0f\x1c\xc7\xd4\x39\x48\xbd\x35\xcf\x54\x74\xdd\xa6\x2d\xd9\x76\x48\xd8\x6d\x4f\x48\xff\xef\xe7\xbd\x01\xb9\xa8\x63\x07\xea\x04\xa5\xd7\x79\xf0\xbc\x54\xa6\x1a\xa3\xc8\xae\xd0\xd7\x1e\x9e\xad\x08\x1b\x8d\x29\x94\xa9\x22\xd3\x44\x6c\x56\x6c\xa5\xae\x2e\xaf\x6e\xce\x30\x7d\xdc\x1e\x56\x51\x1f\xf2\x14\x23\x4b\x5c\xee\x31\x53\x70\x2d\x5d\x74\xa8\xe5\xea\xe8\x36\xa8\x3d\x53\x8b\x13\xd8\xa2\xb9\xbe\x78\x77\x71\x6d\x9a\x9b\x33\x64\xd0\xec\x30\xa1\xed\xff\x3d\x53\xfb\xff\x0f\x52\xe1\x39\x07\x5c\x5f\x9f\x4b\xbc\x9c\xfd\x22\xe5\x08\xbf\x92\xc0\x14\x1f\x7e\x26\x72\x7c\x3b\xc9\x8e\x29\x4e\x93\x9f\x17\x8f\x69\xdf\xba\xfe\xfc\xb8\x38\xa5\xf0\x8a\x4f\xa7\xba\xab\xf7\xb7\x58\xe4\xec\x85\x3a\xd9\x14\x53\x42\xe8\x61\xd6\x65\x7f\xbf\x82\x4a\x27\xf1\xea\x74\x8f\x36\x96\xdc\x6b\x3e\x42\xa9\xce\xab\x17\x75\xfd\xed\xcc\x88\x34\x24\x87\x0d\xa6\x02\x66\x4b\x36\x7c\x99\x61\x89\xbd\xd5\x6e\x50\x0c\xb4\x52\x6a\xa1\x06\xed\xfd\x25\x66\x2b\x72\xa9\xab\x3e\x5a\x7f\x74\x92\x2d\xe7\xaf\xa7\x5b\xd7\x6b\x1e\x1b\x7f\xe8\xce\xcb\xe8\x7b\x00\x00\x00\xff\xff\x83\xb5\xcb\x27\xb0\x06\x00\x00") + +func bigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _bigram_tracerJs, + "bigram_tracer.js", + ) +} + +func bigram_tracerJs() (*asset, error) { + bytes, err := bigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "bigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, 
info: info, digest: [32]uint8{0x77, 0x6c, 0xd, 0x24, 0xf2, 0x49, 0xbd, 0x58, 0x8b, 0xb5, 0xd1, 0xc9, 0xcd, 0xcf, 0x5b, 0x3e, 0x5c, 0xfb, 0x14, 0x50, 0xe7, 0xe3, 0xb9, 0xd1, 0x54, 0x69, 0xe6, 0x5e, 0x45, 0xa6, 0x2c, 0x6c}} return a, nil } @@ -108,7 +133,7 @@ func call_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "call_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xf5, 0xb3, 0xb6, 0xe8, 0x19, 0xc3, 0xa, 0xce, 0xfd, 0x50, 0x84, 0xf7, 0x8a, 0xc5, 0x99, 0x10, 0x58, 0xc4, 0x69, 0xfb, 0x8, 0xad, 0x67, 0xea, 0x12, 0x38, 0xcb, 0xd, 0x2a, 0x94, 0xa1, 0x70}} return a, nil } @@ -128,7 +153,7 @@ func evmdis_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "evmdis_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd5, 0xe8, 0x96, 0xa1, 0x8b, 0xc, 0x68, 0x3c, 0xe8, 0x5d, 0x7e, 0xf0, 0xab, 0xfe, 0xec, 0xd1, 0xb, 0x3d, 0xfc, 0xc7, 0xac, 0xb5, 0xa, 0x41, 0x55, 0x0, 0x3a, 0x60, 0xa7, 0x8e, 0x46, 0x93}} return a, nil } @@ -148,7 +173,7 @@ func noop_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "noop_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x4d, 0xcc, 0x83, 0xe9, 0x9e, 0xc1, 0x56, 0x41, 0x6c, 0x6a, 0x3b, 0x46, 0xc9, 0x5f, 0xe1, 0x5b, 0xcd, 0x6b, 0x53, 0x45, 0xcd, 0xfe, 0x1e, 0x86, 0x8c, 0x6b, 0xb, 0x70, 0x73, 0x9f, 0x4c, 0xb1}} return a, nil } @@ -168,7 +193,7 @@ func opcount_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "opcount_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x27, 0xe, 0x97, 0x88, 0x9b, 0x53, 0xbb, 0x20, 0x44, 0xd8, 0xf5, 0xeb, 0x41, 0xd2, 0x7e, 0xd6, 0xda, 0x6b, 0xf5, 0xaf, 0x0, 0x75, 0x9f, 0xd9, 0x22, 0xc, 0x6e, 0x74, 0xac, 0x2a, 0xa9, 0xa7}} return a, nil } @@ -188,7 +213,47 @@ func prestate_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd0, 0xd5, 0x5, 0x92, 0xed, 0xf4, 0x69, 0x2e, 0x14, 0x48, 0x35, 0x67, 0xcc, 0xf2, 0x3e, 0xc7, 0xf, 0x18, 0x22, 0x7a, 0x4d, 0x6f, 0x31, 0xad, 0x3c, 0x92, 0x77, 0xb4, 0x1, 0x2a, 0xd3, 0x7c}} + return a, nil +} + +var _trigram_tracerJs = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x94\x4f\x6f\xe3\x36\x10\xc5\xef\xfe\x14\xaf\x27\x27\x88\xd7\x4a\xda\x4b\xe1\xd4\x05\xdc\x6c\xb2\x6b\x20\x6b\x07\xb6\xd2\x45\x10\xe4\x40\x4b\x23\x89\x08\x4d\x0a\xe4\xd0\x5e\x21\xc8\x77\x2f\xa8\x3f\xfe\x13\xb8\xed\xfa\x64\x70\xe6\xfd\xe6\xcd\x70\xc4\x28\xc2\x8d\x29\x2b\x2b\xf3\x82\xf1\xeb\xe5\xd5\xef\x88\x0b\x42\x6e\x3e\x11\x17\x64\xc9\xaf\x31\xf1\x5c\x18\xeb\x7a\x51\x84\xb8\x90\x0e\x99\x54\x04\xe9\x50\x0a\xcb\x30\x19\xf8\x43\xbe\x92\x2b\x2b\x6c\x35\xec\x45\x51\xa3\x39\x19\x0e\x84\xcc\x12\xc1\x99\x8c\xb7\xc2\xd2\x08\x95\xf1\x48\x84\x86\xa5\x54\x3a\xb6\x72\xe5\x99\x20\x19\x42\xa7\x91\xb1\x58\x9b\x54\x66\x55\x40\x4a\x86\xd7\x29\xd9\xba\x34\x93\x5d\xbb\xce\xc7\x97\xd9\x23\xee\xc9\x39\xb2\xf8\x42\x9a\xac\x50\x78\xf0\x2b\x25\x13\xdc\xcb\x84\xb4\x23\x08\x87\x32\x9c\xb8\x82\x52\xac\x6a\x5c\x10\xde\x05\x2b\xcb\xd6\x0a\xee\x8c\xd7\xa9\x60\x69\xf4\x00\x24\x83\x73\x6c\xc8\x3a\x69\x34\x7e\xeb\x4a\xb5\xc0\x01\x8c\x0d\x90\x33\xc1\xa1\x01\x0b\x53\x06\xdd\x39\x84\xae\xa0\x04\xef\xa5\x3f\x31\x90\x7d\xdf\x29\xa4\xae\xcb\x14\xa6\x24\x70\x21\x38\x74\xbd\x95\x4a\x61\x45\xf0\x8e\x32\xaf\x06\x81\xb6\xf2\x8c\xef\xd3\xf8\xeb\xfc\x31\xc6\x64\xf6\x84\xef\x93\xc5\x62\x32\x8b\x9f\xae\xb1\x95\x5c\x18\xcf\xa0\x0d\x35\x28\xb9\x2e\x95\xa4\x14\x5b\x61\xad\xd0\x5c\xc1\x64\x81\xf0\xed\x76\x71\xf3\x75\x32\x8b\x27\x7f\x4d\xef\xa7\xf1\x13\x8c\xc5\xdd\x34\x9e\xdd\x2e\x97\xb8\x9b\x2f\x30\xc1\xc3\x64\x11\x4f\x6f\x1e\xef\x27\x0b\x3c\x3c\x2e\x1e\xe6\xcb\xdb\x21\x96\x14\x5c\x51\xd0\xff\xff\xcc\xb3\xfa\xf6\x2c\x21\x25\x16\x52\xb9\x6e\x12\x4f\xc6\xc3\x15\xc6\xab\x14\x85\xd8\x10\x2c\x25\x24\x37\x94\x42\x20\x31\x65\xf5\xd3\x97\x1a\x58\x42\x19\x9d\xd7\x3d\xff\xeb\x42\x62\x9a\x41\x1b\x1e\xc0\x11\xe1\x8f\x82\xb9\x1c\x45\xd1\x76\xbb\x1d\xe6\xda\x0f\x8d\xcd\x23\xd5\xe0\x5c\xf4\xe7\xb0\xd7\x7b\xeb\x01\x40\x14\xa1\x90\x8e\xc3\xe5\x04\xec\x5a\x94\xb5\x2b\x2b\x73\x2b\xd6\x48\x8c\xd7\x4c\xd6\xd5\xa9\x21\x6f\x84\xb7\xf7\x41\x27\x54\xc2\xf1\xbc\x0c\xd2\xf0\x0f\xa6\x24\x5b\xef\x54\x1d\x6f\x82\x6e\x84\xe7\x7e\x7f\xd0\xef\xbf\x0c\x76\xa7\x9f\xa9\xe4\x62\x84\xcb\xe6\xa4\x65\x39\xa6\x9a\x24\xf5\xc6\xbc\x52\x5a\x8f\x94\x36\x64\x2b\x98\x32\x31\x69\xbb\x22\xc1\xe2\xdf\xdf\x40\x3f\x28\xf1\x4c\x6e\x58\x13\x82\x74\x84\xcc\xeb\x24\x14\x3f\x53\x26\x1f\x20\x5d\x9d\xe3\x6d\xc7\xdf\x08\x8b\x34\x54\xc5\x18\xca\xe4\xc3\x9c\x1a\x13\x67\xe7\xd7\xbb\x1c\x99\xe1\xac\xc9\xf9\x65\x0c\x2e\xa4\x1b\xee\xbc\x9e\xef\x49\xe1\xb7\x0b\xce\x4b\x87\x71\xd7\xdf\xf5\xe9\x9c\xcf\x6d\xd9\x1a\x7d\x9c\x63\x89\xbd\xd5\xfb\xb3\xf7\x23\xbf\xa6\x6c\xcd\x9a\x72\xc8\x66\xc9\x56\xea\xfc\xd0\x6f\xc8\x79\xa5\x0a\xe3\x23\x3f\xcf\x97\x2f\x17\xfd\x4f\xfd\x8b\xa3\xb3\xab\xe6\xcc\x94\xc7\xdd\xd6\x39\xe1\x52\x9f\x5f\xa9\x7a\x39\xd5\xe4\x2e\x78\x71\x71\xca\x26\x29\x47\xf8\x2f\x19\xc6\xb8\x3a\x25\xfc\xe0\xf8\x63\x0f\x57\x07\xc3\xfc\x10\xc0\x18\x5d\x1b\xfb\x3d\xcc\x84\x57\x7c\xb8\x3c\xdb\xa2\x7d\x11\x44\xc2\x5e\xa8\x76\x5f\xc2\xeb\x66\x32\x08\xdd\xad\x54\xd6\x7c\xab\x81\x52\x23\x4e\x2e\xd1\xbe\x8c\x25\x77\xaa\x8e\x50\xaa\xae\xd5\x40\x5d\xf3\xa5\xaf\x88\x34\x24\x87\x0f\x82\x52\x98\x0d\xd9\xf0\xca\xb7\x57\xee\x3a\x62\x90\x65\x52\x0b\xd5\xb1\xdb\x07\x81\xad\x48\xa4\xce\x1b\x6b\x4d\xe8\xc0\x5b\xc2\x3f\x0e\x97\xbb\x61\xee\x27\xbf\x9b\xce\x7b\xef\x9f\x00\x00\x00\xff\xff\xb3\x93\x16\xd5\xfc\x06\x00\x00") + +func trigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _trigram_tracerJs, + "trigram_tracer.js", + ) +} + +func trigram_tracerJs() (*asset, error) { + bytes, err := trigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := 
bindataFileInfo{name: "trigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x40, 0x63, 0xe1, 0x42, 0x60, 0x7, 0x1b, 0x79, 0x47, 0x1, 0xa1, 0xbf, 0xc4, 0x66, 0x19, 0x9b, 0x2b, 0x5a, 0x1f, 0x82, 0x3d, 0xcf, 0xee, 0xe7, 0x60, 0x25, 0x2c, 0x4f, 0x13, 0x97, 0xc7, 0x18}} + return a, nil +} + +var _unigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x4d\x6f\xdb\x46\x10\xbd\xeb\x57\xbc\xa3\x8c\xa8\xa4\xd3\x5e\x0a\xa5\x09\xc0\x1a\x76\x22\xc0\x91\x0d\x89\x6e\x60\x14\x3d\x2c\xc9\x21\xb9\xe8\x6a\x87\xd8\x9d\x95\x42\x04\xfa\xef\xc5\x92\xa2\xe5\x1a\x6e\x13\x9e\x04\xcd\xbc\x8f\x79\x33\x64\x9a\xe2\x8a\xbb\xde\xe9\xa6\x15\xfc\x7c\xf9\xf6\x57\xe4\x2d\xa1\xe1\x9f\x48\x5a\x72\x14\x76\xc8\x82\xb4\xec\xfc\x2c\x4d\x91\xb7\xda\xa3\xd6\x86\xa0\x3d\x3a\xe5\x04\x5c\x43\x5e\xf4\x1b\x5d\x38\xe5\xfa\x64\x96\xa6\x23\xe6\xd5\x72\x64\xa8\x1d\x11\x3c\xd7\x72\x50\x8e\x96\xe8\x39\xa0\x54\x16\x8e\x2a\xed\xc5\xe9\x22\x08\x41\x0b\x94\xad\x52\x76\xd8\x71\xa5\xeb\x3e\x52\x6a\x41\xb0\x15\xb9\x41\x5a\xc8\xed\xfc\xe4\xe3\xe3\xfa\x01\xb7\xe4\x3d\x39\x7c\x24\x4b\x4e\x19\xdc\x87\xc2\xe8\x12\xb7\xba\x24\xeb\x09\xca\xa3\x8b\xff\xf8\x96\x2a\x14\x03\x5d\x04\xde\x44\x2b\xdb\x93\x15\xdc\x70\xb0\x95\x12\xcd\x76\x01\xd2\xd1\x39\xf6\xe4\xbc\x66\x8b\x5f\x26\xa9\x13\xe1\x02\xec\x22\xc9\x5c\x49\x1c\xc0\x81\xbb\x88\xbb\x80\xb2\x3d\x8c\x92\x33\xf4\x07\x02\x39\xcf\x5d\x41\xdb\x41\xa6\xe5\x8e\x20\xad\x92\x38\xf5\x41\x1b\x83\x82\x10\x3c\xd5\xc1\x2c\x22\x5b\x11\x04\x5f\x56\xf9\xa7\xbb\x87\x1c\xd9\xfa\x11\x5f\xb2\xcd\x26\x5b\xe7\x8f\xef\x70\xd0\xd2\x72\x10\xd0\x9e\x46\x2a\xbd\xeb\x8c\xa6\x0a\x07\xe5\x9c\xb2\xd2\x83\xeb\xc8\xf0\xf9\x7a\x73\xf5\x29\x5b\xe7\xd9\xef\xab\xdb\x55\xfe\x08\x76\xb8\x59\xe5\xeb\xeb\xed\x16\x37\x77\x1b\x64\xb8\xcf\x36\xf9\xea\xea\xe1\x36\xdb\xe0\xfe\x61\x73\x7f\xb7\xbd\x4e\xb0\xa5\xe8\x8a\x22\xfe\xfb\x99\xd7\xc3\xf6\x1c\xa1\x22\x51\xda\xf8\x29\x89\x47\x0e\xf0\x2d\x07\x53\xa1\x55\x7b\x82\xa3\x92\xf4\x9e\x2a\x28\x94\xdc\xf5\x3f\xbc\xd4\xc8\xa5\x0c\xdb\x66\x98\xf9\x3f\x0f\x12\xab\x1a\x96\x65\x01\x4f\x84\xdf\x5a\x91\x6e\x99\xa6\x87\xc3\x21\x69\x6c\x48\xd8\x35\xa9\x19\xe9\x7c\xfa\x21\x99\xcd\xbe\xcd\x00\x20\x4d\xd1\x6a\x2f\x71\x39\x91\x76\xa7\xba\xe8\x8a\xbb\x92\x2b\xf2\x10\x46\xc9\xc1\x0a\x39\x3f\x74\xc7\xd6\x25\xbe\x1d\x17\x13\xd6\x72\xe7\xc7\x16\x0f\x1b\x76\x05\xb9\x11\x3e\xb6\xc7\xea\x12\x97\x4f\xdd\x5e\xa8\x8b\x4a\xda\xee\xf9\x6f\xaa\x86\xdc\x68\x4f\xae\x3f\x09\x8e\x77\x10\x7d\xfc\xf1\x19\xf4\x95\xca\x20\xe4\x93\x01\x1d\xa1\x4b\xd4\xc1\x96\xf1\xfa\xe6\x86\x9b\x05\xaa\xe2\x02\xe3\x14\xf1\xd9\xab\x78\x9b\x78\x0f\xc3\x4d\xc2\x5d\x22\xbc\x15\xa7\x6d\x33\xbf\x78\xf7\xd4\xa3\x6b\xcc\xa5\xd5\x3e\x89\x83\xfc\xc9\xdd\x5f\x17\x67\x7c\x7c\xfe\x55\x7b\xf3\xe6\x0c\x3c\x3e\xfd\x22\xe3\x09\xff\x83\xc2\x7b\xbc\x7d\x0d\x37\x34\xc5\x40\x26\xda\x73\x88\xb5\x0a\x46\x9e\xe7\x72\x68\x4f\x17\xad\x4a\x09\xca\x9c\xa2\x88\x6f\x27\xd7\x50\x76\x4a\xab\x1e\x6f\x2d\xb2\x0c\x14\xaf\xe6\x73\x5c\xcc\x26\x1d\x47\xfe\x35\x21\x65\xcc\x20\x36\x2d\x7d\x38\xd5\x82\xc8\x42\x0b\x39\x15\xdf\x55\xde\x93\x8b\x9f\x29\x38\x92\xe0\xac\x9f\x18\x23\xac\xd6\x56\x99\x89\xfb\x74\xd1\xe2\x54\xa9\x6d\x33\x7a\x1b\x4b\xcf\xcc\x95\xf2\xf5\xf9\xe2\x74\x3d\x7f\x0a\x07\x1f\x70\xf9\x62\x27\xa3\xe4\x39\xe4\x97\xe1\x1e\x17\xb3\xe3\xec\x9f\x00\x00\x00\xff\xff\x8d\xba\x8d\xa8\xe6\x05\x00\x00") + +func unigram_tracerJsBytes() ([]byte, error) { + return bindataRead( + _unigram_tracerJs, + "unigram_tracer.js", + ) +} + +func unigram_tracerJs() (*asset, error) { + bytes, err := 
unigram_tracerJsBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "unigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2f, 0x36, 0x14, 0xc2, 0xf6, 0xc3, 0x80, 0x2b, 0x4a, 0x11, 0x7d, 0xd5, 0x3e, 0xef, 0x23, 0xb5, 0xd6, 0xe6, 0xe6, 0x5, 0x41, 0xf6, 0x14, 0x7a, 0x39, 0xf7, 0xf8, 0xac, 0x89, 0x8e, 0x43, 0xe6}} return a, nil } @@ -207,6 +272,12 @@ func Asset(name string) ([]byte, error) { return nil, fmt.Errorf("Asset %s not found", name) } +// AssetString returns the asset contents as a string (instead of a []byte). +func AssetString(name string) (string, error) { + data, err := Asset(name) + return string(data), err +} + // MustAsset is like Asset but panics when Asset would return an error. // It simplifies safe initialization of global variables. func MustAsset(name string) []byte { @@ -218,6 +289,12 @@ func MustAsset(name string) []byte { return a } +// MustAssetString is like AssetString but panics when Asset would return an +// error. It simplifies safe initialization of global variables. +func MustAssetString(name string) string { + return string(MustAsset(name)) +} + // AssetInfo loads and returns the asset info for the given name. // It returns an error if the asset could not be found or // could not be loaded. @@ -233,6 +310,33 @@ func AssetInfo(name string) (os.FileInfo, error) { return nil, fmt.Errorf("AssetInfo %s not found", name) } +// AssetDigest returns the digest of the file with the given name. It returns an +// error if the asset could not be found or the digest could not be loaded. +func AssetDigest(name string) ([sha256.Size]byte, error) { + canonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[canonicalName]; ok { + a, err := f() + if err != nil { + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) + } + return a.digest, nil + } + return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) +} + +// Digests returns a map of all known files and their checksums. +func Digests() (map[string][sha256.Size]byte, error) { + mp := make(map[string][sha256.Size]byte, len(_bindata)) + for name := range _bindata { + a, err := _bindata[name]() + if err != nil { + return nil, err + } + mp[name] = a.digest + } + return mp, nil +} + // AssetNames returns the names of the assets. 
func AssetNames() []string { names := make([]string, 0, len(_bindata)) @@ -246,6 +350,8 @@ func AssetNames() []string { var _bindata = map[string]func() (*asset, error){ "4byte_tracer.js": _4byte_tracerJs, + "bigram_tracer.js": bigram_tracerJs, + "call_tracer.js": call_tracerJs, "evmdis_tracer.js": evmdis_tracerJs, @@ -255,6 +361,10 @@ var _bindata = map[string]func() (*asset, error){ "opcount_tracer.js": opcount_tracerJs, "prestate_tracer.js": prestate_tracerJs, + + "trigram_tracer.js": trigram_tracerJs, + + "unigram_tracer.js": unigram_tracerJs, } // AssetDir returns the file names below a certain @@ -266,9 +376,9 @@ var _bindata = map[string]func() (*asset, error){ // img/ // a.png // b.png -// then AssetDir("data") would return []string{"foo.txt", "img"} -// AssetDir("data/img") would return []string{"a.png", "b.png"} -// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// then AssetDir("data") would return []string{"foo.txt", "img"}, +// AssetDir("data/img") would return []string{"a.png", "b.png"}, +// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and // AssetDir("") will return []string{"data"}. func AssetDir(name string) ([]string, error) { node := _bintree @@ -299,14 +409,17 @@ type bintree struct { var _bintree = &bintree{nil, map[string]*bintree{ "4byte_tracer.js": {_4byte_tracerJs, map[string]*bintree{}}, + "bigram_tracer.js": {bigram_tracerJs, map[string]*bintree{}}, "call_tracer.js": {call_tracerJs, map[string]*bintree{}}, "evmdis_tracer.js": {evmdis_tracerJs, map[string]*bintree{}}, "noop_tracer.js": {noop_tracerJs, map[string]*bintree{}}, "opcount_tracer.js": {opcount_tracerJs, map[string]*bintree{}}, "prestate_tracer.js": {prestate_tracerJs, map[string]*bintree{}}, + "trigram_tracer.js": {trigram_tracerJs, map[string]*bintree{}}, + "unigram_tracer.js": {unigram_tracerJs, map[string]*bintree{}}, }} -// RestoreAsset restores an asset under the given directory +// RestoreAsset restores an asset under the given directory. func RestoreAsset(dir, name string) error { data, err := Asset(name) if err != nil { @@ -327,7 +440,7 @@ func RestoreAsset(dir, name string) error { return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) } -// RestoreAssets restores an asset under the given directory recursively +// RestoreAssets restores an asset under the given directory recursively. func RestoreAssets(dir, name string) error { children, err := AssetDir(name) // File diff --git a/eth/tracers/internal/tracers/bigram_tracer.js b/eth/tracers/internal/tracers/bigram_tracer.js new file mode 100644 index 0000000000..421c360af9 --- /dev/null +++ b/eth/tracers/internal/tracers/bigram_tracer.js @@ -0,0 +1,47 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +{ + // hist is the counters of opcode bigrams + hist: {}, + // lastOp is the last operation + lastOp: '', + // execution depth of last op + lastDepth: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var op = log.op.toString(); + var depth = log.getDepth(); + if (depth == this.lastDepth){ + var key = this.lastOp+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + } + this.lastOp = op; + this.lastDepth = depth; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/eth/tracers/internal/tracers/trigram_tracer.js b/eth/tracers/internal/tracers/trigram_tracer.js new file mode 100644 index 0000000000..8756490dfc --- /dev/null +++ b/eth/tracers/internal/tracers/trigram_tracer.js @@ -0,0 +1,49 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +{ + // hist is the map of trigram counters + hist: {}, + // lastOps are the last two operations + lastOps: ['',''], + lastDepth: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var depth = log.getDepth(); + if (depth != this.lastDepth){ + this.lastOps = ['','']; + this.lastDepth = depth; + return; + } + var op = log.op.toString(); + var key = this.lastOps[0]+'-'+this.lastOps[1]+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + this.lastOps[0] = this.lastOps[1]; + this.lastOps[1] = op; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/eth/tracers/internal/tracers/unigram_tracer.js b/eth/tracers/internal/tracers/unigram_tracer.js new file mode 100644 index 0000000000..000fb13b1e --- /dev/null +++ b/eth/tracers/internal/tracers/unigram_tracer.js @@ -0,0 +1,43 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details.
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +{ + // hist is the map of opcodes to counters + hist: {}, + // nops counts the total number of ops + nops: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var op = log.op.toString(); + if (this.hist[op]){ + this.hist[op]++; + } + else { + this.hist[op] = 1; + } + this.nops++; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + if(this.nops > 0){ + return this.hist; + } + }, +} diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index b482245878..b40837c8cc 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -141,7 +141,9 @@ func (ec *Client) getBlock(ctx context.Context, method string, args ...interface // Fill the sender cache of transactions in the block. txs := make([]*types.Transaction, len(body.Transactions)) for i, tx := range body.Transactions { - setSenderFromServer(tx.tx, tx.From, body.Hash) + if tx.From != nil { + setSenderFromServer(tx.tx, *tx.From, body.Hash) + } txs[i] = tx.tx } return types.NewBlockWithHeader(head).WithBody(txs, uncles), nil @@ -174,9 +176,9 @@ type rpcTransaction struct { } type txExtraInfo struct { - BlockNumber *string - BlockHash common.Hash - From common.Address + BlockNumber *string `json:"blockNumber,omitempty"` + BlockHash *common.Hash `json:"blockHash,omitempty"` + From *common.Address `json:"from,omitempty"` } func (tx *rpcTransaction) UnmarshalJSON(msg []byte) error { @@ -197,7 +199,9 @@ func (ec *Client) TransactionByHash(ctx context.Context, hash common.Hash) (tx * } else if _, r, _ := json.tx.RawSignatureValues(); r == nil { return nil, false, fmt.Errorf("server returned transaction without signature") } - setSenderFromServer(json.tx, json.From, json.BlockHash) + if json.From != nil && json.BlockHash != nil { + setSenderFromServer(json.tx, *json.From, *json.BlockHash) + } return json.tx, json.BlockNumber == nil, nil } @@ -244,7 +248,9 @@ func (ec *Client) TransactionInBlock(ctx context.Context, blockHash common.Hash, return nil, fmt.Errorf("server returned transaction without signature") } } - setSenderFromServer(json.tx, json.From, json.BlockHash) + if json.From != nil && json.BlockHash != nil { + setSenderFromServer(json.tx, *json.From, *json.BlockHash) + } return json.tx, err } diff --git a/ethdb/database.go b/ethdb/database.go index 001d8f0bb9..e32c912f93 100644 --- a/ethdb/database.go +++ b/ethdb/database.go @@ -141,6 +141,7 @@ func (db *LDBDatabase) Close() { if err := <-errc; err != nil { db.log.Error("Metrics collection failed", "err", err) } + db.quitChan = nil } err := db.db.Close() if err == nil { @@ -189,7 +190,7 @@ func (db *LDBDatabase) Meter(prefix string) { // 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000 // // This is what the write delay looks like (currently): -// DelayN:5 Delay:406.604657ms +// DelayN:5 Delay:406.604657ms Paused: false // // This is what the iostats look like (currently): // Read(MB):3895.04860 Write(MB):3654.64712 @@ -207,15 +208,22 @@ func (db *LDBDatabase) meter(refresh time.Duration) { delaystats [2]int64 lastWriteDelay time.Time lastWriteDelayN time.Time + lastWritePaused time.Time + ) + + var ( + errc chan error + merr error ) // Iterate ad infinitum and collect the stats - for i := 1; ; i++ { + for i := 1;
errc == nil && merr == nil; i++ { // Retrieve the database stats stats, err := db.db.GetProperty("leveldb.stats") if err != nil { db.log.Error("Failed to read database stats", "err", err) - return + merr = err + continue } // Find the compaction table, skip the header lines := strings.Split(stats, "\n") @@ -224,7 +232,8 @@ } if len(lines) <= 3 { db.log.Error("Compaction table not found") - return + merr = errors.New("compaction table not found") + continue } lines = lines[3:] @@ -241,7 +250,8 @@ value, err := strconv.ParseFloat(strings.TrimSpace(counter), 64) if err != nil { db.log.Error("Compaction entry parsing failed", "err", err) - return + merr = err + continue } compactions[i%2][idx] += value } @@ -261,21 +271,25 @@ writedelay, err := db.db.GetProperty("leveldb.writedelay") if err != nil { db.log.Error("Failed to read database write delay statistic", "err", err) - return + merr = err + continue } var ( delayN int64 delayDuration string duration time.Duration + paused bool ) - if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s", &delayN, &delayDuration); n != 2 || err != nil { + if n, err := fmt.Sscanf(writedelay, "DelayN:%d Delay:%s Paused:%t", &delayN, &delayDuration, &paused); n != 3 || err != nil { db.log.Error("Write delay statistic not found") - return + merr = err + continue } duration, err = time.ParseDuration(delayDuration) if err != nil { db.log.Error("Failed to parse delay duration", "err", err) - return + merr = err + continue } if db.writeDelayNMeter != nil { db.writeDelayNMeter.Mark(delayN - delaystats[0]) @@ -301,59 +315,61 @@ lastWriteDelay = time.Now() } } + // If a warning that db is performing compaction has been displayed, any subsequent + // warnings will be withheld for one minute so as not to overwhelm the user. + if paused && delayN-delaystats[0] == 0 && duration.Nanoseconds()-delaystats[1] == 0 && + time.Now().After(lastWritePaused.Add(writeDelayWarningThrottler)) { + db.log.Warn("Database compacting, degraded performance") + lastWritePaused = time.Now() + } + delaystats[0], delaystats[1] = delayN, duration.Nanoseconds() // Retrieve the database iostats.
ioStats, err := db.db.GetProperty("leveldb.iostats") if err != nil { db.log.Error("Failed to read database iostats", "err", err) - return + merr = err + continue } + var nRead, nWrite float64 parts := strings.Split(ioStats, " ") if len(parts) < 2 { db.log.Error("Bad syntax of ioStats", "ioStats", ioStats) - return + merr = fmt.Errorf("bad syntax of ioStats %s", ioStats) + continue } - r := strings.Split(parts[0], ":") - if len(r) < 2 { + if n, err := fmt.Sscanf(parts[0], "Read(MB):%f", &nRead); n != 1 || err != nil { db.log.Error("Bad syntax of read entry", "entry", parts[0]) - return + merr = err + continue } - read, err := strconv.ParseFloat(r[1], 64) - if err != nil { - db.log.Error("Read entry parsing failed", "err", err) - return - } - w := strings.Split(parts[1], ":") - if len(w) < 2 { + if n, err := fmt.Sscanf(parts[1], "Write(MB):%f", &nWrite); n != 1 || err != nil { db.log.Error("Bad syntax of write entry", "entry", parts[1]) - return - } - write, err := strconv.ParseFloat(w[1], 64) - if err != nil { - db.log.Error("Write entry parsing failed", "err", err) - return + merr = err + continue } if db.diskReadMeter != nil { - db.diskReadMeter.Mark(int64((read - iostats[0]) * 1024 * 1024)) + db.diskReadMeter.Mark(int64((nRead - iostats[0]) * 1024 * 1024)) } if db.diskWriteMeter != nil { - db.diskWriteMeter.Mark(int64((write - iostats[1]) * 1024 * 1024)) + db.diskWriteMeter.Mark(int64((nWrite - iostats[1]) * 1024 * 1024)) } - iostats[0] = read - iostats[1] = write + iostats[0], iostats[1] = nRead, nWrite // Sleep a bit, then repeat the stats collection select { - case errc := <-db.quitChan: + case errc = <-db.quitChan: // Quit requesting, stop hammering the database - errc <- nil - return - case <-time.After(refresh): // Timeout, gather a new set of stats } } + + if errc == nil { + errc = <-db.quitChan + } + errc <- merr } func (db *LDBDatabase) NewBatch() Batch { @@ -372,6 +388,12 @@ func (b *ldbBatch) Put(key, value []byte) error { return nil } +func (b *ldbBatch) Delete(key []byte) error { + b.b.Delete(key) + b.size += 1 + return nil +} + func (b *ldbBatch) Write() error { return b.db.Write(b.b, nil) } @@ -437,6 +459,10 @@ func (tb *tableBatch) Put(key, value []byte) error { return tb.batch.Put(append([]byte(tb.prefix), key...), value) } +func (tb *tableBatch) Delete(key []byte) error { + return tb.batch.Delete(append([]byte(tb.prefix), key...)) +} + func (tb *tableBatch) Write() error { return tb.batch.Write() } diff --git a/ethdb/interface.go b/ethdb/interface.go index 5373120030..af13557798 100644 --- a/ethdb/interface.go +++ b/ethdb/interface.go @@ -25,12 +25,17 @@ type Putter interface { Put(key []byte, value []byte) error } +// Deleter wraps the database delete operation supported by both batches and regular databases. +type Deleter interface { + Delete(key []byte) error +} + // Database wraps all database operations. All methods are safe for concurrent use. type Database interface { Putter + Deleter Get(key []byte) ([]byte, error) Has(key []byte) (bool, error) - Delete(key []byte) error Close() NewBatch() Batch } @@ -39,6 +44,7 @@ type Database interface { // when Write is called. Batch cannot be used concurrently. 
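The ethdb change above introduces a Deleter interface alongside Putter, so deletions can be queued in batches too. A self-contained sketch of the tombstone idea the in-memory batch uses below, with toy types rather than the real ethdb ones (a nil value marks a pending delete):

```go
package main

import "fmt"

// kv is a queued batch operation; a nil value is a delete tombstone.
type kv struct{ k, v []byte }

type batch struct {
	db     map[string][]byte
	writes []kv
}

func (b *batch) Put(key, value []byte) error {
	b.writes = append(b.writes, kv{append([]byte(nil), key...), append([]byte(nil), value...)})
	return nil
}

func (b *batch) Delete(key []byte) error {
	b.writes = append(b.writes, kv{append([]byte(nil), key...), nil})
	return nil
}

// Write replays the queue in order, so a later delete wins over an earlier put.
func (b *batch) Write() error {
	for _, op := range b.writes {
		if op.v == nil {
			delete(b.db, string(op.k))
			continue
		}
		b.db[string(op.k)] = op.v
	}
	return nil
}

func main() {
	b := &batch{db: map[string][]byte{"stale": []byte("x")}}
	b.Put([]byte("fresh"), []byte("y"))
	b.Delete([]byte("stale"))
	b.Write()
	fmt.Println(b.db) // map[fresh:[121]]
}
```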
type Batch interface { Putter + Deleter ValueSize() int // amount of data in the batch Write() error // Reset resets the batch for reuse diff --git a/ethdb/memory_database.go b/ethdb/memory_database.go index c57042920a..f28ff54818 100644 --- a/ethdb/memory_database.go +++ b/ethdb/memory_database.go @@ -110,11 +110,20 @@ func (b *memBatch) Put(key, value []byte) error { return nil } +func (b *memBatch) Delete(key []byte) error { + b.writes = append(b.writes, kv{common.CopyBytes(key), nil}) + return nil +} + func (b *memBatch) Write() error { b.db.lock.Lock() defer b.db.lock.Unlock() for _, kv := range b.writes { + if kv.v == nil { + delete(b.db.db, string(kv.k)) + continue + } b.db.db[string(kv.k)] = kv.v } return nil } diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index a15d846150..9f3d7237e9 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -49,7 +49,7 @@ const ( // history request. historyUpdateRange = 50 - // txChanSize is the size of channel listening to TxPreEvent. + // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. @@ -57,9 +57,9 @@ const ( ) type txPool interface { - // SubscribeTxPreEvent should return an event subscription of - // TxPreEvent and send events to the given channel. - SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription + // SubscribeNewTxsEvent should return an event subscription of - // NewTxsEvent and send events to the given channel. + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription } type blockChain interface { @@ -150,8 +150,8 @@ func (s *Service) loop() { headSub := blockchain.SubscribeChainHeadEvent(chainHeadCh) defer headSub.Unsubscribe() - txEventCh := make(chan core.TxPreEvent, txChanSize) - txSub := txpool.SubscribeTxPreEvent(txEventCh) + txEventCh := make(chan core.NewTxsEvent, txChanSize) + txSub := txpool.SubscribeNewTxsEvent(txEventCh) defer txSub.Unsubscribe() // Start a goroutine that exhausts the subscriptions to avoid events piling up @@ -362,7 +362,7 @@ type nodeInfo struct { // authMsg is the authentication info needed to log in to a monitoring server. type authMsg struct { - Id string `json:"id"` + ID string `json:"id"` Info nodeInfo `json:"info"` Secret string `json:"secret"` } @@ -381,7 +381,7 @@ func (s *Service) login(conn *websocket.Conn) error { protocol = fmt.Sprintf("les/%d", les.ClientProtocolVersions[0]) } auth := &authMsg{ - Id: s.node, + ID: s.node, Info: nodeInfo{ Name: s.node, Node: infos.Name, @@ -499,7 +499,7 @@ func (s uncleStats) MarshalJSON() ([]byte, error) { return []byte("[]"), nil } -// reportBlock retrieves the current chain head and repors it to the stats server. +// reportBlock retrieves the current chain head and reports it to the stats server. 
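The rename from TxPreEvent to NewTxsEvent also changes the payload from a single transaction to a batch. A minimal sketch of producing and consuming such a feed with the event package; the txsEvent type here is a hypothetical stand-in for core.NewTxsEvent:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

// txsEvent mimics core.NewTxsEvent: one event carries a batch of transactions.
type txsEvent struct{ Txs []string }

func main() {
	var feed event.Feed
	ch := make(chan txsEvent, 16)
	sub := feed.Subscribe(ch)
	defer sub.Unsubscribe()

	// One send now delivers a whole batch instead of one event per tx.
	feed.Send(txsEvent{Txs: []string{"tx1", "tx2"}})

	ev := <-ch
	fmt.Println("received batch of", len(ev.Txs), "transactions")
}
```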
func (s *Service) reportBlock(conn *websocket.Conn, block *types.Block) error { // Gather the block details from the header or block chain details := s.assembleBlockStats(block) diff --git a/interfaces.go b/interfaces.go index 1ae1eba48a..a8b48c93d7 100644 --- a/interfaces.go +++ b/interfaces.go @@ -144,7 +144,7 @@ type FilterQuery struct { // {} or nil matches any topic list // {{A}} matches topic A in first position // {{}, {B}} matches any topic in first position, B in second position - // {{A}}, {B}} matches topic A in first position, B in second position + // {{A}, {B}} matches topic A in first position, B in second position // {{A, B}, {C, D}} matches topic (A OR B) in first position, (C OR D) in second position Topics [][]common.Hash } diff --git a/internal/debug/api.go b/internal/debug/api.go index 048b7d7635..86a4218f6a 100644 --- a/internal/debug/api.go +++ b/internal/debug/api.go @@ -21,6 +21,7 @@ package debug import ( + "bytes" "errors" "io" "os" @@ -190,9 +191,9 @@ func (*HandlerT) WriteMemProfile(file string) error { // Stacks returns a printed representation of the stacks of all goroutines. func (*HandlerT) Stacks() string { - buf := make([]byte, 1024*1024) - buf = buf[:runtime.Stack(buf, true)] - return string(buf) + buf := new(bytes.Buffer) + pprof.Lookup("goroutine").WriteTo(buf, 2) + return buf.String() } // FreeOSMemory returns unused memory to the OS. diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index a524dbadd8..a465a59046 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -62,8 +62,9 @@ func NewPublicEthereumAPI(b Backend) *PublicEthereumAPI { } // GasPrice returns a suggestion for a gas price. -func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*big.Int, error) { - return s.b.SuggestPrice(ctx) +func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error) { + price, err := s.b.SuggestPrice(ctx) + return (*hexutil.Big)(price), err } // ProtocolVersion returns the current Ethereum protocol version this node supports @@ -354,7 +355,7 @@ func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args SendTxArgs var chainID *big.Int if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) { - chainID = config.ChainId + chainID = config.ChainID } return wallet.SignTxWithPassphrase(account, passwd, tx, chainID) } @@ -460,13 +461,11 @@ func (s *PrivateAccountAPI) EcRecover(ctx context.Context, data, sig hexutil.Byt } sig[64] -= 27 // Transform yellow paper V from 27/28 to 0/1 - rpk, err := crypto.Ecrecover(signHash(data), sig) + rpk, err := crypto.SigToPub(signHash(data), sig) if err != nil { return common.Address{}, err } - pubKey := crypto.ToECDSAPub(rpk) - recoveredAddr := crypto.PubkeyToAddress(*pubKey) - return recoveredAddr, nil + return crypto.PubkeyToAddress(*rpk), nil } // SignAndSendTransaction was renamed to SendTransaction. This method is deprecated @@ -487,21 +486,20 @@ func NewPublicBlockChainAPI(b Backend) *PublicBlockChainAPI { } // BlockNumber returns the block number of the chain head. -func (s *PublicBlockChainAPI) BlockNumber() *big.Int { +func (s *PublicBlockChainAPI) BlockNumber() hexutil.Uint64 { header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available - return header.Number + return hexutil.Uint64(header.Number.Uint64()) } // GetBalance returns the amount of wei for the given address in the state of the // given block number. 
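Several methods above switch from *big.Int to hexutil wrapper types, so JSON-RPC responses serialize as 0x-prefixed hex quantities without extra glue code. A small sketch of that encoding behavior (the values are arbitrary):

```go
package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	price := big.NewInt(18000000000) // 18 gwei, arbitrary
	out := map[string]interface{}{
		"gasPrice":    (*hexutil.Big)(price),
		"blockNumber": hexutil.Uint64(5783000),
	}
	b, _ := json.Marshal(out)
	fmt.Println(string(b)) // {"blockNumber":"0x583dd8","gasPrice":"0x430e23400"}
}
```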
The rpc.LatestBlockNumber and rpc.PendingBlockNumber meta // block numbers are also allowed. -func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*big.Int, error) { +func (s *PublicBlockChainAPI) GetBalance(ctx context.Context, address common.Address, blockNr rpc.BlockNumber) (*hexutil.Big, error) { state, _, err := s.b.StateAndHeaderByNumber(ctx, blockNr) if state == nil || err != nil { return nil, err } - b := state.GetBalance(address) - return b, state.Error() + return (*hexutil.Big)(state.GetBalance(address)), state.Error() } // GetBlockByNumber returns the requested block. When blockNr is -1 the chain head is returned. When fullTx is true all @@ -792,10 +790,10 @@ func FormatLogs(logs []vm.StructLog) []StructLogRes { return formatted } -// rpcOutputBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are +// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are // returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain // transaction hashes. -func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { +func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { head := b.Header() // copies the header once fields := map[string]interface{}{ "number": (*hexutil.Big)(head.Number), @@ -808,7 +806,6 @@ func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx "stateRoot": head.Root, "miner": head.Coinbase, "difficulty": (*hexutil.Big)(head.Difficulty), - "totalDifficulty": (*hexutil.Big)(s.b.GetTd(b.Hash())), "extraData": hexutil.Bytes(head.Extra), "size": hexutil.Uint64(b.Size()), "gasLimit": hexutil.Uint64(head.GasLimit), @@ -822,17 +819,15 @@ func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx formatTx := func(tx *types.Transaction) (interface{}, error) { return tx.Hash(), nil } - if fullTx { formatTx = func(tx *types.Transaction) (interface{}, error) { return newRPCTransactionFromBlockHash(b, tx.Hash()), nil } } - txs := b.Transactions() transactions := make([]interface{}, len(txs)) var err error - for i, tx := range b.Transactions() { + for i, tx := range txs { if transactions[i], err = formatTx(tx); err != nil { return nil, err } @@ -850,6 +845,17 @@ func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx return fields, nil } +// rpcOutputBlock uses the generalized output filler, then adds the total difficulty field, which requires +// a `PublicBlockChainAPI`. 
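The rpcOutputBlock/RPCMarshalBlock split below is a small refactoring pattern: hoist the receiver-free formatting into an exported function and keep a thin method adding the one field that needs API state. A toy sketch of the same shape, with hypothetical names unrelated to the ethapi package:

```go
package main

import "fmt"

// marshalItem is the reusable, receiver-free formatter (the RPCMarshalBlock role).
func marshalItem(v int) map[string]interface{} {
	return map[string]interface{}{"value": v}
}

type api struct{ td int }

// output wraps the shared formatter and adds the receiver-dependent field
// (the rpcOutputBlock role, where totalDifficulty needs the backend).
func (a *api) output(v int) map[string]interface{} {
	fields := marshalItem(v)
	fields["totalDifficulty"] = a.td
	return fields
}

func main() {
	a := &api{td: 42}
	fmt.Println(a.output(7))
}
```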
+func (s *PublicBlockChainAPI) rpcOutputBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { + fields, err := RPCMarshalBlock(b, inclTx, fullTx) + if err != nil { + return nil, err + } + fields["totalDifficulty"] = (*hexutil.Big)(s.b.GetTd(b.Hash())) + return fields, err +} + // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction type RPCTransaction struct { BlockHash common.Hash `json:"blockHash"` @@ -1096,7 +1102,7 @@ func (s *PublicTransactionPoolAPI) sign(addr common.Address, tx *types.Transacti // Request the wallet to sign the transaction var chainID *big.Int if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) { - chainID = config.ChainId + chainID = config.ChainID } return wallet.SignTx(account, tx, chainID) } @@ -1216,7 +1222,7 @@ func (s *PublicTransactionPoolAPI) SendTransaction(ctx context.Context, args Sen var chainID *big.Int if config := s.b.ChainConfig(); config.IsEIP155(s.b.CurrentBlock().Number()) { - chainID = config.ChainId + chainID = config.ChainID } signed, err := wallet.SignTx(account, tx, chainID) if err != nil { @@ -1293,14 +1299,19 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args Sen return &SignTransactionResult{data, tx}, nil } -// PendingTransactions returns the transactions that are in the transaction pool and have a from address that is one of -// the accounts this node manages. +// PendingTransactions returns the transactions that are in the transaction pool +// and have a from address that is one of the accounts this node manages. func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, error) { pending, err := s.b.GetPoolTransactions() if err != nil { return nil, err } - + accounts := make(map[common.Address]struct{}) + for _, wallet := range s.b.AccountManager().Wallets() { + for _, account := range wallet.Accounts() { + accounts[account.Address] = struct{}{} + } + } transactions := make([]*RPCTransaction, 0, len(pending)) for _, tx := range pending { var signer types.Signer = types.HomesteadSigner{} @@ -1308,7 +1319,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err signer = types.NewEIP155Signer(tx.ChainId()) } from, _ := types.Sender(signer, tx) - if _, err := s.b.AccountManager().Find(accounts.Account{Address: from}); err == nil { + if _, exists := accounts[from]; exists { transactions = append(transactions, newRPCPendingTransaction(tx)) } } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index af95d7906f..c9ffe230c6 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -65,7 +65,7 @@ type Backend interface { GetPoolNonce(ctx context.Context, addr common.Address) (uint64, error) Stats() (pending int, queued int) TxPoolContent() (map[common.Address]types.Transactions, map[common.Address]types.Transactions) - SubscribeTxPreEvent(chan<- core.TxPreEvent) event.Subscription + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription ChainConfig() *params.ChainConfig CurrentBlock() *types.Block diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 9d6ce8c6c8..89ebceec7c 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -313,8 +313,8 @@ web3._extend({ params: 2 }), new web3._extend.Method({ - name: 'setMutexProfileRate', - call: 'debug_setMutexProfileRate', + name: 'setMutexProfileFraction', + call: 'debug_setMutexProfileFraction', params: 1 }), new web3._extend.Method({ 
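The PendingTransactions rewrite above builds the set of locally managed addresses once and filters with O(1) lookups, instead of asking the account manager to search its wallets for every pending transaction. A standalone sketch of the same idea, with hypothetical string addresses in place of common.Address:

```go
package main

import "fmt"

func main() {
	managed := []string{"0xaa", "0xbb"}                 // hypothetical wallet accounts
	senders := []string{"0xaa", "0xcc", "0xbb", "0xaa"} // senders of pending txs

	// Build the membership set once.
	accounts := make(map[string]struct{}, len(managed))
	for _, addr := range managed {
		accounts[addr] = struct{}{}
	}

	// Filter with constant-time lookups.
	var mine []string
	for _, from := range senders {
		if _, ok := accounts[from]; ok {
			mine = append(mine, from)
		}
	}
	fmt.Println(mine) // [0xaa 0xbb 0xaa]
}
```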
diff --git a/les/api_backend.go b/les/api_backend.go index 1d3c995134..dea33c4702 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -136,8 +136,8 @@ func (b *LesApiBackend) TxPoolContent() (map[common.Address]types.Transactions, return b.eth.txPool.Content() } -func (b *LesApiBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription { - return b.eth.txPool.SubscribeTxPreEvent(ch) +func (b *LesApiBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { + return b.eth.txPool.SubscribeNewTxsEvent(ch) } func (b *LesApiBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { diff --git a/les/backend.go b/les/backend.go index 2d8ada7b0e..35f67f29f8 100644 --- a/les/backend.go +++ b/les/backend.go @@ -127,7 +127,7 @@ func New(ctx *node.ServiceContext, config *eth.Config) (*LightEthereum, error) { } leth.txPool = light.NewTxPool(leth.chainConfig, leth.blockchain, leth.relay) - if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, quitSync, &leth.wg); err != nil { + if leth.protocolManager, err = NewProtocolManager(leth.chainConfig, true, ClientProtocolVersions, config.NetworkId, leth.eventMux, leth.engine, leth.peers, leth.blockchain, nil, chainDb, leth.odr, leth.relay, leth.serverPool, quitSync, &leth.wg); err != nil { return nil, err } leth.ApiBackend = &LesApiBackend{leth, nil} diff --git a/les/handler.go b/les/handler.go index 22899eb1bc..a1c16cb875 100644 --- a/les/handler.go +++ b/les/handler.go @@ -19,6 +19,7 @@ package les import ( "encoding/binary" + "encoding/json" "errors" "fmt" "math/big" @@ -82,7 +83,7 @@ type BlockChain interface { InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) Rollback(chain []common.Hash) GetHeaderByNumber(number uint64) *types.Header - GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash + GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) Genesis() *types.Block SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription } @@ -128,7 +129,7 @@ type ProtocolManager struct { // NewProtocolManager returns a new ethereum sub protocol manager. The Ethereum sub protocol manages peers capable // of interacting with the ethereum network. 
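The GetBlockHeaders rework below adds a guard against a skip-overflow attack: next = current + skip + 1 can wrap around uint64 for a hostile skip value, which the old traversal would happily follow. A minimal sketch of just that check, outside any geth types:

```go
package main

import "fmt"

// nextHeader mirrors the new guard: any non-increasing "next" means the
// addition wrapped, so the query is treated as unknown and traversal stops.
func nextHeader(current, skip uint64) (uint64, bool) {
	next := current + skip + 1
	if next <= current {
		return 0, false // overflow attack or pathological skip
	}
	return next, true
}

func main() {
	fmt.Println(nextHeader(100, 5))          // 106 true
	fmt.Println(nextHeader(100, ^uint64(0))) // 0 false: skip = 2^64-1 wraps
}
```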
-func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protocolVersions []uint, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) { +func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protocolVersions []uint, networkId uint64, mux *event.TypeMux, engine consensus.Engine, peers *peerSet, blockchain BlockChain, txpool txPool, chainDb ethdb.Database, odr *LesOdr, txrelay *LesTxRelay, serverPool *serverPool, quitSync chan struct{}, wg *sync.WaitGroup) (*ProtocolManager, error) { // Create the protocol manager with the base fields manager := &ProtocolManager{ lightSync: lightSync, @@ -140,6 +141,7 @@ func NewProtocolManager(chainConfig *params.ChainConfig, lightSync bool, protoco networkId: networkId, txpool: txpool, txrelay: txrelay, + serverPool: serverPool, peers: peers, newPeerCh: make(chan *peer), quitSync: quitSync, @@ -417,6 +419,8 @@ func (pm *ProtocolManager) handleMsg(p *peer) error { } hashMode := query.Origin.Hash != (common.Hash{}) + first := true + maxNonCanonical := uint64(100) // Gather headers until the fetch or network limits are reached var ( @@ -428,40 +432,57 @@ // Retrieve the next header satisfying the query var origin *types.Header if hashMode { - origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) + if first { + first = false + origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) + if origin != nil { + query.Origin.Number = origin.Number.Uint64() + } + } else { + origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number) + } } else { origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) } if origin == nil { break } - number := origin.Number.Uint64() headers = append(headers, origin) bytes += estHeaderRlpSize // Advance to the next header of the query switch { - case query.Origin.Hash != (common.Hash{}) && query.Reverse: + case hashMode && query.Reverse: // Hash based traversal towards the genesis block - for i := 0; i < int(query.Skip)+1; i++ { - if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil { - query.Origin.Hash = header.ParentHash - number-- - } else { - unknown = true - break - } - } - case query.Origin.Hash != (common.Hash{}) && !query.Reverse: - // Hash based traversal towards the leaf block - if header := pm.blockchain.GetHeaderByNumber(origin.Number.Uint64() + query.Skip + 1); header != nil { - if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash { - query.Origin.Hash = header.Hash() - } else { - unknown = true - } - } else { + ancestor := query.Skip + 1 + if ancestor == 0 { unknown = true + } else { + query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) + unknown = (query.Origin.Hash == common.Hash{}) + } + case hashMode && !query.Reverse: + // Hash based traversal towards the leaf block + var ( + current = origin.Number.Uint64() + next = current + query.Skip + 1 + ) + if next <= current { + infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") + p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) + unknown = true + } else { + if header := pm.blockchain.GetHeaderByNumber(next); header != nil { + nextHash := 
header.Hash() + expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) + if expOldHash == query.Origin.Hash { + query.Origin.Hash, query.Origin.Number = nextHash, next + } else { + unknown = true + } + } else { + unknown = true + } } case query.Reverse: // Number based traversal towards the genesis block diff --git a/les/helper_test.go b/les/helper_test.go index 6d997a1a36..8fd01a39e0 100644 --- a/les/helper_test.go +++ b/les/helper_test.go @@ -178,7 +178,7 @@ func newTestProtocolManager(lightSync bool, blocks int, generator func(int, *cor } else { protocolVersions = ServerProtocolVersions } - pm, err := NewProtocolManager(gspec.Config, lightSync, protocolVersions, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, make(chan struct{}), new(sync.WaitGroup)) + pm, err := NewProtocolManager(gspec.Config, lightSync, protocolVersions, NetworkId, evmux, engine, peers, chain, nil, db, odr, nil, nil, make(chan struct{}), new(sync.WaitGroup)) if err != nil { return nil, err } diff --git a/les/odr_requests.go b/les/odr_requests.go index c7f06df94d..075fcd92ca 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -230,7 +230,7 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error { } nodeSet := proofs[0].NodeSet() // Verify the proof and store if checks out - if _, err, _ := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil { + if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, nodeSet); err != nil { return fmt.Errorf("merkle proof verification failed: %v", err) } r.Proof = nodeSet @@ -241,7 +241,7 @@ func (r *TrieRequest) Validate(db ethdb.Database, msg *Msg) error { // Verify the proof and store if checks out nodeSet := proofs.NodeSet() reads := &readTraceDB{db: nodeSet} - if _, err, _ := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil { + if _, _, err := trie.VerifyProof(r.Id.Root, r.Key, reads); err != nil { return fmt.Errorf("merkle proof verification failed: %v", err) } // check if all nodes have been read by VerifyProof @@ -400,7 +400,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error { var encNumber [8]byte binary.BigEndian.PutUint64(encNumber[:], r.BlockNum) - value, err, _ := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet()) + value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], light.NodeList(proof.Proof).NodeSet()) if err != nil { return err } @@ -435,7 +435,7 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error { binary.BigEndian.PutUint64(encNumber[:], r.BlockNum) reads := &readTraceDB{db: nodeSet} - value, err, _ := trie.VerifyProof(r.ChtRoot, encNumber[:], reads) + value, _, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads) if err != nil { return fmt.Errorf("merkle proof verification failed: %v", err) } @@ -529,7 +529,7 @@ func (r *BloomRequest) Validate(db ethdb.Database, msg *Msg) error { for i, idx := range r.SectionIdxList { binary.BigEndian.PutUint64(encNumber[2:], idx) - value, err, _ := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads) + value, _, err := trie.VerifyProof(r.BloomTrieRoot, encNumber[:], reads) if err != nil { return err } diff --git a/les/retrieve.go b/les/retrieve.go index e262a3cb47..a9037a38ef 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -69,9 +69,9 @@ type sentReq struct { lock sync.RWMutex // protect access to sentTo map sentTo map[distPeer]sentReqToPeer - reqQueued bool // a request has been queued but not sent - reqSent bool // a request has been sent but not timed out - reqSrtoCount int 
// number of requests that reached soft (but not hard) timeout + lastReqQueued bool // last request has been queued but not sent + lastReqSentTo distPeer // if not nil then last request has been sent to given peer but not timed out + reqSrtoCount int // number of requests that reached soft (but not hard) timeout } // sentReqToPeer notifies the request-from-peer goroutine (tryRequest) about a response @@ -180,7 +180,7 @@ type reqStateFn func() reqStateFn // retrieveLoop is the retrieval state machine event loop func (r *sentReq) retrieveLoop() { go r.tryRequest() - r.reqQueued = true + r.lastReqQueued = true state := r.stateRequesting for state != nil { @@ -214,7 +214,7 @@ func (r *sentReq) stateRequesting() reqStateFn { case rpSoftTimeout: // last request timed out, try asking a new peer go r.tryRequest() - r.reqQueued = true + r.lastReqQueued = true return r.stateRequesting case rpDeliveredValid: r.stop(nil) @@ -233,7 +233,7 @@ func (r *sentReq) stateNoMorePeers() reqStateFn { select { case <-time.After(retryQueue): go r.tryRequest() - r.reqQueued = true + r.lastReqQueued = true return r.stateRequesting case ev := <-r.eventsCh: r.update(ev) @@ -260,22 +260,26 @@ func (r *sentReq) stateStopped() reqStateFn { func (r *sentReq) update(ev reqPeerEvent) { switch ev.event { case rpSent: - r.reqQueued = false - if ev.peer != nil { - r.reqSent = true - } + r.lastReqQueued = false + r.lastReqSentTo = ev.peer case rpSoftTimeout: - r.reqSent = false + r.lastReqSentTo = nil r.reqSrtoCount++ - case rpHardTimeout, rpDeliveredValid, rpDeliveredInvalid: + case rpHardTimeout: r.reqSrtoCount-- + case rpDeliveredValid, rpDeliveredInvalid: + if ev.peer == r.lastReqSentTo { + r.lastReqSentTo = nil + } else { + r.reqSrtoCount-- + } } } // waiting returns true if the retrieval mechanism is waiting for an answer from // any peer func (r *sentReq) waiting() bool { - return r.reqQueued || r.reqSent || r.reqSrtoCount > 0 + return r.lastReqQueued || r.lastReqSentTo != nil || r.reqSrtoCount > 0 } // tryRequest tries to send the request to a new peer and waits for it to either diff --git a/les/server.go b/les/server.go index 9eb8ee7f97..fca6124c9c 100644 --- a/les/server.go +++ b/les/server.go @@ -52,7 +52,7 @@ type LesServer struct { func NewLesServer(eth *eth.Ethereum, config *eth.Config) (*LesServer, error) { quitSync := make(chan struct{}) - pm, err := NewProtocolManager(eth.BlockChain().Config(), false, ServerProtocolVersions, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, quitSync, new(sync.WaitGroup)) + pm, err := NewProtocolManager(eth.BlockChain().Config(), false, ServerProtocolVersions, config.NetworkId, eth.EventMux(), eth.Engine(), newPeerSet(), eth.BlockChain(), eth.TxPool(), eth.ChainDb(), nil, nil, nil, quitSync, new(sync.WaitGroup)) if err != nil { return nil, err } diff --git a/les/serverpool.go b/les/serverpool.go index a39f883554..1a4c752293 100644 --- a/les/serverpool.go +++ b/les/serverpool.go @@ -87,6 +87,27 @@ const ( initStatsWeight = 1 ) +// connReq represents a request for peer connection. +type connReq struct { + p *peer + ip net.IP + port uint16 + result chan *poolEntry +} + +// disconnReq represents a request for peer disconnection. +type disconnReq struct { + entry *poolEntry + stopped bool + done chan struct{} +} + +// registerReq represents a request for peer registration. 
+type registerReq struct { + entry *poolEntry + done chan struct{} +} + // serverPool implements a pool for storing and selecting newly discovered and already // known light server nodes. It receives discovered nodes, stores statistics about // known nodes and takes care of always having enough good quality servers connected. @@ -105,10 +126,13 @@ type serverPool struct { discLookups chan bool entries map[discover.NodeID]*poolEntry - lock sync.Mutex timeout, enableRetry chan *poolEntry adjustStats chan poolStatAdjust + connCh chan *connReq + disconnCh chan *disconnReq + registerCh chan *registerReq + knownQueue, newQueue poolEntryQueue knownSelect, newSelect *weightedRandomSelect knownSelected, newSelected int @@ -125,6 +149,9 @@ func newServerPool(db ethdb.Database, quit chan struct{}, wg *sync.WaitGroup) *s timeout: make(chan *poolEntry, 1), adjustStats: make(chan poolStatAdjust, 100), enableRetry: make(chan *poolEntry, 1), + connCh: make(chan *connReq), + disconnCh: make(chan *disconnReq), + registerCh: make(chan *registerReq), knownSelect: newWeightedRandomSelect(), newSelect: newWeightedRandomSelect(), fastDiscover: true, @@ -147,9 +174,8 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) { pool.discLookups = make(chan bool, 100) go pool.server.DiscV5.SearchTopic(pool.topic, pool.discSetPeriod, pool.discNodes, pool.discLookups) } - - go pool.eventLoop() pool.checkDial() + go pool.eventLoop() } // connect should be called upon any incoming connection. If the connection has been @@ -158,83 +184,44 @@ func (pool *serverPool) start(server *p2p.Server, topic discv5.Topic) { // Note that whenever a connection has been accepted and a pool entry has been returned, // disconnect should also always be called. func (pool *serverPool) connect(p *peer, ip net.IP, port uint16) *poolEntry { - pool.lock.Lock() - defer pool.lock.Unlock() - entry := pool.entries[p.ID()] - if entry == nil { - entry = pool.findOrNewNode(p.ID(), ip, port) - } - p.Log().Debug("Connecting to new peer", "state", entry.state) - if entry.state == psConnected || entry.state == psRegistered { + log.Debug("Connect new entry", "enode", p.id) + req := &connReq{p: p, ip: ip, port: port, result: make(chan *poolEntry, 1)} + select { + case pool.connCh <- req: + case <-pool.quit: return nil } - pool.connWg.Add(1) - entry.peer = p - entry.state = psConnected - addr := &poolEntryAddress{ - ip: ip, - port: port, - lastSeen: mclock.Now(), - } - entry.lastConnected = addr - entry.addr = make(map[string]*poolEntryAddress) - entry.addr[addr.strKey()] = addr - entry.addrSelect = *newWeightedRandomSelect() - entry.addrSelect.update(addr) - return entry + return <-req.result } // registered should be called after a successful handshake func (pool *serverPool) registered(entry *poolEntry) { log.Debug("Registered new entry", "enode", entry.id) - pool.lock.Lock() - defer pool.lock.Unlock() - - entry.state = psRegistered - entry.regTime = mclock.Now() - if !entry.known { - pool.newQueue.remove(entry) - entry.known = true + req := &registerReq{entry: entry, done: make(chan struct{})} + select { + case pool.registerCh <- req: + case <-pool.quit: + return } - pool.knownQueue.setLatest(entry) - entry.shortRetry = shortRetryCnt + <-req.done } // disconnect should be called when ending a connection. 
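The serverpool refactor in this file drops pool.lock entirely: connect, registered and disconnect now post request structs on channels and block until the event loop, the sole owner of the mutable state, has served them. A condensed sketch of this ownership pattern using a hypothetical minimal pool:

```go
package main

import "fmt"

// connReq models the request/response handshake: the caller supplies a
// buffered result channel and waits on it.
type connReq struct {
	id     string
	result chan bool
}

type pool struct {
	connCh chan *connReq
	quit   chan struct{}
	seen   map[string]bool // owned exclusively by eventLoop
}

func (p *pool) eventLoop() {
	for {
		select {
		case req := <-p.connCh:
			first := !p.seen[req.id]
			p.seen[req.id] = true
			req.result <- first
		case <-p.quit:
			return
		}
	}
}

// connect blocks until the event loop answers, or bails out on shutdown.
func (p *pool) connect(id string) bool {
	req := &connReq{id: id, result: make(chan bool, 1)}
	select {
	case p.connCh <- req:
		return <-req.result
	case <-p.quit:
		return false
	}
}

func main() {
	p := &pool{connCh: make(chan *connReq), quit: make(chan struct{}), seen: map[string]bool{}}
	go p.eventLoop()
	fmt.Println(p.connect("peer-1")) // true: first sighting
	fmt.Println(p.connect("peer-1")) // false
	close(p.quit)
}
```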
Service quality statistics // can be updated optionally (not updated if no registration happened, in this case // only connection statistics are updated, just like in case of timeout) func (pool *serverPool) disconnect(entry *poolEntry) { + stopped := false + select { + case <-pool.quit: + stopped = true + default: + } log.Debug("Disconnected old entry", "enode", entry.id) - pool.lock.Lock() - defer pool.lock.Unlock() + req := &disconnReq{entry: entry, stopped: stopped, done: make(chan struct{})} - if entry.state == psRegistered { - connTime := mclock.Now() - entry.regTime - connAdjust := float64(connTime) / float64(targetConnTime) - if connAdjust > 1 { - connAdjust = 1 - } - stopped := false - select { - case <-pool.quit: - stopped = true - default: - } - if stopped { - entry.connectStats.add(1, connAdjust) - } else { - entry.connectStats.add(connAdjust, 1) - } - } - - entry.state = psNotConnected - if entry.knownSelected { - pool.knownSelected-- - } else { - pool.newSelected-- - } - pool.setRetryDial(entry) - pool.connWg.Done() + // Block until disconnection request is served. + pool.disconnCh <- req + <-req.done } const ( @@ -277,25 +264,51 @@ func (pool *serverPool) eventLoop() { if pool.discSetPeriod != nil { pool.discSetPeriod <- time.Millisecond * 100 } + + // disconnect updates service quality statistics depending on the connection time + // and disconnection initiator. + disconnect := func(req *disconnReq, stopped bool) { + // Handle peer disconnection requests. + entry := req.entry + if entry.state == psRegistered { + connAdjust := float64(mclock.Now()-entry.regTime) / float64(targetConnTime) + if connAdjust > 1 { + connAdjust = 1 + } + if stopped { + // disconnect requested by ourselves. + entry.connectStats.add(1, connAdjust) + } else { + // disconnect requested by server side. + entry.connectStats.add(connAdjust, 1) + } + } + entry.state = psNotConnected + + if entry.knownSelected { + pool.knownSelected-- + } else { + pool.newSelected-- + } + pool.setRetryDial(entry) + pool.connWg.Done() + close(req.done) + } + for { select { case entry := <-pool.timeout: - pool.lock.Lock() if !entry.removed { pool.checkDialTimeout(entry) } - pool.lock.Unlock() case entry := <-pool.enableRetry: - pool.lock.Lock() if !entry.removed { entry.delayedRetry = false pool.updateCheckDial(entry) } - pool.lock.Unlock() case adj := <-pool.adjustStats: - pool.lock.Lock() switch adj.adjustType { case pseBlockDelay: adj.entry.delayStats.add(float64(adj.time), 1) @@ -305,13 +318,10 @@ func (pool *serverPool) eventLoop() { case pseResponseTimeout: adj.entry.timeoutStats.add(1, 1) } - pool.lock.Unlock() case node := <-pool.discNodes: - pool.lock.Lock() entry := pool.findOrNewNode(discover.NodeID(node.ID), node.IP, node.TCP) pool.updateCheckDial(entry) - pool.lock.Unlock() case conv := <-pool.discLookups: if conv { @@ -327,15 +337,66 @@ func (pool *serverPool) eventLoop() { } } + case req := <-pool.connCh: + // Handle peer connection requests. 
+ entry := pool.entries[req.p.ID()] + if entry == nil { + entry = pool.findOrNewNode(req.p.ID(), req.ip, req.port) + } + if entry.state == psConnected || entry.state == psRegistered { + req.result <- nil + continue + } + pool.connWg.Add(1) + entry.peer = req.p + entry.state = psConnected + addr := &poolEntryAddress{ + ip: req.ip, + port: req.port, + lastSeen: mclock.Now(), + } + entry.lastConnected = addr + entry.addr = make(map[string]*poolEntryAddress) + entry.addr[addr.strKey()] = addr + entry.addrSelect = *newWeightedRandomSelect() + entry.addrSelect.update(addr) + req.result <- entry + + case req := <-pool.registerCh: + // Handle peer registration requests. + entry := req.entry + entry.state = psRegistered + entry.regTime = mclock.Now() + if !entry.known { + pool.newQueue.remove(entry) + entry.known = true + } + pool.knownQueue.setLatest(entry) + entry.shortRetry = shortRetryCnt + close(req.done) + + case req := <-pool.disconnCh: + // Handle peer disconnection requests. + disconnect(req, req.stopped) + case <-pool.quit: if pool.discSetPeriod != nil { close(pool.discSetPeriod) } - pool.connWg.Wait() + + // Spawn a goroutine to close the disconnCh after all connections are disconnected. + go func() { + pool.connWg.Wait() + close(pool.disconnCh) + }() + + // Handle all remaining disconnection requests before exit. + for req := range pool.disconnCh { + disconnect(req, true) + } pool.saveNodes() pool.wg.Done() return - } } } diff --git a/light/lightchain.go b/light/lightchain.go index 9d0a4e4f73..30b9bd89a6 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -433,6 +433,18 @@ func (self *LightChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []c return self.hc.GetBlockHashesFromHash(hash, max) } +// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or +// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the +// number of blocks to be individually checked before we reach the canonical chain. +// +// Note: ancestor == 0 returns the same block, 1 returns its parent and so on. +func (bc *LightChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) { + bc.chainmu.Lock() + defer bc.chainmu.Unlock() + + return bc.hc.GetAncestor(hash, number, ancestor, maxNonCanonical) +} + // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. 
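GetAncestor's contract is easy to misread, so here is a toy illustration of the documented semantics: ancestor == 0 returns the block itself, 1 its parent, and so on. The real method also decrements the maxNonCanonical budget while walking off-canonical blocks, which this in-memory sketch does not model:

```go
package main

import "fmt"

func main() {
	// Hypothetical parent-hash links: c -> b -> a -> genesis.
	parent := map[string]string{"c": "b", "b": "a", "a": "genesis"}

	getAncestor := func(hash string, ancestor uint64) string {
		for ; ancestor > 0; ancestor-- {
			hash = parent[hash]
		}
		return hash
	}

	fmt.Println(getAncestor("c", 0)) // c (the block itself)
	fmt.Println(getAncestor("c", 1)) // b (its parent)
	fmt.Println(getAncestor("c", 3)) // genesis
}
```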
func (self *LightChain) GetHeaderByNumber(number uint64) *types.Header { diff --git a/light/postprocess.go b/light/postprocess.go index c06c18027e..121321ae66 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -59,18 +59,18 @@ type trustedCheckpoint struct { var ( mainnetCheckpoint = trustedCheckpoint{ name: "mainnet", - sectionIdx: 170, - sectionHead: common.HexToHash("3bb2c28bcce463d57968f14f56cdb3fbf35349ab7a701f44c1afb57349c9a356"), - chtRoot: common.HexToHash("d92b6d0853455f8439086292338e87f69781921680dd7aa072fb71547b87415e"), - bloomTrieRoot: common.HexToHash("e4e8250a2fefddead7ae42daecd848cbf9b66d748a8270f8bbd4370b764bb9e9"), + sectionIdx: 174, + sectionHead: common.HexToHash("a3ef48cd8f1c3a08419f0237fc7763491fe89497b3144b17adf87c1c43664613"), + chtRoot: common.HexToHash("dcbeed9f4dea1b3cb75601bb27c51b9960c28e5850275402ac49a150a667296e"), + bloomTrieRoot: common.HexToHash("6b7497a4a03e33870a2383cb6f5e70570f12b1bf5699063baf8c71d02ca90b02"), } ropstenCheckpoint = trustedCheckpoint{ name: "ropsten", - sectionIdx: 97, - sectionHead: common.HexToHash("719448c67c01eb5b9f27833a36a4e34612f66801316d7ff37daf9e77fb4cd095"), - chtRoot: common.HexToHash("a7857afc15930ca6e583b6c3d563a025144011655843d52d28e2fdaadd417bea"), - bloomTrieRoot: common.HexToHash("9c71d4b50cbec86dfeaa8e08992de8a4667b81d13c54d6522b17ce2fc5d36416"), + sectionIdx: 102, + sectionHead: common.HexToHash("9017ab08465cb2b2dee035ee5b817bbd7fa28e2c8d2cd903e0aed1cccb249e89"), + chtRoot: common.HexToHash("f61c10a7a787a5ef15f0ae1ae6c13c64331e57e79d0466d2bd9b0c06833fe956"), + bloomTrieRoot: common.HexToHash("69f2ad19aa46d5213a90137b3d2c9bff8a7c9483f7170f0125096ff450c9a873"), } ) diff --git a/light/txpool.go b/light/txpool.go index 94c8139cbd..767a797bdc 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -89,7 +89,7 @@ type TxRelayBackend interface { func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool { pool := &TxPool{ config: config, - signer: types.NewEIP155Signer(config.ChainId), + signer: types.NewEIP155Signer(config.ChainID), nonce: make(map[common.Address]uint64), pending: make(map[common.Hash]*types.Transaction), mined: make(map[common.Hash][]*types.Transaction), @@ -199,15 +199,17 @@ func (pool *TxPool) checkMinedTxs(ctx context.Context, hash common.Hash, number // rollbackTxs marks the transactions contained in recently rolled back blocks // as rolled back. It also removes any positional lookup entries. func (pool *TxPool) rollbackTxs(hash common.Hash, txc txStateChanges) { + batch := pool.chainDb.NewBatch() if list, ok := pool.mined[hash]; ok { for _, tx := range list { txHash := tx.Hash() - rawdb.DeleteTxLookupEntry(pool.chainDb, txHash) + rawdb.DeleteTxLookupEntry(batch, txHash) pool.pending[txHash] = tx txc.setState(txHash, false) } delete(pool.mined, hash) } + batch.Write() } // reorgOnNewHead sets a new head header, processing (and rolling back if necessary) @@ -321,9 +323,9 @@ func (pool *TxPool) Stop() { log.Info("Transaction pool stopped") } -// SubscribeTxPreEvent registers a subscription of core.TxPreEvent and +// SubscribeNewTxsEvent registers a subscription of core.NewTxsEvent and // starts sending events to the given channel. 
-func (pool *TxPool) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription { +func (pool *TxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { return pool.scope.Track(pool.txFeed.Subscribe(ch)) } @@ -412,7 +414,7 @@ func (self *TxPool) add(ctx context.Context, tx *types.Transaction) error { // Notify the subscribers. This event is posted in a goroutine // because it's possible that somewhere during the post "Remove transaction" // gets called which will then wait for the global tx pool lock and deadlock. - go self.txFeed.Send(core.TxPreEvent{Tx: tx}) + go self.txFeed.Send(core.NewTxsEvent{Txs: types.Transactions{tx}}) } // Print a log message if low enough level is set @@ -504,14 +506,16 @@ func (self *TxPool) Content() (map[common.Address]types.Transactions, map[common func (self *TxPool) RemoveTransactions(txs types.Transactions) { self.mu.Lock() defer self.mu.Unlock() + var hashes []common.Hash + batch := self.chainDb.NewBatch() for _, tx := range txs { - //self.RemoveTx(tx.Hash()) hash := tx.Hash() delete(self.pending, hash) - self.chainDb.Delete(hash[:]) + batch.Delete(hash.Bytes()) hashes = append(hashes, hash) } + batch.Write() self.relay.Discard(hashes) } diff --git a/log/README.md b/log/README.md index 0951b21cb5..b4476577b6 100644 --- a/log/README.md +++ b/log/README.md @@ -45,7 +45,7 @@ srvlog.SetHandler(log.MultiHandler( log.StreamHandler(os.Stderr, log.LogfmtFormat()), log.LvlFilterHandler( log.LvlError, - log.Must.FileHandler("errors.json", log.JsonFormat())))) + log.Must.FileHandler("errors.json", log.JSONFormat())))) ``` Will result in output that looks like this: diff --git a/log/doc.go b/log/doc.go index 83ad8c54f6..bff2f4966a 100644 --- a/log/doc.go +++ b/log/doc.go @@ -86,7 +86,7 @@ from the rpc package in logfmt to standard out. The other prints records at Erro or above in JSON formatted output to the file /var/log/service.json handler := log.MultiHandler( - log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JsonFormat())), + log.LvlFilterHandler(log.LvlError, log.Must.FileHandler("/var/log/service.json", log.JSONFormat())), log.MatchFilterHandler("pkg", "app/rpc", log.StdoutHandler()) ) @@ -304,8 +304,8 @@ For all Handler functions which can return an error, there is a version of that function which will return no error but panics on failure. They are all available on the Must object. For example: - log.Must.FileHandler("/path", log.JsonFormat) - log.Must.NetHandler("tcp", ":1234", log.JsonFormat) + log.Must.FileHandler("/path", log.JSONFormat) + log.Must.NetHandler("tcp", ":1234", log.JSONFormat) Inspiration and Credit diff --git a/log/format.go b/log/format.go index 0b07abb2ac..0d4732cc0f 100644 --- a/log/format.go +++ b/log/format.go @@ -15,7 +15,7 @@ import ( const ( timeFormat = "2006-01-02T15:04:05-0700" - termTimeFormat = "01-02|15:04:05" + termTimeFormat = "01-02|15:04:05.000" floatFormat = 'f' termMsgJust = 40 ) @@ -196,16 +196,16 @@ func logfmt(buf *bytes.Buffer, ctx []interface{}, color int, term bool) { buf.WriteByte('\n') } -// JsonFormat formats log records as JSON objects separated by newlines. -// It is the equivalent of JsonFormatEx(false, true). -func JsonFormat() Format { - return JsonFormatEx(false, true) +// JSONFormat formats log records as JSON objects separated by newlines. +// It is the equivalent of JSONFormatEx(false, true). +func JSONFormat() Format { + return JSONFormatEx(false, true) } -// JsonFormatEx formats log records as JSON objects. 
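After the JsonFormat-to-JSONFormat rename, a typical handler setup looks like this sketch (the message and context keys are arbitrary):

```go
package main

import (
	"os"

	"github.com/ethereum/go-ethereum/log"
)

func main() {
	// Emit JSON records of level Warn and above to stderr.
	handler := log.LvlFilterHandler(log.LvlWarn, log.StreamHandler(os.Stderr, log.JSONFormat()))
	log.Root().SetHandler(handler)

	log.Warn("database compacting", "delay", "406ms") // printed, as JSON
	log.Info("routine detail")                        // filtered out
}
```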
If pretty is true, +// JSONFormatEx formats log records as JSON objects. If pretty is true, // records will be pretty-printed. If lineSeparated is true, records // will be logged with a new line between each record. -func JsonFormatEx(pretty, lineSeparated bool) Format { +func JSONFormatEx(pretty, lineSeparated bool) Format { jsonMarshal := json.Marshal if pretty { jsonMarshal = func(v interface{}) ([]byte, error) { @@ -225,7 +225,7 @@ func JsonFormatEx(pretty, lineSeparated bool) Format { if !ok { props[errorKey] = fmt.Sprintf("%+v is not a string key", r.Ctx[i]) } - props[k] = formatJsonValue(r.Ctx[i+1]) + props[k] = formatJSONValue(r.Ctx[i+1]) } b, err := jsonMarshal(props) @@ -270,7 +270,7 @@ func formatShared(value interface{}) (result interface{}) { } } -func formatJsonValue(value interface{}) interface{} { +func formatJSONValue(value interface{}) interface{} { value = formatShared(value) switch value.(type) { case int, int8, int16, int32, int64, float32, float64, uint, uint8, uint16, uint32, uint64, string: diff --git a/log/handler.go b/log/handler.go index 41d5718ddb..3c99114dcb 100644 --- a/log/handler.go +++ b/log/handler.go @@ -11,8 +11,8 @@ import ( "github.com/go-stack/stack" ) +// Handler defines where and how log records are written. // A Logger prints its log records by writing to a Handler. -// The Handler interface defines where and how log records are written. // Handlers are composable, providing you great flexibility in combining // them to achieve the logging structure that suits your applications. type Handler interface { @@ -193,7 +193,7 @@ func LvlFilterHandler(maxLvl Lvl, h Handler) Handler { }, h) } -// A MultiHandler dispatches any write to each of its handlers. +// MultiHandler dispatches any write to each of its handlers. // This is useful for writing different types of log information // to different locations. For example, to log to a file and // standard error: @@ -212,7 +212,7 @@ func MultiHandler(hs ...Handler) Handler { }) } -// A FailoverHandler writes all log records to the first handler +// FailoverHandler writes all log records to the first handler // specified, but will failover and write to the second handler if // the first handler has failed, and so on for all handlers specified. // For example you might want to log to a network socket, but failover @@ -220,7 +220,7 @@ func MultiHandler(hs ...Handler) Handler { // standard out if the file write fails: // // log.FailoverHandler( -// log.Must.NetHandler("tcp", ":9090", log.JsonFormat()), +// log.Must.NetHandler("tcp", ":9090", log.JSONFormat()), // log.Must.FileHandler("/var/log/app.log", log.LogfmtFormat()), // log.StdoutHandler) // @@ -336,7 +336,7 @@ func DiscardHandler() Handler { }) } -// The Must object provides the following Handler creation functions +// Must provides the following Handler creation functions // which instead of returning an error parameter only return a Handler // and panic on failure: FileHandler, NetHandler, SyslogHandler, SyslogNetHandler var Must muster diff --git a/log/logger.go b/log/logger.go index 15c83a9b25..438aa548fa 100644 --- a/log/logger.go +++ b/log/logger.go @@ -12,6 +12,7 @@ const timeKey = "t" const lvlKey = "lvl" const msgKey = "msg" const errorKey = "LOG15_ERROR" +const skipLevel = 2 type Lvl int @@ -24,7 +25,7 @@ const ( LvlTrace ) -// Aligned returns a 5-character string containing the name of a Lvl. +// AlignedString returns a 5-character string containing the name of a Lvl. 
func (l Lvl) AlignedString() string { switch l { case LvlTrace: @@ -64,7 +65,7 @@ func (l Lvl) String() string { } } -// Returns the appropriate Lvl from a string name. +// LvlFromString returns the appropriate Lvl from a string name. // Useful for parsing command line args and configuration files. func LvlFromString(lvlString string) (Lvl, error) { switch lvlString { @@ -95,6 +96,7 @@ type Record struct { KeyNames RecordKeyNames } +// RecordKeyNames gets stored in a Record when the write function is executed. type RecordKeyNames struct { Time string Msg string @@ -126,13 +128,13 @@ type logger struct { h *swapHandler } -func (l *logger) write(msg string, lvl Lvl, ctx []interface{}) { +func (l *logger) write(msg string, lvl Lvl, ctx []interface{}, skip int) { l.h.Log(&Record{ Time: time.Now(), Lvl: lvl, Msg: msg, Ctx: newContext(l.ctx, ctx), - Call: stack.Caller(2), + Call: stack.Caller(skip), KeyNames: RecordKeyNames{ Time: timeKey, Msg: msgKey, @@ -156,27 +158,27 @@ func newContext(prefix []interface{}, suffix []interface{}) []interface{} { } func (l *logger) Trace(msg string, ctx ...interface{}) { - l.write(msg, LvlTrace, ctx) + l.write(msg, LvlTrace, ctx, skipLevel) } func (l *logger) Debug(msg string, ctx ...interface{}) { - l.write(msg, LvlDebug, ctx) + l.write(msg, LvlDebug, ctx, skipLevel) } func (l *logger) Info(msg string, ctx ...interface{}) { - l.write(msg, LvlInfo, ctx) + l.write(msg, LvlInfo, ctx, skipLevel) } func (l *logger) Warn(msg string, ctx ...interface{}) { - l.write(msg, LvlWarn, ctx) + l.write(msg, LvlWarn, ctx, skipLevel) } func (l *logger) Error(msg string, ctx ...interface{}) { - l.write(msg, LvlError, ctx) + l.write(msg, LvlError, ctx, skipLevel) } func (l *logger) Crit(msg string, ctx ...interface{}) { - l.write(msg, LvlCrit, ctx) + l.write(msg, LvlCrit, ctx, skipLevel) os.Exit(1) } diff --git a/log/root.go b/log/root.go index 71b8cef6d4..9fb4c5ae0b 100644 --- a/log/root.go +++ b/log/root.go @@ -31,31 +31,40 @@ func Root() Logger { // Trace is a convenient alias for Root().Trace func Trace(msg string, ctx ...interface{}) { - root.write(msg, LvlTrace, ctx) + root.write(msg, LvlTrace, ctx, skipLevel) } // Debug is a convenient alias for Root().Debug func Debug(msg string, ctx ...interface{}) { - root.write(msg, LvlDebug, ctx) + root.write(msg, LvlDebug, ctx, skipLevel) } // Info is a convenient alias for Root().Info func Info(msg string, ctx ...interface{}) { - root.write(msg, LvlInfo, ctx) + root.write(msg, LvlInfo, ctx, skipLevel) } // Warn is a convenient alias for Root().Warn func Warn(msg string, ctx ...interface{}) { - root.write(msg, LvlWarn, ctx) + root.write(msg, LvlWarn, ctx, skipLevel) } // Error is a convenient alias for Root().Error func Error(msg string, ctx ...interface{}) { - root.write(msg, LvlError, ctx) + root.write(msg, LvlError, ctx, skipLevel) } // Crit is a convenient alias for Root().Crit func Crit(msg string, ctx ...interface{}) { - root.write(msg, LvlCrit, ctx) + root.write(msg, LvlCrit, ctx, skipLevel) os.Exit(1) } + +// Output is a convenient alias for write, allowing for the modification of +// the calldepth (number of stack frames to skip). +// calldepth influences the reported line number of the log message. +// A calldepth of zero reports the immediate caller of Output. +// Non-zero calldepth skips as many stack frames. 
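The new Output entry point below exists so logging wrappers can attribute records to their own callers. A sketch: with calldepth 1, the record points at the caller of the hypothetical logThrough helper rather than at the helper itself:

```go
package main

import "github.com/ethereum/go-ethereum/log"

// logThrough is a hypothetical wrapper; calldepth 1 skips this frame.
func logThrough(msg string) {
	log.Output(msg, log.LvlInfo, 1, "wrapped", true)
}

func main() {
	// CallerFileHandler annotates records with the reported file:line.
	log.Root().SetHandler(log.CallerFileHandler(log.StdoutHandler))
	logThrough("hello from the caller's line")
}
```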
+func Output(msg string, lvl Lvl, calldepth int, ctx ...interface{}) { + root.write(msg, lvl, ctx, calldepth+skipLevel) +} diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index c19d00a94d..625ffd4e8a 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -134,6 +134,17 @@ func (exp *exp) publishTimer(name string, metric metrics.Timer) { exp.getFloat(name + ".mean-rate").Set(t.RateMean()) } +func (exp *exp) publishResettingTimer(name string, metric metrics.ResettingTimer) { + t := metric.Snapshot() + ps := t.Percentiles([]float64{50, 75, 95, 99}) + exp.getInt(name + ".count").Set(int64(len(t.Values()))) + exp.getFloat(name + ".mean").Set(t.Mean()) + exp.getInt(name + ".50-percentile").Set(ps[0]) + exp.getInt(name + ".75-percentile").Set(ps[1]) + exp.getInt(name + ".95-percentile").Set(ps[2]) + exp.getInt(name + ".99-percentile").Set(ps[3]) +} + func (exp *exp) syncToExpvar() { exp.registry.Each(func(name string, i interface{}) { switch i.(type) { @@ -149,6 +160,8 @@ func (exp *exp) syncToExpvar() { exp.publishMeter(name, i.(metrics.Meter)) case metrics.Timer: exp.publishTimer(name, i.(metrics.Timer)) + case metrics.ResettingTimer: + exp.publishResettingTimer(name, i.(metrics.ResettingTimer)) default: panic(fmt.Sprintf("unsupported type for '%s': %T", name, i)) } diff --git a/metrics/influxdb/influxdb.go b/metrics/influxdb/influxdb.go index d5cb4da66f..31a5c21b5f 100644 --- a/metrics/influxdb/influxdb.go +++ b/metrics/influxdb/influxdb.go @@ -2,10 +2,10 @@ package influxdb import ( "fmt" - "log" uurl "net/url" "time" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/influxdata/influxdb/client" ) @@ -35,7 +35,7 @@ func InfluxDB(r metrics.Registry, d time.Duration, url, database, username, pass func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, username, password, namespace string, tags map[string]string) { u, err := uurl.Parse(url) if err != nil { - log.Printf("unable to parse InfluxDB url %s. err=%v", url, err) + log.Warn("Unable to parse InfluxDB", "url", url, "err", err) return } @@ -51,7 +51,7 @@ func InfluxDBWithTags(r metrics.Registry, d time.Duration, url, database, userna cache: make(map[string]int64), } if err := rep.makeClient(); err != nil { - log.Printf("unable to make InfluxDB client. err=%v", err) + log.Warn("Unable to make InfluxDB client", "err", err) return } @@ -76,15 +76,15 @@ func (r *reporter) run() { select { case <-intervalTicker: if err := r.send(); err != nil { - log.Printf("unable to send to InfluxDB. err=%v", err) + log.Warn("Unable to send to InfluxDB", "err", err) } case <-pingTicker: _, _, err := r.client.Ping() if err != nil { - log.Printf("got error while sending a ping to InfluxDB, trying to recreate client. err=%v", err) + log.Warn("Got error while sending a ping to InfluxDB, trying to recreate client", "err", err) if err = r.makeClient(); err != nil { - log.Printf("unable to make InfluxDB client. 
err=%v", err) + log.Warn("Unable to make InfluxDB client", "err", err) } } } diff --git a/metrics/metrics.go b/metrics/metrics.go index e24324814c..2356f2b148 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -68,17 +68,20 @@ func CollectProcessMetrics(refresh time.Duration) { } // Iterate loading the different stats and updating the meters for i := 1; ; i++ { - runtime.ReadMemStats(memstats[i%2]) - memAllocs.Mark(int64(memstats[i%2].Mallocs - memstats[(i-1)%2].Mallocs)) - memFrees.Mark(int64(memstats[i%2].Frees - memstats[(i-1)%2].Frees)) - memInuse.Mark(int64(memstats[i%2].Alloc - memstats[(i-1)%2].Alloc)) - memPauses.Mark(int64(memstats[i%2].PauseTotalNs - memstats[(i-1)%2].PauseTotalNs)) + location1 := i % 2 + location2 := (i - 1) % 2 - if ReadDiskStats(diskstats[i%2]) == nil { - diskReads.Mark(diskstats[i%2].ReadCount - diskstats[(i-1)%2].ReadCount) - diskReadBytes.Mark(diskstats[i%2].ReadBytes - diskstats[(i-1)%2].ReadBytes) - diskWrites.Mark(diskstats[i%2].WriteCount - diskstats[(i-1)%2].WriteCount) - diskWriteBytes.Mark(diskstats[i%2].WriteBytes - diskstats[(i-1)%2].WriteBytes) + runtime.ReadMemStats(memstats[location1]) + memAllocs.Mark(int64(memstats[location1].Mallocs - memstats[location2].Mallocs)) + memFrees.Mark(int64(memstats[location1].Frees - memstats[location2].Frees)) + memInuse.Mark(int64(memstats[location1].Alloc - memstats[location2].Alloc)) + memPauses.Mark(int64(memstats[location1].PauseTotalNs - memstats[location2].PauseTotalNs)) + + if ReadDiskStats(diskstats[location1]) == nil { + diskReads.Mark(diskstats[location1].ReadCount - diskstats[location2].ReadCount) + diskReadBytes.Mark(diskstats[location1].ReadBytes - diskstats[location2].ReadBytes) + diskWrites.Mark(diskstats[location1].WriteCount - diskstats[location2].WriteCount) + diskWriteBytes.Mark(diskstats[location1].WriteBytes - diskstats[location2].WriteBytes) } time.Sleep(refresh) } diff --git a/metrics/resetting_timer.go b/metrics/resetting_timer.go index 57bcb31343..e5327d3bd3 100644 --- a/metrics/resetting_timer.go +++ b/metrics/resetting_timer.go @@ -58,7 +58,11 @@ type NilResettingTimer struct { func (NilResettingTimer) Values() []int64 { return nil } // Snapshot is a no-op. -func (NilResettingTimer) Snapshot() ResettingTimer { return NilResettingTimer{} } +func (NilResettingTimer) Snapshot() ResettingTimer { + return &ResettingTimerSnapshot{ + values: []int64{}, + } +} // Time is a no-op. 
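The exp publisher added above reads a ResettingTimer through Snapshot, Values, Mean and Percentiles. A small usage sketch (durations arbitrary; note that percentiles are given on a 0-100 scale, matching the tests further down):

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

func main() {
	t := metrics.NewResettingTimer()
	for _, d := range []time.Duration{10, 20, 30, 40} {
		t.Update(d * time.Millisecond)
	}
	snap := t.Snapshot() // read-only copy; the live timer's window resets
	ps := snap.Percentiles([]float64{50, 95})
	fmt.Println("count:", len(snap.Values()), "mean:", snap.Mean(), "p50:", ps[0], "p95:", ps[1])
}
```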
func (NilResettingTimer) Time(func()) {} @@ -210,7 +214,7 @@ func (t *ResettingTimerSnapshot) calc(percentiles []float64) { // poor man's math.Round(x): // math.Floor(x + 0.5) indexOfPerc := int(math.Floor(((abs / 100.0) * float64(count)) + 0.5)) - if pct >= 0 { + if pct >= 0 && indexOfPerc > 0 { indexOfPerc -= 1 // index offset=0 } thresholdBoundary = t.values[indexOfPerc] diff --git a/metrics/resetting_timer_test.go b/metrics/resetting_timer_test.go index 58fd47f352..77c49dc386 100644 --- a/metrics/resetting_timer_test.go +++ b/metrics/resetting_timer_test.go @@ -104,3 +104,113 @@ func TestResettingTimer(t *testing.T) { } } } + +func TestResettingTimerWithFivePercentiles(t *testing.T) { + tests := []struct { + values []int64 + start int + end int + wantP05 int64 + wantP20 int64 + wantP50 int64 + wantP95 int64 + wantP99 int64 + wantMean float64 + wantMin int64 + wantMax int64 + }{ + { + values: []int64{}, + start: 1, + end: 11, + wantP05: 1, wantP20: 2, wantP50: 5, wantP95: 10, wantP99: 10, + wantMin: 1, wantMax: 10, wantMean: 5.5, + }, + { + values: []int64{}, + start: 1, + end: 101, + wantP05: 5, wantP20: 20, wantP50: 50, wantP95: 95, wantP99: 99, + wantMin: 1, wantMax: 100, wantMean: 50.5, + }, + { + values: []int64{1}, + start: 0, + end: 0, + wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 1, wantP99: 1, + wantMin: 1, wantMax: 1, wantMean: 1, + }, + { + values: []int64{0}, + start: 0, + end: 0, + wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0, + wantMin: 0, wantMax: 0, wantMean: 0, + }, + { + values: []int64{}, + start: 0, + end: 0, + wantP05: 0, wantP20: 0, wantP50: 0, wantP95: 0, wantP99: 0, + wantMin: 0, wantMax: 0, wantMean: 0, + }, + { + values: []int64{1, 10}, + start: 0, + end: 0, + wantP05: 1, wantP20: 1, wantP50: 1, wantP95: 10, wantP99: 10, + wantMin: 1, wantMax: 10, wantMean: 5.5, + }, + } + for ind, tt := range tests { + timer := NewResettingTimer() + + for i := tt.start; i < tt.end; i++ { + tt.values = append(tt.values, int64(i)) + } + + for _, v := range tt.values { + timer.Update(time.Duration(v)) + } + + snap := timer.Snapshot() + + ps := snap.Percentiles([]float64{5, 20, 50, 95, 99}) + + val := snap.Values() + + if len(val) > 0 { + if tt.wantMin != val[0] { + t.Fatalf("%d: min: got %d, want %d", ind, val[0], tt.wantMin) + } + + if tt.wantMax != val[len(val)-1] { + t.Fatalf("%d: max: got %d, want %d", ind, val[len(val)-1], tt.wantMax) + } + } + + if tt.wantMean != snap.Mean() { + t.Fatalf("%d: mean: got %.2f, want %.2f", ind, snap.Mean(), tt.wantMean) + } + + if tt.wantP05 != ps[0] { + t.Fatalf("%d: p05: got %d, want %d", ind, ps[0], tt.wantP05) + } + + if tt.wantP20 != ps[1] { + t.Fatalf("%d: p20: got %d, want %d", ind, ps[1], tt.wantP20) + } + + if tt.wantP50 != ps[2] { + t.Fatalf("%d: p50: got %d, want %d", ind, ps[2], tt.wantP50) + } + + if tt.wantP95 != ps[3] { + t.Fatalf("%d: p95: got %d, want %d", ind, ps[3], tt.wantP95) + } + + if tt.wantP99 != ps[4] { + t.Fatalf("%d: p99: got %d, want %d", ind, ps[4], tt.wantP99) + } + } +} diff --git a/metrics/timer_test.go b/metrics/timer_test.go index c1f0ff9388..8638a2270b 100644 --- a/metrics/timer_test.go +++ b/metrics/timer_test.go @@ -47,8 +47,8 @@ func TestTimerStop(t *testing.T) { func TestTimerFunc(t *testing.T) { tm := NewTimer() tm.Time(func() { time.Sleep(50e6) }) - if max := tm.Max(); 35e6 > max || max > 95e6 { - t.Errorf("tm.Max(): 35e6 > %v || %v > 95e6\n", max, max) + if max := tm.Max(); 35e6 > max || max > 145e6 { + t.Errorf("tm.Max(): 35e6 > %v || %v > 145e6\n", max, max) } } diff --git 
a/miner/worker.go b/miner/worker.go index 48b0b27652..ff48ecabc3 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -42,7 +42,7 @@ const ( resultQueueSize = 10 miningLogAtDepth = 5 - // txChanSize is the size of channel listening to TxPreEvent. + // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 // chainHeadChanSize is the size of channel listening to ChainHeadEvent. @@ -71,6 +71,7 @@ type Work struct { family *set.Set // family set (used for checking uncle invalidity) uncles *set.Set // uncle set tcount int // tx count in cycle + gasPool *core.GasPool // available gas used to pack transactions Block *types.Block // the new block @@ -95,8 +96,8 @@ type worker struct { // update loop mux *event.TypeMux - txCh chan core.TxPreEvent - txSub event.Subscription + txsCh chan core.NewTxsEvent + txsSub event.Subscription chainHeadCh chan core.ChainHeadEvent chainHeadSub event.Subscription chainSideCh chan core.ChainSideEvent @@ -137,7 +138,7 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com engine: engine, eth: eth, mux: mux, - txCh: make(chan core.TxPreEvent, txChanSize), + txsCh: make(chan core.NewTxsEvent, txChanSize), chainHeadCh: make(chan core.ChainHeadEvent, chainHeadChanSize), chainSideCh: make(chan core.ChainSideEvent, chainSideChanSize), chainDb: eth.ChainDb(), @@ -149,8 +150,8 @@ func newWorker(config *params.ChainConfig, engine consensus.Engine, coinbase com agents: make(map[Agent]struct{}), unconfirmed: newUnconfirmedBlocks(eth.BlockChain(), miningLogAtDepth), } - // Subscribe TxPreEvent for tx pool - worker.txSub = eth.TxPool().SubscribeTxPreEvent(worker.txCh) + // Subscribe NewTxsEvent for tx pool + worker.txsSub = eth.TxPool().SubscribeNewTxsEvent(worker.txsCh) // Subscribe events for blockchain worker.chainHeadSub = eth.BlockChain().SubscribeChainHeadEvent(worker.chainHeadCh) worker.chainSideSub = eth.BlockChain().SubscribeChainSideEvent(worker.chainSideCh) @@ -241,7 +242,7 @@ func (self *worker) unregister(agent Agent) { } func (self *worker) update() { - defer self.txSub.Unsubscribe() + defer self.txsSub.Unsubscribe() defer self.chainHeadSub.Unsubscribe() defer self.chainSideSub.Unsubscribe() @@ -258,15 +259,21 @@ func (self *worker) update() { self.possibleUncles[ev.Block.Hash()] = ev.Block self.uncleMu.Unlock() - // Handle TxPreEvent - case ev := <-self.txCh: - // Apply transaction to the pending state if we're not mining + // Handle NewTxsEvent + case ev := <-self.txsCh: + // Apply transactions to the pending state if we're not mining. + // + // Note that the received transactions may not be contiguous with the + // transactions already included in the current mining block; such + // transactions are discarded automatically.
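The hunk continues below by folding the batched event payload into a per-sender map before re-sorting everything by price and nonce. A simplified, self-contained sketch of that grouping step (Tx and its From field are stand-ins for types.Transaction and the address recovered via types.Sender):

package main

import "fmt"

// Tx is a stand-in for types.Transaction.
type Tx struct {
	From  string
	Nonce uint64
}

// groupBySender folds a batch of announced transactions into per-account
// lists, mirroring what the worker now does with a NewTxsEvent payload.
func groupBySender(batch []Tx) map[string][]Tx {
	txs := make(map[string][]Tx)
	for _, tx := range batch {
		txs[tx.From] = append(txs[tx.From], tx)
	}
	return txs
}

func main() {
	batch := []Tx{{"alice", 1}, {"bob", 7}, {"alice", 2}}
	fmt.Println(groupBySender(batch)) // map[alice:[{alice 1} {alice 2}] bob:[{bob 7}]]
}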
if atomic.LoadInt32(&self.mining) == 0 { self.currentMu.Lock() - acc, _ := types.Sender(self.current.signer, ev.Tx) - txs := map[common.Address]types.Transactions{acc: {ev.Tx}} + txs := make(map[common.Address]types.Transactions) + for _, tx := range ev.Txs { + acc, _ := types.Sender(self.current.signer, tx) + txs[acc] = append(txs[acc], tx) + } txset := types.NewTransactionsByPriceAndNonce(self.current.signer, txs) - self.current.commitTransactions(self.mux, txset, self.chain, self.coinbase) self.updateSnapshot() self.currentMu.Unlock() @@ -278,7 +285,7 @@ func (self *worker) update() { } // System stopped - case <-self.txSub.Err(): + case <-self.txsSub.Err(): return case <-self.chainHeadSub.Err(): return @@ -290,7 +297,6 @@ func (self *worker) update() { func (self *worker) wait() { for { - mustCommitNewWork := true for result := range self.recv { atomic.AddInt32(&self.atWork, -1) @@ -315,11 +321,6 @@ func (self *worker) wait() { log.Error("Failed writing block to chain", "err", err) continue } - // check if canon block and write transactions - if stat == core.CanonStatTy { - // implicit by posting ChainHeadEvent - mustCommitNewWork = false - } // Broadcast the block and announce chain insertion event self.mux.Post(core.NewMinedBlockEvent{Block: block}) var ( @@ -334,10 +335,6 @@ func (self *worker) wait() { // Insert the block into the set of pending ones to wait for confirmations self.unconfirmed.Insert(block.NumberU64(), block.Hash()) - - if mustCommitNewWork { - self.commitNewWork() - } } } } @@ -363,7 +360,7 @@ func (self *worker) makeCurrent(parent *types.Block, header *types.Header) error } work := &Work{ config: self.config, - signer: types.NewEIP155Signer(self.config.ChainId), + signer: types.NewEIP155Signer(self.config.ChainID), state: state, ancestors: set.New(), family: set.New(), @@ -522,14 +519,16 @@ func (self *worker) updateSnapshot() { } func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsByPriceAndNonce, bc *core.BlockChain, coinbase common.Address) { - gp := new(core.GasPool).AddGas(env.header.GasLimit) + if env.gasPool == nil { + env.gasPool = new(core.GasPool).AddGas(env.header.GasLimit) + } var coalescedLogs []*types.Log for { // If we don't have enough gas for any further transactions then we're done - if gp.Gas() < params.TxGas { - log.Trace("Not enough gas for further transactions", "gp", gp) + if env.gasPool.Gas() < params.TxGas { + log.Trace("Not enough gas for further transactions", "have", env.gasPool, "want", params.TxGas) break } // Retrieve the next transaction and abort if all done @@ -553,7 +552,7 @@ func (env *Work) commitTransactions(mux *event.TypeMux, txs *types.TransactionsB // Start executing the transaction env.state.Prepare(tx.Hash(), common.Hash{}, env.tcount) - err, logs := env.commitTransaction(tx, bc, coinbase, gp) + err, logs := env.commitTransaction(tx, bc, coinbase, env.gasPool) switch err { case core.ErrGasLimitReached: // Pop the current out-of-gas transaction without shifting in the next from the account diff --git a/mobile/ethereum.go b/mobile/ethereum.go index 0eb1d90552..35a43d274d 100644 --- a/mobile/ethereum.go +++ b/mobile/ethereum.go @@ -125,12 +125,12 @@ func (t *Topics) Append(topics *Hashes) { t.topics = append(t.topics, topics.hashes) } -// FilterQuery contains options for contact log filtering. +// FilterQuery contains options for contract log filtering. type FilterQuery struct { query ethereum.FilterQuery } -// NewFilterQuery creates an empty filter query for contact log filtering. 
+// NewFilterQuery creates an empty filter query for contract log filtering. func NewFilterQuery() *FilterQuery { return new(FilterQuery) } diff --git a/mobile/geth.go b/mobile/geth.go index 645b360eb3..63ef062345 100644 --- a/mobile/geth.go +++ b/mobile/geth.go @@ -193,7 +193,7 @@ func (n *Node) Start() error { return n.node.Start() } -// Stop terminates a running node along with all it's services. In the node was +// Stop terminates a running node along with all its services. If the node was // not started, an error is returned. func (n *Node) Stop() error { return n.node.Stop() } diff --git a/node/api.go b/node/api.go index a3b8bc0bb5..117b76b6d8 100644 --- a/node/api.go +++ b/node/api.go @@ -217,7 +217,7 @@ func (api *PrivateAdminAPI) StartWS(host *string, port *int, allowedOrigins *str return true, nil } -// StopRPC terminates an already running websocket RPC API endpoint. +// StopWS terminates an already running websocket RPC API endpoint. func (api *PrivateAdminAPI) StopWS() (bool, error) { api.node.lock.Lock() defer api.node.lock.Unlock() @@ -338,6 +338,21 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { }, } + case metrics.ResettingTimer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{5, 20, 50, 80, 95}) + root[name] = map[string]interface{}{ + "Measurements": len(t.Values()), + "Mean": t.Mean(), + "Percentiles": map[string]interface{}{ + "5": ps[0], + "20": ps[1], + "50": ps[2], + "80": ps[3], + "95": ps[4], + }, + } + default: root[name] = "Unknown metric type" } @@ -373,6 +388,21 @@ func (api *PublicDebugAPI) Metrics(raw bool) (map[string]interface{}, error) { }, } + case metrics.ResettingTimer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{5, 20, 50, 80, 95}) + root[name] = map[string]interface{}{ + "Measurements": len(t.Values()), + "Mean": time.Duration(t.Mean()).String(), + "Percentiles": map[string]interface{}{ + "5": time.Duration(ps[0]).String(), + "20": time.Duration(ps[1]).String(), + "50": time.Duration(ps[2]).String(), + "80": time.Duration(ps[3]).String(), + "95": time.Duration(ps[4]).String(), + }, + } + default: root[name] = "Unknown metric type" } diff --git a/node/doc.go b/node/doc.go index d9688e0a12..41a88c19a1 100644 --- a/node/doc.go +++ b/node/doc.go @@ -59,7 +59,7 @@ using the same data directory will store this information in different subdirect the data directory. LevelDB databases are also stored within the instance subdirectory. If multiple node -instances use the same data directory, openening the databases with identical names will +instances use the same data directory, opening the databases with identical names will create one database for each instance. The account key store is shared among all node instances using the same data directory @@ -84,7 +84,7 @@ directory.
Mode instance A opens the database "db", node instance B opens the da static-nodes.json -- devp2p static node list of instance B db/ -- LevelDB content for "db" db-2/ -- LevelDB content for "db-2" - B.ipc -- JSON-RPC UNIX domain socket endpoint of instance A + B.ipc -- JSON-RPC UNIX domain socket endpoint of instance B keystore/ -- account key store, used by both instances */ package node diff --git a/node/node_test.go b/node/node_test.go index 2880efa619..e51900bd1e 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -507,8 +507,8 @@ func TestAPIGather(t *testing.T) { } // Register a batch of services with some configured APIs calls := make(chan string, 1) - makeAPI := func(result string) *OneMethodApi { - return &OneMethodApi{fun: func() { calls <- result }} + makeAPI := func(result string) *OneMethodAPI { + return &OneMethodAPI{fun: func() { calls <- result }} } services := map[string]struct { APIs []rpc.API diff --git a/node/utils_test.go b/node/utils_test.go index 8eddce3ed7..9801b1ed45 100644 --- a/node/utils_test.go +++ b/node/utils_test.go @@ -121,12 +121,12 @@ func InstrumentedServiceMakerC(base ServiceConstructor) ServiceConstructor { return InstrumentingWrapperMaker(base, reflect.TypeOf(InstrumentedServiceC{})) } -// OneMethodApi is a single-method API handler to be returned by test services. -type OneMethodApi struct { +// OneMethodAPI is a single-method API handler to be returned by test services. +type OneMethodAPI struct { fun func() } -func (api *OneMethodApi) TheOneMethod() { +func (api *OneMethodAPI) TheOneMethod() { if api.fun != nil { api.fun() } diff --git a/p2p/discover/table.go b/p2p/discover/table.go index 6509326e69..18920ccfdd 100644 --- a/p2p/discover/table.go +++ b/p2p/discover/table.go @@ -480,16 +480,16 @@ func (tab *Table) doRevalidate(done chan<- struct{}) { b := tab.buckets[bi] if err == nil { // The node responded, move it to the front. - log.Debug("Revalidated node", "b", bi, "id", last.ID) + log.Trace("Revalidated node", "b", bi, "id", last.ID) b.bump(last) return } // No reply received, pick a replacement or delete the node if there aren't // any replacements. 
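The revalidation path that continues below has three outcomes for the least recently seen node of a bucket: bump it, swap in a replacement, or drop it. A condensed sketch of that decision tree, with simplified types standing in for the discover package's internals:

package main

import "fmt"

type node struct{ id string }

type bucket struct {
	entries      []node // live nodes, most recently seen first
	replacements []node // fallback candidates
}

// revalidate inspects the last entry and either keeps it, swaps in a
// replacement, or shrinks the bucket, echoing doRevalidate above.
func revalidate(b *bucket, alive bool) {
	last := b.entries[len(b.entries)-1]
	switch {
	case alive:
		// The node responded: move it back to the front ("bump").
		copy(b.entries[1:], b.entries[:len(b.entries)-1])
		b.entries[0] = last
	case len(b.replacements) > 0:
		b.entries[len(b.entries)-1] = b.replacements[0]
		b.replacements = b.replacements[1:]
		fmt.Println("replaced dead node", last.id)
	default:
		b.entries = b.entries[:len(b.entries)-1]
		fmt.Println("removed dead node", last.id)
	}
}

func main() {
	b := &bucket{entries: []node{{"a"}, {"b"}}, replacements: []node{{"c"}}}
	revalidate(b, false)
	fmt.Println(b.entries) // [{a} {c}]
}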
if r := tab.replace(b, last); r != nil { - log.Debug("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP) + log.Trace("Replaced dead node", "b", bi, "id", last.ID, "ip", last.IP, "r", r.ID, "rip", r.IP) } else { - log.Debug("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP) + log.Trace("Removed dead node", "b", bi, "id", last.ID, "ip", last.IP) } } diff --git a/p2p/discv5/metrics.go b/p2p/discv5/metrics.go new file mode 100644 index 0000000000..cb11d7eacf --- /dev/null +++ b/p2p/discv5/metrics.go @@ -0,0 +1,8 @@ +package discv5 + +import "github.com/ethereum/go-ethereum/metrics" + +var ( + ingressTrafficMeter = metrics.NewRegisteredMeter("discv5/InboundTraffic", nil) + egressTrafficMeter = metrics.NewRegisteredMeter("discv5/OutboundTraffic", nil) +) diff --git a/p2p/discv5/udp.go b/p2p/discv5/udp.go index 09e5f8b374..49e1cb811a 100644 --- a/p2p/discv5/udp.go +++ b/p2p/discv5/udp.go @@ -334,8 +334,10 @@ func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req inter return hash, err } log.Trace(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr)) - if _, err = t.conn.WriteToUDP(packet, toaddr); err != nil { + if nbytes, err := t.conn.WriteToUDP(packet, toaddr); err != nil { log.Trace(fmt.Sprint("UDP send failed:", err)) + } else { + egressTrafficMeter.Mark(int64(nbytes)) } //fmt.Println(err) return hash, err @@ -374,6 +376,7 @@ func (t *udp) readLoop() { buf := make([]byte, 1280) for { nbytes, from, err := t.conn.ReadFromUDP(buf) + ingressTrafficMeter.Mark(int64(nbytes)) if netutil.IsTemporaryError(err) { // Ignore temporary read errors. log.Debug(fmt.Sprintf("Temporary read error: %v", err)) diff --git a/p2p/enr/enr.go b/p2p/enr/enr.go index c018895cc0..48683471d2 100644 --- a/p2p/enr/enr.go +++ b/p2p/enr/enr.go @@ -29,21 +29,16 @@ package enr import ( "bytes" - "crypto/ecdsa" "errors" "fmt" "io" "sort" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/rlp" ) const SizeLimit = 300 // maximum encoded size of a node record in bytes -const ID_SECP256k1_KECCAK = ID("secp256k1-keccak") // the default identity scheme - var ( errNoID = errors.New("unknown or unspecified identity scheme") errInvalidSig = errors.New("invalid signature") @@ -80,8 +75,8 @@ func (r *Record) Seq() uint64 { } // SetSeq updates the record sequence number. This invalidates any signature on the record. -// Calling SetSeq is usually not required because signing the redord increments the -// sequence number. +// Calling SetSeq is usually not required because setting any key in a signed record +// increments the sequence number. func (r *Record) SetSeq(s uint64) { r.signature = nil r.raw = nil @@ -104,33 +99,42 @@ func (r *Record) Load(e Entry) error { return &KeyError{Key: e.ENRKey(), Err: errNotFound} } -// Set adds or updates the given entry in the record. -// It panics if the value can't be encoded. +// Set adds or updates the given entry in the record. It panics if the value can't be +// encoded. If the record is signed, Set increments the sequence number and invalidates +// the signature.
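With the new Set implementation that follows, mutating a signed record no longer corrupts it: the signature is dropped (and the sequence number adjusted) so the record simply has to be re-signed before being published again. A usage sketch against the API this patch introduces; the calls all appear in the diff, but treat the snippet as illustrative:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

func main() {
	key, _ := crypto.GenerateKey()

	var r enr.Record
	r.Set(enr.IP{127, 0, 0, 1})
	if err := enr.SignV4(&r, key); err != nil {
		panic(err)
	}
	fmt.Println("signed:", r.Signed()) // true

	// Updating a key on the signed record invalidates the signature.
	r.Set(enr.UDP(30303))
	fmt.Println("signed:", r.Signed()) // false
}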
func (r *Record) Set(e Entry) { - r.signature = nil - r.raw = nil blob, err := rlp.EncodeToBytes(e) if err != nil { panic(fmt.Errorf("enr: can't encode %s: %v", e.ENRKey(), err)) } + r.invalidate() - i := sort.Search(len(r.pairs), func(i int) bool { return r.pairs[i].k >= e.ENRKey() }) - - if i < len(r.pairs) && r.pairs[i].k == e.ENRKey() { + pairs := make([]pair, len(r.pairs)) + copy(pairs, r.pairs) + i := sort.Search(len(pairs), func(i int) bool { return pairs[i].k >= e.ENRKey() }) + switch { + case i < len(pairs) && pairs[i].k == e.ENRKey(): // element is present at r.pairs[i] - r.pairs[i].v = blob - return - } else if i < len(r.pairs) { + pairs[i].v = blob + case i < len(r.pairs): // insert pair before i-th elem el := pair{e.ENRKey(), blob} - r.pairs = append(r.pairs, pair{}) - copy(r.pairs[i+1:], r.pairs[i:]) - r.pairs[i] = el - return + pairs = append(pairs, pair{}) + copy(pairs[i+1:], pairs[i:]) + pairs[i] = el + default: + // element should be placed at the end of r.pairs + pairs = append(pairs, pair{e.ENRKey(), blob}) } + r.pairs = pairs +} - // element should be placed at the end of r.pairs - r.pairs = append(r.pairs, pair{e.ENRKey(), blob}) +func (r *Record) invalidate() { + if r.signature == nil { + r.seq++ + } + r.signature = nil + r.raw = nil } // EncodeRLP implements rlp.Encoder. Encoding fails if @@ -196,39 +200,55 @@ func (r *Record) DecodeRLP(s *rlp.Stream) error { return err } - // Verify signature. - if err = dec.verifySignature(); err != nil { + _, scheme := dec.idScheme() + if scheme == nil { + return errNoID + } + if err := scheme.Verify(&dec, dec.signature); err != nil { return err } *r = dec return nil } -type s256raw []byte - -func (s256raw) ENRKey() string { return "secp256k1" } - // NodeAddr returns the node address. The return value will be nil if the record is -// unsigned. +// unsigned or uses an unknown identity scheme. func (r *Record) NodeAddr() []byte { - var entry s256raw - if r.Load(&entry) != nil { + _, scheme := r.idScheme() + if scheme == nil { return nil } - return crypto.Keccak256(entry) + return scheme.NodeAddr(r) } -// Sign signs the record with the given private key. It updates the record's identity -// scheme, public key and increments the sequence number. Sign returns an error if the -// encoded record is larger than the size limit. -func (r *Record) Sign(privkey *ecdsa.PrivateKey) error { - r.seq = r.seq + 1 - r.Set(ID_SECP256k1_KECCAK) - r.Set(Secp256k1(privkey.PublicKey)) - return r.signAndEncode(privkey) +// SetSig sets the record signature. It returns an error if the encoded record is larger +// than the size limit or if the signature is invalid according to the passed scheme. +func (r *Record) SetSig(idscheme string, sig []byte) error { + // Check that "id" is set and matches the given scheme. This panics because + // inconsistencies here are always implementation bugs in the signing function calling + // this method. + id, s := r.idScheme() + if s == nil { + panic(errNoID) + } + if id != idscheme { + panic(fmt.Errorf("identity scheme mismatch in Sign: record has %s, want %s", id, idscheme)) + } + + // Verify against the scheme. + if err := s.Verify(r, sig); err != nil { + return err + } + raw, err := r.encode(sig) + if err != nil { + return err + } + r.signature, r.raw = sig, raw + return nil } -func (r *Record) appendPairs(list []interface{}) []interface{} { +// AppendElements appends the sequence number and entries to the given slice.
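SetSig delegates verification to whatever IdentityScheme the record's "id" key names, resolved through a global registry that idscheme.go (further down) populates in init. The registry pattern itself is ordinary Go; a minimal standalone version with illustrative names:

package main

import (
	"fmt"
	"sync"
)

// Verifier is a stand-in for enr's IdentityScheme.
type Verifier interface {
	Verify(payload, sig []byte) error
}

var registry sync.Map // scheme name -> Verifier

// register panics on duplicates, so conflicting schemes surface at init time.
func register(name string, v Verifier) {
	if _, loaded := registry.LoadOrStore(name, v); loaded {
		panic("scheme " + name + " already registered")
	}
}

// find returns nil when no scheme of that name is known.
func find(name string) Verifier {
	v, ok := registry.Load(name)
	if !ok {
		return nil
	}
	return v.(Verifier)
}

type nopScheme struct{}

func (nopScheme) Verify(payload, sig []byte) error { return nil }

func main() {
	register("v4", nopScheme{})
	fmt.Println(find("v4") != nil, find("v5") != nil) // true false
}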
+func (r *Record) AppendElements(list []interface{}) []interface{} { list = append(list, r.seq) for _, p := range r.pairs { list = append(list, p.k, p.v) @@ -236,54 +256,23 @@ func (r *Record) appendPairs(list []interface{}) []interface{} { return list } -func (r *Record) signAndEncode(privkey *ecdsa.PrivateKey) error { - // Put record elements into a flat list. Leave room for the signature. - list := make([]interface{}, 1, len(r.pairs)*2+2) - list = r.appendPairs(list) - - // Sign the tail of the list. - h := sha3.NewKeccak256() - rlp.Encode(h, list[1:]) - sig, err := crypto.Sign(h.Sum(nil), privkey) - if err != nil { - return err +func (r *Record) encode(sig []byte) (raw []byte, err error) { + list := make([]interface{}, 1, 2*len(r.pairs)+1) + list[0] = sig + list = r.AppendElements(list) + if raw, err = rlp.EncodeToBytes(list); err != nil { + return nil, err } - sig = sig[:len(sig)-1] // remove v - - // Put signature in front. - r.signature, list[0] = sig, sig - r.raw, err = rlp.EncodeToBytes(list) - if err != nil { - return err + if len(raw) > SizeLimit { + return nil, errTooBig } - if len(r.raw) > SizeLimit { - return errTooBig - } - return nil + return raw, nil } -func (r *Record) verifySignature() error { - // Get identity scheme, public key, signature. +func (r *Record) idScheme() (string, IdentityScheme) { var id ID - var entry s256raw if err := r.Load(&id); err != nil { - return err - } else if id != ID_SECP256k1_KECCAK { - return errNoID + return "", nil } - if err := r.Load(&entry); err != nil { - return err - } else if len(entry) != 33 { - return fmt.Errorf("invalid public key") - } - - // Verify the signature. - list := make([]interface{}, 0, len(r.pairs)*2+1) - list = r.appendPairs(list) - h := sha3.NewKeccak256() - rlp.Encode(h, list) - if !crypto.VerifySignature(entry, h.Sum(nil), r.signature) { - return errInvalidSig - } - return nil + return string(id), FindIdentityScheme(string(id)) } diff --git a/p2p/enr/enr_test.go b/p2p/enr/enr_test.go index ce7767d105..d1d0887563 100644 --- a/p2p/enr/enr_test.go +++ b/p2p/enr/enr_test.go @@ -54,35 +54,35 @@ func TestGetSetID(t *testing.T) { assert.Equal(t, id, id2) } -// TestGetSetIP4 tests encoding/decoding and setting/getting of the IP4 key. +// TestGetSetIP4 tests encoding/decoding and setting/getting of the IP key. func TestGetSetIP4(t *testing.T) { - ip := IP4{192, 168, 0, 3} + ip := IP{192, 168, 0, 3} var r Record r.Set(ip) - var ip2 IP4 + var ip2 IP require.NoError(t, r.Load(&ip2)) assert.Equal(t, ip, ip2) } -// TestGetSetIP6 tests encoding/decoding and setting/getting of the IP6 key. +// TestGetSetIP6 tests encoding/decoding and setting/getting of the IP key. func TestGetSetIP6(t *testing.T) { - ip := IP6{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68} + ip := IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68} var r Record r.Set(ip) - var ip2 IP6 + var ip2 IP require.NoError(t, r.Load(&ip2)) assert.Equal(t, ip, ip2) } // TestGetSetDiscPort tests encoding/decoding and setting/getting of the DiscPort key. -func TestGetSetDiscPort(t *testing.T) { - port := DiscPort(30309) +func TestGetSetUDP(t *testing.T) { + port := UDP(30309) var r Record r.Set(port) - var port2 DiscPort + var port2 UDP require.NoError(t, r.Load(&port2)) assert.Equal(t, port, port2) } @@ -90,7 +90,7 @@ func TestGetSetDiscPort(t *testing.T) { // TestGetSetSecp256k1 tests encoding/decoding and setting/getting of the Secp256k1 key. 
func TestGetSetSecp256k1(t *testing.T) { var r Record - if err := r.Sign(privkey); err != nil { + if err := SignV4(&r, privkey); err != nil { t.Fatal(err) } @@ -101,16 +101,16 @@ func TestGetSetSecp256k1(t *testing.T) { func TestLoadErrors(t *testing.T) { var r Record - ip4 := IP4{127, 0, 0, 1} + ip4 := IP{127, 0, 0, 1} r.Set(ip4) // Check error for missing keys. - var ip6 IP6 - err := r.Load(&ip6) + var udp UDP + err := r.Load(&udp) if !IsNotFound(err) { t.Error("IsNotFound should return true for missing key") } - assert.Equal(t, &KeyError{Key: ip6.ENRKey(), Err: errNotFound}, err) + assert.Equal(t, &KeyError{Key: udp.ENRKey(), Err: errNotFound}, err) // Check error for invalid keys. var list []uint @@ -174,7 +174,7 @@ func TestDirty(t *testing.T) { t.Errorf("expected errEncodeUnsigned, got %#v", err) } - require.NoError(t, r.Sign(privkey)) + require.NoError(t, SignV4(&r, privkey)) if !r.Signed() { t.Error("Signed return false for signed record") } @@ -194,13 +194,13 @@ func TestDirty(t *testing.T) { func TestGetSetOverwrite(t *testing.T) { var r Record - ip := IP4{192, 168, 0, 3} + ip := IP{192, 168, 0, 3} r.Set(ip) - ip2 := IP4{192, 168, 0, 4} + ip2 := IP{192, 168, 0, 4} r.Set(ip2) - var ip3 IP4 + var ip3 IP require.NoError(t, r.Load(&ip3)) assert.Equal(t, ip2, ip3) } @@ -208,9 +208,9 @@ func TestGetSetOverwrite(t *testing.T) { // TestSignEncodeAndDecode tests signing, RLP encoding and RLP decoding of a record. func TestSignEncodeAndDecode(t *testing.T) { var r Record - r.Set(DiscPort(30303)) - r.Set(IP4{127, 0, 0, 1}) - require.NoError(t, r.Sign(privkey)) + r.Set(UDP(30303)) + r.Set(IP{127, 0, 0, 1}) + require.NoError(t, SignV4(&r, privkey)) blob, err := rlp.EncodeToBytes(r) require.NoError(t, err) @@ -230,12 +230,12 @@ func TestNodeAddr(t *testing.T) { t.Errorf("wrong address on empty record: got %v, want %v", addr, nil) } - require.NoError(t, r.Sign(privkey)) - expected := "caaa1485d83b18b32ed9ad666026151bf0cae8a0a88c857ae2d4c5be2daa6726" + require.NoError(t, SignV4(&r, privkey)) + expected := "a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7" assert.Equal(t, expected, hex.EncodeToString(r.NodeAddr())) } -var pyRecord, _ = hex.DecodeString("f896b840954dc36583c1f4b69ab59b1375f362f06ee99f3723cd77e64b6de6d211c27d7870642a79d4516997f94091325d2a7ca6215376971455fb221d34f35b277149a1018664697363763582765f82696490736563703235366b312d6b656363616b83697034847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd3138") +var pyRecord, _ = hex.DecodeString("f884b8407098ad865b00a582051940cb9cf36836572411a47278783077011599ed5cd16b76f2635f4e234738f30813a89eb9137e3e3df5266e3a1f11df72ecf1145ccb9c01826964827634826970847f00000189736563703235366b31a103ca634cae0d49acb401d8a4c6b6fe8c55b70d115bf400769cc1400f3258cd31388375647082765f") // TestPythonInterop checks that we can decode and verify a record produced by the Python // implementation. 
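The test vector change reflects the new wire format: the unified "ip" key (introduced in the entries.go hunk further down) encodes as 4 content bytes whenever the address fits IPv4 and as 16 bytes otherwise, which hinges on net.IP.To4. A quick standalone check of that behaviour:

package main

import (
	"fmt"
	"net"

	"github.com/ethereum/go-ethereum/rlp"
)

// encodeIP prefers the 4-byte form for IPv4 addresses, mirroring IP.EncodeRLP.
func encodeIP(ip net.IP) ([]byte, error) {
	if ip4 := ip.To4(); ip4 != nil {
		return rlp.EncodeToBytes(ip4)
	}
	return rlp.EncodeToBytes(ip)
}

func main() {
	v4, _ := encodeIP(net.ParseIP("127.0.0.1"))
	v6, _ := encodeIP(net.ParseIP("2001:4860:0:2001::68"))
	// One RLP prefix byte each, then 4 and 16 content bytes.
	fmt.Println(len(v4)-1, len(v6)-1) // 4 16
}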
@@ -246,10 +246,10 @@ func TestPythonInterop(t *testing.T) { } var ( - wantAddr, _ = hex.DecodeString("caaa1485d83b18b32ed9ad666026151bf0cae8a0a88c857ae2d4c5be2daa6726") - wantSeq = uint64(1) - wantIP = IP4{127, 0, 0, 1} - wantDiscport = DiscPort(30303) + wantAddr, _ = hex.DecodeString("a448f24c6d18e575453db13171562b71999873db5b286df957af199ec94617f7") + wantSeq = uint64(1) + wantIP = IP{127, 0, 0, 1} + wantUDP = UDP(30303) ) if r.Seq() != wantSeq { t.Errorf("wrong seq: got %d, want %d", r.Seq(), wantSeq) @@ -257,7 +257,7 @@ func TestPythonInterop(t *testing.T) { if addr := r.NodeAddr(); !bytes.Equal(addr, wantAddr) { t.Errorf("wrong addr: got %x, want %x", addr, wantAddr) } - want := map[Entry]interface{}{new(IP4): &wantIP, new(DiscPort): &wantDiscport} + want := map[Entry]interface{}{new(IP): &wantIP, new(UDP): &wantUDP} for k, v := range want { desc := fmt.Sprintf("loading key %q", k.ENRKey()) if assert.NoError(t, r.Load(k), desc) { @@ -272,14 +272,14 @@ func TestRecordTooBig(t *testing.T) { key := randomString(10) // set a big value for random key, expect error - r.Set(WithEntry(key, randomString(300))) - if err := r.Sign(privkey); err != errTooBig { + r.Set(WithEntry(key, randomString(SizeLimit))) + if err := SignV4(&r, privkey); err != errTooBig { t.Fatalf("expected to get errTooBig, got %#v", err) } // set an acceptable value for random key, expect no error r.Set(WithEntry(key, randomString(100))) - require.NoError(t, r.Sign(privkey)) + require.NoError(t, SignV4(&r, privkey)) } // TestSignEncodeAndDecodeRandom tests encoding/decoding of records containing random key/value pairs. @@ -295,7 +295,7 @@ func TestSignEncodeAndDecodeRandom(t *testing.T) { r.Set(WithEntry(key, &value)) } - require.NoError(t, r.Sign(privkey)) + require.NoError(t, SignV4(&r, privkey)) _, err := rlp.EncodeToBytes(r) require.NoError(t, err) diff --git a/p2p/enr/entries.go b/p2p/enr/entries.go index 7591e6effb..71c7653a28 100644 --- a/p2p/enr/entries.go +++ b/p2p/enr/entries.go @@ -57,59 +57,43 @@ func WithEntry(k string, v interface{}) Entry { return &generic{key: k, value: v} } -// DiscPort is the "discv5" key, which holds the UDP port for discovery v5. -type DiscPort uint16 +// TCP is the "tcp" key, which holds the TCP port of the node. +type TCP uint16 -func (v DiscPort) ENRKey() string { return "discv5" } +func (v TCP) ENRKey() string { return "tcp" } + +// UDP is the "udp" key, which holds the UDP port of the node. +type UDP uint16 + +func (v UDP) ENRKey() string { return "udp" } // ID is the "id" key, which holds the name of the identity scheme. type ID string +const IDv4 = ID("v4") // the default identity scheme + func (v ID) ENRKey() string { return "id" } -// IP4 is the "ip4" key, which holds a 4-byte IPv4 address. -type IP4 net.IP +// IP is the "ip" key, which holds the IP address of the node. +type IP net.IP -func (v IP4) ENRKey() string { return "ip4" } +func (v IP) ENRKey() string { return "ip" } // EncodeRLP implements rlp.Encoder. -func (v IP4) EncodeRLP(w io.Writer) error { - ip4 := net.IP(v).To4() - if ip4 == nil { - return fmt.Errorf("invalid IPv4 address: %v", v) +func (v IP) EncodeRLP(w io.Writer) error { + if ip4 := net.IP(v).To4(); ip4 != nil { + return rlp.Encode(w, ip4) } - return rlp.Encode(w, ip4) + return rlp.Encode(w, net.IP(v)) } // DecodeRLP implements rlp.Decoder. 
-func (v *IP4) DecodeRLP(s *rlp.Stream) error { +func (v *IP) DecodeRLP(s *rlp.Stream) error { if err := s.Decode((*net.IP)(v)); err != nil { return err } - if len(*v) != 4 { - return fmt.Errorf("invalid IPv4 address, want 4 bytes: %v", *v) - } - return nil -} - -// IP6 is the "ip6" key, which holds a 16-byte IPv6 address. -type IP6 net.IP - -func (v IP6) ENRKey() string { return "ip6" } - -// EncodeRLP implements rlp.Encoder. -func (v IP6) EncodeRLP(w io.Writer) error { - ip6 := net.IP(v) - return rlp.Encode(w, ip6) -} - -// DecodeRLP implements rlp.Decoder. -func (v *IP6) DecodeRLP(s *rlp.Stream) error { - if err := s.Decode((*net.IP)(v)); err != nil { - return err - } - if len(*v) != 16 { - return fmt.Errorf("invalid IPv6 address, want 16 bytes: %v", *v) + if len(*v) != 4 && len(*v) != 16 { + return fmt.Errorf("invalid IP address, want 4 or 16 bytes: %v", *v) } return nil } diff --git a/p2p/enr/idscheme.go b/p2p/enr/idscheme.go new file mode 100644 index 0000000000..efaf68041d --- /dev/null +++ b/p2p/enr/idscheme.go @@ -0,0 +1,114 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package enr + +import ( + "crypto/ecdsa" + "fmt" + "sync" + + "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto/sha3" + "github.com/ethereum/go-ethereum/rlp" +) + +// Registry of known identity schemes. +var schemes sync.Map + +// An IdentityScheme is capable of verifying record signatures and +// deriving node addresses. +type IdentityScheme interface { + Verify(r *Record, sig []byte) error + NodeAddr(r *Record) []byte +} + +// RegisterIdentityScheme adds an identity scheme to the global registry. +func RegisterIdentityScheme(name string, scheme IdentityScheme) { + if _, loaded := schemes.LoadOrStore(name, scheme); loaded { + panic("identity scheme " + name + " already registered") + } +} + +// FindIdentityScheme resolves name to an identity scheme in the global registry. +func FindIdentityScheme(name string) IdentityScheme { + s, ok := schemes.Load(name) + if !ok { + return nil + } + return s.(IdentityScheme) +} + +// v4ID is the "v4" identity scheme. +type v4ID struct{} + +func init() { + RegisterIdentityScheme("v4", v4ID{}) +} + +// SignV4 signs a record using the v4 scheme. +func SignV4(r *Record, privkey *ecdsa.PrivateKey) error { + // Copy r to avoid modifying it if signing fails. + cpy := *r + cpy.Set(ID("v4")) + cpy.Set(Secp256k1(privkey.PublicKey)) + + h := sha3.NewKeccak256() + rlp.Encode(h, cpy.AppendElements(nil)) + sig, err := crypto.Sign(h.Sum(nil), privkey) + if err != nil { + return err + } + sig = sig[:len(sig)-1] // remove v + if err = cpy.SetSig("v4", sig); err == nil { + *r = cpy + } + return err +} + +// s256raw is an unparsed secp256k1 public key entry.
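The v4 scheme's NodeAddr, defined just below, derives the node address as keccak256 over the uncompressed public key coordinates. The same computation spelled out standalone (assuming geth's crypto and common/math packages):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, _ := crypto.GenerateKey()

	// Serialize X||Y as two zero-padded 32-byte words, then hash.
	buf := make([]byte, 64)
	math.ReadBits(key.PublicKey.X, buf[:32])
	math.ReadBits(key.PublicKey.Y, buf[32:])
	fmt.Printf("node address: %x\n", crypto.Keccak256(buf))
}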
+type s256raw []byte + +func (s256raw) ENRKey() string { return "secp256k1" } + +func (v4ID) Verify(r *Record, sig []byte) error { + var entry s256raw + if err := r.Load(&entry); err != nil { + return err + } else if len(entry) != 33 { + return fmt.Errorf("invalid public key") + } + + h := sha3.NewKeccak256() + rlp.Encode(h, r.AppendElements(nil)) + if !crypto.VerifySignature(entry, h.Sum(nil), sig) { + return errInvalidSig + } + return nil +} + +func (v4ID) NodeAddr(r *Record) []byte { + var pubkey Secp256k1 + err := r.Load(&pubkey) + if err != nil { + return nil + } + buf := make([]byte, 64) + math.ReadBits(pubkey.X, buf[:32]) + math.ReadBits(pubkey.Y, buf[32:]) + return crypto.Keccak256(buf) +} diff --git a/p2p/enr/idscheme_test.go b/p2p/enr/idscheme_test.go new file mode 100644 index 0000000000..d790e12f14 --- /dev/null +++ b/p2p/enr/idscheme_test.go @@ -0,0 +1,36 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package enr + +import ( + "crypto/ecdsa" + "math/big" + "testing" +) + +// Checks that failure to sign leaves the record unmodified. +func TestSignError(t *testing.T) { + invalidKey := &ecdsa.PrivateKey{D: new(big.Int), PublicKey: *pubkey} + + var r Record + if err := SignV4(&r, invalidKey); err == nil { + t.Fatal("expected error from SignV4") + } + if len(r.pairs) > 0 { + t.Fatal("expected empty record, have", r.pairs) + } +} diff --git a/p2p/metrics.go b/p2p/metrics.go index 4cbff90aca..2d52fd1fd1 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -31,10 +31,10 @@ var ( egressTrafficMeter = metrics.NewRegisteredMeter("p2p/OutboundTraffic", nil) ) -// meteredConn is a wrapper around a network TCP connection that meters both the +// meteredConn is a wrapper around a net.Conn that meters both the // inbound and outbound network traffic. type meteredConn struct { - *net.TCPConn // Network connection to wrap with metering + net.Conn // Network connection to wrap with metering } // newMeteredConn creates a new metered connection, also bumping the ingress or @@ -51,13 +51,13 @@ func newMeteredConn(conn net.Conn, ingress bool) net.Conn { } else { egressConnectMeter.Mark(1) } - return &meteredConn{conn.(*net.TCPConn)} + return &meteredConn{Conn: conn} } // Read delegates a network read to the underlying connection, bumping the ingress // traffic meter along the way. func (c *meteredConn) Read(b []byte) (n int, err error) { - n, err = c.TCPConn.Read(b) + n, err = c.Conn.Read(b) ingressTrafficMeter.Mark(int64(n)) return } @@ -65,7 +65,7 @@ func (c *meteredConn) Read(b []byte) (n int, err error) { // Write delegates a network write to the underlying connection, bumping the // egress traffic meter along the way.
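Switching meteredConn from embedding *net.TCPConn to the net.Conn interface is what lets non-TCP transports (such as the simulation pipes) flow through the same metering path. The wrapper pattern in generic form, with plain counters instead of the metrics meters:

package main

import (
	"fmt"
	"net"
	"sync/atomic"
)

// countingConn wraps any net.Conn and tallies the bytes moved through it;
// embedding forwards every method we don't override.
type countingConn struct {
	net.Conn
	in, out int64
}

func (c *countingConn) Read(b []byte) (int, error) {
	n, err := c.Conn.Read(b)
	atomic.AddInt64(&c.in, int64(n))
	return n, err
}

func (c *countingConn) Write(b []byte) (int, error) {
	n, err := c.Conn.Write(b)
	atomic.AddInt64(&c.out, int64(n))
	return n, err
}

func main() {
	a, b := net.Pipe() // works precisely because no TCP conn is required
	go a.Write([]byte("ping"))
	wrapped := &countingConn{Conn: b}
	buf := make([]byte, 4)
	wrapped.Read(buf)
	fmt.Println(atomic.LoadInt64(&wrapped.in), "bytes in") // 4 bytes in
}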
func (c *meteredConn) Write(b []byte) (n int, err error) { - n, err = c.TCPConn.Write(b) + n, err = c.Conn.Write(b) egressTrafficMeter.Mark(int64(n)) return } diff --git a/p2p/peer.go b/p2p/peer.go index c3907349fc..eb2d34441c 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -17,6 +17,7 @@ package p2p import ( + "errors" "fmt" "io" "net" @@ -31,6 +32,10 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +var ( + ErrShuttingDown = errors.New("shutting down") +) + const ( baseProtocolVersion = 5 baseProtocolLength = uint64(16) @@ -393,7 +398,7 @@ func (rw *protoRW) WriteMsg(msg Msg) (err error) { // as well but we don't want to rely on that. rw.werr <- err case <-rw.closed: - err = fmt.Errorf("shutting down") + err = ErrShuttingDown } return err } diff --git a/p2p/protocols/protocol.go b/p2p/protocols/protocol.go index 9914c99587..d5c0375ac7 100644 --- a/p2p/protocols/protocol.go +++ b/p2p/protocols/protocol.go @@ -31,9 +31,13 @@ package protocols import ( "context" "fmt" + "io" "reflect" "sync" + "time" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" ) @@ -200,6 +204,11 @@ func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer { func (p *Peer) Run(handler func(msg interface{}) error) error { for { if err := p.handleIncoming(handler); err != nil { + if err != io.EOF { + metrics.GetOrRegisterCounter("peer.handleincoming.error", nil).Inc(1) + log.Error("peer.handleIncoming", "err", err) + } + return err } } @@ -217,6 +226,8 @@ func (p *Peer) Drop(err error) { // this low level call will be wrapped by libraries providing routed or broadcast sends // but often just used to forward and push messages to directly connected peers func (p *Peer) Send(msg interface{}) error { + defer metrics.GetOrRegisterResettingTimer("peer.send_t", nil).UpdateSince(time.Now()) + metrics.GetOrRegisterCounter("peer.send", nil).Inc(1) code, found := p.spec.GetCode(msg) if !found { return errorf(ErrInvalidMsgType, "%v", code) diff --git a/p2p/protocols/protocol_test.go b/p2p/protocols/protocol_test.go index 053f537a62..aaae7502b5 100644 --- a/p2p/protocols/protocol_test.go +++ b/p2p/protocols/protocol_test.go @@ -373,15 +373,14 @@ WAIT: } } - -func TestMultiplePeersDropSelf(t *testing.T) { +func XTestMultiplePeersDropSelf(t *testing.T) { runMultiplePeers(t, 0, fmt.Errorf("subprotocol error"), fmt.Errorf("Message handler error: (msg code 3): dropped"), ) } -func TestMultiplePeersDropOther(t *testing.T) { +func XTestMultiplePeersDropOther(t *testing.T) { runMultiplePeers(t, 1, fmt.Errorf("Message handler error: (msg code 3): dropped"), fmt.Errorf("subprotocol error"), diff --git a/p2p/rlpx.go b/p2p/rlpx.go index a320e81e7c..149eda689c 100644 --- a/p2p/rlpx.go +++ b/p2p/rlpx.go @@ -528,9 +528,9 @@ func importPublicKey(pubKey []byte) (*ecies.PublicKey, error) { return nil, fmt.Errorf("invalid public key length %v (expect 64/65)", len(pubKey)) } // TODO: fewer pointless conversions - pub := crypto.ToECDSAPub(pubKey65) - if pub.X == nil { - return nil, fmt.Errorf("invalid public key") + pub, err := crypto.UnmarshalPubkey(pubKey65) + if err != nil { + return nil, err } return ecies.ImportECDSAPublic(pub), nil } diff --git a/p2p/rlpx_test.go b/p2p/rlpx_test.go index bca4604021..7ae8007740 100644 --- a/p2p/rlpx_test.go +++ b/p2p/rlpx_test.go @@ -35,6 +35,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/ecies" "github.com/ethereum/go-ethereum/crypto/sha3" "github.com/ethereum/go-ethereum/p2p/discover" + 
"github.com/ethereum/go-ethereum/p2p/simulations/pipes" "github.com/ethereum/go-ethereum/rlp" ) @@ -159,7 +160,7 @@ func TestProtocolHandshake(t *testing.T) { wg sync.WaitGroup ) - fd0, fd1, err := tcpPipe() + fd0, fd1, err := pipes.TCPPipe() if err != nil { t.Fatal(err) } @@ -601,31 +602,3 @@ func TestHandshakeForwardCompatibility(t *testing.T) { t.Errorf("ingress-mac('foo') mismatch:\ngot %x\nwant %x", fooIngressHash, wantFooIngressHash) } } - -// tcpPipe creates an in process full duplex pipe based on a localhost TCP socket -func tcpPipe() (net.Conn, net.Conn, error) { - l, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - return nil, nil, err - } - defer l.Close() - - var aconn net.Conn - aerr := make(chan error, 1) - go func() { - var err error - aconn, err = l.Accept() - aerr <- err - }() - - dconn, err := net.Dial("tcp", l.Addr().String()) - if err != nil { - <-aerr - return nil, nil, err - } - if err := <-aerr; err != nil { - dconn.Close() - return nil, nil, err - } - return aconn, dconn, nil -} diff --git a/p2p/server.go b/p2p/server.go index c41d1dc156..cdb5b1926e 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -594,13 +594,13 @@ running: // This channel is used by AddPeer to add to the // ephemeral static peer list. Add it to the dialer, // it will keep the node connected. - srv.log.Debug("Adding static node", "node", n) + srv.log.Trace("Adding static node", "node", n) dialstate.addStatic(n) case n := <-srv.removestatic: // This channel is used by RemovePeer to send a // disconnect request to a peer and begin the // stop keeping the node connected - srv.log.Debug("Removing static node", "node", n) + srv.log.Trace("Removing static node", "node", n) dialstate.removeStatic(n) if p, ok := peers[n.ID]; ok { p.Disconnect(DiscRequested) diff --git a/p2p/simulations/adapters/docker.go b/p2p/simulations/adapters/docker.go index 8ef5629fb5..d145c46b3a 100644 --- a/p2p/simulations/adapters/docker.go +++ b/p2p/simulations/adapters/docker.go @@ -28,11 +28,14 @@ import ( "strings" "github.com/docker/docker/pkg/reexec" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p/discover" ) +var ( + ErrLinuxOnly = errors.New("DockerAdapter can only be used on Linux as it uses the current binary (which must be a Linux binary)") +) + // DockerAdapter is a NodeAdapter which runs simulation nodes inside Docker // containers. // @@ -52,7 +55,7 @@ func NewDockerAdapter() (*DockerAdapter, error) { // It is reasonable to require this because the caller can just // compile the current binary in a Docker container. 
if runtime.GOOS != "linux" { - return nil, errors.New("DockerAdapter can only be used on Linux as it uses the current binary (which must be a Linux binary)") + return nil, ErrLinuxOnly } if err := buildDockerImage(); err != nil { @@ -95,7 +98,10 @@ func (d *DockerAdapter) NewNode(config *NodeConfig) (Node, error) { conf.Stack.P2P.NoDiscovery = true conf.Stack.P2P.NAT = nil conf.Stack.NoUSB = true - conf.Stack.Logger = log.New("node.id", config.ID.String()) + + // listen on all interfaces on a given port, which we set when we + // initialise NodeConfig (usually a random port) + conf.Stack.P2P.ListenAddr = fmt.Sprintf(":%d", config.Port) node := &DockerNode{ ExecNode: ExecNode{ diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go index f381c11596..e64cebc2a7 100644 --- a/p2p/simulations/adapters/exec.go +++ b/p2p/simulations/adapters/exec.go @@ -17,6 +17,7 @@ package adapters import ( + "bufio" "context" "crypto/ecdsa" "encoding/json" @@ -103,9 +104,9 @@ func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) { conf.Stack.P2P.NAT = nil conf.Stack.NoUSB = true - // listen on a random localhost port (we'll get the actual port after - // starting the node through the RPC admin.nodeInfo method) - conf.Stack.P2P.ListenAddr = "127.0.0.1:0" + // listen on a localhost port, which we set when we + // initialise NodeConfig (usually a random port) + conf.Stack.P2P.ListenAddr = fmt.Sprintf(":%d", config.Port) node := &ExecNode{ ID: config.ID, @@ -190,9 +191,23 @@ func (n *ExecNode) Start(snapshots map[string][]byte) (err error) { n.Cmd = cmd // read the WebSocket address from the stderr logs - wsAddr, err := findWSAddr(stderrR, 10*time.Second) - if err != nil { - return fmt.Errorf("error getting WebSocket address: %s", err) + var wsAddr string + wsAddrC := make(chan string) + go func() { + s := bufio.NewScanner(stderrR) + for s.Scan() { + if strings.Contains(s.Text(), "WebSocket endpoint opened") { + wsAddrC <- wsAddrPattern.FindString(s.Text()) + } + } + }() + select { + case wsAddr = <-wsAddrC: + if wsAddr == "" { + return errors.New("failed to read WebSocket address from stderr") + } + case <-time.After(10 * time.Second): + return errors.New("timed out waiting for WebSocket address on stderr") } // create the RPC client and load the node info @@ -318,6 +333,21 @@ type execNodeConfig struct { PeerAddrs map[string]string `json:"peer_addrs,omitempty"` } +// ExternalIP gets an external IP address so that Enode URL is usable +func ExternalIP() net.IP { + addrs, err := net.InterfaceAddrs() + if err != nil { + log.Crit("error getting IP address", "err", err) + } + for _, addr := range addrs { + if ip, ok := addr.(*net.IPNet); ok && !ip.IP.IsLoopback() && !ip.IP.IsLinkLocalUnicast() { + return ip.IP + } + } + log.Warn("unable to determine explicit IP address, falling back to loopback") + return net.IP{127, 0, 0, 1} +} + // execP2PNode starts a devp2p node when the current binary is executed with // argv[0] being "p2p-node", reading the service / ID from argv[1] / argv[2] // and the node config from the _P2P_NODE_CONFIG environment variable @@ -341,25 +371,11 @@ func execP2PNode() { conf.Stack.P2P.PrivateKey = conf.Node.PrivateKey conf.Stack.Logger = log.New("node.id", conf.Node.ID.String()) - // use explicit IP address in ListenAddr so that Enode URL is usable - externalIP := func() string { - addrs, err := net.InterfaceAddrs() - if err != nil { - log.Crit("error getting IP address", "err", err) - } - for _, addr := range addrs { - if ip, ok := addr.(*net.IPNet); ok && 
!ip.IP.IsLoopback() { - return ip.IP.String() - } - } - log.Crit("unable to determine explicit IP address") - return "" - } if strings.HasPrefix(conf.Stack.P2P.ListenAddr, ":") { - conf.Stack.P2P.ListenAddr = externalIP() + conf.Stack.P2P.ListenAddr + conf.Stack.P2P.ListenAddr = ExternalIP().String() + conf.Stack.P2P.ListenAddr } if conf.Stack.WSHost == "0.0.0.0" { - conf.Stack.WSHost = externalIP() + conf.Stack.WSHost = ExternalIP().String() } // initialize the devp2p stack diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 6d90b4a9fc..b68d08f392 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -28,12 +28,14 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations/pipes" "github.com/ethereum/go-ethereum/rpc" ) // SimAdapter is a NodeAdapter which creates in-memory simulation nodes and -// connects them using in-memory net.Pipe connections +// connects them using net.Pipe type SimAdapter struct { + pipe func() (net.Conn, net.Conn, error) mtx sync.RWMutex nodes map[discover.NodeID]*SimNode services map[string]ServiceFunc @@ -42,8 +44,18 @@ type SimAdapter struct { // NewSimAdapter creates a SimAdapter which is capable of running in-memory // simulation nodes running any of the given services (the services to run on a // particular node are passed to the NewNode function in the NodeConfig) +// the adapter uses a net.Pipe for in-memory simulated network connections func NewSimAdapter(services map[string]ServiceFunc) *SimAdapter { return &SimAdapter{ + pipe: pipes.NetPipe, nodes: make(map[discover.NodeID]*SimNode), services: services, } } + +func NewTCPAdapter(services map[string]ServiceFunc) *SimAdapter { + return &SimAdapter{ + pipe: pipes.TCPPipe, + nodes: make(map[discover.NodeID]*SimNode), + services: services, + } +} @@ -81,7 +93,7 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) { MaxPeers: math.MaxInt32, NoDiscovery: true, Dialer: s, - EnableMsgEvents: true, + EnableMsgEvents: config.EnableMsgEvents, }, NoUSB: true, Logger: log.New("node.id", id.String()), @@ -102,7 +114,7 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) { } // Dial implements the p2p.NodeDialer interface by connecting to the node using -// an in-memory net.Pipe connection +// an in-memory net.Pipe func (s *SimAdapter) Dial(dest *discover.Node) (conn net.Conn, err error) { node, ok := s.GetNode(dest.ID) if !ok { @@ -112,7 +124,14 @@ func (s *SimAdapter) Dial(dest *discover.Node) (conn net.Conn, err error) { if srv == nil { return nil, fmt.Errorf("node not running: %s", dest.ID) } - pipe1, pipe2 := net.Pipe() + // SimAdapter.pipe is net.Pipe (NewSimAdapter) + pipe1, pipe2, err := s.pipe() + if err != nil { + return nil, err + } + // this is simulated 'listening' + // asynchronously call the dialed destination node's p2p server + // to set up connection on the 'listening' side go srv.SetupConn(pipe1, 0, nil) return pipe2, nil } @@ -140,8 +159,8 @@ func (s *SimAdapter) GetNode(id discover.NodeID) (*SimNode, bool) { } // SimNode is an in-memory simulation node which connects to other nodes using -// an in-memory net.Pipe connection (see SimAdapter.Dial), running devp2p -// protocols directly over that pipe +// net.Pipe (see SimAdapter.Dial), running devp2p protocols directly over that +// pipe type SimNode struct { lock sync.RWMutex ID discover.NodeID @@ -241,7 +260,7 @@ func (sn
*SimNode) Start(snapshots map[string][]byte) error { for _, name := range sn.config.Services { if err := sn.node.Register(newService(name)); err != nil { regErr = err - return + break } } }) @@ -314,3 +333,18 @@ func (sn *SimNode) NodeInfo() *p2p.NodeInfo { } return server.NodeInfo() } + +func setSocketBuffer(conn net.Conn, socketReadBuffer int, socketWriteBuffer int) error { + switch v := conn.(type) { + case *net.UnixConn: + err := v.SetReadBuffer(socketReadBuffer) + if err != nil { + return err + } + err = v.SetWriteBuffer(socketWriteBuffer) + if err != nil { + return err + } + } + return nil +} diff --git a/p2p/simulations/adapters/inproc_test.go b/p2p/simulations/adapters/inproc_test.go new file mode 100644 index 0000000000..e1e092f6e1 --- /dev/null +++ b/p2p/simulations/adapters/inproc_test.go @@ -0,0 +1,259 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package adapters + +import ( + "bytes" + "encoding/binary" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/p2p/simulations/pipes" +) + +func TestTCPPipe(t *testing.T) { + c1, c2, err := pipes.TCPPipe() + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + msgs := 50 + size := 1024 + for i := 0; i < msgs; i++ { + msg := make([]byte, size) + _ = binary.PutUvarint(msg, uint64(i)) + + _, err := c1.Write(msg) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < msgs; i++ { + msg := make([]byte, size) + _ = binary.PutUvarint(msg, uint64(i)) + + out := make([]byte, size) + _, err := c2.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(msg, out) { + t.Fatalf("expected %#v, got %#v", msg, out) + } + } + done <- struct{}{} + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("test timeout") + } +} + +func TestTCPPipeBidirections(t *testing.T) { + c1, c2, err := pipes.TCPPipe() + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + msgs := 50 + size := 7 + for i := 0; i < msgs; i++ { + msg := []byte(fmt.Sprintf("ping %02d", i)) + + _, err := c1.Write(msg) + if err != nil { + t.Fatal(err) + } + } + + for i := 0; i < msgs; i++ { + expected := []byte(fmt.Sprintf("ping %02d", i)) + + out := make([]byte, size) + _, err := c2.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, out) { + t.Fatalf("expected %#v, got %#v", out, expected) + } else { + msg := []byte(fmt.Sprintf("pong %02d", i)) + _, err := c2.Write(msg) + if err != nil { + t.Fatal(err) + } + } + } + + for i := 0; i < msgs; i++ { + expected := []byte(fmt.Sprintf("pong %02d", i)) + + out := make([]byte, size) + _, err := c1.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, out) { + t.Fatalf("expected %#v, got %#v", expected, out) + } + } + done <- struct{}{} + }() + + select { + case
<-done: + case <-time.After(5 * time.Second): + t.Fatal("test timeout") + } +} + +func TestNetPipe(t *testing.T) { + c1, c2, err := pipes.NetPipe() + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + msgs := 50 + size := 1024 + // netPipe is blocking, so writes are emitted asynchronously + go func() { + for i := 0; i < msgs; i++ { + msg := make([]byte, size) + _ = binary.PutUvarint(msg, uint64(i)) + + _, err := c1.Write(msg) + if err != nil { + t.Fatal(err) + } + } + }() + + for i := 0; i < msgs; i++ { + msg := make([]byte, size) + _ = binary.PutUvarint(msg, uint64(i)) + + out := make([]byte, size) + _, err := c2.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(msg, out) { + t.Fatalf("expected %#v, got %#v", msg, out) + } + } + + done <- struct{}{} + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("test timeout") + } +} + +func TestNetPipeBidirections(t *testing.T) { + c1, c2, err := pipes.NetPipe() + if err != nil { + t.Fatal(err) + } + + done := make(chan struct{}) + + go func() { + msgs := 1000 + size := 8 + pingTemplate := "ping %03d" + pongTemplate := "pong %03d" + + // netPipe is blocking, so writes are emitted asynchronously + go func() { + for i := 0; i < msgs; i++ { + msg := []byte(fmt.Sprintf(pingTemplate, i)) + + _, err := c1.Write(msg) + if err != nil { + t.Fatal(err) + } + } + }() + + // netPipe is blocking, so reads for pong are emitted asynchronously + go func() { + for i := 0; i < msgs; i++ { + expected := []byte(fmt.Sprintf(pongTemplate, i)) + + out := make([]byte, size) + _, err := c1.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, out) { + t.Fatalf("expected %#v, got %#v", expected, out) + } + } + + done <- struct{}{} + }() + + // expect to read pings, and respond with pongs to the alternate connection + for i := 0; i < msgs; i++ { + expected := []byte(fmt.Sprintf(pingTemplate, i)) + + out := make([]byte, size) + _, err := c2.Read(out) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, out) { + t.Fatalf("expected %#v, got %#v", expected, out) + } else { + msg := []byte(fmt.Sprintf(pongTemplate, i)) + + _, err := c2.Write(msg) + if err != nil { + t.Fatal(err) + } + } + } + }() + + select { + case <-done: + case <-time.After(5 * time.Second): + t.Fatal("test timeout") + } +} diff --git a/p2p/simulations/adapters/types.go b/p2p/simulations/adapters/types.go index 5b4b47fe2f..2c4b9dd8f2 100644 --- a/p2p/simulations/adapters/types.go +++ b/p2p/simulations/adapters/types.go @@ -23,6 +23,7 @@ import ( "fmt" "net" "os" + "strconv" "github.com/docker/docker/pkg/reexec" "github.com/ethereum/go-ethereum/crypto" @@ -97,24 +98,30 @@ type NodeConfig struct { // function to sanction or prevent suggesting a peer Reachable func(id discover.NodeID) bool + + Port uint16 } // nodeConfigJSON is used to encode and decode NodeConfig as JSON by encoding // all fields as strings type nodeConfigJSON struct { - ID string `json:"id"` - PrivateKey string `json:"private_key"` - Name string `json:"name"` - Services []string `json:"services"` + ID string `json:"id"` + PrivateKey string `json:"private_key"` + Name string `json:"name"` + Services []string `json:"services"` + EnableMsgEvents bool `json:"enable_msg_events"` + Port uint16 `json:"port"` } // MarshalJSON implements the json.Marshaler interface by encoding the config // fields as strings func (n *NodeConfig) MarshalJSON() ([]byte, error) { confJSON := nodeConfigJSON{ - ID: n.ID.String(), - Name: n.Name, - Services: 
n.Services, + ID: n.ID.String(), + Name: n.Name, + Services: n.Services, + Port: n.Port, + EnableMsgEvents: n.EnableMsgEvents, } if n.PrivateKey != nil { confJSON.PrivateKey = hex.EncodeToString(crypto.FromECDSA(n.PrivateKey)) @@ -152,6 +159,8 @@ func (n *NodeConfig) UnmarshalJSON(data []byte) error { n.Name = confJSON.Name n.Services = confJSON.Services + n.Port = confJSON.Port + n.EnableMsgEvents = confJSON.EnableMsgEvents return nil } @@ -163,13 +172,36 @@ func RandomNodeConfig() *NodeConfig { if err != nil { panic("unable to generate key") } - var id discover.NodeID - pubkey := crypto.FromECDSAPub(&key.PublicKey) - copy(id[:], pubkey[1:]) - return &NodeConfig{ - ID: id, - PrivateKey: key, + + id := discover.PubkeyID(&key.PublicKey) + port, err := assignTCPPort() + if err != nil { + panic("unable to assign tcp port") } + return &NodeConfig{ + ID: id, + Name: fmt.Sprintf("node_%s", id.String()), + PrivateKey: key, + Port: port, + EnableMsgEvents: true, + } +} + +func assignTCPPort() (uint16, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return 0, err + } + l.Close() + _, port, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + return 0, err + } + p, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return 0, err + } + return uint16(p), nil } // ServiceContext is a collection of options and methods which can be utilised diff --git a/p2p/simulations/http.go b/p2p/simulations/http.go index 97dd742e88..24001f1949 100644 --- a/p2p/simulations/http.go +++ b/p2p/simulations/http.go @@ -561,7 +561,8 @@ func (s *Server) LoadSnapshot(w http.ResponseWriter, req *http.Request) { // CreateNode creates a node in the network using the given configuration func (s *Server) CreateNode(w http.ResponseWriter, req *http.Request) { - config := adapters.RandomNodeConfig() + config := &adapters.NodeConfig{} + err := json.NewDecoder(req.Body).Decode(config) if err != nil && err != io.EOF { http.Error(w, err.Error(), http.StatusBadRequest) diff --git a/p2p/simulations/http_test.go b/p2p/simulations/http_test.go index 677a8fb147..732d49f546 100644 --- a/p2p/simulations/http_test.go +++ b/p2p/simulations/http_test.go @@ -348,7 +348,8 @@ func startTestNetwork(t *testing.T, client *Client) []string { nodeCount := 2 nodeIDs := make([]string, nodeCount) for i := 0; i < nodeCount; i++ { - node, err := client.CreateNode(nil) + config := adapters.RandomNodeConfig() + node, err := client.CreateNode(config) if err != nil { t.Fatalf("error creating node: %s", err) } @@ -527,7 +528,9 @@ func TestHTTPNodeRPC(t *testing.T) { // start a node in the network client := NewClient(s.URL) - node, err := client.CreateNode(nil) + + config := adapters.RandomNodeConfig() + node, err := client.CreateNode(config) if err != nil { t.Fatalf("error creating node: %s", err) } @@ -589,7 +592,8 @@ func TestHTTPSnapshot(t *testing.T) { nodeCount := 2 nodes := make([]*p2p.NodeInfo, nodeCount) for i := 0; i < nodeCount; i++ { - node, err := client.CreateNode(nil) + config := adapters.RandomNodeConfig() + node, err := client.CreateNode(config) if err != nil { t.Fatalf("error creating node: %s", err) } diff --git a/p2p/simulations/mocker.go b/p2p/simulations/mocker.go index c38e288552..389b1e3ec3 100644 --- a/p2p/simulations/mocker.go +++ b/p2p/simulations/mocker.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/p2p/simulations/adapters" ) //a map of mocker names to its function @@ -102,7 +103,13 @@ func 
startStop(net *Network, quit chan struct{}, nodeCount int) { func probabilistic(net *Network, quit chan struct{}, nodeCount int) { nodes, err := connectNodesInRing(net, nodeCount) if err != nil { - panic("Could not startup node network for mocker") + select { + case <-quit: + //error may be due to abortion of mocking; so the quit channel is closed + return + default: + panic("Could not startup node network for mocker") + } } for { select { @@ -143,7 +150,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) { log.Debug(fmt.Sprintf("node %v shutting down", nodes[i])) err := net.Stop(nodes[i]) if err != nil { - log.Error(fmt.Sprintf("Error stopping node %s", nodes[i])) + log.Error("Error stopping node", "node", nodes[i]) wg.Done() continue } @@ -151,7 +158,7 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) { time.Sleep(randWait) err := net.Start(id) if err != nil { - log.Error(fmt.Sprintf("Error starting node %s", id)) + log.Error("Error starting node", "node", id) } wg.Done() }(nodes[i]) @@ -165,9 +172,10 @@ func probabilistic(net *Network, quit chan struct{}, nodeCount int) { func connectNodesInRing(net *Network, nodeCount int) ([]discover.NodeID, error) { ids := make([]discover.NodeID, nodeCount) for i := 0; i < nodeCount; i++ { - node, err := net.NewNode() + conf := adapters.RandomNodeConfig() + node, err := net.NewNodeWithConfig(conf) if err != nil { - log.Error("Error creating a node! %s", err) + log.Error("Error creating a node!", "err", err) return nil, err } ids[i] = node.ID() @@ -175,7 +183,7 @@ func connectNodesInRing(net *Network, nodeCount int) ([]discover.NodeID, error) for _, id := range ids { if err := net.Start(id); err != nil { - log.Error("Error starting a node! %s", err) + log.Error("Error starting a node!", "err", err) return nil, err } log.Debug(fmt.Sprintf("node %v starting up", id)) @@ -183,7 +191,7 @@ func connectNodesInRing(net *Network, nodeCount int) ([]discover.NodeID, error) for i, id := range ids { peerID := ids[(i+1)%len(ids)] if err := net.Connect(id, peerID); err != nil { - log.Error("Error connecting a node to a peer! 
%s", err) + log.Error("Error connecting a node to a peer!", "err", err) return nil, err } } diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go index 1a2c1e8ff4..0fb7485ad0 100644 --- a/p2p/simulations/network.go +++ b/p2p/simulations/network.go @@ -31,7 +31,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/simulations/adapters" ) -var dialBanTimeout = 200 * time.Millisecond +var DialBanTimeout = 200 * time.Millisecond // NetworkConfig defines configuration options for starting a Network type NetworkConfig struct { @@ -78,41 +78,25 @@ func (net *Network) Events() *event.Feed { return &net.events } -// NewNode adds a new node to the network with a random ID -func (net *Network) NewNode() (*Node, error) { - conf := adapters.RandomNodeConfig() - conf.Services = []string{net.DefaultService} - return net.NewNodeWithConfig(conf) -} - // NewNodeWithConfig adds a new node to the network with the given config, // returning an error if a node with the same ID or name already exists func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) { net.lock.Lock() defer net.lock.Unlock() - // create a random ID and PrivateKey if not set - if conf.ID == (discover.NodeID{}) { - c := adapters.RandomNodeConfig() - conf.ID = c.ID - conf.PrivateKey = c.PrivateKey - } - id := conf.ID if conf.Reachable == nil { conf.Reachable = func(otherID discover.NodeID) bool { _, err := net.InitConn(conf.ID, otherID) - return err == nil + if err != nil && bytes.Compare(conf.ID.Bytes(), otherID.Bytes()) < 0 { + return false + } + return true } } - // assign a name to the node if not set - if conf.Name == "" { - conf.Name = fmt.Sprintf("node%02d", len(net.Nodes)+1) - } - // check the node doesn't already exist - if node := net.getNode(id); node != nil { - return nil, fmt.Errorf("node with ID %q already exists", id) + if node := net.getNode(conf.ID); node != nil { + return nil, fmt.Errorf("node with ID %q already exists", conf.ID) } if node := net.getNodeByName(conf.Name); node != nil { return nil, fmt.Errorf("node with name %q already exists", conf.Name) @@ -132,8 +116,8 @@ func (net *Network) NewNodeWithConfig(conf *adapters.NodeConfig) (*Node, error) Node: adapterNode, Config: conf, } - log.Trace(fmt.Sprintf("node %v created", id)) - net.nodeMap[id] = len(net.Nodes) + log.Trace(fmt.Sprintf("node %v created", conf.ID)) + net.nodeMap[conf.ID] = len(net.Nodes) net.Nodes = append(net.Nodes, node) // emit a "control" event @@ -181,7 +165,9 @@ func (net *Network) Start(id discover.NodeID) error { // startWithSnapshots starts the node with the given ID using the give // snapshots func (net *Network) startWithSnapshots(id discover.NodeID, snapshots map[string][]byte) error { - node := net.GetNode(id) + net.lock.Lock() + defer net.lock.Unlock() + node := net.getNode(id) if node == nil { return fmt.Errorf("node %v does not exist", id) } @@ -220,9 +206,13 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve // assume the node is now down net.lock.Lock() + defer net.lock.Unlock() node := net.getNode(id) + if node == nil { + log.Error("Can not find node for id", "id", id) + return + } node.Up = false - net.lock.Unlock() net.events.Send(NewEvent(node)) }() for { @@ -259,7 +249,9 @@ func (net *Network) watchPeerEvents(id discover.NodeID, events chan *p2p.PeerEve // Stop stops the node with the given ID func (net *Network) Stop(id discover.NodeID) error { - node := net.GetNode(id) + net.lock.Lock() + defer net.lock.Unlock() + node := net.getNode(id) if node == nil { return 
fmt.Errorf("node %v does not exist", id) } @@ -312,7 +304,9 @@ func (net *Network) Disconnect(oneID, otherID discover.NodeID) error { // DidConnect tracks the fact that the "one" node connected to the "other" node func (net *Network) DidConnect(one, other discover.NodeID) error { - conn, err := net.GetOrCreateConn(one, other) + net.lock.Lock() + defer net.lock.Unlock() + conn, err := net.getOrCreateConn(one, other) if err != nil { return fmt.Errorf("connection between %v and %v does not exist", one, other) } @@ -327,7 +321,9 @@ func (net *Network) DidConnect(one, other discover.NodeID) error { // DidDisconnect tracks the fact that the "one" node disconnected from the // "other" node func (net *Network) DidDisconnect(one, other discover.NodeID) error { - conn := net.GetConn(one, other) + net.lock.Lock() + defer net.lock.Unlock() + conn := net.getConn(one, other) if conn == nil { return fmt.Errorf("connection between %v and %v does not exist", one, other) } @@ -335,7 +331,7 @@ func (net *Network) DidDisconnect(one, other discover.NodeID) error { return fmt.Errorf("%v and %v already disconnected", one, other) } conn.Up = false - conn.initiated = time.Now().Add(-dialBanTimeout) + conn.initiated = time.Now().Add(-DialBanTimeout) net.events.Send(NewEvent(conn)) return nil } @@ -382,6 +378,15 @@ func (net *Network) GetNodeByName(name string) *Node { return net.getNodeByName(name) } +// GetNodes returns the existing nodes +func (net *Network) GetNodes() (nodes []*Node) { + net.lock.Lock() + defer net.lock.Unlock() + + nodes = append(nodes, net.Nodes...) + return nodes +} + func (net *Network) getNode(id discover.NodeID) *Node { i, found := net.nodeMap[id] if !found { @@ -399,15 +404,6 @@ func (net *Network) getNodeByName(name string) *Node { return nil } -// GetNodes returns the existing nodes -func (net *Network) GetNodes() (nodes []*Node) { - net.lock.Lock() - defer net.lock.Unlock() - - nodes = append(nodes, net.Nodes...) 
- return nodes -} - // GetConn returns the connection which exists between "one" and "other" // regardless of which node initiated the connection func (net *Network) GetConn(oneID, otherID discover.NodeID) *Conn { @@ -476,16 +472,19 @@ func (net *Network) InitConn(oneID, otherID discover.NodeID) (*Conn, error) { if err != nil { return nil, err } - if time.Since(conn.initiated) < dialBanTimeout { - return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID) - } if conn.Up { return nil, fmt.Errorf("%v and %v already connected", oneID, otherID) } + if time.Since(conn.initiated) < DialBanTimeout { + return nil, fmt.Errorf("connection between %v and %v recently attempted", oneID, otherID) + } + err = conn.nodesUp() if err != nil { + log.Trace(fmt.Sprintf("nodes not up: %v", err)) return nil, fmt.Errorf("nodes not up: %v", err) } + log.Debug("InitConn - connection initiated") conn.initiated = time.Now() return conn, nil } diff --git a/p2p/simulations/network_test.go b/p2p/simulations/network_test.go index 2a062121be..f178bac502 100644 --- a/p2p/simulations/network_test.go +++ b/p2p/simulations/network_test.go @@ -41,7 +41,8 @@ func TestNetworkSimulation(t *testing.T) { nodeCount := 20 ids := make([]discover.NodeID, nodeCount) for i := 0; i < nodeCount; i++ { - node, err := network.NewNode() + conf := adapters.RandomNodeConfig() + node, err := network.NewNodeWithConfig(conf) if err != nil { t.Fatalf("error creating node: %s", err) } diff --git a/p2p/simulations/pipes/pipes.go b/p2p/simulations/pipes/pipes.go new file mode 100644 index 0000000000..8532c1bcf0 --- /dev/null +++ b/p2p/simulations/pipes/pipes.go @@ -0,0 +1,55 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package pipes + +import ( + "net" +) + +// NetPipe wraps net.Pipe in a signature returning an error +func NetPipe() (net.Conn, net.Conn, error) { + p1, p2 := net.Pipe() + return p1, p2, nil +} + +// TCPPipe creates an in process full duplex pipe based on a localhost TCP socket +func TCPPipe() (net.Conn, net.Conn, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, nil, err + } + defer l.Close() + + var aconn net.Conn + aerr := make(chan error, 1) + go func() { + var err error + aconn, err = l.Accept() + aerr <- err + }() + + dconn, err := net.Dial("tcp", l.Addr().String()) + if err != nil { + <-aerr + return nil, nil, err + } + if err := <-aerr; err != nil { + dconn.Close() + return nil, nil, err + } + return aconn, dconn, nil +} diff --git a/p2p/testing/protocolsession.go b/p2p/testing/protocolsession.go index 8f73bfa03e..e3ec41ad67 100644 --- a/p2p/testing/protocolsession.go +++ b/p2p/testing/protocolsession.go @@ -91,7 +91,9 @@ func (s *ProtocolSession) trigger(trig Trigger) error { errc := make(chan error) go func() { + log.Trace(fmt.Sprintf("trigger %v (%v)....", trig.Msg, trig.Code)) errc <- mockNode.Trigger(&trig) + log.Trace(fmt.Sprintf("triggered %v (%v)", trig.Msg, trig.Code)) }() t := trig.Timeout diff --git a/params/config.go b/params/config.go index dc02c7ca37..6e6a5cb8b2 100644 --- a/params/config.go +++ b/params/config.go @@ -23,15 +23,16 @@ import ( "github.com/ethereum/go-ethereum/common" ) +// Genesis hashes to enforce below configs on. var ( - MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") // Mainnet genesis hash to enforce below configs on - TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") // Testnet genesis hash to enforce below configs on + MainnetGenesisHash = common.HexToHash("0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3") + TestnetGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") ) var ( // MainnetChainConfig is the chain parameters to run a node on the main network. MainnetChainConfig = &ChainConfig{ - ChainId: big.NewInt(1), + ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(1150000), DAOForkBlock: big.NewInt(1920000), DAOForkSupport: true, @@ -46,7 +47,7 @@ var ( // TestnetChainConfig contains the chain parameters to run a node on the Ropsten test network. TestnetChainConfig = &ChainConfig{ - ChainId: big.NewInt(3), + ChainID: big.NewInt(3), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: true, @@ -61,7 +62,7 @@ var ( // RinkebyChainConfig contains the chain parameters to run a node on the Rinkeby test network. RinkebyChainConfig = &ChainConfig{ - ChainId: big.NewInt(4), + ChainID: big.NewInt(4), HomesteadBlock: big.NewInt(1), DAOForkBlock: nil, DAOForkSupport: true, @@ -101,7 +102,7 @@ var ( // that any network, identified by its genesis block, can have its own // set of configuration options. 
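Looking back at the pipes package introduced above before the ChainConfig changes continue: a hedged sketch of a test that treats the two constructors interchangeably. TestPipeVariants is illustrative and not part of this diff; it only relies on the shared (net.Conn, net.Conn, error) signature shown above.

package pipes_test

import (
	"net"
	"testing"

	"github.com/ethereum/go-ethereum/p2p/simulations/pipes"
)

// TestPipeVariants is a hypothetical companion test: both constructors
// yield a connected pair of net.Conn values. The practical difference is
// buffering: net.Pipe is fully synchronous (a Write blocks until the peer
// Reads), while the TCP-backed pipe buffers in the kernel, which is why
// the tests above issue writes from a separate goroutine so one test body
// covers both variants.
func TestPipeVariants(t *testing.T) {
	for name, mk := range map[string]func() (net.Conn, net.Conn, error){
		"net": pipes.NetPipe,
		"tcp": pipes.TCPPipe,
	} {
		c1, c2, err := mk()
		if err != nil {
			t.Fatalf("%s: %v", name, err)
		}
		c1.Close()
		c2.Close()
	}
}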
type ChainConfig struct { - ChainId *big.Int `json:"chainId"` // Chain id identifies the current chain and is used for replay protection + ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead) @@ -154,7 +155,7 @@ func (c *ChainConfig) String() string { engine = "unknown" } return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Engine: %v}", - c.ChainId, + c.ChainID, c.HomesteadBlock, c.DAOForkBlock, c.DAOForkSupport, @@ -172,27 +173,32 @@ func (c *ChainConfig) IsHomestead(num *big.Int) bool { return isForked(c.HomesteadBlock, num) } -// IsDAO returns whether num is either equal to the DAO fork block or greater. +// IsDAOFork returns whether num is either equal to the DAO fork block or greater. func (c *ChainConfig) IsDAOFork(num *big.Int) bool { return isForked(c.DAOForkBlock, num) } +// IsEIP150 returns whether num is either equal to the EIP150 fork block or greater. func (c *ChainConfig) IsEIP150(num *big.Int) bool { return isForked(c.EIP150Block, num) } +// IsEIP155 returns whether num is either equal to the EIP155 fork block or greater. func (c *ChainConfig) IsEIP155(num *big.Int) bool { return isForked(c.EIP155Block, num) } +// IsEIP158 returns whether num is either equal to the EIP158 fork block or greater. func (c *ChainConfig) IsEIP158(num *big.Int) bool { return isForked(c.EIP158Block, num) } +// IsByzantium returns whether num is either equal to the Byzantium fork block or greater. func (c *ChainConfig) IsByzantium(num *big.Int) bool { return isForked(c.ByzantiumBlock, num) } +// IsConstantinople returns whether num is either equal to the Constantinople fork block or greater. func (c *ChainConfig) IsConstantinople(num *big.Int) bool { return isForked(c.ConstantinopleBlock, num) } @@ -251,7 +257,7 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.EIP158Block, newcfg.EIP158Block, head) { return newCompatError("EIP158 fork block", c.EIP158Block, newcfg.EIP158Block) } - if c.IsEIP158(head) && !configNumEqual(c.ChainId, newcfg.ChainId) { + if c.IsEIP158(head) && !configNumEqual(c.ChainID, newcfg.ChainID) { return newCompatError("EIP158 chain ID", c.EIP158Block, newcfg.EIP158Block) } if isForkIncompatible(c.ByzantiumBlock, newcfg.ByzantiumBlock, head) { @@ -324,15 +330,16 @@ func (err *ConfigCompatError) Error() string { // Rules is a one time interface meaning that it shouldn't be used in between transition // phases. type Rules struct { - ChainId *big.Int + ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool IsByzantium bool } +// Rules ensures c's ChainID is not nil. 
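The Rules snapshot constructed by the function just below is typically consumed like this. signerFor is a hypothetical helper (similar in spirit to the selection geth's types package performs), shown here only to illustrate the renamed ChainID field in use.

import (
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

// signerFor picks a transaction signer from a fork-rules snapshot: once
// EIP155 activates, the chain ID feeds replay-protected signing; before
// that the Homestead signer applies. Because Rules copies ChainID,
// callers can hold the snapshot without touching the config again.
func signerFor(config *params.ChainConfig, num *big.Int) types.Signer {
	rules := config.Rules(num)
	if rules.IsEIP155 {
		return types.NewEIP155Signer(rules.ChainID)
	}
	return types.HomesteadSigner{}
}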
func (c *ChainConfig) Rules(num *big.Int) Rules { - chainId := c.ChainId - if chainId == nil { - chainId = new(big.Int) + chainID := c.ChainID + if chainID == nil { + chainID = new(big.Int) } - return Rules{ChainId: new(big.Int).Set(chainId), IsHomestead: c.IsHomestead(num), IsEIP150: c.IsEIP150(num), IsEIP155: c.IsEIP155(num), IsEIP158: c.IsEIP158(num), IsByzantium: c.IsByzantium(num)} + return Rules{ChainID: new(big.Int).Set(chainID), IsHomestead: c.IsHomestead(num), IsEIP150: c.IsEIP150(num), IsEIP155: c.IsEIP155(num), IsEIP158: c.IsEIP158(num), IsByzantium: c.IsByzantium(num)} } diff --git a/params/denomination.go b/params/denomination.go index 9e1b52506f..1a309827da 100644 --- a/params/denomination.go +++ b/params/denomination.go @@ -16,12 +16,12 @@ package params +// These are the multipliers for ether denominations. +// Example: To get the wei value of an amount in 'douglas', use +// +// new(big.Int).Mul(value, big.NewInt(params.Douglas)) +// const ( - // These are the multipliers for ether denominations. - // Example: To get the wei value of an amount in 'douglas', use - // - // new(big.Int).Mul(value, big.NewInt(params.Douglas)) - // Wei = 1 Ada = 1e3 Babbage = 1e6 diff --git a/params/gas_table.go b/params/gas_table.go index 4969382b1b..d33bebbe53 100644 --- a/params/gas_table.go +++ b/params/gas_table.go @@ -16,6 +16,7 @@ package params +// GasTable organizes gas prices for different ethereum phases. type GasTable struct { ExtcodeSize uint64 ExtcodeCopy uint64 @@ -34,6 +35,7 @@ type GasTable struct { CreateBySuicide uint64 } +// Variables containing gas prices for different ethereum phases. var ( // GasTableHomestead contain the gas prices for // the homestead phase. @@ -47,8 +49,8 @@ var ( ExpByte: 10, } - // GasTableHomestead contain the gas re-prices for - // the homestead phase. + // GasTableEIP150 contain the gas re-prices for + // the EIP150 phase. GasTableEIP150 = GasTable{ ExtcodeSize: 700, ExtcodeCopy: 700, @@ -60,7 +62,8 @@ var ( CreateBySuicide: 25000, } - + // GasTableEIP158 contain the gas re-prices for + // the EIP15* phase. GasTableEIP158 = GasTable{ ExtcodeSize: 700, ExtcodeCopy: 700, diff --git a/params/protocol_params.go b/params/protocol_params.go index 5a0b14d61a..1ea9c58138 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -19,7 +19,7 @@ package params import "math/big" var ( - TargetGasLimit uint64 = GenesisGasLimit // The artificial target + TargetGasLimit = GenesisGasLimit // The artificial target ) const ( diff --git a/params/version.go b/params/version.go index 1e8c43bf81..f679908ae4 100644 --- a/params/version.go +++ b/params/version.go @@ -23,7 +23,7 @@ import ( const ( VersionMajor = 1 // Major version component of the current release VersionMinor = 8 // Minor version component of the current release - VersionPatch = 9 // Patch version component of the current release + VersionPatch = 12 // Patch version component of the current release VersionMeta = "unstable" // Version metadata to append to the version string ) diff --git a/rpc/client.go b/rpc/client.go index 77b4d5ee01..1c88cfab84 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -61,7 +61,7 @@ const ( // The approach taken here is to maintain a per-subscription linked list buffer // shrinks on demand. If the buffer reaches the size below, the subscription is // dropped. - maxClientSubscriptionBuffer = 8000 + maxClientSubscriptionBuffer = 20000 ) // BatchElem is an element in a batch request. 
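Stepping back to the relocated denomination comment above, which spells out the conversion pattern: here it is as a runnable sketch. etherToWei and the printed value are illustrative; only params.Ether comes from the package.

package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/params"
)

// etherToWei follows the documented pattern: multiply the amount by the
// denomination constant to obtain the wei value.
func etherToWei(amount int64) *big.Int {
	return new(big.Int).Mul(big.NewInt(amount), big.NewInt(params.Ether))
}

func main() {
	fmt.Println(etherToWei(1)) // 1000000000000000000, i.e. 1e18 wei
}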
diff --git a/rpc/http.go b/rpc/http.go index feaa7348c4..6388d68961 100644 --- a/rpc/http.go +++ b/rpc/http.go @@ -165,7 +165,12 @@ func NewHTTPServer(cors []string, vhosts []string, srv *Server) *http.Server { // Wrap the CORS-handler within a host-handler handler := newCorsHandler(srv, cors) handler = newVHostHandler(vhosts, handler) - return &http.Server{Handler: handler} + return &http.Server{ + Handler: handler, + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + IdleTimeout: 120 * time.Second, + } } // ServeHTTP serves JSON-RPC requests over HTTP. @@ -181,7 +186,7 @@ func (srv *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { // All checks passed, create a codec that reads direct from the request body // untilEOF and writes the response to w and order the server to process a // single request. - ctx := context.Background() + ctx := r.Context() ctx = context.WithValue(ctx, "remote", r.RemoteAddr) ctx = context.WithValue(ctx, "scheme", r.Proto) ctx = context.WithValue(ctx, "local", r.Host) diff --git a/rpc/json.go b/rpc/json.go index 837011f51b..a523eeb8ef 100644 --- a/rpc/json.go +++ b/rpc/json.go @@ -320,9 +320,6 @@ func parsePositionalArguments(rawArgs json.RawMessage, types []reflect.Type) ([] // CreateResponse will create a JSON-RPC success response with the given id and reply as result. func (c *jsonCodec) CreateResponse(id interface{}, reply interface{}) interface{} { - if isHexNum(reflect.TypeOf(reply)) { - return &jsonSuccessResponse{Version: jsonrpcVersion, Id: id, Result: fmt.Sprintf(`%#x`, reply)} - } return &jsonSuccessResponse{Version: jsonrpcVersion, Id: id, Result: reply} } @@ -340,11 +337,6 @@ func (c *jsonCodec) CreateErrorResponseWithInfo(id interface{}, err Error, info // CreateNotification will create a JSON-RPC notification with the given subscription id and event as params. func (c *jsonCodec) CreateNotification(subid, namespace string, event interface{}) interface{} { - if isHexNum(reflect.TypeOf(event)) { - return &jsonNotification{Version: jsonrpcVersion, Method: namespace + notificationMethodSuffix, - Params: jsonSubscription{Subscription: subid, Result: fmt.Sprintf(`%#x`, event)}} - } - return &jsonNotification{Version: jsonrpcVersion, Method: namespace + notificationMethodSuffix, Params: jsonSubscription{Subscription: subid, Result: event}} } diff --git a/rpc/server.go b/rpc/server.go index 7f304d8d93..8925419fe0 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -145,7 +145,7 @@ func (s *Server) serveRequest(ctx context.Context, codec ServerCodec, singleShot defer cancel() // if the codec supports notification include a notifier that callbacks can use - // to send notification to clients. It is thight to the codec/connection. If the + // to send notification to clients. It is tied to the codec/connection. If the // connection is closed the notifier will stop and cancels all active subscriptions. 
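The rpc/http.go hunk above combines two hardening ideas; a self-contained sketch of both follows. The handler, address, and durations here are invented for illustration: fixed server timeouts bound how long a slow or stalled client can pin a connection, and deriving the context from the request lets work stop as soon as the client disconnects instead of finishing a doomed response.

package main

import (
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		select {
		case <-time.After(2 * time.Second): // stand-in for real work
			w.Write([]byte("done\n"))
		case <-r.Context().Done():
			// client went away; abandon the work rather than complete it
		}
	})
	srv := &http.Server{
		Addr:         "127.0.0.1:8545",
		Handler:      mux,
		ReadTimeout:  5 * time.Second,  // bound time spent reading a request
		WriteTimeout: 10 * time.Second, // bound time spent writing a response
		IdleTimeout:  120 * time.Second, // reap idle keep-alive connections
	}
	srv.ListenAndServe()
}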
if options&OptionSubscriptions == OptionSubscriptions { ctx = context.WithValue(ctx, notifierKey{}, newNotifier(codec)) @@ -313,7 +313,6 @@ func (s *Server) handle(ctx context.Context, codec ServerCodec, req *serverReque if len(reply) == 0 { return codec.CreateResponse(req.id, nil), nil } - if req.callb.errPos >= 0 { // test if method returned an error if !reply[req.callb.errPos].IsNil() { e := reply[req.callb.errPos].Interface().(error) diff --git a/rpc/utils.go b/rpc/utils.go index 9315cab591..7f7ac4520b 100644 --- a/rpc/utils.go +++ b/rpc/utils.go @@ -22,7 +22,6 @@ import ( crand "crypto/rand" "encoding/binary" "encoding/hex" - "math/big" "math/rand" "reflect" "strings" @@ -105,20 +104,6 @@ func formatName(name string) string { return string(ret) } -var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() - -// Indication if this type should be serialized in hex -func isHexNum(t reflect.Type) bool { - if t == nil { - return false - } - for t.Kind() == reflect.Ptr { - t = t.Elem() - } - - return t == bigIntType -} - // suitableCallbacks iterates over the methods of the given type. It will determine if a method satisfies the criteria // for a RPC callback or a subscription callback and adds it to the collection of callbacks or subscriptions. See server // documentation for a summary of these criteria. diff --git a/sharding/proposer/proposer.go b/sharding/proposer/proposer.go index 624b5822d0..208127b81c 100644 --- a/sharding/proposer/proposer.go +++ b/sharding/proposer/proposer.go @@ -1,6 +1,7 @@ package proposer import ( + "context" "fmt" "math/big" @@ -16,7 +17,7 @@ import ( // an addHeader transaction to the sharding manager contract. // There can only exist one header per period per shard, it is the proposer's // responsibility to check if a header has been added. -func AddHeader(transactor mainchain.ContractTransactor, collation *sharding.Collation) error { +func AddHeader(client mainchain.EthClient, transactor mainchain.ContractTransactor, collation *sharding.Collation) error { log.Info("Adding header to SMC") txOps, err := transactor.CreateTXOpts(big.NewInt(0)) @@ -24,14 +25,25 @@ func AddHeader(transactor mainchain.ContractTransactor, collation *sharding.Coll return fmt.Errorf("unable to initiate add header transaction: %v", err) } - // TODO: Copy is inefficient here. Let's research how to best convert hash to [32]byte. - var chunkRoot [32]byte - copy(chunkRoot[:], collation.Header().ChunkRoot().Bytes()) - - tx, err := transactor.SMCTransactor().AddHeader(txOps, collation.Header().ShardID(), collation.Header().Period(), chunkRoot, collation.Header().Sig()) + tx, err := transactor.SMCTransactor().AddHeader(txOps, collation.Header().ShardID(), collation.Header().Period(), [32]byte(*collation.Header().ChunkRoot()), collation.Header().Sig()) if err != nil { return fmt.Errorf("unable to add header to SMC: %v", err) } + + // TODO: wait 5 mins for addHeader to be mined, hard coded duration for Ruby release, this will be changed with beacon chain. 
+ err = client.WaitForTransaction(context.Background(), tx.Hash(), 300) + if err != nil { + return err + } + + receipt, err := client.TransactionReceipt(tx.Hash()) + if err != nil { + return err + } + if receipt.Status != types.ReceiptStatusSuccessful { + return fmt.Errorf("add header transaction failed with receipt status %v", receipt.Status) + } + log.Info(fmt.Sprintf("Add header transaction hash: %v", tx.Hash().Hex())) return nil } diff --git a/sharding/proposer/service.go b/sharding/proposer/service.go index 45b7d19257..1abdc7a4f4 100644 --- a/sharding/proposer/service.go +++ b/sharding/proposer/service.go @@ -118,7 +118,7 @@ func (p *Proposer) createCollation(ctx context.Context, txs []*types.Transaction return err } if canAdd { - AddHeader(p.client, collation) + AddHeader(p.client, p.client, collation) } return nil diff --git a/sharding/proposer/service_test.go b/sharding/proposer/service_test.go index 4be9c4971a..90c3bc3dce 100644 --- a/sharding/proposer/service_test.go +++ b/sharding/proposer/service_test.go @@ -98,7 +98,7 @@ func TestAddCollation(t *testing.T) { } // normal test case #1 create collation with normal parameters. - err = AddHeader(node, collation) + err = AddHeader(node, node, collation) if err != nil { t.Errorf("%v", err) } @@ -144,7 +144,7 @@ func TestCheckCollation(t *testing.T) { backend.Commit() } - err = AddHeader(node, collation) + err = AddHeader(node, node, collation) if err != nil { t.Errorf("%v", err) } diff --git a/sharding/syncer/handlers_test.go b/sharding/syncer/handlers_test.go index e577d3ac66..7603b94c79 100644 --- a/sharding/syncer/handlers_test.go +++ b/sharding/syncer/handlers_test.go @@ -2,10 +2,13 @@ package syncer import ( "bytes" + "context" "errors" "math/big" "testing" + "time" + "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" "github.com/ethereum/go-ethereum/common" @@ -17,6 +20,7 @@ import ( "github.com/ethereum/go-ethereum/sharding/p2p" "github.com/ethereum/go-ethereum/sharding/p2p/messages" "github.com/ethereum/go-ethereum/sharding/params" + shardparams "github.com/ethereum/go-ethereum/sharding/params" "github.com/ethereum/go-ethereum/sharding/proposer" ) @@ -31,7 +35,8 @@ type mockNode struct { smc *contracts.SMC t *testing.T depositFlag bool - backend *backends.SimulatedBackend + Backend *backends.SimulatedBackend + BlockNumber int64 } type faultySMCCaller struct{} @@ -73,6 +78,39 @@ func (m *mockNode) GetShardCount() (int64, error) { return shardCount.Int64(), nil } +func (m *mockNode) Account() *accounts.Account { + return &accounts.Account{Address: addr} +} + +func (m *mockNode) WaitForTransaction(ctx context.Context, hash common.Hash, durationInSeconds time.Duration) error { + m.CommitWithBlock() + m.FastForward(1) + return nil +} + +func (m *mockNode) TransactionReceipt(hash common.Hash) (*types.Receipt, error) { + return m.Backend.TransactionReceipt(context.Background(), hash) +} + +func (m *mockNode) DepositFlag() bool { + return m.depositFlag +} + +func (m *mockNode) FastForward(p int) { + for i := 0; i < p*int(shardparams.DefaultConfig.PeriodLength); i++ { + m.CommitWithBlock() + } +} + +func (m *mockNode) CommitWithBlock() { + m.Backend.Commit() + m.BlockNumber = m.BlockNumber + 1 +} + +func (m *mockNode) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { + return types.NewBlockWithHeader(&types.Header{Number: big.NewInt(m.BlockNumber)}), nil +} + type faultyRequest struct{} type faultySigner 
struct{} type faultyCollationFetcher struct{} @@ -176,7 +214,7 @@ func TestCollationBodyResponse(t *testing.T) { func TestConstructNotaryRequest(t *testing.T) { backend, smc := setup(t) - node := &mockNode{smc: smc, t: t, backend: backend} + node := &mockNode{smc: smc, t: t, Backend: backend} // Fast forward to next period. for i := 0; i < int(params.DefaultConfig.PeriodLength); i++ { @@ -193,7 +231,7 @@ func TestConstructNotaryRequest(t *testing.T) { collation := sharding.NewCollation(header, []byte{}, []*types.Transaction{}) // Adds the header to the SMC. - if err := proposer.AddHeader(node, collation); err != nil { + if err := proposer.AddHeader(node, node, collation); err != nil { t.Fatalf("Failed to add header to SMC: %v", err) } diff --git a/signer/core/api.go b/signer/core/api.go index 45933284bf..9a2a49ccc6 100644 --- a/signer/core/api.go +++ b/signer/core/api.go @@ -189,15 +189,6 @@ type ( var ErrRequestDenied = errors.New("Request denied") -type errorWrapper struct { - msg string - err error -} - -func (ew errorWrapper) String() string { - return fmt.Sprintf("%s\n%s", ew.msg, ew.err) -} - // NewSignerAPI creates a new API that can be used for Account management. // ksLocation specifies the directory where to store the password protected private // key that is generated when a new Account is created. @@ -432,13 +423,11 @@ func (api *SignerAPI) EcRecover(ctx context.Context, data, sig hexutil.Bytes) (c } sig[64] -= 27 // Transform yellow paper V from 27/28 to 0/1 hash, _ := SignHash(data) - rpk, err := crypto.Ecrecover(hash, sig) + rpk, err := crypto.SigToPub(hash, sig) if err != nil { return common.Address{}, err } - pubKey := crypto.ToECDSAPub(rpk) - recoveredAddr := crypto.PubkeyToAddress(*pubKey) - return recoveredAddr, nil + return crypto.PubkeyToAddress(*rpk), nil } // SignHash is a helper function that calculates a hash for the given message that can be diff --git a/swarm/AUTHORS b/swarm/AUTHORS new file mode 100644 index 0000000000..f7232f07ce --- /dev/null +++ b/swarm/AUTHORS @@ -0,0 +1,35 @@ +# Core team members + +Viktor Trón - @zelig +Louis Holbrook - @nolash +Lewis Marshall - @lmars +Anton Evangelatov - @nonsense +Janoš Guljaš - @janos +Balint Gabor - @gbalint +Elad Nachmias - @justelad +Daniel A. 
Nagy - @nagydani +Aron Fischer - @homotopycolimit +Fabio Barone - @holisticode +Zahoor Mohamed - @jmozah +Zsolt Felföldi - @zsfelfoldi + +# External contributors + +Kiel Barry +Gary Rong +Jared Wasinger +Leon Stanko +Javier Peletier [epiclabs.io] +Bartek Borkowski [tungsten-labs.com] +Shane Howley [mainframe.com] +Doug Leonard [mainframe.com] +Ivan Daniluk [status.im] +Felix Lange [EF] +Martin Holst Swende [EF] +Guillaume Ballet [EF] +ligi [EF] +Christopher Dro [blick-labs.com] +Sergii Bomko [ledgerleopard.com] +Domino Valdano +Rafael Matias +Coogan Brennan \ No newline at end of file diff --git a/swarm/OWNERS b/swarm/OWNERS new file mode 100644 index 0000000000..774cd7db9d --- /dev/null +++ b/swarm/OWNERS @@ -0,0 +1,26 @@ +# Ownership by go packages + +swarm +├── api ─────────────────── ethersphere +├── bmt ─────────────────── @zelig +├── dev ─────────────────── @lmars +├── fuse ────────────────── @jmozah, @holisticode +├── grafana_dashboards ──── @nonsense +├── metrics ─────────────── @nonsense, @holisticode +├── multihash ───────────── @nolash +├── network ─────────────── ethersphere +│ ├── bitvector ───────── @zelig, @janos, @gbalint +│ ├── priorityqueue ───── @zelig, @janos, @gbalint +│ ├── simulations ─────── @zelig +│ └── stream ──────────── @janos, @zelig, @gbalint, @holisticode, @justelad +│ ├── intervals ───── @janos +│ └── testing ─────── @zelig +├── pot ─────────────────── @zelig +├── pss ─────────────────── @nolash, @zelig, @nonsense +├── services ────────────── @zelig +├── state ───────────────── @justelad +├── storage ─────────────── ethersphere +│ ├── encryption ──────── @gbalint, @zelig, @nagydani +│ ├── mock ────────────── @janos +│ └── mru ─────────────── @nolash +└── testutil ────────────── @lmars \ No newline at end of file diff --git a/swarm/api/api.go b/swarm/api/api.go index 0cf12fdbed..36f19998af 100644 --- a/swarm/api/api.go +++ b/swarm/api/api.go @@ -17,13 +17,13 @@ package api import ( + "context" "fmt" "io" + "math/big" "net/http" "path" - "regexp" "strings" - "sync" "bytes" "mime" @@ -31,14 +31,15 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/contracts/ens" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/swarm/log" + "github.com/ethereum/go-ethereum/swarm/multihash" "github.com/ethereum/go-ethereum/swarm/storage" + "github.com/ethereum/go-ethereum/swarm/storage/mru" ) -var hashMatcher = regexp.MustCompile("^[0-9A-Fa-f]{64}") - -//setup metrics var ( apiResolveCount = metrics.NewRegisteredCounter("api.resolve.count", nil) apiResolveFail = metrics.NewRegisteredCounter("api.resolve.fail", nil) @@ -46,7 +47,7 @@ var ( apiPutFail = metrics.NewRegisteredCounter("api.put.fail", nil) apiGetCount = metrics.NewRegisteredCounter("api.get.count", nil) apiGetNotFound = metrics.NewRegisteredCounter("api.get.notfound", nil) - apiGetHttp300 = metrics.NewRegisteredCounter("api.get.http.300", nil) + apiGetHTTP300 = metrics.NewRegisteredCounter("api.get.http.300", nil) apiModifyCount = metrics.NewRegisteredCounter("api.modify.count", nil) apiModifyFail = metrics.NewRegisteredCounter("api.modify.fail", nil) apiAddFileCount = metrics.NewRegisteredCounter("api.addfile.count", nil) @@ -55,22 +56,33 @@ var ( apiRmFileFail = metrics.NewRegisteredCounter("api.removefile.fail", nil) apiAppendFileCount = metrics.NewRegisteredCounter("api.appendfile.count", nil) apiAppendFileFail = 
metrics.NewRegisteredCounter("api.appendfile.fail", nil) + apiGetInvalid = metrics.NewRegisteredCounter("api.get.invalid", nil) ) +// Resolver interface resolve a domain name to a hash using ENS type Resolver interface { Resolve(string) (common.Hash, error) } +// ResolveValidator is used to validate the contained Resolver +type ResolveValidator interface { + Resolver + Owner(node [32]byte) (common.Address, error) + HeaderByNumber(context.Context, *big.Int) (*types.Header, error) +} + // NoResolverError is returned by MultiResolver.Resolve if no resolver // can be found for the address. type NoResolverError struct { TLD string } +// NewNoResolverError creates a NoResolverError for the given top level domain func NewNoResolverError(tld string) *NoResolverError { return &NoResolverError{TLD: tld} } +// Error NoResolverError implements error func (e *NoResolverError) Error() string { if e.TLD == "" { return "no ENS resolver" @@ -82,7 +94,8 @@ func (e *NoResolverError) Error() string { // Each TLD can have multiple resolvers, and the resoluton from the // first one in the sequence will be returned. type MultiResolver struct { - resolvers map[string][]Resolver + resolvers map[string][]ResolveValidator + nameHash func(string) common.Hash } // MultiResolverOption sets options for MultiResolver and is used as @@ -93,16 +106,24 @@ type MultiResolverOption func(*MultiResolver) // for a specific TLD. If TLD is an empty string, the resolver will be added // to the list of default resolver, the ones that will be used for resolution // of addresses which do not have their TLD resolver specified. -func MultiResolverOptionWithResolver(r Resolver, tld string) MultiResolverOption { +func MultiResolverOptionWithResolver(r ResolveValidator, tld string) MultiResolverOption { return func(m *MultiResolver) { m.resolvers[tld] = append(m.resolvers[tld], r) } } +// MultiResolverOptionWithNameHash is unused at the time of this writing +func MultiResolverOptionWithNameHash(nameHash func(string) common.Hash) MultiResolverOption { + return func(m *MultiResolver) { + m.nameHash = nameHash + } +} + // NewMultiResolver creates a new instance of MultiResolver. func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { m = &MultiResolver{ - resolvers: make(map[string][]Resolver), + resolvers: make(map[string][]ResolveValidator), + nameHash: ens.EnsNode, } for _, o := range opts { o(m) @@ -114,18 +135,10 @@ func NewMultiResolver(opts ...MultiResolverOption) (m *MultiResolver) { // If there are more default Resolvers, or for a specific TLD, // the Hash from the the first one which does not return error // will be returned. 
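Before the reworked Resolve body below, a sketch of how the options pattern defined above is wired together. buildResolver and its arguments are stand-ins for real ENS-backed clients; it assumes placement in the api package.

// buildResolver registers one validator per TLD; the empty-string key
// installs the default list used for any TLD without its own entry.
func buildResolver(ensClient, testClient ResolveValidator) *MultiResolver {
	return NewMultiResolver(
		MultiResolverOptionWithResolver(ensClient, ""),      // default for unregistered TLDs
		MultiResolverOptionWithResolver(testClient, "test"), // only consulted for *.test names
	)
}

With this wiring, Resolve("theswarm.eth") falls through to the default list (no "eth" entry is registered), while Resolve("site.test") consults the "test" list.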
-func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) { - rs := m.resolvers[""] - tld := path.Ext(addr) - if tld != "" { - tld = tld[1:] - rstld, ok := m.resolvers[tld] - if ok { - rs = rstld - } - } - if rs == nil { - return h, NewNoResolverError(tld) +func (m *MultiResolver) Resolve(addr string) (h common.Hash, err error) { + rs, err := m.getResolveValidator(addr) + if err != nil { + return h, err } for _, r := range rs { h, err = r.Resolve(addr) @@ -136,104 +149,171 @@ func (m MultiResolver) Resolve(addr string) (h common.Hash, err error) { return } -/* -Api implements webserver/file system related content storage and retrieval -on top of the dpa -it is the public interface of the dpa which is included in the ethereum stack -*/ -type Api struct { - dpa *storage.DPA - dns Resolver +// ValidateOwner checks the ENS to validate that the owner of the given domain is the given eth address +func (m *MultiResolver) ValidateOwner(name string, address common.Address) (bool, error) { + rs, err := m.getResolveValidator(name) + if err != nil { + return false, err + } + var addr common.Address + for _, r := range rs { + addr, err = r.Owner(m.nameHash(name)) + // we hide the error if it is not for the last resolver we check + if err == nil { + return addr == address, nil + } + } + return false, err } -//the api constructor initialises -func NewApi(dpa *storage.DPA, dns Resolver) (self *Api) { - self = &Api{ - dpa: dpa, - dns: dns, +// HeaderByNumber uses the validator of the given domainname and retrieves the header for the given block number +func (m *MultiResolver) HeaderByNumber(ctx context.Context, name string, blockNr *big.Int) (*types.Header, error) { + rs, err := m.getResolveValidator(name) + if err != nil { + return nil, err + } + for _, r := range rs { + var header *types.Header + header, err = r.HeaderByNumber(ctx, blockNr) + // we hide the error if it is not for the last resolver we check + if err == nil { + return header, nil + } + } + return nil, err +} + +// getResolveValidator uses the hostname to retrieve the resolver associated with the top level domain +func (m *MultiResolver) getResolveValidator(name string) ([]ResolveValidator, error) { + rs := m.resolvers[""] + tld := path.Ext(name) + if tld != "" { + tld = tld[1:] + rstld, ok := m.resolvers[tld] + if ok { + return rstld, nil + } + } + if len(rs) == 0 { + return rs, NewNoResolverError(tld) + } + return rs, nil +} + +// SetNameHash sets the hasher function that hashes the domain into a name hash that ENS uses +func (m *MultiResolver) SetNameHash(nameHash func(string) common.Hash) { + m.nameHash = nameHash +} + +/* +API implements webserver/file system related content storage and retrieval +on top of the FileStore +it is the public interface of the FileStore which is included in the ethereum stack +*/ +type API struct { + resource *mru.Handler + fileStore *storage.FileStore + dns Resolver +} + +// NewAPI the api constructor initialises a new API instance. 
+func NewAPI(fileStore *storage.FileStore, dns Resolver, resourceHandler *mru.Handler) (self *API) { + self = &API{ + fileStore: fileStore, + dns: dns, + resource: resourceHandler, } return } -// to be used only in TEST -func (self *Api) Upload(uploadDir, index string) (hash string, err error) { - fs := NewFileSystem(self) - hash, err = fs.Upload(uploadDir, index) +// Upload to be used only in TEST +func (a *API) Upload(uploadDir, index string, toEncrypt bool) (hash string, err error) { + fs := NewFileSystem(a) + hash, err = fs.Upload(uploadDir, index, toEncrypt) return hash, err } -// DPA reader API -func (self *Api) Retrieve(key storage.Key) storage.LazySectionReader { - return self.dpa.Retrieve(key) +// Retrieve FileStore reader API +func (a *API) Retrieve(addr storage.Address) (reader storage.LazySectionReader, isEncrypted bool) { + return a.fileStore.Retrieve(addr) } -func (self *Api) Store(data io.Reader, size int64, wg *sync.WaitGroup) (key storage.Key, err error) { - return self.dpa.Store(data, size, wg, nil) +// Store wraps the Store API call of the embedded FileStore +func (a *API) Store(data io.Reader, size int64, toEncrypt bool) (addr storage.Address, wait func(), err error) { + log.Debug("api.store", "size", size) + return a.fileStore.Store(data, size, toEncrypt) } +// ErrResolve is returned when an URI cannot be resolved from ENS. type ErrResolve error -// DNS Resolver -func (self *Api) Resolve(uri *URI) (storage.Key, error) { +// Resolve resolves a URI to an Address using the MultiResolver. +func (a *API) Resolve(uri *URI) (storage.Address, error) { apiResolveCount.Inc(1) - log.Trace(fmt.Sprintf("Resolving : %v", uri.Addr)) + log.Trace("resolving", "uri", uri.Addr) - // if the URI is immutable, check if the address is a hash - isHash := hashMatcher.MatchString(uri.Addr) - if uri.Immutable() || uri.DeprecatedImmutable() { - if !isHash { + // if the URI is immutable, check if the address looks like a hash + if uri.Immutable() { + key := uri.Address() + if key == nil { return nil, fmt.Errorf("immutable address not a content hash: %q", uri.Addr) } - return common.Hex2Bytes(uri.Addr), nil + return key, nil } // if DNS is not configured, check if the address is a hash - if self.dns == nil { - if !isHash { + if a.dns == nil { + key := uri.Address() + if key == nil { apiResolveFail.Inc(1) return nil, fmt.Errorf("no DNS to resolve name: %q", uri.Addr) } - return common.Hex2Bytes(uri.Addr), nil + return key, nil } // try and resolve the address - resolved, err := self.dns.Resolve(uri.Addr) + resolved, err := a.dns.Resolve(uri.Addr) if err == nil { return resolved[:], nil - } else if !isHash { + } + + key := uri.Address() + if key == nil { apiResolveFail.Inc(1) return nil, err } - return common.Hex2Bytes(uri.Addr), nil -} - -// Put provides singleton manifest creation on top of dpa store -func (self *Api) Put(content, contentType string) (storage.Key, error) { - apiPutCount.Inc(1) - r := strings.NewReader(content) - wg := &sync.WaitGroup{} - key, err := self.dpa.Store(r, int64(len(content)), wg, nil) - if err != nil { - apiPutFail.Inc(1) - return nil, err - } - manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) - r = strings.NewReader(manifest) - key, err = self.dpa.Store(r, int64(len(manifest)), wg, nil) - if err != nil { - apiPutFail.Inc(1) - return nil, err - } - wg.Wait() return key, nil } +// Put provides singleton manifest creation on top of FileStore store +func (a *API) Put(content, contentType string, toEncrypt bool) (k storage.Address, 
wait func(), err error) { + apiPutCount.Inc(1) + r := strings.NewReader(content) + key, waitContent, err := a.fileStore.Store(r, int64(len(content)), toEncrypt) + if err != nil { + apiPutFail.Inc(1) + return nil, nil, err + } + manifest := fmt.Sprintf(`{"entries":[{"hash":"%v","contentType":"%s"}]}`, key, contentType) + r = strings.NewReader(manifest) + key, waitManifest, err := a.fileStore.Store(r, int64(len(manifest)), toEncrypt) + if err != nil { + apiPutFail.Inc(1) + return nil, nil, err + } + return key, func() { + waitContent() + waitManifest() + }, nil +} + // Get uses iterative manifest retrieval and prefix matching -// to resolve basePath to content using dpa retrieve -// it returns a section reader, mimeType, status and an error -func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionReader, mimeType string, status int, err error) { +// to resolve basePath to content using FileStore retrieve +// it returns a section reader, mimeType, status, the key of the actual content and an error +func (a *API) Get(manifestAddr storage.Address, path string) (reader storage.LazySectionReader, mimeType string, status int, contentAddr storage.Address, err error) { + log.Debug("api.get", "key", manifestAddr, "path", path) apiGetCount.Inc(1) - trie, err := loadManifest(self.dpa, key, nil) + trie, err := loadManifest(a.fileStore, manifestAddr, nil) if err != nil { apiGetNotFound.Inc(1) status = http.StatusNotFound @@ -241,34 +321,111 @@ func (self *Api) Get(key storage.Key, path string) (reader storage.LazySectionRe return } - log.Trace(fmt.Sprintf("getEntry(%s)", path)) - + log.Debug("trie getting entry", "key", manifestAddr, "path", path) entry, _ := trie.getEntry(path) if entry != nil { - key = common.Hex2Bytes(entry.Hash) + log.Debug("trie got entry", "key", manifestAddr, "path", path, "entry.Hash", entry.Hash) + // we need to do some extra work if this is a mutable resource manifest + if entry.ContentType == ResourceContentType { + + // get the resource root chunk key + log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + rsrc, err := a.resource.Load(storage.Address(common.FromHex(entry.Hash))) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Debug(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // use this key to retrieve the latest update + rsrc, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, &mru.LookupParams{}) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Debug(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // if it's multihash, we will transparently serve the content this multihash points to + // \TODO this resolve is rather expensive all in all, review to see if it can be achieved cheaper + if rsrc.Multihash { + + // get the data of the update + _, rsrcData, err := a.resource.GetContent(rsrc.NameHash().Hex()) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Warn(fmt.Sprintf("get resource content error: %v", err)) + return reader, mimeType, status, nil, err + } + + // validate that data as multihash + decodedMultihash, err := multihash.FromMultihash(rsrcData) + if err != nil { + apiGetInvalid.Inc(1) + status = http.StatusUnprocessableEntity + log.Warn("invalid resource multihash", "err", err) + return reader, mimeType, status, nil, err + } + manifestAddr = 
storage.Address(decodedMultihash) + log.Trace("resource is multihash", "key", manifestAddr) + + // get the manifest the multihash digest points to + trie, err := loadManifest(a.fileStore, manifestAddr, nil) + if err != nil { + apiGetNotFound.Inc(1) + status = http.StatusNotFound + log.Warn(fmt.Sprintf("loadManifestTrie (resource multihash) error: %v", err)) + return reader, mimeType, status, nil, err + } + + // finally, get the manifest entry + // it will always be the entry on path "" + entry, _ = trie.getEntry(path) + if entry == nil { + status = http.StatusNotFound + apiGetNotFound.Inc(1) + err = fmt.Errorf("manifest (resource multihash) entry for '%s' not found", path) + log.Trace("manifest (resource multihash) entry not found", "key", manifestAddr, "path", path) + return reader, mimeType, status, nil, err + } + + } else { + // data is returned verbatim since it's not a multihash + return rsrc, "application/octet-stream", http.StatusOK, nil, nil + } + } + + // regardless of resource update manifests or normal manifests we will converge at this point + // get the key the manifest entry points to and serve it if it's unambiguous + contentAddr = common.Hex2Bytes(entry.Hash) status = entry.Status if status == http.StatusMultipleChoices { - apiGetHttp300.Inc(1) - return - } else { - mimeType = entry.ContentType - log.Trace(fmt.Sprintf("content lookup key: '%v' (%v)", key, mimeType)) - reader = self.dpa.Retrieve(key) + apiGetHTTP300.Inc(1) + return nil, entry.ContentType, status, contentAddr, err } + mimeType = entry.ContentType + log.Debug("content lookup key", "key", contentAddr, "mimetype", mimeType) + reader, _ = a.fileStore.Retrieve(contentAddr) } else { + // no entry found status = http.StatusNotFound apiGetNotFound.Inc(1) err = fmt.Errorf("manifest entry for '%s' not found", path) - log.Warn(fmt.Sprintf("%v", err)) + log.Trace("manifest entry not found", "key", contentAddr, "path", path) } return } -func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) (storage.Key, error) { +// Modify loads manifest and checks the content hash before recalculating and storing the manifest. +func (a *API) Modify(addr storage.Address, path, contentHash, contentType string) (storage.Address, error) { apiModifyCount.Inc(1) quitC := make(chan bool) - trie, err := loadManifest(self.dpa, key, quitC) + trie, err := loadManifest(a.fileStore, addr, quitC) if err != nil { apiModifyFail.Inc(1) return nil, err @@ -288,10 +445,11 @@ func (self *Api) Modify(key storage.Key, path, contentHash, contentType string) apiModifyFail.Inc(1) return nil, err } - return trie.hash, nil + return trie.ref, nil } -func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Key, string, error) { +// AddFile creates a new manifest entry, adds it to swarm, then adds a file to swarm. 
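A hypothetical call site for the AddFile implementation that follows; every argument value here is invented, and the comments only restate the signature above.

// Insert docs/readme.md into the manifest rooted at mhash; AddFile
// returns the stored file's address and the updated manifest hash.
fileAddr, newManifestHash, err := a.AddFile(
	mhash,             // existing manifest hash, hex-encoded
	"docs/",           // path prefix inside the manifest
	"readme.md",       // entry name for the new file
	[]byte("# hello"), // file content to store in swarm
	false,             // nameresolver flag; false when mhash is already a content hash
)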
+func (a *API) AddFile(mhash, path, fname string, content []byte, nameresolver bool) (storage.Address, string, error) { apiAddFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -299,7 +457,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver apiAddFileFail.Inc(1) return nil, "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -318,7 +476,7 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver ModTime: time.Now(), } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiAddFileFail.Inc(1) return nil, "", err @@ -341,7 +499,8 @@ func (self *Api) AddFile(mhash, path, fname string, content []byte, nameresolver } -func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { +// RemoveFile removes a file entry in a manifest. +func (a *API) RemoveFile(mhash, path, fname string, nameresolver bool) (string, error) { apiRmFileCount.Inc(1) uri, err := Parse("bzz:/" + mhash) @@ -349,7 +508,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin apiRmFileFail.Inc(1) return "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -360,7 +519,7 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin path = path[1:] } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiRmFileFail.Inc(1) return "", err @@ -382,7 +541,8 @@ func (self *Api) RemoveFile(mhash, path, fname string, nameresolver bool) (strin return newMkey.String(), nil } -func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldKey storage.Key, offset int64, addSize int64, nameresolver bool) (storage.Key, string, error) { +// AppendFile removes old manifest, appends file entry to new manifest and adds it to Swarm. 
+func (a *API) AppendFile(mhash, path, fname string, existingSize int64, content []byte, oldAddr storage.Address, offset int64, addSize int64, nameresolver bool) (storage.Address, string, error) { apiAppendFileCount.Inc(1) buffSize := offset + addSize @@ -392,7 +552,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte buf := make([]byte, buffSize) - oldReader := self.Retrieve(oldKey) + oldReader, _ := a.Retrieve(oldAddr) io.ReadAtLeast(oldReader, buf, int(offset)) newReader := bytes.NewReader(content) @@ -406,7 +566,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte totalSize := int64(len(buf)) // TODO(jmozah): to append using pyramid chunker when it is ready - //oldReader := self.Retrieve(oldKey) + //oldReader := a.Retrieve(oldKey) //newReader := bytes.NewReader(content) //combinedReader := io.MultiReader(oldReader, newReader) @@ -415,7 +575,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte apiAppendFileFail.Inc(1) return nil, "", err } - mkey, err := self.Resolve(uri) + mkey, err := a.Resolve(uri) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -426,7 +586,7 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte path = path[1:] } - mw, err := self.NewManifestWriter(mkey, nil) + mw, err := a.NewManifestWriter(mkey, nil) if err != nil { apiAppendFileFail.Inc(1) return nil, "", err @@ -463,21 +623,22 @@ func (self *Api) AppendFile(mhash, path, fname string, existingSize int64, conte } -func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storage.Key, manifestEntryMap map[string]*manifestTrieEntry, err error) { +// BuildDirectoryTree used by swarmfs_unix +func (a *API) BuildDirectoryTree(mhash string, nameresolver bool) (addr storage.Address, manifestEntryMap map[string]*manifestTrieEntry, err error) { uri, err := Parse("bzz:/" + mhash) if err != nil { return nil, nil, err } - key, err = self.Resolve(uri) + addr, err = a.Resolve(uri) if err != nil { return nil, nil, err } quitC := make(chan bool) - rootTrie, err := loadManifest(self.dpa, key, quitC) + rootTrie, err := loadManifest(a.fileStore, addr, quitC) if err != nil { - return nil, nil, fmt.Errorf("can't load manifest %v: %v", key.String(), err) + return nil, nil, fmt.Errorf("can't load manifest %v: %v", addr.String(), err) } manifestEntryMap = map[string]*manifestTrieEntry{} @@ -486,7 +647,94 @@ func (self *Api) BuildDirectoryTree(mhash string, nameresolver bool) (key storag }) if err != nil { - return nil, nil, fmt.Errorf("list with prefix failed %v: %v", key.String(), err) + return nil, nil, fmt.Errorf("list with prefix failed %v: %v", addr.String(), err) } - return key, manifestEntryMap, nil + return addr, manifestEntryMap, nil +} + +// ResourceLookup Looks up mutable resource updates at specific periods and versions +func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) { + var err error + rsrc, err := a.resource.Load(addr) + if err != nil { + return "", nil, err + } + if version != 0 { + if period == 0 { + return "", nil, mru.NewError(mru.ErrInvalidValue, "Period can't be 0") + } + _, err = a.resource.LookupVersion(ctx, rsrc.NameHash(), period, version, true, maxLookup) + } else if period != 0 { + _, err = a.resource.LookupHistorical(ctx, rsrc.NameHash(), period, true, maxLookup) + } else { + _, err = a.resource.LookupLatest(ctx, rsrc.NameHash(), true, maxLookup) + 
} + if err != nil { + return "", nil, err + } + var data []byte + _, data, err = a.resource.GetContent(rsrc.NameHash().Hex()) + if err != nil { + return "", nil, err + } + return rsrc.Name(), data, nil +} + +// ResourceCreate creates Resource and returns its key +func (a *API) ResourceCreate(ctx context.Context, name string, frequency uint64) (storage.Address, error) { + key, _, err := a.resource.New(ctx, name, frequency) + if err != nil { + return nil, err + } + return key, nil +} + +// ResourceUpdateMultihash updates a Mutable Resource and marks the update's content to be of multihash type, which will be recognized upon retrieval. +// It will fail if the data is not a valid multihash. +func (a *API) ResourceUpdateMultihash(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) { + return a.resourceUpdate(ctx, name, data, true) +} + +// ResourceUpdate updates a Mutable Resource with arbitrary data. +// Upon retrieval the update will be retrieved verbatim as bytes. +func (a *API) ResourceUpdate(ctx context.Context, name string, data []byte) (storage.Address, uint32, uint32, error) { + return a.resourceUpdate(ctx, name, data, false) +} + +func (a *API) resourceUpdate(ctx context.Context, name string, data []byte, multihash bool) (storage.Address, uint32, uint32, error) { + var addr storage.Address + var err error + if multihash { + addr, err = a.resource.UpdateMultihash(ctx, name, data) + } else { + addr, err = a.resource.Update(ctx, name, data) + } + period, _ := a.resource.GetLastPeriod(name) + version, _ := a.resource.GetVersion(name) + return addr, period, version, err +} + +// ResourceHashSize returned the size of the digest produced by the Mutable Resource hashing function +func (a *API) ResourceHashSize() int { + return a.resource.HashSize +} + +// ResourceIsValidated checks if the Mutable Resource has an active content validator. +func (a *API) ResourceIsValidated() bool { + return a.resource.IsValidated() +} + +// ResolveResourceManifest retrieves the Mutable Resource manifest for the given address, and returns the address of the metadata chunk. 
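Before the ResolveResourceManifest body below, a hedged walkthrough of the mutable-resource calls just added, in dependency order. demoResource and all of its inputs are invented; it assumes placement alongside the API methods, where context and mru are already imported.

// demoResource creates a resource, publishes one update, then reads the
// same update back by its period and version.
func demoResource(a *API) ([]byte, error) {
	ctx := context.Background()
	// register the resource name with an expected update frequency (in blocks)
	addr, err := a.ResourceCreate(ctx, "mysite", 42)
	if err != nil {
		return nil, err
	}
	// publish arbitrary bytes as the first update; the returned period and
	// version identify exactly where the update landed
	_, period, version, err := a.ResourceUpdate(ctx, "mysite", []byte("v1"))
	if err != nil {
		return nil, err
	}
	// pin the lookup to that period/version; passing 0, 0 would fetch the latest
	_, data, err := a.ResourceLookup(ctx, addr, period, version, &mru.LookupParams{})
	return data, err
}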
+func (a *API) ResolveResourceManifest(addr storage.Address) (storage.Address, error) { + trie, err := loadManifest(a.fileStore, addr, nil) + if err != nil { + return nil, fmt.Errorf("cannot load resource manifest: %v", err) + } + + entry, _ := trie.getEntry("") + if entry.ContentType != ResourceContentType { + return nil, fmt.Errorf("not a resource manifest: %s", addr) + } + + return storage.Address(common.FromHex(entry.Hash)), nil } diff --git a/swarm/api/api_test.go b/swarm/api/api_test.go index 4ee26bd8ad..e607dd4fc3 100644 --- a/swarm/api/api_test.go +++ b/swarm/api/api_test.go @@ -17,33 +17,34 @@ package api import ( + "context" "errors" "fmt" "io" "io/ioutil" + "math/big" "os" "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) -func testApi(t *testing.T, f func(*Api)) { +func testAPI(t *testing.T, f func(*API, bool)) { datadir, err := ioutil.TempDir("", "bzz-test") if err != nil { t.Fatalf("unable to create temp dir: %v", err) } - os.RemoveAll(datadir) defer os.RemoveAll(datadir) - dpa, err := storage.NewLocalDPA(datadir) + fileStore, err := storage.NewLocalFileStore(datadir, make([]byte, 32)) if err != nil { return } - api := NewApi(dpa, nil) - dpa.Start() - f(api) - dpa.Stop() + api := NewAPI(fileStore, nil, nil) + f(api, false) + f(api, true) } type testResponse struct { @@ -82,10 +83,9 @@ func expResponse(content string, mimeType string, status int) *Response { return &Response{mimeType, status, int64(len(content)), content} } -// func testGet(t *testing.T, api *Api, bzzhash string) *testResponse { -func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse { - key := storage.Key(common.Hex2Bytes(bzzhash)) - reader, mimeType, status, err := api.Get(key, path) +func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { + addr := storage.Address(common.Hex2Bytes(bzzhash)) + reader, mimeType, status, _, err := api.Get(addr, path) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -106,27 +106,28 @@ func testGet(t *testing.T, api *Api, bzzhash, path string) *testResponse { } func TestApiPut(t *testing.T) { - testApi(t, func(api *Api) { + testAPI(t, func(api *API, toEncrypt bool) { content := "hello" exp := expResponse(content, "text/plain", 0) // exp := expResponse([]byte(content), "text/plain", 0) - key, err := api.Put(content, exp.MimeType) + addr, wait, err := api.Put(content, exp.MimeType, toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } - resp := testGet(t, api, key.String(), "") + wait() + resp := testGet(t, api, addr.Hex(), "") checkResponse(t, resp, exp) }) } // testResolver implements the Resolver interface and either returns the given // hash if it is set, or returns a "name not found" error -type testResolver struct { +type testResolveValidator struct { hash *common.Hash } -func newTestResolver(addr string) *testResolver { - r := &testResolver{} +func newTestResolveValidator(addr string) *testResolveValidator { + r := &testResolveValidator{} if addr != "" { hash := common.HexToHash(addr) r.hash = &hash @@ -134,21 +135,28 @@ func newTestResolver(addr string) *testResolver { return r } -func (t *testResolver) Resolve(addr string) (common.Hash, error) { +func (t *testResolveValidator) Resolve(addr string) (common.Hash, error) { if t.hash == nil { return common.Hash{}, fmt.Errorf("DNS name not found: %q", addr) } return *t.hash, nil } +func 
(t *testResolveValidator) Owner(node [32]byte) (addr common.Address, err error) { + return +} +func (t *testResolveValidator) HeaderByNumber(context.Context, *big.Int) (header *types.Header, err error) { + return +} + // TestAPIResolve tests resolving URIs which can either contain content hashes // or ENS names func TestAPIResolve(t *testing.T) { ensAddr := "swarm.eth" hashAddr := "1111111111111111111111111111111111111111111111111111111111111111" resolvedAddr := "2222222222222222222222222222222222222222222222222222222222222222" - doesResolve := newTestResolver(resolvedAddr) - doesntResolve := newTestResolver("") + doesResolve := newTestResolveValidator(resolvedAddr) + doesntResolve := newTestResolveValidator("") type test struct { desc string @@ -213,7 +221,7 @@ func TestAPIResolve(t *testing.T) { } for _, x := range tests { t.Run(x.desc, func(t *testing.T) { - api := &Api{dns: x.dns} + api := &API{dns: x.dns} uri := &URI{Addr: x.addr, Scheme: "bzz"} if x.immutable { uri.Scheme = "bzz-immutable" @@ -239,15 +247,15 @@ func TestAPIResolve(t *testing.T) { } func TestMultiResolver(t *testing.T) { - doesntResolve := newTestResolver("") + doesntResolve := newTestResolveValidator("") ethAddr := "swarm.eth" ethHash := "0x2222222222222222222222222222222222222222222222222222222222222222" - ethResolve := newTestResolver(ethHash) + ethResolve := newTestResolveValidator(ethHash) testAddr := "swarm.test" testHash := "0x1111111111111111111111111111111111111111111111111111111111111111" - testResolve := newTestResolver(testHash) + testResolve := newTestResolveValidator(testHash) tests := []struct { desc string diff --git a/swarm/api/client/client.go b/swarm/api/client/client.go index 8165d52d7e..ef6222435f 100644 --- a/swarm/api/client/client.go +++ b/swarm/api/client/client.go @@ -30,6 +30,7 @@ import ( "net/textproto" "os" "path/filepath" + "regexp" "strconv" "strings" @@ -52,12 +53,17 @@ type Client struct { Gateway string } -// UploadRaw uploads raw data to swarm and returns the resulting hash -func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { +// UploadRaw uploads raw data to swarm and returns the resulting hash. 
If toEncrypt is true it +// uploads encrypted data +func (c *Client) UploadRaw(r io.Reader, size int64, toEncrypt bool) (string, error) { if size <= 0 { return "", errors.New("data size must be greater than zero") } - req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/", r) + addr := "" + if toEncrypt { + addr = "encrypt" + } + req, err := http.NewRequest("POST", c.Gateway+"/bzz-raw:/"+addr, r) if err != nil { return "", err } @@ -77,18 +83,20 @@ func (c *Client) UploadRaw(r io.Reader, size int64) (string, error) { return string(data), nil } -// DownloadRaw downloads raw data from swarm -func (c *Client) DownloadRaw(hash string) (io.ReadCloser, error) { +// DownloadRaw downloads raw data from swarm and returns a ReadCloser and a bool indicating whether the +// content was encrypted +func (c *Client) DownloadRaw(hash string) (io.ReadCloser, bool, error) { uri := c.Gateway + "/bzz-raw:/" + hash res, err := http.DefaultClient.Get(uri) if err != nil { - return nil, err + return nil, false, err } if res.StatusCode != http.StatusOK { res.Body.Close() - return nil, fmt.Errorf("unexpected HTTP status: %s", res.Status) + return nil, false, fmt.Errorf("unexpected HTTP status: %s", res.Status) } - return res.Body, nil + isEncrypted := (res.Header.Get("X-Decrypted") == "true") + return res.Body, isEncrypted, nil } // File represents a file in a swarm manifest and is used for uploading and @@ -125,11 +133,11 @@ func Open(path string) (*File, error) { // (if the manifest argument is non-empty) or creates a new manifest containing // the file, returning the resulting manifest hash (the file will then be // available at bzz://) -func (c *Client) Upload(file *File, manifest string) (string, error) { +func (c *Client) Upload(file *File, manifest string, toEncrypt bool) (string, error) { if file.Size <= 0 { return "", errors.New("file size must be greater than zero") } - return c.TarUpload(manifest, &FileUploader{file}) + return c.TarUpload(manifest, &FileUploader{file}, toEncrypt) } // Download downloads a file with the given path from the swarm manifest with @@ -159,14 +167,14 @@ func (c *Client) Download(hash, path string) (*File, error) { // directory will then be available at bzz://path/to/file), with // the file specified in defaultPath being uploaded to the root of the manifest // (i.e.
bzz://) -func (c *Client) UploadDirectory(dir, defaultPath, manifest string) (string, error) { +func (c *Client) UploadDirectory(dir, defaultPath, manifest string, toEncrypt bool) (string, error) { stat, err := os.Stat(dir) if err != nil { return "", err } else if !stat.IsDir() { return "", fmt.Errorf("not a directory: %s", dir) } - return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}) + return c.TarUpload(manifest, &DirectoryUploader{dir, defaultPath}, toEncrypt) } // DownloadDirectory downloads the files contained in a swarm manifest under @@ -228,27 +236,109 @@ func (c *Client) DownloadDirectory(hash, path, destDir string) error { } } +// DownloadFile downloads a single file into the destination directory. +// If the manifest entry does not specify a file name, it will fall back +// to the hash of the file as the filename +func (c *Client) DownloadFile(hash, path, dest string) error { + hasDestinationFilename := false + if stat, err := os.Stat(dest); err == nil { + hasDestinationFilename = !stat.IsDir() + } else { + if os.IsNotExist(err) { + // does not exist - should be created + hasDestinationFilename = true + } else { + return fmt.Errorf("could not stat path: %v", err) + } + } + + manifestList, err := c.List(hash, path) + if err != nil { + return fmt.Errorf("could not list manifest: %v", err) + } + + switch len(manifestList.Entries) { + case 0: + return fmt.Errorf("could not find path requested at manifest address. make sure the path you've specified is correct") + case 1: + //continue + default: + return fmt.Errorf("got too many matches for this path") + } + + uri := c.Gateway + "/bzz:/" + hash + "/" + path + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + defer res.Body.Close() + + if res.StatusCode != http.StatusOK { + return fmt.Errorf("unexpected HTTP status: expected 200 OK, got %d", res.StatusCode) + } + filename := "" + if hasDestinationFilename { + filename = dest + } else { + // try to infer the filename from the path + re := regexp.MustCompile("[^/]+$") //everything after last slash + + if results := re.FindAllString(path, -1); len(results) > 0 { + filename = results[len(results)-1] + } else { + if entry := manifestList.Entries[0]; entry.Path != "" && entry.Path != "/" { + filename = entry.Path + } else { + // assume hash as name if there's nothing from the command line + filename = hash + } + } + filename = filepath.Join(dest, filename) + } + filePath, err := filepath.Abs(filename) + if err != nil { + return err + } + + if err := os.MkdirAll(filepath.Dir(filePath), 0777); err != nil { + return err + } + + dst, err := os.Create(filename) + if err != nil { + return err + } + defer dst.Close() + + _, err = io.Copy(dst, res.Body) + return err +} + // UploadManifest uploads the given manifest to swarm -func (c *Client) UploadManifest(m *api.Manifest) (string, error) { +func (c *Client) UploadManifest(m *api.Manifest, toEncrypt bool) (string, error) { data, err := json.Marshal(m) if err != nil { return "", err } - return c.UploadRaw(bytes.NewReader(data), int64(len(data))) + return c.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) } // DownloadManifest downloads a swarm manifest -func (c *Client) DownloadManifest(hash string) (*api.Manifest, error) { - res, err := c.DownloadRaw(hash) +func (c *Client) DownloadManifest(hash string) (*api.Manifest, bool, error) { + res, isEncrypted, err := c.DownloadRaw(hash) if err != nil { - return nil, err + return nil, isEncrypted, err
} defer res.Close() var manifest api.Manifest if err := json.NewDecoder(res).Decode(&manifest); err != nil { - return nil, err + return nil, isEncrypted, err } - return &manifest, nil + return &manifest, isEncrypted, nil } // List lists files in a swarm manifest which have the given prefix, grouping @@ -350,10 +440,19 @@ type UploadFn func(file *File) error // TarUpload uses the given Uploader to upload files to swarm as a tar stream, // returning the resulting manifest hash -func (c *Client) TarUpload(hash string, uploader Uploader) (string, error) { +func (c *Client) TarUpload(hash string, uploader Uploader, toEncrypt bool) (string, error) { reqR, reqW := io.Pipe() defer reqR.Close() - req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+hash, reqR) + addr := hash + + // If there is a hash already (a manifest), then that manifest will determine if the upload has + // to be encrypted or not. If there is no manifest then the toEncrypt parameter decides if + // there is encryption or not. + if hash == "" && toEncrypt { + // This is the built-in address for the encrypted upload endpoint + addr = "encrypt" + } + req, err := http.NewRequest("POST", c.Gateway+"/bzz:/"+addr, reqR) if err != nil { return "", err } diff --git a/swarm/api/client/client_test.go b/swarm/api/client/client_test.go index c1d144e370..a878bff174 100644 --- a/swarm/api/client/client_test.go +++ b/swarm/api/client/client_test.go @@ -26,28 +26,43 @@ import ( "testing" "github.com/ethereum/go-ethereum/swarm/api" + swarmhttp "github.com/ethereum/go-ethereum/swarm/api/http" "github.com/ethereum/go-ethereum/swarm/testutil" ) +func serverFunc(api *api.API) testutil.TestServer { + return swarmhttp.NewServer(api) +} + // TestClientUploadDownloadRaw tests uploading and downloading raw data to swarm func TestClientUploadDownloadRaw(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientUploadDownloadRaw(false, t) +} +func TestClientUploadDownloadRawEncrypted(t *testing.T) { + testClientUploadDownloadRaw(true, t) +} + +func testClientUploadDownloadRaw(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() client := NewClient(srv.URL) // upload some raw data data := []byte("foo123") - hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data))) + hash, err := client.UploadRaw(bytes.NewReader(data), int64(len(data)), toEncrypt) if err != nil { t.Fatal(err) } // check we can download the same data - res, err := client.DownloadRaw(hash) + res, isEncrypted, err := client.DownloadRaw(hash) if err != nil { t.Fatal(err) } + if isEncrypted != toEncrypt { + t.Fatalf("Expected encryption status %v got %v", toEncrypt, isEncrypted) + } defer res.Close() gotData, err := ioutil.ReadAll(res) if err != nil { @@ -61,7 +76,15 @@ func TestClientUploadDownloadRaw(t *testing.T) { // TestClientUploadDownloadFiles tests uploading and downloading files to swarm // manifests func TestClientUploadDownloadFiles(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientUploadDownloadFiles(false, t) +} + +func TestClientUploadDownloadFilesEncrypted(t *testing.T) { + testClientUploadDownloadFiles(true, t) +} + +func testClientUploadDownloadFiles(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() client := NewClient(srv.URL) @@ -74,7 +97,7 @@ func TestClientUploadDownloadFiles(t *testing.T) { Size: int64(len(data)), }, } - hash, err := client.Upload(file, manifest) + hash, err := client.Upload(file, manifest, toEncrypt) if err != nil {
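// An editorial aside, not part of this change set: from a consumer's point of
// view the encrypted round trip exercised by these tests looks roughly like
// the sketch below. The gateway URL is invented and error handling is elided;
// the comments state assumptions rather than guarantees.
//
//	c := NewClient("http://localhost:8500")
//	data := []byte("secret")
//	// toEncrypt=true makes the client POST to the /bzz-raw:/encrypt endpoint;
//	// the returned reference is longer than a plain hash because it is
//	// assumed to carry the decryption key alongside the content address
//	hash, _ := c.UploadRaw(bytes.NewReader(data), int64(len(data)), true)
//	// the gateway decrypts on the way out and reports what happened in the
//	// X-Decrypted response header, surfaced here as isEncrypted
//	r, isEncrypted, _ := c.DownloadRaw(hash) // isEncrypted == true
//	defer r.Close()
//	plain, _ := ioutil.ReadAll(r) // plain == data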
t.Fatal(err) } @@ -159,7 +182,7 @@ func newTestDirectory(t *testing.T) string { // TestClientUploadDownloadDirectory tests uploading and downloading a // directory of files to a swarm manifest func TestClientUploadDownloadDirectory(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() dir := newTestDirectory(t) @@ -168,7 +191,7 @@ func TestClientUploadDownloadDirectory(t *testing.T) { // upload the directory client := NewClient(srv.URL) defaultPath := filepath.Join(dir, testDirFiles[0]) - hash, err := client.UploadDirectory(dir, defaultPath, "") + hash, err := client.UploadDirectory(dir, defaultPath, "", false) if err != nil { t.Fatalf("error uploading directory: %s", err) } @@ -217,14 +240,22 @@ func TestClientUploadDownloadDirectory(t *testing.T) { // TestClientFileList tests listing files in a swarm manifest func TestClientFileList(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + testClientFileList(false, t) +} + +func TestClientFileListEncrypted(t *testing.T) { + testClientFileList(true, t) +} + +func testClientFileList(toEncrypt bool, t *testing.T) { + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() dir := newTestDirectory(t) defer os.RemoveAll(dir) client := NewClient(srv.URL) - hash, err := client.UploadDirectory(dir, "", "") + hash, err := client.UploadDirectory(dir, "", "", toEncrypt) if err != nil { t.Fatalf("error uploading directory: %s", err) } @@ -275,7 +306,7 @@ func TestClientFileList(t *testing.T) { // TestClientMultipartUpload tests uploading files to swarm using a multipart // upload func TestClientMultipartUpload(t *testing.T) { - srv := testutil.NewTestSwarmServer(t) + srv := testutil.NewTestSwarmServer(t, serverFunc) defer srv.Close() // define an uploader which uploads testDirFiles with some data diff --git a/swarm/api/config.go b/swarm/api/config.go index 6b224140a4..939285e09c 100644 --- a/swarm/api/config.go +++ b/swarm/api/config.go @@ -21,13 +21,16 @@ import ( "fmt" "os" "path/filepath" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts/ens" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/p2p/discover" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/network" + "github.com/ethereum/go-ethereum/swarm/pss" "github.com/ethereum/go-ethereum/swarm/services/swap" "github.com/ethereum/go-ethereum/swarm/storage" ) @@ -41,47 +44,55 @@ const ( // allow several bzz nodes running in parallel type Config struct { // serialised/persisted fields - *storage.StoreParams - *storage.ChunkerParams + *storage.FileStoreParams + *storage.LocalStoreParams *network.HiveParams - Swap *swap.SwapParams - *network.SyncParams - Contract common.Address - EnsRoot common.Address - EnsAPIs []string - Path string - ListenAddr string - Port string - PublicKey string - BzzKey string - NetworkId uint64 - SwapEnabled bool - SyncEnabled bool - SwapApi string - Cors string - BzzAccount string - BootNodes string + Swap *swap.LocalProfile + Pss *pss.PssParams + //*network.SyncParams + Contract common.Address + EnsRoot common.Address + EnsAPIs []string + Path string + ListenAddr string + Port string + PublicKey string + BzzKey string + NodeID string + NetworkID uint64 + SwapEnabled bool + SyncEnabled bool + DeliverySkipCheck bool + SyncUpdateDelay time.Duration + SwapAPI string + Cors string + BzzAccount string + BootNodes 
string + privateKey *ecdsa.PrivateKey } //create a default config with all parameters set to defaults -func NewDefaultConfig() (self *Config) { +func NewConfig() (c *Config) { - self = &Config{ - StoreParams: storage.NewDefaultStoreParams(), - ChunkerParams: storage.NewChunkerParams(), - HiveParams: network.NewDefaultHiveParams(), - SyncParams: network.NewDefaultSyncParams(), - Swap: swap.NewDefaultSwapParams(), - ListenAddr: DefaultHTTPListenAddr, - Port: DefaultHTTPPort, - Path: node.DefaultDataDir(), - EnsAPIs: nil, - EnsRoot: ens.TestNetAddress, - NetworkId: network.NetworkId, - SwapEnabled: false, - SyncEnabled: true, - SwapApi: "", - BootNodes: "", + c = &Config{ + LocalStoreParams: storage.NewDefaultLocalStoreParams(), + FileStoreParams: storage.NewFileStoreParams(), + HiveParams: network.NewHiveParams(), + //SyncParams: network.NewDefaultSyncParams(), + Swap: swap.NewDefaultSwapParams(), + Pss: pss.NewPssParams(), + ListenAddr: DefaultHTTPListenAddr, + Port: DefaultHTTPPort, + Path: node.DefaultDataDir(), + EnsAPIs: nil, + EnsRoot: ens.TestNetAddress, + NetworkID: network.DefaultNetworkID, + SwapEnabled: false, + SyncEnabled: true, + DeliverySkipCheck: false, + SyncUpdateDelay: 15 * time.Second, + SwapAPI: "", + BootNodes: "", } return @@ -89,11 +100,11 @@ func NewDefaultConfig() (self *Config) { //some config params need to be initialized after the complete //config building phase is completed (e.g. due to overriding flags) -func (self *Config) Init(prvKey *ecdsa.PrivateKey) { +func (c *Config) Init(prvKey *ecdsa.PrivateKey) { address := crypto.PubkeyToAddress(prvKey.PublicKey) - self.Path = filepath.Join(self.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) - err := os.MkdirAll(self.Path, os.ModePerm) + c.Path = filepath.Join(c.Path, "bzz-"+common.Bytes2Hex(address.Bytes())) + err := os.MkdirAll(c.Path, os.ModePerm) if err != nil { log.Error(fmt.Sprintf("Error creating root swarm data directory: %v", err)) return @@ -103,11 +114,25 @@ func (self *Config) Init(prvKey *ecdsa.PrivateKey) { pubkeyhex := common.ToHex(pubkey) keyhex := crypto.Keccak256Hash(pubkey).Hex() - self.PublicKey = pubkeyhex - self.BzzKey = keyhex + c.PublicKey = pubkeyhex + c.BzzKey = keyhex + c.NodeID = discover.PubkeyID(&prvKey.PublicKey).String() - self.Swap.Init(self.Contract, prvKey) - self.SyncParams.Init(self.Path) - self.HiveParams.Init(self.Path) - self.StoreParams.Init(self.Path) + if c.SwapEnabled { + c.Swap.Init(c.Contract, prvKey) + } + + c.privateKey = prvKey + c.LocalStoreParams.Init(c.Path) + c.LocalStoreParams.BaseKey = common.FromHex(keyhex) + + c.Pss = c.Pss.WithPrivateKey(c.privateKey) +} + +func (c *Config) ShiftPrivateKey() (privKey *ecdsa.PrivateKey) { + if c.privateKey != nil { + privKey = c.privateKey + c.privateKey = nil + } + return privKey } diff --git a/swarm/api/config_test.go b/swarm/api/config_test.go index 5636b6dafb..bd7e1d8705 100644 --- a/swarm/api/config_test.go +++ b/swarm/api/config_test.go @@ -33,9 +33,10 @@ func TestConfig(t *testing.T) { t.Fatalf("failed to load private key: %v", err) } - one := NewDefaultConfig() - two := NewDefaultConfig() + one := NewConfig() + two := NewConfig() + one.LocalStoreParams = two.LocalStoreParams if equal := reflect.DeepEqual(one, two); !equal { t.Fatal("Two default configs are not equal") } @@ -49,21 +50,10 @@ func TestConfig(t *testing.T) { if one.PublicKey == "" { t.Fatal("Expected PublicKey to be set") } - - //the Init function should append subdirs to the given path - if one.Swap.PayProfile.Beneficiary == (common.Address{}) { + if
one.Swap.PayProfile.Beneficiary == (common.Address{}) && one.SwapEnabled { t.Fatal("Failed to correctly initialize SwapParams") } - - if one.SyncParams.RequestDbPath == one.Path { - t.Fatal("Failed to correctly initialize SyncParams") - } - - if one.HiveParams.KadDbPath == one.Path { - t.Fatal("Failed to correctly initialize HiveParams") - } - - if one.StoreParams.ChunkDbPath == one.Path { + if one.ChunkDbPath == one.Path { t.Fatal("Failed to correctly initialize StoreParams") } } diff --git a/swarm/api/filesystem.go b/swarm/api/filesystem.go index f5dc90e2e5..297cbec79f 100644 --- a/swarm/api/filesystem.go +++ b/swarm/api/filesystem.go @@ -27,26 +27,27 @@ import ( "sync" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/swarm/log" "github.com/ethereum/go-ethereum/swarm/storage" ) const maxParallelFiles = 5 type FileSystem struct { - api *Api + api *API } -func NewFileSystem(api *Api) *FileSystem { +func NewFileSystem(api *API) *FileSystem { return &FileSystem{api} } // Upload replicates a local directory as a manifest file and uploads it -// using dpa store +// using the FileStore +// This function waits for the chunks to be stored. // TODO: localpath should point to a manifest // // DEPRECATED: Use the HTTP API instead -func (self *FileSystem) Upload(lpath, index string) (string, error) { +func (fs *FileSystem) Upload(lpath, index string, toEncrypt bool) (string, error) { var list []*manifestTrieEntry localpath, err := filepath.Abs(filepath.Clean(lpath)) if err != nil { @@ -111,13 +112,13 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { f, err := os.Open(entry.Path) if err == nil { stat, _ := f.Stat() - var hash storage.Key - wg := &sync.WaitGroup{} - hash, err = self.api.dpa.Store(f, stat.Size(), wg, nil) + var hash storage.Address + var wait func() + hash, wait, err = fs.api.fileStore.Store(f, stat.Size(), toEncrypt) if hash != nil { - list[i].Hash = hash.String() + list[i].Hash = hash.Hex() } - wg.Wait() + wait() awg.Done() if err == nil { first512 := make([]byte, 512) @@ -142,7 +143,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { } trie := &manifestTrie{ - dpa: self.api.dpa, + fileStore: fs.api.fileStore, } quitC := make(chan bool) for i, entry := range list { @@ -163,7 +164,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { err2 := trie.recalcAndStore() var hs string if err2 == nil { - hs = trie.hash.String() + hs = trie.ref.Hex() } awg.Wait() return hs, err2 @@ -173,7 +174,7 @@ func (self *FileSystem) Upload(lpath, index string) (string, error) { // under localpath // // DEPRECATED: Use the HTTP API instead -func (self *FileSystem) Download(bzzpath, localpath string) error { +func (fs *FileSystem) Download(bzzpath, localpath string) error { lpath, err := filepath.Abs(filepath.Clean(localpath)) if err != nil { return err @@ -188,7 +189,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { if err != nil { return err } - key, err := self.api.Resolve(uri) + addr, err := fs.api.Resolve(uri) if err != nil { return err } @@ -199,14 +200,14 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { } quitC := make(chan bool) - trie, err := loadManifest(self.api.dpa, key, quitC) + trie, err := loadManifest(fs.api.fileStore, addr, quitC) if err != nil { log.Warn(fmt.Sprintf("fs.Download: loadManifestTrie error: %v", err)) return err } type downloadListEntry struct { - key storage.Key + addr storage.Address path string } @@
-217,7 +218,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { err = trie.listWithPrefix(path, quitC, func(entry *manifestTrieEntry, suffix string) { log.Trace(fmt.Sprintf("fs.Download: %#v", entry)) - key = common.Hex2Bytes(entry.Hash) + addr = common.Hex2Bytes(entry.Hash) path := lpath + "/" + suffix dir := filepath.Dir(path) if dir != prevPath { @@ -225,7 +226,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { prevPath = dir } if (mde == nil) && (path != dir+"/") { - list = append(list, &downloadListEntry{key: key, path: path}) + list = append(list, &downloadListEntry{addr: addr, path: path}) } }) if err != nil { @@ -244,7 +245,7 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { } go func(i int, entry *downloadListEntry) { defer wg.Done() - err := retrieveToFile(quitC, self.api.dpa, entry.key, entry.path) + err := retrieveToFile(quitC, fs.api.fileStore, entry.addr, entry.path) if err != nil { select { case errC <- err: @@ -267,12 +268,12 @@ func (self *FileSystem) Download(bzzpath, localpath string) error { } } -func retrieveToFile(quitC chan bool, dpa *storage.DPA, key storage.Key, path string) error { +func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage.Address, path string) error { f, err := os.Create(path) // TODO: basePath separators if err != nil { return err } - reader := dpa.Retrieve(key) + reader, _ := fileStore.Retrieve(addr) writer := bufio.NewWriter(f) size, err := reader.Size(quitC) if err != nil { diff --git a/swarm/api/filesystem_test.go b/swarm/api/filesystem_test.go index 8a15e735dc..915dc4e0b9 100644 --- a/swarm/api/filesystem_test.go +++ b/swarm/api/filesystem_test.go @@ -21,7 +21,6 @@ import ( "io/ioutil" "os" "path/filepath" - "sync" "testing" "github.com/ethereum/go-ethereum/common" @@ -30,9 +29,9 @@ import ( var testDownloadDir, _ = ioutil.TempDir(os.TempDir(), "bzz-test") -func testFileSystem(t *testing.T, f func(*FileSystem)) { - testApi(t, func(api *Api) { - f(NewFileSystem(api)) +func testFileSystem(t *testing.T, f func(*FileSystem, bool)) { + testAPI(t, func(api *API, toEncrypt bool) { + f(NewFileSystem(api), toEncrypt) }) } @@ -47,9 +46,9 @@ func readPath(t *testing.T, parts ...string) string { } func TestApiDirUpload0(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } @@ -63,8 +62,8 @@ func TestApiDirUpload0(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - key := storage.Key(common.Hex2Bytes(bzzhash)) - _, _, _, err = api.Get(key, "") + addr := storage.Address(common.Hex2Bytes(bzzhash)) + _, _, _, _, err = api.Get(addr, "") if err == nil { t.Fatalf("expected error: %v", err) } @@ -75,27 +74,28 @@ func TestApiDirUpload0(t *testing.T) { if err != nil { t.Fatalf("unexpected error: %v", err) } - newbzzhash, err := fs.Upload(downloadDir, "") + newbzzhash, err := fs.Upload(downloadDir, "", toEncrypt) if err != nil { t.Fatalf("unexpected error: %v", err) } - if bzzhash != newbzzhash { + // TODO: currently the hash is not deterministic in the encrypted case + if !toEncrypt && bzzhash != newbzzhash { t.Fatalf("download %v reuploaded has incorrect hash, expected %v, got %v", downloadDir, bzzhash, newbzzhash) } }) } func TestApiDirUploadModify(t *testing.T) { 
- testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return } - key := storage.Key(common.Hex2Bytes(bzzhash)) - key, err = api.Modify(key, "index.html", "", "") + addr := storage.Address(common.Hex2Bytes(bzzhash)) + addr, err = api.Modify(addr, "index.html", "", "") if err != nil { t.Errorf("unexpected error: %v", err) return @@ -105,24 +105,23 @@ func TestApiDirUploadModify(t *testing.T) { t.Errorf("unexpected error: %v", err) return } - wg := &sync.WaitGroup{} - hash, err := api.Store(bytes.NewReader(index), int64(len(index)), wg) - wg.Wait() + hash, wait, err := api.Store(bytes.NewReader(index), int64(len(index)), toEncrypt) + wait() if err != nil { t.Errorf("unexpected error: %v", err) return } - key, err = api.Modify(key, "index2.html", hash.Hex(), "text/html; charset=utf-8") + addr, err = api.Modify(addr, "index2.html", hash.Hex(), "text/html; charset=utf-8") if err != nil { t.Errorf("unexpected error: %v", err) return } - key, err = api.Modify(key, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") + addr, err = api.Modify(addr, "img/logo.png", hash.Hex(), "text/html; charset=utf-8") if err != nil { t.Errorf("unexpected error: %v", err) return } - bzzhash = key.String() + bzzhash = addr.Hex() content := readPath(t, "testdata", "test0", "index.html") resp := testGet(t, api, bzzhash, "index2.html") @@ -138,7 +137,7 @@ func TestApiDirUploadModify(t *testing.T) { exp = expResponse(content, "text/css", 0) checkResponse(t, resp, exp) - _, _, _, err = api.Get(key, "") + _, _, _, _, err = api.Get(addr, "") if err == nil { t.Errorf("expected error: %v", err) } @@ -146,9 +145,9 @@ func TestApiDirUploadModify(t *testing.T) { } func TestApiDirUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0"), "index.html", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return @@ -162,9 +161,9 @@ func TestApiDirUploadWithRootFile(t *testing.T) { } func TestApiFileUpload(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return @@ -178,9 +177,9 @@ func TestApiFileUpload(t *testing.T) { } func TestApiFileUploadWithRootFile(t *testing.T) { - testFileSystem(t, func(fs *FileSystem) { + testFileSystem(t, func(fs *FileSystem, toEncrypt bool) { api := fs.api - bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html") + bzzhash, err := fs.Upload(filepath.Join("testdata", "test0", "index.html"), "index.html", toEncrypt) if err != nil { t.Errorf("unexpected error: %v", err) return diff --git a/swarm/api/http/error.go b/swarm/api/http/error.go index 9a65412cf9..5fff7575e8 100644 --- a/swarm/api/http/error.go +++ b/swarm/api/http/error.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" 
"github.com/ethereum/go-ethereum/swarm/api" + l "github.com/ethereum/go-ethereum/swarm/log" ) //templateMap holds a mapping of an HTTP error code to a template @@ -44,7 +45,7 @@ var ( ) //parameters needed for formatting the correct HTML page -type ErrorParams struct { +type ResponseParams struct { Msg string Code int Timestamp string @@ -113,45 +114,49 @@ func ValidateCaseErrors(r *Request) string { //For example, if the user requests bzz://read and that manifest contains entries //"readme.md" and "readinglist.txt", a HTML page is returned with this two links. //This only applies if the manifest has no default entry -func ShowMultipleChoices(w http.ResponseWriter, r *Request, list api.ManifestList) { +func ShowMultipleChoices(w http.ResponseWriter, req *Request, list api.ManifestList) { msg := "" if list.Entries == nil { - ShowError(w, r, "Could not resolve", http.StatusInternalServerError) + Respond(w, req, "Could not resolve", http.StatusInternalServerError) return } //make links relative //requestURI comes with the prefix of the ambiguous path, e.g. "read" for "readme.md" and "readinglist.txt" //to get clickable links, need to remove the ambiguous path, i.e. "read" - idx := strings.LastIndex(r.RequestURI, "/") + idx := strings.LastIndex(req.RequestURI, "/") if idx == -1 { - ShowError(w, r, "Internal Server Error", http.StatusInternalServerError) + Respond(w, req, "Internal Server Error", http.StatusInternalServerError) return } //remove ambiguous part - base := r.RequestURI[:idx+1] + base := req.RequestURI[:idx+1] for _, e := range list.Entries { //create clickable link for each entry msg += "" + e.Path + "
" } - respond(w, &r.Request, &ErrorParams{ - Code: http.StatusMultipleChoices, - Details: template.HTML(msg), - Timestamp: time.Now().Format(time.RFC1123), - template: getTemplate(http.StatusMultipleChoices), - }) + Respond(w, req, msg, http.StatusMultipleChoices) } -//ShowError is used to show an HTML error page to a client. +//Respond is used to show an HTML page to a client. //If there is an `Accept` header of `application/json`, JSON will be returned instead //The function just takes a string message which will be displayed in the error page. //The code is used to evaluate which template will be displayed //(and return the correct HTTP status code) -func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { - additionalMessage := ValidateCaseErrors(r) - if code == http.StatusInternalServerError { - log.Error(msg) +func Respond(w http.ResponseWriter, req *Request, msg string, code int) { + additionalMessage := ValidateCaseErrors(req) + switch code { + case http.StatusInternalServerError: + log.Output(msg, log.LvlError, l.CallDepth, "ruid", req.ruid, "code", code) + default: + log.Output(msg, log.LvlDebug, l.CallDepth, "ruid", req.ruid, "code", code) } - respond(w, &r.Request, &ErrorParams{ + + if code >= 400 { + w.Header().Del("Cache-Control") //avoid sending cache headers for errors! + w.Header().Del("ETag") + } + + respond(w, &req.Request, &ResponseParams{ Code: code, Msg: msg, Details: template.HTML(additionalMessage), @@ -161,17 +166,17 @@ func ShowError(w http.ResponseWriter, r *Request, msg string, code int) { } //evaluate if client accepts html or json response -func respond(w http.ResponseWriter, r *http.Request, params *ErrorParams) { +func respond(w http.ResponseWriter, r *http.Request, params *ResponseParams) { w.WriteHeader(params.Code) if r.Header.Get("Accept") == "application/json" { - respondJson(w, params) + respondJSON(w, params) } else { - respondHtml(w, params) + respondHTML(w, params) } } //return a HTML page -func respondHtml(w http.ResponseWriter, params *ErrorParams) { +func respondHTML(w http.ResponseWriter, params *ResponseParams) { htmlCounter.Inc(1) err := params.template.Execute(w, params) if err != nil { @@ -180,7 +185,7 @@ func respondHtml(w http.ResponseWriter, params *ErrorParams) { } //return JSON -func respondJson(w http.ResponseWriter, params *ErrorParams) { +func respondJSON(w http.ResponseWriter, params *ResponseParams) { jsonCounter.Inc(1) w.Header().Set("Content-Type", "application/json") json.NewEncoder(w).Encode(params) @@ -190,7 +195,6 @@ func respondJson(w http.ResponseWriter, params *ErrorParams) { func getTemplate(code int) *template.Template { if val, tmpl := templateMap[code]; tmpl { return val - } else { - return templateMap[0] } + return templateMap[0] } diff --git a/swarm/api/http/error_templates.go b/swarm/api/http/error_templates.go index cc9b996ba4..f3c643c90d 100644 --- a/swarm/api/http/error_templates.go +++ b/swarm/api/http/error_templates.go @@ -36,7 +36,6 @@ func GetGenericErrorPage() string { -