Merge branch 'master' of https://github.com/ethereum/go-ethereum into geth-sharding

Former-commit-id: 25dc367490dd16ef4fa1d462118aa438df1b319a [formerly 6fab78aeb8b9f54fddbad1406f97392b753a830a]
Former-commit-id: 0de0bb3ef9af1735ac8ecd1aefe2d57c0f76c62d
Fynn
2018-03-23 19:10:53 +01:00
304 changed files with 22902 additions and 12621 deletions

.github/CODEOWNERS (vendored, 3 lines changed)

@@ -5,5 +5,8 @@ accounts/usbwallet @karalabe
consensus @karalabe
core/ @karalabe @holiman
eth/ @karalabe
les/ @zsfelfoldi
light/ @zsfelfoldi
mobile/ @karalabe
p2p/ @fjl @zsfelfoldi
whisper/ @gballet @gluk256

.github/no-response.yml (vendored, new file, 11 lines changed)

@@ -0,0 +1,11 @@
# Number of days of inactivity before an Issue is closed for lack of response
daysUntilClose: 30
# Label requiring a response
responseRequiredLabel: more-information-needed
# Comment to post when closing an Issue for lack of response. Set to `false` to disable
closeComment: >
This issue has been automatically closed because there has been no response
to our request for more information from the original author. With only the
information that is currently in the issue, we don't have enough information
to take action. Please reach out if you have or find the answers we need so
that we can investigate further.

.github/stale.yml (vendored, new file, 17 lines changed)

@@ -0,0 +1,17 @@
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 366
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 42
# Issues with these labels will never be considered stale
exemptLabels:
- pinned
- security
# Label to use when marking an issue as stale
staleLabel: stale
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
This issue has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false

.gitignore (vendored, 6 lines changed)

@@ -34,8 +34,14 @@ profile.cov
# IdeaIDE
.idea
# VS Code
.vscode
# dashboard
/dashboard/assets/flow-typed
/dashboard/assets/node_modules
/dashboard/assets/stats.json
/dashboard/assets/bundle.js
/dashboard/assets/package-lock.json
**/yarn-error.log


@@ -6,18 +6,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.7.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
- sudo chown root:$USER /etc/fuse.conf
- go run build/ci.go install
- go run build/ci.go test -coverage
- os: linux
dist: trusty
sudo: required
go: 1.8.x
go: 1.9.x
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
@@ -29,7 +18,7 @@ matrix:
- os: linux
dist: trusty
sudo: required
go: 1.9.x
go: "1.10"
script:
- sudo modprobe fuse
- sudo chmod 666 /dev/fuse
@@ -38,7 +27,7 @@ matrix:
- go run build/ci.go test -coverage
- os: osx
go: 1.9.x
go: "1.10"
script:
- unset -f cd # workaround for https://github.com/travis-ci/travis-ci/issues/8703
- brew update
@@ -50,7 +39,7 @@ matrix:
# This builder only tests code linters on latest version of Go
- os: linux
dist: trusty
go: 1.9.x
go: "1.10"
env:
- lint
git:


@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
FROM golang:1.10-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers


@@ -1,5 +1,5 @@
# Build Geth in a stock Go builder container
FROM golang:1.9-alpine as builder
FROM golang:1.10-alpine as builder
RUN apk add --no-cache make gcc musl-dev linux-headers


@@ -1 +1 @@
1.8.2
1.8.3


@@ -136,11 +136,11 @@ func (abi *ABI) UnmarshalJSON(data []byte) error {
// MethodById looks up a method by the 4-byte id
// returns nil if none found
func (abi *ABI) MethodById(sigdata []byte) *Method {
func (abi *ABI) MethodById(sigdata []byte) (*Method, error) {
for _, method := range abi.Methods {
if bytes.Equal(method.Id(), sigdata[:4]) {
return &method
return &method, nil
}
}
return nil
return nil, fmt.Errorf("no method with id: %#x", sigdata[:4])
}
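Callers of MethodById now have to handle the returned error instead of checking for a bare nil. A minimal caller sketch (illustrative names, assuming a parsed abi.ABI value and raw calldata; not part of this diff):

func methodNameFor(parsedABI abi.ABI, calldata []byte) (string, error) {
	if len(calldata) < 4 {
		return "", fmt.Errorf("calldata too short for a 4-byte selector")
	}
	method, err := parsedABI.MethodById(calldata[:4])
	if err != nil {
		// previously a silent nil; the miss is now reported explicitly
		return "", err
	}
	return method.Name, nil
}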


@@ -702,7 +702,11 @@ func TestABI_MethodById(t *testing.T) {
}
for name, m := range abi.Methods {
a := fmt.Sprintf("%v", m)
b := fmt.Sprintf("%v", abi.MethodById(m.Id()))
m2, err := abi.MethodById(m.Id())
if err != nil {
t.Fatalf("Failed to look up ABI method: %v", err)
}
b := fmt.Sprintf("%v", m2)
if a != b {
t.Errorf("Method %v (id %v) not 'findable' by id in ABI", name, common.ToHex(m.Id()))
}


@@ -67,6 +67,17 @@ func (arguments Arguments) LengthNonIndexed() int {
return out
}
// NonIndexed returns the arguments with indexed arguments filtered out
func (arguments Arguments) NonIndexed() Arguments {
var ret []Argument
for _, arg := range arguments {
if !arg.Indexed {
ret = append(ret, arg)
}
}
return ret
}
// isTuple returns true for non-atomic constructs, like (uint,uint) or uint[]
func (arguments Arguments) isTuple() bool {
return len(arguments) > 1
@@ -74,21 +85,25 @@ func (arguments Arguments) isTuple() bool {
// Unpack performs the operation hexdata -> Go format
func (arguments Arguments) Unpack(v interface{}, data []byte) error {
if arguments.isTuple() {
return arguments.unpackTuple(v, data)
}
return arguments.unpackAtomic(v, data)
}
func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
if reflect.Ptr != reflect.ValueOf(v).Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
}
marshalledValues, err := arguments.UnpackValues(data)
if err != nil {
return err
}
if arguments.isTuple() {
return arguments.unpackTuple(v, marshalledValues)
}
return arguments.unpackAtomic(v, marshalledValues)
}
func (arguments Arguments) unpackTuple(v interface{}, marshalledValues []interface{}) error {
var (
value = valueOf.Elem()
value = reflect.ValueOf(v).Elem()
typ = value.Type()
kind = value.Kind()
)
@@ -98,53 +113,19 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
}
// If the output interface is a struct, make sure names don't collide
if kind == reflect.Struct {
exists := make(map[string]bool)
for _, arg := range arguments {
field := capitalise(arg.Name)
if field == "" {
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
}
if exists[field] {
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
}
exists[field] = true
}
}
// `i` counts the nonindexed arguments.
// `j` counts the number of complex types.
// both `i` and `j` are used to to correctly compute `data` offset.
i, j := -1, 0
for _, arg := range arguments {
if arg.Indexed {
// can't read, continue
continue
}
i++
marshalledValue, err := toGoType((i+j)*32, arg.Type, output)
if err != nil {
if err := requireUniqueStructFieldNames(arguments); err != nil {
return err
}
}
for i, arg := range arguments.NonIndexed() {
if arg.Type.T == ArrayTy {
// combined index ('i' + 'j') need to be adjusted only by size of array, thus
// we need to decrement 'j' because 'i' was incremented
j += arg.Type.Size - 1
}
reflectValue := reflect.ValueOf(marshalledValue)
reflectValue := reflect.ValueOf(marshalledValues[i])
switch kind {
case reflect.Struct:
name := capitalise(arg.Name)
for j := 0; j < typ.NumField(); j++ {
// TODO read tags: `abi:"fieldName"`
if typ.Field(j).Name == name {
if err := set(value.Field(j), reflectValue, arg); err != nil {
return err
}
}
err := unpackStruct(value, reflectValue, arg)
if err != nil {
return err
}
case reflect.Slice, reflect.Array:
if value.Len() < i {
@@ -166,34 +147,84 @@ func (arguments Arguments) unpackTuple(v interface{}, output []byte) error {
}
// unpackAtomic unpacks ( hexdata -> go ) a single value
func (arguments Arguments) unpackAtomic(v interface{}, output []byte) error {
// make sure the passed value is arguments pointer
valueOf := reflect.ValueOf(v)
if reflect.Ptr != valueOf.Kind() {
return fmt.Errorf("abi: Unpack(non-pointer %T)", v)
func (arguments Arguments) unpackAtomic(v interface{}, marshalledValues []interface{}) error {
if len(marshalledValues) != 1 {
return fmt.Errorf("abi: wrong length, expected single value, got %d", len(marshalledValues))
}
arg := arguments[0]
if arg.Indexed {
return fmt.Errorf("abi: attempting to unpack indexed variable into element.")
elem := reflect.ValueOf(v).Elem()
kind := elem.Kind()
reflectValue := reflect.ValueOf(marshalledValues[0])
if kind == reflect.Struct {
//make sure names don't collide
if err := requireUniqueStructFieldNames(arguments); err != nil {
return err
}
return unpackStruct(elem, reflectValue, arguments[0])
}
value := valueOf.Elem()
return set(elem, reflectValue, arguments.NonIndexed()[0])
marshalledValue, err := toGoType(0, arg.Type, output)
if err != nil {
return err
}
return set(value, reflect.ValueOf(marshalledValue), arg)
}
// Unpack performs the operation Go format -> Hexdata
// Computes the full size of an array;
// i.e. counting nested arrays, which count towards size for unpacking.
func getArraySize(arr *Type) int {
size := arr.Size
// Arrays can be nested, with each element being the same size
arr = arr.Elem
for arr.T == ArrayTy {
// Keep multiplying by elem.Size while the elem is an array.
size *= arr.Size
arr = arr.Elem
}
// Now we have the full array size, including its children.
return size
}
// UnpackValues can be used to unpack ABI-encoded hexdata according to the ABI-specification,
// without supplying a struct to unpack into. Instead, this method returns a list containing the
// values. An atomic argument will be a list with one element.
func (arguments Arguments) UnpackValues(data []byte) ([]interface{}, error) {
retval := make([]interface{}, 0, arguments.LengthNonIndexed())
virtualArgs := 0
for index, arg := range arguments.NonIndexed() {
marshalledValue, err := toGoType((index+virtualArgs)*32, arg.Type, data)
if arg.Type.T == ArrayTy {
// If we have a static array, like [3]uint256, these are coded as
// just like uint256,uint256,uint256.
// This means that we need to add two 'virtual' arguments when
// we count the index from now on.
//
// Array values nested multiple levels deep are also encoded inline:
// [2][3]uint256: uint256,uint256,uint256,uint256,uint256,uint256
//
// Calculate the full array size to get the correct offset for the next argument.
// Decrement it by 1, as the normal index increment is still applied.
virtualArgs += getArraySize(&arg.Type) - 1
}
if err != nil {
return nil, err
}
retval = append(retval, marshalledValue)
}
return retval, nil
}
// PackValues performs the operation Go format -> Hexdata
// It is the semantic opposite of UnpackValues
func (arguments Arguments) PackValues(args []interface{}) ([]byte, error) {
return arguments.Pack(args...)
}
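A rough usage sketch for the new entry points (hypothetical method name and output type; only the Outputs.UnpackValues call shape is taken from the tests in this commit). UnpackValues returns the decoded outputs as a plain []interface{} in declaration order, an atomic output yields a single-element slice, and PackValues is the semantic inverse:

vals, err := parsedABI.Methods["multi"].Outputs.UnpackValues(returnData)
if err != nil {
	return err
}
// assuming output 0 is a uint256, it decodes to a *big.Int
first, ok := vals[0].(*big.Int)
if !ok {
	return fmt.Errorf("unexpected type %T for output 0", vals[0])
}
fmt.Println("first output:", first)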
// Pack performs the operation Go format -> Hexdata
func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
// Make sure arguments match up and pack them
abiArgs := arguments
if len(args) != len(abiArgs) {
return nil, fmt.Errorf("argument count mismatch: %d for %d", len(args), len(abiArgs))
}
// variable input is the output appended at the end of packed
// output. This is used for strings and bytes types input.
var variableInput []byte
@@ -207,7 +238,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
inputOffset += 32
}
}
var ret []byte
for i, a := range args {
input := abiArgs[i]
@@ -216,7 +246,6 @@ func (arguments Arguments) Pack(args ...interface{}) ([]byte, error) {
if err != nil {
return nil, err
}
// check for a slice type (string, bytes, slice)
if input.Type.requiresLengthPrefix() {
// calculate the offset
@@ -248,3 +277,18 @@ func capitalise(input string) string {
}
return strings.ToUpper(input[:1]) + input[1:]
}
//unpackStruct extracts each argument into its corresponding struct field
func unpackStruct(value, reflectValue reflect.Value, arg Argument) error {
name := capitalise(arg.Name)
typ := value.Type()
for j := 0; j < typ.NumField(); j++ {
// TODO read tags: `abi:"fieldName"`
if typ.Field(j).Name == name {
if err := set(value.Field(j), reflectValue, arg); err != nil {
return err
}
}
}
return nil
}


@@ -307,9 +307,9 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa
blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) {
for _, tx := range b.pendingBlock.Transactions() {
block.AddTx(tx)
block.AddTxWithChain(b.blockchain, tx)
}
block.AddTx(tx)
block.AddTxWithChain(b.blockchain, tx)
})
statedb, _ := b.blockchain.State()
@@ -428,10 +428,23 @@ func (fb *filterBackend) HeaderByNumber(ctx context.Context, block rpc.BlockNumb
}
return fb.bc.GetHeaderByNumber(uint64(block.Int64())), nil
}
func (fb *filterBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) {
return core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash)), nil
}
func (fb *filterBackend) GetLogs(ctx context.Context, hash common.Hash) ([][]*types.Log, error) {
receipts := core.GetBlockReceipts(fb.db, hash, core.GetBlockNumber(fb.db, hash))
if receipts == nil {
return nil, nil
}
logs := make([][]*types.Log, len(receipts))
for i, receipt := range receipts {
logs[i] = receipt.Logs
}
return logs, nil
}
func (fb *filterBackend) SubscribeTxPreEvent(ch chan<- core.TxPreEvent) event.Subscription {
return event.NewSubscription(func(quit <-chan struct{}) error {
<-quit


@@ -164,118 +164,147 @@ var bindType = map[Lang]func(kind abi.Type) string{
LangJava: bindTypeJava,
}
// Helper function for the binding generators.
// It reads the unmatched characters after the inner type-match,
// (since the inner type is a prefix of the total type declaration),
// looks for valid arrays (possibly a dynamic one) wrapping the inner type,
// and returns the sizes of these arrays.
//
// Returned array sizes are in the same order as solidity signatures; inner array size first.
// Array sizes may also be "", indicating a dynamic array.
func wrapArray(stringKind string, innerLen int, innerMapping string) (string, []string) {
remainder := stringKind[innerLen:]
//find all the sizes
matches := regexp.MustCompile(`\[(\d*)\]`).FindAllStringSubmatch(remainder, -1)
parts := make([]string, 0, len(matches))
for _, match := range matches {
//get group 1 from the regex match
parts = append(parts, match[1])
}
return innerMapping, parts
}
// Translates the array sizes to a Go-lang declaration of a (nested) array of the inner type.
// Simply returns the inner type if arraySizes is empty.
func arrayBindingGo(inner string, arraySizes []string) string {
out := ""
//prepend all array sizes, from outer (end arraySizes) to inner (start arraySizes)
for i := len(arraySizes) - 1; i >= 0; i-- {
out += "[" + arraySizes[i] + "]"
}
out += inner
return out
}
// bindTypeGo converts a Solidity type to a Go one. Since there is no clear mapping
// from all Solidity types to Go ones (e.g. uint17), those that cannot be exactly
// mapped will use an upscaled type (e.g. *big.Int).
func bindTypeGo(kind abi.Type) string {
stringKind := kind.String()
innerLen, innerMapping := bindUnnestedTypeGo(stringKind)
return arrayBindingGo(wrapArray(stringKind, innerLen, innerMapping))
}
// The inner function of bindTypeGo, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
func bindUnnestedTypeGo(stringKind string) (int, string) {
switch {
case strings.HasPrefix(stringKind, "address"):
parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
return fmt.Sprintf("%scommon.Address", parts[1])
return len("address"), "common.Address"
case strings.HasPrefix(stringKind, "bytes"):
parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
}
return fmt.Sprintf("%s[%s]byte", parts[2], parts[1])
parts := regexp.MustCompile(`bytes([0-9]*)`).FindStringSubmatch(stringKind)
return len(parts[0]), fmt.Sprintf("[%s]byte", parts[1])
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 4 {
return stringKind
}
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(stringKind)
switch parts[2] {
case "8", "16", "32", "64":
return fmt.Sprintf("%s%sint%s", parts[3], parts[1], parts[2])
return len(parts[0]), fmt.Sprintf("%sint%s", parts[1], parts[2])
}
return fmt.Sprintf("%s*big.Int", parts[3])
return len(parts[0]), "*big.Int"
case strings.HasPrefix(stringKind, "bool") || strings.HasPrefix(stringKind, "string"):
parts := regexp.MustCompile(`([a-z]+)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
}
return fmt.Sprintf("%s%s", parts[2], parts[1])
case strings.HasPrefix(stringKind, "bool"):
return len("bool"), "bool"
case strings.HasPrefix(stringKind, "string"):
return len("string"), "string"
default:
return stringKind
return len(stringKind), stringKind
}
}
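To make the two-step Go mapping concrete, a few expected translations (the nested-array case matches the DeeplyNestedArray bind test below; the others follow from the rules above and are illustrative):

// Solidity type      -> generated Go type
// uint17             -> *big.Int           (no exact Go equivalent, upscaled)
// uint64[3][4][5]    -> [5][4][3]uint64    (outermost Solidity dimension comes first in Go)
// bytes32[]          -> [][32]byte         (a dynamic dimension becomes a slice)
// address[2]         -> [2]common.Address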
// Translates the array sizes to a Java declaration of a (nested) array of the inner type.
// Simply returns the inner type if arraySizes is empty.
func arrayBindingJava(inner string, arraySizes []string) string {
// Java array type declarations do not include the length.
return inner + strings.Repeat("[]", len(arraySizes))
}
// bindTypeJava converts a Solidity type to a Java one. Since there is no clear mapping
// from all Solidity types to Java ones (e.g. uint17), those that cannot be exactly
// mapped will use an upscaled type (e.g. BigDecimal).
func bindTypeJava(kind abi.Type) string {
stringKind := kind.String()
innerLen, innerMapping := bindUnnestedTypeJava(stringKind)
return arrayBindingJava(wrapArray(stringKind, innerLen, innerMapping))
}
// The inner function of bindTypeJava, this finds the inner type of stringKind.
// (Or just the type itself if it is not an array or slice)
// The length of the matched part is returned, with the the translated type.
func bindUnnestedTypeJava(stringKind string) (int, string) {
switch {
case strings.HasPrefix(stringKind, "address"):
parts := regexp.MustCompile(`address(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
return len(stringKind), stringKind
}
if parts[1] == "" {
return fmt.Sprintf("Address")
return len("address"), "Address"
}
return fmt.Sprintf("Addresses")
return len(parts[0]), "Addresses"
case strings.HasPrefix(stringKind, "bytes"):
parts := regexp.MustCompile(`bytes([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return stringKind
parts := regexp.MustCompile(`bytes([0-9]*)`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return len(stringKind), stringKind
}
if parts[2] != "" {
return "byte[][]"
}
return "byte[]"
return len(parts[0]), "byte[]"
case strings.HasPrefix(stringKind, "int") || strings.HasPrefix(stringKind, "uint"):
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 4 {
return stringKind
//Note that uint and int (without digits) are also matched,
// these are size 256, and will translate to BigInt (the default).
parts := regexp.MustCompile(`(u)?int([0-9]*)`).FindStringSubmatch(stringKind)
if len(parts) != 3 {
return len(stringKind), stringKind
}
switch parts[2] {
case "8", "16", "32", "64":
if parts[1] == "" {
if parts[3] == "" {
return fmt.Sprintf("int%s", parts[2])
}
return fmt.Sprintf("int%s[]", parts[2])
}
namedSize := map[string]string{
"8": "byte",
"16": "short",
"32": "int",
"64": "long",
}[parts[2]]
//default to BigInt
if namedSize == "" {
namedSize = "BigInt"
}
if parts[3] == "" {
return fmt.Sprintf("BigInt")
}
return fmt.Sprintf("BigInts")
return len(parts[0]), namedSize
case strings.HasPrefix(stringKind, "bool"):
parts := regexp.MustCompile(`bool(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
if parts[1] == "" {
return fmt.Sprintf("bool")
}
return fmt.Sprintf("bool[]")
return len("bool"), "boolean"
case strings.HasPrefix(stringKind, "string"):
parts := regexp.MustCompile(`string(\[[0-9]*\])?`).FindStringSubmatch(stringKind)
if len(parts) != 2 {
return stringKind
}
if parts[1] == "" {
return fmt.Sprintf("String")
}
return fmt.Sprintf("String[]")
return len("string"), "String"
default:
return stringKind
return len(stringKind), stringKind
}
}
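The equivalent Java translations, derived from the rules above (illustrative; Java array declarations carry no length, so every dimension becomes []):

// Solidity type   -> generated Java type
// bool             -> boolean
// uint8            -> byte
// uint32[2]        -> int[]
// uint256          -> BigInt              (no exact Java equivalent, upscaled)
// uint64[3][4]     -> long[][]
// bytes32          -> byte[]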
@@ -325,11 +354,13 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
return "String"
case "string[]":
return "Strings"
case "bool":
case "boolean":
return "Bool"
case "bool[]":
case "boolean[]":
return "Bools"
case "BigInt":
case "BigInt[]":
return "BigInts"
default:
parts := regexp.MustCompile(`(u)?int([0-9]*)(\[[0-9]*\])?`).FindStringSubmatch(solKind.String())
if len(parts) != 4 {
return javaKind
@@ -344,8 +375,6 @@ func namedTypeJava(javaKind string, solKind abi.Type) string {
default:
return javaKind
}
default:
return javaKind
}
}
@@ -356,8 +385,7 @@ var methodNormalizer = map[Lang]func(string) string{
LangJava: decapitalise,
}
// capitalise makes the first character of a string upper case, also removing any
// prefixing underscores from the variable names.
// capitalise makes a camel-case string which starts with an upper case character.
func capitalise(input string) string {
for len(input) > 0 && input[0] == '_' {
input = input[1:]
@@ -365,12 +393,42 @@ func capitalise(input string) string {
if len(input) == 0 {
return ""
}
return strings.ToUpper(input[:1]) + input[1:]
return toCamelCase(strings.ToUpper(input[:1]) + input[1:])
}
// decapitalise makes the first character of a string lower case.
// decapitalise makes a camel-case string which starts with a lower case character.
func decapitalise(input string) string {
return strings.ToLower(input[:1]) + input[1:]
for len(input) > 0 && input[0] == '_' {
input = input[1:]
}
if len(input) == 0 {
return ""
}
return toCamelCase(strings.ToLower(input[:1]) + input[1:])
}
// toCamelCase converts an under-score string to a camel-case string
func toCamelCase(input string) string {
toupper := false
result := ""
for k, v := range input {
switch {
case k == 0:
result = strings.ToUpper(string(input[0]))
case toupper:
result += strings.ToUpper(string(v))
toupper = false
case v == '_':
toupper = true
default:
result += string(v)
}
}
return result
}
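The practical effect on underscored Solidity names, using the Underscorer bind test below as the grounding case (the second line is an illustrative extra):

// capitalise("_under_scored_func") -> "UnderScoredFunc"   // hence underscorer.UnderScoredFunc in the bind test
// capitalise("deep_uint64_array")  -> "DeepUint64Array"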
// structured checks whether a list of ABI data types has enough information to


@@ -425,7 +425,7 @@ var bindTests = []struct {
}
`,
},
// Tests that gas estimation works for contracts with weird gas mechanics too.
// Tests that gas estimation works for contracts with weird gas mechanics too.
{
`FunkyGasPattern`,
`
@@ -506,6 +506,7 @@ var bindTests = []struct {
}
`,
},
// Tests that methods and returns with underscores inside work correctly.
{
`Underscorer`,
`
@@ -518,7 +519,7 @@ var bindTests = []struct {
}
function LowerUpperCollision() constant returns (int _res, int Res) {
return (1, 2);
}
}
function UpperLowerCollision() constant returns (int _Res, int res) {
return (1, 2);
}
@@ -531,9 +532,12 @@ var bindTests = []struct {
function AllPurelyUnderscoredOutput() constant returns (int _, int __) {
return (1, 2);
}
function _under_scored_func() constant returns (int _int) {
return 0;
}
}
`, `6060604052341561000f57600080fd5b6103498061001e6000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806303a592131461008857806367e6633d146100b85780639df484851461014d578063af7486ab1461017d578063b564b34d146101ad578063e02ab24d146101dd578063e409ca451461020d575b600080fd5b341561009357600080fd5b61009b61023d565b604051808381526020018281526020019250505060405180910390f35b34156100c357600080fd5b6100cb610252565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b838110156101115780820151818401526020810190506100f6565b50505050905090810190601f16801561013e5780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b341561015857600080fd5b6101606102a0565b604051808381526020018281526020019250505060405180910390f35b341561018857600080fd5b6101906102b5565b604051808381526020018281526020019250505060405180910390f35b34156101b857600080fd5b6101c06102ca565b604051808381526020018281526020019250505060405180910390f35b34156101e857600080fd5b6101f06102df565b604051808381526020018281526020019250505060405180910390f35b341561021857600080fd5b6102206102f4565b604051808381526020018281526020019250505060405180910390f35b60008060016002819150809050915091509091565b600061025c610309565b61013a8090506040805190810160405280600281526020017f7069000000000000000000000000000000000000000000000000000000000000815250915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b6020604051908101604052806000815250905600a165627a7a72305820c11dcfa136fc7d182ee4d34f0b12d988496228f7e2d02d2b5376d996ca1743d00029`,
`[{"constant":true,"inputs":[],"name":"LowerUpperCollision","outputs":[{"name":"_res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UnderscoredOutput","outputs":[{"name":"_int","type":"int256"},{"name":"_string","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperLowerCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"AllPurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"__","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperUpperCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"LowerLowerCollision","outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]`,
`, `6060604052341561000f57600080fd5b6103858061001e6000396000f30060606040526004361061008e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806303a592131461009357806346546dbe146100c357806367e6633d146100ec5780639df4848514610181578063af7486ab146101b1578063b564b34d146101e1578063e02ab24d14610211578063e409ca4514610241575b600080fd5b341561009e57600080fd5b6100a6610271565b604051808381526020018281526020019250505060405180910390f35b34156100ce57600080fd5b6100d6610286565b6040518082815260200191505060405180910390f35b34156100f757600080fd5b6100ff61028e565b6040518083815260200180602001828103825283818151815260200191508051906020019080838360005b8381101561014557808201518184015260208101905061012a565b50505050905090810190601f1680156101725780820380516001836020036101000a031916815260200191505b50935050505060405180910390f35b341561018c57600080fd5b6101946102dc565b604051808381526020018281526020019250505060405180910390f35b34156101bc57600080fd5b6101c46102f1565b604051808381526020018281526020019250505060405180910390f35b34156101ec57600080fd5b6101f4610306565b604051808381526020018281526020019250505060405180910390f35b341561021c57600080fd5b61022461031b565b604051808381526020018281526020019250505060405180910390f35b341561024c57600080fd5b610254610330565b604051808381526020018281526020019250505060405180910390f35b60008060016002819150809050915091509091565b600080905090565b6000610298610345565b61013a8090506040805190810160405280600281526020017f7069000000000000000000000000000000000000000000000000000000000000815250915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b60008060016002819150809050915091509091565b6020604051908101604052806000815250905600a165627a7a72305820d1a53d9de9d1e3d55cb3dc591900b63c4f1ded79114f7b79b332684840e186a40029`,
`[{"constant":true,"inputs":[],"name":"LowerUpperCollision","outputs":[{"name":"_res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"_under_scored_func","outputs":[{"name":"_int","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UnderscoredOutput","outputs":[{"name":"_int","type":"int256"},{"name":"_string","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"PurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperLowerCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"AllPurelyUnderscoredOutput","outputs":[{"name":"_","type":"int256"},{"name":"__","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"UpperUpperCollision","outputs":[{"name":"_Res","type":"int256"},{"name":"Res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"LowerLowerCollision","outputs":[{"name":"_res","type":"int256"},{"name":"res","type":"int256"}],"payable":false,"stateMutability":"view","type":"function"}]`,
`
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
@@ -562,6 +566,7 @@ var bindTests = []struct {
a, b, _ = underscorer.UpperUpperCollision(nil)
a, b, _ = underscorer.PurelyUnderscoredOutput(nil)
a, b, _ = underscorer.AllPurelyUnderscoredOutput(nil)
a, _ = underscorer.UnderScoredFunc(nil)
fmt.Println(a, b, err)
`,
@@ -737,6 +742,73 @@ var bindTests = []struct {
}
`,
},
{
`DeeplyNestedArray`,
`
contract DeeplyNestedArray {
uint64[3][4][5] public deepUint64Array;
function storeDeepUintArray(uint64[3][4][5] arr) public {
deepUint64Array = arr;
}
function retrieveDeepArray() public view returns (uint64[3][4][5]) {
return deepUint64Array;
}
}
`,
`6060604052341561000f57600080fd5b6106438061001e6000396000f300606060405260043610610057576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063344248551461005c5780638ed4573a1461011457806398ed1856146101ab575b600080fd5b341561006757600080fd5b610112600480806107800190600580602002604051908101604052809291906000905b828210156101055783826101800201600480602002604051908101604052809291906000905b828210156100f25783826060020160038060200260405190810160405280929190826003602002808284378201915050505050815260200190600101906100b0565b505050508152602001906001019061008a565b5050505091905050610208565b005b341561011f57600080fd5b61012761021d565b604051808260056000925b8184101561019b578284602002015160046000925b8184101561018d5782846020020151600360200280838360005b8381101561017c578082015181840152602081019050610161565b505050509050019260010192610147565b925050509260010192610132565b9250505091505060405180910390f35b34156101b657600080fd5b6101de6004808035906020019091908035906020019091908035906020019091905050610309565b604051808267ffffffffffffffff1667ffffffffffffffff16815260200191505060405180910390f35b80600090600561021992919061035f565b5050565b6102256103b0565b6000600580602002604051908101604052809291906000905b8282101561030057838260040201600480602002604051908101604052809291906000905b828210156102ed578382016003806020026040519081016040528092919082600380156102d9576020028201916000905b82829054906101000a900467ffffffffffffffff1667ffffffffffffffff16815260200190600801906020826007010492830192600103820291508084116102945790505b505050505081526020019060010190610263565b505050508152602001906001019061023e565b50505050905090565b60008360058110151561031857fe5b600402018260048110151561032957fe5b018160038110151561033757fe5b6004918282040191900660080292509250509054906101000a900467ffffffffffffffff1681565b826005600402810192821561039f579160200282015b8281111561039e5782518290600461038e9291906103df565b5091602001919060040190610375565b5b5090506103ac919061042d565b5090565b610780604051908101604052806005905b6103c9610459565b8152602001906001900390816103c15790505090565b826004810192821561041c579160200282015b8281111561041b5782518290600361040b929190610488565b50916020019190600101906103f2565b5b5090506104299190610536565b5090565b61045691905b8082111561045257600081816104499190610562565b50600401610433565b5090565b90565b610180604051908101604052806004905b6104726105a7565b81526020019060019003908161046a5790505090565b82600380016004900481019282156105255791602002820160005b838211156104ef57835183826101000a81548167ffffffffffffffff021916908367ffffffffffffffff16021790555092602001926008016020816007010492830192600103026104a3565b80156105235782816101000a81549067ffffffffffffffff02191690556008016020816007010492830192600103026104ef565b505b50905061053291906105d9565b5090565b61055f91905b8082111561055b57600081816105529190610610565b5060010161053c565b5090565b90565b50600081816105719190610610565b50600101600081816105839190610610565b50600101600081816105959190610610565b5060010160006105a59190610610565b565b6060604051908101604052806003905b600067ffffffffffffffff168152602001906001900390816105b75790505090565b61060d91905b8082111561060957600081816101000a81549067ffffffffffffffff0219169055506001016105df565b5090565b90565b50600090555600a165627a7a7230582087e5a43f6965ab6ef7a4ff056ab80ed78fd8c15cff57715a1bf34ec76a93661c0029`,
`[{"constant":false,"inputs":[{"name":"arr","type":"uint64[3][4][5]"}],"name":"storeDeepUintArray","outputs":[],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"retrieveDeepArray","outputs":[{"name":"","type":"uint64[3][4][5]"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"","type":"uint256"},{"name":"","type":"uint256"},{"name":"","type":"uint256"}],"name":"deepUint64Array","outputs":[{"name":"","type":"uint64"}],"payable":false,"stateMutability":"view","type":"function"}]`,
`
// Generate a new random account and a funded simulator
key, _ := crypto.GenerateKey()
auth := bind.NewKeyedTransactor(key)
sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}})
//deploy the test contract
_, _, testContract, err := DeployDeeplyNestedArray(auth, sim)
if err != nil {
t.Fatalf("Failed to deploy test contract: %v", err)
}
// Finish deploy.
sim.Commit()
//Create coordinate-filled array, for testing purposes.
testArr := [5][4][3]uint64{}
for i := 0; i < 5; i++ {
testArr[i] = [4][3]uint64{}
for j := 0; j < 4; j++ {
testArr[i][j] = [3]uint64{}
for k := 0; k < 3; k++ {
//pack the coordinates, each array value will be unique, and can be validated easily.
testArr[i][j][k] = uint64(i) << 16 | uint64(j) << 8 | uint64(k)
}
}
}
if _, err := testContract.StoreDeepUintArray(&bind.TransactOpts{
From: auth.From,
Signer: auth.Signer,
}, testArr); err != nil {
t.Fatalf("Failed to store nested array in test contract: %v", err)
}
sim.Commit()
retrievedArr, err := testContract.RetrieveDeepArray(&bind.CallOpts{
From: auth.From,
Pending: false,
})
if err != nil {
t.Fatalf("Failed to retrieve nested array from test contract: %v", err)
}
//quick check to see if contents were copied
// (See accounts/abi/unpack_test.go for more extensive testing)
if retrievedArr[4][3][2] != testArr[4][3][2] {
t.Fatalf("Retrieved value does not match expected value! got: %d, expected: %d. %v", retrievedArr[4][3][2], testArr[4][3][2], err)
}
`,
},
}
// Tests that packages generated by the binder can be successfully compiled and


@@ -299,6 +299,11 @@ func TestPack(t *testing.T) {
[32]byte{1},
common.Hex2Bytes("0100000000000000000000000000000000000000000000000000000000000000"),
},
{
"uint32[2][3][4]",
[4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
common.Hex2Bytes("000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018"),
},
{
"address[]",
[]common.Address{{1}, {2}},


@@ -110,3 +110,19 @@ func requireUnpackKind(v reflect.Value, t reflect.Type, k reflect.Kind,
}
return nil
}
// requireUniqueStructFieldNames makes sure field names don't collide
func requireUniqueStructFieldNames(args Arguments) error {
exists := make(map[string]bool)
for _, arg := range args {
field := capitalise(arg.Name)
if field == "" {
return fmt.Errorf("abi: purely underscored output cannot unpack to struct")
}
if exists[field] {
return fmt.Errorf("abi: multiple outputs mapping to the same struct field '%s'", field)
}
exists[field] = true
}
return nil
}
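For example (matching the LowerUpperCollision method of the Underscorer bind test), outputs named _res and Res both capitalise to the struct field Res, which is exactly the collision this helper rejects. A rough in-package illustration, with intT standing in for a hypothetical int256 Type value:

args := Arguments{
	{Name: "_res", Type: intT}, // capitalise("_res") == "Res"
	{Name: "Res", Type: intT},  // capitalise("Res")  == "Res" -> collision
}
// requireUniqueStructFieldNames(args) returns
// "abi: multiple outputs mapping to the same struct field 'Res'"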


@@ -93,15 +93,28 @@ func readFixedBytes(t Type, word []byte) (interface{}, error) {
}
func getFullElemSize(elem *Type) int {
//all other should be counted as 32 (slices have pointers to respective elements)
size := 32
//arrays wrap it, each element being the same size
for elem.T == ArrayTy {
size *= elem.Size
elem = elem.Elem
}
return size
}
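A worked size for the nested case this handles: when unpacking a uint64[3][4], the element type is uint64[3], so each outer element occupies several words rather than one:

// getFullElemSize(elem) with elem = uint64[3]:
//   size = 32        // every base value occupies a full 32-byte word
//   size *= 3        // elem is an ArrayTy of length 3; recurse into uint64 (not an array, stop)
//   => 96 bytes, the per-element stride forEachUnpack uses below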
// iteratively unpack elements
func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error) {
if size < 0 {
return nil, fmt.Errorf("cannot marshal input to array, size is negative (%d)", size)
}
if start+32*size > len(output) {
return nil, fmt.Errorf("abi: cannot marshal in to go array: offset %d would go over slice boundary (len=%d)", len(output), start+32*size)
}
// this value will become our slice or our array, depending on the type
var refSlice reflect.Value
slice := output[start : start+size*32]
if t.T == SliceTy {
// declare our slice
@@ -113,15 +126,20 @@ func forEachUnpack(t Type, output []byte, start, size int) (interface{}, error)
return nil, fmt.Errorf("abi: invalid type in array/slice unpacking stage")
}
for i, j := start, 0; j*32 < len(slice); i, j = i+32, j+1 {
// this corrects the arrangement so that we get all the underlying array values
if t.Elem.T == ArrayTy && j != 0 {
i = start + t.Elem.Size*32*j
}
// Arrays have packed elements, resulting in longer unpack steps.
// Slices have just 32 bytes per element (pointing to the contents).
elemSize := 32
if t.T == ArrayTy {
elemSize = getFullElemSize(t.Elem)
}
for i, j := start, 0; j < size; i, j = i+elemSize, j+1 {
inter, err := toGoType(i, *t.Elem, output)
if err != nil {
return nil, err
}
// append the item to our reflect slice
refSlice.Index(j).Set(reflect.ValueOf(inter))
}
@@ -181,16 +199,32 @@ func toGoType(index int, t Type, output []byte) (interface{}, error) {
// interprets a 32 byte slice as an offset and then determines which indice to look to decode the type.
func lengthPrefixPointsTo(index int, output []byte) (start int, length int, err error) {
offset := int(binary.BigEndian.Uint64(output[index+24 : index+32]))
if offset+32 > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %d would go over slice boundary (len=%d)", len(output), offset+32)
}
length = int(binary.BigEndian.Uint64(output[offset+24 : offset+32]))
if offset+32+length > len(output) {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %d require %d", len(output), offset+32+length)
}
start = offset + 32
bigOffsetEnd := big.NewInt(0).SetBytes(output[index : index+32])
bigOffsetEnd.Add(bigOffsetEnd, common.Big32)
outputLength := big.NewInt(int64(len(output)))
//fmt.Printf("LENGTH PREFIX INFO: \nsize: %v\noffset: %v\nstart: %v\n", length, offset, start)
if bigOffsetEnd.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go slice: offset %v would go over slice boundary (len=%v)", bigOffsetEnd, outputLength)
}
if bigOffsetEnd.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi offset larger than int64: %v", bigOffsetEnd)
}
offsetEnd := int(bigOffsetEnd.Uint64())
lengthBig := big.NewInt(0).SetBytes(output[offsetEnd-32 : offsetEnd])
totalSize := big.NewInt(0)
totalSize.Add(totalSize, bigOffsetEnd)
totalSize.Add(totalSize, lengthBig)
if totalSize.BitLen() > 63 {
return 0, 0, fmt.Errorf("abi length larger than int64: %v", totalSize)
}
if totalSize.Cmp(outputLength) > 0 {
return 0, 0, fmt.Errorf("abi: cannot marshal in to go type: length insufficient %v require %v", outputLength, totalSize)
}
start = int(bigOffsetEnd.Uint64())
length = int(lengthBig.Uint64())
return
}
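To see why the checks moved to big.Int, take the "offset over 64 bits" case from TestOOMMaliciousInput further down. A standalone sketch (illustrative) of what the old and new paths compute for that offset word:

package main

import (
	"encoding/binary"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
)

func main() {
	// 32-byte offset word with garbage in the high bytes (from the OOM test).
	word := common.Hex2Bytes("00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000020")

	// Old path: only the last 8 bytes were read, silently dropping the high bytes.
	fmt.Println(int(binary.BigEndian.Uint64(word[24:32]))) // 32 -- looks perfectly valid

	// New path: the full word is inspected, so the oversized value is caught.
	offsetEnd := new(big.Int).Add(new(big.Int).SetBytes(word), big.NewInt(32))
	fmt.Println(offsetEnd.BitLen() > 63) // true -- rejected before any slicing happens
}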


@@ -130,7 +130,7 @@ var unpackTests = []unpackTest{
{
def: `[{"type": "bytes32"}]`,
enc: "0100000000000000000000000000000000000000000000000000000000000000",
want: common.HexToHash("0100000000000000000000000000000000000000000000000000000000000000"),
want: [32]byte{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
},
{
def: `[{"type": "function"}]`,
@@ -189,6 +189,11 @@ var unpackTests = []unpackTest{
enc: "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
want: [2]uint32{1, 2},
},
{
def: `[{"type": "uint32[2][3][4]"}]`,
enc: "000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c000000000000000000000000000000000000000000000000000000000000000d000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000f000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012000000000000000000000000000000000000000000000000000000000000001300000000000000000000000000000000000000000000000000000000000000140000000000000000000000000000000000000000000000000000000000000015000000000000000000000000000000000000000000000000000000000000001600000000000000000000000000000000000000000000000000000000000000170000000000000000000000000000000000000000000000000000000000000018",
want: [4][3][2]uint32{{{1, 2}, {3, 4}, {5, 6}}, {{7, 8}, {9, 10}, {11, 12}}, {{13, 14}, {15, 16}, {17, 18}}, {{19, 20}, {21, 22}, {23, 24}}},
},
{
def: `[{"type": "uint64[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002",
@@ -435,6 +440,46 @@ func TestMultiReturnWithArray(t *testing.T) {
}
}
func TestMultiReturnWithDeeplyNestedArray(t *testing.T) {
// Similar to TestMultiReturnWithArray, but with a special case in mind:
// values of nested static arrays count towards the size as well, and any element following
// after such nested array argument should be read with the correct offset,
// so that it does not read content from the previous array argument.
const definition = `[{"name" : "multi", "outputs": [{"type": "uint64[3][2][4]"}, {"type": "uint64"}]}]`
abi, err := JSON(strings.NewReader(definition))
if err != nil {
t.Fatal(err)
}
buff := new(bytes.Buffer)
// construct the test array, each 3 char element is joined with 61 '0' chars,
// to from the ((3 + 61) * 0.5) = 32 byte elements in the array.
buff.Write(common.Hex2Bytes(strings.Join([]string{
"", //empty, to apply the 61-char separator to the first element as well.
"111", "112", "113", "121", "122", "123",
"211", "212", "213", "221", "222", "223",
"311", "312", "313", "321", "322", "323",
"411", "412", "413", "421", "422", "423",
}, "0000000000000000000000000000000000000000000000000000000000000")))
buff.Write(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000009876"))
ret1, ret1Exp := new([4][2][3]uint64), [4][2][3]uint64{
{{0x111, 0x112, 0x113}, {0x121, 0x122, 0x123}},
{{0x211, 0x212, 0x213}, {0x221, 0x222, 0x223}},
{{0x311, 0x312, 0x313}, {0x321, 0x322, 0x323}},
{{0x411, 0x412, 0x413}, {0x421, 0x422, 0x423}},
}
ret2, ret2Exp := new(uint64), uint64(0x9876)
if err := abi.Unpack(&[]interface{}{ret1, ret2}, "multi", buff.Bytes()); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(*ret1, ret1Exp) {
t.Error("array result", *ret1, "!= Expected", ret1Exp)
}
if *ret2 != ret2Exp {
t.Error("int result", *ret2, "!= Expected", ret2Exp)
}
}
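The offsets behind this test, spelled out: the first output uint64[3][2][4] is a static array of 4*2*3 = 24 words encoded inline, so UnpackValues bumps virtualArgs by 23 and reads the trailing uint64 at

// (index + virtualArgs) * 32 = (1 + (4*2*3 - 1)) * 32 = 24 * 32 = 768 bytes

which is exactly where the 0x9876 word sits in the buffer constructed above.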
func TestUnmarshal(t *testing.T) {
const definition = `[
{ "name" : "int", "constant" : false, "outputs": [ { "type": "uint256" } ] },
@@ -683,3 +728,73 @@ func TestUnmarshal(t *testing.T) {
t.Fatal("expected error:", err)
}
}
func TestOOMMaliciousInput(t *testing.T) {
oomTests := []unpackTest{
{
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000003" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Length larger than 64 bits
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset very large (over 64 bits)
def: `[{"type": "uint8[]"}]`,
enc: "00ffffffffffffffffffffffffffffffffffffffffffffff0000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset very large (below 64 bits)
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000007ffffffffff00020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Offset negative (as 64 bit)
def: `[{"type": "uint8[]"}]`,
enc: "000000000000000000000000000000000000000000000000f000000000000020" + // offset
"0000000000000000000000000000000000000000000000000000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Negative length
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"000000000000000000000000000000000000000000000000f000000000000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
{ // Very large length
def: `[{"type": "uint8[]"}]`,
enc: "0000000000000000000000000000000000000000000000000000000000000020" + // offset
"0000000000000000000000000000000000000000000000007fffffffff000002" + // num elems
"0000000000000000000000000000000000000000000000000000000000000001" + // elem 1
"0000000000000000000000000000000000000000000000000000000000000002", // elem 2
},
}
for i, test := range oomTests {
def := fmt.Sprintf(`[{ "name" : "method", "outputs": %s}]`, test.def)
abi, err := JSON(strings.NewReader(def))
if err != nil {
t.Fatalf("invalid ABI definition %s: %v", def, err)
}
encb, err := hex.DecodeString(test.enc)
if err != nil {
t.Fatalf("invalid hex: %s" + test.enc)
}
_, err = abi.Methods["method"].Outputs.UnpackValues(encb)
if err == nil {
t.Fatalf("Expected error on malicious input, test %d", i)
}
}
}


@@ -23,8 +23,8 @@ environment:
install:
- git submodule update --init
- rmdir C:\go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.9.2.windows-%GETH_ARCH%.zip
- 7z x go1.9.2.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.10.windows-%GETH_ARCH%.zip
- 7z x go1.10.windows-%GETH_ARCH%.zip -y -oC:\ > NUL
- go version
- gcc --version


@@ -2,12 +2,7 @@
Tagged releases and develop branch commits are available as installable Debian packages
for Ubuntu. Packages are built for the all Ubuntu versions which are supported by
Canonical:
- Trusty Tahr (14.04 LTS)
- Xenial Xerus (16.04 LTS)
- Yakkety Yak (16.10)
- Zesty Zapus (17.04)
Canonical.
Packages of develop branch commits have suffix -unstable and cannot be installed alongside
the stable version. Switching between release streams requires user intervention.
@@ -21,18 +16,18 @@ variable which Travis CI makes available to certain builds.
We want to build go-ethereum with the most recent version of Go, irrespective of the Go
version that is available in the main Ubuntu repository. In order to make this possible,
our PPA depends on the ~gophers/ubuntu/archive PPA. Our source package build-depends on
golang-1.9, which is co-installable alongside the regular golang package. PPA dependencies
golang-1.10, which is co-installable alongside the regular golang package. PPA dependencies
can be edited at https://launchpad.net/%7Eethereum/+archive/ubuntu/ethereum/+edit-dependencies
## Building Packages Locally (for testing)
You need to run Ubuntu to do test packaging.
Add the gophers PPA and install Go 1.9 and Debian packaging tools:
Add the gophers PPA and install Go 1.10 and Debian packaging tools:
$ sudo apt-add-repository ppa:gophers/ubuntu/archive
$ sudo apt-get update
$ sudo apt-get install build-essential golang-1.9 devscripts debhelper
$ sudo apt-get install build-essential golang-1.10 devscripts debhelper
Create the source packages:


@@ -182,13 +182,13 @@ func doInstall(cmdline []string) {
// Check Go version. People regularly open issues about compilation
// failure with outdated Go. This should save them the trouble.
if !strings.Contains(runtime.Version(), "devel") {
// Figure out the minor version number since we can't textually compare (1.10 < 1.7)
// Figure out the minor version number since we can't textually compare (1.10 < 1.9)
var minor int
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
if minor < 7 {
if minor < 9 {
log.Println("You have Go version", runtime.Version())
log.Println("go-ethereum requires at least Go version 1.7 and cannot")
log.Println("go-ethereum requires at least Go version 1.9 and cannot")
log.Println("be compiled with an earlier version. Please upgrade your Go installation.")
os.Exit(1)
}
@@ -262,16 +262,6 @@ func goTool(subcmd string, args ...string) *exec.Cmd {
func goToolArch(arch string, cc string, subcmd string, args ...string) *exec.Cmd {
cmd := build.GoTool(subcmd, args...)
if subcmd == "build" || subcmd == "install" || subcmd == "test" {
// Go CGO has a Windows linker error prior to 1.8 (https://github.com/golang/go/issues/8756).
// Work around issue by allowing multiple definitions for <1.8 builds.
var minor int
fmt.Sscanf(strings.TrimPrefix(runtime.Version(), "go1."), "%d", &minor)
if runtime.GOOS == "windows" && minor < 8 {
cmd.Args = append(cmd.Args, []string{"-ldflags", "-extldflags -Wl,--allow-multiple-definition"}...)
}
}
cmd.Env = []string{"GOPATH=" + build.GOPATH()}
if arch == "" || arch == runtime.GOARCH {
cmd.Env = append(cmd.Env, "GOBIN="+GOBIN)
@@ -736,7 +726,7 @@ func doAndroidArchive(cmdline []string) {
log.Fatal("Please ensure ANDROID_NDK points to your Android NDK")
}
// Build the Android archive and Maven resources
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile"))
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
build.MustRun(gomobileTool("init", "--ndk", os.Getenv("ANDROID_NDK")))
build.MustRun(gomobileTool("bind", "--target", "android", "--javapkg", "org.ethereum", "-v", "github.com/ethereum/go-ethereum/mobile"))
@@ -787,9 +777,10 @@ func gomobileTool(subcmd string, args ...string) *exec.Cmd {
cmd.Args = append(cmd.Args, args...)
cmd.Env = []string{
"GOPATH=" + build.GOPATH(),
"PATH=" + GOBIN + string(os.PathListSeparator) + os.Getenv("PATH"),
}
for _, e := range os.Environ() {
if strings.HasPrefix(e, "GOPATH=") {
if strings.HasPrefix(e, "GOPATH=") || strings.HasPrefix(e, "PATH=") {
continue
}
cmd.Env = append(cmd.Env, e)
@@ -856,7 +847,7 @@ func doXCodeFramework(cmdline []string) {
env := build.Env()
// Build the iOS XCode framework
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile"))
build.MustRun(goTool("get", "golang.org/x/mobile/cmd/gomobile", "golang.org/x/mobile/cmd/gobind"))
build.MustRun(gomobileTool("init"))
bind := gomobileTool("bind", "--target", "ios", "--tags", "ios", "-v", "github.com/ethereum/go-ethereum/mobile")


@@ -2,7 +2,7 @@ Source: {{.Name}}
Section: science
Priority: extra
Maintainer: {{.Author}}
Build-Depends: debhelper (>= 8.0.0), golang-1.9
Build-Depends: debhelper (>= 8.0.0), golang-1.10
Standards-Version: 3.9.5
Homepage: https://ethereum.org
Vcs-Git: git://github.com/ethereum/go-ethereum.git


@@ -5,7 +5,7 @@
#export DH_VERBOSE=1
override_dh_auto_build:
build/env.sh /usr/lib/go-1.9/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
build/env.sh /usr/lib/go-1.10/bin/go run build/ci.go install -git-commit={{.Env.Commit}} -git-branch={{.Env.Branch}} -git-tag={{.Env.Tag}} -buildnum={{.Env.Buildnum}} -pull-request={{.Env.IsPullRequest}}
override_dh_auto_test:


@@ -86,10 +86,6 @@ var (
Name: "create",
Usage: "indicates the action should be create rather than call",
}
DisableGasMeteringFlag = cli.BoolFlag{
Name: "nogasmetering",
Usage: "disable gas metering",
}
GenesisFlag = cli.StringFlag{
Name: "prestate",
Usage: "JSON file with prestate (genesis) config",
@@ -128,7 +124,6 @@ func init() {
ValueFlag,
DumpFlag,
InputFlag,
DisableGasMeteringFlag,
MemProfileFlag,
CPUProfileFlag,
StatDumpFlag,


@@ -161,9 +161,8 @@ func runCmd(ctx *cli.Context) error {
GasPrice: utils.GlobalBig(ctx, PriceFlag.Name),
Value: utils.GlobalBig(ctx, ValueFlag.Name),
EVMConfig: vm.Config{
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
DisableGasMetering: ctx.GlobalBool(DisableGasMeteringFlag.Name),
Tracer: tracer,
Debug: ctx.GlobalBool(DebugFlag.Name) || ctx.GlobalBool(MachineFlag.Name),
},
}


@@ -533,9 +533,11 @@ func (f *faucet) loop() {
}
defer sub.Unsubscribe()
for {
select {
case head := <-heads:
// Start a goroutine to update the state from head notifications in the background
update := make(chan *types.Header)
go func() {
for head := range update {
// New chain head arrived, query the current stats and stream to clients
var (
balance *big.Int
@@ -588,6 +590,17 @@ func (f *faucet) loop() {
}
}
f.lock.RUnlock()
}
}()
// Wait for various events and assign them to the appropriate background threads
for {
select {
case head := <-heads:
// New head arrived, send it for a state update if there's none running
select {
case update <- head:
default:
}
case <-f.update:
// Pending requests updated, stream to clients
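The restructured loop above decouples head notifications from the slow balance/nonce queries: heads are handed to a dedicated updater goroutine through an unbuffered channel with a non-blocking send, so a burst of heads collapses into at most one pending refresh. A minimal, self-contained sketch of that coalescing pattern (all names here are illustrative, not the faucet's own):

package main

import (
	"fmt"
	"time"
)

func main() {
	heads := make(chan int)  // stands in for the chain-head subscription
	update := make(chan int) // hands work to the background updater

	// Background updater: stands in for the slow balance/nonce query.
	go func() {
		for h := range update {
			time.Sleep(50 * time.Millisecond) // simulate a slow query
			fmt.Println("state refreshed at head", h)
		}
	}()

	// Forwarder: pass a head on only if the updater is idle, otherwise
	// drop it -- a newer head will arrive shortly anyway.
	go func() {
		for h := range heads {
			select {
			case update <- h:
			default:
			}
		}
	}()

	for i := 1; i <= 10; i++ {
		heads <- i
		time.Sleep(10 * time.Millisecond)
	}
	time.Sleep(100 * time.Millisecond) // let the last refresh finish
}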
@@ -686,8 +699,6 @@ func authTwitter(url string) (string, string, common.Address, error) {
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
// Twitter's API isn't really friendly with direct links. Still, we don't
// want to ask for read permissions from users, so just load the public posts and
// scrape them for the Ethereum address and profile URL.
@@ -697,6 +708,13 @@ func authTwitter(url string) (string, string, common.Address, error) {
}
defer res.Body.Close()
// Resolve the username from the final redirect, no intermediate junk
parts = strings.Split(res.Request.URL.String(), "/")
if len(parts) < 4 || parts[len(parts)-2] != "status" {
return "", "", common.Address{}, errors.New("Invalid Twitter status URL")
}
username := parts[len(parts)-3]
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", common.Address{}, err

View File

@@ -225,6 +225,13 @@ func importChain(ctx *cli.Context) error {
utils.Fatalf("Failed to read database stats: %v", err)
}
fmt.Println(stats)
ioStats, err := db.LDB().GetProperty("leveldb.iostats")
if err != nil {
utils.Fatalf("Failed to read database iostats: %v", err)
}
fmt.Println(ioStats)
fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
@@ -255,6 +262,12 @@ func importChain(ctx *cli.Context) error {
}
fmt.Println(stats)
ioStats, err = db.LDB().GetProperty("leveldb.iostats")
if err != nil {
utils.Fatalf("Failed to read database iostats: %v", err)
}
fmt.Println(ioStats)
return nil
}
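Both hunks surface goleveldb's property interface, which the wrapped database exposes through db.LDB(). A standalone sketch of reading the same properties directly from goleveldb (the database path is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	// Open (or create) a throwaway database; the path is illustrative.
	db, err := leveldb.OpenFile("/tmp/example-ldb", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// goleveldb exposes internal counters as named properties.
	for _, prop := range []string{"leveldb.stats", "leveldb.iostats"} {
		value, err := db.GetProperty(prop)
		if err != nil {
			log.Fatalf("failed to read %s: %v", prop, err)
		}
		fmt.Printf("%s:\n%s\n", prop, value)
	}
}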

View File

@@ -22,6 +22,7 @@ import (
"os/signal"
"path/filepath"
"strings"
"syscall"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/console"
@@ -42,7 +43,7 @@ var (
Description: `
The Geth console is an interactive shell for the JavaScript runtime environment
which exposes a node admin interface as well as the Ðapp JavaScript API.
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`,
}
attachCommand = cli.Command{
@@ -55,7 +56,7 @@ See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.`,
Description: `
The Geth console is an interactive shell for the JavaScript runtime environment
which exposes a node admin interface as well as the Ðapp JavaScript API.
See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console.
See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.
This command allows to open a console on a running geth node.`,
}
@@ -68,7 +69,7 @@ This command allows to open a console on a running geth node.`,
Category: "CONSOLE COMMANDS",
Description: `
The JavaScript VM exposes a node admin interface as well as the Ðapp
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/Javascipt-Console`,
JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console`,
}
)
@@ -207,7 +208,7 @@ func ephemeralConsole(ctx *cli.Context) error {
}
// Wait for pending callbacks, but stop for Ctrl-C.
abort := make(chan os.Signal, 1)
signal.Notify(abort, os.Interrupt)
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-abort

View File

@@ -65,7 +65,6 @@ var (
utils.DashboardAddrFlag,
utils.DashboardPortFlag,
utils.DashboardRefreshFlag,
utils.DashboardAssetsFlag,
utils.EthashCacheDirFlag,
utils.EthashCachesInMemoryFlag,
utils.EthashCachesOnDiskFlag,

View File

@@ -168,19 +168,18 @@ type parityChainSpec struct {
Engine struct {
Ethash struct {
Params struct {
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
HomesteadTransition uint64 `json:"homesteadTransition"`
EIP150Transition uint64 `json:"eip150Transition"`
EIP160Transition uint64 `json:"eip160Transition"`
EIP161abcTransition uint64 `json:"eip161abcTransition"`
EIP161dTransition uint64 `json:"eip161dTransition"`
EIP649Reward *hexutil.Big `json:"eip649Reward"`
EIP100bTransition uint64 `json:"eip100bTransition"`
EIP649Transition uint64 `json:"eip649Transition"`
MinimumDifficulty *hexutil.Big `json:"minimumDifficulty"`
DifficultyBoundDivisor *hexutil.Big `json:"difficultyBoundDivisor"`
DurationLimit *hexutil.Big `json:"durationLimit"`
BlockReward *hexutil.Big `json:"blockReward"`
HomesteadTransition uint64 `json:"homesteadTransition"`
EIP150Transition uint64 `json:"eip150Transition"`
EIP160Transition uint64 `json:"eip160Transition"`
EIP161abcTransition uint64 `json:"eip161abcTransition"`
EIP161dTransition uint64 `json:"eip161dTransition"`
EIP649Reward *hexutil.Big `json:"eip649Reward"`
EIP100bTransition uint64 `json:"eip100bTransition"`
EIP649Transition uint64 `json:"eip649Transition"`
} `json:"params"`
} `json:"Ethash"`
} `json:"engine"`
@@ -188,6 +187,7 @@ type parityChainSpec struct {
Params struct {
MaximumExtraDataSize hexutil.Uint64 `json:"maximumExtraDataSize"`
MinGasLimit hexutil.Uint64 `json:"minGasLimit"`
GasLimitBoundDivisor hexutil.Uint64 `json:"gasLimitBoundDivisor"`
NetworkID hexutil.Uint64 `json:"networkID"`
MaxCodeSize uint64 `json:"maxCodeSize"`
EIP155Transition uint64 `json:"eip155Transition"`
@@ -270,7 +270,6 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
}
spec.Engine.Ethash.Params.MinimumDifficulty = (*hexutil.Big)(params.MinimumDifficulty)
spec.Engine.Ethash.Params.DifficultyBoundDivisor = (*hexutil.Big)(params.DifficultyBoundDivisor)
spec.Engine.Ethash.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Engine.Ethash.Params.DurationLimit = (*hexutil.Big)(params.DurationLimit)
spec.Engine.Ethash.Params.BlockReward = (*hexutil.Big)(ethash.FrontierBlockReward)
spec.Engine.Ethash.Params.HomesteadTransition = genesis.Config.HomesteadBlock.Uint64()
@@ -284,6 +283,7 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin
spec.Params.MaximumExtraDataSize = (hexutil.Uint64)(params.MaximumExtraDataSize)
spec.Params.MinGasLimit = (hexutil.Uint64)(params.MinGasLimit)
spec.Params.GasLimitBoundDivisor = (hexutil.Uint64)(params.GasLimitBoundDivisor)
spec.Params.NetworkID = (hexutil.Uint64)(genesis.Config.ChainId.Uint64())
spec.Params.MaxCodeSize = params.MaxCodeSize
spec.Params.EIP155Transition = genesis.Config.EIP155Block.Uint64()

View File

@@ -631,6 +631,7 @@ func deployDashboard(client *sshClient, network string, conf *config, config *da
"Tangerine": conf.Genesis.Config.EIP150Block,
"Spurious": conf.Genesis.Config.EIP155Block,
"Byzantium": conf.Genesis.Config.ByzantiumBlock,
"Constantinople": conf.Genesis.Config.ConstantinopleBlock,
})
files[filepath.Join(workdir, "index.html")] = indexfile.Bytes()

View File

@@ -37,7 +37,7 @@ ADD genesis.json /genesis.json
RUN \
echo 'node server.js &' > wallet.sh && \
echo 'geth --cache 512 init /genesis.json' >> wallet.sh && \
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*"' >> wallet.sh
echo $'geth --networkid {{.NetworkID}} --port {{.NodePort}} --bootnodes {{.Bootnodes}} --ethstats \'{{.Ethstats}}\' --cache=512 --rpc --rpcaddr=0.0.0.0 --rpccorsdomain "*" --rpcvhosts "*"' >> wallet.sh
RUN \
sed -i 's/PuppethNetworkID/{{.NetworkID}}/g' dist/js/etherwallet-master.js && \

View File

@@ -59,15 +59,16 @@ func (w *wizard) run() {
fmt.Println()
// Make sure we have a good network name to work with fmt.Println()
// Docker accepts hyphens in image names, but doesn't like it for container names
if w.network == "" {
fmt.Println("Please specify a network name to administer (no spaces, please)")
fmt.Println("Please specify a network name to administer (no spaces or hyphens, please)")
for {
w.network = w.readString()
if !strings.Contains(w.network, " ") {
if !strings.Contains(w.network, " ") && !strings.Contains(w.network, "-") {
fmt.Printf("\nSweet, you can set this via --network=%s next time!\n\n", w.network)
break
}
log.Error("I also like to live dangerously, still no spaces")
log.Error("I also like to live dangerously, still no spaces or hyphens")
}
}
log.Info("Administering Ethereum network", "name", w.network)

View File

@@ -23,6 +23,7 @@ import (
"os"
"reflect"
"strconv"
"strings"
"unicode"
cli "gopkg.in/urfave/cli.v1"
@@ -97,10 +98,15 @@ func buildConfig(ctx *cli.Context) (config *bzzapi.Config, err error) {
config = bzzapi.NewDefaultConfig()
//first load settings from config file (if provided)
config, err = configFileOverride(config, ctx)
if err != nil {
return nil, err
}
//override settings provided by environment variables
config = envVarsOverride(config)
//override settings provided by command line
config = cmdLineOverride(config, ctx)
//validate configuration parameters
err = validateConfig(config)
return
}
@@ -194,12 +200,16 @@ func cmdLineOverride(currentConfig *bzzapi.Config, ctx *cli.Context) *bzzapi.Con
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
//EnsApi can be set to "", so can't check for empty string, as it is allowed!
if ctx.GlobalIsSet(EnsAPIFlag.Name) {
currentConfig.EnsApi = ctx.GlobalString(EnsAPIFlag.Name)
ensAPIs := ctx.GlobalStringSlice(EnsAPIFlag.Name)
// preserve backward compatibility to disable ENS with --ens-api=""
if len(ensAPIs) == 1 && ensAPIs[0] == "" {
ensAPIs = nil
}
currentConfig.EnsAPIs = ensAPIs
}
if ensaddr := ctx.GlobalString(EnsAddrFlag.Name); ensaddr != "" {
if ensaddr := ctx.GlobalString(DeprecatedEnsAddrFlag.Name); ensaddr != "" {
currentConfig.EnsRoot = common.HexToAddress(ensaddr)
}
@@ -266,9 +276,8 @@ func envVarsOverride(currentConfig *bzzapi.Config) (config *bzzapi.Config) {
utils.Fatalf(SWARM_ERR_SWAP_SET_NO_API)
}
//EnsApi can be set to "", so can't check for empty string, as it is allowed
if ensapi, exists := os.LookupEnv(SWARM_ENV_ENS_API); exists {
currentConfig.EnsApi = ensapi
if ensapi := os.Getenv(SWARM_ENV_ENS_API); ensapi != "" {
currentConfig.EnsAPIs = strings.Split(ensapi, ",")
}
if ensaddr := os.Getenv(SWARM_ENV_ENS_ADDR); ensaddr != "" {
@@ -309,6 +318,43 @@ func checkDeprecated(ctx *cli.Context) {
if ctx.GlobalString(DeprecatedEthAPIFlag.Name) != "" {
utils.Fatalf("--ethapi is no longer a valid command line flag, please use --ens-api and/or --swap-api.")
}
// warn if --ens-api flag is set
if ctx.GlobalString(DeprecatedEnsAddrFlag.Name) != "" {
log.Warn("--ens-addr is no longer a valid command line flag, please use --ens-api to specify contract address.")
}
}
//validate configuration parameters
func validateConfig(cfg *bzzapi.Config) (err error) {
for _, ensAPI := range cfg.EnsAPIs {
if ensAPI != "" {
if err := validateEnsAPIs(ensAPI); err != nil {
return fmt.Errorf("invalid format [tld:][contract-addr@]url for ENS API endpoint configuration %q: %v", ensAPI, err)
}
}
}
return nil
}
//validate EnsAPIs configuration parameter
func validateEnsAPIs(s string) (err error) {
// missing contract address
if strings.HasPrefix(s, "@") {
return errors.New("missing contract address")
}
// missing url
if strings.HasSuffix(s, "@") {
return errors.New("missing url")
}
// missing tld
if strings.HasPrefix(s, ":") {
return errors.New("missing tld")
}
// missing url
if strings.HasSuffix(s, ":") {
return errors.New("missing url")
}
return nil
}
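The accepted endpoint syntax is [tld:][contract-addr@]url: an optional TLD terminated by ':', an optional contract address terminated by '@', then the RPC URL. As an illustration of how such a string decomposes, here is a small, hedged splitter — not swarm's own parser, just a sketch that handles the shapes exercised by the test cases further down:

package main

import (
	"fmt"
	"strings"
)

// splitEnsAPI breaks "[tld:][contract-addr@]url" into its parts.
// Illustrative only; the real parser may differ in edge cases.
func splitEnsAPI(s string) (tld, contract, url string) {
	// A TLD prefix is a ':'-terminated label that is not part of a URL scheme
	// or path and does not contain the contract separator.
	if i := strings.Index(s, ":"); i > 0 &&
		!strings.ContainsAny(s[:i], "/@") && !strings.HasPrefix(s[i+1:], "//") {
		tld, s = s[:i], s[i+1:]
	}
	// An optional contract address precedes '@'.
	if i := strings.Index(s, "@"); i >= 0 {
		contract, s = s[:i], s[i+1:]
	}
	return tld, contract, s
}

func main() {
	tld, contract, url := splitEnsAPI("eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234")
	fmt.Println(tld, contract, url) // eth 314159265dD8dbb310642f98f50C066173C1259b ws://127.0.0.1:1234
}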
//print a Config as string

View File

@@ -457,3 +457,98 @@ func TestCmdLineOverridesFile(t *testing.T) {
node.Shutdown()
}
func TestValidateConfig(t *testing.T) {
for _, c := range []struct {
cfg *api.Config
err string
}{
{
cfg: &api.Config{EnsAPIs: []string{
"/data/testnet/geth.ipc",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"http://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"ws://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"test:/data/testnet/geth.ipc",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"test:ws://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"test:314159265dD8dbb310642f98f50C066173C1259b@/data/testnet/geth.ipc",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"eth:314159265dD8dbb310642f98f50C066173C1259b@http://127.0.0.1:1234",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"eth:314159265dD8dbb310642f98f50C066173C1259b@ws://127.0.0.1:12344",
}},
},
{
cfg: &api.Config{EnsAPIs: []string{
"eth:",
}},
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"eth:\": missing url",
},
{
cfg: &api.Config{EnsAPIs: []string{
"314159265dD8dbb310642f98f50C066173C1259b@",
}},
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"314159265dD8dbb310642f98f50C066173C1259b@\": missing url",
},
{
cfg: &api.Config{EnsAPIs: []string{
":314159265dD8dbb310642f98f50C066173C1259",
}},
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \":314159265dD8dbb310642f98f50C066173C1259\": missing tld",
},
{
cfg: &api.Config{EnsAPIs: []string{
"@/data/testnet/geth.ipc",
}},
err: "invalid format [tld:][contract-addr@]url for ENS API endpoint configuration \"@/data/testnet/geth.ipc\": missing contract address",
},
} {
err := validateConfig(c.cfg)
if c.err != "" && err.Error() != c.err {
t.Errorf("expected error %q, got %q", c.err, err)
}
if c.err == "" && err != nil {
t.Errorf("unexpected error %q", err)
}
}
}

View File

@@ -17,11 +17,9 @@
package main
import (
"context"
"crypto/ecdsa"
"fmt"
"io/ioutil"
"math/big"
"os"
"os/signal"
"runtime"
@@ -29,14 +27,12 @@ import (
"strconv"
"strings"
"syscall"
"time"
"github.com/ethereum/go-ethereum/accounts"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/cmd/utils"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/console"
"github.com/ethereum/go-ethereum/contracts/ens"
"github.com/ethereum/go-ethereum/crypto"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/internal/debug"
@@ -45,9 +41,9 @@ import (
"github.com/ethereum/go-ethereum/p2p"
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
"github.com/ethereum/go-ethereum/swarm"
bzzapi "github.com/ethereum/go-ethereum/swarm/api"
swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics"
"gopkg.in/urfave/cli.v1"
)
@@ -110,16 +106,11 @@ var (
Usage: "Swarm Syncing enabled (default true)",
EnvVar: SWARM_ENV_SYNC_ENABLE,
}
EnsAPIFlag = cli.StringFlag{
EnsAPIFlag = cli.StringSliceFlag{
Name: "ens-api",
Usage: "URL of the Ethereum API provider to use for ENS record lookups",
Usage: "ENS API endpoint for a TLD, optionally with a contract address; can be repeated; format: [tld:][contract-addr@]url",
EnvVar: SWARM_ENV_ENS_API,
}
EnsAddrFlag = cli.StringFlag{
Name: "ens-addr",
Usage: "ENS contract address (default is detected as testnet or mainnet using --ens-api)",
EnvVar: SWARM_ENV_ENS_ADDR,
}
SwarmApiFlag = cli.StringFlag{
Name: "bzzapi",
Usage: "Swarm HTTP endpoint",
@@ -156,6 +147,10 @@ var (
Name: "ethapi",
Usage: "DEPRECATED: please use --ens-api and --swap-api",
}
DeprecatedEnsAddrFlag = cli.StringFlag{
Name: "ens-addr",
Usage: "DEPRECATED: ENS contract address, please use --ens-api with contract address according to its format",
}
)
//declare a few constant error messages, useful for later error check comparisons in test
@@ -343,7 +338,6 @@ DEPRECATED: use 'swarm db clean'.
// bzzd-specific flags
CorsStringFlag,
EnsAPIFlag,
EnsAddrFlag,
SwarmTomlConfigPathFlag,
SwarmConfigPathFlag,
SwarmSwapEnabledFlag,
@@ -363,11 +357,17 @@ DEPRECATED: use 'swarm db clean'.
SwarmUploadMimeType,
//deprecated flags
DeprecatedEthAPIFlag,
DeprecatedEnsAddrFlag,
}
app.Flags = append(app.Flags, debug.Flags...)
app.Flags = append(app.Flags, swarmmetrics.Flags...)
app.Before = func(ctx *cli.Context) error {
runtime.GOMAXPROCS(runtime.NumCPU())
return debug.Setup(ctx)
if err := debug.Setup(ctx); err != nil {
return err
}
swarmmetrics.Setup(ctx)
return nil
}
app.After = func(ctx *cli.Context) error {
debug.Exit()
@@ -448,38 +448,6 @@ func bzzd(ctx *cli.Context) error {
return nil
}
// detectEnsAddr determines the ENS contract address by getting both the
// version and genesis hash using the client and matching them to either
// mainnet or testnet addresses
func detectEnsAddr(client *rpc.Client) (common.Address, error) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
var version string
if err := client.CallContext(ctx, &version, "net_version"); err != nil {
return common.Address{}, err
}
block, err := ethclient.NewClient(client).BlockByNumber(ctx, big.NewInt(0))
if err != nil {
return common.Address{}, err
}
switch {
case version == "1" && block.Hash() == params.MainnetGenesisHash:
log.Info("using Mainnet ENS contract address", "addr", ens.MainNetAddress)
return ens.MainNetAddress, nil
case version == "3" && block.Hash() == params.TestnetGenesisHash:
log.Info("using Testnet ENS contract address", "addr", ens.TestNetAddress)
return ens.TestNetAddress, nil
default:
return common.Address{}, fmt.Errorf("unknown version and genesis hash: %s %s", version, block.Hash())
}
}
func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.Node) {
//define the swarm service boot function
@@ -494,27 +462,7 @@ func registerBzzService(bzzconfig *bzzapi.Config, ctx *cli.Context, stack *node.
}
}
var ensClient *ethclient.Client
if bzzconfig.EnsApi != "" {
log.Info("connecting to ENS API", "url", bzzconfig.EnsApi)
client, err := rpc.Dial(bzzconfig.EnsApi)
if err != nil {
return nil, fmt.Errorf("error connecting to ENS API %s: %s", bzzconfig.EnsApi, err)
}
ensClient = ethclient.NewClient(client)
//no ENS root address set yet
if bzzconfig.EnsRoot == (common.Address{}) {
ensAddr, err := detectEnsAddr(client)
if err == nil {
bzzconfig.EnsRoot = ensAddr
} else {
log.Warn(fmt.Sprintf("could not determine ENS contract address, using default %s", bzzconfig.EnsRoot), "err", err)
}
}
}
return swarm.NewSwarm(ctx, swapClient, ensClient, bzzconfig, bzzconfig.SwapEnabled, bzzconfig.SyncEnabled, bzzconfig.Cors)
return swarm.NewSwarm(ctx, swapClient, bzzconfig)
}
//register within the ethereum node
if err := stack.Register(boot); err != nil {

View File

@@ -35,7 +35,7 @@ const bzzManifestJSON = "application/bzz-manifest+json"
func add(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need atleast three arguments <MHASH> <path> <HASH> [<content-type>]")
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH> [<content-type>]")
}
var (
@@ -69,7 +69,7 @@ func update(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 3 {
utils.Fatalf("Need atleast three arguments <MHASH> <path> <HASH>")
utils.Fatalf("Need at least three arguments <MHASH> <path> <HASH>")
}
var (
@@ -101,7 +101,7 @@ func update(ctx *cli.Context) {
func remove(ctx *cli.Context) {
args := ctx.Args()
if len(args) < 2 {
utils.Fatalf("Need atleast two arguments <MHASH> <path>")
utils.Fatalf("Need at least two arguments <MHASH> <path>")
}
var (

View File

@@ -25,6 +25,7 @@ import (
"os/signal"
"runtime"
"strings"
"syscall"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
@@ -64,7 +65,7 @@ func StartNode(stack *node.Node) {
}
go func() {
sigc := make(chan os.Signal, 1)
signal.Notify(sigc, os.Interrupt)
signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(sigc)
<-sigc
log.Info("Got interrupt, shutting down...")
@@ -85,7 +86,7 @@ func ImportChain(chain *core.BlockChain, fn string) error {
// If a signal is received, the import will stop at the next batch.
interrupt := make(chan os.Signal, 1)
stop := make(chan struct{})
signal.Notify(interrupt, os.Interrupt)
signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
defer signal.Stop(interrupt)
defer close(interrupt)
go func() {
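This change (and the matching ones in the console and wnode commands) registers SIGTERM alongside SIGINT, so shutdown hooks also run when the process is stopped by kill, systemd or a container runtime rather than Ctrl-C. A minimal sketch of the pattern:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigc := make(chan os.Signal, 1)
	// SIGINT covers Ctrl-C; SIGTERM covers kill, systemd and container stops.
	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(sigc)

	fmt.Println("waiting for SIGINT or SIGTERM ...")
	sig := <-sigc
	fmt.Println("got", sig, "- shutting down")
}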

View File

@@ -209,11 +209,6 @@ var (
Usage: "Dashboard metrics collection refresh rate",
Value: dashboard.DefaultConfig.Refresh,
}
DashboardAssetsFlag = cli.StringFlag{
Name: "dashboard.assets",
Usage: "Developer flag to serve the dashboard from the local file system",
Value: dashboard.DefaultConfig.Assets,
}
// Ethash settings
EthashCacheDirFlag = DirectoryFlag{
Name: "ethash.cachedir",
@@ -400,7 +395,7 @@ var (
RPCVirtualHostsFlag = cli.StringFlag{
Name: "rpcvhosts",
Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard.",
Value: "localhost",
Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","),
}
RPCApiFlag = cli.StringFlag{
Name: "rpcapi",
@@ -701,8 +696,9 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) {
if ctx.GlobalIsSet(RPCApiFlag.Name) {
cfg.HTTPModules = splitAndTrim(ctx.GlobalString(RPCApiFlag.Name))
}
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
if ctx.GlobalIsSet(RPCVirtualHostsFlag.Name) {
cfg.HTTPVirtualHosts = splitAndTrim(ctx.GlobalString(RPCVirtualHostsFlag.Name))
}
}
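Combined with the new flag default derived from node.DefaultConfig.HTTPVirtualHosts, the GlobalIsSet guard means a config-file value is only overwritten when --rpcvhosts is passed explicitly; an omitted flag no longer resets the virtual hosts to "localhost". A hedged sketch of that override-only-when-set pattern with urfave/cli v1 (flag and variable names are illustrative):

package main

import (
	"fmt"
	"os"
	"strings"

	cli "gopkg.in/urfave/cli.v1"
)

func main() {
	vhostsFlag := cli.StringFlag{
		Name:  "rpcvhosts",
		Usage: "Comma separated list of virtual hostnames",
		Value: "localhost", // flag default, mirrors the node defaults
	}

	app := cli.NewApp()
	app.Flags = []cli.Flag{vhostsFlag}
	app.Action = func(ctx *cli.Context) error {
		// Pretend this came from a TOML config file.
		virtualHosts := []string{"node.example.org"}
		// Override only when the flag was given on the command line,
		// so the flag default no longer clobbers the config value.
		if ctx.GlobalIsSet(vhostsFlag.Name) {
			virtualHosts = strings.Split(ctx.GlobalString(vhostsFlag.Name), ",")
		}
		fmt.Println(virtualHosts)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}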
// setWS creates the WebSocket RPC listener interface string from the set
@@ -824,6 +820,9 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) {
if ctx.GlobalIsSet(MaxPeersFlag.Name) {
cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name)
if lightServer && !ctx.GlobalIsSet(LightPeersFlag.Name) {
cfg.MaxPeers += lightPeers
}
} else {
if lightServer {
cfg.MaxPeers += lightPeers
@@ -1125,7 +1124,6 @@ func SetDashboardConfig(ctx *cli.Context, cfg *dashboard.Config) {
cfg.Host = ctx.GlobalString(DashboardAddrFlag.Name)
cfg.Port = ctx.GlobalInt(DashboardPortFlag.Name)
cfg.Refresh = ctx.GlobalDuration(DashboardRefreshFlag.Name)
cfg.Assets = ctx.GlobalString(DashboardAssetsFlag.Name)
}
// RegisterEthService adds an Ethereum client to the stack.

View File

@@ -22,6 +22,7 @@ package main
import (
"bufio"
"crypto/ecdsa"
crand "crypto/rand"
"crypto/sha512"
"encoding/binary"
"encoding/hex"
@@ -48,6 +49,7 @@ import (
)
const quitCommand = "~Q"
const entropySize = 32
// singletons
var (
@@ -55,6 +57,7 @@ var (
shh *whisper.Whisper
done chan struct{}
mailServer mailserver.WMailServer
entropy [entropySize]byte
input = bufio.NewReader(os.Stdin)
)
@@ -76,14 +79,15 @@ var (
// cmd arguments
var (
bootstrapMode = flag.Bool("standalone", false, "boostrap node: don't actively connect to peers, wait for incoming connections")
forwarderMode = flag.Bool("forwarder", false, "forwarder mode: only forward messages, neither send nor decrypt messages")
bootstrapMode = flag.Bool("standalone", false, "bootstrap node: don't initiate connection to peers, just wait for incoming connections")
forwarderMode = flag.Bool("forwarder", false, "forwarder mode: only forward messages, neither encrypt nor decrypt messages")
mailServerMode = flag.Bool("mailserver", false, "mail server mode: delivers expired messages on demand")
requestMail = flag.Bool("mailclient", false, "request expired messages from the bootstrap server")
asymmetricMode = flag.Bool("asym", false, "use asymmetric encryption")
generateKey = flag.Bool("generatekey", false, "generate and show the private key")
fileExMode = flag.Bool("fileexchange", false, "file exchange mode")
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics")
fileReader = flag.Bool("filereader", false, "load and decrypt messages saved as files, display as plain text")
testMode = flag.Bool("test", false, "use of predefined parameters for diagnostics (password, etc.)")
echoMode = flag.Bool("echo", false, "echo mode: prints some arguments for diagnostics")
argVerbosity = flag.Int("verbosity", int(log.LvlError), "log verbosity level")
@@ -99,13 +103,14 @@ var (
argIDFile = flag.String("idfile", "", "file name with node id (private key)")
argEnode = flag.String("boot", "", "bootstrap node you want to connect to (e.g. enode://e454......08d50@52.176.211.200:16428)")
argTopic = flag.String("topic", "", "topic in hexadecimal format (e.g. 70a4beef)")
argSaveDir = flag.String("savedir", "", "directory where incoming messages will be saved as files")
argSaveDir = flag.String("savedir", "", "directory where all incoming messages will be saved as files")
)
func main() {
processArgs()
initialize()
run()
shutdown()
}
func processArgs() {
@@ -192,6 +197,8 @@ func initialize() {
if len(*argIP) == 0 {
argIP = scanLineA("Please enter your IP and port (e.g. 127.0.0.1:30348): ")
}
} else if *fileReader {
*bootstrapMode = true
} else {
if len(*argEnode) == 0 {
argEnode = scanLineA("Please enter the peer's enode: ")
@@ -200,11 +207,6 @@ func initialize() {
peers = append(peers, peer)
}
cfg := &whisper.Config{
MaxMessageSize: uint32(*argMaxSize),
MinimumAcceptedPOW: *argPoW,
}
if *mailServerMode {
if len(msPassword) == 0 {
msPassword, err = console.Stdin.PromptPassword("Please enter the Mail Server password: ")
@@ -212,14 +214,15 @@ func initialize() {
utils.Fatalf("Failed to read Mail Server password: %s", err)
}
}
shh = whisper.New(cfg)
shh.RegisterServer(&mailServer)
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
} else {
shh = whisper.New(cfg)
}
cfg := &whisper.Config{
MaxMessageSize: uint32(*argMaxSize),
MinimumAcceptedPOW: *argPoW,
}
shh = whisper.New(cfg)
if *argPoW != whisper.DefaultMinimumPoW {
err := shh.SetMinimumPoW(*argPoW)
if err != nil {
@@ -261,6 +264,16 @@ func initialize() {
maxPeers = 800
}
_, err = crand.Read(entropy[:])
if err != nil {
utils.Fatalf("crypto/rand failed: %s", err)
}
if *mailServerMode {
shh.RegisterServer(&mailServer)
mailServer.Init(shh, *argDBPath, msPassword, *argServerPoW)
}
server = &p2p.Server{
Config: p2p.Config{
PrivateKey: nodeid,
@@ -276,10 +289,11 @@ func initialize() {
}
}
func startServer() {
func startServer() error {
err := server.Start()
if err != nil {
utils.Fatalf("Failed to start Whisper peer: %s.", err)
fmt.Printf("Failed to start Whisper peer: %s.", err)
return err
}
fmt.Printf("my public key: %s \n", common.ToHex(crypto.FromECDSAPub(&asymKey.PublicKey)))
@@ -295,9 +309,14 @@ func startServer() {
configureNode()
}
if !*forwarderMode {
if *fileExMode {
fmt.Printf("Please type the file name to be send. To quit type: '%s'\n", quitCommand)
} else if *fileReader {
fmt.Printf("Please type the file name to be decrypted. To quit type: '%s'\n", quitCommand)
} else if !*forwarderMode {
fmt.Printf("Please type the message. To quit type: '%s'\n", quitCommand)
}
return nil
}
func isKeyValid(k *ecdsa.PublicKey) bool {
@@ -411,8 +430,10 @@ func waitForConnection(timeout bool) {
}
func run() {
defer mailServer.Close()
startServer()
err := startServer()
if err != nil {
return
}
defer server.Stop()
shh.Start(nil)
defer shh.Stop()
@@ -425,21 +446,26 @@ func run() {
requestExpiredMessagesLoop()
} else if *fileExMode {
sendFilesLoop()
} else if *fileReader {
fileReaderLoop()
} else {
sendLoop()
}
}
func shutdown() {
close(done)
mailServer.Close()
}
func sendLoop() {
for {
s := scanLine("")
if s == quitCommand {
fmt.Println("Quit command received")
close(done)
break
return
}
sendMsg([]byte(s))
if *asymmetricMode {
// print your own message for convenience,
// because in asymmetric mode it is impossible to decrypt it
@@ -455,13 +481,11 @@ func sendFilesLoop() {
s := scanLine("")
if s == quitCommand {
fmt.Println("Quit command received")
close(done)
break
return
}
b, err := ioutil.ReadFile(s)
if err != nil {
fmt.Printf(">>> Error: %s \n", err)
continue
} else {
h := sendMsg(b)
if (h == common.Hash{}) {
@@ -475,6 +499,38 @@ func sendFilesLoop() {
}
}
func fileReaderLoop() {
watcher1 := shh.GetFilter(symFilterID)
watcher2 := shh.GetFilter(asymFilterID)
if watcher1 == nil && watcher2 == nil {
fmt.Println("Error: neither symmetric nor asymmetric filter is installed")
return
}
for {
s := scanLine("")
if s == quitCommand {
fmt.Println("Quit command received")
return
}
raw, err := ioutil.ReadFile(s)
if err != nil {
fmt.Printf(">>> Error: %s \n", err)
} else {
env := whisper.Envelope{Data: raw} // the topic is zero
msg := env.Open(watcher1) // force-open envelope regardless of the topic
if msg == nil {
msg = env.Open(watcher2)
}
if msg == nil {
fmt.Printf(">>> Error: failed to decrypt the message \n")
} else {
printMessageInfo(msg)
}
}
}
}
func scanLine(prompt string) string {
if len(prompt) > 0 {
fmt.Print(prompt)
@@ -517,6 +573,7 @@ func sendMsg(payload []byte) common.Hash {
if err != nil {
utils.Fatalf("failed to create new message: %s", err)
}
envelope, err := msg.Wrap(&params)
if err != nil {
fmt.Printf("failed to seal message: %v \n", err)
@@ -548,21 +605,21 @@ func messageLoop() {
for {
select {
case <-ticker.C:
messages := sf.Retrieve()
m1 := sf.Retrieve()
m2 := af.Retrieve()
messages := append(m1, m2...)
for _, msg := range messages {
if *fileExMode || len(msg.Payload) > 2048 {
writeMessageToFile(*argSaveDir, msg)
} else {
reportedOnce := false
if !*fileExMode && len(msg.Payload) <= 2048 {
printMessageInfo(msg)
reportedOnce = true
}
}
messages = af.Retrieve()
for _, msg := range messages {
if *fileExMode || len(msg.Payload) > 2048 {
writeMessageToFile(*argSaveDir, msg)
} else {
printMessageInfo(msg)
// All messages are saved upon specifying argSaveDir.
// fileExMode only specifies how messages are displayed on the console after they are saved.
// if fileExMode == true, only the hashes are displayed, since messages might be too big.
if len(*argSaveDir) > 0 {
writeMessageToFile(*argSaveDir, msg, !reportedOnce)
}
}
case <-done:
@@ -587,7 +644,11 @@ func printMessageInfo(msg *whisper.ReceivedMessage) {
}
}
func writeMessageToFile(dir string, msg *whisper.ReceivedMessage) {
func writeMessageToFile(dir string, msg *whisper.ReceivedMessage, show bool) {
if len(dir) == 0 {
return
}
timestamp := fmt.Sprintf("%d", msg.Sent)
name := fmt.Sprintf("%x", msg.EnvelopeHash)
@@ -596,27 +657,32 @@ func writeMessageToFile(dir string, msg *whisper.ReceivedMessage) {
address = crypto.PubkeyToAddress(*msg.Src)
}
if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
// message from myself: don't save, only report
fmt.Printf("\n%s <%x>: message received: '%s'\n", timestamp, address, name)
} else if len(dir) > 0 {
fullpath := filepath.Join(dir, name)
err := ioutil.WriteFile(fullpath, msg.Payload, 0644)
if err != nil {
fmt.Printf("\n%s {%x}: message received but not saved: %s\n", timestamp, address, err)
} else {
fmt.Printf("\n%s {%x}: message received and saved as '%s' (%d bytes)\n", timestamp, address, name, len(msg.Payload))
}
} else {
fmt.Printf("\n%s {%x}: big message received (%d bytes), but not saved: %s\n", timestamp, address, len(msg.Payload), name)
env := shh.GetEnvelope(msg.EnvelopeHash)
if env == nil {
fmt.Printf("\nUnexpected error: envelope not found: %x\n", msg.EnvelopeHash)
return
}
// this is sample code; uncomment it if you don't want to save your own messages.
//if whisper.IsPubKeyEqual(msg.Src, &asymKey.PublicKey) {
// fmt.Printf("\n%s <%x>: message from myself received, not saved: '%s'\n", timestamp, address, name)
// return
//}
fullpath := filepath.Join(dir, name)
err := ioutil.WriteFile(fullpath, env.Data, 0644)
if err != nil {
fmt.Printf("\n%s {%x}: message received but not saved: %s\n", timestamp, address, err)
} else if show {
fmt.Printf("\n%s {%x}: message received and saved as '%s' (%d bytes)\n", timestamp, address, name, len(env.Data))
}
}
func requestExpiredMessagesLoop() {
var key, peerID []byte
var key, peerID, bloom []byte
var timeLow, timeUpp uint32
var t string
var xt, empty whisper.TopicType
var xt whisper.TopicType
keyID, err := shh.AddSymKeyFromPassword(msPassword)
if err != nil {
@@ -632,25 +698,31 @@ func requestExpiredMessagesLoop() {
for {
timeLow = scanUint("Please enter the lower limit of the time range (unix timestamp): ")
timeUpp = scanUint("Please enter the upper limit of the time range (unix timestamp): ")
t = scanLine("Please enter the topic (hexadecimal): ")
if len(t) >= whisper.TopicLength*2 {
t = scanLine("Enter the topic (hex). Press enter to request all messages, regardless of the topic: ")
if len(t) == whisper.TopicLength*2 {
x, err := hex.DecodeString(t)
if err != nil {
utils.Fatalf("Failed to parse the topic: %s", err)
fmt.Printf("Failed to parse the topic: %s \n", err)
continue
}
xt = whisper.BytesToTopic(x)
bloom = whisper.TopicToBloom(xt)
obfuscateBloom(bloom)
} else if len(t) == 0 {
bloom = whisper.MakeFullNodeBloom()
} else {
fmt.Println("Error: topic is invalid, request aborted")
continue
}
if timeUpp == 0 {
timeUpp = 0xFFFFFFFF
}
data := make([]byte, 8+whisper.TopicLength)
data := make([]byte, 8, 8+whisper.BloomFilterSize)
binary.BigEndian.PutUint32(data, timeLow)
binary.BigEndian.PutUint32(data[4:], timeUpp)
copy(data[8:], xt[:])
if xt == empty {
data = data[:8]
}
data = append(data, bloom...)
var params whisper.MessageParams
params.PoW = *argServerPoW
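The request payload layout changes from timeLow|timeUpp|topic to timeLow|timeUpp|bloom: two big-endian uint32 bounds followed by the 64-byte whisper bloom filter (whisper.BloomFilterSize), letting the mail server match any topic covered by the bloom. A small sketch of encoding and decoding that layout, with the size restated locally:

package main

import (
	"encoding/binary"
	"fmt"
)

const bloomFilterSize = 64 // whisper.BloomFilterSize

// encodeRequest builds: 4 bytes lower bound | 4 bytes upper bound | bloom.
func encodeRequest(timeLow, timeUpp uint32, bloom []byte) []byte {
	data := make([]byte, 8, 8+bloomFilterSize)
	binary.BigEndian.PutUint32(data, timeLow)
	binary.BigEndian.PutUint32(data[4:], timeUpp)
	return append(data, bloom...)
}

func decodeRequest(data []byte) (timeLow, timeUpp uint32, bloom []byte) {
	timeLow = binary.BigEndian.Uint32(data[:4])
	timeUpp = binary.BigEndian.Uint32(data[4:8])
	bloom = data[8:]
	return
}

func main() {
	bloom := make([]byte, bloomFilterSize)
	bloom[3] = 0x80 // pretend one topic bit is set

	payload := encodeRequest(1500000000, 0xFFFFFFFF, bloom)
	lo, up, b := decodeRequest(payload)
	fmt.Println(len(payload), lo, up, len(b)) // 72 1500000000 4294967295 64
}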
@@ -684,3 +756,20 @@ func extractIDFromEnode(s string) []byte {
}
return n.ID[:]
}
// obfuscateBloom adds 16 random bits to the bloom
// filter, in order to obfuscate the containing topics.
// it does so deterministically within every session.
// despite additional bits, it will match on average
// 32000 times fewer messages than a full node's bloom filter.
func obfuscateBloom(bloom []byte) {
const half = entropySize / 2
for i := 0; i < half; i++ {
x := int(entropy[i])
if entropy[half+i] < 128 {
x += 256
}
bloom[x/8] = 1 << uint(x%8) // set the bit number X
}
}
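With entropySize = 32 the loop sets 16 extra bits: each index is entropy[i] (0-255), shifted by 256 when entropy[16+i] < 128, so every x lands in 0-511, i.e. inside the 512-bit (64-byte) bloom. Because the entropy is drawn once per session, the obfuscation stays stable across requests. The sketch below is a hedged variant that ORs the bits in instead of assigning whole bytes, so topic bits sharing a byte are preserved — a defensive tweak of my own, not the code above:

package main

import (
	crand "crypto/rand"
	"fmt"
)

const (
	entropySize     = 32
	bloomFilterSize = 64 // whisper.BloomFilterSize: 512 bits
)

// orBloomBits sets 16 session-stable random bits in the bloom filter.
// Unlike a plain assignment, OR-ing preserves any topic bits that already
// live in the same byte.
func orBloomBits(bloom []byte, entropy [entropySize]byte) {
	const half = entropySize / 2
	for i := 0; i < half; i++ {
		x := int(entropy[i])
		if entropy[half+i] < 128 {
			x += 256 // spread indices over the full 512-bit range
		}
		bloom[x/8] |= 1 << uint(x%8)
	}
}

func main() {
	var entropy [entropySize]byte
	if _, err := crand.Read(entropy[:]); err != nil {
		panic(err)
	}
	bloom := make([]byte, bloomFilterSize)
	orBloomBits(bloom, entropy)

	set := 0
	for _, b := range bloom {
		for ; b != 0; b &= b - 1 {
			set++
		}
	}
	fmt.Println("bits set:", set) // at most 16 (collisions possible)
}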

View File

@@ -25,6 +25,6 @@ var (
Big3 = big.NewInt(3)
Big0 = big.NewInt(0)
Big32 = big.NewInt(32)
Big256 = big.NewInt(0xff)
Big256 = big.NewInt(256)
Big257 = big.NewInt(257)
)

View File

@@ -65,7 +65,6 @@ type solcOutput struct {
func (s *Solidity) makeArgs() []string {
p := []string{
"--combined-json", "bin,abi,userdoc,devdoc",
"--add-std", // include standard lib contracts
"--optimize", // code optimizer switched on
}
if s.Major > 0 || s.Minor > 4 || s.Patch > 6 {

View File

@@ -19,6 +19,7 @@ package ethash
import (
"encoding/binary"
"hash"
"math/big"
"reflect"
"runtime"
"sync"
@@ -47,6 +48,48 @@ const (
loopAccesses = 64 // Number of accesses in hashimoto loop
)
// cacheSize returns the size of the ethash verification cache that belongs to a certain
// block number.
func cacheSize(block uint64) uint64 {
epoch := int(block / epochLength)
if epoch < maxEpoch {
return cacheSizes[epoch]
}
return calcCacheSize(epoch)
}
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcCacheSize(epoch int) uint64 {
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes
}
return size
}
// datasetSize returns the size of the ethash mining dataset that belongs to a certain
// block number.
func datasetSize(block uint64) uint64 {
epoch := int(block / epochLength)
if epoch < maxEpoch {
return datasetSizes[epoch]
}
return calcDatasetSize(epoch)
}
// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcDatasetSize(epoch int) uint64 {
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * mixBytes
}
return size
}
// hasher is a repetitive hasher allowing the same hash data structures to be
// reused between hash runs instead of requiring new ones to be created.
type hasher func(dest []byte, data []byte)
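Worked example for epoch 0: 2^24 - 64 = 16777152 bytes, i.e. 262143 hash-sized slots; 262143 and 262141 are composite, 262139 is prime, so the cache ends up at 262139 * 64 = 16776896 bytes — the first entry of the cacheSizes table (TestSizeCalculations below cross-checks every entry the same way). A self-contained version of the calculation, with the ethash constants restated locally:

package main

import (
	"fmt"
	"math/big"
)

// Ethash size constants, restated locally for a self-contained check.
const (
	cacheInitBytes     = 1 << 24 // bytes in cache at genesis
	cacheGrowthBytes   = 1 << 17 // cache growth per epoch
	datasetInitBytes   = 1 << 30 // bytes in dataset at genesis
	datasetGrowthBytes = 1 << 23 // dataset growth per epoch
	hashBytes          = 64      // hash length in bytes
	mixBytes           = 128     // width of mix
)

// calcSize mirrors calcCacheSize/calcDatasetSize: grow linearly, then step
// down to the highest prime number of items below the threshold.
func calcSize(init, growth, item uint64, epoch int) uint64 {
	size := init + growth*uint64(epoch) - item
	for !new(big.Int).SetUint64(size / item).ProbablyPrime(1) { // accurate for n < 2^64
		size -= 2 * item
	}
	return size
}

func main() {
	fmt.Println(calcSize(cacheInitBytes, cacheGrowthBytes, hashBytes, 0))    // 16776896
	fmt.Println(calcSize(datasetInitBytes, datasetGrowthBytes, mixBytes, 0)) // 1073739904
}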

View File

@@ -1,47 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build !go1.8
package ethash
// cacheSize calculates and returns the size of the ethash verification cache that
// belongs to a certain block number. The cache size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func cacheSize(block uint64) uint64 {
// If we have a pre-generated value, use that
epoch := int(block / epochLength)
if epoch < maxEpoch {
return cacheSizes[epoch]
}
// We don't have a way to verify primes fast before Go 1.8
panic("fast prime testing unsupported in Go < 1.8")
}
// datasetSize calculates and returns the size of the ethash mining dataset that
// belongs to a certain block number. The dataset size grows linearly, however, we
// always take the highest prime below the linearly growing threshold in order to
// reduce the risk of accidental regularities leading to cyclic behavior.
func datasetSize(block uint64) uint64 {
// If we have a pre-generated value, use that
epoch := int(block / epochLength)
if epoch < maxEpoch {
return datasetSizes[epoch]
}
// We don't have a way to verify primes fast before Go 1.8
panic("fast prime testing unsupported in Go < 1.8")
}

View File

@@ -1,63 +0,0 @@
// Copyright 2017 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build go1.8
package ethash
import "math/big"
// cacheSize returns the size of the ethash verification cache that belongs to a certain
// block number.
func cacheSize(block uint64) uint64 {
epoch := int(block / epochLength)
if epoch < maxEpoch {
return cacheSizes[epoch]
}
return calcCacheSize(epoch)
}
// calcCacheSize calculates the cache size for epoch. The cache size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcCacheSize(epoch int) uint64 {
size := cacheInitBytes + cacheGrowthBytes*uint64(epoch) - hashBytes
for !new(big.Int).SetUint64(size / hashBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * hashBytes
}
return size
}
// datasetSize returns the size of the ethash mining dataset that belongs to a certain
// block number.
func datasetSize(block uint64) uint64 {
epoch := int(block / epochLength)
if epoch < maxEpoch {
return datasetSizes[epoch]
}
return calcDatasetSize(epoch)
}
// calcDatasetSize calculates the dataset size for epoch. The dataset size grows linearly,
// however, we always take the highest prime below the linearly growing threshold in order
// to reduce the risk of accidental regularities leading to cyclic behavior.
func calcDatasetSize(epoch int) uint64 {
size := datasetInitBytes + datasetGrowthBytes*uint64(epoch) - mixBytes
for !new(big.Int).SetUint64(size / mixBytes).ProbablyPrime(1) { // Always accurate for n < 2^64
size -= 2 * mixBytes
}
return size
}

View File

@@ -30,6 +30,22 @@ import (
"github.com/ethereum/go-ethereum/core/types"
)
// Tests whether the dataset size calculator works correctly by cross checking the
// hard coded lookup table with the value generated by it.
func TestSizeCalculations(t *testing.T) {
// Verify all the cache and dataset sizes from the lookup table.
for epoch, want := range cacheSizes {
if size := calcCacheSize(epoch); size != want {
t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
}
}
for epoch, want := range datasetSizes {
if size := calcDatasetSize(epoch); size != want {
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
}
}
}
// Tests that verification caches can be correctly generated.
func TestCacheGeneration(t *testing.T) {
tests := []struct {

View File

@@ -53,7 +53,6 @@ var (
errDuplicateUncle = errors.New("duplicate uncle")
errUncleIsAncestor = errors.New("uncle is ancestor")
errDanglingUncle = errors.New("uncle's parent is not ancestor")
errNonceOutOfRange = errors.New("nonce out of range")
errInvalidDifficulty = errors.New("non-positive difficulty")
errInvalidMixDigest = errors.New("invalid mix digest")
errInvalidPoW = errors.New("invalid proof-of-work")
@@ -356,7 +355,7 @@ func calcDifficultyByzantium(time uint64, parent *types.Header) *big.Int {
if x.Cmp(params.MinimumDifficulty) < 0 {
x.Set(params.MinimumDifficulty)
}
// calculate a fake block numer for the ice-age delay:
// calculate a fake block number for the ice-age delay:
// https://github.com/ethereum/EIPs/pull/669
// fake_block_number = max(0, block.number - 3_000_000)
fakeBlockNumber := new(big.Int)
@@ -474,18 +473,13 @@ func (ethash *Ethash) VerifySeal(chain consensus.ChainReader, header *types.Head
if ethash.shared != nil {
return ethash.shared.VerifySeal(chain, header)
}
// Sanity check that the block number is below the lookup table size (60M blocks)
number := header.Number.Uint64()
if number/epochLength >= maxEpoch {
// Go < 1.7 cannot calculate new cache/dataset sizes (no fast prime check)
return errNonceOutOfRange
}
// Ensure that we have a valid difficulty for the block
if header.Difficulty.Sign() <= 0 {
return errInvalidDifficulty
}
// Recompute the digest and PoW value and verify against the header
number := header.Number.Uint64()
cache := ethash.cache(number)
size := datasetSize(number)
if ethash.config.PowMode == ModeTest {

View File

@@ -35,9 +35,9 @@ import (
mmap "github.com/edsrzf/mmap-go"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/rpc"
"github.com/hashicorp/golang-lru/simplelru"
metrics "github.com/rcrowley/go-metrics"
)
var ErrInvalidDumpMagic = errors.New("invalid dump magic")

View File

@@ -26,6 +26,7 @@ import (
"regexp"
"sort"
"strings"
"syscall"
"github.com/ethereum/go-ethereum/internal/jsre"
"github.com/ethereum/go-ethereum/internal/web3ext"
@@ -332,7 +333,7 @@ func (c *Console) Interactive() {
}()
// Monitor Ctrl-C too in case the input is empty and we need to bail
abort := make(chan os.Signal, 1)
signal.Notify(abort, os.Interrupt)
signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM)
// Start sending prompts to the user and reading back inputs
for {

View File

@@ -1 +0,0 @@
.vagrant

View File

@@ -1,38 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
require 'yaml'
VAGRANTFILE_API_VERSION = 2
VM_RAM = 2048
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.define "ubuntu", :primary => true do |ubuntu|
ubuntu.vm.box = "ubuntu/trusty64"
ubuntu.vm.provision "shell", :path => "provisioners/shell/ubuntu.sh"
end
config.vm.define "debian", :primary => true do |debian|
debian.vm.box = "debian/jessie64"
debian.vm.provision "shell", :path => "provisioners/shell/debian.sh"
end
config.vm.define "centos", :autostart => false do |centos|
centos.vm.box = "centos/7"
centos.vm.provision "shell", :path => "provisioners/shell/centos.sh"
end
config.vm.provider "virtualbox" do |vb|
vb.memory = VM_RAM
end
config.vm.provider "libvirt" do |lv|
lv.memory = VM_RAM
config.vm.synced_folder ".", "/home/vagrant/sync", :disabled => true
end
config.vm.synced_folder ".", "/vagrant", :disabled => true
config.vm.synced_folder "../../", "/home/vagrant/go/src/github.com/ethereum/go-ethereum"
end

View File

@@ -1,11 +0,0 @@
#!/bin/bash
sudo yum install -y git wget
sudo yum update -y
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

View File

@@ -1,11 +0,0 @@
#!/bin/bash
sudo apt-get install -y build-essential git-all wget
sudo apt-get update
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

View File

@@ -1,11 +0,0 @@
#!/bin/bash
sudo apt-get install -y build-essential git-all wget
sudo apt-get update
wget --continue https://storage.googleapis.com/golang/go1.8.1.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf go1.8.1.linux-amd64.tar.gz
GETH_PATH="~vagrant/go/src/github.com/ethereum/go-ethereum/build/bin/"
echo "export PATH=$PATH:/usr/local/go/bin:$GETH_PATH" >> ~vagrant/.bashrc

View File

@@ -281,8 +281,8 @@ func TestDeposit(t *testing.T) {
t.Fatalf("expected balance %v, got %v", exp, chbook.Balance())
}
// autodeposit every 30ms if new cheque issued
interval := 30 * time.Millisecond
// autodeposit every 200ms if new cheque issued
interval := 200 * time.Millisecond
chbook.AutoDeposit(interval, common.Big1, balance)
_, err = chbook.Issue(addr1, amount)
if err != nil {

View File

@@ -122,7 +122,7 @@ func (c *Compiler) next() token {
}
// compile line compiles a single line instruction e.g.
// "push 1", "jump @labal".
// "push 1", "jump @label".
func (c *Compiler) compileLine() error {
n := c.next()
if n.typ != lineStart {

View File

@@ -48,7 +48,7 @@ const (
lineEnd // emitted when a line ends
invalidStatement // any invalid statement
element // any element during element parsing
label // label is emitted when a labal is found
label // label is emitted when a label is found
labelDef // label definition is emitted when a new label is found
number // number is emitted when a number is found
stringValue // stringValue is emitted when a string has been found
@@ -206,7 +206,7 @@ func lexLine(l *lexer) stateFn {
return lexComment
case isSpace(r):
l.ignore()
case isAlphaNumeric(r) || r == '_':
case isLetter(r) || r == '_':
return lexElement
case isNumber(r):
return lexNumber
@@ -278,7 +278,7 @@ func lexElement(l *lexer) stateFn {
return lexLine
}
func isAlphaNumeric(t rune) bool {
func isLetter(t rune) bool {
return unicode.IsLetter(t)
}

View File

@@ -46,7 +46,7 @@ import (
)
var (
blockInsertTimer = metrics.NewTimer("chain/inserts")
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
ErrNoGenesis = errors.New("Genesis not found in chain")
)
@@ -107,8 +107,8 @@ type BlockChain struct {
procmu sync.RWMutex // block processor lock
checkpoint int // checkpoint counts towards the new checkpoint
currentBlock *types.Block // Current head of the block chain
currentFastBlock *types.Block // Current head of the fast-sync chain (may be above the block chain!)
currentBlock atomic.Value // Current head of the block chain
currentFastBlock atomic.Value // Current head of the fast-sync chain (may be above the block chain!)
stateCache state.Database // State database to reuse between imports (contains state cache)
bodyCache *lru.Cache // Cache for the most recent block bodies
@@ -224,10 +224,10 @@ func (bc *BlockChain) loadLastState() error {
}
}
// Everything seems to be fine, set as the head block
bc.currentBlock = currentBlock
bc.currentBlock.Store(currentBlock)
// Restore the last known head header
currentHeader := bc.currentBlock.Header()
currentHeader := currentBlock.Header()
if head := GetHeadHeaderHash(bc.db); head != (common.Hash{}) {
if header := bc.GetHeaderByHash(head); header != nil {
currentHeader = header
@@ -236,21 +236,23 @@ func (bc *BlockChain) loadLastState() error {
bc.hc.SetCurrentHeader(currentHeader)
// Restore the last known head fast block
bc.currentFastBlock = bc.currentBlock
bc.currentFastBlock.Store(currentBlock)
if head := GetHeadFastBlockHash(bc.db); head != (common.Hash{}) {
if block := bc.GetBlockByHash(head); block != nil {
bc.currentFastBlock = block
bc.currentFastBlock.Store(block)
}
}
// Issue a status log for the user
currentFastBlock := bc.CurrentFastBlock()
headerTd := bc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64())
blockTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
fastTd := bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64())
blockTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
fastTd := bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64())
log.Info("Loaded most recent local header", "number", currentHeader.Number, "hash", currentHeader.Hash(), "td", headerTd)
log.Info("Loaded most recent local full block", "number", bc.currentBlock.Number(), "hash", bc.currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", bc.currentFastBlock.Number(), "hash", bc.currentFastBlock.Hash(), "td", fastTd)
log.Info("Loaded most recent local full block", "number", currentBlock.Number(), "hash", currentBlock.Hash(), "td", blockTd)
log.Info("Loaded most recent local fast block", "number", currentFastBlock.Number(), "hash", currentFastBlock.Hash(), "td", fastTd)
return nil
}
@@ -279,30 +281,32 @@ func (bc *BlockChain) SetHead(head uint64) error {
bc.futureBlocks.Purge()
// Rewind the block chain, ensuring we don't end up with a stateless head block
if bc.currentBlock != nil && currentHeader.Number.Uint64() < bc.currentBlock.NumberU64() {
bc.currentBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
if currentBlock := bc.CurrentBlock(); currentBlock != nil && currentHeader.Number.Uint64() < currentBlock.NumberU64() {
bc.currentBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
}
if bc.currentBlock != nil {
if _, err := state.New(bc.currentBlock.Root(), bc.stateCache); err != nil {
if currentBlock := bc.CurrentBlock(); currentBlock != nil {
if _, err := state.New(currentBlock.Root(), bc.stateCache); err != nil {
// Rewound state missing, rolled back to before pivot, reset to genesis
bc.currentBlock = nil
bc.currentBlock.Store(bc.genesisBlock)
}
}
// Rewind the fast block in a simpleton way to the target head
if bc.currentFastBlock != nil && currentHeader.Number.Uint64() < bc.currentFastBlock.NumberU64() {
bc.currentFastBlock = bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64())
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock != nil && currentHeader.Number.Uint64() < currentFastBlock.NumberU64() {
bc.currentFastBlock.Store(bc.GetBlock(currentHeader.Hash(), currentHeader.Number.Uint64()))
}
// If either blocks reached nil, reset to the genesis state
if bc.currentBlock == nil {
bc.currentBlock = bc.genesisBlock
if currentBlock := bc.CurrentBlock(); currentBlock == nil {
bc.currentBlock.Store(bc.genesisBlock)
}
if bc.currentFastBlock == nil {
bc.currentFastBlock = bc.genesisBlock
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock == nil {
bc.currentFastBlock.Store(bc.genesisBlock)
}
if err := WriteHeadBlockHash(bc.db, bc.currentBlock.Hash()); err != nil {
currentBlock := bc.CurrentBlock()
currentFastBlock := bc.CurrentFastBlock()
if err := WriteHeadBlockHash(bc.db, currentBlock.Hash()); err != nil {
log.Crit("Failed to reset head full block", "err", err)
}
if err := WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash()); err != nil {
if err := WriteHeadFastBlockHash(bc.db, currentFastBlock.Hash()); err != nil {
log.Crit("Failed to reset head fast block", "err", err)
}
return bc.loadLastState()
@@ -321,7 +325,7 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
}
// If all checks out, manually set the head block
bc.mu.Lock()
bc.currentBlock = block
bc.currentBlock.Store(block)
bc.mu.Unlock()
log.Info("Committed new head block", "number", block.Number(), "hash", hash)
@@ -330,28 +334,19 @@ func (bc *BlockChain) FastSyncCommitHead(hash common.Hash) error {
// GasLimit returns the gas limit of the current HEAD block.
func (bc *BlockChain) GasLimit() uint64 {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.currentBlock.GasLimit()
return bc.CurrentBlock().GasLimit()
}
// CurrentBlock retrieves the current head block of the canonical chain. The
// block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentBlock() *types.Block {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.currentBlock
return bc.currentBlock.Load().(*types.Block)
}
// CurrentFastBlock retrieves the current fast-sync head block of the canonical
// chain. The block is retrieved from the blockchain's internal cache.
func (bc *BlockChain) CurrentFastBlock() *types.Block {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.currentFastBlock
return bc.currentFastBlock.Load().(*types.Block)
}
// SetProcessor sets the processor required for making state modifications.
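Turning currentBlock and currentFastBlock into atomic.Value fields lets hot read paths such as CurrentBlock() skip bc.mu entirely: writers Store a fresh *types.Block, readers Load and type-assert it. A minimal sketch of the pattern (toy types, not the blockchain's):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type block struct{ number uint64 }

type chain struct {
	head atomic.Value // holds *block; always Store the same concrete type
}

func (c *chain) CurrentBlock() *block { return c.head.Load().(*block) }
func (c *chain) setHead(b *block)     { c.head.Store(b) }

func main() {
	c := &chain{}
	c.setHead(&block{number: 0})

	var wg sync.WaitGroup
	for i := 1; i <= 4; i++ {
		wg.Add(1)
		go func(n uint64) { // concurrent readers and writers, no mutex needed
			defer wg.Done()
			c.setHead(&block{number: n})
			_ = c.CurrentBlock().number
		}(uint64(i))
	}
	wg.Wait()
	fmt.Println("head:", c.CurrentBlock().number)
}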
@@ -416,10 +411,10 @@ func (bc *BlockChain) ResetWithGenesisBlock(genesis *types.Block) error {
}
bc.genesisBlock = genesis
bc.insert(bc.genesisBlock)
bc.currentBlock = bc.genesisBlock
bc.currentBlock.Store(bc.genesisBlock)
bc.hc.SetGenesis(bc.genesisBlock.Header())
bc.hc.SetCurrentHeader(bc.genesisBlock.Header())
bc.currentFastBlock = bc.genesisBlock
bc.currentFastBlock.Store(bc.genesisBlock)
return nil
}
@@ -444,7 +439,7 @@ func (bc *BlockChain) repair(head **types.Block) error {
// Export writes the active chain to the given writer.
func (bc *BlockChain) Export(w io.Writer) error {
return bc.ExportN(w, uint64(0), bc.currentBlock.NumberU64())
return bc.ExportN(w, uint64(0), bc.CurrentBlock().NumberU64())
}
// ExportN writes a subset of the active chain to the given writer.
@@ -488,7 +483,7 @@ func (bc *BlockChain) insert(block *types.Block) {
if err := WriteHeadBlockHash(bc.db, block.Hash()); err != nil {
log.Crit("Failed to insert head block hash", "err", err)
}
bc.currentBlock = block
bc.currentBlock.Store(block)
// If the block is better than our head or is on a different chain, force update heads
if updateHeads {
@@ -497,7 +492,7 @@ func (bc *BlockChain) insert(block *types.Block) {
if err := WriteHeadFastBlockHash(bc.db, block.Hash()); err != nil {
log.Crit("Failed to insert head fast block hash", "err", err)
}
bc.currentFastBlock = block
bc.currentFastBlock.Store(block)
}
}
@@ -648,22 +643,21 @@ func (bc *BlockChain) Stop() {
bc.wg.Wait()
// Ensure the state of a recent block is also stored to disk before exiting.
// It is fine if this state does not exist (fast start/stop cycle), but it is
// advisable to leave an N block gap from the head so 1) a restart loads up
// the last N blocks as sync assistance to remote nodes; 2) a restart during
// a (small) reorg doesn't require deep reprocesses; 3) chain "repair" from
// missing states are constantly tested.
//
// This may be tuned a bit on mainnet if its too annoying to reprocess the last
// N blocks.
// We're writing three different states to catch different restart scenarios:
// - HEAD: So we don't need to reprocess any blocks in the general case
// - HEAD-1: So we don't do large reorgs if our HEAD becomes an uncle
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.Disabled {
triedb := bc.stateCache.TrieDB()
if number := bc.CurrentBlock().NumberU64(); number >= triesInMemory {
recent := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - triesInMemory + 1)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
for _, offset := range []uint64{0, 1, triesInMemory - 1} {
if number := bc.CurrentBlock().NumberU64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
log.Info("Writing cached state to disk", "block", recent.Number(), "hash", recent.Hash(), "root", recent.Root())
if err := triedb.Commit(recent.Root(), true); err != nil {
log.Error("Failed to commit recent state trie", "err", err)
}
}
}
for !bc.triegc.Empty() {
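
For illustration, the offsets loop above commits the state of HEAD, HEAD-1 and HEAD-(triesInMemory-1); triesInMemory is 128 in this package. A hypothetical helper (flushTargets is not a real function here) enumerating those block numbers:

package main

import "fmt"

// flushTargets lists the block numbers whose state roots get committed on
// shutdown: HEAD, HEAD-1 and HEAD-(triesInMemory-1), skipping offsets that
// would underflow close to genesis.
func flushTargets(head, triesInMemory uint64) []uint64 {
	var targets []uint64
	for _, offset := range []uint64{0, 1, triesInMemory - 1} {
		if head > offset {
			targets = append(targets, head-offset)
		}
	}
	return targets
}

func main() {
	fmt.Println(flushTargets(1000, 128)) // [1000 999 873]
}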
@@ -715,22 +709,27 @@ func (bc *BlockChain) Rollback(chain []common.Hash) {
if currentHeader.Hash() == hash {
bc.hc.SetCurrentHeader(bc.GetHeader(currentHeader.ParentHash, currentHeader.Number.Uint64()-1))
}
if bc.currentFastBlock.Hash() == hash {
bc.currentFastBlock = bc.GetBlock(bc.currentFastBlock.ParentHash(), bc.currentFastBlock.NumberU64()-1)
WriteHeadFastBlockHash(bc.db, bc.currentFastBlock.Hash())
if currentFastBlock := bc.CurrentFastBlock(); currentFastBlock.Hash() == hash {
newFastBlock := bc.GetBlock(currentFastBlock.ParentHash(), currentFastBlock.NumberU64()-1)
bc.currentFastBlock.Store(newFastBlock)
WriteHeadFastBlockHash(bc.db, newFastBlock.Hash())
}
if bc.currentBlock.Hash() == hash {
bc.currentBlock = bc.GetBlock(bc.currentBlock.ParentHash(), bc.currentBlock.NumberU64()-1)
WriteHeadBlockHash(bc.db, bc.currentBlock.Hash())
if currentBlock := bc.CurrentBlock(); currentBlock.Hash() == hash {
newBlock := bc.GetBlock(currentBlock.ParentHash(), currentBlock.NumberU64()-1)
bc.currentBlock.Store(newBlock)
WriteHeadBlockHash(bc.db, newBlock.Hash())
}
}
}
// SetReceiptsData computes all the non-consensus fields of the receipts
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) {
func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts types.Receipts) error {
signer := types.MakeSigner(config, block.Number())
transactions, logIndex := block.Transactions(), uint(0)
if len(transactions) != len(receipts) {
return errors.New("transaction and receipt count mismatch")
}
for j := 0; j < len(receipts); j++ {
// The transaction hash can be retrieved from the transaction itself
@@ -758,6 +757,7 @@ func SetReceiptsData(config *params.ChainConfig, block *types.Block, receipts ty
logIndex++
}
}
return nil
}
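
The new error return above exists because the field derivation walks transactions and receipts in lockstep. A tiny sketch of that invariant, using a hypothetical deriveFields helper rather than the real types:

package main

import (
	"errors"
	"fmt"
)

// deriveFields mirrors the lockstep walk in SetReceiptsData: the two slices
// must be the same length, otherwise the loop would index out of range.
func deriveFields(txCount, receiptCount int) error {
	if txCount != receiptCount {
		return errors.New("transaction and receipt count mismatch")
	}
	for i := 0; i < receiptCount; i++ {
		_ = i // fill in TxHash, ContractAddress, GasUsed and log indices for receipt i
	}
	return nil
}

func main() {
	fmt.Println(deriveFields(3, 2)) // transaction and receipt count mismatch
}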
// InsertReceiptChain attempts to complete an already existing header chain with
@@ -798,7 +798,9 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
continue
}
// Compute all the non-consensus fields of the receipts
SetReceiptsData(bc.chainConfig, block, receipts)
if err := SetReceiptsData(bc.chainConfig, block, receipts); err != nil {
return i, fmt.Errorf("failed to set receipts data: %v", err)
}
// Write all the data out into the database
if err := WriteBody(batch, block.Hash(), block.NumberU64(), block.Body()); err != nil {
return i, fmt.Errorf("failed to write block body: %v", err)
@@ -830,11 +832,12 @@ func (bc *BlockChain) InsertReceiptChain(blockChain types.Blocks, receiptChain [
bc.mu.Lock()
head := blockChain[len(blockChain)-1]
if td := bc.GetTd(head.Hash(), head.NumberU64()); td != nil { // Rewind may have occurred, skip in that case
if bc.GetTd(bc.currentFastBlock.Hash(), bc.currentFastBlock.NumberU64()).Cmp(td) < 0 {
currentFastBlock := bc.CurrentFastBlock()
if bc.GetTd(currentFastBlock.Hash(), currentFastBlock.NumberU64()).Cmp(td) < 0 {
if err := WriteHeadFastBlockHash(bc.db, head.Hash()); err != nil {
log.Crit("Failed to update head fast block hash", "err", err)
}
bc.currentFastBlock = head
bc.currentFastBlock.Store(head)
}
}
bc.mu.Unlock()
@@ -881,7 +884,8 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
bc.mu.Lock()
defer bc.mu.Unlock()
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(block.Difficulty(), ptd)
// Irrelevant of the canonical status, write the block itself to the database
@@ -956,14 +960,15 @@ func (bc *BlockChain) WriteBlockWithState(block *types.Block, receipts []*types.
// Second clause in the if statement reduces the vulnerability to selfish mining.
// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
reorg := externTd.Cmp(localTd) > 0
currentBlock = bc.CurrentBlock()
if !reorg && externTd.Cmp(localTd) == 0 {
// Split same-difficulty blocks by number, then at random
reorg = block.NumberU64() < bc.currentBlock.NumberU64() || (block.NumberU64() == bc.currentBlock.NumberU64() && mrand.Float64() < 0.5)
reorg = block.NumberU64() < currentBlock.NumberU64() || (block.NumberU64() == currentBlock.NumberU64() && mrand.Float64() < 0.5)
}
if reorg {
// Reorganise the chain if the parent is not the head block
if block.ParentHash() != bc.currentBlock.Hash() {
if err := bc.reorg(bc.currentBlock, block); err != nil {
if block.ParentHash() != currentBlock.Hash() {
if err := bc.reorg(currentBlock, block); err != nil {
return NonStatTy, err
}
}
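
The head-selection logic above can be read as a small pure function. The sketch below is illustrative only (shouldReorg is not a helper in this package); rand.Float64 stands in for the mrand coin flip.

package main

import (
	"fmt"
	"math/big"
	"math/rand"
)

// shouldReorg reports whether an incoming block should become the new head:
// a strictly higher total difficulty always wins; on equal TD the lower block
// number wins, and equal numbers are split at random to blunt selfish mining.
func shouldReorg(localTd, externTd *big.Int, localNum, externNum uint64) bool {
	switch externTd.Cmp(localTd) {
	case 1:
		return true
	case -1:
		return false
	}
	return externNum < localNum || (externNum == localNum && rand.Float64() < 0.5)
}

func main() {
	fmt.Println(shouldReorg(big.NewInt(100), big.NewInt(101), 10, 10)) // true
}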
@@ -1092,7 +1097,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks) (int, []interface{}, []*ty
case err == consensus.ErrPrunedAncestor:
// Block competing with the canonical chain, store in the db, but don't process
// until the competitor TD goes above the canonical TD
localTd := bc.GetTd(bc.currentBlock.Hash(), bc.currentBlock.NumberU64())
currentBlock := bc.CurrentBlock()
localTd := bc.GetTd(currentBlock.Hash(), currentBlock.NumberU64())
externTd := new(big.Int).Add(bc.GetTd(block.ParentHash(), block.NumberU64()-1), block.Difficulty())
if localTd.Cmp(externTd) > 0 {
if err = bc.WriteBlockWithoutState(block, externTd); err != nil {
@@ -1481,9 +1487,6 @@ func (bc *BlockChain) writeHeader(header *types.Header) error {
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (bc *BlockChain) CurrentHeader() *types.Header {
bc.mu.RLock()
defer bc.mu.RUnlock()
return bc.hc.CurrentHeader()
}

View File

@@ -34,26 +34,6 @@ import (
"github.com/ethereum/go-ethereum/params"
)
// newTestBlockChain creates a blockchain without validation.
func newTestBlockChain(fake bool) *BlockChain {
db, _ := ethdb.NewMemDatabase()
gspec := &Genesis{
Config: params.TestChainConfig,
Difficulty: big.NewInt(1),
}
gspec.MustCommit(db)
engine := ethash.NewFullFaker()
if !fake {
engine = ethash.NewTester()
}
blockchain, err := NewBlockChain(db, nil, gspec.Config, engine, vm.Config{})
if err != nil {
panic(err)
}
blockchain.SetValidator(bproc{})
return blockchain
}
// Test fork of length N starting from block i
func testFork(t *testing.T, blockchain *BlockChain, i, n int, full bool, comparator func(td1, td2 *big.Int)) {
// Copy old chain up to #i into a new db
@@ -183,13 +163,18 @@ func insertChain(done chan bool, blockchain *BlockChain, chain types.Blocks, t *
}
func TestLastBlock(t *testing.T) {
bchain := newTestBlockChain(false)
defer bchain.Stop()
_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer blockchain.Stop()
block := makeBlockChain(bchain.CurrentBlock(), 1, ethash.NewFaker(), bchain.db, 0)[0]
bchain.insert(block)
if block.Hash() != GetHeadBlockHash(bchain.db) {
t.Errorf("Write/Get HeadBlockHash failed")
blocks := makeBlockChain(blockchain.CurrentBlock(), 1, ethash.NewFullFaker(), blockchain.db, 0)
if _, err := blockchain.InsertChain(blocks); err != nil {
t.Fatalf("Failed to insert block: %v", err)
}
if blocks[len(blocks)-1].Hash() != GetHeadBlockHash(blockchain.db) {
t.Fatalf("Write/Get HeadBlockHash failed")
}
}
@@ -337,55 +322,13 @@ func testBrokenChain(t *testing.T, full bool) {
}
}
type bproc struct{}
func (bproc) ValidateBody(*types.Block) error { return nil }
func (bproc) ValidateState(block, parent *types.Block, state *state.StateDB, receipts types.Receipts, usedGas uint64) error {
return nil
}
func (bproc) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config) (types.Receipts, []*types.Log, uint64, error) {
return nil, nil, 0, nil
}
func makeHeaderChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Header {
blocks := makeBlockChainWithDiff(genesis, d, seed)
headers := make([]*types.Header, len(blocks))
for i, block := range blocks {
headers[i] = block.Header()
}
return headers
}
func makeBlockChainWithDiff(genesis *types.Block, d []int, seed byte) []*types.Block {
var chain []*types.Block
for i, difficulty := range d {
header := &types.Header{
Coinbase: common.Address{seed},
Number: big.NewInt(int64(i + 1)),
Difficulty: big.NewInt(int64(difficulty)),
UncleHash: types.EmptyUncleHash,
TxHash: types.EmptyRootHash,
ReceiptHash: types.EmptyRootHash,
Time: big.NewInt(int64(i) + 1),
}
if i == 0 {
header.ParentHash = genesis.Hash()
} else {
header.ParentHash = chain[i-1].Hash()
}
block := types.NewBlockWithHeader(header)
chain = append(chain, block)
}
return chain
}
// Tests that reorganising a long difficult chain after a short easy one
// overwrites the canonical numbers and links in the database.
func TestReorgLongHeaders(t *testing.T) { testReorgLong(t, false) }
func TestReorgLongBlocks(t *testing.T) { testReorgLong(t, true) }
func testReorgLong(t *testing.T, full bool) {
testReorg(t, []int{1, 2, 4}, []int{1, 2, 3, 4}, 10, full)
testReorg(t, []int64{0, 0, -9}, []int64{0, 0, 0, -9}, 393280, full)
}
// Tests that reorganising a short difficult chain after a long easy one
@@ -394,45 +337,82 @@ func TestReorgShortHeaders(t *testing.T) { testReorgShort(t, false) }
func TestReorgShortBlocks(t *testing.T) { testReorgShort(t, true) }
func testReorgShort(t *testing.T, full bool) {
testReorg(t, []int{1, 2, 3, 4}, []int{1, 10}, 11, full)
// Create a long easy chain vs. a short heavy one. Due to difficulty adjustment
// we need a fairly long chain of blocks with different difficulties for a short
// one to become heavier than a long one. The 96 is an empirical value.
easy := make([]int64, 96)
for i := 0; i < len(easy); i++ {
easy[i] = 60
}
diff := make([]int64, len(easy)-1)
for i := 0; i < len(diff); i++ {
diff[i] = -9
}
testReorg(t, easy, diff, 12615120, full)
}
func testReorg(t *testing.T, first, second []int, td int64, full bool) {
bc := newTestBlockChain(true)
defer bc.Stop()
func testReorg(t *testing.T, first, second []int64, td int64, full bool) {
// Create a pristine chain and database
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer blockchain.Stop()
// Insert an easy and a difficult chain afterwards
easyBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), db, len(first), func(i int, b *BlockGen) {
b.OffsetTime(first[i])
})
diffBlocks, _ := GenerateChain(params.TestChainConfig, blockchain.CurrentBlock(), ethash.NewFaker(), db, len(second), func(i int, b *BlockGen) {
b.OffsetTime(second[i])
})
if full {
bc.InsertChain(makeBlockChainWithDiff(bc.genesisBlock, first, 11))
bc.InsertChain(makeBlockChainWithDiff(bc.genesisBlock, second, 22))
if _, err := blockchain.InsertChain(easyBlocks); err != nil {
t.Fatalf("failed to insert easy chain: %v", err)
}
if _, err := blockchain.InsertChain(diffBlocks); err != nil {
t.Fatalf("failed to insert difficult chain: %v", err)
}
} else {
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, first, 11), 1)
bc.InsertHeaderChain(makeHeaderChainWithDiff(bc.genesisBlock, second, 22), 1)
easyHeaders := make([]*types.Header, len(easyBlocks))
for i, block := range easyBlocks {
easyHeaders[i] = block.Header()
}
diffHeaders := make([]*types.Header, len(diffBlocks))
for i, block := range diffBlocks {
diffHeaders[i] = block.Header()
}
if _, err := blockchain.InsertHeaderChain(easyHeaders, 1); err != nil {
t.Fatalf("failed to insert easy chain: %v", err)
}
if _, err := blockchain.InsertHeaderChain(diffHeaders, 1); err != nil {
t.Fatalf("failed to insert difficult chain: %v", err)
}
}
// Check that the chain is valid number and link wise
if full {
prev := bc.CurrentBlock()
for block := bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, bc.GetBlockByNumber(block.NumberU64()-1) {
prev := blockchain.CurrentBlock()
for block := blockchain.GetBlockByNumber(blockchain.CurrentBlock().NumberU64() - 1); block.NumberU64() != 0; prev, block = block, blockchain.GetBlockByNumber(block.NumberU64()-1) {
if prev.ParentHash() != block.Hash() {
t.Errorf("parent block hash mismatch: have %x, want %x", prev.ParentHash(), block.Hash())
}
}
} else {
prev := bc.CurrentHeader()
for header := bc.GetHeaderByNumber(bc.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, bc.GetHeaderByNumber(header.Number.Uint64()-1) {
prev := blockchain.CurrentHeader()
for header := blockchain.GetHeaderByNumber(blockchain.CurrentHeader().Number.Uint64() - 1); header.Number.Uint64() != 0; prev, header = header, blockchain.GetHeaderByNumber(header.Number.Uint64()-1) {
if prev.ParentHash != header.Hash() {
t.Errorf("parent header hash mismatch: have %x, want %x", prev.ParentHash, header.Hash())
}
}
}
// Make sure the chain total difficulty is the correct one
want := new(big.Int).Add(bc.genesisBlock.Difficulty(), big.NewInt(td))
want := new(big.Int).Add(blockchain.genesisBlock.Difficulty(), big.NewInt(td))
if full {
if have := bc.GetTdByHash(bc.CurrentBlock().Hash()); have.Cmp(want) != 0 {
if have := blockchain.GetTdByHash(blockchain.CurrentBlock().Hash()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
} else {
if have := bc.GetTdByHash(bc.CurrentHeader().Hash()); have.Cmp(want) != 0 {
if have := blockchain.GetTdByHash(blockchain.CurrentHeader().Hash()); have.Cmp(want) != 0 {
t.Errorf("total difficulty mismatch: have %v, want %v", have, want)
}
}
@@ -443,19 +423,28 @@ func TestBadHeaderHashes(t *testing.T) { testBadHashes(t, false) }
func TestBadBlockHashes(t *testing.T) { testBadHashes(t, true) }
func testBadHashes(t *testing.T, full bool) {
bc := newTestBlockChain(true)
defer bc.Stop()
// Create a pristine chain and database
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer blockchain.Stop()
// Create a chain, ban a hash and try to import
var err error
if full {
blocks := makeBlockChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10)
blocks := makeBlockChain(blockchain.CurrentBlock(), 3, ethash.NewFaker(), db, 10)
BadHashes[blocks[2].Header().Hash()] = true
_, err = bc.InsertChain(blocks)
defer func() { delete(BadHashes, blocks[2].Header().Hash()) }()
_, err = blockchain.InsertChain(blocks)
} else {
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10)
headers := makeHeaderChain(blockchain.CurrentHeader(), 3, ethash.NewFaker(), db, 10)
BadHashes[headers[2].Hash()] = true
_, err = bc.InsertHeaderChain(headers, 1)
defer func() { delete(BadHashes, headers[2].Hash()) }()
_, err = blockchain.InsertHeaderChain(headers, 1)
}
if err != ErrBlacklistedHash {
t.Errorf("error mismatch: have: %v, want: %v", err, ErrBlacklistedHash)
@@ -468,40 +457,41 @@ func TestReorgBadHeaderHashes(t *testing.T) { testReorgBadHashes(t, false) }
func TestReorgBadBlockHashes(t *testing.T) { testReorgBadHashes(t, true) }
func testReorgBadHashes(t *testing.T, full bool) {
bc := newTestBlockChain(true)
defer bc.Stop()
// Create a pristine chain and database
db, blockchain, err := newCanonical(ethash.NewFaker(), 0, full)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
// Create a chain, import and ban afterwards
headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
blocks := makeBlockChainWithDiff(bc.genesisBlock, []int{1, 2, 3, 4}, 10)
headers := makeHeaderChain(blockchain.CurrentHeader(), 4, ethash.NewFaker(), db, 10)
blocks := makeBlockChain(blockchain.CurrentBlock(), 4, ethash.NewFaker(), db, 10)
if full {
if _, err := bc.InsertChain(blocks); err != nil {
t.Fatalf("failed to import blocks: %v", err)
if _, err = blockchain.InsertChain(blocks); err != nil {
t.Errorf("failed to import blocks: %v", err)
}
if bc.CurrentBlock().Hash() != blocks[3].Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", bc.CurrentBlock().Hash(), blocks[3].Header().Hash())
if blockchain.CurrentBlock().Hash() != blocks[3].Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", blockchain.CurrentBlock().Hash(), blocks[3].Header().Hash())
}
BadHashes[blocks[3].Header().Hash()] = true
defer func() { delete(BadHashes, blocks[3].Header().Hash()) }()
} else {
if _, err := bc.InsertHeaderChain(headers, 1); err != nil {
t.Fatalf("failed to import headers: %v", err)
if _, err = blockchain.InsertHeaderChain(headers, 1); err != nil {
t.Errorf("failed to import headers: %v", err)
}
if bc.CurrentHeader().Hash() != headers[3].Hash() {
t.Errorf("last header hash mismatch: have: %x, want %x", bc.CurrentHeader().Hash(), headers[3].Hash())
if blockchain.CurrentHeader().Hash() != headers[3].Hash() {
t.Errorf("last header hash mismatch: have: %x, want %x", blockchain.CurrentHeader().Hash(), headers[3].Hash())
}
BadHashes[headers[3].Hash()] = true
defer func() { delete(BadHashes, headers[3].Hash()) }()
}
blockchain.Stop()
// Create a new BlockChain and check that it rolled back the state.
ncm, err := NewBlockChain(bc.db, nil, bc.chainConfig, ethash.NewFaker(), vm.Config{})
ncm, err := NewBlockChain(blockchain.db, nil, blockchain.chainConfig, ethash.NewFaker(), vm.Config{})
if err != nil {
t.Fatalf("failed to create new chain manager: %v", err)
}
defer ncm.Stop()
if full {
if ncm.CurrentBlock().Hash() != blocks[2].Header().Hash() {
t.Errorf("last block hash mismatch: have: %x, want %x", ncm.CurrentBlock().Hash(), blocks[2].Header().Hash())
@@ -514,6 +504,7 @@ func testReorgBadHashes(t *testing.T, full bool) {
t.Errorf("last header hash mismatch: have: %x, want %x", ncm.CurrentHeader().Hash(), headers[2].Hash())
}
}
ncm.Stop()
}
// Tests chain insertions in the face of one entity containing an invalid nonce.
@@ -989,10 +980,13 @@ done:
// Tests if the canonical block can be fetched from the database during chain insertion.
func TestCanonicalBlockRetrieval(t *testing.T) {
bc := newTestBlockChain(true)
defer bc.Stop()
_, blockchain, err := newCanonical(ethash.NewFaker(), 0, true)
if err != nil {
t.Fatalf("failed to create pristine chain: %v", err)
}
defer blockchain.Stop()
chain, _ := GenerateChain(bc.chainConfig, bc.genesisBlock, ethash.NewFaker(), bc.db, 10, func(i int, gen *BlockGen) {})
chain, _ := GenerateChain(blockchain.chainConfig, blockchain.genesisBlock, ethash.NewFaker(), blockchain.db, 10, func(i int, gen *BlockGen) {})
var pend sync.WaitGroup
pend.Add(len(chain))
@@ -1003,14 +997,14 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
// try to retrieve a block by its canonical hash and see if the block data can be retrieved.
for {
ch := GetCanonicalHash(bc.db, block.NumberU64())
ch := GetCanonicalHash(blockchain.db, block.NumberU64())
if ch == (common.Hash{}) {
continue // busy wait for canonical hash to be written
}
if ch != block.Hash() {
t.Fatalf("unknown canonical hash, want %s, got %s", block.Hash().Hex(), ch.Hex())
}
fb := GetBlock(bc.db, ch, block.NumberU64())
fb := GetBlock(blockchain.db, ch, block.NumberU64())
if fb == nil {
t.Fatalf("unable to retrieve block %d for canonical hash: %s", block.NumberU64(), ch.Hex())
}
@@ -1021,7 +1015,7 @@ func TestCanonicalBlockRetrieval(t *testing.T) {
}
}(chain[i])
if _, err := bc.InsertChain(types.Blocks{chain[i]}); err != nil {
if _, err := blockchain.InsertChain(types.Blocks{chain[i]}); err != nil {
t.Fatalf("failed to insert block %d: %v", i, err)
}
}

View File

@@ -82,11 +82,23 @@ func (b *BlockGen) SetExtra(data []byte) {
// added. Notably, contract code relying on the BLOCKHASH instruction
// will panic during execution.
func (b *BlockGen) AddTx(tx *types.Transaction) {
b.AddTxWithChain(nil, tx)
}
// AddTxWithChain adds a transaction to the generated block. If no coinbase has
// been set, the block's coinbase is set to the zero address.
//
// AddTxWithChain panics if the transaction cannot be executed. In addition to
// the protocol-imposed limitations (gas limit, etc.), there are some
// further limitations on the content of transactions that can be
// added. If contract code relies on the BLOCKHASH instruction, the requested
// block hash is resolved via the supplied chain instead of panicking.
func (b *BlockGen) AddTxWithChain(bc *BlockChain, tx *types.Transaction) {
if b.gasPool == nil {
b.SetCoinbase(common.Address{})
}
b.statedb.Prepare(tx.Hash(), common.Hash{}, len(b.txs))
receipt, _, err := ApplyTransaction(b.config, nil, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
receipt, _, err := ApplyTransaction(b.config, bc, &b.header.Coinbase, b.gasPool, b.statedb, b.header, tx, &b.header.GasUsed, vm.Config{})
if err != nil {
panic(err)
}

View File

@@ -47,6 +47,7 @@ var (
headHeaderKey = []byte("LastHeader")
headBlockKey = []byte("LastBlock")
headFastKey = []byte("LastFast")
trieSyncKey = []byte("TrieSync")
// Data item prefixes (use single byte to avoid mixing data types, avoid `i`).
headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header
@@ -70,8 +71,8 @@ var (
ErrChainConfigNotFound = errors.New("ChainConfig not found") // general config not found error
preimageCounter = metrics.NewCounter("db/preimage/total")
preimageHitCounter = metrics.NewCounter("db/preimage/hits")
preimageCounter = metrics.NewRegisteredCounter("db/preimage/total", nil)
preimageHitCounter = metrics.NewRegisteredCounter("db/preimage/hits", nil)
)
// TxLookupEntry is a positional metadata to help looking up the data content of
@@ -146,6 +147,16 @@ func GetHeadFastBlockHash(db DatabaseReader) common.Hash {
return common.BytesToHash(data)
}
// GetTrieSyncProgress retrieves the number of trie nodes fast synced to allow
// reporting correct numbers across restarts.
func GetTrieSyncProgress(db DatabaseReader) uint64 {
data, _ := db.Get(trieSyncKey)
if len(data) == 0 {
return 0
}
return new(big.Int).SetBytes(data).Uint64()
}
// GetHeaderRLP retrieves a block header in its raw RLP database encoding, or nil
// if the header's not found.
func GetHeaderRLP(db DatabaseReader, hash common.Hash, number uint64) rlp.RawValue {
@@ -374,6 +385,15 @@ func WriteHeadFastBlockHash(db ethdb.Putter, hash common.Hash) error {
return nil
}
// WriteTrieSyncProgress stores the fast sync trie progress counter to support
// retrieving it across restarts.
func WriteTrieSyncProgress(db ethdb.Putter, count uint64) error {
if err := db.Put(trieSyncKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
log.Crit("Failed to store fast sync trie progress", "err", err)
}
return nil
}
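
Both helpers above encode the counter as the big-endian bytes of a big.Int under trieSyncKey. A self-contained sketch of the same round-trip, with a plain map standing in for the database and hypothetical names putCount/getCount:

package main

import (
	"fmt"
	"math/big"
)

// store stands in for the database entry kept under trieSyncKey.
var store = map[string][]byte{}

// putCount encodes the progress counter the same way as WriteTrieSyncProgress:
// the big-endian bytes of a big.Int.
func putCount(count uint64) {
	store["TrieSync"] = new(big.Int).SetUint64(count).Bytes()
}

// getCount decodes it again, defaulting to zero when nothing is stored yet.
func getCount() uint64 {
	data := store["TrieSync"]
	if len(data) == 0 {
		return 0
	}
	return new(big.Int).SetBytes(data).Uint64()
}

func main() {
	fmt.Println(getCount()) // 0
	putCount(123456)
	fmt.Println(getCount()) // 123456
}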
// WriteHeader serializes a block header into the database.
func WriteHeader(db ethdb.Putter, header *types.Header) error {
data, err := rlp.EncodeToBytes(header)

View File

@@ -118,10 +118,12 @@ func TestSetupGenesis(t *testing.T) {
// Commit the 'old' genesis block with Homestead transition at #2.
// Advance to block #4, past the homestead transition block of customg.
genesis := oldcustomg.MustCommit(db)
bc, _ := NewBlockChain(db, nil, oldcustomg.Config, ethash.NewFullFaker(), vm.Config{})
defer bc.Stop()
bc.SetValidator(bproc{})
bc.InsertChain(makeBlockChainWithDiff(genesis, []int{2, 3, 4, 5}, 0))
blocks, _ := GenerateChain(oldcustomg.Config, genesis, ethash.NewFaker(), db, 4, nil)
bc.InsertChain(blocks)
bc.CurrentBlock()
// This should return a compatibility error.
return SetupGenesisBlock(db, &customg)

View File

@@ -32,6 +32,7 @@ import (
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/params"
"github.com/hashicorp/golang-lru"
"sync/atomic"
)
const (
@@ -51,8 +52,8 @@ type HeaderChain struct {
chainDb ethdb.Database
genesisHeader *types.Header
currentHeader *types.Header // Current head of the header chain (may be above the block chain!)
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
currentHeader atomic.Value // Current head of the header chain (may be above the block chain!)
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time)
headerCache *lru.Cache // Cache for the most recent block headers
tdCache *lru.Cache // Cache for the most recent block total difficulties
@@ -95,13 +96,13 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c
return nil, ErrNoGenesis
}
hc.currentHeader = hc.genesisHeader
hc.currentHeader.Store(hc.genesisHeader)
if head := GetHeadBlockHash(chainDb); head != (common.Hash{}) {
if chead := hc.GetHeaderByHash(head); chead != nil {
hc.currentHeader = chead
hc.currentHeader.Store(chead)
}
}
hc.currentHeaderHash = hc.currentHeader.Hash()
hc.currentHeaderHash = hc.CurrentHeader().Hash()
return hc, nil
}
@@ -139,7 +140,7 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
if ptd == nil {
return NonStatTy, consensus.ErrUnknownAncestor
}
localTd := hc.GetTd(hc.currentHeaderHash, hc.currentHeader.Number.Uint64())
localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64())
externTd := new(big.Int).Add(header.Difficulty, ptd)
// Irrelevant of the canonical status, write the td and header to the database
@@ -181,7 +182,8 @@ func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, er
if err := WriteHeadHeaderHash(hc.chainDb, hash); err != nil {
log.Crit("Failed to insert head header hash", "err", err)
}
hc.currentHeaderHash, hc.currentHeader = hash, types.CopyHeader(header)
hc.currentHeaderHash = hash
hc.currentHeader.Store(types.CopyHeader(header))
status = CanonStatTy
} else {
@@ -383,7 +385,7 @@ func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
return hc.currentHeader
return hc.currentHeader.Load().(*types.Header)
}
// SetCurrentHeader sets the current head header of the canonical chain.
@@ -391,7 +393,7 @@ func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
if err := WriteHeadHeaderHash(hc.chainDb, head.Hash()); err != nil {
log.Crit("Failed to insert head header hash", "err", err)
}
hc.currentHeader = head
hc.currentHeader.Store(head)
hc.currentHeaderHash = head.Hash()
}
@@ -403,19 +405,20 @@ type DeleteCallback func(common.Hash, uint64)
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
height := uint64(0)
if hc.currentHeader != nil {
height = hc.currentHeader.Number.Uint64()
if hdr := hc.CurrentHeader(); hdr != nil {
height = hdr.Number.Uint64()
}
for hc.currentHeader != nil && hc.currentHeader.Number.Uint64() > head {
hash := hc.currentHeader.Hash()
num := hc.currentHeader.Number.Uint64()
for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
hash := hdr.Hash()
num := hdr.Number.Uint64()
if delFn != nil {
delFn(hash, num)
}
DeleteHeader(hc.chainDb, hash, num)
DeleteTd(hc.chainDb, hash, num)
hc.currentHeader = hc.GetHeader(hc.currentHeader.ParentHash, hc.currentHeader.Number.Uint64()-1)
hc.currentHeader.Store(hc.GetHeader(hdr.ParentHash, hdr.Number.Uint64()-1))
}
// Roll back the canonical chain numbering
for i := height; i > head; i-- {
@@ -426,10 +429,10 @@ func (hc *HeaderChain) SetHead(head uint64, delFn DeleteCallback) {
hc.tdCache.Purge()
hc.numberCache.Purge()
if hc.currentHeader == nil {
hc.currentHeader = hc.genesisHeader
if hc.CurrentHeader() == nil {
hc.currentHeader.Store(hc.genesisHeader)
}
hc.currentHeaderHash = hc.currentHeader.Hash()
hc.currentHeaderHash = hc.CurrentHeader().Hash()
if err := WriteHeadHeaderHash(hc.chainDb, hc.currentHeaderHash); err != nil {
log.Crit("Failed to reset head header hash", "err", err)

View File

@@ -87,20 +87,20 @@ var (
var (
// Metrics for the pending pool
pendingDiscardCounter = metrics.NewCounter("txpool/pending/discard")
pendingReplaceCounter = metrics.NewCounter("txpool/pending/replace")
pendingRateLimitCounter = metrics.NewCounter("txpool/pending/ratelimit") // Dropped due to rate limiting
pendingNofundsCounter = metrics.NewCounter("txpool/pending/nofunds") // Dropped due to out-of-funds
pendingDiscardCounter = metrics.NewRegisteredCounter("txpool/pending/discard", nil)
pendingReplaceCounter = metrics.NewRegisteredCounter("txpool/pending/replace", nil)
pendingRateLimitCounter = metrics.NewRegisteredCounter("txpool/pending/ratelimit", nil) // Dropped due to rate limiting
pendingNofundsCounter = metrics.NewRegisteredCounter("txpool/pending/nofunds", nil) // Dropped due to out-of-funds
// Metrics for the queued pool
queuedDiscardCounter = metrics.NewCounter("txpool/queued/discard")
queuedReplaceCounter = metrics.NewCounter("txpool/queued/replace")
queuedRateLimitCounter = metrics.NewCounter("txpool/queued/ratelimit") // Dropped due to rate limiting
queuedNofundsCounter = metrics.NewCounter("txpool/queued/nofunds") // Dropped due to out-of-funds
queuedDiscardCounter = metrics.NewRegisteredCounter("txpool/queued/discard", nil)
queuedReplaceCounter = metrics.NewRegisteredCounter("txpool/queued/replace", nil)
queuedRateLimitCounter = metrics.NewRegisteredCounter("txpool/queued/ratelimit", nil) // Dropped due to rate limiting
queuedNofundsCounter = metrics.NewRegisteredCounter("txpool/queued/nofunds", nil) // Dropped due to out-of-funds
// General tx metrics
invalidTxCounter = metrics.NewCounter("txpool/invalid")
underpricedTxCounter = metrics.NewCounter("txpool/underpriced")
invalidTxCounter = metrics.NewRegisteredCounter("txpool/invalid", nil)
underpricedTxCounter = metrics.NewRegisteredCounter("txpool/underpriced", nil)
)
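
The counters above now go through the registered constructor. A short usage sketch, assuming the go-metrics style API this package wraps (a nil registry is taken to mean the default registry; Inc bumps the counter):

package main

import "github.com/ethereum/go-ethereum/metrics"

// invalidTxCounter is created and registered in one step; the nil registry is
// assumed to select the package's default registry.
var invalidTxCounter = metrics.NewRegisteredCounter("txpool/invalid", nil)

func main() {
	invalidTxCounter.Inc(1) // bump whenever an invalid transaction is rejected
}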
// TxStatus is the current status of a transaction as seen by the pool.
@@ -877,15 +877,14 @@ func (pool *TxPool) removeTx(hash common.Hash) {
// Remove the transaction from the pending lists and reset the account nonce
if pending := pool.pending[addr]; pending != nil {
if removed, invalids := pending.Remove(tx); removed {
// If no more transactions are left, remove the list
// If no more pending transactions are left, remove the list
if pending.Empty() {
delete(pool.pending, addr)
delete(pool.beats, addr)
} else {
// Otherwise postpone any invalidated transactions
for _, tx := range invalids {
pool.enqueueTx(tx.Hash(), tx)
}
}
// Postpone any invalidated transactions
for _, tx := range invalids {
pool.enqueueTx(tx.Hash(), tx)
}
// Update the account nonce if needed
if nonce := tx.Nonce(); pool.pendingState.GetNonce(addr) > nonce {

View File

@@ -557,74 +557,112 @@ func TestTransactionDropping(t *testing.T) {
func TestTransactionPostponing(t *testing.T) {
t.Parallel()
// Create a test account and fund it
pool, key := setupTxPool()
// Create the pool to test the postponing with
db, _ := ethdb.NewMemDatabase()
statedb, _ := state.New(common.Hash{}, state.NewDatabase(db))
blockchain := &testBlockChain{statedb, 1000000, new(event.Feed)}
pool := NewTxPool(testTxPoolConfig, params.TestChainConfig, blockchain)
defer pool.Stop()
account, _ := deriveSender(transaction(0, 0, key))
pool.currentState.AddBalance(account, big.NewInt(1000))
// Create two test accounts to produce different gap profiles with
keys := make([]*ecdsa.PrivateKey, 2)
accs := make([]common.Address, len(keys))
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
accs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(50100))
}
// Add a batch consecutive pending transactions for validation
txns := []*types.Transaction{}
for i := 0; i < 100; i++ {
var tx *types.Transaction
if i%2 == 0 {
tx = transaction(uint64(i), 100, key)
} else {
tx = transaction(uint64(i), 500, key)
txs := []*types.Transaction{}
for i, key := range keys {
for j := 0; j < 100; j++ {
var tx *types.Transaction
if (i+j)%2 == 0 {
tx = transaction(uint64(j), 25000, key)
} else {
tx = transaction(uint64(j), 50000, key)
}
txs = append(txs, tx)
}
}
for i, err := range pool.AddRemotes(txs) {
if err != nil {
t.Fatalf("tx %d: failed to add transactions: %v", i, err)
}
pool.promoteTx(account, tx.Hash(), tx)
txns = append(txns, tx)
}
// Check that pre and post validations leave the pool as is
if pool.pending[account].Len() != len(txns) {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), len(txns))
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
}
if len(pool.queue) != 0 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 0)
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
}
if len(pool.all) != len(txns) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txns))
if len(pool.all) != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
}
pool.lockedReset(nil, nil)
if pool.pending[account].Len() != len(txns) {
t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), len(txns))
if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
}
if len(pool.queue) != 0 {
t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 0)
t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
}
if len(pool.all) != len(txns) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txns))
if len(pool.all) != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs))
}
// Reduce the balance of the account, and check that transactions are reorganised
pool.currentState.AddBalance(account, big.NewInt(-750))
for _, addr := range accs {
pool.currentState.AddBalance(addr, big.NewInt(-1))
}
pool.lockedReset(nil, nil)
if _, ok := pool.pending[account].txs.items[txns[0].Nonce()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txns[0])
// The first account's first transaction remains valid; check that subsequent
// ones are either filtered out or queued up for later.
if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
}
if _, ok := pool.queue[account].txs.items[txns[0].Nonce()]; ok {
t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txns[0])
if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok {
t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
}
for i, tx := range txns[1:] {
for i, tx := range txs[1:100] {
if i%2 == 1 {
if _, ok := pool.pending[account].txs.items[tx.Nonce()]; ok {
if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
}
if _, ok := pool.queue[account].txs.items[tx.Nonce()]; !ok {
if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok {
t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
}
} else {
if _, ok := pool.pending[account].txs.items[tx.Nonce()]; ok {
if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
}
if _, ok := pool.queue[account].txs.items[tx.Nonce()]; ok {
if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
}
}
}
if len(pool.all) != len(txns)/2 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txns)/2)
// The second account's first transaction became invalid; check that all transactions
// are either filtered out or queued up for later.
if pool.pending[accs[1]] != nil {
t.Errorf("invalidated account still has pending transactions")
}
for i, tx := range txs[100:] {
if i%2 == 1 {
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok {
t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx)
}
} else {
if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok {
t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx)
}
}
}
if len(pool.all) != len(txs)/2 {
t.Errorf("total transaction mismatch: have %d, want %d", len(pool.all), len(txs)/2)
}
}
@@ -949,11 +987,11 @@ func testTransactionLimitingEquivalency(t *testing.T, origin uint64) {
account2, _ := deriveSender(transaction(0, 0, key2))
pool2.currentState.AddBalance(account2, big.NewInt(1000000))
txns := []*types.Transaction{}
txs := []*types.Transaction{}
for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
txns = append(txns, transaction(origin+i, 100000, key2))
txs = append(txs, transaction(origin+i, 100000, key2))
}
pool2.AddRemotes(txns)
pool2.AddRemotes(txs)
// Ensure the batch optimization honors the same pool mechanics
if len(pool1.pending) != len(pool2.pending) {
@@ -1124,7 +1162,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
defer sub.Unsubscribe()
// Create a number of test accounts and fund them
keys := make([]*ecdsa.PrivateKey, 3)
keys := make([]*ecdsa.PrivateKey, 4)
for i := 0; i < len(keys); i++ {
keys[i], _ = crypto.GenerateKey()
pool.currentState.AddBalance(crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
@@ -1136,24 +1174,28 @@ func TestTransactionPoolRepricing(t *testing.T) {
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0]))
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0]))
txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1]))
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[1]))
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[1]))
txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[1]))
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[1]))
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])
txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[2]))
txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2]))
txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2]))
ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
// Import the batch and that both pending and queued transactions match up
pool.AddRemotes(txs)
pool.AddLocal(ltx)
pending, queued := pool.Stats()
if pending != 4 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
if pending != 7 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
}
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
}
if err := validateEvents(events, 4); err != nil {
if err := validateEvents(events, 7); err != nil {
t.Fatalf("original event firing failed: %v", err)
}
if err := validateTxPoolInternals(pool); err != nil {
@@ -1166,8 +1208,8 @@ func TestTransactionPoolRepricing(t *testing.T) {
if pending != 2 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
}
if queued != 3 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
if queued != 5 {
t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
}
if err := validateEvents(events, 0); err != nil {
t.Fatalf("reprice event firing failed: %v", err)
@@ -1179,7 +1221,10 @@ func TestTransactionPoolRepricing(t *testing.T) {
if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); err != ErrUnderpriced {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); err != ErrUnderpriced {
t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); err != ErrUnderpriced {
t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, ErrUnderpriced)
}
if err := validateEvents(events, 0); err != nil {
@@ -1189,7 +1234,7 @@ func TestTransactionPoolRepricing(t *testing.T) {
t.Fatalf("pool internal state corrupted: %v", err)
}
// However we can add local underpriced transactions
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[2])
tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
if err := pool.AddLocal(tx); err != nil {
t.Fatalf("failed to add underpriced local transaction: %v", err)
}
@@ -1202,6 +1247,22 @@ func TestTransactionPoolRepricing(t *testing.T) {
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
// And we can fill gaps with properly priced transactions
if err := pool.AddRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
if err := pool.AddRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
t.Fatalf("failed to add pending transaction: %v", err)
}
if err := pool.AddRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
t.Fatalf("failed to add queued transaction: %v", err)
}
if err := validateEvents(events, 5); err != nil {
t.Fatalf("post-reprice event firing failed: %v", err)
}
if err := validateTxPoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
}
// Tests that setting the transaction pool gas price to a higher value does not

View File

@@ -251,26 +251,12 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) {
return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil
}
var (
// errNotOnCurve is returned if a point being unmarshalled as a bn256 elliptic
// curve point is not on the curve.
errNotOnCurve = errors.New("point not on elliptic curve")
// errInvalidCurvePoint is returned if a point being unmarshalled as a bn256
// elliptic curve point is invalid.
errInvalidCurvePoint = errors.New("invalid elliptic curve point")
)
// newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point,
// returning it, or an error if the point is invalid.
func newCurvePoint(blob []byte) (*bn256.G1, error) {
p, onCurve := new(bn256.G1).Unmarshal(blob)
if !onCurve {
return nil, errNotOnCurve
}
gx, gy, _, _ := p.CurvePoints()
if gx.Cmp(bn256.P) >= 0 || gy.Cmp(bn256.P) >= 0 {
return nil, errInvalidCurvePoint
p := new(bn256.G1)
if _, err := p.Unmarshal(blob); err != nil {
return nil, err
}
return p, nil
}
@@ -278,14 +264,9 @@ func newCurvePoint(blob []byte) (*bn256.G1, error) {
// newTwistPoint unmarshals a binary blob into a bn256 elliptic curve point,
// returning it, or an error if the point is invalid.
func newTwistPoint(blob []byte) (*bn256.G2, error) {
p, onCurve := new(bn256.G2).Unmarshal(blob)
if !onCurve {
return nil, errNotOnCurve
}
x2, y2, _, _ := p.CurvePoints()
if x2.Real().Cmp(bn256.P) >= 0 || x2.Imag().Cmp(bn256.P) >= 0 ||
y2.Real().Cmp(bn256.P) >= 0 || y2.Imag().Cmp(bn256.P) >= 0 {
return nil, errInvalidCurvePoint
p := new(bn256.G2)
if _, err := p.Unmarshal(blob); err != nil {
return nil, err
}
return p, nil
}

View File

@@ -30,6 +30,8 @@ import (
var (
bigZero = new(big.Int)
tt255 = math.BigPow(2, 255)
tt256 = math.BigPow(2, 256)
errWriteProtection = errors.New("evm: write protection")
errReturnDataOutOfBounds = errors.New("evm: return data out of bounds")
errExecutionReverted = errors.New("evm: execution reverted")
@@ -191,50 +193,71 @@ func opGt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack
}
func opSlt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := math.S256(stack.pop()), math.S256(stack.pop())
if x.Cmp(math.S256(y)) < 0 {
stack.push(evm.interpreter.intPool.get().SetUint64(1))
} else {
stack.push(new(big.Int))
}
x, y := stack.pop(), stack.peek()
evm.interpreter.intPool.put(x, y)
xSign := x.Cmp(tt255)
ySign := y.Cmp(tt255)
switch {
case xSign >= 0 && ySign < 0:
y.SetUint64(1)
case xSign < 0 && ySign >= 0:
y.SetUint64(0)
default:
if x.Cmp(y) < 0 {
y.SetUint64(1)
} else {
y.SetUint64(0)
}
}
evm.interpreter.intPool.put(x)
return nil, nil
}
func opSgt(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := math.S256(stack.pop()), math.S256(stack.pop())
if x.Cmp(y) > 0 {
stack.push(evm.interpreter.intPool.get().SetUint64(1))
} else {
stack.push(new(big.Int))
}
x, y := stack.pop(), stack.peek()
evm.interpreter.intPool.put(x, y)
xSign := x.Cmp(tt255)
ySign := y.Cmp(tt255)
switch {
case xSign >= 0 && ySign < 0:
y.SetUint64(0)
case xSign < 0 && ySign >= 0:
y.SetUint64(1)
default:
if x.Cmp(y) > 0 {
y.SetUint64(1)
} else {
y.SetUint64(0)
}
}
evm.interpreter.intPool.put(x)
return nil, nil
}
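
opSlt and opSgt above avoid converting to a signed representation: comparing the operand against tt255 (2^255) is enough to decide its two's-complement sign. A standalone illustration with a hypothetical signOf helper:

package main

import (
	"fmt"
	"math/big"
)

var tt255 = new(big.Int).Lsh(big.NewInt(1), 255) // 2^255

// signOf reports -1 for 256-bit words that are negative under two's-complement
// interpretation (i.e. >= 2^255) and +1 for the rest, without building a
// signed representation first.
func signOf(x *big.Int) int {
	if x.Cmp(tt255) >= 0 {
		return -1
	}
	return 1
}

func main() {
	minusOne := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 256), big.NewInt(1))
	fmt.Println(signOf(big.NewInt(7)), signOf(minusOne)) // 1 -1
}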
func opEq(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x, y := stack.pop(), stack.pop()
x, y := stack.pop(), stack.peek()
if x.Cmp(y) == 0 {
stack.push(evm.interpreter.intPool.get().SetUint64(1))
y.SetUint64(1)
} else {
stack.push(new(big.Int))
y.SetUint64(0)
}
evm.interpreter.intPool.put(x, y)
evm.interpreter.intPool.put(x)
return nil, nil
}
func opIszero(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
x := stack.pop()
x := stack.peek()
if x.Sign() > 0 {
stack.push(new(big.Int))
x.SetUint64(0)
} else {
stack.push(evm.interpreter.intPool.get().SetUint64(1))
x.SetUint64(1)
}
evm.interpreter.intPool.put(x)
return nil, nil
}
@@ -302,6 +325,66 @@ func opMulmod(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *S
return nil, nil
}
// opSHL implements Shift Left
// The SHL instruction (shift left) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the left by arg1 number of bits.
func opSHL(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := math.U256(stack.pop()), math.U256(stack.peek())
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
if shift.Cmp(common.Big256) >= 0 {
value.SetUint64(0)
return nil, nil
}
n := uint(shift.Uint64())
math.U256(value.Lsh(value, n))
return nil, nil
}
// opSHR implements Logical Shift Right
// The SHR instruction (logical shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with zero fill.
func opSHR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Note, second operand is left in the stack; accumulate result into it, and no need to push it afterwards
shift, value := math.U256(stack.pop()), math.U256(stack.peek())
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
if shift.Cmp(common.Big256) >= 0 {
value.SetUint64(0)
return nil, nil
}
n := uint(shift.Uint64())
math.U256(value.Rsh(value, n))
return nil, nil
}
// opSAR implements Arithmetic Shift Right
// The SAR instruction (arithmetic shift right) pops 2 values from the stack, first arg1 and then arg2,
// and pushes on the stack arg2 shifted to the right by arg1 number of bits with sign extension.
func opSAR(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
// Note, S256 returns (potentially) a new bigint, so we're popping, not peeking this one
shift, value := math.U256(stack.pop()), math.S256(stack.pop())
defer evm.interpreter.intPool.put(shift) // First operand back into the pool
if shift.Cmp(common.Big256) >= 0 {
if value.Sign() > 0 {
value.SetUint64(0)
} else {
value.SetInt64(-1)
}
stack.push(math.U256(value))
return nil, nil
}
n := uint(shift.Uint64())
value.Rsh(value, n)
stack.push(math.U256(value))
return nil, nil
}
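
The SAR semantics above can be checked against the EIP-145 vectors in the tests further down. The sketch below is an illustrative re-implementation (sar is not the interpreter code): the value is interpreted as signed, shifted with floor semantics, and mapped back into the 256-bit unsigned range.

package main

import (
	"fmt"
	"math/big"
)

var (
	tt256   = new(big.Int).Lsh(big.NewInt(1), 256) // 2^256
	tt255   = new(big.Int).Lsh(big.NewInt(1), 255) // 2^255
	maxU256 = new(big.Int).Sub(tt256, big.NewInt(1))
)

// sar shifts value right by shift bits, treating value as a signed 256-bit
// word, and returns the unsigned 256-bit encoding of the result.
func sar(value, shift *big.Int) *big.Int {
	v := new(big.Int).Set(value)
	if v.Cmp(tt255) >= 0 { // negative: move to the signed representation
		v.Sub(v, tt256)
	}
	if shift.Cmp(big.NewInt(256)) >= 0 {
		if v.Sign() < 0 {
			return new(big.Int).Set(maxU256) // all ones
		}
		return new(big.Int) // zero
	}
	v.Rsh(v, uint(shift.Uint64())) // big.Int Rsh floors, i.e. arithmetic shift
	if v.Sign() < 0 {
		v.Add(v, tt256) // map back into [0, 2^256)
	}
	return v
}

func main() {
	minI256 := new(big.Int).Set(tt255) // 0x80..00, the most negative word
	fmt.Printf("%064x\n", sar(minI256, big.NewInt(1)))   // c000..00
	fmt.Printf("%064x\n", sar(minI256, big.NewInt(255))) // ffff..ff
}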
func opSha3(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error) {
offset, size := stack.pop(), stack.pop()
data := memory.Get(offset.Int64(), size.Int64())

View File

@@ -24,6 +24,48 @@ import (
"github.com/ethereum/go-ethereum/params"
)
type twoOperandTest struct {
x string
y string
expected string
}
func testTwoOperandOp(t *testing.T, tests []twoOperandTest, opFn func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error)) {
var (
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
stack = newstack()
pc = uint64(0)
)
for i, test := range tests {
x := new(big.Int).SetBytes(common.Hex2Bytes(test.x))
shift := new(big.Int).SetBytes(common.Hex2Bytes(test.y))
expected := new(big.Int).SetBytes(common.Hex2Bytes(test.expected))
stack.push(x)
stack.push(shift)
opFn(&pc, env, nil, nil, stack)
actual := stack.pop()
if actual.Cmp(expected) != 0 {
t.Errorf("Testcase %d, expected %v, got %v", i, expected, actual)
}
// Check pool usage
// 1. pool is not allowed to contain anything on the stack
// 2. pool is not allowed to contain the same pointers twice
if env.interpreter.intPool.pool.len() > 0 {
poolvals := make(map[*big.Int]struct{})
poolvals[actual] = struct{}{}
for env.interpreter.intPool.pool.len() > 0 {
key := env.interpreter.intPool.get()
if _, exist := poolvals[key]; exist {
t.Errorf("Testcase %d, pool contains double-entry", i)
}
poolvals[key] = struct{}{}
}
}
}
}
func TestByteOp(t *testing.T) {
var (
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
@@ -57,6 +99,103 @@ func TestByteOp(t *testing.T) {
}
}
func TestSHL(t *testing.T) {
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shl-shift-left
tests := []twoOperandTest{
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000002"},
{"0000000000000000000000000000000000000000000000000000000000000001", "ff", "8000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000001", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000001", "0101", "0000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "8000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe"},
}
testTwoOperandOp(t, tests, opSHL)
}
func TestSHR(t *testing.T) {
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#shr-logical-shift-right
tests := []twoOperandTest{
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000000", "01", "4000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000000", "ff", "0000000000000000000000000000000000000000000000000000000000000001"},
{"8000000000000000000000000000000000000000000000000000000000000000", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000000", "0101", "0000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "0000000000000000000000000000000000000000000000000000000000000001"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
}
testTwoOperandOp(t, tests, opSHR)
}
func TestSAR(t *testing.T) {
// Testcases from https://github.com/ethereum/EIPs/blob/master/EIPS/eip-145.md#sar-arithmetic-shift-right
tests := []twoOperandTest{
{"0000000000000000000000000000000000000000000000000000000000000001", "00", "0000000000000000000000000000000000000000000000000000000000000001"},
{"0000000000000000000000000000000000000000000000000000000000000001", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000000", "01", "c000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000000", "ff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"8000000000000000000000000000000000000000000000000000000000000000", "0100", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"8000000000000000000000000000000000000000000000000000000000000000", "0101", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "00", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "01", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"},
{"0000000000000000000000000000000000000000000000000000000000000000", "01", "0000000000000000000000000000000000000000000000000000000000000000"},
{"4000000000000000000000000000000000000000000000000000000000000000", "fe", "0000000000000000000000000000000000000000000000000000000000000001"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "f8", "000000000000000000000000000000000000000000000000000000000000007f"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "fe", "0000000000000000000000000000000000000000000000000000000000000001"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0100", "0000000000000000000000000000000000000000000000000000000000000000"},
}
testTwoOperandOp(t, tests, opSAR)
}
func TestSGT(t *testing.T) {
tests := []twoOperandTest{
{"0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
{"0000000000000000000000000000000000000000000000000000000000000001", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000001", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd", "0000000000000000000000000000000000000000000000000000000000000001"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb", "0000000000000000000000000000000000000000000000000000000000000000"},
}
testTwoOperandOp(t, tests, opSgt)
}
func TestSLT(t *testing.T) {
tests := []twoOperandTest{
{"0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
{"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"0000000000000000000000000000000000000000000000000000000000000001", "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000001"},
{"8000000000000000000000000000000000000000000000000000000000000001", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000000"},
{"8000000000000000000000000000000000000000000000000000000000000001", "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "0000000000000000000000000000000000000000000000000000000000000000"},
{"7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", "8000000000000000000000000000000000000000000000000000000000000001", "0000000000000000000000000000000000000000000000000000000000000001"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd", "0000000000000000000000000000000000000000000000000000000000000000"},
{"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd", "fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffb", "0000000000000000000000000000000000000000000000000000000000000001"},
}
testTwoOperandOp(t, tests, opSlt)
}
func opBenchmark(bench *testing.B, op func(pc *uint64, evm *EVM, contract *Contract, memory *Memory, stack *Stack) ([]byte, error), args ...string) {
var (
env = NewEVM(Context{}, nil, params.TestChainConfig, Config{EnableJit: false, ForceJit: false})
@@ -215,7 +354,11 @@ func BenchmarkOpEq(b *testing.B) {
opBenchmark(b, opEq, x, y)
}
func BenchmarkOpEq2(b *testing.B) {
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
y := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201fffffffe"
opBenchmark(b, opEq, x, y)
}
func BenchmarkOpAnd(b *testing.B) {
x := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
y := "ABCDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff"
@@ -259,3 +402,26 @@ func BenchmarkOpMulmod(b *testing.B) {
opBenchmark(b, opMulmod, x, y, z)
}
func BenchmarkOpSHL(b *testing.B) {
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
y := "ff"
opBenchmark(b, opSHL, x, y)
}
func BenchmarkOpSHR(b *testing.B) {
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
y := "ff"
opBenchmark(b, opSHR, x, y)
}
func BenchmarkOpSAR(b *testing.B) {
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
y := "ff"
opBenchmark(b, opSAR, x, y)
}
func BenchmarkOpIsZero(b *testing.B) {
x := "FBCDEF090807060504030201ffffffffFBCDEF090807060504030201ffffffff"
opBenchmark(b, opIszero, x)
}

View File

@@ -37,8 +37,6 @@ type Config struct {
// NoRecursion disabled Interpreter call, callcode,
// delegate call and create.
NoRecursion bool
// Disable gas metering
DisableGasMetering bool
// Enable recording of SHA3/keccak preimages
EnablePreimageRecording bool
// JumpTable contains the EVM instruction table. This
@@ -68,6 +66,8 @@ func NewInterpreter(evm *EVM, cfg Config) *Interpreter {
// we'll set the default jump table.
if !cfg.JumpTable[STOP].valid {
switch {
case evm.ChainConfig().IsConstantinople(evm.BlockNumber):
cfg.JumpTable = constantinopleInstructionSet
case evm.ChainConfig().IsByzantium(evm.BlockNumber):
cfg.JumpTable = byzantiumInstructionSet
case evm.ChainConfig().IsHomestead(evm.BlockNumber):
@@ -187,14 +187,11 @@ func (in *Interpreter) Run(contract *Contract, input []byte) (ret []byte, err er
return nil, errGasUintOverflow
}
}
if !in.cfg.DisableGasMetering {
// consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
if err != nil || !contract.UseGas(cost) {
return nil, ErrOutOfGas
}
// consume the gas and return an error if not enough gas is available.
// cost is explicitly set so that the capture state defer method can get the proper cost
cost, err = operation.gasCost(in.gasTable, in.evm, contract, stack, mem, memorySize)
if err != nil || !contract.UseGas(cost) {
return nil, ErrOutOfGas
}
if memorySize > 0 {
mem.Resize(memorySize)

View File

@@ -51,11 +51,38 @@ type operation struct {
}
var (
frontierInstructionSet = NewFrontierInstructionSet()
homesteadInstructionSet = NewHomesteadInstructionSet()
byzantiumInstructionSet = NewByzantiumInstructionSet()
frontierInstructionSet = NewFrontierInstructionSet()
homesteadInstructionSet = NewHomesteadInstructionSet()
byzantiumInstructionSet = NewByzantiumInstructionSet()
constantinopleInstructionSet = NewConstantinopleInstructionSet()
)
// NewConstantinopleInstructionSet returns the frontier, homestead,
// byzantium and constantinople instructions.
func NewConstantinopleInstructionSet() [256]operation {
// start with the instructions that can be executed during the byzantium phase.
instructionSet := NewByzantiumInstructionSet()
instructionSet[SHL] = operation{
execute: opSHL,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
}
instructionSet[SHR] = operation{
execute: opSHR,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
}
instructionSet[SAR] = operation{
execute: opSAR,
gasCost: constGasFunc(GasFastestStep),
validateStack: makeStackFunc(2, 1),
valid: true,
}
return instructionSet
}
// NewByzantiumInstructionSet returns the frontier, homestead and
// byzantium instructions.
func NewByzantiumInstructionSet() [256]operation {

View File

@@ -63,6 +63,9 @@ const (
XOR
NOT
BYTE
SHL
SHR
SAR
SHA3 = 0x20
)
@@ -234,6 +237,9 @@ var opCodeToString = map[OpCode]string{
OR: "OR",
XOR: "XOR",
BYTE: "BYTE",
SHL: "SHL",
SHR: "SHR",
SAR: "SAR",
ADDMOD: "ADDMOD",
MULMOD: "MULMOD",
@@ -400,6 +406,9 @@ var stringToOp = map[string]OpCode{
"OR": OR,
"XOR": XOR,
"BYTE": BYTE,
"SHL": SHL,
"SHR": SHR,
"SAR": SAR,
"ADDMOD": ADDMOD,
"MULMOD": MULMOD,
"SHA3": SHA3,

View File

@@ -1,4 +1,4 @@
// Copyright 2017 The go-ethereum Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,24 +14,22 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build go1.8
// +build amd64 arm64
package ethash
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
package bn256
import "testing"
import "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
// Tests whether the dataset size calculator works correctly by cross checking the
// hard coded lookup table with the value generated by it.
func TestSizeCalculations(t *testing.T) {
// Verify all the cache and dataset sizes from the lookup table.
for epoch, want := range cacheSizes {
if size := calcCacheSize(epoch); size != want {
t.Errorf("cache %d: cache size mismatch: have %d, want %d", epoch, size, want)
}
}
for epoch, want := range datasetSizes {
if size := calcDatasetSize(epoch); size != want {
t.Errorf("dataset %d: dataset size mismatch: have %d, want %d", epoch, size, want)
}
}
// G1 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G1 = bn256.G1
// G2 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G2 = bn256.G2
// PairingCheck calculates the Optimal Ate pairing for a set of points.
func PairingCheck(a []*G1, b []*G2) bool {
return bn256.PairingCheck(a, b)
}

138
crypto/bn256/bn256_fuzz.go Normal file
View File

@@ -0,0 +1,138 @@
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
// +build gofuzz
package bn256
import (
"bytes"
"math/big"
cloudflare "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
google "github.com/ethereum/go-ethereum/crypto/bn256/google"
)
// FuzzAdd fuzzes bn256 addition between the Google and Cloudflare libraries.
func FuzzAdd(data []byte) int {
// Ensure we have enough data in the first place
if len(data) != 128 {
return 0
}
// Ensure both libs can parse the first curve point
xc := new(cloudflare.G1)
_, errc := xc.Unmarshal(data[:64])
xg := new(google.G1)
_, errg := xg.Unmarshal(data[:64])
if (errc == nil) != (errg == nil) {
panic("parse mismatch")
} else if errc != nil {
return 0
}
// Ensure both libs can parse the second curve point
yc := new(cloudflare.G1)
_, errc = yc.Unmarshal(data[64:])
yg := new(google.G1)
_, errg = yg.Unmarshal(data[64:])
if (errc == nil) != (errg == nil) {
panic("parse mismatch")
} else if errc != nil {
return 0
}
// Add the two points and ensure they result in the same output
rc := new(cloudflare.G1)
rc.Add(xc, yc)
rg := new(google.G1)
rg.Add(xg, yg)
if !bytes.Equal(rc.Marshal(), rg.Marshal()) {
panic("add mismatch")
}
return 0
}
// FuzzMul fuzzes bn256 scalar multiplication between the Google and Cloudflare
// libraries.
func FuzzMul(data []byte) int {
// Ensure we have enough data in the first place
if len(data) != 96 {
return 0
}
// Ensure both libs can parse the curve point
pc := new(cloudflare.G1)
_, errc := pc.Unmarshal(data[:64])
pg := new(google.G1)
_, errg := pg.Unmarshal(data[:64])
if (errc == nil) != (errg == nil) {
panic("parse mismatch")
} else if errc != nil {
return 0
}
// Multiply the point by the scalar and ensure they result in the same output
rc := new(cloudflare.G1)
rc.ScalarMult(pc, new(big.Int).SetBytes(data[64:]))
rg := new(google.G1)
rg.ScalarMult(pg, new(big.Int).SetBytes(data[64:]))
if !bytes.Equal(rc.Marshal(), rg.Marshal()) {
panic("scalar mul mismatch")
}
return 0
}
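// FuzzPair fuzzes bn256 pairing between the Google and Cloudflare libraries.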
func FuzzPair(data []byte) int {
// Ensure we have enough data in the first place
if len(data) != 192 {
return 0
}
// Ensure both libs can parse the curve point
pc := new(cloudflare.G1)
_, errc := pc.Unmarshal(data[:64])
pg := new(google.G1)
_, errg := pg.Unmarshal(data[:64])
if (errc == nil) != (errg == nil) {
panic("parse mismatch")
} else if errc != nil {
return 0
}
// Ensure both libs can parse the twist point
tc := new(cloudflare.G2)
_, errc = tc.Unmarshal(data[64:])
tg := new(google.G2)
_, errg = tg.Unmarshal(data[64:])
if (errc == nil) != (errg == nil) {
panic("parse mismatch")
} else if errc != nil {
return 0
}
// Pair the two points and ensure they result in the same output
if cloudflare.PairingCheck([]*cloudflare.G1{pc}, []*cloudflare.G2{tc}) != google.PairingCheck([]*google.G1{pg}, []*google.G2{tg}) {
panic("pair mismatch")
}
return 0
}

View File

@@ -1,4 +1,4 @@
// Copyright 2014 The go-ethereum Authors
// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
@@ -14,10 +14,22 @@
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
package core
// +build !amd64,!arm64
import (
"math/big"
)
// Package bn256 implements the Optimal Ate pairing over a 256-bit Barreto-Naehrig curve.
package bn256
var BlockReward = big.NewInt(5e+18)
import "github.com/ethereum/go-ethereum/crypto/bn256/google"
// G1 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G1 = bn256.G1
// G2 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G2 = bn256.G2
// PairingCheck calculates the Optimal Ate pairing for a set of points.
func PairingCheck(a []*G1, b []*G2) bool {
return bn256.PairingCheck(a, b)
}

View File

@@ -0,0 +1,481 @@
// Package bn256 implements a particular bilinear group at the 128-bit security
// level.
//
// Bilinear groups are the basis of many of the new cryptographic protocols that
// have been proposed over the past decade. They consist of a triplet of groups
// (G₁, G₂ and GT) such that there exists a function e(g₁ˣ,g₂ʸ)=gTˣʸ (where gₓ
// is a generator of the respective group). That function is called a pairing
// function.
//
// This package specifically implements the Optimal Ate pairing over a 256-bit
// Barreto-Naehrig curve as described in
// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible
// with the implementation described in that paper.
package bn256
import (
"crypto/rand"
"errors"
"io"
"math/big"
)
func randomK(r io.Reader) (k *big.Int, err error) {
for {
k, err = rand.Int(r, Order)
if k.Sign() > 0 || err != nil {
return
}
}
}
// G1 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G1 struct {
p *curvePoint
}
// RandomG1 returns x and g₁ˣ where x is a random, non-zero number read from r.
func RandomG1(r io.Reader) (*big.Int, *G1, error) {
k, err := randomK(r)
if err != nil {
return nil, nil, err
}
return k, new(G1).ScalarBaseMult(k), nil
}
func (g *G1) String() string {
return "bn256.G1" + g.p.String()
}
// ScalarBaseMult sets e to g*k where g is the generator of the group and then
// returns e.
func (e *G1) ScalarBaseMult(k *big.Int) *G1 {
if e.p == nil {
e.p = &curvePoint{}
}
e.p.Mul(curveGen, k)
return e
}
// ScalarMult sets e to a*k and then returns e.
func (e *G1) ScalarMult(a *G1, k *big.Int) *G1 {
if e.p == nil {
e.p = &curvePoint{}
}
e.p.Mul(a.p, k)
return e
}
// Add sets e to a+b and then returns e.
func (e *G1) Add(a, b *G1) *G1 {
if e.p == nil {
e.p = &curvePoint{}
}
e.p.Add(a.p, b.p)
return e
}
// Neg sets e to -a and then returns e.
func (e *G1) Neg(a *G1) *G1 {
if e.p == nil {
e.p = &curvePoint{}
}
e.p.Neg(a.p)
return e
}
// Set sets e to a and then returns e.
func (e *G1) Set(a *G1) *G1 {
if e.p == nil {
e.p = &curvePoint{}
}
e.p.Set(a.p)
return e
}
// Marshal converts e to a byte slice.
func (e *G1) Marshal() []byte {
// Each value is a 256-bit number.
const numBytes = 256 / 8
e.p.MakeAffine()
ret := make([]byte, numBytes*2)
if e.p.IsInfinity() {
return ret
}
temp := &gfP{}
montDecode(temp, &e.p.x)
temp.Marshal(ret)
montDecode(temp, &e.p.y)
temp.Marshal(ret[numBytes:])
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G1) Unmarshal(m []byte) ([]byte, error) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) < 2*numBytes {
return nil, errors.New("bn256: not enough data")
}
// Unmarshal the points and check their caps
if e.p == nil {
e.p = &curvePoint{}
} else {
e.p.x, e.p.y = gfP{0}, gfP{0}
}
var err error
if err = e.p.x.Unmarshal(m); err != nil {
return nil, err
}
if err = e.p.y.Unmarshal(m[numBytes:]); err != nil {
return nil, err
}
// Encode into Montgomery form and ensure it's on the curve
montEncode(&e.p.x, &e.p.x)
montEncode(&e.p.y, &e.p.y)
zero := gfP{0}
if e.p.x == zero && e.p.y == zero {
// This is the point at infinity.
e.p.y = *newGFp(1)
e.p.z = gfP{0}
e.p.t = gfP{0}
} else {
e.p.z = *newGFp(1)
e.p.t = *newGFp(1)
if !e.p.IsOnCurve() {
return nil, errors.New("bn256: malformed point")
}
}
return m[2*numBytes:], nil
}
// G2 is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type G2 struct {
p *twistPoint
}
// RandomG2 returns x and g₂ˣ where x is a random, non-zero number read from r.
func RandomG2(r io.Reader) (*big.Int, *G2, error) {
k, err := randomK(r)
if err != nil {
return nil, nil, err
}
return k, new(G2).ScalarBaseMult(k), nil
}
func (e *G2) String() string {
return "bn256.G2" + e.p.String()
}
// ScalarBaseMult sets e to g*k where g is the generator of the group and then
// returns e.
func (e *G2) ScalarBaseMult(k *big.Int) *G2 {
if e.p == nil {
e.p = &twistPoint{}
}
e.p.Mul(twistGen, k)
return e
}
// ScalarMult sets e to a*k and then returns e.
func (e *G2) ScalarMult(a *G2, k *big.Int) *G2 {
if e.p == nil {
e.p = &twistPoint{}
}
e.p.Mul(a.p, k)
return e
}
// Add sets e to a+b and then returns e.
func (e *G2) Add(a, b *G2) *G2 {
if e.p == nil {
e.p = &twistPoint{}
}
e.p.Add(a.p, b.p)
return e
}
// Neg sets e to -a and then returns e.
func (e *G2) Neg(a *G2) *G2 {
if e.p == nil {
e.p = &twistPoint{}
}
e.p.Neg(a.p)
return e
}
// Set sets e to a and then returns e.
func (e *G2) Set(a *G2) *G2 {
if e.p == nil {
e.p = &twistPoint{}
}
e.p.Set(a.p)
return e
}
// Marshal converts e into a byte slice.
func (e *G2) Marshal() []byte {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if e.p == nil {
e.p = &twistPoint{}
}
e.p.MakeAffine()
ret := make([]byte, numBytes*4)
if e.p.IsInfinity() {
return ret
}
temp := &gfP{}
montDecode(temp, &e.p.x.x)
temp.Marshal(ret)
montDecode(temp, &e.p.x.y)
temp.Marshal(ret[numBytes:])
montDecode(temp, &e.p.y.x)
temp.Marshal(ret[2*numBytes:])
montDecode(temp, &e.p.y.y)
temp.Marshal(ret[3*numBytes:])
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G2) Unmarshal(m []byte) ([]byte, error) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) < 4*numBytes {
return nil, errors.New("bn256: not enough data")
}
// Unmarshal the points and check their caps
if e.p == nil {
e.p = &twistPoint{}
}
var err error
if err = e.p.x.x.Unmarshal(m); err != nil {
return nil, err
}
if err = e.p.x.y.Unmarshal(m[numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.x.Unmarshal(m[2*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.y.Unmarshal(m[3*numBytes:]); err != nil {
return nil, err
}
// Encode into Montgomery form and ensure it's on the curve
montEncode(&e.p.x.x, &e.p.x.x)
montEncode(&e.p.x.y, &e.p.x.y)
montEncode(&e.p.y.x, &e.p.y.x)
montEncode(&e.p.y.y, &e.p.y.y)
if e.p.x.IsZero() && e.p.y.IsZero() {
// This is the point at infinity.
e.p.y.SetOne()
e.p.z.SetZero()
e.p.t.SetZero()
} else {
e.p.z.SetOne()
e.p.t.SetOne()
if !e.p.IsOnCurve() {
return nil, errors.New("bn256: malformed point")
}
}
return m[4*numBytes:], nil
}
// GT is an abstract cyclic group. The zero value is suitable for use as the
// output of an operation, but cannot be used as an input.
type GT struct {
p *gfP12
}
// Pair calculates an Optimal Ate pairing.
func Pair(g1 *G1, g2 *G2) *GT {
return &GT{optimalAte(g2.p, g1.p)}
}
// PairingCheck calculates the Optimal Ate pairing for a set of points.
func PairingCheck(a []*G1, b []*G2) bool {
acc := new(gfP12)
acc.SetOne()
for i := 0; i < len(a); i++ {
if a[i].p.IsInfinity() || b[i].p.IsInfinity() {
continue
}
acc.Mul(acc, miller(b[i].p, a[i].p))
}
return finalExponentiation(acc).IsOne()
}
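
As a usage sketch (an aside, not part of this change): PairingCheck can be exercised through the bilinearity identity e(P,Q)·e(−P,Q) = 1. The program below assumes the import path used by the fuzz harness added earlier in this change and only uses the API shown above (RandomG1, RandomG2, Neg, PairingCheck).

package main

import (
    "crypto/rand"
    "fmt"

    "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare"
)

func main() {
    _, p, _ := bn256.RandomG1(rand.Reader)
    _, q, _ := bn256.RandomG2(rand.Reader)
    neg := new(bn256.G1).Neg(p)
    // e(P,Q)·e(-P,Q) = 1, so the aggregate pairing check passes.
    fmt.Println(bn256.PairingCheck([]*bn256.G1{p, neg}, []*bn256.G2{q, q})) // true
}
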
// Miller applies Miller's algorithm, which is a bilinear function from the
// source groups to F_p^12. Miller(g1, g2).Finalize() is equivalent to Pair(g1,
// g2).
func Miller(g1 *G1, g2 *G2) *GT {
return &GT{miller(g2.p, g1.p)}
}
func (g *GT) String() string {
return "bn256.GT" + g.p.String()
}
// ScalarMult sets e to a*k and then returns e.
func (e *GT) ScalarMult(a *GT, k *big.Int) *GT {
if e.p == nil {
e.p = &gfP12{}
}
e.p.Exp(a.p, k)
return e
}
// Add sets e to a+b and then returns e.
func (e *GT) Add(a, b *GT) *GT {
if e.p == nil {
e.p = &gfP12{}
}
e.p.Mul(a.p, b.p)
return e
}
// Neg sets e to -a and then returns e.
func (e *GT) Neg(a *GT) *GT {
if e.p == nil {
e.p = &gfP12{}
}
e.p.Conjugate(a.p)
return e
}
// Set sets e to a and then returns e.
func (e *GT) Set(a *GT) *GT {
if e.p == nil {
e.p = &gfP12{}
}
e.p.Set(a.p)
return e
}
// Finalize is a linear function from F_p^12 to GT.
func (e *GT) Finalize() *GT {
ret := finalExponentiation(e.p)
e.p.Set(ret)
return e
}
// Marshal converts e into a byte slice.
func (e *GT) Marshal() []byte {
// Each value is a 256-bit number.
const numBytes = 256 / 8
ret := make([]byte, numBytes*12)
temp := &gfP{}
montDecode(temp, &e.p.x.x.x)
temp.Marshal(ret)
montDecode(temp, &e.p.x.x.y)
temp.Marshal(ret[numBytes:])
montDecode(temp, &e.p.x.y.x)
temp.Marshal(ret[2*numBytes:])
montDecode(temp, &e.p.x.y.y)
temp.Marshal(ret[3*numBytes:])
montDecode(temp, &e.p.x.z.x)
temp.Marshal(ret[4*numBytes:])
montDecode(temp, &e.p.x.z.y)
temp.Marshal(ret[5*numBytes:])
montDecode(temp, &e.p.y.x.x)
temp.Marshal(ret[6*numBytes:])
montDecode(temp, &e.p.y.x.y)
temp.Marshal(ret[7*numBytes:])
montDecode(temp, &e.p.y.y.x)
temp.Marshal(ret[8*numBytes:])
montDecode(temp, &e.p.y.y.y)
temp.Marshal(ret[9*numBytes:])
montDecode(temp, &e.p.y.z.x)
temp.Marshal(ret[10*numBytes:])
montDecode(temp, &e.p.y.z.y)
temp.Marshal(ret[11*numBytes:])
return ret
}
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *GT) Unmarshal(m []byte) ([]byte, error) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) < 12*numBytes {
return nil, errors.New("bn256: not enough data")
}
if e.p == nil {
e.p = &gfP12{}
}
var err error
if err = e.p.x.x.x.Unmarshal(m); err != nil {
return nil, err
}
if err = e.p.x.x.y.Unmarshal(m[numBytes:]); err != nil {
return nil, err
}
if err = e.p.x.y.x.Unmarshal(m[2*numBytes:]); err != nil {
return nil, err
}
if err = e.p.x.y.y.Unmarshal(m[3*numBytes:]); err != nil {
return nil, err
}
if err = e.p.x.z.x.Unmarshal(m[4*numBytes:]); err != nil {
return nil, err
}
if err = e.p.x.z.y.Unmarshal(m[5*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.x.x.Unmarshal(m[6*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.x.y.Unmarshal(m[7*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.y.x.Unmarshal(m[8*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.y.y.Unmarshal(m[9*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.z.x.Unmarshal(m[10*numBytes:]); err != nil {
return nil, err
}
if err = e.p.y.z.y.Unmarshal(m[11*numBytes:]); err != nil {
return nil, err
}
montEncode(&e.p.x.x.x, &e.p.x.x.x)
montEncode(&e.p.x.x.y, &e.p.x.x.y)
montEncode(&e.p.x.y.x, &e.p.x.y.x)
montEncode(&e.p.x.y.y, &e.p.x.y.y)
montEncode(&e.p.x.z.x, &e.p.x.z.x)
montEncode(&e.p.x.z.y, &e.p.x.z.y)
montEncode(&e.p.y.x.x, &e.p.y.x.x)
montEncode(&e.p.y.x.y, &e.p.y.x.y)
montEncode(&e.p.y.y.x, &e.p.y.y.x)
montEncode(&e.p.y.y.y, &e.p.y.y.y)
montEncode(&e.p.y.z.x, &e.p.y.z.x)
montEncode(&e.p.y.z.y, &e.p.y.z.y)
return m[12*numBytes:], nil
}

View File

@@ -0,0 +1,116 @@
package bn256
import (
"bytes"
"crypto/rand"
"testing"
)
func TestG1Marshal(t *testing.T) {
_, Ga, err := RandomG1(rand.Reader)
if err != nil {
t.Fatal(err)
}
ma := Ga.Marshal()
Gb := new(G1)
_, err = Gb.Unmarshal(ma)
if err != nil {
t.Fatal(err)
}
mb := Gb.Marshal()
if !bytes.Equal(ma, mb) {
t.Fatal("bytes are different")
}
}
func TestG2Marshal(t *testing.T) {
_, Ga, err := RandomG2(rand.Reader)
if err != nil {
t.Fatal(err)
}
ma := Ga.Marshal()
Gb := new(G2)
_, err = Gb.Unmarshal(ma)
if err != nil {
t.Fatal(err)
}
mb := Gb.Marshal()
if !bytes.Equal(ma, mb) {
t.Fatal("bytes are different")
}
}
func TestBilinearity(t *testing.T) {
for i := 0; i < 2; i++ {
a, p1, _ := RandomG1(rand.Reader)
b, p2, _ := RandomG2(rand.Reader)
e1 := Pair(p1, p2)
e2 := Pair(&G1{curveGen}, &G2{twistGen})
e2.ScalarMult(e2, a)
e2.ScalarMult(e2, b)
if *e1.p != *e2.p {
t.Fatalf("bad pairing result: %s", e1)
}
}
}
func TestTripartiteDiffieHellman(t *testing.T) {
a, _ := rand.Int(rand.Reader, Order)
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
pa, pb, pc := new(G1), new(G1), new(G1)
qa, qb, qc := new(G2), new(G2), new(G2)
pa.Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
qa.Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
pb.Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
qb.Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
pc.Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
qc.Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)
k1Bytes := k1.Marshal()
k2 := Pair(pc, qa)
k2.ScalarMult(k2, b)
k2Bytes := k2.Marshal()
k3 := Pair(pa, qb)
k3.ScalarMult(k3, c)
k3Bytes := k3.Marshal()
if !bytes.Equal(k1Bytes, k2Bytes) || !bytes.Equal(k2Bytes, k3Bytes) {
t.Errorf("keys didn't agree")
}
}
func BenchmarkG1(b *testing.B) {
x, _ := rand.Int(rand.Reader, Order)
b.ResetTimer()
for i := 0; i < b.N; i++ {
new(G1).ScalarBaseMult(x)
}
}
func BenchmarkG2(b *testing.B) {
x, _ := rand.Int(rand.Reader, Order)
b.ResetTimer()
for i := 0; i < b.N; i++ {
new(G2).ScalarBaseMult(x)
}
}
func BenchmarkPairing(b *testing.B) {
for i := 0; i < b.N; i++ {
Pair(&G1{curveGen}, &G2{twistGen})
}
}

View File

@@ -0,0 +1,59 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package bn256
import (
"math/big"
)
func bigFromBase10(s string) *big.Int {
n, _ := new(big.Int).SetString(s, 10)
return n
}
// u is the BN parameter that determines the prime and the group order.
var u = bigFromBase10("4965661367192848881")
// Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1.
var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617")
// P is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1.
var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583")
// p2 is p, represented as little-endian 64-bit words.
var p2 = [4]uint64{0x3c208c16d87cfd47, 0x97816a916871ca8d, 0xb85045b68181585d, 0x30644e72e131a029}
// np is the negative inverse of p, mod 2^256.
var np = [4]uint64{0x87d20782e4866389, 0x9ede7d651eca6ac9, 0xd8afcbd01833da80, 0xf57a22b791888c6b}
// rN1 is R^-1 where R = 2^256 mod p.
var rN1 = &gfP{0xed84884a014afa37, 0xeb2022850278edf8, 0xcf63e9cfb74492d9, 0x2e67157159e5c639}
// r2 is R^2 where R = 2^256 mod p.
var r2 = &gfP{0xf32cfc5b538afa89, 0xb5e71911d44501fb, 0x47ab1eff0a417ff6, 0x06d89f71cab8351f}
// r3 is R^3 where R = 2^256 mod p.
var r3 = &gfP{0xb1cd6dafda1530df, 0x62f210e6a7283db6, 0xef7f0b0c0ada0afb, 0x20fd6e902d592544}
// xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9.
var xiToPMinus1Over6 = &gfP2{gfP{0xa222ae234c492d72, 0xd00f02a4565de15b, 0xdc2ff3a253dfc926, 0x10a75716b3899551}, gfP{0xaf9ba69633144907, 0xca6b1d7387afb78a, 0x11bded5ef08a2087, 0x02f34d751a1f3a7c}}
// xiToPMinus1Over3 is ξ^((p-1)/3) where ξ = i+9.
var xiToPMinus1Over3 = &gfP2{gfP{0x6e849f1ea0aa4757, 0xaa1c7b6d89f89141, 0xb6e713cdfae0ca3a, 0x26694fbb4e82ebc3}, gfP{0xb5773b104563ab30, 0x347f91c8a9aa6454, 0x7a007127242e0991, 0x1956bcd8118214ec}}
// xiToPMinus1Over2 is ξ^((p-1)/2) where ξ = i+9.
var xiToPMinus1Over2 = &gfP2{gfP{0xa1d77ce45ffe77c7, 0x07affd117826d1db, 0x6d16bd27bb7edc6b, 0x2c87200285defecc}, gfP{0xe4bbdd0c2936b629, 0xbb30f162e133bacb, 0x31a9d1b6f9645366, 0x253570bea500f8dd}}
// xiToPSquaredMinus1Over3 is ξ^((p²-1)/3) where ξ = i+9.
var xiToPSquaredMinus1Over3 = &gfP{0x3350c88e13e80b9c, 0x7dce557cdb5e56b9, 0x6001b4b8b615564a, 0x2682e617020217e0}
// xiTo2PSquaredMinus2Over3 is ξ^((2p²-2)/3) where ξ = i+9 (a cubic root of unity, mod p).
var xiTo2PSquaredMinus2Over3 = &gfP{0x71930c11d782e155, 0xa6bb947cffbe3323, 0xaa303344d4741444, 0x2c3b3f0d26594943}
// xiToPSquaredMinus1Over6 is ξ^((p²-1)/6) where ξ = i+9 (a cubic root of -1, mod p).
var xiToPSquaredMinus1Over6 = &gfP{0xca8d800500fa1bf2, 0xf0c5d61468b39769, 0x0e201271ad0d4418, 0x04290f65bad856e6}
// xiTo2PMinus2Over3 is ξ^((2p-2)/3) where ξ = i+9.
var xiTo2PMinus2Over3 = &gfP2{gfP{0x5dddfd154bd8c949, 0x62cb29a5a4445b60, 0x37bc870a0c7dd2b9, 0x24830a9d3171f0fd}, gfP{0x7361d77f843abe92, 0xa5bb2bd3273411fb, 0x9c941f314b3e2399, 0x15df9cddbb9fd3ec}}
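
The comments on Order and P above pin both values to the same BN polynomial in u. A quick standalone math/big cross-check of those stated formulas (the bn helper is illustrative only):

package main

import (
    "fmt"
    "math/big"
)

func main() {
    u, _ := new(big.Int).SetString("4965661367192848881", 10)
    order, _ := new(big.Int).SetString("21888242871839275222246405745257275088548364400416034343698204186575808495617", 10)
    p, _ := new(big.Int).SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)

    // bn(c2) computes 36u⁴ + 36u³ + c2·u² + 6u + 1.
    bn := func(c2 int64) *big.Int {
        u2 := new(big.Int).Mul(u, u)
        u3 := new(big.Int).Mul(u2, u)
        u4 := new(big.Int).Mul(u3, u)
        r := new(big.Int).Mul(u4, big.NewInt(36))
        r.Add(r, new(big.Int).Mul(u3, big.NewInt(36)))
        r.Add(r, new(big.Int).Mul(u2, big.NewInt(c2)))
        r.Add(r, new(big.Int).Mul(u, big.NewInt(6)))
        return r.Add(r, big.NewInt(1))
    }
    fmt.Println(bn(18).Cmp(order) == 0) // true: Order = 36u⁴+36u³+18u²+6u+1
    fmt.Println(bn(24).Cmp(p) == 0)     // true: P = 36u⁴+36u³+24u²+6u+1
}
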

View File

@@ -0,0 +1,238 @@
package bn256
import (
"math/big"
)
// curvePoint implements the elliptic curve y²=x³+3. Points are kept in Jacobian
// form and t=z² when valid. G₁ is the set of points of this curve on GF(p).
type curvePoint struct {
x, y, z, t gfP
}
var curveB = newGFp(3)
// curveGen is the generator of G₁.
var curveGen = &curvePoint{
x: *newGFp(1),
y: *newGFp(2),
z: *newGFp(1),
t: *newGFp(1),
}
func (c *curvePoint) String() string {
c.MakeAffine()
x, y := &gfP{}, &gfP{}
montDecode(x, &c.x)
montDecode(y, &c.y)
return "(" + x.String() + ", " + y.String() + ")"
}
func (c *curvePoint) Set(a *curvePoint) {
c.x.Set(&a.x)
c.y.Set(&a.y)
c.z.Set(&a.z)
c.t.Set(&a.t)
}
// IsOnCurve returns true iff c is on the curve.
func (c *curvePoint) IsOnCurve() bool {
c.MakeAffine()
if c.IsInfinity() {
return true
}
y2, x3 := &gfP{}, &gfP{}
gfpMul(y2, &c.y, &c.y)
gfpMul(x3, &c.x, &c.x)
gfpMul(x3, x3, &c.x)
gfpAdd(x3, x3, curveB)
return *y2 == *x3
}
func (c *curvePoint) SetInfinity() {
c.x = gfP{0}
c.y = *newGFp(1)
c.z = gfP{0}
c.t = gfP{0}
}
func (c *curvePoint) IsInfinity() bool {
return c.z == gfP{0}
}
func (c *curvePoint) Add(a, b *curvePoint) {
if a.IsInfinity() {
c.Set(b)
return
}
if b.IsInfinity() {
c.Set(a)
return
}
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
// Normalize the points by replacing a = [x1:y1:z1] and b = [x2:y2:z2]
// by [u1:s1:z1·z2] and [u2:s2:z1·z2]
// where u1 = x1·z2², s1 = y1·z2³ and u2 = x2·z1², s2 = y2·z1³
z12, z22 := &gfP{}, &gfP{}
gfpMul(z12, &a.z, &a.z)
gfpMul(z22, &b.z, &b.z)
u1, u2 := &gfP{}, &gfP{}
gfpMul(u1, &a.x, z22)
gfpMul(u2, &b.x, z12)
t, s1 := &gfP{}, &gfP{}
gfpMul(t, &b.z, z22)
gfpMul(s1, &a.y, t)
s2 := &gfP{}
gfpMul(t, &a.z, z12)
gfpMul(s2, &b.y, t)
// Compute x = (2h)²(s²-u1-u2)
// where s = (s2-s1)/(u2-u1) is the slope of the line through
// (u1,s1) and (u2,s2). The extra factor 2h = 2(u2-u1) comes from the value of z below.
// This is also:
// 4(s2-s1)² - 4h²(u1+u2) = 4(s2-s1)² - 4h³ - 4h²(2u1)
// = r² - j - 2v
// with the notations below.
h := &gfP{}
gfpSub(h, u2, u1)
xEqual := *h == gfP{0}
gfpAdd(t, h, h)
// i = 4h²
i := &gfP{}
gfpMul(i, t, t)
// j = 4h³
j := &gfP{}
gfpMul(j, h, i)
gfpSub(t, s2, s1)
yEqual := *t == gfP{0}
if xEqual && yEqual {
c.Double(a)
return
}
r := &gfP{}
gfpAdd(r, t, t)
v := &gfP{}
gfpMul(v, u1, i)
// t4 = 4(s2-s1)²
t4, t6 := &gfP{}, &gfP{}
gfpMul(t4, r, r)
gfpAdd(t, v, v)
gfpSub(t6, t4, j)
gfpSub(&c.x, t6, t)
// Set y = -(2h)³(s1 + s*(x/4h²-u1))
// This is also
// y = - 2·s1·j - (s2-s1)(2x - 2i·u1) = r(v-x) - 2·s1·j
gfpSub(t, v, &c.x) // t7
gfpMul(t4, s1, j) // t8
gfpAdd(t6, t4, t4) // t9
gfpMul(t4, r, t) // t10
gfpSub(&c.y, t4, t6)
// Set z = 2(u2-u1)·z1·z2 = 2h·z1·z2
gfpAdd(t, &a.z, &b.z) // t11
gfpMul(t4, t, t) // t12
gfpSub(t, t4, z12) // t13
gfpSub(t4, t, z22) // t14
gfpMul(&c.z, t4, h)
}
func (c *curvePoint) Double(a *curvePoint) {
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
A, B, C := &gfP{}, &gfP{}, &gfP{}
gfpMul(A, &a.x, &a.x)
gfpMul(B, &a.y, &a.y)
gfpMul(C, B, B)
t, t2 := &gfP{}, &gfP{}
gfpAdd(t, &a.x, B)
gfpMul(t2, t, t)
gfpSub(t, t2, A)
gfpSub(t2, t, C)
d, e, f := &gfP{}, &gfP{}, &gfP{}
gfpAdd(d, t2, t2)
gfpAdd(t, A, A)
gfpAdd(e, t, A)
gfpMul(f, e, e)
gfpAdd(t, d, d)
gfpSub(&c.x, f, t)
gfpAdd(t, C, C)
gfpAdd(t2, t, t)
gfpAdd(t, t2, t2)
gfpSub(&c.y, d, &c.x)
gfpMul(t2, e, &c.y)
gfpSub(&c.y, t2, t)
gfpMul(t, &a.y, &a.z)
gfpAdd(&c.z, t, t)
}
func (c *curvePoint) Mul(a *curvePoint, scalar *big.Int) {
precomp := [1 << 2]*curvePoint{nil, {}, {}, {}}
precomp[1].Set(a)
precomp[2].Set(a)
gfpMul(&precomp[2].x, &precomp[2].x, xiTo2PSquaredMinus2Over3)
precomp[3].Add(precomp[1], precomp[2])
multiScalar := curveLattice.Multi(scalar)
sum := &curvePoint{}
sum.SetInfinity()
t := &curvePoint{}
for i := len(multiScalar) - 1; i >= 0; i-- {
t.Double(sum)
if multiScalar[i] == 0 {
sum.Set(t)
} else {
sum.Add(t, precomp[multiScalar[i]])
}
}
c.Set(sum)
}
func (c *curvePoint) MakeAffine() {
if c.z == *newGFp(1) {
return
} else if c.z == *newGFp(0) {
c.x = gfP{0}
c.y = *newGFp(1)
c.t = gfP{0}
return
}
zInv := &gfP{}
zInv.Invert(&c.z)
t, zInv2 := &gfP{}, &gfP{}
gfpMul(t, &c.y, zInv)
gfpMul(zInv2, zInv, zInv)
gfpMul(&c.x, &c.x, zInv2)
gfpMul(&c.y, t, zInv2)
c.z = *newGFp(1)
c.t = *newGFp(1)
}
func (c *curvePoint) Neg(a *curvePoint) {
c.x.Set(&a.x)
gfpNeg(&c.y, &a.y)
c.z.Set(&a.z)
c.t = gfP{0}
}

View File

@@ -0,0 +1,81 @@
package bn256
import (
"errors"
"fmt"
)
type gfP [4]uint64
func newGFp(x int64) (out *gfP) {
if x >= 0 {
out = &gfP{uint64(x)}
} else {
out = &gfP{uint64(-x)}
gfpNeg(out, out)
}
montEncode(out, out)
return out
}
func (e *gfP) String() string {
return fmt.Sprintf("%16.16x%16.16x%16.16x%16.16x", e[3], e[2], e[1], e[0])
}
func (e *gfP) Set(f *gfP) {
e[0] = f[0]
e[1] = f[1]
e[2] = f[2]
e[3] = f[3]
}
func (e *gfP) Invert(f *gfP) {
bits := [4]uint64{0x3c208c16d87cfd45, 0x97816a916871ca8d, 0xb85045b68181585d, 0x30644e72e131a029}
sum, power := &gfP{}, &gfP{}
sum.Set(rN1)
power.Set(f)
for word := 0; word < 4; word++ {
for bit := uint(0); bit < 64; bit++ {
if (bits[word]>>bit)&1 == 1 {
gfpMul(sum, sum, power)
}
gfpMul(power, power, power)
}
}
gfpMul(sum, sum, r3)
e.Set(sum)
}
func (e *gfP) Marshal(out []byte) {
for w := uint(0); w < 4; w++ {
for b := uint(0); b < 8; b++ {
out[8*w+b] = byte(e[3-w] >> (56 - 8*b))
}
}
}
func (e *gfP) Unmarshal(in []byte) error {
// Unmarshal the bytes into little endian form
for w := uint(0); w < 4; w++ {
for b := uint(0); b < 8; b++ {
e[3-w] += uint64(in[8*w+b]) << (56 - 8*b)
}
}
// Ensure the point respects the curve modulus
for i := 3; i >= 0; i-- {
if e[i] < p2[i] {
return nil
}
if e[i] > p2[i] {
return errors.New("bn256: coordinate exceeds modulus")
}
}
return errors.New("bn256: coordinate equals modulus")
}
func montEncode(c, a *gfP) { gfpMul(c, a, r2) }
func montDecode(c, a *gfP) { gfpMul(c, a, &gfP{1}) }
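
montEncode and montDecode lean on gfpMul being a Montgomery multiplication, i.e. it returns a·b·R⁻¹ mod p for R = 2²⁵⁶ (see the generic gfpMul later in this change): multiplying by r2 = R² yields a·R (encode) and multiplying by 1 yields a·R⁻¹ (decode). A small math/big illustration of that round trip, using the modulus P defined earlier; the montMul helper is illustrative only, not the package's gfpMul.

package main

import (
    "fmt"
    "math/big"
)

func main() {
    p, _ := new(big.Int).SetString("21888242871839275222246405745257275088696311157297823662689037894645226208583", 10)
    R := new(big.Int).Lsh(big.NewInt(1), 256) // R = 2^256
    rInv := new(big.Int).ModInverse(R, p)
    r2 := new(big.Int).Mod(new(big.Int).Mul(R, R), p)

    montMul := func(a, b *big.Int) *big.Int { // a·b·R⁻¹ mod p
        t := new(big.Int).Mul(a, b)
        return t.Mod(t.Mul(t, rInv), p)
    }
    x := big.NewInt(42)
    enc := montMul(x, r2)              // montEncode: x·R mod p
    dec := montMul(enc, big.NewInt(1)) // montDecode: back to x
    fmt.Println(dec.Cmp(x) == 0)       // true
}
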

View File

@@ -0,0 +1,160 @@
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
import (
"math/big"
)
// gfP12 implements the field of size p¹² as a quadratic extension of gfP6
// where ω²=τ.
type gfP12 struct {
x, y gfP6 // value is xω + y
}
func (e *gfP12) String() string {
return "(" + e.x.String() + "," + e.y.String() + ")"
}
func (e *gfP12) Set(a *gfP12) *gfP12 {
e.x.Set(&a.x)
e.y.Set(&a.y)
return e
}
func (e *gfP12) SetZero() *gfP12 {
e.x.SetZero()
e.y.SetZero()
return e
}
func (e *gfP12) SetOne() *gfP12 {
e.x.SetZero()
e.y.SetOne()
return e
}
func (e *gfP12) IsZero() bool {
return e.x.IsZero() && e.y.IsZero()
}
func (e *gfP12) IsOne() bool {
return e.x.IsZero() && e.y.IsOne()
}
func (e *gfP12) Conjugate(a *gfP12) *gfP12 {
e.x.Neg(&a.x)
e.y.Set(&a.y)
return e
}
func (e *gfP12) Neg(a *gfP12) *gfP12 {
e.x.Neg(&a.x)
e.y.Neg(&a.y)
return e
}
// Frobenius computes (xω+y)^p = x^p ω·ξ^((p-1)/6) + y^p
func (e *gfP12) Frobenius(a *gfP12) *gfP12 {
e.x.Frobenius(&a.x)
e.y.Frobenius(&a.y)
e.x.MulScalar(&e.x, xiToPMinus1Over6)
return e
}
// FrobeniusP2 computes (xω+y)^p² = x^p² ω·ξ^((p²-1)/6) + y^p²
func (e *gfP12) FrobeniusP2(a *gfP12) *gfP12 {
e.x.FrobeniusP2(&a.x)
e.x.MulGFP(&e.x, xiToPSquaredMinus1Over6)
e.y.FrobeniusP2(&a.y)
return e
}
func (e *gfP12) FrobeniusP4(a *gfP12) *gfP12 {
e.x.FrobeniusP4(&a.x)
e.x.MulGFP(&e.x, xiToPSquaredMinus1Over3)
e.y.FrobeniusP4(&a.y)
return e
}
func (e *gfP12) Add(a, b *gfP12) *gfP12 {
e.x.Add(&a.x, &b.x)
e.y.Add(&a.y, &b.y)
return e
}
func (e *gfP12) Sub(a, b *gfP12) *gfP12 {
e.x.Sub(&a.x, &b.x)
e.y.Sub(&a.y, &b.y)
return e
}
func (e *gfP12) Mul(a, b *gfP12) *gfP12 {
tx := (&gfP6{}).Mul(&a.x, &b.y)
t := (&gfP6{}).Mul(&b.x, &a.y)
tx.Add(tx, t)
ty := (&gfP6{}).Mul(&a.y, &b.y)
t.Mul(&a.x, &b.x).MulTau(t)
e.x.Set(tx)
e.y.Add(ty, t)
return e
}
func (e *gfP12) MulScalar(a *gfP12, b *gfP6) *gfP12 {
e.x.Mul(&e.x, b)
e.y.Mul(&e.y, b)
return e
}
func (c *gfP12) Exp(a *gfP12, power *big.Int) *gfP12 {
sum := (&gfP12{}).SetOne()
t := &gfP12{}
for i := power.BitLen() - 1; i >= 0; i-- {
t.Square(sum)
if power.Bit(i) != 0 {
sum.Mul(t, a)
} else {
sum.Set(t)
}
}
c.Set(sum)
return c
}
func (e *gfP12) Square(a *gfP12) *gfP12 {
// Complex squaring algorithm
v0 := (&gfP6{}).Mul(&a.x, &a.y)
t := (&gfP6{}).MulTau(&a.x)
t.Add(&a.y, t)
ty := (&gfP6{}).Add(&a.x, &a.y)
ty.Mul(ty, t).Sub(ty, v0)
t.MulTau(v0)
ty.Sub(ty, t)
e.x.Add(v0, v0)
e.y.Set(ty)
return e
}
func (e *gfP12) Invert(a *gfP12) *gfP12 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
t1, t2 := &gfP6{}, &gfP6{}
t1.Square(&a.x)
t2.Square(&a.y)
t1.MulTau(t1).Sub(t2, t1)
t2.Invert(t1)
e.x.Neg(&a.x)
e.y.Set(&a.y)
e.MulScalar(e, t2)
return e
}

View File

@@ -0,0 +1,156 @@
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
// gfP2 implements a field of size p² as a quadratic extension of the base field
// where i²=-1.
type gfP2 struct {
x, y gfP // value is xi+y.
}
func gfP2Decode(in *gfP2) *gfP2 {
out := &gfP2{}
montDecode(&out.x, &in.x)
montDecode(&out.y, &in.y)
return out
}
func (e *gfP2) String() string {
return "(" + e.x.String() + ", " + e.y.String() + ")"
}
func (e *gfP2) Set(a *gfP2) *gfP2 {
e.x.Set(&a.x)
e.y.Set(&a.y)
return e
}
func (e *gfP2) SetZero() *gfP2 {
e.x = gfP{0}
e.y = gfP{0}
return e
}
func (e *gfP2) SetOne() *gfP2 {
e.x = gfP{0}
e.y = *newGFp(1)
return e
}
func (e *gfP2) IsZero() bool {
zero := gfP{0}
return e.x == zero && e.y == zero
}
func (e *gfP2) IsOne() bool {
zero, one := gfP{0}, *newGFp(1)
return e.x == zero && e.y == one
}
func (e *gfP2) Conjugate(a *gfP2) *gfP2 {
e.y.Set(&a.y)
gfpNeg(&e.x, &a.x)
return e
}
func (e *gfP2) Neg(a *gfP2) *gfP2 {
gfpNeg(&e.x, &a.x)
gfpNeg(&e.y, &a.y)
return e
}
func (e *gfP2) Add(a, b *gfP2) *gfP2 {
gfpAdd(&e.x, &a.x, &b.x)
gfpAdd(&e.y, &a.y, &b.y)
return e
}
func (e *gfP2) Sub(a, b *gfP2) *gfP2 {
gfpSub(&e.x, &a.x, &b.x)
gfpSub(&e.y, &a.y, &b.y)
return e
}
// See "Multiplication and Squaring in Pairing-Friendly Fields",
// http://eprint.iacr.org/2006/471.pdf
func (e *gfP2) Mul(a, b *gfP2) *gfP2 {
tx, t := &gfP{}, &gfP{}
gfpMul(tx, &a.x, &b.y)
gfpMul(t, &b.x, &a.y)
gfpAdd(tx, tx, t)
ty := &gfP{}
gfpMul(ty, &a.y, &b.y)
gfpMul(t, &a.x, &b.x)
gfpSub(ty, ty, t)
e.x.Set(tx)
e.y.Set(ty)
return e
}
func (e *gfP2) MulScalar(a *gfP2, b *gfP) *gfP2 {
gfpMul(&e.x, &a.x, b)
gfpMul(&e.y, &a.y, b)
return e
}
// MulXi sets e=ξa where ξ=i+9 and then returns e.
func (e *gfP2) MulXi(a *gfP2) *gfP2 {
// (xi+y)(i+9) = (9x+y)i+(9y-x)
tx := &gfP{}
gfpAdd(tx, &a.x, &a.x)
gfpAdd(tx, tx, tx)
gfpAdd(tx, tx, tx)
gfpAdd(tx, tx, &a.x)
gfpAdd(tx, tx, &a.y)
ty := &gfP{}
gfpAdd(ty, &a.y, &a.y)
gfpAdd(ty, ty, ty)
gfpAdd(ty, ty, ty)
gfpAdd(ty, ty, &a.y)
gfpSub(ty, ty, &a.x)
e.x.Set(tx)
e.y.Set(ty)
return e
}
func (e *gfP2) Square(a *gfP2) *gfP2 {
// Complex squaring algorithm:
// (xi+y)² = (x+y)(y-x) + 2*i*x*y
tx, ty := &gfP{}, &gfP{}
gfpSub(tx, &a.y, &a.x)
gfpAdd(ty, &a.x, &a.y)
gfpMul(ty, tx, ty)
gfpMul(tx, &a.x, &a.y)
gfpAdd(tx, tx, tx)
e.x.Set(tx)
e.y.Set(ty)
return e
}
func (e *gfP2) Invert(a *gfP2) *gfP2 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
t1, t2 := &gfP{}, &gfP{}
gfpMul(t1, &a.x, &a.x)
gfpMul(t2, &a.y, &a.y)
gfpAdd(t1, t1, t2)
inv := &gfP{}
inv.Invert(t1)
gfpNeg(t1, &a.x)
gfpMul(&e.x, t1, inv)
gfpMul(&e.y, &a.y, inv)
return e
}

View File

@@ -0,0 +1,213 @@
package bn256
// For details of the algorithms used, see "Multiplication and Squaring on
// Pairing-Friendly Fields", Devegili et al.
// http://eprint.iacr.org/2006/471.pdf.
// gfP6 implements the field of size p⁶ as a cubic extension of gfP2 where τ³=ξ
// and ξ=i+9.
type gfP6 struct {
x, y, z gfP2 // value is xτ² + yτ + z
}
func (e *gfP6) String() string {
return "(" + e.x.String() + ", " + e.y.String() + ", " + e.z.String() + ")"
}
func (e *gfP6) Set(a *gfP6) *gfP6 {
e.x.Set(&a.x)
e.y.Set(&a.y)
e.z.Set(&a.z)
return e
}
func (e *gfP6) SetZero() *gfP6 {
e.x.SetZero()
e.y.SetZero()
e.z.SetZero()
return e
}
func (e *gfP6) SetOne() *gfP6 {
e.x.SetZero()
e.y.SetZero()
e.z.SetOne()
return e
}
func (e *gfP6) IsZero() bool {
return e.x.IsZero() && e.y.IsZero() && e.z.IsZero()
}
func (e *gfP6) IsOne() bool {
return e.x.IsZero() && e.y.IsZero() && e.z.IsOne()
}
func (e *gfP6) Neg(a *gfP6) *gfP6 {
e.x.Neg(&a.x)
e.y.Neg(&a.y)
e.z.Neg(&a.z)
return e
}
func (e *gfP6) Frobenius(a *gfP6) *gfP6 {
e.x.Conjugate(&a.x)
e.y.Conjugate(&a.y)
e.z.Conjugate(&a.z)
e.x.Mul(&e.x, xiTo2PMinus2Over3)
e.y.Mul(&e.y, xiToPMinus1Over3)
return e
}
// FrobeniusP2 computes (xτ²+yτ+z)^(p²) = xτ^(2p²) + yτ^(p²) + z
func (e *gfP6) FrobeniusP2(a *gfP6) *gfP6 {
// τ^(2p²) = τ²τ^(2p²-2) = τ²ξ^((2p²-2)/3)
e.x.MulScalar(&a.x, xiTo2PSquaredMinus2Over3)
// τ^(p²) = ττ^(p²-1) = τξ^((p²-1)/3)
e.y.MulScalar(&a.y, xiToPSquaredMinus1Over3)
e.z.Set(&a.z)
return e
}
func (e *gfP6) FrobeniusP4(a *gfP6) *gfP6 {
e.x.MulScalar(&a.x, xiToPSquaredMinus1Over3)
e.y.MulScalar(&a.y, xiTo2PSquaredMinus2Over3)
e.z.Set(&a.z)
return e
}
func (e *gfP6) Add(a, b *gfP6) *gfP6 {
e.x.Add(&a.x, &b.x)
e.y.Add(&a.y, &b.y)
e.z.Add(&a.z, &b.z)
return e
}
func (e *gfP6) Sub(a, b *gfP6) *gfP6 {
e.x.Sub(&a.x, &b.x)
e.y.Sub(&a.y, &b.y)
e.z.Sub(&a.z, &b.z)
return e
}
func (e *gfP6) Mul(a, b *gfP6) *gfP6 {
// "Multiplication and Squaring on Pairing-Friendly Fields"
// Section 4, Karatsuba method.
// http://eprint.iacr.org/2006/471.pdf
v0 := (&gfP2{}).Mul(&a.z, &b.z)
v1 := (&gfP2{}).Mul(&a.y, &b.y)
v2 := (&gfP2{}).Mul(&a.x, &b.x)
t0 := (&gfP2{}).Add(&a.x, &a.y)
t1 := (&gfP2{}).Add(&b.x, &b.y)
tz := (&gfP2{}).Mul(t0, t1)
tz.Sub(tz, v1).Sub(tz, v2).MulXi(tz).Add(tz, v0)
t0.Add(&a.y, &a.z)
t1.Add(&b.y, &b.z)
ty := (&gfP2{}).Mul(t0, t1)
t0.MulXi(v2)
ty.Sub(ty, v0).Sub(ty, v1).Add(ty, t0)
t0.Add(&a.x, &a.z)
t1.Add(&b.x, &b.z)
tx := (&gfP2{}).Mul(t0, t1)
tx.Sub(tx, v0).Add(tx, v1).Sub(tx, v2)
e.x.Set(tx)
e.y.Set(ty)
e.z.Set(tz)
return e
}
func (e *gfP6) MulScalar(a *gfP6, b *gfP2) *gfP6 {
e.x.Mul(&a.x, b)
e.y.Mul(&a.y, b)
e.z.Mul(&a.z, b)
return e
}
func (e *gfP6) MulGFP(a *gfP6, b *gfP) *gfP6 {
e.x.MulScalar(&a.x, b)
e.y.MulScalar(&a.y, b)
e.z.MulScalar(&a.z, b)
return e
}
// MulTau computes τ·(aτ²+bτ+c) = bτ²+cτ+aξ
func (e *gfP6) MulTau(a *gfP6) *gfP6 {
tz := (&gfP2{}).MulXi(&a.x)
ty := (&gfP2{}).Set(&a.y)
e.y.Set(&a.z)
e.x.Set(ty)
e.z.Set(tz)
return e
}
func (e *gfP6) Square(a *gfP6) *gfP6 {
v0 := (&gfP2{}).Square(&a.z)
v1 := (&gfP2{}).Square(&a.y)
v2 := (&gfP2{}).Square(&a.x)
c0 := (&gfP2{}).Add(&a.x, &a.y)
c0.Square(c0).Sub(c0, v1).Sub(c0, v2).MulXi(c0).Add(c0, v0)
c1 := (&gfP2{}).Add(&a.y, &a.z)
c1.Square(c1).Sub(c1, v0).Sub(c1, v1)
xiV2 := (&gfP2{}).MulXi(v2)
c1.Add(c1, xiV2)
c2 := (&gfP2{}).Add(&a.x, &a.z)
c2.Square(c2).Sub(c2, v0).Add(c2, v1).Sub(c2, v2)
e.x.Set(c2)
e.y.Set(c1)
e.z.Set(c0)
return e
}
func (e *gfP6) Invert(a *gfP6) *gfP6 {
// See "Implementing cryptographic pairings", M. Scott, section 3.2.
// ftp://136.206.11.249/pub/crypto/pairings.pdf
// Here we can give a short explanation of how it works: let j be a cubic root of
// unity in GF(p²) so that 1+j+j²=0.
// Then (xτ² + yτ + z)(xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = (xτ² + yτ + z)(Cτ²+Bτ+A)
// = (x³ξ²+y³ξ+z³-3ξxyz) = F is an element of the base field (the norm).
//
// On the other hand (xj²τ² + yjτ + z)(xjτ² + yj²τ + z)
// = τ²(y²-ξxz) + τ(ξx²-yz) + (z²-ξxy)
//
// So that's why A = (z²-ξxy), B = (ξx²-yz), C = (y²-ξxz)
t1 := (&gfP2{}).Mul(&a.x, &a.y)
t1.MulXi(t1)
A := (&gfP2{}).Square(&a.z)
A.Sub(A, t1)
B := (&gfP2{}).Square(&a.x)
B.MulXi(B)
t1.Mul(&a.y, &a.z)
B.Sub(B, t1)
C := (&gfP2{}).Square(&a.y)
t1.Mul(&a.x, &a.z)
C.Sub(C, t1)
F := (&gfP2{}).Mul(C, &a.y)
F.MulXi(F)
t1.Mul(A, &a.z)
F.Add(F, t1)
t1.Mul(B, &a.x).MulXi(t1)
F.Add(F, t1)
F.Invert(F)
e.x.Mul(C, F)
e.y.Mul(B, F)
e.z.Mul(A, F)
return e
}

View File

@@ -0,0 +1,129 @@
// +build amd64,!generic
#define storeBlock(a0,a1,a2,a3, r) \
MOVQ a0, 0+r \
MOVQ a1, 8+r \
MOVQ a2, 16+r \
MOVQ a3, 24+r
#define loadBlock(r, a0,a1,a2,a3) \
MOVQ 0+r, a0 \
MOVQ 8+r, a1 \
MOVQ 16+r, a2 \
MOVQ 24+r, a3
#define gfpCarry(a0,a1,a2,a3,a4, b0,b1,b2,b3,b4) \
\ // b = a-p
MOVQ a0, b0 \
MOVQ a1, b1 \
MOVQ a2, b2 \
MOVQ a3, b3 \
MOVQ a4, b4 \
\
SUBQ ·p2+0(SB), b0 \
SBBQ ·p2+8(SB), b1 \
SBBQ ·p2+16(SB), b2 \
SBBQ ·p2+24(SB), b3 \
SBBQ $0, b4 \
\
\ // if b is negative then return a
\ // else return b
CMOVQCC b0, a0 \
CMOVQCC b1, a1 \
CMOVQCC b2, a2 \
CMOVQCC b3, a3
#include "mul_amd64.h"
#include "mul_bmi2_amd64.h"
TEXT ·gfpNeg(SB),0,$0-16
MOVQ ·p2+0(SB), R8
MOVQ ·p2+8(SB), R9
MOVQ ·p2+16(SB), R10
MOVQ ·p2+24(SB), R11
MOVQ a+8(FP), DI
SUBQ 0(DI), R8
SBBQ 8(DI), R9
SBBQ 16(DI), R10
SBBQ 24(DI), R11
MOVQ $0, AX
gfpCarry(R8,R9,R10,R11,AX, R12,R13,R14,R15,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpAdd(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ $0, R12
ADDQ 0(SI), R8
ADCQ 8(SI), R9
ADCQ 16(SI), R10
ADCQ 24(SI), R11
ADCQ $0, R12
gfpCarry(R8,R9,R10,R11,R12, R13,R14,R15,AX,BX)
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpSub(SB),0,$0-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
loadBlock(0(DI), R8,R9,R10,R11)
MOVQ ·p2+0(SB), R12
MOVQ ·p2+8(SB), R13
MOVQ ·p2+16(SB), R14
MOVQ ·p2+24(SB), R15
MOVQ $0, AX
SUBQ 0(SI), R8
SBBQ 8(SI), R9
SBBQ 16(SI), R10
SBBQ 24(SI), R11
CMOVQCC AX, R12
CMOVQCC AX, R13
CMOVQCC AX, R14
CMOVQCC AX, R15
ADDQ R12, R8
ADCQ R13, R9
ADCQ R14, R10
ADCQ R15, R11
MOVQ c+0(FP), DI
storeBlock(R8,R9,R10,R11, 0(DI))
RET
TEXT ·gfpMul(SB),0,$160-24
MOVQ a+8(FP), DI
MOVQ b+16(FP), SI
// Jump to a slightly different implementation if MULX isn't supported.
CMPB runtime·support_bmi2(SB), $0
JE nobmi2Mul
mulBMI2(0(DI),8(DI),16(DI),24(DI), 0(SI))
storeBlock( R8, R9,R10,R11, 0(SP))
storeBlock(R12,R13,R14,R15, 32(SP))
gfpReduceBMI2()
JMP end
nobmi2Mul:
mul(0(DI),8(DI),16(DI),24(DI), 0(SI), 0(SP))
gfpReduce(0(SP))
end:
MOVQ c+0(FP), DI
storeBlock(R12,R13,R14,R15, 0(DI))
RET

View File

@@ -0,0 +1,113 @@
// +build arm64,!generic
#define storeBlock(a0,a1,a2,a3, r) \
MOVD a0, 0+r \
MOVD a1, 8+r \
MOVD a2, 16+r \
MOVD a3, 24+r
#define loadBlock(r, a0,a1,a2,a3) \
MOVD 0+r, a0 \
MOVD 8+r, a1 \
MOVD 16+r, a2 \
MOVD 24+r, a3
#define loadModulus(p0,p1,p2,p3) \
MOVD ·p2+0(SB), p0 \
MOVD ·p2+8(SB), p1 \
MOVD ·p2+16(SB), p2 \
MOVD ·p2+24(SB), p3
#include "mul_arm64.h"
TEXT ·gfpNeg(SB),0,$0-16
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
loadModulus(R5,R6,R7,R8)
SUBS R1, R5, R1
SBCS R2, R6, R2
SBCS R3, R7, R3
SBCS R4, R8, R4
SUBS R5, R1, R5
SBCS R6, R2, R6
SBCS R7, R3, R7
SBCS R8, R4, R8
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpAdd(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
MOVD ZR, R0
ADDS R5, R1
ADCS R6, R2
ADCS R7, R3
ADCS R8, R4
ADCS ZR, R0
SUBS R9, R1, R5
SBCS R10, R2, R6
SBCS R11, R3, R7
SBCS R12, R4, R8
SBCS ZR, R0, R0
CSEL CS, R5, R1, R1
CSEL CS, R6, R2, R2
CSEL CS, R7, R3, R3
CSEL CS, R8, R4, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpSub(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
loadModulus(R9,R10,R11,R12)
SUBS R5, R1
SBCS R6, R2
SBCS R7, R3
SBCS R8, R4
CSEL CS, ZR, R9, R9
CSEL CS, ZR, R10, R10
CSEL CS, ZR, R11, R11
CSEL CS, ZR, R12, R12
ADDS R9, R1
ADCS R10, R2
ADCS R11, R3
ADCS R12, R4
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET
TEXT ·gfpMul(SB),0,$0-24
MOVD a+8(FP), R0
loadBlock(0(R0), R1,R2,R3,R4)
MOVD b+16(FP), R0
loadBlock(0(R0), R5,R6,R7,R8)
mul(R9,R10,R11,R12,R13,R14,R15,R16)
gfpReduce()
MOVD c+0(FP), R0
storeBlock(R1,R2,R3,R4, 0(R0))
RET

View File

@@ -0,0 +1,18 @@
// +build amd64,!generic arm64,!generic
package bn256
// This file contains forward declarations for the architecture-specific
// assembly implementations of these functions, provided that they exist.
//go:noescape
func gfpNeg(c, a *gfP)
//go:noescape
func gfpAdd(c, a, b *gfP)
//go:noescape
func gfpSub(c, a, b *gfP)
//go:noescape
func gfpMul(c, a, b *gfP)

View File

@@ -0,0 +1,173 @@
// +build !amd64,!arm64 generic
package bn256
func gfpCarry(a *gfP, head uint64) {
b := &gfP{}
var carry uint64
for i, pi := range p2 {
ai := a[i]
bi := ai - pi - carry
b[i] = bi
carry = (pi&^ai | (pi|^ai)&bi) >> 63
}
carry = carry &^ head
// If b is negative, then return a.
// Else return b.
carry = -carry
ncarry := ^carry
for i := 0; i < 4; i++ {
a[i] = (a[i] & carry) | (b[i] & ncarry)
}
}
func gfpNeg(c, a *gfP) {
var carry uint64
for i, pi := range p2 {
ai := a[i]
ci := pi - ai - carry
c[i] = ci
carry = (ai&^pi | (ai|^pi)&ci) >> 63
}
gfpCarry(c, 0)
}
func gfpAdd(c, a, b *gfP) {
var carry uint64
for i, ai := range a {
bi := b[i]
ci := ai + bi + carry
c[i] = ci
carry = (ai&bi | (ai|bi)&^ci) >> 63
}
gfpCarry(c, carry)
}
func gfpSub(c, a, b *gfP) {
t := &gfP{}
var carry uint64
for i, pi := range p2 {
bi := b[i]
ti := pi - bi - carry
t[i] = ti
carry = (bi&^pi | (bi|^pi)&ti) >> 63
}
carry = 0
for i, ai := range a {
ti := t[i]
ci := ai + ti + carry
c[i] = ci
carry = (ai&ti | (ai|ti)&^ci) >> 63
}
gfpCarry(c, carry)
}
func mul(a, b [4]uint64) [8]uint64 {
const (
mask16 uint64 = 0x0000ffff
mask32 uint64 = 0xffffffff
)
var buff [32]uint64
for i, ai := range a {
a0, a1, a2, a3 := ai&mask16, (ai>>16)&mask16, (ai>>32)&mask16, ai>>48
for j, bj := range b {
b0, b2 := bj&mask32, bj>>32
off := 4 * (i + j)
buff[off+0] += a0 * b0
buff[off+1] += a1 * b0
buff[off+2] += a2*b0 + a0*b2
buff[off+3] += a3*b0 + a1*b2
buff[off+4] += a2 * b2
buff[off+5] += a3 * b2
}
}
for i := uint(1); i < 4; i++ {
shift := 16 * i
var head, carry uint64
for j := uint(0); j < 8; j++ {
block := 4 * j
xi := buff[block]
yi := (buff[block+i] << shift) + head
zi := xi + yi + carry
buff[block] = zi
carry = (xi&yi | (xi|yi)&^zi) >> 63
head = buff[block+i] >> (64 - shift)
}
}
return [8]uint64{buff[0], buff[4], buff[8], buff[12], buff[16], buff[20], buff[24], buff[28]}
}
func halfMul(a, b [4]uint64) [4]uint64 {
const (
mask16 uint64 = 0x0000ffff
mask32 uint64 = 0xffffffff
)
var buff [18]uint64
for i, ai := range a {
a0, a1, a2, a3 := ai&mask16, (ai>>16)&mask16, (ai>>32)&mask16, ai>>48
for j, bj := range b {
if i+j > 3 {
break
}
b0, b2 := bj&mask32, bj>>32
off := 4 * (i + j)
buff[off+0] += a0 * b0
buff[off+1] += a1 * b0
buff[off+2] += a2*b0 + a0*b2
buff[off+3] += a3*b0 + a1*b2
buff[off+4] += a2 * b2
buff[off+5] += a3 * b2
}
}
for i := uint(1); i < 4; i++ {
shift := 16 * i
var head, carry uint64
for j := uint(0); j < 4; j++ {
block := 4 * j
xi := buff[block]
yi := (buff[block+i] << shift) + head
zi := xi + yi + carry
buff[block] = zi
carry = (xi&yi | (xi|yi)&^zi) >> 63
head = buff[block+i] >> (64 - shift)
}
}
return [4]uint64{buff[0], buff[4], buff[8], buff[12]}
}
func gfpMul(c, a, b *gfP) {
T := mul(*a, *b)
m := halfMul([4]uint64{T[0], T[1], T[2], T[3]}, np)
t := mul([4]uint64{m[0], m[1], m[2], m[3]}, p2)
var carry uint64
for i, Ti := range T {
ti := t[i]
zi := Ti + ti + carry
T[i] = zi
carry = (Ti&ti | (Ti|ti)&^zi) >> 63
}
*c = gfP{T[4], T[5], T[6], T[7]}
gfpCarry(c, carry)
}

View File

@@ -0,0 +1,60 @@
package bn256
import (
"testing"
)
// Tests that negation works the same way on both assembly-optimized and pure Go
// implementation.
func TestGFpNeg(t *testing.T) {
n := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
w := &gfP{0xfedcba9876543211, 0x0123456789abcdef, 0x2152411021524110, 0x0114251201142512}
h := &gfP{}
gfpNeg(h, n)
if *h != *w {
t.Errorf("negation mismatch: have %#x, want %#x", *h, *w)
}
}
// Tests that addition works the same way on both assembly-optimized and pure Go
// implementation.
func TestGFpAdd(t *testing.T) {
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
w := &gfP{0xc3df73e9278302b8, 0x687e956e978e3572, 0x254954275c18417f, 0xad354b6afc67f9b4}
h := &gfP{}
gfpAdd(h, a, b)
if *h != *w {
t.Errorf("addition mismatch: have %#x, want %#x", *h, *w)
}
}
// Tests that subtraction works the same way on both assembly-optimized and pure Go
// implementation.
func TestGFpSub(t *testing.T) {
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
w := &gfP{0x02468acf13579bdf, 0xfdb97530eca86420, 0xdfc1e401dfc1e402, 0x203e1bfe203e1bfd}
h := &gfP{}
gfpSub(h, a, b)
if *h != *w {
t.Errorf("subtraction mismatch: have %#x, want %#x", *h, *w)
}
}
// Tests that multiplication works the same way on both assembly-optimized and pure Go
// implementation.
func TestGFpMul(t *testing.T) {
a := &gfP{0x0123456789abcdef, 0xfedcba9876543210, 0xdeadbeefdeadbeef, 0xfeebdaedfeebdaed}
b := &gfP{0xfedcba9876543210, 0x0123456789abcdef, 0xfeebdaedfeebdaed, 0xdeadbeefdeadbeef}
w := &gfP{0xcbcbd377f7ad22d3, 0x3b89ba5d849379bf, 0x87b61627bd38b6d2, 0xc44052a2a0e654b2}
h := &gfP{}
gfpMul(h, a, b)
if *h != *w {
t.Errorf("multiplication mismatch: have %#x, want %#x", *h, *w)
}
}

View File

@@ -0,0 +1,115 @@
package bn256
import (
"math/big"
)
var half = new(big.Int).Rsh(Order, 1)
var curveLattice = &lattice{
vectors: [][]*big.Int{
{bigFromBase10("147946756881789319000765030803803410728"), bigFromBase10("147946756881789319010696353538189108491")},
{bigFromBase10("147946756881789319020627676272574806254"), bigFromBase10("-147946756881789318990833708069417712965")},
},
inverse: []*big.Int{
bigFromBase10("147946756881789318990833708069417712965"),
bigFromBase10("147946756881789319010696353538189108491"),
},
det: bigFromBase10("43776485743678550444492811490514550177096728800832068687396408373151616991234"),
}
var targetLattice = &lattice{
vectors: [][]*big.Int{
{bigFromBase10("9931322734385697761"), bigFromBase10("9931322734385697761"), bigFromBase10("9931322734385697763"), bigFromBase10("9931322734385697764")},
{bigFromBase10("4965661367192848881"), bigFromBase10("4965661367192848881"), bigFromBase10("4965661367192848882"), bigFromBase10("-9931322734385697762")},
{bigFromBase10("-9931322734385697762"), bigFromBase10("-4965661367192848881"), bigFromBase10("4965661367192848881"), bigFromBase10("-4965661367192848882")},
{bigFromBase10("9931322734385697763"), bigFromBase10("-4965661367192848881"), bigFromBase10("-4965661367192848881"), bigFromBase10("-4965661367192848881")},
},
inverse: []*big.Int{
bigFromBase10("734653495049373973658254490726798021314063399421879442165"),
bigFromBase10("147946756881789319000765030803803410728"),
bigFromBase10("-147946756881789319005730692170996259609"),
bigFromBase10("1469306990098747947464455738335385361643788813749140841702"),
},
det: new(big.Int).Set(Order),
}
type lattice struct {
vectors [][]*big.Int
inverse []*big.Int
det *big.Int
}
// decompose takes a scalar mod Order as input and finds a short, positive decomposition of it with respect to the lattice basis.
func (l *lattice) decompose(k *big.Int) []*big.Int {
n := len(l.inverse)
// Calculate closest vector in lattice to <k,0,0,...> with Babai's rounding.
c := make([]*big.Int, n)
for i := 0; i < n; i++ {
c[i] = new(big.Int).Mul(k, l.inverse[i])
round(c[i], l.det)
}
// Transform vectors according to c and subtract <k,0,0,...>.
out := make([]*big.Int, n)
temp := new(big.Int)
for i := 0; i < n; i++ {
out[i] = new(big.Int)
for j := 0; j < n; j++ {
temp.Mul(c[j], l.vectors[j][i])
out[i].Add(out[i], temp)
}
out[i].Neg(out[i])
out[i].Add(out[i], l.vectors[0][i]).Add(out[i], l.vectors[0][i])
}
out[0].Add(out[0], k)
return out
}
func (l *lattice) Precompute(add func(i, j uint)) {
n := uint(len(l.vectors))
total := uint(1) << n
for i := uint(0); i < n; i++ {
for j := uint(0); j < total; j++ {
if (j>>i)&1 == 1 {
add(i, j)
}
}
}
}
func (l *lattice) Multi(scalar *big.Int) []uint8 {
decomp := l.decompose(scalar)
maxLen := 0
for _, x := range decomp {
if x.BitLen() > maxLen {
maxLen = x.BitLen()
}
}
out := make([]uint8, maxLen)
for j, x := range decomp {
for i := 0; i < maxLen; i++ {
out[i] += uint8(x.Bit(i)) << uint(j)
}
}
return out
}
// round sets num to num/denom rounded to the nearest integer.
func round(num, denom *big.Int) {
r := new(big.Int)
num.DivMod(num, denom, r)
if r.Cmp(half) == 1 {
num.Add(num, big.NewInt(1))
}
}
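decompose is Babai's rounding: c ≈ k times the first row of the basis inverse picks the lattice point closest to ⟨k, 0, …, 0⟩, and the returned vector is the short difference, which the code then shifts by 2·vectors[0] (and adds k back into the first slot) so that, per the comment above, every coordinate comes out positive for the bit-windowing in Multi. The following is a toy two-dimensional sketch of the rounding step only; the basis, adjugate row and k are made-up numbers with no relation to the bn256 lattices:

package main

import (
	"fmt"
	"math"
)

// Babai rounding on a toy 2-dimensional lattice: find the lattice vector
// closest to (k, 0) and return the short difference.
func main() {
	basis := [2][2]float64{{3, 1}, {1, 2}} // rows are the basis vectors, det = 5
	det := 5.0
	adj := [2]float64{2, -1} // first row of the adjugate, i.e. det times the first row of the inverse
	k := 7.0

	// c ≈ (k, 0) · V^-1, rounded to the nearest integers (the role of round()).
	var c [2]float64
	for i := range c {
		c[i] = math.Round(k * adj[i] / det)
	}

	// short = (k, 0) - c·V; for a reduced basis its entries stay small.
	short := [2]float64{k, 0}
	for i := range basis {
		for j := range basis[i] {
			short[j] -= c[i] * basis[i][j]
		}
	}
	fmt.Println(c, short) // prints [3 -1] [-1 -1]
}

The real code does the same in math/big, and in four dimensions for targetLattice.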

View File

@@ -0,0 +1,29 @@
package bn256
import (
"crypto/rand"
"testing"
)
func TestLatticeReduceCurve(t *testing.T) {
k, _ := rand.Int(rand.Reader, Order)
ks := curveLattice.decompose(k)
if ks[0].BitLen() > 130 || ks[1].BitLen() > 130 {
t.Fatal("reduction too large")
} else if ks[0].Sign() < 0 || ks[1].Sign() < 0 {
t.Fatal("reduction must be positive")
}
}
func TestLatticeReduceTarget(t *testing.T) {
k, _ := rand.Int(rand.Reader, Order)
ks := targetLattice.decompose(k)
if ks[0].BitLen() > 66 || ks[1].BitLen() > 66 || ks[2].BitLen() > 66 || ks[3].BitLen() > 66 {
t.Fatal("reduction too large")
} else if ks[0].Sign() < 0 || ks[1].Sign() < 0 || ks[2].Sign() < 0 || ks[3].Sign() < 0 {
t.Fatal("reduction must be positive")
}
}

View File

@@ -0,0 +1,181 @@
#define mul(a0,a1,a2,a3, rb, stack) \
MOVQ a0, AX \
MULQ 0+rb \
MOVQ AX, R8 \
MOVQ DX, R9 \
MOVQ a0, AX \
MULQ 8+rb \
ADDQ AX, R9 \
ADCQ $0, DX \
MOVQ DX, R10 \
MOVQ a0, AX \
MULQ 16+rb \
ADDQ AX, R10 \
ADCQ $0, DX \
MOVQ DX, R11 \
MOVQ a0, AX \
MULQ 24+rb \
ADDQ AX, R11 \
ADCQ $0, DX \
MOVQ DX, R12 \
\
storeBlock(R8,R9,R10,R11, 0+stack) \
MOVQ R12, 32+stack \
\
MOVQ a1, AX \
MULQ 0+rb \
MOVQ AX, R8 \
MOVQ DX, R9 \
MOVQ a1, AX \
MULQ 8+rb \
ADDQ AX, R9 \
ADCQ $0, DX \
MOVQ DX, R10 \
MOVQ a1, AX \
MULQ 16+rb \
ADDQ AX, R10 \
ADCQ $0, DX \
MOVQ DX, R11 \
MOVQ a1, AX \
MULQ 24+rb \
ADDQ AX, R11 \
ADCQ $0, DX \
MOVQ DX, R12 \
\
ADDQ 8+stack, R8 \
ADCQ 16+stack, R9 \
ADCQ 24+stack, R10 \
ADCQ 32+stack, R11 \
ADCQ $0, R12 \
storeBlock(R8,R9,R10,R11, 8+stack) \
MOVQ R12, 40+stack \
\
MOVQ a2, AX \
MULQ 0+rb \
MOVQ AX, R8 \
MOVQ DX, R9 \
MOVQ a2, AX \
MULQ 8+rb \
ADDQ AX, R9 \
ADCQ $0, DX \
MOVQ DX, R10 \
MOVQ a2, AX \
MULQ 16+rb \
ADDQ AX, R10 \
ADCQ $0, DX \
MOVQ DX, R11 \
MOVQ a2, AX \
MULQ 24+rb \
ADDQ AX, R11 \
ADCQ $0, DX \
MOVQ DX, R12 \
\
ADDQ 16+stack, R8 \
ADCQ 24+stack, R9 \
ADCQ 32+stack, R10 \
ADCQ 40+stack, R11 \
ADCQ $0, R12 \
storeBlock(R8,R9,R10,R11, 16+stack) \
MOVQ R12, 48+stack \
\
MOVQ a3, AX \
MULQ 0+rb \
MOVQ AX, R8 \
MOVQ DX, R9 \
MOVQ a3, AX \
MULQ 8+rb \
ADDQ AX, R9 \
ADCQ $0, DX \
MOVQ DX, R10 \
MOVQ a3, AX \
MULQ 16+rb \
ADDQ AX, R10 \
ADCQ $0, DX \
MOVQ DX, R11 \
MOVQ a3, AX \
MULQ 24+rb \
ADDQ AX, R11 \
ADCQ $0, DX \
MOVQ DX, R12 \
\
ADDQ 24+stack, R8 \
ADCQ 32+stack, R9 \
ADCQ 40+stack, R10 \
ADCQ 48+stack, R11 \
ADCQ $0, R12 \
storeBlock(R8,R9,R10,R11, 24+stack) \
MOVQ R12, 56+stack
#define gfpReduce(stack) \
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
MOVQ ·np+0(SB), AX \
MULQ 0+stack \
MOVQ AX, R8 \
MOVQ DX, R9 \
MOVQ ·np+0(SB), AX \
MULQ 8+stack \
ADDQ AX, R9 \
ADCQ $0, DX \
MOVQ DX, R10 \
MOVQ ·np+0(SB), AX \
MULQ 16+stack \
ADDQ AX, R10 \
ADCQ $0, DX \
MOVQ DX, R11 \
MOVQ ·np+0(SB), AX \
MULQ 24+stack \
ADDQ AX, R11 \
\
MOVQ ·np+8(SB), AX \
MULQ 0+stack \
MOVQ AX, R12 \
MOVQ DX, R13 \
MOVQ ·np+8(SB), AX \
MULQ 8+stack \
ADDQ AX, R13 \
ADCQ $0, DX \
MOVQ DX, R14 \
MOVQ ·np+8(SB), AX \
MULQ 16+stack \
ADDQ AX, R14 \
\
ADDQ R12, R9 \
ADCQ R13, R10 \
ADCQ R14, R11 \
\
MOVQ ·np+16(SB), AX \
MULQ 0+stack \
MOVQ AX, R12 \
MOVQ DX, R13 \
MOVQ ·np+16(SB), AX \
MULQ 8+stack \
ADDQ AX, R13 \
\
ADDQ R12, R10 \
ADCQ R13, R11 \
\
MOVQ ·np+24(SB), AX \
MULQ 0+stack \
ADDQ AX, R11 \
\
storeBlock(R8,R9,R10,R11, 64+stack) \
\
\ // m * N
mul(·p2+0(SB),·p2+8(SB),·p2+16(SB),·p2+24(SB), 64+stack, 96+stack) \
\
\ // Add the 512-bit intermediate to m*N
loadBlock(96+stack, R8,R9,R10,R11) \
loadBlock(128+stack, R12,R13,R14,R15) \
\
MOVQ $0, AX \
ADDQ 0+stack, R8 \
ADCQ 8+stack, R9 \
ADCQ 16+stack, R10 \
ADCQ 24+stack, R11 \
ADCQ 32+stack, R12 \
ADCQ 40+stack, R13 \
ADCQ 48+stack, R14 \
ADCQ 56+stack, R15 \
ADCQ $0, AX \
\
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)
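The mul macro above is schoolbook 4×4-limb multiplication: each MULQ leaves a 64×64→128-bit product in DX:AX and the ADDQ/ADCQ chains fold it into a 512-bit accumulator on the stack, one operand limb (one "row") at a time. As a reading aid only, here is a pure-Go sketch of the same row-by-row pattern; the helper name mul256 is made up, and math/bits.Mul64/Add64 postdate the Go versions targeted by this change:

package main

import (
	"fmt"
	"math/bits"
)

// mul256 multiplies two little-endian 4x64-bit integers into an 8-limb result,
// mirroring the MULQ/ADCQ accumulation of the assembly macro.
func mul256(a, b [4]uint64) [8]uint64 {
	var out [8]uint64
	for i := range a {
		var carry uint64
		for j := range b {
			hi, lo := bits.Mul64(a[i], b[j])       // like MULQ: 128-bit product
			sum, c1 := bits.Add64(out[i+j], lo, 0) // like ADDQ/ADCQ into the stack slot
			sum, c2 := bits.Add64(sum, carry, 0)
			out[i+j] = sum
			carry = hi + c1 + c2 // fits in 64 bits: a[i]*b[j] + limb + carry < 2^128
		}
		out[i+4] = carry // the carry-out limb of this row (MOVQ R12, 32+stack etc.)
	}
	return out
}

func main() {
	fmt.Println(mul256([4]uint64{1, 0, 0, 0}, [4]uint64{0, 1, 0, 0})) // [0 1 0 0 0 0 0 0]
}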

View File

@@ -0,0 +1,133 @@
#define mul(c0,c1,c2,c3,c4,c5,c6,c7) \
MUL R1, R5, c0 \
UMULH R1, R5, c1 \
MUL R1, R6, R0 \
ADDS R0, c1 \
UMULH R1, R6, c2 \
MUL R1, R7, R0 \
ADCS R0, c2 \
UMULH R1, R7, c3 \
MUL R1, R8, R0 \
ADCS R0, c3 \
UMULH R1, R8, c4 \
ADCS ZR, c4 \
\
MUL R2, R5, R25 \
UMULH R2, R5, R26 \
MUL R2, R6, R0 \
ADDS R0, R26 \
UMULH R2, R6, R27 \
MUL R2, R7, R0 \
ADCS R0, R27 \
UMULH R2, R7, R29 \
MUL R2, R8, R0 \
ADCS R0, R29 \
UMULH R2, R8, c5 \
ADCS ZR, c5 \
ADDS R25, c1 \
ADCS R26, c2 \
ADCS R27, c3 \
ADCS R29, c4 \
ADCS ZR, c5 \
\
MUL R3, R5, R25 \
UMULH R3, R5, R26 \
MUL R3, R6, R0 \
ADDS R0, R26 \
UMULH R3, R6, R27 \
MUL R3, R7, R0 \
ADCS R0, R27 \
UMULH R3, R7, R29 \
MUL R3, R8, R0 \
ADCS R0, R29 \
UMULH R3, R8, c6 \
ADCS ZR, c6 \
ADDS R25, c2 \
ADCS R26, c3 \
ADCS R27, c4 \
ADCS R29, c5 \
ADCS ZR, c6 \
\
MUL R4, R5, R25 \
UMULH R4, R5, R26 \
MUL R4, R6, R0 \
ADDS R0, R26 \
UMULH R4, R6, R27 \
MUL R4, R7, R0 \
ADCS R0, R27 \
UMULH R4, R7, R29 \
MUL R4, R8, R0 \
ADCS R0, R29 \
UMULH R4, R8, c7 \
ADCS ZR, c7 \
ADDS R25, c3 \
ADCS R26, c4 \
ADCS R27, c5 \
ADCS R29, c6 \
ADCS ZR, c7
#define gfpReduce() \
\ // m = (T * N') mod R, store m in R1:R2:R3:R4
MOVD ·np+0(SB), R17 \
MOVD ·np+8(SB), R18 \
MOVD ·np+16(SB), R19 \
MOVD ·np+24(SB), R20 \
\
MUL R9, R17, R1 \
UMULH R9, R17, R2 \
MUL R9, R18, R0 \
ADDS R0, R2 \
UMULH R9, R18, R3 \
MUL R9, R19, R0 \
ADCS R0, R3 \
UMULH R9, R19, R4 \
MUL R9, R20, R0 \
ADCS R0, R4 \
\
MUL R10, R17, R21 \
UMULH R10, R17, R22 \
MUL R10, R18, R0 \
ADDS R0, R22 \
UMULH R10, R18, R23 \
MUL R10, R19, R0 \
ADCS R0, R23 \
ADDS R21, R2 \
ADCS R22, R3 \
ADCS R23, R4 \
\
MUL R11, R17, R21 \
UMULH R11, R17, R22 \
MUL R11, R18, R0 \
ADDS R0, R22 \
ADDS R21, R3 \
ADCS R22, R4 \
\
MUL R12, R17, R21 \
ADDS R21, R4 \
\
\ // m * N
loadModulus(R5,R6,R7,R8) \
mul(R17,R18,R19,R20,R21,R22,R23,R24) \
\
\ // Add the 512-bit intermediate to m*N
MOVD ZR, R25 \
ADDS R9, R17 \
ADCS R10, R18 \
ADCS R11, R19 \
ADCS R12, R20 \
ADCS R13, R21 \
ADCS R14, R22 \
ADCS R15, R23 \
ADCS R16, R24 \
ADCS ZR, R25 \
\
\ // Our output is R21:R22:R23:R24. Reduce mod p if necessary.
SUBS R5, R21, R10 \
SBCS R6, R22, R11 \
SBCS R7, R23, R12 \
SBCS R8, R24, R13 \
\
CSEL CS, R10, R21, R1 \
CSEL CS, R11, R22, R2 \
CSEL CS, R12, R23, R3 \
CSEL CS, R13, R24, R4

View File

@@ -0,0 +1,112 @@
#define mulBMI2(a0,a1,a2,a3, rb) \
MOVQ a0, DX \
MOVQ $0, R13 \
MULXQ 0+rb, R8, R9 \
MULXQ 8+rb, AX, R10 \
ADDQ AX, R9 \
MULXQ 16+rb, AX, R11 \
ADCQ AX, R10 \
MULXQ 24+rb, AX, R12 \
ADCQ AX, R11 \
ADCQ $0, R12 \
ADCQ $0, R13 \
\
MOVQ a1, DX \
MOVQ $0, R14 \
MULXQ 0+rb, AX, BX \
ADDQ AX, R9 \
ADCQ BX, R10 \
MULXQ 16+rb, AX, BX \
ADCQ AX, R11 \
ADCQ BX, R12 \
ADCQ $0, R13 \
MULXQ 8+rb, AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
MULXQ 24+rb, AX, BX \
ADCQ AX, R12 \
ADCQ BX, R13 \
ADCQ $0, R14 \
\
MOVQ a2, DX \
MOVQ $0, R15 \
MULXQ 0+rb, AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
MULXQ 16+rb, AX, BX \
ADCQ AX, R12 \
ADCQ BX, R13 \
ADCQ $0, R14 \
MULXQ 8+rb, AX, BX \
ADDQ AX, R11 \
ADCQ BX, R12 \
MULXQ 24+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
ADCQ $0, R15 \
\
MOVQ a3, DX \
MULXQ 0+rb, AX, BX \
ADDQ AX, R11 \
ADCQ BX, R12 \
MULXQ 16+rb, AX, BX \
ADCQ AX, R13 \
ADCQ BX, R14 \
ADCQ $0, R15 \
MULXQ 8+rb, AX, BX \
ADDQ AX, R12 \
ADCQ BX, R13 \
MULXQ 24+rb, AX, BX \
ADCQ AX, R14 \
ADCQ BX, R15
#define gfpReduceBMI2() \
\ // m = (T * N') mod R, store m in R8:R9:R10:R11
MOVQ ·np+0(SB), DX \
MULXQ 0(SP), R8, R9 \
MULXQ 8(SP), AX, R10 \
ADDQ AX, R9 \
MULXQ 16(SP), AX, R11 \
ADCQ AX, R10 \
MULXQ 24(SP), AX, BX \
ADCQ AX, R11 \
\
MOVQ ·np+8(SB), DX \
MULXQ 0(SP), AX, BX \
ADDQ AX, R9 \
ADCQ BX, R10 \
MULXQ 16(SP), AX, BX \
ADCQ AX, R11 \
MULXQ 8(SP), AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
\
MOVQ ·np+16(SB), DX \
MULXQ 0(SP), AX, BX \
ADDQ AX, R10 \
ADCQ BX, R11 \
MULXQ 8(SP), AX, BX \
ADDQ AX, R11 \
\
MOVQ ·np+24(SB), DX \
MULXQ 0(SP), AX, BX \
ADDQ AX, R11 \
\
storeBlock(R8,R9,R10,R11, 64(SP)) \
\
\ // m * N
mulBMI2(·p2+0(SB),·p2+8(SB),·p2+16(SB),·p2+24(SB), 64(SP)) \
\
\ // Add the 512-bit intermediate to m*N
MOVQ $0, AX \
ADDQ 0(SP), R8 \
ADCQ 8(SP), R9 \
ADCQ 16(SP), R10 \
ADCQ 24(SP), R11 \
ADCQ 32(SP), R12 \
ADCQ 40(SP), R13 \
ADCQ 48(SP), R14 \
ADCQ 56(SP), R15 \
ADCQ $0, AX \
\
gfpCarry(R12,R13,R14,R15,AX, R8,R9,R10,R11,BX)

View File

@@ -0,0 +1,271 @@
package bn256
func lineFunctionAdd(r, p *twistPoint, q *curvePoint, r2 *gfP2) (a, b, c *gfP2, rOut *twistPoint) {
// See the mixed addition algorithm from "Faster Computation of the
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
B := (&gfP2{}).Mul(&p.x, &r.t)
D := (&gfP2{}).Add(&p.y, &r.z)
D.Square(D).Sub(D, r2).Sub(D, &r.t).Mul(D, &r.t)
H := (&gfP2{}).Sub(B, &r.x)
I := (&gfP2{}).Square(H)
E := (&gfP2{}).Add(I, I)
E.Add(E, E)
J := (&gfP2{}).Mul(H, E)
L1 := (&gfP2{}).Sub(D, &r.y)
L1.Sub(L1, &r.y)
V := (&gfP2{}).Mul(&r.x, E)
rOut = &twistPoint{}
rOut.x.Square(L1).Sub(&rOut.x, J).Sub(&rOut.x, V).Sub(&rOut.x, V)
rOut.z.Add(&r.z, H).Square(&rOut.z).Sub(&rOut.z, &r.t).Sub(&rOut.z, I)
t := (&gfP2{}).Sub(V, &rOut.x)
t.Mul(t, L1)
t2 := (&gfP2{}).Mul(&r.y, J)
t2.Add(t2, t2)
rOut.y.Sub(t, t2)
rOut.t.Square(&rOut.z)
t.Add(&p.y, &rOut.z).Square(t).Sub(t, r2).Sub(t, &rOut.t)
t2.Mul(L1, &p.x)
t2.Add(t2, t2)
a = (&gfP2{}).Sub(t2, t)
c = (&gfP2{}).MulScalar(&rOut.z, &q.y)
c.Add(c, c)
b = (&gfP2{}).Neg(L1)
b.MulScalar(b, &q.x).Add(b, b)
return
}
func lineFunctionDouble(r *twistPoint, q *curvePoint) (a, b, c *gfP2, rOut *twistPoint) {
// See the doubling algorithm for a=0 from "Faster Computation of the
// Tate Pairing", http://arxiv.org/pdf/0904.0854v3.pdf
A := (&gfP2{}).Square(&r.x)
B := (&gfP2{}).Square(&r.y)
C := (&gfP2{}).Square(B)
D := (&gfP2{}).Add(&r.x, B)
D.Square(D).Sub(D, A).Sub(D, C).Add(D, D)
E := (&gfP2{}).Add(A, A)
E.Add(E, A)
G := (&gfP2{}).Square(E)
rOut = &twistPoint{}
rOut.x.Sub(G, D).Sub(&rOut.x, D)
rOut.z.Add(&r.y, &r.z).Square(&rOut.z).Sub(&rOut.z, B).Sub(&rOut.z, &r.t)
rOut.y.Sub(D, &rOut.x).Mul(&rOut.y, E)
t := (&gfP2{}).Add(C, C)
t.Add(t, t).Add(t, t)
rOut.y.Sub(&rOut.y, t)
rOut.t.Square(&rOut.z)
t.Mul(E, &r.t).Add(t, t)
b = (&gfP2{}).Neg(t)
b.MulScalar(b, &q.x)
a = (&gfP2{}).Add(&r.x, E)
a.Square(a).Sub(a, A).Sub(a, G)
t.Add(B, B).Add(t, t)
a.Sub(a, t)
c = (&gfP2{}).Mul(&rOut.z, &r.t)
c.Add(c, c).MulScalar(c, &q.y)
return
}
func mulLine(ret *gfP12, a, b, c *gfP2) {
a2 := &gfP6{}
a2.y.Set(a)
a2.z.Set(b)
a2.Mul(a2, &ret.x)
t3 := (&gfP6{}).MulScalar(&ret.y, c)
t := (&gfP2{}).Add(b, c)
t2 := &gfP6{}
t2.y.Set(a)
t2.z.Set(t)
ret.x.Add(&ret.x, &ret.y)
ret.y.Set(t3)
ret.x.Mul(&ret.x, t2).Sub(&ret.x, a2).Sub(&ret.x, &ret.y)
a2.MulTau(a2)
ret.y.Add(&ret.y, a2)
}
// sixuPlus2NAF is 6u+2 in non-adjacent form.
var sixuPlus2NAF = []int8{0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0,
0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1,
1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1,
1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1}
// miller implements the Miller loop for calculating the Optimal Ate pairing.
// See algorithm 1 from http://cryptojedi.org/papers/dclxvi-20100714.pdf
func miller(q *twistPoint, p *curvePoint) *gfP12 {
ret := (&gfP12{}).SetOne()
aAffine := &twistPoint{}
aAffine.Set(q)
aAffine.MakeAffine()
bAffine := &curvePoint{}
bAffine.Set(p)
bAffine.MakeAffine()
minusA := &twistPoint{}
minusA.Neg(aAffine)
r := &twistPoint{}
r.Set(aAffine)
r2 := (&gfP2{}).Square(&aAffine.y)
for i := len(sixuPlus2NAF) - 1; i > 0; i-- {
a, b, c, newR := lineFunctionDouble(r, bAffine)
if i != len(sixuPlus2NAF)-1 {
ret.Square(ret)
}
mulLine(ret, a, b, c)
r = newR
switch sixuPlus2NAF[i-1] {
case 1:
a, b, c, newR = lineFunctionAdd(r, aAffine, bAffine, r2)
case -1:
a, b, c, newR = lineFunctionAdd(r, minusA, bAffine, r2)
default:
continue
}
mulLine(ret, a, b, c)
r = newR
}
// In order to calculate Q1 we have to convert q from the sextic twist
// to the full GF(p^12) group, apply the Frobenius there, and convert
// back.
//
// The twist isomorphism is (x', y') -> (xω², yω³). If we consider just
// x for a moment, then after applying the Frobenius, we have x̄ω^(2p)
// where x̄ is the conjugate of x. If we are going to apply the inverse
// isomorphism we need a value with a single coefficient of ω² so we
// rewrite this as x̄ω^(2p-2)ω². ξ⁶ = ω and, due to the construction of
// p, 2p-2 is a multiple of six. Therefore we can rewrite as
// x̄ξ^((p-1)/3)ω² and applying the inverse isomorphism eliminates the
// ω².
//
// A similar argument can be made for the y value.
q1 := &twistPoint{}
q1.x.Conjugate(&aAffine.x).Mul(&q1.x, xiToPMinus1Over3)
q1.y.Conjugate(&aAffine.y).Mul(&q1.y, xiToPMinus1Over2)
q1.z.SetOne()
q1.t.SetOne()
// For Q2 we are applying the p² Frobenius. The two conjugations cancel
// out and we are left only with the factors from the isomorphism. In
// the case of x, we end up with a pure number which is why
// xiToPSquaredMinus1Over3 is ∈ GF(p). With y we get a factor of -1. We
// ignore this to end up with -Q2.
minusQ2 := &twistPoint{}
minusQ2.x.MulScalar(&aAffine.x, xiToPSquaredMinus1Over3)
minusQ2.y.Set(&aAffine.y)
minusQ2.z.SetOne()
minusQ2.t.SetOne()
r2.Square(&q1.y)
a, b, c, newR := lineFunctionAdd(r, q1, bAffine, r2)
mulLine(ret, a, b, c)
r = newR
r2.Square(&minusQ2.y)
a, b, c, newR = lineFunctionAdd(r, minusQ2, bAffine, r2)
mulLine(ret, a, b, c)
r = newR
return ret
}
// finalExponentiation computes the (p¹²-1)/Order-th power of an element of
// GF(p¹²) to obtain an element of GT (steps 13-15 of algorithm 1 from
// http://cryptojedi.org/papers/dclxvi-20100714.pdf)
func finalExponentiation(in *gfP12) *gfP12 {
t1 := &gfP12{}
// This is the p^6-Frobenius
t1.x.Neg(&in.x)
t1.y.Set(&in.y)
inv := &gfP12{}
inv.Invert(in)
t1.Mul(t1, inv)
t2 := (&gfP12{}).FrobeniusP2(t1)
t1.Mul(t1, t2)
fp := (&gfP12{}).Frobenius(t1)
fp2 := (&gfP12{}).FrobeniusP2(t1)
fp3 := (&gfP12{}).Frobenius(fp2)
fu := (&gfP12{}).Exp(t1, u)
fu2 := (&gfP12{}).Exp(fu, u)
fu3 := (&gfP12{}).Exp(fu2, u)
y3 := (&gfP12{}).Frobenius(fu)
fu2p := (&gfP12{}).Frobenius(fu2)
fu3p := (&gfP12{}).Frobenius(fu3)
y2 := (&gfP12{}).FrobeniusP2(fu2)
y0 := &gfP12{}
y0.Mul(fp, fp2).Mul(y0, fp3)
y1 := (&gfP12{}).Conjugate(t1)
y5 := (&gfP12{}).Conjugate(fu2)
y3.Conjugate(y3)
y4 := (&gfP12{}).Mul(fu, fu2p)
y4.Conjugate(y4)
y6 := (&gfP12{}).Mul(fu3, fu3p)
y6.Conjugate(y6)
t0 := (&gfP12{}).Square(y6)
t0.Mul(t0, y4).Mul(t0, y5)
t1.Mul(y3, y5).Mul(t1, t0)
t0.Mul(t0, y2)
t1.Square(t1).Mul(t1, t0).Square(t1)
t0.Mul(t1, y1)
t1.Mul(t1, y0)
t0.Square(t0).Mul(t0, t1)
return t0
}
func optimalAte(a *twistPoint, b *curvePoint) *gfP12 {
e := miller(a, b)
ret := finalExponentiation(e)
if a.IsInfinity() || b.IsInfinity() {
ret.SetOne()
}
return ret
}
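sixuPlus2NAF lists the signed digits of 6u+2, least-significant digit first, which is why the Miller loop walks the slice from the top and performs an extra addition step for digits ±1. A small sketch that decodes such a digit string (the helper name nafValue is made up, and the curve parameter u = 4965661367192848881 is assumed from the package's constants); both printed values are 29793968203157093288:

package main

import (
	"fmt"
	"math/big"
)

// nafValue decodes a least-significant-first signed-digit string (digits in
// {-1, 0, 1}) back into the integer it encodes.
func nafValue(naf []int8) *big.Int {
	v := new(big.Int)
	for i := len(naf) - 1; i >= 0; i-- {
		v.Lsh(v, 1)
		v.Add(v, big.NewInt(int64(naf[i])))
	}
	return v
}

func main() {
	naf := []int8{0, 0, 0, 1, 0, 1, 0, -1, 0, 0, 1, -1, 0, 0, 1, 0,
		0, 1, 1, 0, -1, 0, 0, 1, 0, -1, 0, 0, 0, 0, 1, 1,
		1, 0, 0, -1, 0, 0, 1, 0, 0, 0, 0, 0, -1, 0, 0, 1,
		1, 0, 0, -1, 0, 0, 0, 1, 1, 0, -1, 0, 0, 1, 0, 1, 1} // copy of sixuPlus2NAF
	u, _ := new(big.Int).SetString("4965661367192848881", 10)
	sixUPlus2 := new(big.Int).Mul(u, big.NewInt(6))
	sixUPlus2.Add(sixUPlus2, big.NewInt(2))
	fmt.Println(nafValue(naf)) // 29793968203157093288
	fmt.Println(sixUPlus2)     // 29793968203157093288
}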

View File

@@ -0,0 +1,204 @@
package bn256
import (
"math/big"
)
// twistPoint implements the elliptic curve y²=x³+3/ξ over GF(p²). Points are
// kept in Jacobian form and t=z² when valid. The group G₂ is the set of
// n-torsion points of this curve over GF(p²) (where n = Order)
type twistPoint struct {
x, y, z, t gfP2
}
var twistB = &gfP2{
gfP{0x38e7ecccd1dcff67, 0x65f0b37d93ce0d3e, 0xd749d0dd22ac00aa, 0x0141b9ce4a688d4d},
gfP{0x3bf938e377b802a8, 0x020b1b273633535d, 0x26b7edf049755260, 0x2514c6324384a86d},
}
// twistGen is the generator of group G₂.
var twistGen = &twistPoint{
gfP2{
gfP{0xafb4737da84c6140, 0x6043dd5a5802d8c4, 0x09e950fc52a02f86, 0x14fef0833aea7b6b},
gfP{0x8e83b5d102bc2026, 0xdceb1935497b0172, 0xfbb8264797811adf, 0x19573841af96503b},
},
gfP2{
gfP{0x64095b56c71856ee, 0xdc57f922327d3cbb, 0x55f935be33351076, 0x0da4a0e693fd6482},
gfP{0x619dfa9d886be9f6, 0xfe7fd297f59e9b78, 0xff9e1a62231b7dfe, 0x28fd7eebae9e4206},
},
gfP2{*newGFp(0), *newGFp(1)},
gfP2{*newGFp(0), *newGFp(1)},
}
func (c *twistPoint) String() string {
c.MakeAffine()
x, y := gfP2Decode(&c.x), gfP2Decode(&c.y)
return "(" + x.String() + ", " + y.String() + ")"
}
func (c *twistPoint) Set(a *twistPoint) {
c.x.Set(&a.x)
c.y.Set(&a.y)
c.z.Set(&a.z)
c.t.Set(&a.t)
}
// IsOnCurve returns true iff c is on the curve.
func (c *twistPoint) IsOnCurve() bool {
c.MakeAffine()
if c.IsInfinity() {
return true
}
y2, x3 := &gfP2{}, &gfP2{}
y2.Square(&c.y)
x3.Square(&c.x).Mul(x3, &c.x).Add(x3, twistB)
if *y2 != *x3 {
return false
}
cneg := &twistPoint{}
cneg.Mul(c, Order)
return cneg.z.IsZero()
}
func (c *twistPoint) SetInfinity() {
c.x.SetZero()
c.y.SetOne()
c.z.SetZero()
c.t.SetZero()
}
func (c *twistPoint) IsInfinity() bool {
return c.z.IsZero()
}
func (c *twistPoint) Add(a, b *twistPoint) {
// For additional comments, see the same function in curve.go.
if a.IsInfinity() {
c.Set(b)
return
}
if b.IsInfinity() {
c.Set(a)
return
}
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/addition/add-2007-bl.op3
z12 := (&gfP2{}).Square(&a.z)
z22 := (&gfP2{}).Square(&b.z)
u1 := (&gfP2{}).Mul(&a.x, z22)
u2 := (&gfP2{}).Mul(&b.x, z12)
t := (&gfP2{}).Mul(&b.z, z22)
s1 := (&gfP2{}).Mul(&a.y, t)
t.Mul(&a.z, z12)
s2 := (&gfP2{}).Mul(&b.y, t)
h := (&gfP2{}).Sub(u2, u1)
xEqual := h.IsZero()
t.Add(h, h)
i := (&gfP2{}).Square(t)
j := (&gfP2{}).Mul(h, i)
t.Sub(s2, s1)
yEqual := t.IsZero()
if xEqual && yEqual {
c.Double(a)
return
}
r := (&gfP2{}).Add(t, t)
v := (&gfP2{}).Mul(u1, i)
t4 := (&gfP2{}).Square(r)
t.Add(v, v)
t6 := (&gfP2{}).Sub(t4, j)
c.x.Sub(t6, t)
t.Sub(v, &c.x) // t7
t4.Mul(s1, j) // t8
t6.Add(t4, t4) // t9
t4.Mul(r, t) // t10
c.y.Sub(t4, t6)
t.Add(&a.z, &b.z) // t11
t4.Square(t) // t12
t.Sub(t4, z12) // t13
t4.Sub(t, z22) // t14
c.z.Mul(t4, h)
}
func (c *twistPoint) Double(a *twistPoint) {
// See http://hyperelliptic.org/EFD/g1p/auto-code/shortw/jacobian-0/doubling/dbl-2009-l.op3
A := (&gfP2{}).Square(&a.x)
B := (&gfP2{}).Square(&a.y)
C := (&gfP2{}).Square(B)
t := (&gfP2{}).Add(&a.x, B)
t2 := (&gfP2{}).Square(t)
t.Sub(t2, A)
t2.Sub(t, C)
d := (&gfP2{}).Add(t2, t2)
t.Add(A, A)
e := (&gfP2{}).Add(t, A)
f := (&gfP2{}).Square(e)
t.Add(d, d)
c.x.Sub(f, t)
t.Add(C, C)
t2.Add(t, t)
t.Add(t2, t2)
c.y.Sub(d, &c.x)
t2.Mul(e, &c.y)
c.y.Sub(t2, t)
t.Mul(&a.y, &a.z)
c.z.Add(t, t)
}
func (c *twistPoint) Mul(a *twistPoint, scalar *big.Int) {
sum, t := &twistPoint{}, &twistPoint{}
for i := scalar.BitLen(); i >= 0; i-- {
t.Double(sum)
if scalar.Bit(i) != 0 {
sum.Add(t, a)
} else {
sum.Set(t)
}
}
c.Set(sum)
}
func (c *twistPoint) MakeAffine() {
if c.z.IsOne() {
return
} else if c.z.IsZero() {
c.x.SetZero()
c.y.SetOne()
c.t.SetZero()
return
}
zInv := (&gfP2{}).Invert(&c.z)
t := (&gfP2{}).Mul(&c.y, zInv)
zInv2 := (&gfP2{}).Square(zInv)
c.y.Mul(t, zInv2)
t.Mul(&c.x, zInv2)
c.x.Set(t)
c.z.SetOne()
c.t.SetOne()
}
func (c *twistPoint) Neg(a *twistPoint) {
c.x.Set(&a.x)
c.y.Neg(&a.y)
c.z.Set(&a.z)
c.t.SetZero()
}
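The Jacobian coordinates mentioned in the type comment mean that (x, y, z) stands for the affine point (x/z², y/z³); MakeAffine computes exactly that with a single field inversion, and t caches z² for the pairing code. A toy math/big sketch of the same conversion over a small prime field follows (all numbers are made up; the real code does this in GF(p²) via gfP2):

package main

import (
	"fmt"
	"math/big"
)

// Jacobian -> affine over a toy prime field: (X, Y, Z) represents the affine
// point (X/Z^2, Y/Z^3). The modulus and coordinates are illustrative only.
func main() {
	p := big.NewInt(101)
	X, Y, Z := big.NewInt(26), big.NewInt(45), big.NewInt(3)

	zInv := new(big.Int).ModInverse(Z, p)
	zInv2 := new(big.Int).Mul(zInv, zInv)
	zInv2.Mod(zInv2, p)
	zInv3 := new(big.Int).Mul(zInv2, zInv)
	zInv3.Mod(zInv3, p)

	x := new(big.Int).Mul(X, zInv2)
	x.Mod(x, p)
	y := new(big.Int).Mul(Y, zInv3)
	y.Mod(y, p)
	fmt.Println(x, y) // prints 59 69 for these toy values
}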

View File

@@ -18,6 +18,7 @@ package bn256
import (
"crypto/rand"
"errors"
"io"
"math/big"
)
@@ -115,21 +116,25 @@ func (n *G1) Marshal() []byte {
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G1) Unmarshal(m []byte) (*G1, bool) {
func (e *G1) Unmarshal(m []byte) ([]byte, error) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) != 2*numBytes {
return nil, false
return nil, errors.New("bn256: not enough data")
}
// Unmarshal the points and check their caps
if e.p == nil {
e.p = newCurvePoint(nil)
}
e.p.x.SetBytes(m[0*numBytes : 1*numBytes])
if e.p.x.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
e.p.y.SetBytes(m[1*numBytes : 2*numBytes])
if e.p.y.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
// Ensure the point is on the curve
if e.p.x.Sign() == 0 && e.p.y.Sign() == 0 {
// This is the point at infinity.
e.p.y.SetInt64(1)
@@ -140,11 +145,10 @@ func (e *G1) Unmarshal(m []byte) (*G1, bool) {
e.p.t.SetInt64(1)
if !e.p.IsOnCurve() {
return nil, false
return nil, errors.New("bn256: malformed point")
}
}
return e, true
return m[2*numBytes:], nil
}
// G2 is an abstract cyclic group. The zero value is suitable for use as the
@@ -233,23 +237,33 @@ func (n *G2) Marshal() []byte {
// Unmarshal sets e to the result of converting the output of Marshal back into
// a group element and then returns e.
func (e *G2) Unmarshal(m []byte) (*G2, bool) {
func (e *G2) Unmarshal(m []byte) ([]byte, error) {
// Each value is a 256-bit number.
const numBytes = 256 / 8
if len(m) != 4*numBytes {
return nil, false
return nil, errors.New("bn256: not enough data")
}
// Unmarshal the points and check their caps
if e.p == nil {
e.p = newTwistPoint(nil)
}
e.p.x.x.SetBytes(m[0*numBytes : 1*numBytes])
if e.p.x.x.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
e.p.x.y.SetBytes(m[1*numBytes : 2*numBytes])
if e.p.x.y.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
e.p.y.x.SetBytes(m[2*numBytes : 3*numBytes])
if e.p.y.x.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
e.p.y.y.SetBytes(m[3*numBytes : 4*numBytes])
if e.p.y.y.Cmp(P) >= 0 {
return nil, errors.New("bn256: coordinate exceeds modulus")
}
// Ensure the point is on the curve
if e.p.x.x.Sign() == 0 &&
e.p.x.y.Sign() == 0 &&
e.p.y.x.Sign() == 0 &&
@@ -263,11 +277,10 @@ func (e *G2) Unmarshal(m []byte) (*G2, bool) {
e.p.t.SetOne()
if !e.p.IsOnCurve() {
return nil, false
return nil, errors.New("bn256: malformed point")
}
}
return e, true
return m[4*numBytes:], nil
}
// GT is an abstract cyclic group. The zero value is suitable for use as the
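The hunks above change Unmarshal from returning (*G1, bool) to ([]byte, error): coordinates not below the modulus P are now rejected, failures carry a descriptive error, and on success the remainder of the input slice after the parsed bytes is returned. A hedged usage sketch of the new signature; the import path is assumed here (the big.Int-based copy bundled with go-ethereum), adjust it to whichever bn256 package you actually use:

package main

import (
	"crypto/rand"
	"fmt"
	"log"

	bn256 "github.com/ethereum/go-ethereum/crypto/bn256/google" // assumed path
)

func main() {
	k, _ := rand.Int(rand.Reader, bn256.Order)
	buf := new(bn256.G1).ScalarBaseMult(k).Marshal() // 64 bytes: x || y

	p := new(bn256.G1)
	rest, err := p.Unmarshal(buf)
	if err != nil {
		// e.g. "bn256: not enough data", "bn256: coordinate exceeds modulus"
		// or "bn256: malformed point", per the checks added above.
		log.Fatal(err)
	}
	fmt.Println(len(rest)) // 0: the whole 64-byte encoding was consumed
}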

View File

@@ -219,15 +219,16 @@ func TestBilinearity(t *testing.T) {
func TestG1Marshal(t *testing.T) {
g := new(G1).ScalarBaseMult(new(big.Int).SetInt64(1))
form := g.Marshal()
_, ok := new(G1).Unmarshal(form)
if !ok {
_, err := new(G1).Unmarshal(form)
if err != nil {
t.Fatalf("failed to unmarshal")
}
g.ScalarBaseMult(Order)
form = g.Marshal()
g2, ok := new(G1).Unmarshal(form)
if !ok {
g2 := new(G1)
if _, err = g2.Unmarshal(form); err != nil {
t.Fatalf("failed to unmarshal ∞")
}
if !g2.p.IsInfinity() {
@@ -238,15 +239,15 @@ func TestG1Marshal(t *testing.T) {
func TestG2Marshal(t *testing.T) {
g := new(G2).ScalarBaseMult(new(big.Int).SetInt64(1))
form := g.Marshal()
_, ok := new(G2).Unmarshal(form)
if !ok {
_, err := new(G2).Unmarshal(form)
if err != nil {
t.Fatalf("failed to unmarshal")
}
g.ScalarBaseMult(Order)
form = g.Marshal()
g2, ok := new(G2).Unmarshal(form)
if !ok {
g2 := new(G2)
if _, err = g2.Unmarshal(form); err != nil {
t.Fatalf("failed to unmarshal ∞")
}
if !g2.p.IsInfinity() {
@@ -273,12 +274,18 @@ func TestTripartiteDiffieHellman(t *testing.T) {
b, _ := rand.Int(rand.Reader, Order)
c, _ := rand.Int(rand.Reader, Order)
pa, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
qa, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
pb, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
qb, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
pc, _ := new(G1).Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
qc, _ := new(G2).Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
pa := new(G1)
pa.Unmarshal(new(G1).ScalarBaseMult(a).Marshal())
qa := new(G2)
qa.Unmarshal(new(G2).ScalarBaseMult(a).Marshal())
pb := new(G1)
pb.Unmarshal(new(G1).ScalarBaseMult(b).Marshal())
qb := new(G2)
qb.Unmarshal(new(G2).ScalarBaseMult(b).Marshal())
pc := new(G1)
pc.Unmarshal(new(G1).ScalarBaseMult(c).Marshal())
qc := new(G2)
qc.Unmarshal(new(G2).ScalarBaseMult(c).Marshal())
k1 := Pair(pb, qc)
k1.ScalarMult(k1, a)

Some files were not shown because too many files have changed in this diff.