diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index f87996cdcb..a08542df25 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -30,11 +30,11 @@ Please make sure your contributions adhere to our coding guidelines: Before you submit a feature request, please check and make sure that it isn't possible through some other means. The JavaScript-enabled console is a powerful feature in the right hands. Please check our -[Wiki page](https://github.com/ethereum/go-ethereum/wiki) for more info +[Geth documentation page](https://geth.ethereum.org/docs/) for more info and help. ## Configuration, dependencies, and tests -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) for more details on configuring your environment, managing project dependencies and testing procedures. diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE/bug.md similarity index 50% rename from .github/ISSUE_TEMPLATE.md rename to .github/ISSUE_TEMPLATE/bug.md index 59285e456d..c5a3654bde 100644 --- a/.github/ISSUE_TEMPLATE.md +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -1,8 +1,10 @@ -Hi there, - -Please note that this is an issue tracker reserved for bug reports and feature requests. - -For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. +--- +name: Report a bug +about: Something with go-ethereum is not working as expected +title: '' +labels: 'type:bug' +assignees: '' +--- #### System information diff --git a/.github/ISSUE_TEMPLATE/feature.md b/.github/ISSUE_TEMPLATE/feature.md new file mode 100644 index 0000000000..aacd885f9e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.md @@ -0,0 +1,17 @@ +--- +name: Request a feature +about: Report a missing feature - e.g. as a step before submitting a PR +title: '' +labels: 'type:feature' +assignees: '' +--- + +# Rationale + +Why should this feature exist? +What are the use-cases? + +# Implementation + +Do you have ideas regarding the implementation of this feature? +Are you willing to implement this feature? \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000000..8f460ab558 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,9 @@ +--- +name: Ask a question +about: Something is unclear +title: '' +labels: 'type:docs' +assignees: '' +--- + +This should only be used in very rare cases e.g. if you are not 100% sure if something is a bug or asking a question that leads to improving the documentation. For general questions please use [discord](https://discord.gg/nthXNEv) or the Ethereum stack exchange at https://ethereum.stackexchange.com. 
diff --git a/.travis.yml b/.travis.yml index fd31e3d506..7406f31fe7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,7 @@ jobs: allow_failures: - stage: build os: osx - go: 1.14.x + go: 1.15.x env: - azure-osx - azure-ios @@ -15,8 +15,8 @@ jobs: # This builder only tests code linters on latest version of Go - stage: lint os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - lint git: @@ -28,8 +28,8 @@ jobs: - stage: build if: type = push os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - ubuntu-ppa - GO111MODULE=on @@ -52,9 +52,9 @@ jobs: - stage: build if: type = push os: linux - dist: xenial + dist: bionic sudo: required - go: 1.15.x + go: 1.16.x env: - azure-linux - GO111MODULE=on @@ -67,31 +67,31 @@ jobs: script: # Build for the primary platforms that Trusty can manage - go run build/ci.go install -dlgo - - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go install -dlgo -arch 386 - - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch 386 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Switch over GCC to cross compilation (breaks 386, hence why do it here only) - sudo -E apt-get -yq --no-install-suggests --no-install-recommends --force-yes install gcc-arm-linux-gnueabi libc6-dev-armel-cross gcc-arm-linux-gnueabihf libc6-dev-armhf-cross gcc-aarch64-linux-gnu libc6-dev-arm64-cross - sudo ln -s /usr/include/asm-generic /usr/include/asm - GOARM=5 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=5 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - GOARM=6 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabi-gcc - - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=6 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - GOARM=7 go run build/ci.go install -dlgo -arch arm -cc arm-linux-gnueabihf-gcc - - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - GOARM=7 go run build/ci.go archive -arch arm -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go install -dlgo -arch arm64 -cc aarch64-linux-gnu-gcc - - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch arm64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Linux Azure MIPS xgo uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic services: - docker - go: 1.15.x + go: 1.16.x env: - azure-linux-mips - GO111MODULE=on @@ -100,38 +100,29 @@ jobs: script: - go run build/ci.go xgo --alltools -- --targets=linux/mips --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips; do mv -f "${bin}" "${bin/-linux-mips/}"; done - - go run build/ci.go archive -arch mips -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips -type tar -signer 
LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mipsle --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mipsle; do mv -f "${bin}" "${bin/-linux-mipsle/}"; done - - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mipsle -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64 --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64; do mv -f "${bin}" "${bin/-linux-mips64/}"; done - - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64 -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds - go run build/ci.go xgo --alltools -- --targets=linux/mips64le --ldflags '-extldflags "-static"' -v - for bin in build/bin/*-linux-mips64le; do mv -f "${bin}" "${bin/-linux-mips64le/}"; done - - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -arch mips64le -type tar -signer LINUX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # This builder does the Android Maven and Azure uploads - stage: build if: type = push os: linux - dist: xenial + dist: bionic addons: apt: packages: - - oracle-java8-installer - - oracle-java8-set-default - language: android - android: - components: - - platform-tools - - tools - - android-15 - - android-19 - - android-24 + - openjdk-8-jdk env: - azure-android - maven-android @@ -139,25 +130,33 @@ jobs: git: submodules: false # avoid cloning ethereum/tests before_install: - - curl https://dl.google.com/go/go1.15.5.linux-amd64.tar.gz | tar -xz + # Install Android and its dependencies manually, Travis is stale + - export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64 + - curl https://dl.google.com/android/repository/commandlinetools-linux-6858069_latest.zip -o android.zip + - unzip -q android.zip -d $HOME/sdk && rm android.zip + - mv $HOME/sdk/cmdline-tools $HOME/sdk/latest && mkdir $HOME/sdk/cmdline-tools && mv $HOME/sdk/latest $HOME/sdk/cmdline-tools + - export PATH=$PATH:$HOME/sdk/cmdline-tools/latest/bin + - export ANDROID_HOME=$HOME/sdk + + - yes | sdkmanager --licenses >/dev/null + - sdkmanager "platform-tools" "platforms;android-15" "platforms;android-19" "platforms;android-24" "ndk-bundle" + + # Install Go to allow building with it + - curl https://dl.google.com/go/go1.16.linux-amd64.tar.gz | tar -xz - export PATH=`pwd`/go/bin:$PATH - export GOROOT=`pwd`/go - export GOPATH=$HOME/go script: # Build the Android archive and upload it to Maven Central and Azure - - curl https://dl.google.com/android/repository/android-ndk-r19b-linux-x86_64.zip -o android-ndk-r19b.zip - - unzip -q android-ndk-r19b.zip && rm android-ndk-r19b.zip - - mv android-ndk-r19b $ANDROID_HOME/ndk-bundle - - mkdir -p $GOPATH/src/github.com/ethereum - ln -s `pwd` $GOPATH/src/github.com/ethereum/go-ethereum - - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -deploy https://oss.sonatype.org -upload gethstore/builds + - go run build/ci.go aar -signer ANDROID_SIGNING_KEY -signify SIGNIFY_KEY -deploy https://oss.sonatype.org -upload gethstore/builds # This builder does the OSX Azure, iOS CocoaPods and iOS Azure uploads - stage: build if: type = push os: osx - go: 1.15.x + go: 1.16.x env: - azure-osx - azure-ios @@ -167,7
+166,7 @@ jobs: submodules: false # avoid cloning ethereum/tests script: - go run build/ci.go install -dlgo - - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -upload gethstore/builds + - go run build/ci.go archive -type tar -signer OSX_SIGNING_KEY -signify SIGNIFY_KEY -upload gethstore/builds # Build the iOS framework and upload it to CocoaPods and Azure - gem uninstall cocoapods -a -x @@ -182,14 +181,14 @@ jobs: # Workaround for https://github.com/golang/go/issues/23749 - export CGO_CFLAGS_ALLOW='-fmodules|-fblocks|-fobjc-arc' - - go run build/ci.go xcode -signer IOS_SIGNING_KEY -deploy trunk -upload gethstore/builds + - go run build/ci.go xcode -signer IOS_SIGNING_KEY -signify SIGNIFY_KEY -deploy trunk -upload gethstore/builds # These builders run the tests - stage: build os: linux arch: amd64 - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - GO111MODULE=on script: @@ -199,8 +198,8 @@ jobs: if: type = pull_request os: linux arch: arm64 - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - GO111MODULE=on script: @@ -208,8 +207,8 @@ jobs: - stage: build os: linux - dist: xenial - go: 1.14.x + dist: bionic + go: 1.15.x env: - GO111MODULE=on script: @@ -219,8 +218,8 @@ jobs: - stage: build if: type = cron os: linux - dist: xenial - go: 1.15.x + dist: bionic + go: 1.16.x env: - azure-purge - GO111MODULE=on diff --git a/Dockerfile b/Dockerfile index d86b776611..6e0dea11b8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.15-alpine as builder +FROM golang:1.16-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/Dockerfile.alltools b/Dockerfile.alltools index 715213c5de..483afad8c3 100644 --- a/Dockerfile.alltools +++ b/Dockerfile.alltools @@ -1,5 +1,5 @@ # Build Geth in a stock Go builder container -FROM golang:1.15-alpine as builder +FROM golang:1.16-alpine as builder RUN apk add --no-cache make gcc musl-dev linux-headers git diff --git a/README.md b/README.md index ddb885dfdc..4a083d117a 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Official Golang implementation of the Ethereum protocol. https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 )](https://pkg.go.dev/github.com/ethereum/go-ethereum?tab=doc) [![Go Report Card](https://goreportcard.com/badge/github.com/ethereum/go-ethereum)](https://goreportcard.com/report/github.com/ethereum/go-ethereum) -[![Travis](https://travis-ci.org/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.org/ethereum/go-ethereum) +[![Travis](https://travis-ci.com/ethereum/go-ethereum.svg?branch=master)](https://travis-ci.com/ethereum/go-ethereum) [![Discord](https://img.shields.io/badge/discord-join%20chat-blue.svg)](https://discord.gg/nthXNEv) Automated builds are available for stable releases and the unstable master branch. Binary @@ -14,7 +14,7 @@ archives are published at https://geth.ethereum.org/downloads/. ## Building the source -For prerequisites and detailed build instructions please read the [Installation Instructions](https://github.com/ethereum/go-ethereum/wiki/Building-Ethereum) on the wiki. +For prerequisites and detailed build instructions please read the [Installation Instructions](https://geth.ethereum.org/docs/install-and-build/installing-geth). Building `geth` requires both a Go (version 1.13 or later) and a C compiler. 
You can install them using your favourite package manager. Once the dependencies are installed, run @@ -36,18 +36,18 @@ directory. | Command | Description | | :-----------: | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options) for command line options. | -| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://github.com/ethereum/go-ethereum/wiki/Native-DApps:-Go-bindings-to-Ethereum-contracts) wiki page for details. | +| **`geth`** | Our main Ethereum CLI client. It is the entry point into the Ethereum network (main-, test- or private net), capable of running as a full node (default), archive node (retaining all historical state) or a light node (retrieving data live). It can be used by other processes as a gateway into the Ethereum network via JSON RPC endpoints exposed on top of HTTP, WebSocket and/or IPC transports. `geth --help` and the [CLI page](https://geth.ethereum.org/docs/interface/command-line-options) for command line options. | +| `abigen` | Source code generator to convert Ethereum contract definitions into easy to use, compile-time type-safe Go packages. It operates on plain [Ethereum contract ABIs](https://docs.soliditylang.org/en/develop/abi-spec.html) with expanded functionality if the contract bytecode is also available. However, it also accepts Solidity source files, making development much more streamlined. Please see our [Native DApps](https://geth.ethereum.org/docs/dapp/native-bindings) page for details. | | `bootnode` | Stripped down version of our Ethereum client implementation that only takes part in the network node discovery protocol, but does not run any of the higher level application protocols. It can be used as a lightweight bootstrap node to aid in finding peers in private networks. | | `evm` | Developer utility version of the EVM (Ethereum Virtual Machine) that is capable of running bytecode snippets within a configurable environment and execution mode. Its purpose is to allow isolated, fine-grained debugging of EVM opcodes (e.g. `evm --code 60ff60ff --debug run`). 
| -| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://github.com/ethereum/wiki/wiki/JSON-RPC) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | -| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://github.com/ethereum/wiki/wiki/RLP)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | +| `gethrpctest` | Developer utility tool to support our [ethereum/rpc-test](https://github.com/ethereum/rpc-tests) test suite which validates baseline conformity to the [Ethereum JSON RPC](https://eth.wiki/json-rpc/API) specs. Please see the [test suite's readme](https://github.com/ethereum/rpc-tests/blob/master/README.md) for details. | +| `rlpdump` | Developer utility tool to convert binary RLP ([Recursive Length Prefix](https://eth.wiki/en/fundamentals/rlp)) dumps (data encoding used by the Ethereum protocol both network as well as consensus wise) to user-friendlier hierarchical representation (e.g. `rlpdump --hex CE0183FFFFFFC4C304050583616263`). | | `puppeth` | a CLI wizard that aids in creating a new Ethereum network. | ## Running `geth` Going through all the possible command line flags is out of scope here (please consult our -[CLI Wiki page](https://github.com/ethereum/go-ethereum/wiki/Command-Line-Options)), +[CLI Wiki page](https://geth.ethereum.org/docs/interface/command-line-options)), but we've enumerated a few common parameter combos to get you up to speed quickly on how you can run your own `geth` instance. @@ -66,9 +66,9 @@ This command will: * Start `geth` in fast sync mode (default, can be changed with the `--syncmode` flag), causing it to download more data in exchange for avoiding processing the entire history of the Ethereum network, which is very CPU intensive. - * Start up `geth`'s built-in interactive [JavaScript console](https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console), - (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://github.com/ethereum/wiki/wiki/JavaScript-API) - as well as `geth`'s own [management APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs). + * Start up `geth`'s built-in interactive [JavaScript console](https://geth.ethereum.org/docs/interface/javascript-console), + (via the trailing `console` subcommand) through which you can invoke all official [`web3` methods](https://web3js.readthedocs.io/en/) + as well as `geth`'s own [management APIs](https://geth.ethereum.org/docs/rpc/server). This tool is optional and if you leave it out you can always attach to an already running `geth` instance with `geth attach`. @@ -170,8 +170,8 @@ accessible from the outside. As a developer, sooner rather than later you'll want to start interacting with `geth` and the Ethereum network via your own programs and not manually through the console. To aid -this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://github.com/ethereum/wiki/wiki/JSON-RPC) -and [`geth` specific APIs](https://github.com/ethereum/go-ethereum/wiki/Management-APIs)). 
+this, `geth` has built-in support for a JSON-RPC based APIs ([standard APIs](https://eth.wiki/json-rpc/API) +and [`geth` specific APIs](https://geth.ethereum.org/docs/rpc/server)). These can be exposed via HTTP, WebSockets and IPC (UNIX sockets on UNIX based platforms, and named pipes on Windows). @@ -277,7 +277,7 @@ $ bootnode --genkey=boot.key $ bootnode --nodekey=boot.key ``` -With the bootnode online, it will display an [`enode` URL](https://github.com/ethereum/wiki/wiki/enode-url-format) +With the bootnode online, it will display an [`enode` URL](https://eth.wiki/en/fundamentals/enode-url-format) that other nodes can use to connect to it and exchange peer information. Make sure to replace the displayed IP address information (most probably `[::]`) with your externally accessible IP to get the actual `enode` URL. @@ -314,13 +314,13 @@ ones either). To start a `geth` instance for mining, run it with all your usual by: ```shell -$ geth --mine --miner.threads=1 --etherbase=0x0000000000000000000000000000000000000000 +$ geth --mine --miner.threads=1 --miner.etherbase=0x0000000000000000000000000000000000000000 ``` Which will start mining blocks and transactions on a single CPU thread, crediting all -proceedings to the account specified by `--etherbase`. You can further tune the mining -by changing the default gas limit blocks converge to (`--targetgaslimit`) and the price -transactions are accepted at (`--gasprice`). +proceedings to the account specified by `--miner.etherbase`. You can further tune the mining +by changing the default gas limit blocks converge to (`--miner.targetgaslimit`) and the price +transactions are accepted at (`--miner.gasprice`). ## Contribution @@ -344,7 +344,7 @@ Please make sure your contributions adhere to our coding guidelines: * Commit messages should be prefixed with the package(s) they modify. * E.g. "eth, rpc: make trace configs optional" -Please see the [Developers' Guide](https://github.com/ethereum/go-ethereum/wiki/Developers'-Guide) +Please see the [Developers' Guide](https://geth.ethereum.org/docs/developers/devguide) for more details on configuring your environment, managing project dependencies, and testing procedures. diff --git a/SECURITY.md b/SECURITY.md index bc54ede42f..bdce7b8d2a 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,31 +2,29 @@ ## Supported Versions -Please see Releases. We recommend to use the most recent released version. +Please see [Releases](https://github.com/ethereum/go-ethereum/releases). We recommend using the [most recently released version](https://github.com/ethereum/go-ethereum/releases/latest). ## Audit reports Audit reports are published in the `docs` folder: https://github.com/ethereum/go-ethereum/tree/master/docs/audits - | Scope | Date | Report Link | | ------- | ------- | ----------- | | `geth` | 20170425 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2017-04-25_Geth-audit_Truesec.pdf) | | `clef` | 20180914 | [pdf](https://github.com/ethereum/go-ethereum/blob/master/docs/audits/2018-09-14_Clef-audit_NCC.pdf) | - - ## Reporting a Vulnerability **Please do not file a public ticket** mentioning the vulnerability. -To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. +To find out how to disclose a vulnerability in Ethereum visit [https://bounty.ethereum.org](https://bounty.ethereum.org) or email bounty@ethereum.org. 
Please read the [disclosure page](https://github.com/ethereum/go-ethereum/security/advisories?state=published) for more information about publically disclosed security vulnerabilities. + +Use the built-in `geth version-check` feature to check whether the software is affected by any known vulnerability. This command will fetch the latest [`vulnerabilities.json`](https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json) file which contains known security vulnerabilities concerning `geth`, and cross-check the data against its own version number. The following key may be used to communicate sensitive information to developers. Fingerprint: `AE96 ED96 9E47 9B00 84F3 E17F E88D 3334 FA5F 6A0A` - ``` -----BEGIN PGP PUBLIC KEY BLOCK----- Version: GnuPG v1 diff --git a/accounts/abi/abi_test.go b/accounts/abi/abi_test.go index ad8acdf522..a022ec5f9d 100644 --- a/accounts/abi/abi_test.go +++ b/accounts/abi/abi_test.go @@ -43,6 +43,7 @@ const jsondata = ` { "type" : "function", "name" : "uint64[2]", "inputs" : [ { "name" : "inputs", "type" : "uint64[2]" } ] }, { "type" : "function", "name" : "uint64[]", "inputs" : [ { "name" : "inputs", "type" : "uint64[]" } ] }, { "type" : "function", "name" : "int8", "inputs" : [ { "name" : "inputs", "type" : "int8" } ] }, + { "type" : "function", "name" : "bytes32", "inputs" : [ { "name" : "inputs", "type" : "bytes32" } ] }, { "type" : "function", "name" : "foo", "inputs" : [ { "name" : "inputs", "type" : "uint32" } ] }, { "type" : "function", "name" : "bar", "inputs" : [ { "name" : "inputs", "type" : "uint32" }, { "name" : "string", "type" : "uint16" } ] }, { "type" : "function", "name" : "slice", "inputs" : [ { "name" : "inputs", "type" : "uint32[2]" } ] }, @@ -68,6 +69,7 @@ var ( String, _ = NewType("string", "", nil) Bool, _ = NewType("bool", "", nil) Bytes, _ = NewType("bytes", "", nil) + Bytes32, _ = NewType("bytes32", "", nil) Address, _ = NewType("address", "", nil) Uint64Arr, _ = NewType("uint64[]", "", nil) AddressArr, _ = NewType("address[]", "", nil) @@ -98,6 +100,7 @@ var methods = map[string]Method{ "uint64[]": NewMethod("uint64[]", "uint64[]", Function, "", false, false, []Argument{{"inputs", Uint64Arr, false}}, nil), "uint64[2]": NewMethod("uint64[2]", "uint64[2]", Function, "", false, false, []Argument{{"inputs", Uint64Arr2, false}}, nil), "int8": NewMethod("int8", "int8", Function, "", false, false, []Argument{{"inputs", Int8, false}}, nil), + "bytes32": NewMethod("bytes32", "bytes32", Function, "", false, false, []Argument{{"inputs", Bytes32, false}}, nil), "foo": NewMethod("foo", "foo", Function, "", false, false, []Argument{{"inputs", Uint32, false}}, nil), "bar": NewMethod("bar", "bar", Function, "", false, false, []Argument{{"inputs", Uint32, false}, {"string", Uint16, false}}, nil), "slice": NewMethod("slice", "slice", Function, "", false, false, []Argument{{"inputs", Uint32Arr2, false}}, nil), diff --git a/accounts/abi/bind/auth.go b/accounts/abi/bind/auth.go index c891b0a3e9..b8065e8488 100644 --- a/accounts/abi/bind/auth.go +++ b/accounts/abi/bind/auth.go @@ -21,6 +21,7 @@ import ( "errors" "io" "io/ioutil" + "math/big" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/accounts/external" @@ -28,11 +29,21 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" ) +// ErrNoChainID is returned whenever the user failed to specify a chain id. 
+var ErrNoChainID = errors.New("no chain id specified") + +// ErrNotAuthorized is returned when an account is not properly unlocked. +var ErrNotAuthorized = errors.New("not authorized to sign this account") + // NewTransactor is a utility method to easily create a transaction signer from // an encrypted json key stream and the associated passphrase. +// +// Deprecated: Use NewTransactorWithChainID instead. func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { + log.Warn("WARNING: NewTransactor has been deprecated in favour of NewTransactorWithChainID") json, err := ioutil.ReadAll(keyin) if err != nil { return nil, err @@ -45,13 +56,17 @@ func NewTransactor(keyin io.Reader, passphrase string) (*TransactOpts, error) { } // NewKeyStoreTransactor is a utility method to easily create a transaction signer from -// a decrypted key from a keystore. +// an decrypted key from a keystore. +// +// Deprecated: Use NewKeyStoreTransactorWithChainID instead. func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account) (*TransactOpts, error) { + log.Warn("WARNING: NewKeyStoreTransactor has been deprecated in favour of NewKeyStoreTransactorWithChainID") + signer := types.HomesteadSigner{} return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) if err != nil { @@ -64,13 +79,17 @@ func NewKeyStoreTransactor(keystore *keystore.KeyStore, account accounts.Account // NewKeyedTransactor is a utility method to easily create a transaction signer // from a single private key. +// +// Deprecated: Use NewKeyedTransactorWithChainID instead. func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { + log.Warn("WARNING: NewKeyedTransactor has been deprecated in favour of NewKeyedTransactorWithChainID") keyAddr := crypto.PubkeyToAddress(key.PublicKey) + signer := types.HomesteadSigner{} return &TransactOpts{ From: keyAddr, - Signer: func(signer types.Signer, address common.Address, tx *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { if address != keyAddr { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) if err != nil { @@ -81,14 +100,73 @@ func NewKeyedTransactor(key *ecdsa.PrivateKey) *TransactOpts { } } +// NewTransactorWithChainID is a utility method to easily create a transaction signer from +// an encrypted json key stream and the associated passphrase. +func NewTransactorWithChainID(keyin io.Reader, passphrase string, chainID *big.Int) (*TransactOpts, error) { + json, err := ioutil.ReadAll(keyin) + if err != nil { + return nil, err + } + key, err := keystore.DecryptKey(json, passphrase) + if err != nil { + return nil, err + } + return NewKeyedTransactorWithChainID(key.PrivateKey, chainID) +} + +// NewKeyStoreTransactorWithChainID is a utility method to easily create a transaction signer from +// a decrypted key from a keystore.
+func NewKeyStoreTransactorWithChainID(keystore *keystore.KeyStore, account accounts.Account, chainID *big.Int) (*TransactOpts, error) { + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: account.Address, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != account.Address { + return nil, ErrNotAuthorized + } + signature, err := keystore.SignHash(account, signer.Hash(tx).Bytes()) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} + +// NewKeyedTransactorWithChainID is a utility method to easily create a transaction signer +// from a single private key. +func NewKeyedTransactorWithChainID(key *ecdsa.PrivateKey, chainID *big.Int) (*TransactOpts, error) { + keyAddr := crypto.PubkeyToAddress(key.PublicKey) + if chainID == nil { + return nil, ErrNoChainID + } + signer := types.LatestSignerForChainID(chainID) + return &TransactOpts{ + From: keyAddr, + Signer: func(address common.Address, tx *types.Transaction) (*types.Transaction, error) { + if address != keyAddr { + return nil, ErrNotAuthorized + } + signature, err := crypto.Sign(signer.Hash(tx).Bytes(), key) + if err != nil { + return nil, err + } + return tx.WithSignature(signer, signature) + }, + }, nil +} + // NewClefTransactor is a utility method to easily create a transaction signer // with a clef backend. func NewClefTransactor(clef *external.ExternalSigner, account accounts.Account) *TransactOpts { return &TransactOpts{ From: account.Address, - Signer: func(signer types.Signer, address common.Address, transaction *types.Transaction) (*types.Transaction, error) { + Signer: func(address common.Address, transaction *types.Transaction) (*types.Transaction, error) { if address != account.Address { - return nil, errors.New("not authorized to sign this account") + return nil, ErrNotAuthorized } return clef.SignTx(account, transaction, nil) // Clef enforces its own chain id }, diff --git a/accounts/abi/bind/backends/simulated.go b/accounts/abi/bind/backends/simulated.go index c7efca440b..d6d525eae1 100644 --- a/accounts/abi/bind/backends/simulated.go +++ b/accounts/abi/bind/backends/simulated.go @@ -74,6 +74,7 @@ type SimulatedBackend struct { // NewSimulatedBackendWithDatabase creates a new binding backend based on the given database // and uses a simulated blockchain for testing purposes. +// A simulated backend always uses chainID 1337. func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { genesis := core.Genesis{Config: params.AllEthashProtocolChanges, GasLimit: gasLimit, Alloc: alloc} genesis.MustCommit(database) @@ -91,6 +92,7 @@ func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.Genesis // NewSimulatedBackend creates a new binding backend using a simulated blockchain // for testing purposes. +// A simulated backend always uses chainID 1337. 
func NewSimulatedBackend(alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { return NewSimulatedBackendWithDatabase(rawdb.NewMemoryDatabase(), alloc, gasLimit) } @@ -123,10 +125,9 @@ func (b *SimulatedBackend) Rollback() { func (b *SimulatedBackend) rollback() { blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(int, *core.BlockGen) {}) - stateDB, _ := b.blockchain.State() b.pendingBlock = blocks[0] - b.pendingState, _ = state.New(b.pendingBlock.Root(), stateDB.Database(), nil) + b.pendingState, _ = state.New(b.pendingBlock.Root(), b.blockchain.StateCache(), nil) } // stateByBlockNumber retrieves a state by a given blocknumber. @@ -479,7 +480,7 @@ func (b *SimulatedBackend) EstimateGas(ctx context.Context, call ethereum.CallMs b.pendingState.RevertToSnapshot(snapshot) if err != nil { - if err == core.ErrIntrinsicGas { + if errors.Is(err, core.ErrIntrinsicGas) { return true, nil, nil // Special case, raise gas limit } return true, nil, err // Bail out @@ -542,10 +543,11 @@ func (b *SimulatedBackend) callContract(ctx context.Context, call ethereum.CallM // Execute the call. msg := callMsg{call} - evmContext := core.NewEVMContext(msg, block.Header(), b.blockchain, nil) + txContext := core.NewEVMTxContext(msg) + evmContext := core.NewEVMBlockContext(block.Header(), b.blockchain, nil) // Create a new environment which holds all relevant information // about the transaction and calling mechanisms. - vmEnv := vm.NewEVM(evmContext, stateDB, b.config, vm.Config{}) + vmEnv := vm.NewEVM(evmContext, txContext, stateDB, b.config, vm.Config{}) gasPool := new(core.GasPool).AddGas(math.MaxUint64) return core.NewStateTransition(vmEnv, msg, gasPool).TransitionDb() @@ -557,7 +559,10 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa b.mu.Lock() defer b.mu.Unlock() - sender, err := types.Sender(types.NewEIP155Signer(b.config.ChainID), tx) + // Check transaction validity. + block := b.blockchain.CurrentBlock() + signer := types.MakeSigner(b.blockchain.Config(), block.Number()) + sender, err := types.Sender(signer, tx) if err != nil { panic(fmt.Errorf("invalid transaction: %v", err)) } @@ -566,7 +571,8 @@ func (b *SimulatedBackend) SendTransaction(ctx context.Context, tx *types.Transa panic(fmt.Errorf("invalid transaction nonce: got %d, want %d", tx.Nonce(), nonce)) } - blocks, _ := core.GenerateChain(b.config, b.blockchain.CurrentBlock(), ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { + // Include tx in chain. 
+ blocks, _ := core.GenerateChain(b.config, block, ethash.NewFaker(), b.database, 1, func(number int, block *core.BlockGen) { for _, tx := range b.pendingBlock.Transactions() { block.AddTxWithChain(b.blockchain, tx) } @@ -705,14 +711,15 @@ type callMsg struct { ethereum.CallMsg } -func (m callMsg) From() common.Address { return m.CallMsg.From } -func (m callMsg) Nonce() uint64 { return 0 } -func (m callMsg) CheckNonce() bool { return false } -func (m callMsg) To() *common.Address { return m.CallMsg.To } -func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } -func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } -func (m callMsg) Value() *big.Int { return m.CallMsg.Value } -func (m callMsg) Data() []byte { return m.CallMsg.Data } +func (m callMsg) From() common.Address { return m.CallMsg.From } +func (m callMsg) Nonce() uint64 { return 0 } +func (m callMsg) CheckNonce() bool { return false } +func (m callMsg) To() *common.Address { return m.CallMsg.To } +func (m callMsg) GasPrice() *big.Int { return m.CallMsg.GasPrice } +func (m callMsg) Gas() uint64 { return m.CallMsg.Gas } +func (m callMsg) Value() *big.Int { return m.CallMsg.Value } +func (m callMsg) Data() []byte { return m.CallMsg.Data } +func (m callMsg) AccessList() types.AccessList { return m.CallMsg.AccessList } // filterBackend implements filters.Backend to support filtering for logs without // taking bloom-bits acceleration structures into account. diff --git a/accounts/abi/bind/backends/simulated_test.go b/accounts/abi/bind/backends/simulated_test.go index e2597cca01..64ddf8bb2c 100644 --- a/accounts/abi/bind/backends/simulated_test.go +++ b/accounts/abi/bind/backends/simulated_test.go @@ -39,7 +39,7 @@ import ( func TestSimulatedBackend(t *testing.T) { var gasLimit uint64 = 8000029 key, _ := crypto.GenerateKey() // nolint: gosec - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) genAlloc := make(core.GenesisAlloc) genAlloc[auth.From] = core.GenesisAccount{Balance: big.NewInt(9223372036854775807)} @@ -411,7 +411,7 @@ func TestSimulatedBackend_EstimateGas(t *testing.T) { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) - opts := bind.NewKeyedTransactor(key) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(params.Ether)}}, 10000000) defer sim.Close() @@ -888,7 +888,7 @@ func TestSimulatedBackend_PendingCodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -924,7 +924,7 @@ func TestSimulatedBackend_CodeAt(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - auth := bind.NewKeyedTransactor(testKey) + auth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) contractAddr, tx, contract, err := bind.DeployContract(auth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v tx: %v contract: %v", err, tx, contract) @@ -956,7 +956,7 @@ func TestSimulatedBackend_PendingAndCallContract(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - contractAuth := 
bind.NewKeyedTransactor(testKey) + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(abiBin), sim) if err != nil { t.Errorf("could not deploy contract: %v", err) @@ -1043,7 +1043,7 @@ func TestSimulatedBackend_CallContractRevert(t *testing.T) { if err != nil { t.Errorf("could not get code at test addr: %v", err) } - contractAuth := bind.NewKeyedTransactor(testKey) + contractAuth, _ := bind.NewKeyedTransactorWithChainID(testKey, big.NewInt(1337)) addr, _, _, err := bind.DeployContract(contractAuth, parsed, common.FromHex(reverterBin), sim) if err != nil { t.Errorf("could not deploy contract: %v", err) diff --git a/accounts/abi/bind/base.go b/accounts/abi/bind/base.go index 9e6d898eaf..f5a6fe22fc 100644 --- a/accounts/abi/bind/base.go +++ b/accounts/abi/bind/base.go @@ -32,7 +32,7 @@ import ( // SignerFn is a signer function callback when a contract requires a method to // sign the transaction before submission. -type SignerFn func(types.Signer, common.Address, *types.Transaction) (*types.Transaction, error) +type SignerFn func(common.Address, *types.Transaction) (*types.Transaction, error) // CallOpts is the collection of options to fine tune a contract call request. type CallOpts struct { @@ -256,7 +256,7 @@ func (c *BoundContract) transact(opts *TransactOpts, contract *common.Address, i if opts.Signer == nil { return nil, errors.New("no signer to authorize the transaction with") } - signedTx, err := opts.Signer(types.HomesteadSigner{}, opts.From, rawTx) + signedTx, err := opts.Signer(opts.From, rawTx) if err != nil { return nil, err } diff --git a/accounts/abi/bind/bind_test.go b/accounts/abi/bind/bind_test.go index 8bfbf30b53..d0958cb62f 100644 --- a/accounts/abi/bind/bind_test.go +++ b/accounts/abi/bind/bind_test.go @@ -296,7 +296,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -351,7 +351,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -397,7 +397,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -455,7 +455,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -503,7 +503,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := 
bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -529,6 +529,70 @@ var bindTests = []struct { nil, nil, }, + // Tests that structs are correctly unpacked + { + + `Structs`, + ` + pragma solidity ^0.6.5; + pragma experimental ABIEncoderV2; + contract Structs { + struct A { + bytes32 B; + } + + function F() public view returns (A[] memory a, uint256[] memory c, bool[] memory d) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 96); + uint256[] memory c; + bool[] memory d; + return (a, c, d); + } + + function G() public view returns (A[] memory a) { + A[] memory a = new A[](2); + a[0].B = bytes32(uint256(1234) << 96); + return a; + } + } + `, + []string{`608060405234801561001057600080fd5b50610278806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806328811f591461003b5780636fecb6231461005b575b600080fd5b610043610070565b604051610052939291906101a0565b60405180910390f35b6100636100d6565b6040516100529190610186565b604080516002808252606082810190935282918291829190816020015b610095610131565b81526020019060019003908161008d575050805190915061026960611b9082906000906100be57fe5b60209081029190910101515293606093508392509050565b6040805160028082526060828101909352829190816020015b6100f7610131565b8152602001906001900390816100ef575050805190915061026960611b90829060009061012057fe5b602090810291909101015152905090565b60408051602081019091526000815290565b815260200190565b6000815180845260208085019450808401835b8381101561017b578151518752958201959082019060010161015e565b509495945050505050565b600060208252610199602083018461014b565b9392505050565b6000606082526101b3606083018661014b565b6020838203818501528186516101c98185610239565b91508288019350845b818110156101f3576101e5838651610143565b9484019492506001016101d2565b505084810360408601528551808252908201925081860190845b8181101561022b57825115158552938301939183019160010161020d565b509298975050505050505050565b9081526020019056fea2646970667358221220eb85327e285def14230424c52893aebecec1e387a50bb6b75fc4fdbed647f45f64736f6c63430006050033`}, + []string{`[{"inputs":[],"name":"F","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"},{"internalType":"uint256[]","name":"c","type":"uint256[]"},{"internalType":"bool[]","name":"d","type":"bool[]"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"G","outputs":[{"components":[{"internalType":"bytes32","name":"B","type":"bytes32"}],"internalType":"structStructs.A[]","name":"a","type":"tuple[]"}],"stateMutability":"view","type":"function"}]`}, + ` + "math/big" + + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + `, + ` + // Generate a new random account and a funded simulator + key, _ := crypto.GenerateKey() + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) + defer sim.Close() + + // Deploy a structs method invoker contract and execute its default method + _, _, structs, err := DeployStructs(auth, sim) + if err != nil { + t.Fatalf("Failed to deploy defaulter contract: %v", err) + } + sim.Commit() + opts := bind.CallOpts{} + if _, err := structs.F(&opts); err != nil { + t.Fatalf("Failed to 
invoke F method: %v", err) + } + if _, err := structs.G(&opts); err != nil { + t.Fatalf("Failed to invoke G method: %v", err) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that non-existent contracts are reported as such (though only simulator test) { `NonExistent`, @@ -569,6 +633,45 @@ var bindTests = []struct { nil, nil, }, + { + `NonExistentStruct`, + ` + contract NonExistentStruct { + function Struct() public view returns(uint256 a, uint256 b) { + return (10, 10); + } + } + `, + []string{`6080604052348015600f57600080fd5b5060888061001e6000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c8063d5f6622514602d575b600080fd5b6033604c565b6040805192835260208301919091528051918290030190f35b600a809156fea264697066735822beefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeefbeef64736f6c6343decafe0033`}, + []string{`[{"inputs":[],"name":"Struct","outputs":[{"internalType":"uint256","name":"a","type":"uint256"},{"internalType":"uint256","name":"b","type":"uint256"}],"stateMutability":"pure","type":"function"}]`}, + ` + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/accounts/abi/bind/backends" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + `, + ` + // Create a simulator and wrap a non-deployed contract + + sim := backends.NewSimulatedBackend(core.GenesisAlloc{}, uint64(10000000000)) + defer sim.Close() + + nonexistent, err := NewNonExistentStruct(common.Address{}, sim) + if err != nil { + t.Fatalf("Failed to access non-existent contract: %v", err) + } + // Ensure that contract calls fail with the appropriate error + if res, err := nonexistent.Struct(nil); err == nil { + t.Fatalf("Call succeeded on non-existent contract: %v", res) + } else if (err != bind.ErrNoCode) { + t.Fatalf("Error mismatch: have %v, want %v", err, bind.ErrNoCode) + } + `, + nil, + nil, + nil, + nil, + }, // Tests that gas estimation works for contracts with weird gas mechanics too. 
{ `FunkyGasPattern`, @@ -598,7 +701,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -648,7 +751,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -723,7 +826,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -817,7 +920,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1007,7 +1110,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1142,7 +1245,7 @@ var bindTests = []struct { ` key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1284,7 +1387,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1350,7 +1453,7 @@ var bindTests = []struct { ` // Initialize test accounts key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1444,7 +1547,7 @@ var bindTests = []struct { sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) defer sim.Close() - transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, _, err := DeployIdentifierCollision(transactOpts, sim) if err != nil { t.Fatalf("failed to deploy contract: %v", err) @@ -1506,7 +1609,7 @@ var bindTests = []struct { sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 10000000) defer sim.Close() - 
transactOpts := bind.NewKeyedTransactor(key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c1, err := DeployContractOne(transactOpts, sim) if err != nil { t.Fatal("Failed to deploy contract") @@ -1563,7 +1666,7 @@ var bindTests = []struct { ` // Generate a new random account and a funded simulator key, _ := crypto.GenerateKey() - auth := bind.NewKeyedTransactor(key) + auth, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) sim := backends.NewSimulatedBackend(core.GenesisAlloc{auth.From: {Balance: big.NewInt(10000000000)}}, 10000000) defer sim.Close() @@ -1601,11 +1704,7 @@ var bindTests = []struct { contract NewFallbacks { event Fallback(bytes data); fallback() external { - bytes memory data; - assembly { - calldatacopy(data, 0, calldatasize()) - } - emit Fallback(data); + emit Fallback(msg.data); } event Received(address addr, uint value); @@ -1614,7 +1713,7 @@ var bindTests = []struct { } } `, - []string{"60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040526004361061000d575b36610081575b7f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a15b005b34801561008e5760006000fd5b505b606036600082377f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f986816040518080602001828103825283818151815260200191508051906020019080838360005b838110156100fa5780820151818401525b6020810190506100de565b50505050905090810190601f1680156101275780820380516001836020036101000a031916815260200191505b509250505060405180910390a1505b00fea26469706673582212205643ca37f40c2b352dc541f42e9e6720de065de756324b7fcc9fb1d67eda4a7d64736f6c63430006040033"}, + []string{"6080604052348015600f57600080fd5b506101078061001f6000396000f3fe608060405236605f577f88a5966d370b9919b20f3e2c13ff65706f196a4e32cc2c12bf57088f885258743334604051808373ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1005b348015606a57600080fd5b507f9043988963722edecc2099c75b0af0ff76af14ffca42ed6bce059a20a2a9f98660003660405180806020018281038252848482818152602001925080828437600081840152601f19601f820116905080830192505050935050505060405180910390a100fea26469706673582212201f994dcfbc53bf610b19176f9a361eafa77b447fd9c796fa2c615dfd0aaf3b8b64736f6c634300060c0033"}, []string{`[{"anonymous":false,"inputs":[{"indexed":false,"internalType":"bytes","name":"data","type":"bytes"}],"name":"Fallback","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"addr","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Received","type":"event"},{"stateMutability":"nonpayable","type":"fallback"},{"stateMutability":"payable","type":"receive"}]`}, ` "bytes" @@ -1632,7 +1731,7 @@ var bindTests = []struct { sim := backends.NewSimulatedBackend(core.GenesisAlloc{addr: {Balance: big.NewInt(1000000000)}}, 1000000) defer sim.Close() - opts := bind.NewKeyedTransactor(key) + opts, _ := bind.NewKeyedTransactorWithChainID(key, big.NewInt(1337)) _, _, c, err := DeployNewFallbacks(opts, sim) if err != nil { t.Fatalf("Failed to deploy contract: %v", err) @@ -1662,6 +1761,7 @@ var bindTests = []struct { } // Test fallback function + gotEvent = false opts.Value = nil calldata := []byte{0x01, 0x02, 0x03} c.Fallback(opts, calldata) @@ -1746,11 +1846,16 @@ func TestGolangBindings(t *testing.T) { t.Fatalf("failed to convert binding test to modules: %v\n%s", 
err, out) } pwd, _ := os.Getwd() - replacer := exec.Command(gocmd, "mod", "edit", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root + replacer := exec.Command(gocmd, "mod", "edit", "-x", "-require", "github.com/ethereum/go-ethereum@v0.0.0", "-replace", "github.com/ethereum/go-ethereum="+filepath.Join(pwd, "..", "..", "..")) // Repo root replacer.Dir = pkg if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } + tidier := exec.Command(gocmd, "mod", "tidy") + tidier.Dir = pkg + if out, err := tidier.CombinedOutput(); err != nil { + t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) + } // Test the entire package and report any failures cmd := exec.Command(gocmd, "test", "-v", "-count", "1") cmd.Dir = pkg diff --git a/accounts/abi/bind/template.go b/accounts/abi/bind/template.go index 5329b3ebc3..e9bdd3d414 100644 --- a/accounts/abi/bind/template.go +++ b/accounts/abi/bind/template.go @@ -304,8 +304,11 @@ var ( err := _{{$contract.Type}}.contract.Call(opts, &out, "{{.Original.Name}}" {{range .Normalized.Inputs}}, {{.Name}}{{end}}) {{if .Structured}} outstruct := new(struct{ {{range .Normalized.Outputs}} {{.Name}} {{bindtype .Type $structs}}; {{end}} }) + if err != nil { + return *outstruct, err + } {{range $i, $t := .Normalized.Outputs}} - outstruct.{{.Name}} = out[{{$i}}].({{bindtype .Type $structs}}){{end}} + outstruct.{{.Name}} = *abi.ConvertType(out[{{$i}}], new({{bindtype .Type $structs}})).(*{{bindtype .Type $structs}}){{end}} return *outstruct, err {{else}} @@ -541,6 +544,7 @@ var ( if err := _{{$contract.Type}}.contract.UnpackLog(event, "{{.Original.Name}}", log); err != nil { return nil, err } + event.Raw = log return event, nil } diff --git a/accounts/abi/reflect_test.go b/accounts/abi/reflect_test.go index bac4cd9530..cf13a79da8 100644 --- a/accounts/abi/reflect_test.go +++ b/accounts/abi/reflect_test.go @@ -202,12 +202,12 @@ func TestConvertType(t *testing.T) { fields = append(fields, reflect.StructField{ Name: "X", Type: reflect.TypeOf(new(big.Int)), - Tag: reflect.StructTag("json:\"" + "x" + "\""), + Tag: "json:\"" + "x" + "\"", }) fields = append(fields, reflect.StructField{ Name: "Y", Type: reflect.TypeOf(new(big.Int)), - Tag: reflect.StructTag("json:\"" + "y" + "\""), + Tag: "json:\"" + "y" + "\"", }) val := reflect.New(reflect.StructOf(fields)) val.Elem().Field(0).Set(reflect.ValueOf(big.NewInt(1))) diff --git a/accounts/abi/type_test.go b/accounts/abi/type_test.go index 48df3aa383..8c3aedca6a 100644 --- a/accounts/abi/type_test.go +++ b/accounts/abi/type_test.go @@ -255,7 +255,7 @@ func TestTypeCheck(t *testing.T) { {"bytes", nil, [2]byte{0, 1}, "abi: cannot use array as type slice as argument"}, {"bytes", nil, common.Hash{1}, "abi: cannot use array as type slice as argument"}, {"string", nil, "hello world", ""}, - {"string", nil, string(""), ""}, + {"string", nil, "", ""}, {"string", nil, []byte{}, "abi: cannot use slice as type string as argument"}, {"bytes32[]", nil, [][32]byte{{}}, ""}, {"function", nil, [24]byte{}, ""}, diff --git a/accounts/accounts.go b/accounts/accounts.go index dc85cba174..08a1f0f2b1 100644 --- a/accounts/accounts.go +++ b/accounts/accounts.go @@ -21,7 +21,7 @@ import ( "fmt" "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/event" diff 
--git a/accounts/hd.go b/accounts/hd.go index 75c4761106..54acea3b26 100644 --- a/accounts/hd.go +++ b/accounts/hd.go @@ -150,3 +150,31 @@ func (path *DerivationPath) UnmarshalJSON(b []byte) error { *path, err = ParseDerivationPath(dp) return err } + +// DefaultIterator creates a BIP-32 path iterator, which progresses by increasing the last component: +// i.e. m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, m/44'/60'/0'/0/2, ... m/44'/60'/0'/0/N. +func DefaultIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[len(path)-1]-- + return func() DerivationPath { + path[len(path)-1]++ + return path + } +} + +// LedgerLiveIterator creates a bip44 path iterator for Ledger Live. +// Ledger Live increments the third component rather than the fifth component +// i.e. m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, m/44'/60'/2'/0/0, ... m/44'/60'/N'/0/0. +func LedgerLiveIterator(base DerivationPath) func() DerivationPath { + path := make(DerivationPath, len(base)) + copy(path[:], base[:]) + // Set it back by one, so the first call gives the first result + path[2]-- + return func() DerivationPath { + // ledgerLivePathIterator iterates on the third component + path[2]++ + return path + } +} diff --git a/accounts/hd_test.go b/accounts/hd_test.go index 3156a487ee..0743bbe666 100644 --- a/accounts/hd_test.go +++ b/accounts/hd_test.go @@ -17,6 +17,7 @@ package accounts import ( + "fmt" "reflect" "testing" ) @@ -77,3 +78,41 @@ func TestHDPathParsing(t *testing.T) { } } } + +func testDerive(t *testing.T, next func() DerivationPath, expected []string) { + t.Helper() + for i, want := range expected { + if have := next(); fmt.Sprintf("%v", have) != want { + t.Errorf("step %d, have %v, want %v", i, have, want) + } + } +} + +func TestHdPathIteration(t *testing.T) { + testDerive(t, DefaultIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/0'/0/1", + "m/44'/60'/0'/0/2", "m/44'/60'/0'/0/3", + "m/44'/60'/0'/0/4", "m/44'/60'/0'/0/5", + "m/44'/60'/0'/0/6", "m/44'/60'/0'/0/7", + "m/44'/60'/0'/0/8", "m/44'/60'/0'/0/9", + }) + + testDerive(t, DefaultIterator(LegacyLedgerBaseDerivationPath), + []string{ + "m/44'/60'/0'/0", "m/44'/60'/0'/1", + "m/44'/60'/0'/2", "m/44'/60'/0'/3", + "m/44'/60'/0'/4", "m/44'/60'/0'/5", + "m/44'/60'/0'/6", "m/44'/60'/0'/7", + "m/44'/60'/0'/8", "m/44'/60'/0'/9", + }) + + testDerive(t, LedgerLiveIterator(DefaultBaseDerivationPath), + []string{ + "m/44'/60'/0'/0/0", "m/44'/60'/1'/0/0", + "m/44'/60'/2'/0/0", "m/44'/60'/3'/0/0", + "m/44'/60'/4'/0/0", "m/44'/60'/5'/0/0", + "m/44'/60'/6'/0/0", "m/44'/60'/7'/0/0", + "m/44'/60'/8'/0/0", "m/44'/60'/9'/0/0", + }) +} diff --git a/accounts/keystore/account_cache.go b/accounts/keystore/account_cache.go index 8f660e282f..a3ec6e9c56 100644 --- a/accounts/keystore/account_cache.go +++ b/accounts/keystore/account_cache.go @@ -262,7 +262,7 @@ func (ac *accountCache) scanAccounts() error { switch { case err != nil: log.Debug("Failed to decode keystore key", "path", path, "err", err) - case (addr == common.Address{}): + case addr == common.Address{}: log.Debug("Failed to decode keystore key", "path", path, "err", "missing or zero address") default: return &accounts.Account{ diff --git a/accounts/keystore/key.go b/accounts/keystore/key.go index 84d8df0c5a..2b815ce0f9 100644 --- a/accounts/keystore/key.go +++ b/accounts/keystore/key.go @@ -32,7 +32,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" 
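The two iterator helpers added to `accounts/hd.go` above return closures over a copy of the base path: `DefaultIterator` advances the final index, while `LedgerLiveIterator` advances the hardened account component. A minimal usage sketch follows; the `main` wrapper and the print loops are illustrative only, while `DefaultBaseDerivationPath` and both iterators are the real exported names from the `accounts` package.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
)

func main() {
	// Standard iteration: m/44'/60'/0'/0/0, m/44'/60'/0'/0/1, ...
	next := accounts.DefaultIterator(accounts.DefaultBaseDerivationPath)
	for i := 0; i < 3; i++ {
		fmt.Println(next()) // the closure mutates and returns the same underlying path slice
	}

	// Ledger Live iteration: m/44'/60'/0'/0/0, m/44'/60'/1'/0/0, ...
	nextLive := accounts.LedgerLiveIterator(accounts.DefaultBaseDerivationPath)
	for i := 0; i < 3; i++ {
		fmt.Println(nextLive()) // only the hardened account (third) component advances
	}
}
```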
"github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" ) const ( @@ -110,7 +110,10 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) { } u := new(uuid.UUID) - *u = uuid.Parse(keyJSON.Id) + *u, err = uuid.Parse(keyJSON.Id) + if err != nil { + return err + } k.Id = *u addr, err := hex.DecodeString(keyJSON.Address) if err != nil { @@ -128,7 +131,10 @@ func (k *Key) UnmarshalJSON(j []byte) (err error) { } func newKeyFromECDSA(privateKeyECDSA *ecdsa.PrivateKey) *Key { - id := uuid.NewRandom() + id, err := uuid.NewRandom() + if err != nil { + panic(fmt.Sprintf("Could not create random uuid: %v", err)) + } key := &Key{ Id: id, Address: crypto.PubkeyToAddress(privateKeyECDSA.PublicKey), diff --git a/accounts/keystore/keystore.go b/accounts/keystore/keystore.go index 9d5e2cf6a2..88dcfbeb69 100644 --- a/accounts/keystore/keystore.go +++ b/accounts/keystore/keystore.go @@ -283,11 +283,9 @@ func (ks *KeyStore) SignTx(a accounts.Account, tx *types.Transaction, chainID *b if !found { return nil, ErrLocked } - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), unlockedKey.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, unlockedKey.PrivateKey) + // Depending on the presence of the chain ID, sign with 2718 or homestead + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, unlockedKey.PrivateKey) } // SignHashWithPassphrase signs hash if the private key matching the given address @@ -310,12 +308,9 @@ func (ks *KeyStore) SignTxWithPassphrase(a accounts.Account, passphrase string, return nil, err } defer zeroKey(key.PrivateKey) - - // Depending on the presence of the chain ID, sign with EIP155 or homestead - if chainID != nil { - return types.SignTx(tx, types.NewEIP155Signer(chainID), key.PrivateKey) - } - return types.SignTx(tx, types.HomesteadSigner{}, key.PrivateKey) + // Depending on the presence of the chain ID, sign with or without replay protection. + signer := types.LatestSignerForChainID(chainID) + return types.SignTx(tx, signer, key.PrivateKey) } // Unlock unlocks the given account indefinitely. 
diff --git a/accounts/keystore/passphrase.go b/accounts/keystore/passphrase.go index 89cdf0bfca..3b3e631888 100644 --- a/accounts/keystore/passphrase.go +++ b/accounts/keystore/passphrase.go @@ -42,7 +42,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/scrypt" ) @@ -228,9 +228,12 @@ func DecryptKey(keyjson []byte, auth string) (*Key, error) { return nil, err } key := crypto.ToECDSAUnsafe(keyBytes) - + id, err := uuid.FromBytes(keyId) + if err != nil { + return nil, err + } return &Key{ - Id: uuid.UUID(keyId), + Id: id, Address: crypto.PubkeyToAddress(key.PublicKey), PrivateKey: key, }, nil @@ -276,7 +279,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt if keyProtected.Version != version { return nil, nil, fmt.Errorf("version not supported: %v", keyProtected.Version) } - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] plainText, err := DecryptDataV3(keyProtected.Crypto, auth) if err != nil { return nil, nil, err @@ -285,7 +292,11 @@ func decryptKeyV3(keyProtected *encryptedKeyJSONV3, auth string) (keyBytes []byt } func decryptKeyV1(keyProtected *encryptedKeyJSONV1, auth string) (keyBytes []byte, keyId []byte, err error) { - keyId = uuid.Parse(keyProtected.Id) + keyUUID, err := uuid.Parse(keyProtected.Id) + if err != nil { + return nil, nil, err + } + keyId = keyUUID[:] mac, err := hex.DecodeString(keyProtected.Crypto.MAC) if err != nil { return nil, nil, err diff --git a/accounts/keystore/presale.go b/accounts/keystore/presale.go index 03055245f5..0664dc2cdd 100644 --- a/accounts/keystore/presale.go +++ b/accounts/keystore/presale.go @@ -27,7 +27,7 @@ import ( "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "golang.org/x/crypto/pbkdf2" ) @@ -37,7 +37,10 @@ func importPreSaleKey(keyStore keyStore, keyJSON []byte, password string) (accou if err != nil { return accounts.Account{}, nil, err } - key.Id = uuid.NewRandom() + key.Id, err = uuid.NewRandom() + if err != nil { + return accounts.Account{}, nil, err + } a := accounts.Account{ Address: key.Address, URL: accounts.URL{ @@ -86,7 +89,7 @@ func decryptPreSaleKey(fileContent []byte, password string) (key *Key, err error ecKey := crypto.ToECDSAUnsafe(ethPriv) key = &Key{ - Id: nil, + Id: uuid.UUID{}, Address: crypto.PubkeyToAddress(ecKey.PublicKey), PrivateKey: ecKey, } diff --git a/accounts/keystore/wallet.go b/accounts/keystore/wallet.go index 498067d497..1066095f6d 100644 --- a/accounts/keystore/wallet.go +++ b/accounts/keystore/wallet.go @@ -19,7 +19,7 @@ package keystore import ( "math/big" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" @@ -58,7 +58,7 @@ func (w *keystoreWallet) Open(passphrase string) error { return nil } func (w *keystoreWallet) Close() error { return nil } // Accounts implements accounts.Wallet, returning an account list consisting of -// a single account that the plain kestore wallet contains. +// a single account that the plain keystore wallet contains. 
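The switch from `pborman/uuid` to `google/uuid` in the keystore files above changes the API shape: `Parse` and `NewRandom` return an error alongside the value, and a raw 16-byte key ID is converted with `FromBytes` rather than a plain type conversion. A small sketch of that calling pattern; the example ID string is arbitrary.

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Parsing now reports malformed IDs instead of silently returning nil.
	id, err := uuid.Parse("3198bc9c-6672-5ab3-d995-4942343ae5b6")
	if err != nil {
		panic(err)
	}

	// Random (version 4) IDs can fail if the entropy source does.
	fresh, err := uuid.NewRandom()
	if err != nil {
		panic(err)
	}

	// A raw 16-byte slice (e.g. a key ID read from a keystore file) converts via FromBytes.
	again, err := uuid.FromBytes(id[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(id, fresh, again)
}
```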
func (w *keystoreWallet) Accounts() []accounts.Account { return []accounts.Account{w.account} } @@ -93,12 +93,12 @@ func (w *keystoreWallet) signHash(account accounts.Account, hash []byte) ([]byte return w.keystore.SignHash(account, hash) } -// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignData signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignData(account accounts.Account, mimeType string, data []byte) ([]byte, error) { return w.signHash(account, crypto.Keccak256(data)) } -// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed +// SignDataWithPassphrase signs keccak256(data). The mimetype parameter describes the type of data being signed. func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passphrase, mimeType string, data []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { @@ -108,12 +108,14 @@ func (w *keystoreWallet) SignDataWithPassphrase(account accounts.Account, passph return w.keystore.SignHashWithPassphrase(account, passphrase, crypto.Keccak256(data)) } +// SignText implements accounts.Wallet, attempting to sign the hash of +// the given text with the given account. func (w *keystoreWallet) SignText(account accounts.Account, text []byte) ([]byte, error) { return w.signHash(account, accounts.TextHash(text)) } // SignTextWithPassphrase implements accounts.Wallet, attempting to sign the -// given hash with the given account using passphrase as extra authentication. +// hash of the given text with the given account using passphrase as extra authentication. func (w *keystoreWallet) SignTextWithPassphrase(account accounts.Account, passphrase string, text []byte) ([]byte, error) { // Make sure the requested account is contained within if !w.Contains(account) { diff --git a/accounts/scwallet/README.md b/accounts/scwallet/README.md index cfca916b3a..4313d9c6b2 100644 --- a/accounts/scwallet/README.md +++ b/accounts/scwallet/README.md @@ -31,12 +31,16 @@ Write down the URL (`keycard://044def09` in this example). Then ask `geth` to open the wallet: ``` - > personal.openWallet("keycard://044def09") - Please enter the pairing password: + > personal.openWallet("keycard://044def09", "pairing password") ``` - Enter the pairing password that you have received during card initialization. Same with the PIN that you will subsequently be - asked for. + The pairing password has been generated during the card initialization process. + + The process needs to be repeated once more with the PIN: + + ``` + > personal.openWallet("keycard://044def09", "PIN number") + ``` If everything goes well, you should see your new account when typing `personal` on the console: diff --git a/accounts/scwallet/securechannel.go b/accounts/scwallet/securechannel.go index 9b70c69dcc..10887a8b43 100644 --- a/accounts/scwallet/securechannel.go +++ b/accounts/scwallet/securechannel.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto/aes" "crypto/cipher" + "crypto/elliptic" "crypto/rand" "crypto/sha256" "crypto/sha512" @@ -27,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" pcsc "github.com/gballet/go-libpcsclite" - "github.com/wsddn/go-ecdh" "golang.org/x/crypto/pbkdf2" "golang.org/x/text/unicode/norm" ) @@ -63,26 +63,19 @@ type SecureChannelSession struct { // NewSecureChannelSession creates a new secure channel for the given card and public key. 
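The clarified comments above describe `SignText` as signing the hash of the given text; per the wallet code it delegates to `accounts.TextHash`, the prefixed keccak256 digest used for signed messages. A short sketch of computing that digest; the message and the hex printing are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/accounts"
	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	msg := []byte("hello world")

	// TextHash prefixes the message with "\x19Ethereum Signed Message:\n<len>"
	// before hashing, so a signature over it cannot be replayed as a transaction.
	digest := accounts.TextHash(msg)
	fmt.Println("digest:", hexutil.Encode(digest))
}
```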
func NewSecureChannelSession(card *pcsc.Card, keyData []byte) (*SecureChannelSession, error) { // Generate an ECDSA keypair for ourselves - gen := ecdh.NewEllipticECDH(crypto.S256()) - private, public, err := gen.GenerateKey(rand.Reader) + key, err := crypto.GenerateKey() if err != nil { return nil, err } - - cardPublic, ok := gen.Unmarshal(keyData) - if !ok { - return nil, fmt.Errorf("could not unmarshal public key from card") - } - - secret, err := gen.GenerateSharedSecret(private, cardPublic) + cardPublic, err := crypto.UnmarshalPubkey(keyData) if err != nil { - return nil, err + return nil, fmt.Errorf("could not unmarshal public key from card: %v", err) } - + secret, _ := key.Curve.ScalarMult(cardPublic.X, cardPublic.Y, key.D.Bytes()) return &SecureChannelSession{ card: card, - secret: secret, - publicKey: gen.Marshal(public), + secret: secret.Bytes(), + publicKey: elliptic.Marshal(crypto.S256(), key.PublicKey.X, key.PublicKey.Y), }, nil } diff --git a/accounts/scwallet/wallet.go b/accounts/scwallet/wallet.go index 85fae8c114..b4d229bc0b 100644 --- a/accounts/scwallet/wallet.go +++ b/accounts/scwallet/wallet.go @@ -33,7 +33,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -699,7 +699,7 @@ func (w *Wallet) signHash(account accounts.Account, hash []byte) ([]byte, error) // the needed details via SignTxWithPassphrase, or by other means (e.g. unlock // the account in a keystore). func (w *Wallet) SignTx(account accounts.Account, tx *types.Transaction, chainID *big.Int) (*types.Transaction, error) { - signer := types.NewEIP155Signer(chainID) + signer := types.LatestSignerForChainID(chainID) hash := signer.Hash(tx) sig, err := w.signHash(account, hash[:]) if err != nil { diff --git a/accounts/usbwallet/trezor.go b/accounts/usbwallet/trezor.go index 1892097baf..0546458c47 100644 --- a/accounts/usbwallet/trezor.go +++ b/accounts/usbwallet/trezor.go @@ -255,9 +255,11 @@ func (w *trezorDriver) trezorSign(derivationPath []uint32, tx *types.Transaction if chainID == nil { signer = new(types.HomesteadSigner) } else { + // Trezor backend does not support typed transactions yet. 
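The rewritten `NewSecureChannelSession` above drops the `go-ecdh` dependency and performs the ECDH agreement directly on secp256k1: unmarshal the card's public key, scalar-multiply it by the local private key, and use the resulting X coordinate as the shared secret. A standalone sketch under the assumption that two locally generated keys stand in for the host and the card, just to show both sides derive the same secret.

```go
package main

import (
	"bytes"
	"crypto/elliptic"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// Two parties, e.g. the host and the card, each with a secp256k1 keypair.
	host, _ := crypto.GenerateKey()
	card, _ := crypto.GenerateKey()

	// Public keys travel on the wire in uncompressed form.
	cardPubBytes := elliptic.Marshal(crypto.S256(), card.PublicKey.X, card.PublicKey.Y)
	cardPub, err := crypto.UnmarshalPubkey(cardPubBytes)
	if err != nil {
		panic(err)
	}

	// Shared secret: scalar-multiply the peer's point by our own private scalar.
	hostSecret, _ := host.Curve.ScalarMult(cardPub.X, cardPub.Y, host.D.Bytes())
	cardSecret, _ := card.Curve.ScalarMult(host.PublicKey.X, host.PublicKey.Y, card.D.Bytes())

	fmt.Println("secrets match:", bytes.Equal(hostSecret.Bytes(), cardSecret.Bytes()))
}
```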
signer = types.NewEIP155Signer(chainID) signature[64] -= byte(chainID.Uint64()*2 + 35) } + // Inject the final signature into the transaction and sanity check the sender signed, err := tx.WithSignature(signer, signature) if err != nil { diff --git a/accounts/usbwallet/wallet.go b/accounts/usbwallet/wallet.go index e39c6bdf34..9f74e5554f 100644 --- a/accounts/usbwallet/wallet.go +++ b/accounts/usbwallet/wallet.go @@ -25,7 +25,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" diff --git a/appveyor.yml b/appveyor.yml index 2bf67d4568..052280be15 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -24,8 +24,8 @@ environment: install: - git submodule update --init - rmdir C:\go /s /q - - appveyor DownloadFile https://dl.google.com/go/go1.15.5.windows-%GETH_ARCH%.zip - - 7z x go1.15.5.windows-%GETH_ARCH%.zip -y -oC:\ > NUL + - appveyor DownloadFile https://dl.google.com/go/go1.16.windows-%GETH_ARCH%.zip + - 7z x go1.16.windows-%GETH_ARCH%.zip -y -oC:\ > NUL - go version - gcc --version diff --git a/build/checksums.txt b/build/checksums.txt index 32b376519f..d5bd4d0cd3 100644 --- a/build/checksums.txt +++ b/build/checksums.txt @@ -1,13 +1,17 @@ # This file contains sha256 checksums of optional build dependencies. -c1076b90cf94b73ebed62a81d802cd84d43d02dea8c07abdc922c57a071c84f1 go1.15.5.src.tar.gz -359a4334b8c8f5e3067e5a76f16419791ac3fef4613d8e8e1eac0b9719915f6d go1.15.5.darwin-amd64.tar.gz -4c8179d406136979724c71732009c7e2e7c794dbeaaa2a043c00da34d4be0559 go1.15.5.linux-386.tar.gz -9a58494e8da722c3aef248c9227b0e9c528c7318309827780f16220998180a0d go1.15.5.linux-amd64.tar.gz -a72a0b036beb4193a0214bca3fca4c5d68a38a4ccf098c909f7ce8bf08567c48 go1.15.5.linux-arm64.tar.gz -5ea6456620d3efed5dda99238c7f23866eafdd915e5348736e631bc283c0238a go1.15.5.linux-armv6l.tar.gz -d812436c7e3482ba3c97172edf26afaf35aca60a5621ff4a5f6a08386505ab9c go1.15.5.windows-386.zip -1d24be3a200201a74be25e4134fbec467750e834e84e9c7789a9fc13248c5507 go1.15.5.windows-amd64.zip +7688063d55656105898f323d90a79a39c378d86fe89ae192eb3b7fc46347c95a go1.16.src.tar.gz +6000a9522975d116bf76044967d7e69e04e982e9625330d9a539a8b45395f9a8 go1.16.darwin-amd64.tar.gz +ea435a1ac6d497b03e367fdfb74b33e961d813883468080f6e239b3b03bea6aa go1.16.linux-386.tar.gz +013a489ebb3e24ef3d915abe5b94c3286c070dfe0818d5bca8108f1d6e8440d2 go1.16.linux-amd64.tar.gz +3770f7eb22d05e25fbee8fb53c2a4e897da043eb83c69b9a14f8d98562cd8098 go1.16.linux-arm64.tar.gz +d1d9404b1dbd77afa2bdc70934e10fbfcf7d785c372efc29462bb7d83d0a32fd go1.16.linux-armv6l.tar.gz +481492a17d42193d471b93b7a06da3555331bd833b76336afc87be820c48933f go1.16.windows-386.zip +5cc88fa506b3d5c453c54c3ea218fc8dd05d7362ae1de15bb67986b72089ce93 go1.16.windows-amd64.zip +d7d6c70b05a7c2f68b48aab5ab8cb5116b8444c9ddad131673b152e7cff7c726 go1.16.freebsd-386.tar.gz +40b03216f6945fb6883a50604fc7f409a83f62171607229a9c598e701e684f8a go1.16.freebsd-amd64.tar.gz +27a1aaa988e930b7932ce459c8a63ad5b3333b3a06b016d87ff289f2a11aacd6 go1.16.linux-ppc64le.tar.gz +be4c9e4e2cf058efc4e3eb013a760cb989ddc4362f111950c990d1c63b27ccbe go1.16.linux-s390x.tar.gz d998a84eea42f2271aca792a7b027ca5c1edfcba229e8e5a844c9ac3f336df35 golangci-lint-1.27.0-linux-armv7.tar.gz bf781f05b0d393b4bf0a327d9e62926949a4f14d7774d950c4e009fc766ed1d4 golangci-lint.exe-1.27.0-windows-amd64.zip diff --git a/build/ci.go b/build/ci.go index 0cffb903aa..756fba8399 100644 --- a/build/ci.go 
+++ b/build/ci.go @@ -26,7 +26,7 @@ Available commands are: install [ -arch architecture ] [ -cc compiler ] [ packages... ] -- builds packages and executables test [ -coverage ] [ packages... ] -- runs the tests lint -- runs certain pre-selected linters - archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -upload dest ] -- archives build artifacts + archive [ -arch architecture ] [ -type zip|tar ] [ -signer key-envvar ] [ -signify key-envvar ] [ -upload dest ] -- archives build artifacts importkeys -- imports signing keys from env debsrc [ -signer key-id ] [ -upload dest ] -- creates a debian source package nsis -- creates a Windows NSIS installer @@ -58,6 +58,7 @@ import ( "time" "github.com/cespare/cp" + "github.com/ethereum/go-ethereum/crypto/signify" "github.com/ethereum/go-ethereum/internal/build" "github.com/ethereum/go-ethereum/params" ) @@ -151,7 +152,7 @@ var ( // This is the version of go that will be downloaded by // // go run ci.go install -dlgo - dlgoVersion = "1.15.5" + dlgoVersion = "1.16" ) var GOBIN, _ = filepath.Abs(filepath.Join("build", "bin")) @@ -396,11 +397,12 @@ func downloadLinter(cachedir string) string { // Release Packaging func doArchive(cmdline []string) { var ( - arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") - atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) - ext string + arch = flag.String("arch", runtime.GOARCH, "Architecture cross packaging") + atype = flag.String("type", "zip", "Type of archive to write (zip|tar)") + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. LINUX_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify key (e.g. 
LINUX_SIGNIFY_KEY)`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + ext string ) flag.CommandLine.Parse(cmdline) switch *atype { @@ -427,7 +429,7 @@ func doArchive(cmdline []string) { log.Fatal(err) } for _, archive := range []string{geth, alltools} { - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -447,7 +449,7 @@ func archiveBasename(arch string, archiveVersion string) string { return platform + "-" + archiveVersion } -func archiveUpload(archive string, blobstore string, signer string) error { +func archiveUpload(archive string, blobstore string, signer string, signifyVar string) error { // If signing was requested, generate the signature files if signer != "" { key := getenvBase64(signer) @@ -455,6 +457,14 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + key := os.Getenv(signifyVar) + untrustedComment := "verify with geth-release.pub" + trustedComment := fmt.Sprintf("%s (%s)", archive, time.Now().UTC().Format(time.RFC1123)) + if err := signify.SignFile(archive, archive+".sig", key, untrustedComment, trustedComment); err != nil { + return err + } + } // If uploading to Azure was requested, push the archive possibly with its signature if blobstore != "" { auth := build.AzureBlobstoreConfig{ @@ -470,6 +480,11 @@ func archiveUpload(archive string, blobstore string, signer string) error { return err } } + if signifyVar != "" { + if err := build.AzureBlobstoreUpload(archive+".sig", filepath.Base(archive+".sig"), auth); err != nil { + return err + } + } } return nil } @@ -806,6 +821,7 @@ func doWindowsInstaller(cmdline []string) { var ( arch = flag.String("arch", runtime.GOARCH, "Architecture for cross build packaging") signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. WINDOWS_SIGNING_KEY)`) + signify = flag.String("signify key", "", `Environment variable holding the signify signing key (e.g. WINDOWS_SIGNIFY_KEY)`) upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) workdir = flag.String("workdir", "", `Output directory for packages (uses temp dir if unset)`) ) @@ -867,7 +883,7 @@ func doWindowsInstaller(cmdline []string) { filepath.Join(*workdir, "geth.nsi"), ) // Sign and publish installer. - if err := archiveUpload(installer, *upload, *signer); err != nil { + if err := archiveUpload(installer, *upload, *signer, *signify); err != nil { log.Fatal(err) } } @@ -876,10 +892,11 @@ func doWindowsInstaller(cmdline []string) { func doAndroidArchive(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) - upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. ANDROID_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. 
ANDROID_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "https://oss.sonatype.org")`) + upload = flag.String("upload", "", `Destination to upload the archive (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() @@ -908,7 +925,7 @@ func doAndroidArchive(cmdline []string) { archive := "geth-" + archiveBasename("android", params.ArchiveVersion(env.Commit)) + ".aar" os.Rename("geth.aar", archive) - if err := archiveUpload(archive, *upload, *signer); err != nil { + if err := archiveUpload(archive, *upload, *signer, *signify); err != nil { log.Fatal(err) } // Sign and upload all the artifacts to Maven Central @@ -1001,10 +1018,11 @@ func newMavenMetadata(env build.Environment) mavenMetadata { func doXCodeFramework(cmdline []string) { var ( - local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) - signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) - deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) - upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) + local = flag.Bool("local", false, `Flag whether we're only doing a local build (skip Maven artifacts)`) + signer = flag.String("signer", "", `Environment variable holding the signing key (e.g. IOS_SIGNING_KEY)`) + signify = flag.String("signify", "", `Environment variable holding the signify signing key (e.g. IOS_SIGNIFY_KEY)`) + deploy = flag.String("deploy", "", `Destination to deploy the archive (usually "trunk")`) + upload = flag.String("upload", "", `Destination to upload the archives (usually "gethstore/builds")`) ) flag.CommandLine.Parse(cmdline) env := build.Env() @@ -1032,7 +1050,7 @@ func doXCodeFramework(cmdline []string) { maybeSkipArchive(env) // Sign and upload the framework to Azure - if err := archiveUpload(archive+".tar.gz", *upload, *signer); err != nil { + if err := archiveUpload(archive+".tar.gz", *upload, *signer, *signify); err != nil { log.Fatal(err) } // Prepare and upload a PodSpec to CocoaPods diff --git a/cmd/abigen/main.go b/cmd/abigen/main.go index a74b0396d4..7b3b35e4e5 100644 --- a/cmd/abigen/main.go +++ b/cmd/abigen/main.go @@ -96,7 +96,7 @@ var ( } aliasFlag = cli.StringFlag{ Name: "alias", - Usage: "Comma separated aliases for function and event renaming, e.g. foo=bar", + Usage: "Comma separated aliases for function and event renaming, e.g. 
original1=alias1, original2=alias2", } ) diff --git a/cmd/bootnode/main.go b/cmd/bootnode/main.go index 6c9ff615a1..036b968ef8 100644 --- a/cmd/bootnode/main.go +++ b/cmd/bootnode/main.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" @@ -121,17 +120,17 @@ func main() { printNotice(&nodeKey.PublicKey, *realaddr) + db, _ := enode.OpenDB("") + ln := enode.NewLocalNode(db, nodeKey) + cfg := discover.Config{ + PrivateKey: nodeKey, + NetRestrict: restrictList, + } if *runv5 { - if _, err := discv5.ListenUDP(nodeKey, conn, "", restrictList); err != nil { + if _, err := discover.ListenV5(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } } else { - db, _ := enode.OpenDB("") - ln := enode.NewLocalNode(db, nodeKey) - cfg := discover.Config{ - PrivateKey: nodeKey, - NetRestrict: restrictList, - } if _, err := discover.ListenUDP(conn, ln, cfg); err != nil { utils.Fatalf("%v", err) } diff --git a/cmd/clef/main.go b/cmd/clef/main.go index aef3cfba4f..8befce88dc 100644 --- a/cmd/clef/main.go +++ b/cmd/clef/main.go @@ -54,7 +54,7 @@ import ( "github.com/ethereum/go-ethereum/signer/rules" "github.com/ethereum/go-ethereum/signer/storage" - colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -107,11 +107,6 @@ var ( Usage: "HTTP-RPC server listening port", Value: node.DefaultHTTPPort + 5, } - legacyRPCPortFlag = cli.IntFlag{ - Name: "rpcport", - Usage: "HTTP-RPC server listening port (Deprecated, please use --http.port).", - Value: node.DefaultHTTPPort + 5, - } signerSecretFlag = cli.StringFlag{ Name: "signersecret", Usage: "A file containing the (encrypted) master seed to encrypt Clef data, e.g. 
keystore credentials and ruleset hash", @@ -250,12 +245,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ acceptFlag, }, }, - { - Name: "ALIASED (deprecated)", - Flags: []cli.Flag{ - legacyRPCPortFlag, - }, - }, } func init() { @@ -283,7 +272,6 @@ func init() { testFlag, advancedMode, acceptFlag, - legacyRPCPortFlag, } app.Action = signer app.Commands = []cli.Command{initCommand, @@ -677,12 +665,6 @@ func signer(c *cli.Context) error { // set port port := c.Int(rpcPortFlag.Name) - if c.GlobalIsSet(legacyRPCPortFlag.Name) { - if !c.GlobalIsSet(rpcPortFlag.Name) { - port = c.Int(legacyRPCPortFlag.Name) - } - log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port") - } // start http server httpEndpoint := fmt.Sprintf("%s:%d", c.GlobalString(utils.HTTPListenAddrFlag.Name), port) @@ -747,12 +729,10 @@ func DefaultConfigDir() string { appdata := os.Getenv("APPDATA") if appdata != "" { return filepath.Join(appdata, "Signer") - } else { - return filepath.Join(home, "AppData", "Roaming", "Signer") } - } else { - return filepath.Join(home, ".clef") + return filepath.Join(home, "AppData", "Roaming", "Signer") } + return filepath.Join(home, ".clef") } // As we cannot guess a stable location, return empty and handle later return "" @@ -807,14 +787,16 @@ func readMasterKey(ctx *cli.Context, ui core.UIClientAPI) ([]byte, error) { // checkFile is a convenience function to check if a file // * exists -// * is mode 0400 +// * is mode 0400 (unix only) func checkFile(filename string) error { info, err := os.Stat(filename) if err != nil { return fmt.Errorf("failed stat on %s: %v", filename, err) } // Check the unix permission bits - if info.Mode().Perm()&0377 != 0 { + // However, on windows, we cannot use the unix perm-bits, see + // https://github.com/ethereum/go-ethereum/issues/20123 + if runtime.GOOS != "windows" && info.Mode().Perm()&0377 != 0 { return fmt.Errorf("file (%v) has insecure file permissions (%v)", filename, info.Mode().String()) } return nil @@ -1124,7 +1106,7 @@ func GenDoc(ctx *cli.Context) { rlpdata := common.FromHex("0xf85d640101948a8eafb1cf62bfbeb1741769dae1a9dd47996192018026a0716bd90515acb1e68e5ac5867aa11a1e65399c3349d479f5fb698554ebc6f293a04e8a4ebfff434e971e0ef12c5bf3a881b06fd04fc3f8b8a7291fb67a26a1d4ed") var tx types.Transaction - rlp.DecodeBytes(rlpdata, &tx) + tx.UnmarshalBinary(rlpdata) add("OnApproved - SignTransactionResult", desc, ðapi.SignTransactionResult{Raw: rlpdata, Tx: &tx}) } diff --git a/cmd/devp2p/README.md b/cmd/devp2p/README.md index e1372d0158..e934ee25c9 100644 --- a/cmd/devp2p/README.md +++ b/cmd/devp2p/README.md @@ -94,11 +94,23 @@ To run the eth protocol test suite against your implementation, the node needs t geth --datadir --nodiscover --nat=none --networkid 19763 --verbosity 5 ``` -Then, run the following command, replacing `` with the enode of the geth node: +Then, run the following command, replacing `` with the enode of the geth node: ``` - devp2p rlpx eth-test cmd/devp2p/internal/ethtest/testdata/fullchain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json + devp2p rlpx eth-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json ``` - + +Repeat the above process (re-initialising the node) in order to run the Eth Protocol test suite again. + +#### Eth66 Test Suite + +The Eth66 test suite is also a conformance test suite for the eth 66 protocol version specifically. 
+To run the eth66 protocol test suite, initialize a geth node as described above and run the following command, +replacing `` with the enode of the geth node: + + ``` + devp2p rlpx eth66-test cmd/devp2p/internal/ethtest/testdata/chain.rlp cmd/devp2p/internal/ethtest/testdata/genesis.json +``` + [eth]: https://github.com/ethereum/devp2p/blob/master/caps/eth.md [dns-tutorial]: https://geth.ethereum.org/docs/developers/dns-discovery-setup [discv4]: https://github.com/ethereum/devp2p/tree/master/discv4.md diff --git a/cmd/devp2p/dnscmd.go b/cmd/devp2p/dnscmd.go index 13110f21c6..f56f0f34e4 100644 --- a/cmd/devp2p/dnscmd.go +++ b/cmd/devp2p/dnscmd.go @@ -30,7 +30,7 @@ import ( "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var ( diff --git a/cmd/devp2p/internal/ethtest/chain_test.go b/cmd/devp2p/internal/ethtest/chain_test.go index 604b908687..5e4289d80a 100644 --- a/cmd/devp2p/internal/ethtest/chain_test.go +++ b/cmd/devp2p/internal/ethtest/chain_test.go @@ -21,6 +21,7 @@ import ( "strconv" "testing" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/p2p" "github.com/stretchr/testify/assert" ) @@ -73,7 +74,7 @@ func TestEthProtocolNegotiation(t *testing.T) { // TestChain_GetHeaders tests whether the test suite can correctly // respond to a GetBlockHeaders request from a node. func TestChain_GetHeaders(t *testing.T) { - chainFile, err := filepath.Abs("./testdata/fullchain.rlp.gz") + chainFile, err := filepath.Abs("./testdata/chain.rlp") if err != nil { t.Fatal(err) } @@ -93,7 +94,7 @@ func TestChain_GetHeaders(t *testing.T) { }{ { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Number: uint64(2), }, Amount: uint64(5), @@ -110,7 +111,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Number: uint64(chain.Len() - 1), }, Amount: uint64(3), @@ -125,7 +126,7 @@ func TestChain_GetHeaders(t *testing.T) { }, { req: GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Hash: chain.Head().Hash(), }, Amount: uint64(1), diff --git a/cmd/devp2p/internal/ethtest/eth66_suite.go b/cmd/devp2p/internal/ethtest/eth66_suite.go new file mode 100644 index 0000000000..644fed61eb --- /dev/null +++ b/cmd/devp2p/internal/ethtest/eth66_suite.go @@ -0,0 +1,382 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
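The test helpers above now build header requests with the shared `eth.HashOrNumber` type instead of a local copy; the origin of a `GetBlockHeaders` query is either a block hash or a block number, never both. A brief sketch of the two forms; the hash value and the amounts are placeholders.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// Request three headers starting from block number 2.
	byNumber := eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{Number: 2},
		Amount: 3,
	}

	// Request a single header identified by its hash.
	byHash := eth.GetBlockHeadersPacket{
		Origin: eth.HashOrNumber{Hash: common.HexToHash("0xdeadbeef")},
		Amount: 1,
	}
	fmt.Printf("%+v\n%+v\n", byNumber, byHash)
}
```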
+ +package ethtest + +import ( + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" +) + +// TestStatus_66 attempts to connect to the given node and exchange +// a status message with it on the eth66 protocol, and then check to +// make sure the chain head is correct. +func (s *Suite) TestStatus_66(t *utesting.T) { + conn := s.dial66(t) + // get protoHandshake + conn.handshake(t) + // get status + switch msg := conn.statusExchange66(t, s.chain).(type) { + case *Status: + status := *msg + if status.ProtocolVersion != uint32(66) { + t.Fatalf("mismatch in version: wanted 66, got %d", status.ProtocolVersion) + } + t.Logf("got status message: %s", pretty.Sdump(msg)) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestGetBlockHeaders_66 tests whether the given node can respond to +// an eth66 `GetBlockHeaders` request and that the response is accurate. +func (s *Suite) TestGetBlockHeaders_66(t *utesting.T) { + conn := s.setupConnection66(t) + // get block headers + req := ð.GetBlockHeadersPacket66{ + RequestId: 3, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + }, + } + // write message + headers := s.getBlockHeaders66(t, conn, req, req.RequestId) + // check for correct headers + headersMatch(t, s.chain, headers) +} + +// TestSimultaneousRequests_66 sends two simultaneous `GetBlockHeader` requests +// with different request IDs and checks to make sure the node responds with the correct +// headers per request. +func (s *Suite) TestSimultaneousRequests_66(t *utesting.T) { + // create two connections + conn1, conn2 := s.setupConnection66(t), s.setupConnection66(t) + // create two requests + req1 := ð.GetBlockHeadersPacket66{ + RequestId: 111, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 2, + Skip: 1, + Reverse: false, + }, + } + req2 := ð.GetBlockHeadersPacket66{ + RequestId: 222, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: s.chain.blocks[1].Hash(), + }, + Amount: 4, + Skip: 1, + Reverse: false, + }, + } + // wait for headers for first request + headerChan := make(chan BlockHeaders, 1) + go func(headers chan BlockHeaders) { + headers <- s.getBlockHeaders66(t, conn1, req1, req1.RequestId) + }(headerChan) + // check headers of second request + headersMatch(t, s.chain, s.getBlockHeaders66(t, conn2, req2, req2.RequestId)) + // check headers of first request + headersMatch(t, s.chain, <-headerChan) +} + +// TestBroadcast_66 tests whether a block announcement is correctly +// propagated to the given node's peer(s) on the eth66 protocol. 
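What distinguishes the `_66` tests above is request/response pairing: each eth/66 request is wrapped in a `*Packet66` struct carrying a `RequestId`, and the suite only accepts an answer that echoes the same ID. A minimal sketch of the wrapping; the request ID value and the hand-built "response" are illustrative, not wire traffic.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

func main() {
	// An eth/66 header request: the eth/65 payload plus an explicit request ID.
	req := &eth.GetBlockHeadersPacket66{
		RequestId: 111,
		GetBlockHeadersPacket: &eth.GetBlockHeadersPacket{
			Origin: eth.HashOrNumber{Number: 1},
			Amount: 2,
		},
	}

	// A conforming peer echoes the ID back in its BlockHeaders answer; the suite
	// matches responses to requests by comparing these two values.
	resp := &eth.BlockHeadersPacket66{RequestId: 111}
	if resp.RequestId != req.RequestId {
		panic("request ID mismatch")
	}
	fmt.Println("response matches request", resp.RequestId)
}
```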
+func (s *Suite) TestBroadcast_66(t *utesting.T) { + sendConn, receiveConn := s.setupConnection66(t), s.setupConnection66(t) + nextBlock := len(s.chain.blocks) + blockAnnouncement := &NewBlock{ + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + } + s.testAnnounce66(t, sendConn, receiveConn, blockAnnouncement) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock66(s.chain.Head()); err != nil { + t.Fatal(err) + } +} + +// TestGetBlockBodies_66 tests whether the given node can respond to +// a `GetBlockBodies` request and that the response is accurate over +// the eth66 protocol. +func (s *Suite) TestGetBlockBodies_66(t *utesting.T) { + conn := s.setupConnection66(t) + // create block bodies request + id := uint64(55) + req := ð.GetBlockBodiesPacket66{ + RequestId: id, + GetBlockBodiesPacket: eth.GetBlockBodiesPacket{ + s.chain.blocks[54].Hash(), + s.chain.blocks[75].Hash(), + }, + } + if err := conn.write66(req, GetBlockBodies{}.Code()); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + + reqID, msg := conn.readAndServe66(s.chain, timeout) + switch msg := msg.(type) { + case BlockBodies: + if reqID != req.RequestId { + t.Fatalf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) + } + t.Logf("received %d block bodies", len(msg)) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// TestLargeAnnounce_66 tests the announcement mechanism with a large block. +func (s *Suite) TestLargeAnnounce_66(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TD(nextBlock + 1), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + }, + } + + for i, blockAnnouncement := range blocks[0:3] { + t.Logf("Testing malicious announcement: %v\n", i) + sendConn := s.setupConnection66(t) + if err := sendConn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := sendConn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + } + // Test the last block as a valid block + sendConn := s.setupConnection66(t) + receiveConn := s.setupConnection66(t) + s.testAnnounce66(t, sendConn, receiveConn, blocks[3]) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock66(s.fullChain.blocks[nextBlock]); err != nil { + t.Fatal(err) + } +} + +// TestMaliciousHandshake_66 tries to send malicious data during the handshake. 
+func (s *Suite) TestMaliciousHandshake_66(t *utesting.T) { + conn := s.dial66(t) + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 66}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + {Name: "eth", Version: 66}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + {Name: "eth", Version: 66}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + {Name: "eth", Version: 66}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 66}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + // Init the handshake + if err := conn.Write(handshake); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // check that the peer disconnected + timeout := 20 * time.Second + // Discard one hello + for i := 0; i < 2; i++ { + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Hello's are sent concurrently, so ignore them + continue + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } + } + // Dial for the next round + conn = s.dial66(t) + } +} + +// TestMaliciousStatus_66 sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus_66(t *utesting.T) { + conn := s.dial66(t) + // get protoHandshake + conn.handshake(t) + status := &Status{ + ProtocolVersion: uint32(66), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + switch msg := conn.statusExchange(t, s.chain, status).(type) { + case *Status: + t.Logf("%+v\n", msg) + default: + t.Fatalf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + return + default: + t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} + +func (s *Suite) TestTransaction_66(t *utesting.T) { + tests := []*types.Transaction{ + getNextTxFromChain(t, s), + unknownTx(t, s), + } + for i, tx := range tests { + t.Logf("Testing tx propagation: %v\n", i) + sendSuccessfulTx66(t, s, tx) + } +} + +func (s *Suite) TestMaliciousTx_66(t *utesting.T) { + tests := []*types.Transaction{ + getOldTxFromChain(t, s), + invalidNonceTx(t, s), + hugeAmount(t, s), + hugeGasPrice(t, s), + hugeData(t, s), + } + for i, tx := range tests { + t.Logf("Testing malicious tx propagation: %v\n", i) + sendFailingTx66(t, s, tx) + } +} + +// TestZeroRequestID_66 checks that a request ID of zero is still handled +// by the node. +func (s *Suite) TestZeroRequestID_66(t *utesting.T) { + conn := s.setupConnection66(t) + req := ð.GetBlockHeadersPacket66{ + RequestId: 0, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: 0, + }, + Amount: 2, + }, + } + headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req, req.RequestId)) +} + +// TestSameRequestID_66 sends two requests with the same request ID +// concurrently to a single node. 
+func (s *Suite) TestSameRequestID_66(t *utesting.T) { + conn := s.setupConnection66(t) + // create two separate requests with same ID + reqID := uint64(1234) + req1 := ð.GetBlockHeadersPacket66{ + RequestId: reqID, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: 0, + }, + Amount: 2, + }, + } + req2 := ð.GetBlockHeadersPacket66{ + RequestId: reqID, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Number: 33, + }, + Amount: 2, + }, + } + // send requests concurrently + go func() { + headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req2, reqID)) + }() + // check response from first request + headersMatch(t, s.chain, s.getBlockHeaders66(t, conn, req1, reqID)) +} diff --git a/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go new file mode 100644 index 0000000000..b7fa1dce26 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/eth66_suiteHelpers.go @@ -0,0 +1,270 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "fmt" + "time" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" +) + +func (c *Conn) statusExchange66(t *utesting.T, chain *Chain) Message { + status := &Status{ + ProtocolVersion: uint32(66), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(chain.Len()), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } + return c.statusExchange(t, chain, status) +} + +func (s *Suite) dial66(t *utesting.T) *Conn { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + conn.caps = append(conn.caps, p2p.Cap{Name: "eth", Version: 66}) + return conn +} + +func (c *Conn) write66(req eth.Packet, code int) error { + payload, err := rlp.EncodeToBytes(req) + if err != nil { + return err + } + _, err = c.Conn.Write(uint64(code), payload) + return err +} + +func (c *Conn) read66() (uint64, Message) { + code, rawData, _, err := c.Conn.Read() + if err != nil { + return 0, errorf("could not read from connection: %v", err) + } + + var msg Message + + switch int(code) { + case (Hello{}).Code(): + msg = new(Hello) + + case (Ping{}).Code(): + msg = new(Ping) + case (Pong{}).Code(): + msg = new(Pong) + case (Disconnect{}).Code(): + msg = new(Disconnect) + case (Status{}).Code(): + msg = new(Status) + case (GetBlockHeaders{}).Code(): + ethMsg := new(eth.GetBlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, 
GetBlockHeaders(*ethMsg.GetBlockHeadersPacket) + case (BlockHeaders{}).Code(): + ethMsg := new(eth.BlockHeadersPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockHeaders(ethMsg.BlockHeadersPacket) + case (GetBlockBodies{}).Code(): + ethMsg := new(eth.GetBlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, GetBlockBodies(ethMsg.GetBlockBodiesPacket) + case (BlockBodies{}).Code(): + ethMsg := new(eth.BlockBodiesPacket66) + if err := rlp.DecodeBytes(rawData, ethMsg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return ethMsg.RequestId, BlockBodies(ethMsg.BlockBodiesPacket) + case (NewBlock{}).Code(): + msg = new(NewBlock) + case (NewBlockHashes{}).Code(): + msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) + default: + msg = errorf("invalid message code: %d", code) + } + + if msg != nil { + if err := rlp.DecodeBytes(rawData, msg); err != nil { + return 0, errorf("could not rlp decode message: %v", err) + } + return 0, msg + } + return 0, errorf("invalid message: %s", string(rawData)) +} + +// ReadAndServe serves GetBlockHeaders requests while waiting +// on another message from the node. +func (c *Conn) readAndServe66(chain *Chain, timeout time.Duration) (uint64, Message) { + start := time.Now() + for time.Since(start) < timeout { + timeout := time.Now().Add(10 * time.Second) + c.SetReadDeadline(timeout) + + reqID, msg := c.read66() + + switch msg := msg.(type) { + case *Ping: + c.Write(&Pong{}) + case *GetBlockHeaders: + headers, err := chain.GetHeaders(*msg) + if err != nil { + return 0, errorf("could not get headers for inbound header request: %v", err) + } + + if err := c.Write(headers); err != nil { + return 0, errorf("could not write to connection: %v", err) + } + default: + return reqID, msg + } + } + return 0, errorf("no message received within %v", timeout) +} + +func (s *Suite) setupConnection66(t *utesting.T) *Conn { + // create conn + sendConn := s.dial66(t) + sendConn.handshake(t) + sendConn.statusExchange66(t, s.chain) + return sendConn +} + +func (s *Suite) testAnnounce66(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { + // Announce the block. 
+ if err := sendConn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + s.waitAnnounce66(t, receiveConn, blockAnnouncement) +} + +func (s *Suite) waitAnnounce66(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { + timeout := 20 * time.Second + _, msg := conn.readAndServe66(s.chain, timeout) + switch msg := msg.(type) { + case *NewBlock: + t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) + assert.Equal(t, + blockAnnouncement.Block.Header(), msg.Block.Header(), + "wrong block header in announcement", + ) + assert.Equal(t, + blockAnnouncement.TD, msg.TD, + "wrong TD in announcement", + ) + case *NewBlockHashes: + blockHashes := *msg + t.Logf("received NewBlockHashes message: %s", pretty.Sdump(blockHashes)) + assert.Equal(t, blockAnnouncement.Block.Hash(), blockHashes[0].Hash, + "wrong block hash in announcement", + ) + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } +} + +// waitForBlock66 waits for confirmation from the client that it has +// imported the given block. +func (c *Conn) waitForBlock66(block *types.Block) error { + defer c.SetReadDeadline(time.Time{}) + + timeout := time.Now().Add(20 * time.Second) + c.SetReadDeadline(timeout) + for { + req := eth.GetBlockHeadersPacket66{ + RequestId: 54, + GetBlockHeadersPacket: ð.GetBlockHeadersPacket{ + Origin: eth.HashOrNumber{ + Hash: block.Hash(), + }, + Amount: 1, + }, + } + if err := c.write66(req, GetBlockHeaders{}.Code()); err != nil { + return err + } + + reqID, msg := c.read66() + // check message + switch msg := msg.(type) { + case BlockHeaders: + // check request ID + if reqID != req.RequestId { + return fmt.Errorf("request ID mismatch: wanted %d, got %d", req.RequestId, reqID) + } + if len(msg) > 0 { + return nil + } + time.Sleep(100 * time.Millisecond) + default: + return fmt.Errorf("invalid message: %s", pretty.Sdump(msg)) + } + } +} + +func sendSuccessfulTx66(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn := s.setupConnection66(t) + sendSuccessfulTxWithConn(t, s, tx, sendConn) +} + +func sendFailingTx66(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn, recvConn := s.setupConnection66(t), s.setupConnection66(t) + sendFailingTxWithConns(t, s, tx, sendConn, recvConn) +} + +func (s *Suite) getBlockHeaders66(t *utesting.T, conn *Conn, req eth.Packet, expectedID uint64) BlockHeaders { + if err := conn.write66(req, GetBlockHeaders{}.Code()); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // check block headers response + reqID, msg := conn.readAndServe66(s.chain, timeout) + + switch msg := msg.(type) { + case BlockHeaders: + if reqID != expectedID { + t.Fatalf("request ID mismatch: wanted %d, got %d", expectedID, reqID) + } + return msg + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + return nil + } +} + +func headersMatch(t *utesting.T, chain *Chain, headers BlockHeaders) { + for _, header := range headers { + num := header.Number.Uint64() + t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash())) + assert.Equal(t, chain.blocks[int(num)].Header(), header) + } +} diff --git a/cmd/devp2p/internal/ethtest/large.go b/cmd/devp2p/internal/ethtest/large.go new file mode 100644 index 0000000000..deca00be53 --- /dev/null +++ b/cmd/devp2p/internal/ethtest/large.go @@ -0,0 +1,80 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "crypto/rand" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" +) + +// largeNumber returns a very large big.Int. +func largeNumber(megabytes int) *big.Int { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + bigint := new(big.Int) + bigint.SetBytes(buf) + return bigint +} + +// largeBuffer returns a very large buffer. +func largeBuffer(megabytes int) []byte { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return buf +} + +// largeString returns a very large string. +func largeString(megabytes int) string { + buf := make([]byte, megabytes*1024*1024) + rand.Read(buf) + return hexutil.Encode(buf) +} + +func largeBlock() *types.Block { + return types.NewBlockWithHeader(largeHeader()) +} + +// Returns a random hash +func randHash() common.Hash { + var h common.Hash + rand.Read(h[:]) + return h +} + +func largeHeader() *types.Header { + return &types.Header{ + MixDigest: randHash(), + ReceiptHash: randHash(), + TxHash: randHash(), + Nonce: types.BlockNonce{}, + Extra: []byte{}, + Bloom: types.Bloom{}, + GasUsed: 0, + Coinbase: common.Address{}, + GasLimit: 0, + UncleHash: randHash(), + Time: 1337, + ParentHash: randHash(), + Root: randHash(), + Number: largeNumber(2), + Difficulty: largeNumber(2), + } +} diff --git a/cmd/devp2p/internal/ethtest/suite.go b/cmd/devp2p/internal/ethtest/suite.go index d5928bede4..48010b90dd 100644 --- a/cmd/devp2p/internal/ethtest/suite.go +++ b/cmd/devp2p/internal/ethtest/suite.go @@ -22,8 +22,11 @@ import ( "time" "github.com/davecgh/go-spew/spew" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" + "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/rlpx" "github.com/stretchr/testify/assert" @@ -36,6 +39,8 @@ var pretty = spew.ConfigState{ SortKeys: true, } +var timeout = 20 * time.Second + // Suite represents a structure used to test the eth // protocol of a node(s). type Suite struct { @@ -48,24 +53,47 @@ type Suite struct { // NewSuite creates and returns a new eth-test suite that can // be used to test the given node against the given blockchain // data. 
-func NewSuite(dest *enode.Node, chainfile string, genesisfile string) *Suite { +func NewSuite(dest *enode.Node, chainfile string, genesisfile string) (*Suite, error) { chain, err := loadChain(chainfile, genesisfile) if err != nil { - panic(err) + return nil, err } return &Suite{ Dest: dest, chain: chain.Shorten(1000), fullChain: chain, - } + }, nil } -func (s *Suite) AllTests() []utesting.Test { +func (s *Suite) EthTests() []utesting.Test { return []utesting.Test{ + // status {Name: "Status", Fn: s.TestStatus}, + {Name: "Status_66", Fn: s.TestStatus_66}, + // get block headers {Name: "GetBlockHeaders", Fn: s.TestGetBlockHeaders}, - {Name: "Broadcast", Fn: s.TestBroadcast}, + {Name: "GetBlockHeaders_66", Fn: s.TestGetBlockHeaders_66}, + {Name: "TestSimultaneousRequests_66", Fn: s.TestSimultaneousRequests_66}, + {Name: "TestSameRequestID_66", Fn: s.TestSameRequestID_66}, + {Name: "TestZeroRequestID_66", Fn: s.TestZeroRequestID_66}, + // get block bodies {Name: "GetBlockBodies", Fn: s.TestGetBlockBodies}, + {Name: "GetBlockBodies_66", Fn: s.TestGetBlockBodies_66}, + // broadcast + {Name: "Broadcast", Fn: s.TestBroadcast}, + {Name: "Broadcast_66", Fn: s.TestBroadcast_66}, + {Name: "TestLargeAnnounce", Fn: s.TestLargeAnnounce}, + {Name: "TestLargeAnnounce_66", Fn: s.TestLargeAnnounce_66}, + // malicious handshakes + status + {Name: "TestMaliciousHandshake", Fn: s.TestMaliciousHandshake}, + {Name: "TestMaliciousStatus", Fn: s.TestMaliciousStatus}, + {Name: "TestMaliciousHandshake_66", Fn: s.TestMaliciousHandshake_66}, + {Name: "TestMaliciousStatus_66", Fn: s.TestMaliciousStatus}, + // test transactions + {Name: "TestTransactions", Fn: s.TestTransaction}, + {Name: "TestTransactions_66", Fn: s.TestTransaction_66}, + {Name: "TestMaliciousTransactions", Fn: s.TestMaliciousTx}, + {Name: "TestMaliciousTransactions_66", Fn: s.TestMaliciousTx_66}, } } @@ -80,7 +108,7 @@ func (s *Suite) TestStatus(t *utesting.T) { // get protoHandshake conn.handshake(t) // get status - switch msg := conn.statusExchange(t, s.chain).(type) { + switch msg := conn.statusExchange(t, s.chain, nil).(type) { case *Status: t.Logf("got status message: %s", pretty.Sdump(msg)) default: @@ -88,6 +116,39 @@ func (s *Suite) TestStatus(t *utesting.T) { } } +// TestMaliciousStatus sends a status package with a large total difficulty. +func (s *Suite) TestMaliciousStatus(t *utesting.T) { + conn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + // get protoHandshake + conn.handshake(t) + status := &Status{ + ProtocolVersion: uint32(conn.ethProtocolVersion), + NetworkID: s.chain.chainConfig.ChainID.Uint64(), + TD: largeNumber(2), + Head: s.chain.blocks[s.chain.Len()-1].Hash(), + Genesis: s.chain.blocks[0].Hash(), + ForkID: s.chain.ForkID(), + } + // get status + switch msg := conn.statusExchange(t, s.chain, status).(type) { + case *Status: + t.Logf("%+v\n", msg) + default: + t.Fatalf("expected status, got: %#v ", msg) + } + // wait for disconnect + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + return + default: + t.Fatalf("expected disconnect, got: %s", pretty.Sdump(msg)) + } +} + // TestGetBlockHeaders tests whether the given node can respond to // a `GetBlockHeaders` request and that the response is accurate. 
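Several of the `_66` cases registered above (`TestSameRequestID_66`, `TestZeroRequestID_66`, and the header tests) hinge on the same eth/66 invariant: a reply must echo the request ID it was issued with. A small hypothetical helper, written against the packet types already used in this file (it is not part of the change itself), makes the check explicit:

```go
package ethtest

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// checkHeadersReply validates an eth/66 BlockHeaders response: the echoed
// request ID must match the one we sent, and the peer must not return more
// headers than were asked for.
func checkHeadersReply(req *eth.GetBlockHeadersPacket66, gotID uint64, headers BlockHeaders) error {
	if gotID != req.RequestId {
		return fmt.Errorf("request ID mismatch: wanted %d, got %d", req.RequestId, gotID)
	}
	if uint64(len(headers)) > req.Amount {
		return fmt.Errorf("too many headers: asked for %d, got %d", req.Amount, len(headers))
	}
	return nil
}
```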
func (s *Suite) TestGetBlockHeaders(t *utesting.T) { @@ -97,11 +158,11 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { } conn.handshake(t) - conn.statusExchange(t, s.chain) + conn.statusExchange(t, s.chain, nil) // get block headers req := &GetBlockHeaders{ - Origin: hashOrNumber{ + Origin: eth.HashOrNumber{ Hash: s.chain.blocks[1].Hash(), }, Amount: 2, @@ -113,13 +174,12 @@ func (s *Suite) TestGetBlockHeaders(t *utesting.T) { t.Fatalf("could not write to connection: %v", err) } - timeout := 20 * time.Second switch msg := conn.ReadAndServe(s.chain, timeout).(type) { case *BlockHeaders: - headers := msg - for _, header := range *headers { + headers := *msg + for _, header := range headers { num := header.Number.Uint64() - t.Logf("received header (%d): %s", num, pretty.Sdump(header)) + t.Logf("received header (%d): %s", num, pretty.Sdump(header.Hash())) assert.Equal(t, s.chain.blocks[int(num)].Header(), header) } default: @@ -136,14 +196,16 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { } conn.handshake(t) - conn.statusExchange(t, s.chain) + conn.statusExchange(t, s.chain, nil) // create block bodies request - req := &GetBlockBodies{s.chain.blocks[54].Hash(), s.chain.blocks[75].Hash()} + req := &GetBlockBodies{ + s.chain.blocks[54].Hash(), + s.chain.blocks[75].Hash(), + } if err := conn.Write(req); err != nil { t.Fatalf("could not write to connection: %v", err) } - timeout := 20 * time.Second switch msg := conn.ReadAndServe(s.chain, timeout).(type) { case *BlockBodies: t.Logf("received %d block bodies", len(*msg)) @@ -155,34 +217,157 @@ func (s *Suite) TestGetBlockBodies(t *utesting.T) { // TestBroadcast tests whether a block announcement is correctly // propagated to the given node's peer(s). func (s *Suite) TestBroadcast(t *utesting.T) { - // create conn to send block announcement - sendConn, err := s.dial() - if err != nil { - t.Fatalf("could not dial: %v", err) + sendConn, receiveConn := s.setupConnection(t), s.setupConnection(t) + nextBlock := len(s.chain.blocks) + blockAnnouncement := &NewBlock{ + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + } + s.testAnnounce(t, sendConn, receiveConn, blockAnnouncement) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock(s.chain.Head()); err != nil { + t.Fatal(err) } - // create conn to receive block announcement - receiveConn, err := s.dial() +} + +// TestMaliciousHandshake tries to send malicious data during the handshake. 
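The `Origin` field used in the request above is the `eth.HashOrNumber` union: only one of its two members may be populated, and its RLP encoding serializes exactly one of them (as the removed `hashOrNumber` helpers in `types.go` below illustrate). A short sketch of the two valid forms, using the aliased `GetBlockHeaders` type from this package:

```go
package ethtest

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/eth/protocols/eth"
)

// exampleHeaderRequests shows the two ways a header query can be anchored.
func exampleHeaderRequests(start common.Hash) []*GetBlockHeaders {
	byHash := &GetBlockHeaders{
		Origin:  eth.HashOrNumber{Hash: start}, // anchor on a known block hash
		Amount:  2,
		Skip:    1,
		Reverse: false,
	}
	byNumber := &GetBlockHeaders{
		Origin: eth.HashOrNumber{Number: 1}, // or anchor on block number 1
		Amount: 2,
	}
	return []*GetBlockHeaders{byHash, byNumber}
}
```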
+func (s *Suite) TestMaliciousHandshake(t *utesting.T) { + conn, err := s.dial() if err != nil { t.Fatalf("could not dial: %v", err) } + // write hello to client + pub0 := crypto.FromECDSAPub(&conn.ourKey.PublicKey)[1:] + handshakes := []*Hello{ + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: pub0, + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, byte(0)), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: append(pub0, pub0...), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + }, + ID: largeBuffer(2), + }, + { + Version: 5, + Caps: []p2p.Cap{ + {Name: largeString(2), Version: 64}, + }, + ID: largeBuffer(2), + }, + } + for i, handshake := range handshakes { + t.Logf("Testing malicious handshake %v\n", i) + // Init the handshake + if err := conn.Write(handshake); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // check that the peer disconnected + timeout := 20 * time.Second + // Discard one hello + for i := 0; i < 2; i++ { + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + case *Hello: + // Hello's are send concurrently, so ignore them + continue + default: + t.Fatalf("unexpected: %s", pretty.Sdump(msg)) + } + } + // Dial for the next round + conn, err = s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) + } + } +} - sendConn.handshake(t) - receiveConn.handshake(t) - - sendConn.statusExchange(t, s.chain) - receiveConn.statusExchange(t, s.chain) +// TestLargeAnnounce tests the announcement mechanism with a large block. +func (s *Suite) TestLargeAnnounce(t *utesting.T) { + nextBlock := len(s.chain.blocks) + blocks := []*NewBlock{ + { + Block: largeBlock(), + TD: s.fullChain.TD(nextBlock + 1), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: largeNumber(2), + }, + { + Block: largeBlock(), + TD: largeNumber(2), + }, + { + Block: s.fullChain.blocks[nextBlock], + TD: s.fullChain.TD(nextBlock + 1), + }, + } - // sendConn sends the block announcement - blockAnnouncement := &NewBlock{ - Block: s.fullChain.blocks[1000], - TD: s.fullChain.TD(1001), + for i, blockAnnouncement := range blocks[0:3] { + t.Logf("Testing malicious announcement: %v\n", i) + sendConn := s.setupConnection(t) + if err := sendConn.Write(blockAnnouncement); err != nil { + t.Fatalf("could not write to connection: %v", err) + } + // Invalid announcement, check that peer disconnected + switch msg := sendConn.ReadAndServe(s.chain, timeout).(type) { + case *Disconnect: + case *Error: + break + default: + t.Fatalf("unexpected: %s wanted disconnect", pretty.Sdump(msg)) + } + } + // Test the last block as a valid block + sendConn := s.setupConnection(t) + receiveConn := s.setupConnection(t) + s.testAnnounce(t, sendConn, receiveConn, blocks[3]) + // update test suite chain + s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[nextBlock]) + // wait for client to update its chain + if err := receiveConn.waitForBlock(s.fullChain.blocks[nextBlock]); err != nil { + t.Fatal(err) } +} + +func (s *Suite) testAnnounce(t *utesting.T, sendConn, receiveConn *Conn, blockAnnouncement *NewBlock) { + // Announce the block. 
if err := sendConn.Write(blockAnnouncement); err != nil { t.Fatalf("could not write to connection: %v", err) } + s.waitAnnounce(t, receiveConn, blockAnnouncement) +} +func (s *Suite) waitAnnounce(t *utesting.T, conn *Conn, blockAnnouncement *NewBlock) { timeout := 20 * time.Second - switch msg := receiveConn.ReadAndServe(s.chain, timeout).(type) { + switch msg := conn.ReadAndServe(s.chain, timeout).(type) { case *NewBlock: t.Logf("received NewBlock message: %s", pretty.Sdump(msg.Block)) assert.Equal(t, @@ -194,40 +379,72 @@ func (s *Suite) TestBroadcast(t *utesting.T) { "wrong TD in announcement", ) case *NewBlockHashes: - hashes := *msg - t.Logf("received NewBlockHashes message: %s", pretty.Sdump(hashes)) - assert.Equal(t, - blockAnnouncement.Block.Hash(), hashes[0].Hash, + message := *msg + t.Logf("received NewBlockHashes message: %s", pretty.Sdump(message)) + assert.Equal(t, blockAnnouncement.Block.Hash(), message[0].Hash, "wrong block hash in announcement", ) default: t.Fatalf("unexpected: %s", pretty.Sdump(msg)) } - // update test suite chain - s.chain.blocks = append(s.chain.blocks, s.fullChain.blocks[1000]) - // wait for client to update its chain - if err := receiveConn.waitForBlock(s.chain.Head()); err != nil { - t.Fatal(err) +} + +func (s *Suite) setupConnection(t *utesting.T) *Conn { + // create conn + sendConn, err := s.dial() + if err != nil { + t.Fatalf("could not dial: %v", err) } + sendConn.handshake(t) + sendConn.statusExchange(t, s.chain, nil) + return sendConn } // dial attempts to dial the given node and perform a handshake, // returning the created Conn if successful. func (s *Suite) dial() (*Conn, error) { var conn Conn - + // dial fd, err := net.Dial("tcp", fmt.Sprintf("%v:%d", s.Dest.IP(), s.Dest.TCP())) if err != nil { return nil, err } conn.Conn = rlpx.NewConn(fd, s.Dest.Pubkey()) - // do encHandshake conn.ourKey, _ = crypto.GenerateKey() _, err = conn.Handshake(conn.ourKey) if err != nil { return nil, err } - + // set default p2p capabilities + conn.caps = []p2p.Cap{ + {Name: "eth", Version: 64}, + {Name: "eth", Version: 65}, + } return &conn, nil } + +func (s *Suite) TestTransaction(t *utesting.T) { + tests := []*types.Transaction{ + getNextTxFromChain(t, s), + unknownTx(t, s), + } + for i, tx := range tests { + t.Logf("Testing tx propagation: %v\n", i) + sendSuccessfulTx(t, s, tx) + } +} + +func (s *Suite) TestMaliciousTx(t *utesting.T) { + tests := []*types.Transaction{ + getOldTxFromChain(t, s), + invalidNonceTx(t, s), + hugeAmount(t, s), + hugeGasPrice(t, s), + hugeData(t, s), + } + for i, tx := range tests { + t.Logf("Testing malicious tx propagation: %v\n", i) + sendFailingTx(t, s, tx) + } +} diff --git a/cmd/devp2p/internal/ethtest/testdata/chain.rlp b/cmd/devp2p/internal/ethtest/testdata/chain.rlp new file mode 100644 index 0000000000..5ebc2f3bb7 Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/chain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz b/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz deleted file mode 100644 index 50f52eafa2..0000000000 Binary files a/cmd/devp2p/internal/ethtest/testdata/fullchain.rlp.gz and /dev/null differ diff --git a/cmd/devp2p/internal/ethtest/testdata/genesis.json b/cmd/devp2p/internal/ethtest/testdata/genesis.json index ed78488b67..d8b5d22502 100644 --- a/cmd/devp2p/internal/ethtest/testdata/genesis.json +++ b/cmd/devp2p/internal/ethtest/testdata/genesis.json @@ -17,7 +17,7 @@ "coinbase": "0x0000000000000000000000000000000000000000", "alloc": { 
"71562b71999873db5b286df957af199ec94617f7": { - "balance": "0xffffffff" + "balance": "0xffffffffffffffffffffffffff" } }, "number": "0x0", diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp new file mode 100644 index 0000000000..1a820734e1 Binary files /dev/null and b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp differ diff --git a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz b/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz deleted file mode 100644 index 82d5271361..0000000000 Binary files a/cmd/devp2p/internal/ethtest/testdata/halfchain.rlp.gz and /dev/null differ diff --git a/cmd/devp2p/internal/ethtest/transaction.go b/cmd/devp2p/internal/ethtest/transaction.go new file mode 100644 index 0000000000..21aa221e8b --- /dev/null +++ b/cmd/devp2p/internal/ethtest/transaction.go @@ -0,0 +1,189 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethtest + +import ( + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/internal/utesting" +) + +//var faucetAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +var faucetKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + +func sendSuccessfulTx(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn := s.setupConnection(t) + sendSuccessfulTxWithConn(t, s, tx, sendConn) +} + +func sendSuccessfulTxWithConn(t *utesting.T, s *Suite, tx *types.Transaction, sendConn *Conn) { + t.Logf("sending tx: %v %v %v\n", tx.Hash().String(), tx.GasPrice(), tx.Gas()) + // Send the transaction + if err := sendConn.Write(&Transactions{tx}); err != nil { + t.Fatal(err) + } + time.Sleep(100 * time.Millisecond) + recvConn := s.setupConnection(t) + // Wait for the transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *Transactions: + recTxs := *msg + for _, gotTx := range recTxs { + if gotTx.Hash() == tx.Hash() { + // Ok + return + } + } + t.Fatalf("missing transaction: got %v missing %v", recTxs, tx.Hash()) + case *NewPooledTransactionHashes: + txHashes := *msg + for _, gotHash := range txHashes { + if gotHash == tx.Hash() { + return + } + } + t.Fatalf("missing transaction announcement: got %v missing %v", txHashes, tx.Hash()) + default: + t.Fatalf("unexpected message in sendSuccessfulTx: %s", pretty.Sdump(msg)) + } +} + +func sendFailingTx(t *utesting.T, s *Suite, tx *types.Transaction) { + sendConn, recvConn := s.setupConnection(t), s.setupConnection(t) + sendFailingTxWithConns(t, s, tx, sendConn, recvConn) +} + +func sendFailingTxWithConns(t *utesting.T, s *Suite, tx *types.Transaction, sendConn, recvConn *Conn) { + // Wait 
for a transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *NewPooledTransactionHashes: + break + default: + t.Logf("unexpected message, logging: %v", pretty.Sdump(msg)) + } + // Send the transaction + if err := sendConn.Write(&Transactions{tx}); err != nil { + t.Fatal(err) + } + // Wait for another transaction announcement + switch msg := recvConn.ReadAndServe(s.chain, timeout).(type) { + case *Transactions: + t.Fatalf("Received unexpected transaction announcement: %v", msg) + case *NewPooledTransactionHashes: + t.Fatalf("Received unexpected pooledTx announcement: %v", msg) + case *Error: + // Transaction should not be announced -> wait for timeout + return + default: + t.Fatalf("unexpected message in sendFailingTx: %s", pretty.Sdump(msg)) + } +} + +func unknownTx(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()+1, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func getNextTxFromChain(t *utesting.T, s *Suite) *types.Transaction { + // Get a new transaction + var tx *types.Transaction + for _, blocks := range s.fullChain.blocks[s.chain.Len():] { + txs := blocks.Transactions() + if txs.Len() != 0 { + tx = txs[0] + break + } + } + if tx == nil { + t.Fatal("could not find transaction") + } + return tx +} + +func getOldTxFromChain(t *utesting.T, s *Suite) *types.Transaction { + var tx *types.Transaction + for _, blocks := range s.fullChain.blocks[:s.chain.Len()-1] { + txs := blocks.Transactions() + if txs.Len() != 0 { + tx = txs[0] + break + } + } + if tx == nil { + t.Fatal("could not find transaction") + } + return tx +} + +func invalidNonceTx(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce()-2, to, tx.Value(), tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeAmount(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + amount := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, amount, tx.Gas(), tx.GasPrice(), tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeGasPrice(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + gasPrice := largeNumber(2) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), gasPrice, tx.Data()) + return signWithFaucet(t, txNew) +} + +func hugeData(t *utesting.T, s *Suite) *types.Transaction { + tx := getNextTxFromChain(t, s) + var to common.Address + if tx.To() != nil { + to = *tx.To() + } + txNew := types.NewTransaction(tx.Nonce(), to, tx.Value(), tx.Gas(), tx.GasPrice(), largeBuffer(2)) + return signWithFaucet(t, txNew) +} + +func signWithFaucet(t *utesting.T, tx *types.Transaction) *types.Transaction { + signer := types.HomesteadSigner{} + signedTx, err := types.SignTx(tx, signer, faucetKey) + if err != nil { + t.Fatalf("could not sign tx: %v\n", err) + } + return signedTx +} diff --git a/cmd/devp2p/internal/ethtest/types.go b/cmd/devp2p/internal/ethtest/types.go index 69367cb6cd..734adff366 100644 --- a/cmd/devp2p/internal/ethtest/types.go +++ b/cmd/devp2p/internal/ethtest/types.go @@ -19,15 +19,12 @@ package ethtest import ( "crypto/ecdsa" "fmt" 
- "io" - "math/big" "reflect" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/internal/utesting" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/rlpx" @@ -81,98 +78,54 @@ type Pong struct{} func (p Pong) Code() int { return 0x03 } // Status is the network packet for the status message for eth/64 and later. -type Status struct { - ProtocolVersion uint32 - NetworkID uint64 - TD *big.Int - Head common.Hash - Genesis common.Hash - ForkID forkid.ID -} +type Status eth.StatusPacket func (s Status) Code() int { return 16 } // NewBlockHashes is the network packet for the block announcements. -type NewBlockHashes []struct { - Hash common.Hash // Hash of one particular block being announced - Number uint64 // Number of one particular block being announced -} +type NewBlockHashes eth.NewBlockHashesPacket func (nbh NewBlockHashes) Code() int { return 17 } -// NewBlock is the network packet for the block propagation message. -type NewBlock struct { - Block *types.Block - TD *big.Int -} +type Transactions eth.TransactionsPacket -func (nb NewBlock) Code() int { return 23 } +func (t Transactions) Code() int { return 18 } // GetBlockHeaders represents a block header query. -type GetBlockHeaders struct { - Origin hashOrNumber // Block from which to retrieve headers - Amount uint64 // Maximum number of headers to retrieve - Skip uint64 // Blocks to skip between consecutive headers - Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) -} +type GetBlockHeaders eth.GetBlockHeadersPacket func (g GetBlockHeaders) Code() int { return 19 } -type BlockHeaders []*types.Header +type BlockHeaders eth.BlockHeadersPacket func (bh BlockHeaders) Code() int { return 20 } -// HashOrNumber is a combined field for specifying an origin block. -type hashOrNumber struct { - Hash common.Hash // Block hash from which to retrieve headers (excludes Number) - Number uint64 // Block hash from which to retrieve headers (excludes Hash) -} - -// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the -// two contained union fields. -func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { - if hn.Hash == (common.Hash{}) { - return rlp.Encode(w, hn.Number) - } - if hn.Number != 0 { - return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) - } - return rlp.Encode(w, hn.Hash) -} - -// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents -// into either a block hash or a block number. -func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } - } - return err -} - // GetBlockBodies represents a GetBlockBodies request -type GetBlockBodies []common.Hash +type GetBlockBodies eth.GetBlockBodiesPacket func (gbb GetBlockBodies) Code() int { return 21 } // BlockBodies is the network packet for block content distribution. -type BlockBodies []*types.Body +type BlockBodies eth.BlockBodiesPacket func (bb BlockBodies) Code() int { return 22 } +// NewBlock is the network packet for the block propagation message. 
+type NewBlock eth.NewBlockPacket + +func (nb NewBlock) Code() int { return 23 } + +// NewPooledTransactionHashes is the network packet for the tx hash propagation message. +type NewPooledTransactionHashes eth.NewPooledTransactionHashesPacket + +func (nb NewPooledTransactionHashes) Code() int { return 24 } + // Conn represents an individual connection with a peer type Conn struct { *rlpx.Conn ourKey *ecdsa.PrivateKey ethProtocolVersion uint + caps []p2p.Cap } func (c *Conn) Read() Message { @@ -205,10 +158,14 @@ func (c *Conn) Read() Message { msg = new(NewBlock) case (NewBlockHashes{}).Code(): msg = new(NewBlockHashes) + case (Transactions{}).Code(): + msg = new(Transactions) + case (NewPooledTransactionHashes{}).Code(): + msg = new(NewPooledTransactionHashes) default: return errorf("invalid message code: %d", code) } - + // if message is devp2p, decode here if err := rlp.DecodeBytes(rawData, msg); err != nil { return errorf("could not rlp decode message: %v", err) } @@ -243,7 +200,12 @@ func (c *Conn) ReadAndServe(chain *Chain, timeout time.Duration) Message { } func (c *Conn) Write(msg Message) error { - payload, err := rlp.EncodeToBytes(msg) + // check if message is eth protocol message + var ( + payload []byte + err error + ) + payload, err = rlp.EncodeToBytes(msg) if err != nil { return err } @@ -260,11 +222,8 @@ func (c *Conn) handshake(t *utesting.T) Message { pub0 := crypto.FromECDSAPub(&c.ourKey.PublicKey)[1:] ourHandshake := &Hello{ Version: 5, - Caps: []p2p.Cap{ - {Name: "eth", Version: 64}, - {Name: "eth", Version: 65}, - }, - ID: pub0, + Caps: c.caps, + ID: pub0, } if err := c.Write(ourHandshake); err != nil { t.Fatalf("could not write to connection: %v", err) @@ -304,7 +263,7 @@ func (c *Conn) negotiateEthProtocol(caps []p2p.Cap) { // statusExchange performs a `Status` message exchange with the given // node. 
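The aliasing pattern introduced above keeps the test harness thin: supporting a further eth message only takes a named alias of the wire type from `eth/protocols/eth` plus its absolute message code, and one extra case in `Read`. A hypothetical example, not part of this change, assuming the same 16-based code numbering used for the other messages (`NewPooledTransactionHashes` is 24):

```go
package ethtest

import "github.com/ethereum/go-ethereum/eth/protocols/eth"

// GetPooledTransactions would wrap the pooled-transaction query the same way
// the types above wrap their packets.
type GetPooledTransactions eth.GetPooledTransactionsPacket

func (gpt GetPooledTransactions) Code() int { return 25 }
```

A matching `case (GetPooledTransactions{}).Code():` in `Read` would then decode it like the other messages.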
-func (c *Conn) statusExchange(t *utesting.T, chain *Chain) Message { +func (c *Conn) statusExchange(t *utesting.T, chain *Chain, status *Status) Message { defer c.SetDeadline(time.Time{}) c.SetDeadline(time.Now().Add(20 * time.Second)) @@ -338,15 +297,18 @@ loop: if c.ethProtocolVersion == 0 { t.Fatalf("eth protocol version must be set in Conn") } - // write status message to client - status := Status{ - ProtocolVersion: uint32(c.ethProtocolVersion), - NetworkID: chain.chainConfig.ChainID.Uint64(), - TD: chain.TD(chain.Len()), - Head: chain.blocks[chain.Len()-1].Hash(), - Genesis: chain.blocks[0].Hash(), - ForkID: chain.ForkID(), + if status == nil { + // write status message to client + status = &Status{ + ProtocolVersion: uint32(c.ethProtocolVersion), + NetworkID: chain.chainConfig.ChainID.Uint64(), + TD: chain.TD(chain.Len()), + Head: chain.blocks[chain.Len()-1].Hash(), + Genesis: chain.blocks[0].Hash(), + ForkID: chain.ForkID(), + } } + if err := c.Write(status); err != nil { t.Fatalf("could not write to connection: %v", err) } @@ -362,7 +324,7 @@ func (c *Conn) waitForBlock(block *types.Block) error { timeout := time.Now().Add(20 * time.Second) c.SetReadDeadline(timeout) for { - req := &GetBlockHeaders{Origin: hashOrNumber{Hash: block.Hash()}, Amount: 1} + req := &GetBlockHeaders{Origin: eth.HashOrNumber{Hash: block.Hash()}, Amount: 1} if err := c.Write(req); err != nil { return err } diff --git a/cmd/devp2p/nodesetcmd.go b/cmd/devp2p/nodesetcmd.go index 228d3319e3..ba97405abc 100644 --- a/cmd/devp2p/nodesetcmd.go +++ b/cmd/devp2p/nodesetcmd.go @@ -95,6 +95,7 @@ var filterFlags = map[string]nodeFilterC{ "-min-age": {1, minAgeFilter}, "-eth-network": {1, ethFilter}, "-les-server": {0, lesFilter}, + "-snap": {0, snapFilter}, } func parseFilters(args []string) ([]nodeFilter, error) { @@ -104,15 +105,15 @@ func parseFilters(args []string) ([]nodeFilter, error) { if !ok { return nil, fmt.Errorf("invalid filter %q", args[0]) } - if len(args) < fc.narg { - return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)) + if len(args)-1 < fc.narg { + return nil, fmt.Errorf("filter %q wants %d arguments, have %d", args[0], fc.narg, len(args)-1) } - filter, err := fc.fn(args[1:]) + filter, err := fc.fn(args[1 : 1+fc.narg]) if err != nil { return nil, fmt.Errorf("%s: %v", args[0], err) } filters = append(filters, filter) - args = args[fc.narg+1:] + args = args[1+fc.narg:] } return filters, nil } @@ -191,3 +192,13 @@ func lesFilter(args []string) (nodeFilter, error) { } return f, nil } + +func snapFilter(args []string) (nodeFilter, error) { + f := func(n nodeJSON) bool { + var snap struct { + _ []rlp.RawValue `rlp:"tail"` + } + return n.N.Load(enr.WithEntry("snap", &snap)) == nil + } + return f, nil +} diff --git a/cmd/devp2p/rlpxcmd.go b/cmd/devp2p/rlpxcmd.go index d90eb4687c..ac92818aa4 100644 --- a/cmd/devp2p/rlpxcmd.go +++ b/cmd/devp2p/rlpxcmd.go @@ -94,6 +94,9 @@ func rlpxEthTest(ctx *cli.Context) error { if ctx.NArg() < 3 { exit("missing path to chain.rlp as command-line argument") } - suite := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) - return runTests(ctx, suite.AllTests()) + suite, err := ethtest.NewSuite(getNodeArg(ctx), ctx.Args()[1], ctx.Args()[2]) + if err != nil { + exit(err) + } + return runTests(ctx, suite.EthTests()) } diff --git a/cmd/ethkey/generate.go b/cmd/ethkey/generate.go index c2aa1c6fb4..629d23da5b 100644 --- a/cmd/ethkey/generate.go +++ b/cmd/ethkey/generate.go @@ -26,7 +26,7 @@ import ( 
"github.com/ethereum/go-ethereum/accounts/keystore" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/crypto" - "github.com/pborman/uuid" + "github.com/google/uuid" "gopkg.in/urfave/cli.v1" ) @@ -86,9 +86,12 @@ If you want to encrypt an existing private key, it can be specified by setting } // Create the keyfile object with a random UUID. - id := uuid.NewRandom() + UUID, err := uuid.NewRandom() + if err != nil { + utils.Fatalf("Failed to generate random uuid: %v", err) + } key := &keystore.Key{ - Id: id, + Id: UUID, Address: crypto.PubkeyToAddress(privateKey.PublicKey), PrivateKey: privateKey, } diff --git a/cmd/evm/README.md b/cmd/evm/README.md index 8f0848bde8..7742ccbbb7 100644 --- a/cmd/evm/README.md +++ b/cmd/evm/README.md @@ -30,7 +30,7 @@ Command line params that has to be supported are --trace.nomemory Disable full memory dump in traces --trace.nostack Disable stack output in traces --trace.noreturndata Disable return data output in traces - --output.basedir value Specifies where output files are placed. Will be created if it does not exist. (default: ".") + --output.basedir value Specifies where output files are placed. Will be created if it does not exist. --output.alloc alloc Determines where to put the alloc of the post-state. `stdout` - into the stdout output `stderr` - into the stderr output @@ -237,10 +237,10 @@ Example where blockhashes are provided: cat trace-0-0x72fadbef39cd251a437eea619cfeda752271a5faaaa2147df012e112159ffb81.jsonl | grep BLOCKHASH -C2 ``` ``` -{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"PUSH1","error":""} -{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"BLOCKHASH","error":""} -{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":null,"depth":1,"refund":0,"opName":"STOP","error":""} -{"output":"","gasUsed":"0x17","time":112885} +{"pc":0,"op":96,"gas":"0x5f58ef8","gasCost":"0x3","memory":"0x","memSize":0,"stack":[],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"PUSH1","error":""} +{"pc":2,"op":64,"gas":"0x5f58ef5","gasCost":"0x14","memory":"0x","memSize":0,"stack":["0x1"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"BLOCKHASH","error":""} +{"pc":3,"op":0,"gas":"0x5f58ee1","gasCost":"0x0","memory":"0x","memSize":0,"stack":["0xdac58aa524e50956d0c0bae7f3f8bb9d35381365d07804dd5b48a5a297c06af4"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"STOP","error":""} +{"output":"","gasUsed":"0x17","time":142709} ``` In this example, the caller has not provided the required blockhash: @@ -256,9 +256,9 @@ Error code: 4 Another thing that can be done, is to chain invocations: ``` ./evm t8n --input.alloc=./testdata/1/alloc.json --input.txs=./testdata/1/txs.json --input.env=./testdata/1/env.json --output.alloc=stdout | ./evm t8n --input.alloc=stdin --input.env=./testdata/1/env.json --input.txs=./testdata/1/txs.json -INFO [08-03|15:25:15.168] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [08-03|15:25:15.169] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" -INFO [08-03|15:25:15.169] rejected tx index=1 
hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low" +INFO [01-21|22:41:22.963] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [01-21|22:41:22.966] rejected tx index=0 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" +INFO [01-21|22:41:22.967] rejected tx index=1 hash="0557ba…18d673" from=0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192 error="nonce too low: address 0x8A8eAFb1cf62BfBeb1741769DAE1a9dd47996192, tx: 0 state: 1" ``` What happened here, is that we first applied two identical transactions, so the second one was rejected. @@ -267,4 +267,3 @@ the same two transactions: this time, both failed due to too low nonce. In order to meaningfully chain invocations, one would need to provide meaningful new `env`, otherwise the actual blocknumber (exposed to the EVM) would not increase. - diff --git a/cmd/evm/compiler.go b/cmd/evm/compiler.go index c019a2fe70..40ad9313c5 100644 --- a/cmd/evm/compiler.go +++ b/cmd/evm/compiler.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/evm/internal/compiler" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var compileCommand = cli.Command{ diff --git a/cmd/evm/disasm.go b/cmd/evm/disasm.go index 5bc743aa88..68a09cbf50 100644 --- a/cmd/evm/disasm.go +++ b/cmd/evm/disasm.go @@ -23,7 +23,7 @@ import ( "strings" "github.com/ethereum/go-ethereum/core/asm" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var disasmCommand = cli.Command{ diff --git a/cmd/evm/internal/t8ntool/execution.go b/cmd/evm/internal/t8ntool/execution.go index d8b93d291a..c3f1b16efc 100644 --- a/cmd/evm/internal/t8ntool/execution.go +++ b/cmd/evm/internal/t8ntool/execution.go @@ -110,7 +110,7 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, txIndex = 0 ) gaspool.AddGas(pre.Env.GasLimit) - vmContext := vm.Context{ + vmContext := vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, Coinbase: pre.Env.Coinbase, @@ -119,7 +119,6 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, Difficulty: pre.Env.Difficulty, GasLimit: pre.Env.GasLimit, GetHash: getHash, - // GasPrice and Origin needs to be set per transaction } // If DAO is supported/enabled, we need to handle it here. In geth 'proper', it's // done in StateProcessor.Process(block, ...), right before transactions are applied. 
@@ -143,21 +142,10 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, vmConfig.Tracer = tracer vmConfig.Debug = (tracer != nil) statedb.Prepare(tx.Hash(), blockHash, txIndex) - vmContext.GasPrice = msg.GasPrice() - vmContext.Origin = msg.From() - - evm := vm.NewEVM(vmContext, statedb, chainConfig, vmConfig) - if chainConfig.IsYoloV2(vmContext.BlockNumber) { - statedb.AddAddressToAccessList(msg.From()) - if dst := msg.To(); dst != nil { - statedb.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create - } - for _, addr := range evm.ActivePrecompiles() { - statedb.AddAddressToAccessList(addr) - } - } + txContext := core.NewEVMTxContext(msg) snapshot := statedb.Snapshot() + evm := vm.NewEVM(vmContext, txContext, statedb, chainConfig, vmConfig) + // (ret []byte, usedGas uint64, failed bool, err error) msgResult, err := core.ApplyMessage(evm, msg, gaspool) if err != nil { @@ -171,7 +159,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, return nil, nil, NewError(ErrorMissingBlockhash, hashError) } gasUsed += msgResult.UsedGas - // Create a new receipt for the transaction, storing the intermediate root and gas used by the tx + + // Receipt: { var root []byte if chainConfig.IsByzantium(vmContext.BlockNumber) { @@ -180,22 +169,32 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, root = statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)).Bytes() } - receipt := types.NewReceipt(root, msgResult.Failed(), gasUsed) + // Create a new receipt for the transaction, storing the intermediate root and + // gas used by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: gasUsed} + if msgResult.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } receipt.TxHash = tx.Hash() receipt.GasUsed = msgResult.UsedGas - // if the transaction created a contract, store the creation address in the receipt. + + // If the transaction created a contract, store the creation address in the receipt. if msg.To() == nil { - receipt.ContractAddress = crypto.CreateAddress(evm.Context.Origin, tx.Nonce()) + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } - // Set the receipt logs and create a bloom for filtering + + // Set the receipt logs and create the bloom filter. 
receipt.Logs = statedb.GetLogs(tx.Hash()) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) - // These three are non-consensus fields + // These three are non-consensus fields: //receipt.BlockHash - //receipt.BlockNumber = + //receipt.BlockNumber receipt.TransactionIndex = uint(txIndex) receipts = append(receipts, receipt) } + txIndex++ } statedb.IntermediateRoot(chainConfig.IsEIP158(vmContext.BlockNumber)) @@ -231,8 +230,8 @@ func (pre *Prestate) Apply(vmConfig vm.Config, chainConfig *params.ChainConfig, } execRs := &ExecutionResult{ StateRoot: root, - TxRoot: types.DeriveSha(includedTxs, new(trie.Trie)), - ReceiptRoot: types.DeriveSha(receipts, new(trie.Trie)), + TxRoot: types.DeriveSha(includedTxs, trie.NewStackTrie(nil)), + ReceiptRoot: types.DeriveSha(receipts, trie.NewStackTrie(nil)), Bloom: types.CreateBloom(receipts), LogsHash: rlpHash(statedb.Logs()), Receipts: receipts, diff --git a/cmd/evm/internal/t8ntool/flags.go b/cmd/evm/internal/t8ntool/flags.go index 424156ba82..a599462cc6 100644 --- a/cmd/evm/internal/t8ntool/flags.go +++ b/cmd/evm/internal/t8ntool/flags.go @@ -47,6 +47,11 @@ var ( Usage: "Specifies where output files are placed. Will be created if it does not exist.", Value: "", } + OutputBodyFlag = cli.StringFlag{ + Name: "output.body", + Usage: "If set, the RLP of the transactions (block body) will be written to this file.", + Value: "", + } OutputAllocFlag = cli.StringFlag{ Name: "output.alloc", Usage: "Determines where to put the `alloc` of the post-state.\n" + diff --git a/cmd/evm/internal/t8ntool/transition.go b/cmd/evm/internal/t8ntool/transition.go index 5119ed5fb7..fedcd12435 100644 --- a/cmd/evm/internal/t8ntool/transition.go +++ b/cmd/evm/internal/t8ntool/transition.go @@ -17,6 +17,7 @@ package t8ntool import ( + "crypto/ecdsa" "encoding/json" "fmt" "io/ioutil" @@ -25,12 +26,15 @@ import ( "path" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/tests" "gopkg.in/urfave/cli.v1" ) @@ -64,9 +68,9 @@ func (n *NumberedError) Code() int { } type input struct { - Alloc core.GenesisAlloc `json:"alloc,omitempty"` - Env *stEnv `json:"env,omitempty"` - Txs types.Transactions `json:"txs,omitempty"` + Alloc core.GenesisAlloc `json:"alloc,omitempty"` + Env *stEnv `json:"env,omitempty"` + Txs []*txWithKey `json:"txs,omitempty"` } func Main(ctx *cli.Context) error { @@ -135,7 +139,7 @@ func Main(ctx *cli.Context) error { txStr = ctx.String(InputTxsFlag.Name) inputData = &input{} ) - + // Figure out the prestate alloc if allocStr == stdinSelector || envStr == stdinSelector || txStr == stdinSelector { decoder := json.NewDecoder(os.Stdin) decoder.Decode(inputData) @@ -151,7 +155,9 @@ func Main(ctx *cli.Context) error { return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling alloc-file: %v", err)) } } + prestate.Pre = inputData.Alloc + // Set the block environment if envStr != stdinSelector { inFile, err := os.Open(envStr) if err != nil { @@ -165,26 +171,8 @@ func Main(ctx *cli.Context) error { } inputData.Env = &env } - - if txStr != stdinSelector { - inFile, err := os.Open(txStr) - if err != nil { - return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", 
err)) - } - defer inFile.Close() - decoder := json.NewDecoder(inFile) - var txs types.Transactions - if err := decoder.Decode(&txs); err != nil { - return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) - } - inputData.Txs = txs - } - - prestate.Pre = inputData.Alloc prestate.Env = *inputData.Env - txs = inputData.Txs - // Iterate over all the tests, run them and aggregate the results vmConfig := vm.Config{ Tracer: tracer, Debug: (tracer != nil), @@ -200,17 +188,105 @@ func Main(ctx *cli.Context) error { // Set the chain id chainConfig.ChainID = big.NewInt(ctx.Int64(ChainIDFlag.Name)) + var txsWithKeys []*txWithKey + if txStr != stdinSelector { + inFile, err := os.Open(txStr) + if err != nil { + return NewError(ErrorIO, fmt.Errorf("failed reading txs file: %v", err)) + } + defer inFile.Close() + decoder := json.NewDecoder(inFile) + if err := decoder.Decode(&txsWithKeys); err != nil { + return NewError(ErrorJson, fmt.Errorf("Failed unmarshaling txs-file: %v", err)) + } + } else { + txsWithKeys = inputData.Txs + } + // We may have to sign the transactions. + signer := types.MakeSigner(chainConfig, big.NewInt(int64(prestate.Env.Number))) + + if txs, err = signUnsignedTransactions(txsWithKeys, signer); err != nil { + return NewError(ErrorJson, fmt.Errorf("Failed signing transactions: %v", err)) + } + + // Iterate over all the tests, run them and aggregate the results + // Run the test and aggregate the result state, result, err := prestate.Apply(vmConfig, chainConfig, txs, ctx.Int64(RewardFlag.Name), getTracer) if err != nil { return err } + body, _ := rlp.EncodeToBytes(txs) // Dump the excution result - //postAlloc := state.DumpGenesisFormat(false, false, false) collector := make(Alloc) state.DumpToCollector(collector, false, false, false, nil, -1) - return dispatchOutput(ctx, baseDir, result, collector) + return dispatchOutput(ctx, baseDir, result, collector, body) + +} +// txWithKey is a helper-struct, to allow us to use the types.Transaction along with +// a `secretKey`-field, for input +type txWithKey struct { + key *ecdsa.PrivateKey + tx *types.Transaction +} + +func (t *txWithKey) UnmarshalJSON(input []byte) error { + // Read the secretKey, if present + type sKey struct { + Key *common.Hash `json:"secretKey"` + } + var key sKey + if err := json.Unmarshal(input, &key); err != nil { + return err + } + if key.Key != nil { + k := key.Key.Hex()[2:] + if ecdsaKey, err := crypto.HexToECDSA(k); err != nil { + return err + } else { + t.key = ecdsaKey + } + } + // Now, read the transaction itself + var tx types.Transaction + if err := json.Unmarshal(input, &tx); err != nil { + return err + } + t.tx = &tx + return nil +} + +// signUnsignedTransactions converts the input txs to canonical transactions. +// +// The transactions can have two forms, either +// 1. unsigned or +// 2. signed +// For (1), r, s, v, need so be zero, and the `secretKey` needs to be set. +// If so, we sign it here and now, with the given `secretKey` +// If the condition above is not met, then it's considered a signed transaction. 
+// +// To manage this, we read the transactions twice, first trying to read the secretKeys, +// and secondly to read them with the standard tx json format +func signUnsignedTransactions(txs []*txWithKey, signer types.Signer) (types.Transactions, error) { + var signedTxs []*types.Transaction + for i, txWithKey := range txs { + tx := txWithKey.tx + key := txWithKey.key + v, r, s := tx.RawSignatureValues() + if key != nil && v.BitLen()+r.BitLen()+s.BitLen() == 0 { + // This transaction needs to be signed + signed, err := types.SignTx(tx, signer, key) + if err != nil { + return nil, NewError(ErrorJson, fmt.Errorf("Tx %d: failed to sign tx: %v", i, err)) + } + signedTxs = append(signedTxs, signed) + } else { + // Already signed + signedTxs = append(signedTxs, tx) + } + } + return signedTxs, nil } type Alloc map[common.Address]core.GenesisAccount @@ -241,15 +317,17 @@ func saveFile(baseDir, filename string, data interface{}) error { if err != nil { return NewError(ErrorJson, fmt.Errorf("failed marshalling output: %v", err)) } - if err = ioutil.WriteFile(path.Join(baseDir, filename), b, 0644); err != nil { + location := path.Join(baseDir, filename) + if err = ioutil.WriteFile(location, b, 0644); err != nil { return NewError(ErrorIO, fmt.Errorf("failed writing output: %v", err)) } + log.Info("Wrote file", "file", location) return nil } // dispatchOutput writes the output data to either stderr or stdout, or to the specified // files -func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc) error { +func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, alloc Alloc, body hexutil.Bytes) error { stdOutObject := make(map[string]interface{}) stdErrObject := make(map[string]interface{}) dispatch := func(baseDir, fName, name string, obj interface{}) error { @@ -258,6 +336,8 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a stdOutObject[name] = obj case "stderr": stdErrObject[name] = obj + case "": + // don't save default: // save to file if err := saveFile(baseDir, fName, obj); err != nil { return err @@ -271,6 +351,9 @@ func dispatchOutput(ctx *cli.Context, baseDir string, result *ExecutionResult, a if err := dispatch(baseDir, ctx.String(OutputResultFlag.Name), "result", result); err != nil { return err } + if err := dispatch(baseDir, ctx.String(OutputBodyFlag.Name), "body", body); err != nil { + return err + } if len(stdOutObject) > 0 { b, err := json.MarshalIndent(stdOutObject, "", " ") if err != nil { diff --git a/cmd/evm/main.go b/cmd/evm/main.go index 35c672142d..8a3e4e0ea2 100644 --- a/cmd/evm/main.go +++ b/cmd/evm/main.go @@ -149,6 +149,7 @@ var stateTransitionCommand = cli.Command{ t8ntool.OutputBasedir, t8ntool.OutputAllocFlag, t8ntool.OutputResultFlag, + t8ntool.OutputBodyFlag, t8ntool.InputAllocFlag, t8ntool.InputEnvFlag, t8ntool.InputTxsFlag, diff --git a/cmd/evm/runner.go b/cmd/evm/runner.go index d0be6ca1e1..4063767cb8 100644 --- a/cmd/evm/runner.go +++ b/cmd/evm/runner.go @@ -38,7 +38,7 @@ import ( "github.com/ethereum/go-ethereum/core/vm/runtime" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var runCommand = cli.Command{ diff --git a/cmd/evm/staterunner.go b/cmd/evm/staterunner.go index f9a6b06b8f..c4df936c75 100644 --- a/cmd/evm/staterunner.go +++ b/cmd/evm/staterunner.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/tests" - cli 
"gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var stateTestCommand = cli.Command{ diff --git a/cmd/evm/testdata/8/alloc.json b/cmd/evm/testdata/8/alloc.json new file mode 100644 index 0000000000..1d1b5f86c6 --- /dev/null +++ b/cmd/evm/testdata/8/alloc.json @@ -0,0 +1,11 @@ +{ + "0x000000000000000000000000000000000000aaaa": { + "balance": "0x03", + "code": "0x5854505854", + "nonce": "0x1" + }, + "0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": { + "balance": "0x100000", + "nonce": "0x00" + } +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/env.json b/cmd/evm/testdata/8/env.json new file mode 100644 index 0000000000..8b91934724 --- /dev/null +++ b/cmd/evm/testdata/8/env.json @@ -0,0 +1,7 @@ +{ + "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba", + "currentDifficulty": "0x20000", + "currentGasLimit": "0x1000000000", + "currentNumber": "0x1000000", + "currentTimestamp": "0x04" +} \ No newline at end of file diff --git a/cmd/evm/testdata/8/readme.md b/cmd/evm/testdata/8/readme.md new file mode 100644 index 0000000000..778fc6151a --- /dev/null +++ b/cmd/evm/testdata/8/readme.md @@ -0,0 +1,63 @@ +## EIP-2930 testing + +This test contains testcases for EIP-2930, which uses transactions with access lists. + +### Prestate + +The alloc portion contains one contract (`0x000000000000000000000000000000000000aaaa`), containing the +following code: `0x5854505854`: `PC ;SLOAD; POP; PC; SLOAD`. + +Essentialy, this contract does `SLOAD(0)` and `SLOAD(3)`. + +The alloc also contains some funds on `0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b`. + +## Transactions + +There are three transactions, each invokes the contract above. + +1. ACL-transaction, which contains some non-used slots +2. Regular transaction +3. ACL-transaction, which contains the slots `1` and `3` in `0x000000000000000000000000000000000000aaaa` + +## Execution + +Running it yields: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Berlin --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json --trace && cat trace-* | grep SLOAD +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} + +``` + +Simlarly, we can provide the input transactions via `stdin` instead of as file: + +``` +dir=./testdata/8 \ + && cat $dir/txs.json | jq "{txs: .}" \ + | ./evm t8n --state.fork=Berlin \ + --input.alloc=$dir/alloc.json \ + --input.txs=stdin \ + --input.env=$dir/env.json \ + --trace \ + && cat trace-* | grep SLOAD + 
+{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x47c86","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x49cf6","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x494be","gasCost":"0x834","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":1,"op":84,"gas":"0x484be","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x0"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +{"pc":4,"op":84,"gas":"0x48456","gasCost":"0x64","memory":"0x","memSize":0,"stack":["0x3"],"returnStack":[],"returnData":"0x","depth":1,"refund":0,"opName":"SLOAD","error":""} +``` + +If we try to execute it on older rules: +``` +dir=./testdata/8 && ./evm t8n --state.fork=Istanbul --input.alloc=$dir/alloc.json --input.txs=$dir/txs.json --input.env=$dir/env.json +INFO [01-21|23:21:51.265] rejected tx index=0 hash="d2818d…6ab3da" error="tx type not supported" +INFO [01-21|23:21:51.265] rejected tx index=1 hash="26ea00…81c01b" from=0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B error="nonce too high: address 0xa94f5374Fce5edBC8E2a8697C15331677e6EbF0B, tx: 1 state: 0" +INFO [01-21|23:21:51.265] rejected tx index=2 hash="698d01…369cee" error="tx type not supported" +``` +Number `1` and `3` are not applicable, and therefore number `2` has wrong nonce. \ No newline at end of file diff --git a/cmd/evm/testdata/8/txs.json b/cmd/evm/testdata/8/txs.json new file mode 100644 index 0000000000..35142ba234 --- /dev/null +++ b/cmd/evm/testdata/8/txs.json @@ -0,0 +1,58 @@ +[ + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x0", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x0000000000000000000000000000000000000000", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000000" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "input": "0x", + "nonce": "0x1", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x2", + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + }, + { + "gas": "0x4ef00", + "gasPrice": "0x1", + "chainId": "0x1", + "input": "0x", + "nonce": "0x2", + "to": "0x000000000000000000000000000000000000aaaa", + "value": "0x1", + "type" : "0x1", + "accessList": [ + {"address": "0x000000000000000000000000000000000000aaaa", + "storageKeys": [ + "0x0000000000000000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000003" + ] + } + ], + "v": "0x0", + "r": "0x0", + "s": "0x0", + "secretKey": "0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8" + } +] diff --git a/cmd/faucet/README.md b/cmd/faucet/README.md new file mode 100644 index 0000000000..364689a782 --- /dev/null +++ b/cmd/faucet/README.md @@ -0,0 
+1,50 @@
+# Faucet
+
+The `faucet` is a simplistic web application with the goal of distributing small amounts of Ether in private and test networks.
+
+Users need to post their Ethereum addresses to fund in a Twitter status update or public Facebook post and submit the link to the faucet. The faucet will in turn deduplicate user requests and send the Ether. After a funding round, the faucet prevents the same user from requesting again for a pre-configured amount of time, proportional to the amount of Ether requested.
+
+## Operation
+
+The `faucet` is a single binary app (everything included) with all configurations set via command line flags and a few files.
+
+First things first, the `faucet` needs to connect to an Ethereum network, for which it needs the necessary genesis and network information. Each of the following flags must be set:
+
+- `--genesis` is a path to a file containing the network `genesis.json`
+- `--network` is the devp2p network id used during connection
+- `--bootnodes` is a list of `enode://` ids to join the network through
+
+The `faucet` will use the `les` protocol to join the configured Ethereum network and will store its data in `$HOME/.faucet` (currently not configurable).
+
+## Funding
+
+To be able to distribute funds, the `faucet` needs access to an already funded Ethereum account. This can be configured via:
+
+- `--account.json` is a path to the Ethereum account's JSON key file
+- `--account.pass` is a path to a text file with the decryption passphrase
+
+The faucet is able to distribute various amounts of Ether in exchange for various timeouts. These can be configured via:
+
+- `--faucet.amount` is the amount of Ether to send by default
+- `--faucet.minutes` is the time to wait before allowing a rerequest
+- `--faucet.tiers` is the number of funding tiers to support (x3 time, x2.5 funds)
+
+## Sybil protection
+
+To prevent the same user from exhausting funds in a loop, the `faucet` ties requests to social networks and captcha resolvers.
+
+Captcha protection uses Google's invisible ReCaptcha, thus the `faucet` needs to run on a live domain. The domain needs to be registered in Google's systems to retrieve the captcha API token and secrets. After doing so, captcha protection may be enabled via:
+
+- `--captcha.token` is the API token for ReCaptcha
+- `--captcha.secret` is the API secret for ReCaptcha
+
+Sybil protection via Twitter requires an API key as of 15th December, 2020. To obtain it, a Twitter user must be upgraded to developer status and a new Twitter App deployed with it. The app's `Bearer` token is required by the faucet to retrieve tweet data:
+
+- `--twitter.token` is the Bearer token for `v2` API access
+- `--twitter.token.v1` is the Bearer token for `v1` API access
+
+Sybil protection via Facebook uses the website to directly download post data and thus does not currently require an API configuration.
+
+## Miscellaneous
+
+Besides the above - mostly essential - CLI flags, there are a number that can be used to fine-tune the `faucet`'s operation. Please see `faucet --help` for a full list.
\ No newline at end of file
diff --git a/cmd/faucet/faucet.go b/cmd/faucet/faucet.go
index 346c412acb..e839f1c886 100644
--- a/cmd/faucet/faucet.go
+++ b/cmd/faucet/faucet.go
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
 
-// faucet is a Ether faucet backed by a light client.
+// faucet is an Ether faucet backed by a light client.
package main //go:generate go-bindata -nometadata -o website.go faucet.html @@ -47,15 +47,14 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/params" @@ -83,6 +82,9 @@ var ( noauthFlag = flag.Bool("noauth", false, "Enables funding requests without authentication") logFlag = flag.Int("loglevel", 3, "Log level to use for Ethereum and the faucet") + + twitterTokenFlag = flag.String("twitter.token", "", "Bearer token to authenticate with the v2 Twitter API") + twitterTokenV1Flag = flag.String("twitter.token.v1", "", "Bearer token to authenticate with the v1.1 Twitter API") ) var ( @@ -151,9 +153,9 @@ func main() { log.Crit("Failed to parse genesis block json", "err", err) } // Convert the bootnodes to internal enode representations - var enodes []*discv5.Node + var enodes []*enode.Node for _, boot := range strings.Split(*bootFlag, ",") { - if url, err := discv5.ParseNode(boot); err == nil { + if url, err := enode.Parse(enode.ValidSchemes, boot); err == nil { enodes = append(enodes, url) } else { log.Error("Failed to parse bootnode URL", "url", boot, "err", err) @@ -210,7 +212,7 @@ type faucet struct { nonce uint64 // Current pending nonce of the faucet price *big.Int // Current gas price to issue funds with - conns []*websocket.Conn // Currently live websocket connections + conns []*wsConn // Currently live websocket connections timeouts map[string]time.Time // History of users and their funding timeouts reqs []*request // Currently pending funding requests update chan struct{} // Channel to signal request updates @@ -218,7 +220,14 @@ type faucet struct { lock sync.RWMutex // Lock protecting the faucet's internals } -func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { +// wsConn wraps a websocket connection with a write mutex as the underlying +// websocket library does not synchronize access to the stream. 
+type wsConn struct { + conn *websocket.Conn + wlock sync.Mutex +} + +func newFaucet(genesis *core.Genesis, port int, enodes []*enode.Node, network uint64, stats string, ks *keystore.KeyStore, index []byte) (*faucet, error) { // Assemble the raw devp2p protocol stack stack, err := node.New(&node.Config{ Name: "geth", @@ -238,11 +247,12 @@ func newFaucet(genesis *core.Genesis, port int, enodes []*discv5.Node, network u } // Assemble the Ethereum light client protocol - cfg := eth.DefaultConfig + cfg := ethconfig.Defaults cfg.SyncMode = downloader.LightSync cfg.NetworkId = network cfg.Genesis = genesis utils.SetDNSDiscoveryDefaults(&cfg, genesis.ToBlock(nil).Hash()) + lesBackend, err := les.New(stack, &cfg) if err != nil { return nil, fmt.Errorf("Failed to register the Ethereum service: %w", err) @@ -317,13 +327,14 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { defer conn.Close() f.lock.Lock() - f.conns = append(f.conns, conn) + wsconn := &wsConn{conn: conn} + f.conns = append(f.conns, wsconn) f.lock.Unlock() defer func() { f.lock.Lock() for i, c := range f.conns { - if c == conn { + if c.conn == conn { f.conns = append(f.conns[:i], f.conns[i+1:]...) break } @@ -351,7 +362,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if head == nil || balance == nil { // Report the faucet offline until initial stats are ready //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Faucet offline")); err != nil { + if err = sendError(wsconn, errors.New("Faucet offline")); err != nil { log.Warn("Failed to send faucet error to client", "err", err) return } @@ -362,7 +373,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { f.lock.RLock() reqs := f.reqs f.lock.RUnlock() - if err = send(conn, map[string]interface{}{ + if err = send(wsconn, map[string]interface{}{ "funds": new(big.Int).Div(balance, ether), "funded": nonce, "peers": f.stack.Server().PeerCount(), @@ -371,7 +382,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { log.Warn("Failed to send initial stats to client", "err", err) return } - if err = send(conn, head, 3*time.Second); err != nil { + if err = send(wsconn, head, 3*time.Second); err != nil { log.Warn("Failed to send initial header to client", "err", err) return } @@ -386,9 +397,8 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if err = conn.ReadJSON(&msg); err != nil { return } - if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://gist.github.com/") && !strings.HasPrefix(msg.URL, "https://twitter.com/") && - !strings.HasPrefix(msg.URL, "https://plus.google.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { - if err = sendError(conn, errors.New("URL doesn't link to supported services")); err != nil { + if !*noauthFlag && !strings.HasPrefix(msg.URL, "https://twitter.com/") && !strings.HasPrefix(msg.URL, "https://www.facebook.com/") { + if err = sendError(wsconn, errors.New("URL doesn't link to supported services")); err != nil { log.Warn("Failed to send URL error to client", "err", err) return } @@ -396,7 +406,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } if msg.Tier >= uint(*tiersFlag) { //lint:ignore ST1005 This error is to be displayed in the browser - if err = sendError(conn, errors.New("Invalid funding tier requested")); err != nil { + if err = sendError(wsconn, errors.New("Invalid funding tier requested")); err != nil { log.Warn("Failed to send tier error to client", 
"err", err) return } @@ -412,7 +422,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { res, err := http.PostForm("https://www.google.com/recaptcha/api/siteverify", form) if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha post error to client", "err", err) return } @@ -425,7 +435,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { err = json.NewDecoder(res.Body).Decode(&result) res.Body.Close() if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send captcha decode error to client", "err", err) return } @@ -434,7 +444,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { if !result.Success { log.Warn("Captcha verification failed", "err", string(result.Errors)) //lint:ignore ST1005 it's funny and the robot won't mind - if err = sendError(conn, errors.New("Beep-bop, you're a robot!")); err != nil { + if err = sendError(wsconn, errors.New("Beep-bop, you're a robot!")); err != nil { log.Warn("Failed to send captcha failure to client", "err", err) return } @@ -443,36 +453,26 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { } // Retrieve the Ethereum address to fund, the requesting user and a profile picture var ( + id string username string avatar string address common.Address ) switch { - case strings.HasPrefix(msg.URL, "https://gist.github.com/"): - if err = sendError(conn, errors.New("GitHub authentication discontinued at the official request of GitHub")); err != nil { - log.Warn("Failed to send GitHub deprecation to client", "err", err) - return - } - continue - case strings.HasPrefix(msg.URL, "https://plus.google.com/"): - //lint:ignore ST1005 Google is a company name and should be capitalized. 
- if err = sendError(conn, errors.New("Google+ authentication discontinued as the service was sunset")); err != nil { - log.Warn("Failed to send Google+ deprecation to client", "err", err) - return - } - continue case strings.HasPrefix(msg.URL, "https://twitter.com/"): - username, avatar, address, err = authTwitter(msg.URL) + id, username, avatar, address, err = authTwitter(msg.URL, *twitterTokenV1Flag, *twitterTokenFlag) case strings.HasPrefix(msg.URL, "https://www.facebook.com/"): username, avatar, address, err = authFacebook(msg.URL) + id = username case *noauthFlag: username, avatar, address, err = authNoAuth(msg.URL) + id = username default: //lint:ignore ST1005 This error is to be displayed in the browser err = errors.New("Something funky happened, please open an issue at https://github.com/ethereum/go-ethereum/issues") } if err != nil { - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send prefix error to client", "err", err) return } @@ -486,7 +486,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { fund bool timeout time.Time ) - if timeout = f.timeouts[username]; time.Now().After(timeout) { + if timeout = f.timeouts[id]; time.Now().After(timeout) { // User wasn't funded recently, create the funding transaction amount := new(big.Int).Mul(big.NewInt(int64(*payoutFlag)), ether) amount = new(big.Int).Mul(amount, new(big.Int).Exp(big.NewInt(5), big.NewInt(int64(msg.Tier)), nil)) @@ -496,7 +496,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { signed, err := f.keystore.SignTx(f.account, tx, f.config.ChainID) if err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction creation error to client", "err", err) return } @@ -505,7 +505,7 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { // Submit the transaction and mark as funded if successful if err := f.client.SendTransaction(context.Background(), signed); err != nil { f.lock.Unlock() - if err = sendError(conn, err); err != nil { + if err = sendError(wsconn, err); err != nil { log.Warn("Failed to send transaction transmission error to client", "err", err) return } @@ -520,20 +520,20 @@ func (f *faucet) apiHandler(w http.ResponseWriter, r *http.Request) { timeout := time.Duration(*minutesFlag*int(math.Pow(3, float64(msg.Tier)))) * time.Minute grace := timeout / 288 // 24h timeout => 5m grace - f.timeouts[username] = time.Now().Add(timeout - grace) + f.timeouts[id] = time.Now().Add(timeout - grace) fund = true } f.lock.Unlock() // Send an error if too frequent funding, othewise a success if !fund { - if err = sendError(conn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple + if err = sendError(wsconn, fmt.Errorf("%s left until next allowance", common.PrettyDuration(time.Until(timeout)))); err != nil { // nolint: gosimple log.Warn("Failed to send funding error to client", "err", err) return } continue } - if err = sendSuccess(conn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { + if err = sendSuccess(wsconn, fmt.Sprintf("Funding request accepted for %s into %s", username, address.Hex())); err != nil { log.Warn("Failed to send funding success to client", "err", err) return } @@ -626,12 +626,12 @@ func (f *faucet) loop() { "requests": f.reqs, }, time.Second); err != nil { log.Warn("Failed to 
send stats to client", "err", err) - conn.Close() + conn.conn.Close() continue } if err := send(conn, head, time.Second); err != nil { log.Warn("Failed to send header to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -653,7 +653,7 @@ func (f *faucet) loop() { for _, conn := range f.conns { if err := send(conn, map[string]interface{}{"requests": f.reqs}, time.Second); err != nil { log.Warn("Failed to send requests to client", "err", err) - conn.Close() + conn.conn.Close() } } f.lock.RUnlock() @@ -663,44 +663,63 @@ func (f *faucet) loop() { // sends transmits a data packet to the remote end of the websocket, but also // setting a write deadline to prevent waiting forever on the node. -func send(conn *websocket.Conn, value interface{}, timeout time.Duration) error { +func send(conn *wsConn, value interface{}, timeout time.Duration) error { if timeout == 0 { timeout = 60 * time.Second } - conn.SetWriteDeadline(time.Now().Add(timeout)) - return conn.WriteJSON(value) + conn.wlock.Lock() + defer conn.wlock.Unlock() + conn.conn.SetWriteDeadline(time.Now().Add(timeout)) + return conn.conn.WriteJSON(value) } // sendError transmits an error to the remote end of the websocket, also setting // the write deadline to 1 second to prevent waiting forever. -func sendError(conn *websocket.Conn, err error) error { +func sendError(conn *wsConn, err error) error { return send(conn, map[string]string{"error": err.Error()}, time.Second) } // sendSuccess transmits a success message to the remote end of the websocket, also // setting the write deadline to 1 second to prevent waiting forever. -func sendSuccess(conn *websocket.Conn, msg string) error { +func sendSuccess(conn *wsConn, msg string) error { return send(conn, map[string]string{"success": msg}, time.Second) } // authTwitter tries to authenticate a faucet request using Twitter posts, returning -// the username, avatar URL and Ethereum address to fund on success. -func authTwitter(url string) (string, string, common.Address, error) { +// the uniqueness identifier (user id/username), username, avatar URL and Ethereum address to fund on success. +func authTwitter(url string, tokenV1, tokenV2 string) (string, string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense parts := strings.Split(url, "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") - } - // Twitter's API isn't really friendly with direct links. Still, we don't - // want to do ask read permissions from users, so just load the public posts + return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") + } + // Strip any query parameters from the tweet id and ensure it's numeric + tweetID := strings.Split(parts[len(parts)-1], "?")[0] + if !regexp.MustCompile("^[0-9]+$").MatchString(tweetID) { + return "", "", "", common.Address{}, errors.New("Invalid Tweet URL") + } + // Twitter's API isn't really friendly with direct links. + // It is restricted to 300 queries / 15 minute with an app api key. + // Anything more will require read only authorization from the users and that we want to avoid. + + // If Twitter bearer token is provided, use the API, selecting the version + // the user would prefer (currently there's a limit of 1 v2 app / developer + // but unlimited v1.1 apps). 
+ switch { + case tokenV1 != "": + return authTwitterWithTokenV1(tweetID, tokenV1) + case tokenV2 != "": + return authTwitterWithTokenV2(tweetID, tokenV2) + } + // Twiter API token isn't provided so we just load the public posts // and scrape it for the Ethereum address and profile URL. We need to load // the mobile page though since the main page loads tweet contents via JS. url = strings.Replace(url, "https://twitter.com/", "https://mobile.twitter.com/", 1) res, err := http.Get(url) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } defer res.Body.Close() @@ -708,31 +727,115 @@ func authTwitter(url string) (string, string, common.Address, error) { parts = strings.Split(res.Request.URL.String(), "/") if len(parts) < 4 || parts[len(parts)-2] != "status" { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("Invalid Twitter status URL") + return "", "", "", common.Address{}, errors.New("Invalid Twitter status URL") } username := parts[len(parts)-3] body, err := ioutil.ReadAll(res.Body) if err != nil { - return "", "", common.Address{}, err + return "", "", "", common.Address{}, err } address := common.HexToAddress(string(regexp.MustCompile("0x[0-9a-fA-F]{40}").Find(body))) if address == (common.Address{}) { //lint:ignore ST1005 This error is to be displayed in the browser - return "", "", common.Address{}, errors.New("No Ethereum address found to fund") + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") } var avatar string if parts = regexp.MustCompile("src=\"([^\"]+twimg.com/profile_images[^\"]+)\"").FindStringSubmatch(string(body)); len(parts) == 2 { avatar = parts[1] } - return username + "@twitter", avatar, address, nil + return username + "@twitter", username, avatar, address, nil +} + +// authTwitterWithTokenV1 tries to authenticate a faucet request using Twitter's v1 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. +func authTwitterWithTokenV1(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/1.1/statuses/show.json?id=%s", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Text string `json:"text"` + User struct { + ID string `json:"id_str"` + Username string `json:"screen_name"` + Avatar string `json:"profile_image_url"` + } `json:"user"` + } + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.User.ID + "@twitter", result.User.Username, result.User.Avatar, address, nil +} + +// authTwitterWithTokenV2 tries to authenticate a faucet request using Twitter's v2 +// API, returning the user id, username, avatar URL and Ethereum address to fund on +// success. 
+func authTwitterWithTokenV2(tweetID string, token string) (string, string, string, common.Address, error) { + // Query the tweet details from Twitter + url := fmt.Sprintf("https://api.twitter.com/2/tweets/%s?expansions=author_id&user.fields=profile_image_url", tweetID) + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return "", "", "", common.Address{}, err + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", "", common.Address{}, err + } + defer res.Body.Close() + + var result struct { + Data struct { + AuthorID string `json:"author_id"` + Text string `json:"text"` + } `json:"data"` + Includes struct { + Users []struct { + ID string `json:"id"` + Username string `json:"username"` + Avatar string `json:"profile_image_url"` + } `json:"users"` + } `json:"includes"` + } + + err = json.NewDecoder(res.Body).Decode(&result) + if err != nil { + return "", "", "", common.Address{}, err + } + + address := common.HexToAddress(regexp.MustCompile("0x[0-9a-fA-F]{40}").FindString(result.Data.Text)) + if address == (common.Address{}) { + //lint:ignore ST1005 This error is to be displayed in the browser + return "", "", "", common.Address{}, errors.New("No Ethereum address found to fund") + } + return result.Data.AuthorID + "@twitter", result.Includes.Users[0].Username, result.Includes.Users[0].Avatar, address, nil } // authFacebook tries to authenticate a faucet request using Facebook posts, // returning the username, avatar URL and Ethereum address to fund on success. func authFacebook(url string) (string, string, common.Address, error) { // Ensure the user specified a meaningful URL, no fancy nonsense - parts := strings.Split(url, "/") + parts := strings.Split(strings.Split(url, "?")[0], "/") + if parts[len(parts)-1] == "" { + parts = parts[0 : len(parts)-1] + } if len(parts) < 4 || parts[len(parts)-2] != "posts" { //lint:ignore ST1005 This error is to be displayed in the browser return "", "", common.Address{}, errors.New("Invalid Facebook post URL") @@ -742,7 +845,13 @@ func authFacebook(url string) (string, string, common.Address, error) { // Facebook's Graph API isn't really friendly with direct links. Still, we don't // want to do ask read permissions from users, so just load the public posts and // scrape it for the Ethereum address and profile URL. - res, err := http.Get(url) + // + // Facebook recently changed their desktop webpage to use AJAX for loading post + // content, so switch over to the mobile site for now. Will probably end up having + // to use the API eventually. + crawl := strings.Replace(url, "www.facebook.com", "m.facebook.com", 1) + + res, err := http.Get(crawl) if err != nil { return "", "", common.Address{}, err } diff --git a/cmd/faucet/faucet.html b/cmd/faucet/faucet.html index ba14333186..dad5ad84f2 100644 --- a/cmd/faucet/faucet.html +++ b/cmd/faucet/faucet.html @@ -177,7 +177,7 @@

How does this work?

} // Iterate over our entire local collection and re-render the funding table var content = ""; - for (var i=0; i= 0; i--) { var done = requests[i].time == ""; var elapsed = moment().unix()-moment(requests[i].time).unix(); diff --git a/cmd/faucet/faucet_test.go b/cmd/faucet/faucet_test.go new file mode 100644 index 0000000000..4f3e47084e --- /dev/null +++ b/cmd/faucet/faucet_test.go @@ -0,0 +1,43 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "testing" + + "github.com/ethereum/go-ethereum/common" +) + +func TestFacebook(t *testing.T) { + for _, tt := range []struct { + url string + want common.Address + }{ + { + "https://www.facebook.com/fooz.gazonk/posts/2837228539847129", + common.HexToAddress("0xDeadDeaDDeaDbEefbEeFbEEfBeeFBeefBeeFbEEF"), + }, + } { + _, _, gotAddress, err := authFacebook(tt.url) + if err != nil { + t.Fatal(err) + } + if gotAddress != tt.want { + t.Fatalf("address wrong, have %v want %v", gotAddress, tt.want) + } + } +} diff --git a/cmd/faucet/website.go b/cmd/faucet/website.go index a091d24919..aed067893a 100644 --- a/cmd/faucet/website.go +++ b/cmd/faucet/website.go @@ -1,6 +1,6 @@ // Code generated by go-bindata. DO NOT EDIT. 
// sources: -// faucet.html (11.27kB) +// faucet.html (11.276kB) package main @@ -69,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _faucetHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x6d\x93\xdb\x36\x92\xfe\x3c\xfe\x15\x1d\x9e\xbd\x92\xce\x43\x52\x33\x63\x7b\x7d\x12\xa9\x94\xd7\x9b\xdd\xf3\xd5\x5d\x92\x4a\x9c\xba\xdb\xca\xa6\xae\x40\xb2\x25\xc2\x03\x02\x0c\x00\x4a\xa3\x4c\xe9\xbf\x5f\x35\x40\x52\xd4\xcb\x4c\xec\xb5\xaf\x6a\xfd\x61\x4c\x02\x8d\x46\xa3\xfb\x69\xf4\x0b\x95\x7c\xf5\xe7\xef\xde\xbe\xff\xdb\xf7\xdf\x40\x69\x2b\xb1\x78\x92\xd0\x7f\x20\x98\x5c\xa5\x01\xca\x60\xf1\xe4\x22\x29\x91\x15\x8b\x27\x17\x17\x49\x85\x96\x41\x5e\x32\x6d\xd0\xa6\x41\x63\x97\xe1\xeb\x60\x3f\x51\x5a\x5b\x87\xf8\x6b\xc3\xd7\x69\xf0\x3f\xe1\x4f\x6f\xc2\xb7\xaa\xaa\x99\xe5\x99\xc0\x00\x72\x25\x2d\x4a\x9b\x06\xef\xbe\x49\xb1\x58\xe1\x60\x9d\x64\x15\xa6\xc1\x9a\xe3\xa6\x56\xda\x0e\x48\x37\xbc\xb0\x65\x5a\xe0\x9a\xe7\x18\xba\x97\x4b\xe0\x92\x5b\xce\x44\x68\x72\x26\x30\xbd\x0a\x16\x4f\x88\x8f\xe5\x56\xe0\xe2\xfe\x3e\xfa\x16\xed\x46\xe9\xdb\xdd\x6e\x06\x6f\x1a\x5b\xa2\xb4\x3c\x67\x16\x0b\xf8\x0b\x6b\x72\xb4\x49\xec\x29\xdd\x22\xc1\xe5\x2d\x94\x1a\x97\x69\x40\xa2\x9b\x59\x1c\xe7\x85\xfc\x60\xa2\x5c\xa8\xa6\x58\x0a\xa6\x31\xca\x55\x15\xb3\x0f\xec\x2e\x16\x3c\x33\xb1\xdd\x70\x6b\x51\x87\x99\x52\xd6\x58\xcd\xea\xf8\x26\xba\x89\xfe\x18\xe7\xc6\xc4\xfd\x58\x54\x71\x19\xe5\xc6\x04\xa0\x51\xa4\x81\xb1\x5b\x81\xa6\x44\xb4\x01\xc4\x8b\x7f\x6c\xdf\xa5\x92\x36\x64\x1b\x34\xaa\xc2\xf8\x45\xf4\xc7\x68\xea\xb6\x1c\x0e\x3f\xbe\x2b\x6d\x6b\x72\xcd\x6b\x0b\x46\xe7\x1f\xbd\xef\x87\x5f\x1b\xd4\xdb\xf8\x26\xba\x8a\xae\xda\x17\xb7\xcf\x07\x13\x2c\x92\xd8\x33\x5c\x7c\x16\xef\x50\x2a\xbb\x8d\xaf\xa3\x17\xd1\x55\x5c\xb3\xfc\x96\xad\xb0\xe8\x76\xa2\xa9\xa8\x1b\xfc\x62\xfb\x3e\x64\xc3\x0f\xc7\x26\xfc\x12\x9b\x55\xaa\x42\x69\xa3\x0f\x26\xbe\x8e\xae\x5e\x47\xd3\x6e\xe0\x94\xbf\xdb\x80\x8c\x46\x5b\x5d\x44\x6b\xd4\x84\x5c\x11\xe6\x28\x2d\x6a\xb8\xa7\xd1\x8b\x8a\xcb\xb0\x44\xbe\x2a\xed\x0c\xae\xa6\xd3\x67\xf3\x73\xa3\xeb\xd2\x0f\x17\xdc\xd4\x82\x6d\x67\xb0\x14\x78\xe7\x87\x98\xe0\x2b\x19\x72\x8b\x95\x99\x81\xe7\xec\x26\x76\x6e\xcf\x5a\xab\x95\x46\x63\xda\xcd\x6a\x65\xb8\xe5\x4a\xce\x08\x51\xcc\xf2\x35\x9e\xa3\x35\x35\x93\x27\x0b\x58\x66\x94\x68\x2c\x1e\x09\x92\x09\x95\xdf\xfa\x31\xe7\xcd\xc3\x43\xe4\x4a\x28\x3d\x83\x4d\xc9\xdb\x65\xe0\x36\x82\x5a\x63\xcb\x1e\x6a\x56\x14\x5c\xae\x66\xf0\xaa\x6e\xcf\x03\x15\xd3\x2b\x2e\x67\x30\xdd\x2f\x49\xe2\x4e\x8d\x49\xec\x2f\xae\x27\x17\x49\xa6\x8a\xad\xb3\x61\xc1\xd7\x90\x0b\x66\x4c\x1a\x1c\xa9\xd8\x5d\x48\x07\x04\x74\x0f\x31\x2e\xbb\xa9\x83\x39\xad\x36\x01\xb8\x8d\xd2\xc0\x0b\x11\x66\xca\x5a\x55\xcd\xe0\x8a\xc4\x6b\x97\x1c\xf1\x13\xa1\x58\x85\x57\xd7\xdd\xe4\x45\x52\x5e\x75\x4c\x2c\xde\xd9\xd0\xd9\xa7\xb7\x4c\xb0\x48\x78\xb7\x76\xc9\x60\xc9\xc2\x8c\xd9\x32\x00\xa6\x39\x0b\x4b\x5e\x14\x28\xd3\xc0\xea\x06\x09\x47\x7c\x01\xc3\xeb\xef\x81\xdb\xaf\xbc\xea\xe4\x8a\x0b\xbe\x6e\x8f\x35\x78\x3c\x3a\xe1\xc3\x87\x78\x0d\xed\x83\x5a\x2e\x0d\xda\x70\x70\xa6\x01\x31\x97\x75\x63\xc3\x95\x56\x4d\xdd\xcf\x5f\x24\x6e\x14\x78\x91\x06\x8d\x16\x41\x7b\xfd\xbb\x47\xbb\xad\x5b\x55\x04\xfd\xc1\x95\xae\x42\xb2\x84\x56\x22\x80\x5a\xb0\x1c\x4b\x25\x0a\xd4\x69\xf0\xa3\xca\x39\x13\x20\xfd\x99\xe1\xa7\x1f\xfe\x13\x5a\x93\x71\xb9\x82\xad\x6a\x34\x7c\x63\x4b\xd4\xd8\x54\xc0\x8a\x82\xe0\x1a\x45\xd1\x40\x10\x87\xdd\x53\x51\xc3\xcc\xca\x3d\xd5\x45\x92\x35\xd6\xaa\x9e\x30\xb3\x12\x32\x2b\xc3\x02\x97\xac\x11\x16\x0a\xad\xea\x42\x6d\x64\x68\xd5\x6a\x45\x91\xce\x1f\xc2\x2f\x0a\xa0\x60\x96\xb5\x53\x69\xd0\
xd1\x76\x36\x64\xa6\x56\x75\x53\xb7\x56\xf4\x83\x78\x57\x33\x59\x60\x41\x36\x17\x06\x83\xc5\x5f\xf9\x1a\xa1\x42\x7f\x96\x8b\x63\x48\xe4\x4c\xa3\x0d\x87\x4c\x4f\x80\x91\xc4\x5e\x18\x7f\x24\x68\xff\x25\x8d\xe8\x38\xf5\x47\xa8\x50\x36\x70\xf0\x16\x6a\xba\x57\x82\xc5\xfd\xbd\x66\x72\x85\xf0\x94\x17\x77\x97\xf0\x94\x55\xaa\x91\x16\x66\x29\x44\x6f\xdc\xa3\xd9\xed\x0e\xb8\x03\x24\x82\x2f\x12\xf6\x18\xbc\x41\xc9\x5c\xf0\xfc\x36\x0d\x2c\x47\x9d\xde\xdf\x13\xf3\xdd\x6e\x0e\xf7\xf7\x7c\x09\x4f\xa3\x1f\x30\x67\xb5\xcd\x4b\xb6\xdb\xad\x74\xf7\x1c\xe1\x1d\xe6\x8d\xc5\xf1\xe4\xfe\x1e\x85\xc1\xdd\xce\x34\x59\xc5\xed\xb8\x5b\x4e\xe3\xb2\xd8\xed\x48\xe6\x56\xce\xdd\x0e\x62\x62\x2a\x0b\xbc\x83\xa7\xd1\xf7\xa8\xb9\x2a\x0c\x78\xfa\x24\x66\x8b\x24\x16\x7c\xd1\xae\x3b\x54\x52\xdc\x88\x3d\x5e\x62\x02\x4c\x8f\x73\xe7\x36\x4e\xd4\xa1\xa4\x67\xbc\x60\x15\xf6\xd2\xb7\x78\x30\xdc\xe2\x2d\x6e\xd3\xe0\xfe\x7e\xb8\xb6\x9d\xcd\x99\x10\x19\x23\xbd\xf8\xa3\xf5\x8b\x7e\x43\xc2\xe9\x9a\x1b\x97\x52\x2d\x3a\x09\xf6\x62\x7f\xa4\x5b\x1f\x5d\x5c\x56\xd5\x33\xb8\xb9\x1e\xdc\x5a\xe7\x3c\xfe\xd5\x91\xc7\xdf\x9c\x25\xae\x99\x44\x01\xee\x6f\x68\x2a\x26\xba\xe7\xd6\x5b\x06\xce\x77\xbc\x28\xa4\x3b\xba\x17\xad\xbf\xeb\xa7\x73\x50\x6b\xd4\x4b\xa1\x36\x33\x60\x8d\x55\x73\xa8\xd8\x5d\x1f\xef\x6e\xa6\xd3\xa1\xdc\x94\x0a\xb2\x4c\xa0\xbb\x5d\x34\xfe\xda\xa0\xb1\xa6\xbf\x4b\xfc\x94\xfb\x4b\x57\x4a\x81\xd2\x60\x71\xa4\x0d\xda\x91\x54\xeb\xa8\x06\xa6\xef\x95\x79\x56\xf6\xa5\x52\x7d\x08\x19\x8a\xd1\xb2\x1e\x44\xbb\x60\x91\x58\xbd\xa7\xbb\x48\x6c\xf1\x49\x21\x40\x53\x8a\xf7\x50\x04\xf0\x37\x1a\x9d\xbd\x46\xd4\x3e\xbf\x20\xc8\x82\x7b\x4d\x62\x5b\x7c\xc6\xce\x04\xc2\x8c\x19\xfc\x98\xed\x5d\xa4\xdf\x6f\xef\x5e\x3f\x77\xff\x12\x99\xb6\x19\x32\xfb\x31\x02\x2c\x1b\x59\x0c\xce\xef\xee\xce\xcf\x15\xa0\x91\x7c\x8d\xda\x70\xbb\xfd\x58\x09\xb0\xd8\x8b\xe0\xdf\x0f\x45\x48\x62\xab\x1f\xc7\xda\xf0\xe5\x0b\x39\xf7\xef\xa5\x24\x37\x8b\x7f\x57\x1b\x28\x14\x1a\xb0\x25\x37\x40\xc1\xf5\xeb\x24\x2e\x6f\x7a\x92\x7a\xf1\x9e\x26\x9c\x52\x61\xe9\x52\x0b\xe0\x06\x74\x23\x5d\xe4\x55\x12\x6c\x89\x87\xe9\x48\x1b\xa4\x23\x78\xaf\x28\xa5\x5b\xa3\xb4\x50\x31\xc1\x73\xae\x1a\x03\x2c\xb7\x4a\x1b\x58\x6a\x55\x01\xde\x95\xac\x31\x96\x18\xd1\xf5\xc1\xd6\x8c\x0b\xe7\x4b\xce\xa4\xa0\x34\xb0\x3c\x6f\xaa\x86\x52\x52\xb9\x02\x94\xaa\x59\x95\xad\x2c\x56\x81\x0f\x4c\x42\xc9\x55\x2f\x8f\xa9\x59\x05\xcc\x5a\x96\xdf\x9a\x4b\xe8\x6e\x05\x60\x1a\xc1\x72\x2c\x68\x55\xae\xaa\x4a\x49\xb8\xd1\x05\xd4\x4c\xdb\x2d\x98\xc3\xdc\x82\xe5\xb9\x8b\x72\x11\xbc\x91\x5b\x25\x11\x4a\xb6\x76\x12\xc2\x7b\x5f\x4e\x90\x5c\x7f\x61\x39\x66\x4a\xf5\xd4\x50\xb1\x6d\xb7\x5d\x2b\xfd\x86\xdb\x92\x7b\xf5\xd4\xa8\x2b\x5a\x5a\x80\xe0\x15\xb7\x26\x4a\xe2\x7a\x7f\xa3\xee\x63\xb3\x08\x4b\xa5\xf9\x6f\x94\xd8\x88\xe1\xf5\x69\x8f\x2e\x97\xee\x6e\x74\x56\x17\xb8\xb4\x33\x78\xe1\xef\xc6\x63\x1c\xb7\x15\xd0\x39\x10\x77\x3c\x5d\x65\x49\x01\x67\x06\x37\x3e\x9d\xf5\x89\x44\x61\x07\x12\x14\x47\x50\xf3\x9b\xbe\x7e\x5d\xdf\xf5\x72\xf4\x39\xf1\xb4\x67\x42\x08\x38\x54\xca\x9a\xf7\x6a\xbc\x84\x8a\xdd\x22\x30\x48\xd8\x51\x85\xdc\x0a\xed\xea\x2b\xee\xfa\x03\xb1\xdd\x20\xda\xaf\xc9\x75\xd3\x1f\x3c\x43\x2e\x57\xcf\xae\xa7\x1e\x91\xf4\x40\xec\x9f\x5d\x4f\xb9\xb4\xea\xd9\xf5\x74\x7a\x37\xfd\xc8\x7f\xcf\xae\xa7\x4a\x3e\xbb\x9e\xda\x12\x9f\x5d\x4f\x9f\x5d\xdf\x0c\xb1\xec\x47\xba\xcc\x92\xa8\xd0\xd0\x6e\x1d\xc4\x03\xb0\x4c\xaf\xd0\xa6\xc1\xff\xb2\x4c\x35\x76\x96\x09\x26\x6f\x83\x85\x13\x97\xb2\x0d\x87\x82\xf3\xf9\x29\xd4\xcc\x10\x24\x48\x62\x87\x92\xb6\x17\x62\x60\x6c\x1a\xad\x55\x23\x29\x2a\x02\x9d\xd9\x79\xa8\x1c\x11\xca\x48\x31\x93\x28\xc9\x74\xbc\x78
\xab\xea\x6d\xe8\x98\xb8\xe5\x27\x6a\x34\x4d\x5d\x2b\x6d\xa3\xa1\x3a\x19\xd5\x41\x02\x4d\xfc\x7a\xfa\xf2\xf5\xab\x47\xc5\x37\x94\x65\xbb\x33\xf4\x12\xb2\x4c\xad\x11\x7c\x4e\x9f\xa9\x3b\x60\xb2\x80\x25\xd7\x08\x6c\xc3\xb6\x5f\x25\x71\xe1\x2a\xb0\xcf\x47\xed\xb2\xf5\xae\x7f\x2a\xd8\x76\x2e\x7f\x09\x75\x93\x09\x6e\x4a\x60\x20\x71\x03\x89\xb1\x5a\xc9\xd5\xc2\x8d\xe6\x54\x92\xba\x57\xa8\x95\xb1\x8f\x99\x1f\xab\x0c\x8b\xe2\x0c\x00\xbe\x94\xfd\x37\x9b\x4d\xd4\x69\xd2\x19\xbf\x44\x51\xc7\x74\xfd\x35\x92\xdb\x6d\xec\xdd\x48\xc9\xf8\x6b\x5e\xa4\xd7\xaf\xaf\x5f\xbd\xba\x7e\xf1\x6f\xaf\x5f\xbe\xbc\x7e\xfd\xe2\xe5\x43\xc8\xa0\x43\x7d\x26\x30\x7c\x1a\xfd\xad\xa2\xaa\xb5\xcf\xa1\x3d\x5e\xba\xdc\x8d\x22\x74\x41\x35\x88\x0e\xfe\x61\x0c\x35\x92\x12\x91\x90\x89\xb3\x39\xc4\x27\xa0\xc8\xc1\xe8\x11\xc9\x3e\x13\x5a\x1d\x7c\x08\x29\xaa\xb1\x74\xc2\xae\x98\xe7\x4a\xf6\x70\xba\x04\xc3\xab\x5a\x6c\x21\xdf\x5b\xfd\x3c\xae\x1e\x34\xca\xef\xc2\xea\xd0\x6c\x1e\x64\x2e\xfa\x57\xaa\x40\x8a\xfa\xa6\x31\x39\xd6\xae\xcb\x4b\x91\xf4\x4f\xdb\xdf\x98\xb4\x5c\x62\x17\x71\x23\xf8\x4e\x8a\x2d\x34\x06\x61\xa9\x34\x14\x98\x35\xab\x95\x4b\x13\x34\xd4\x9a\xaf\x99\xc5\x2e\xcc\x9a\x16\x15\x3d\x28\x06\x95\x0d\xa5\x3c\x62\x90\x81\xfc\x4d\x35\x90\x33\x09\x56\xb3\xfc\xd6\x7b\x4a\xa3\x35\x79\x4a\x8d\xfe\x34\x7d\xa0\xcf\x50\xa8\x8d\x23\xf1\xe7\x5e\x72\x14\x2e\xea\x1b\x44\x28\xd5\x06\xaa\x26\x77\x0e\x49\x51\xdd\x1d\x62\xc3\xb8\x85\x46\x5a\x2e\xbc\x3e\x6d\xa3\x25\xe5\x08\x78\x10\xa5\x4f\x6a\xbf\x04\xab\xc5\xfb\x12\xcf\xa4\x44\x7d\xd5\x06\x1a\xdf\x7a\x72\xa8\xb5\xb2\x98\x93\x41\x81\xad\x18\x97\x86\x2c\xe2\xf2\x00\xac\x3e\xa2\xaa\xeb\x9f\xda\x87\x7d\x87\xd2\x4d\xc7\x31\xfc\x55\xa8\x8c\x09\x58\x13\xd2\x33\x41\xe9\x9c\x82\x52\xd1\xd1\x07\xda\x32\x96\xd9\xc6\x80\x5a\xba\x51\x2f\x39\xad\x5f\x33\x4d\x16\xc4\xaa\xb6\x90\xb6\xfd\x35\x1a\x33\xa8\xd7\x6d\xd7\x90\x5e\xa9\x72\x3f\x98\xef\xb5\x9e\xc2\xcf\xbf\xcc\x9f\xb4\xa2\xfc\x19\x97\x0e\x12\x84\x6f\x7f\x64\x5b\x32\x0b\xb9\x46\x66\xd1\x40\x2e\x94\x69\xb4\x97\xb0\xd0\xaa\x06\x92\xb2\xe3\xd4\x71\xa6\x89\xda\xed\xd6\x31\x19\x97\xcc\x94\x93\xb6\x3d\xa8\xd1\x59\xa9\x9f\xeb\xc6\x2f\x08\x75\x63\x62\xc0\xd3\xe9\x1c\x78\xd2\xf1\x8d\x04\xca\x95\x2d\xe7\xc0\x9f\x3f\xef\x89\x2f\xf8\x12\xc6\x1d\xc5\xcf\xfc\x97\xc8\xde\x45\xb4\x0b\xa4\x29\x0c\x77\x73\x1b\xb6\x7c\x4c\x2d\x78\x8e\x63\x7e\x09\x57\x93\x79\x37\x9b\x69\x64\xb7\xdd\x5b\x6b\x47\xff\x9f\xfb\xbb\x9b\x1f\x6a\xc6\x29\xff\x40\x37\xbe\xf6\x37\xc0\x60\xc5\x8d\x85\x46\x0b\x68\x7d\xd8\x9b\xa0\x37\x88\xa3\x1b\x6a\xe5\x04\x97\xed\x43\x8b\xa9\xee\x08\x9e\x4d\x64\x50\x16\xe3\xff\xf8\xf1\xbb\x6f\x23\x63\x35\x97\x2b\xbe\xdc\x8e\xef\x1b\x2d\x66\xf0\x74\x1c\xfc\x4b\xa3\x45\x30\xf9\x79\xfa\x4b\xb4\x66\xa2\xc1\x4b\x67\xef\x99\xfb\x7b\xb2\xcb\x25\xb4\x8f\x33\x38\xdc\x70\x37\x99\xcc\xcf\xf7\x49\x06\x6d\x1d\x8d\x06\xed\x98\x08\x7b\xe0\x1f\xeb\x88\x41\x85\xb6\x54\xce\x75\x35\xe6\x4a\x4a\xcc\x2d\x34\xb5\x92\xad\x4a\x40\x28\x63\xf6\x40\xec\x28\xd2\x53\x50\xb4\xf4\xa9\x0b\xd6\xff\x8d\xd9\x8f\x2a\xbf\x45\x3b\x1e\x8f\x37\x5c\x16\x6a\x13\x09\xe5\xaf\xda\x88\x9c\x54\xe5\x4a\x40\x9a\xa6\xd0\x46\xd1\x60\x02\x5f\x43\xb0\x31\x14\x4f\x03\x98\xd1\x23\x3d\x4d\xe0\x39\x1c\x2f\x2f\x29\xde\x3f\x87\x20\x66\x35\x0f\x26\xde\x1d\x3a\xc5\x2b\x59\xa1\x31\x6c\x85\x43\x01\x5d\x65\xd4\x83\x8c\xce\x51\x99\x15\xa4\xe0\x0c\x54\x33\x6d\xd0\x93\x44\x54\x8d\x77\x68\x23\xcc\x3a\xb2\x34\x05\xd9\x08\xb1\x07\xa9\x77\x8a\x79\x07\xbf\x03\xf2\xc8\xc7\x9a\xaf\xd2\x14\xa8\x34\x25\x15\x17\xfb\x95\x64\x7c\x5f\x44\x4f\x22\x8a\x0b\xfb\x15\x93\xf9\x10\xcd\x07\xdc\xb0\xf8\x3d\x76\x58\x1c\xf3\xc3\xe2\x01\x86\xae\x67\xf1\x18\x3f\xd
f\xe3\x18\xb0\x73\x03\x0f\x70\x93\x4d\x95\xa1\x7e\x8c\x9d\xef\x59\xb4\xec\x9c\xaa\xdf\x49\x3b\x58\x7b\x09\x57\xaf\x26\x0f\x70\x47\xad\xd5\x83\xcc\xa5\xb2\xdb\xf1\xbd\x60\x5b\xca\x99\x60\x64\x55\xfd\xd6\xb5\x18\x46\x97\x2e\xe2\xce\xa0\xe7\x70\xe9\x9a\xc7\x33\x18\xb9\x37\x9a\xe7\x15\xba\x55\x2f\xa7\xd3\xe9\x25\x74\x5f\x5d\xfe\xc4\xc8\x09\x75\x83\xbb\x07\xe4\x31\x4d\x9e\x53\xdc\xff\x1c\x89\x5a\x1e\xbd\x4c\xed\xfb\x67\x48\xd5\xc7\x86\x03\xb1\xe0\x0f\x7f\x80\x93\xd9\x43\x18\xc7\x31\xfc\x17\xa3\x32\x5c\x08\xd7\x3d\x70\x4d\x83\x9e\xbe\xe2\xc6\xb8\x62\xdc\x40\xa1\x24\xb6\x6b\x3e\xed\xda\x3f\x91\xb1\x25\x83\x05\x4c\x8f\x05\xa4\xeb\x70\x10\x16\xce\x44\x8b\x01\xdf\xc3\x40\x70\xb1\x1b\xee\x77\xb0\x92\x57\x08\x5f\xa5\x10\x04\xc3\xc5\x27\x14\x44\xd0\x33\xbb\x30\x68\xdf\x7b\x5b\x8c\xdb\xe8\x78\x2e\x76\x4d\x2e\xe1\x66\x3a\x9d\x4e\x4e\x84\xd8\xed\xd5\xfb\xa6\xa6\xb4\x09\x98\xdc\xba\x2b\xb1\xd7\xad\x4b\x1c\x29\x05\xa2\x2b\x4d\x40\xae\x84\xf0\x39\x4b\xbb\x94\x14\xdc\x36\x4f\x52\x08\xaf\xe6\x67\xa2\xe8\x40\x93\x83\xa3\x1d\x9b\xe7\x8c\xee\x8f\x4d\x74\xa8\xb3\x23\xe2\xf0\xea\xc0\x28\x07\xf6\x3a\x6f\x98\x8b\x5e\x6e\xbe\xd7\xe8\x91\xb9\xf6\xf6\x3a\xd6\xd9\x40\x7e\xcf\xe7\xf9\xd5\x47\x1e\xa3\x9f\xae\x1b\x53\x8e\x8f\x04\x9d\xcc\x4f\x6d\xf3\xce\xa2\xa6\x2c\x59\x51\xc8\x22\x5b\x50\x29\xa0\xf1\xc4\x24\x2e\x55\xd7\x18\x6a\x94\x05\xea\x2e\xa5\xf0\x99\x3d\x25\x80\x07\x26\xf3\x55\xe5\x10\x4e\x9f\xe8\x30\x2e\x25\x53\x12\x01\x00\x8e\x9c\xc0\x01\xf5\x00\xa9\x44\x8c\x82\xd5\x06\x0b\x48\xc1\x7f\x04\x1f\x4f\xa2\x46\xf2\xbb\xf1\x24\x6c\xdf\x8f\x79\x74\xf3\xf3\xbe\x4c\xec\xc4\x7e\x9e\x42\x90\x58\x0d\xbc\x48\x47\x01\x3c\x3f\xe7\x82\x14\x75\x47\x8b\xbd\x04\xc3\xa5\x00\x89\x2d\x16\xae\x0f\xea\xeb\xb5\xbf\x07\x19\xcb\x6f\x57\xae\x10\x9a\x51\xaa\x35\x3e\x61\xcb\xd6\xcc\x32\xed\xb8\x4e\xe6\xb0\x27\x6f\x0b\xc5\x9c\x8c\x33\x07\x5f\x91\xba\x76\x2b\xf4\x9f\x28\xdc\x5b\xa6\x74\x81\x3a\xd4\xac\xe0\x8d\x99\xc1\x8b\xfa\x6e\xfe\xf7\xee\x13\x8e\x6b\x0a\x3f\x2a\x6a\xad\x71\x71\x22\x51\xdb\x65\x7c\x0e\x41\x12\x13\xc1\xef\xb1\xe9\x0f\x3b\xfc\xf8\x0e\x67\x5a\xdf\xd0\x7f\x1a\x6f\xc7\x2b\x5e\x14\x02\x49\xe0\x3d\x7b\x72\x46\xb2\xff\xd0\xa5\x0e\xb7\x84\xb6\xe7\xbd\x5f\xb3\x03\x14\x06\x1f\x59\xd0\xb7\xcf\x47\x04\x80\x90\x8e\xcc\x9d\xce\xdb\x62\xdb\x0d\xeb\x91\xd3\x45\xfb\x53\x8a\xa2\xd1\x2e\xd7\x1a\x87\x2d\xc0\x2e\x61\x64\x28\xf7\x2b\xcc\x68\x12\x95\x4d\xc5\x24\xff\x0d\xc7\x14\x97\x26\x5e\x57\xae\x1f\x1f\x9c\x5e\xc9\x27\xc2\xec\x1b\xe5\xa3\x2e\xc6\x8d\x5a\x25\x8e\x3a\xeb\xbe\xd8\xd7\xf6\x33\x98\xce\x47\x9f\xa8\xa1\xf3\xbb\x84\x19\xd3\x30\x7c\x09\xbb\xe0\x0b\x5a\xd1\xee\xdd\x5c\xc6\xf4\xc8\x77\x32\x5c\x7e\x2e\xd5\x26\x1d\xdd\x4c\x7b\x21\xbd\xa1\x9d\x9d\x47\x2d\xd6\x4e\x8c\x41\x52\x76\xae\xb9\x80\x9b\xe9\x97\x90\xd6\x77\x43\x8e\x4e\x60\x35\xaf\xb1\x00\x96\x5b\xbe\xc6\xff\x87\x83\x7c\x01\x25\x7f\xb2\x88\x84\xc3\x4e\x79\x0e\xa6\x07\xf2\xd2\x6c\xaf\xdb\x7f\x25\x7f\x83\xd8\x69\xf8\x39\x04\x67\x0f\xf2\x20\x12\x8f\x08\x8f\x5c\xfb\x61\xbf\x77\x1f\x98\x82\xe3\x98\x42\xd9\x6e\xff\x71\x74\x12\x95\xb6\x12\xe3\x20\xb1\xee\x47\x32\x24\x73\xcf\xc1\x31\xf0\xc3\x87\x29\xdd\xee\xb0\x90\xa1\xfa\x1d\x8f\xea\x2c\x18\x24\x27\x7d\x2d\xd6\x65\x22\xb0\xdb\xff\x96\x28\x8e\xe1\x47\xcb\xb4\x05\x06\x3f\xbd\x83\xa6\x2e\x98\xf5\x9f\x72\x28\x3e\xfa\x4f\x25\xdd\x8f\x8d\x32\xa6\x0d\x2c\x95\xde\x30\x5d\xb4\xfd\x19\x5b\xe2\xd6\x7d\xca\xe9\x52\x3f\x83\xf6\x1d\xdd\x62\x6b\x26\xc6\x27\x75\xdf\xd3\xf1\x28\x1a\x9a\x7c\x34\x89\x90\xe5\xe5\x29\xa1\x8b\x58\xfd\xbe\x29\x7c\xeb\x4a\x80\xf1\xd3\xb1\x2d\xb9\x99\x44\xcc\x5a\x3d\x1e\x1d\x80\x61\x34\x21\xbb\x5e\x0d\x4a\xb2\x7e\x79\x72\xe0\x56\x8f\xf1\xd8\x
27\xd3\x7d\x22\xd0\x91\xe7\xc6\x8c\x3d\xae\x46\x97\x03\xde\x87\xb0\x1a\x3d\x1b\xf5\x86\xda\xbb\xf7\xfe\x1c\xe9\x59\x49\x0e\x58\x8f\xc8\xcb\x46\x27\xdb\xb3\xa2\x78\x4b\xfe\x33\x0e\xce\x78\xfa\x31\x3a\x26\xbd\xb2\xfd\x7d\xfd\xa8\x96\xfd\xcf\x32\x1e\x50\x31\x2f\x46\x93\xc8\x34\x99\xef\x4d\x8c\x5f\xf6\x05\x58\x47\xe6\xc0\x7b\x1c\x0a\x4e\x12\x0a\xda\xe2\x30\xa9\x08\x8f\x92\x90\x47\xa2\x46\xbb\xa5\x3f\xd5\xee\x92\x14\x3e\x9d\xf4\xad\xad\x6f\x0c\x25\x57\xbe\xf5\xbf\xc1\xcc\xb8\x4e\x02\xb4\x78\x77\xdd\x1c\xdf\xb5\x79\xf3\xfd\xbb\x41\xe7\xa6\xf7\x88\xb1\xe3\xde\xff\x0e\xf0\x5c\x9f\xe4\xec\x0f\x0f\x37\x9b\x4d\xb4\x52\x6a\x25\xfc\x4f\x0e\xfb\x46\x4a\xcc\x6a\x1e\x7d\x30\x01\x30\xb3\x95\x39\x14\xb8\x44\xbd\x18\xb0\x6f\xbb\x2b\x49\xec\x7f\x12\x97\xc4\xfe\x57\xbf\xff\x17\x00\x00\xff\xff\x31\x9f\x54\x5e\x06\x2c\x00\x00") +var _faucetHtml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xcc\x5a\x7b\x93\xdb\x36\x92\xff\x7b\xfc\x29\x3a\x3c\x7b\x25\x9d\x87\xa4\x66\xc6\xf6\xfa\x24\x52\x29\xaf\x37\xbb\xe7\xab\xbb\x24\x95\x38\x75\xb7\x95\x4d\x5d\x81\x64\x4b\x84\x07\x04\x18\x00\x94\x46\x99\xd2\x77\xbf\x6a\x80\xa4\xa8\xc7\x4c\xec\xb5\xaf\x6a\xfd\xc7\x98\xc4\xa3\xd1\x8f\x5f\xa3\x1f\x54\xf2\xd5\x9f\xbf\x7b\xfb\xfe\x6f\xdf\x7f\x03\xa5\xad\xc4\xe2\x49\x42\xff\x81\x60\x72\x95\x06\x28\x83\xc5\x93\x8b\xa4\x44\x56\x2c\x9e\x5c\x5c\x24\x15\x5a\x06\x79\xc9\xb4\x41\x9b\x06\x8d\x5d\x86\xaf\x83\xfd\x44\x69\x6d\x1d\xe2\xaf\x0d\x5f\xa7\xc1\xff\x84\x3f\xbd\x09\xdf\xaa\xaa\x66\x96\x67\x02\x03\xc8\x95\xb4\x28\x6d\x1a\xbc\xfb\x26\xc5\x62\x85\x83\x7d\x92\x55\x98\x06\x6b\x8e\x9b\x5a\x69\x3b\x58\xba\xe1\x85\x2d\xd3\x02\xd7\x3c\xc7\xd0\xbd\x5c\x02\x97\xdc\x72\x26\x42\x93\x33\x81\xe9\x55\xb0\x78\x42\x74\x2c\xb7\x02\x17\xf7\xf7\xd1\xb7\x68\x37\x4a\xdf\xee\x76\x33\x78\xd3\xd8\x12\xa5\xe5\x39\xb3\x58\xc0\x5f\x58\x93\xa3\x4d\x62\xbf\xd2\x6d\x12\x5c\xde\x42\xa9\x71\x99\x06\xc4\xba\x99\xc5\x71\x5e\xc8\x0f\x26\xca\x85\x6a\x8a\xa5\x60\x1a\xa3\x5c\x55\x31\xfb\xc0\xee\x62\xc1\x33\x13\xdb\x0d\xb7\x16\x75\x98\x29\x65\x8d\xd5\xac\x8e\x6f\xa2\x9b\xe8\x8f\x71\x6e\x4c\xdc\x8f\x45\x15\x97\x51\x6e\x4c\x00\x1a\x45\x1a\x18\xbb\x15\x68\x4a\x44\x1b\x40\xbc\xf8\xc7\xce\x5d\x2a\x69\x43\xb6\x41\xa3\x2a\x8c\x5f\x44\x7f\x8c\xa6\xee\xc8\xe1\xf0\xe3\xa7\xd2\xb1\x26\xd7\xbc\xb6\x60\x74\xfe\xd1\xe7\x7e\xf8\xb5\x41\xbd\x8d\x6f\xa2\xab\xe8\xaa\x7d\x71\xe7\x7c\x30\xc1\x22\x89\x3d\xc1\xc5\x67\xd1\x0e\xa5\xb2\xdb\xf8\x3a\x7a\x11\x5d\xc5\x35\xcb\x6f\xd9\x0a\x8b\xee\x24\x9a\x8a\xba\xc1\x2f\x76\xee\x43\x36\xfc\x70\x6c\xc2\x2f\x71\x58\xa5\x2a\x94\x36\xfa\x60\xe2\xeb\xe8\xea\x75\x34\xed\x06\x4e\xe9\xbb\x03\xc8\x68\x74\xd4\x45\xb4\x46\x4d\xc8\x15\x61\x8e\xd2\xa2\x86\x7b\x1a\xbd\xa8\xb8\x0c\x4b\xe4\xab\xd2\xce\xe0\x6a\x3a\x7d\x36\x3f\x37\xba\x2e\xfd\x70\xc1\x4d\x2d\xd8\x76\x06\x4b\x81\x77\x7e\x88\x09\xbe\x92\x21\xb7\x58\x99\x19\x78\xca\x6e\x62\xe7\xce\xac\xb5\x5a\x69\x34\xa6\x3d\xac\x56\x86\x5b\xae\xe4\x8c\x10\xc5\x2c\x5f\xe3\xb9\xb5\xa6\x66\xf2\x64\x03\xcb\x8c\x12\x8d\xc5\x23\x46\x32\xa1\xf2\x5b\x3f\xe6\xbc\x79\x28\x44\xae\x84\xd2\x33\xd8\x94\xbc\xdd\x06\xee\x20\xa8\x35\xb6\xe4\xa1\x66\x45\xc1\xe5\x6a\x06\xaf\xea\x56\x1e\xa8\x98\x5e\x71\x39\x83\xe9\x7e\x4b\x12\x77\x6a\x4c\x62\x7f\x71\x3d\xb9\x48\x32\x55\x6c\x9d\x0d\x0b\xbe\x86\x5c\x30\x63\xd2\xe0\x48\xc5\xee\x42\x3a\x58\x40\xf7\x10\xe3\xb2\x9b\x3a\x98\xd3\x6a\x13\x80\x3b\x28\x0d\x3c\x13\x61\xa6\xac\x55\xd5\x0c\xae\x88\xbd\x76\xcb\x11\x3d\x11\x8a\x55\x78\x75\xdd\x4d\x5e\x24\xe5\x55\x47\xc4\xe2\x9d\x0d\x9d\x7d\x7a\xcb\x04\x8b\x84\x77\x7b\x97\x0c\x96\x2c\xcc\x98\x2d\x03\x60\x9a\xb3\xb0\xe4\x45\x81\x32\x0d\xac\x6e\x90\x70\xc4\x17\x3
0\xbc\xfe\x1e\xb8\xfd\xca\xab\x8e\xaf\xb8\xe0\xeb\x56\xac\xc1\xe3\x91\x84\x0f\x0b\xf1\x1a\xda\x07\xb5\x5c\x1a\xb4\xe1\x40\xa6\xc1\x62\x2e\xeb\xc6\x86\x2b\xad\x9a\xba\x9f\xbf\x48\xdc\x28\xf0\x22\x0d\x1a\x2d\x82\xf6\xfa\x77\x8f\x76\x5b\xb7\xaa\x08\x7a\xc1\x95\xae\x42\xb2\x84\x56\x22\x80\x5a\xb0\x1c\x4b\x25\x0a\xd4\x69\xf0\xa3\xca\x39\x13\x20\xbd\xcc\xf0\xd3\x0f\xff\x09\xad\xc9\xb8\x5c\xc1\x56\x35\x1a\xbe\xb1\x25\x6a\x6c\x2a\x60\x45\x41\x70\x8d\xa2\x28\x88\xf7\x9c\x38\xf0\x9e\xf2\x1a\x66\x56\xee\xf9\xbd\x48\xb2\xc6\x5a\xd5\x2f\xcc\xac\x84\xcc\xca\xb0\xc0\x25\x6b\x84\x85\x42\xab\xba\x50\x1b\x19\x5a\xb5\x5a\x51\xa8\xf3\x52\xf8\x4d\x01\x14\xcc\xb2\x76\x2a\x0d\xba\xb5\x9d\x11\x99\xa9\x55\xdd\xd4\xad\x19\xfd\x20\xde\xd5\x4c\x16\x58\x90\xd1\x85\xc1\x60\xf1\x57\xbe\x46\xa8\xd0\x0b\x73\x71\x8c\x89\x9c\x69\xb4\xe1\x90\xe8\x09\x32\x92\xd8\x33\xe3\x45\x82\xf6\x5f\xd2\x88\x8e\x52\x2f\x42\x85\xb2\x81\x83\xb7\x50\xd3\xc5\x12\x2c\xee\xef\x35\x93\x2b\x84\xa7\xbc\xb8\xbb\x84\xa7\xac\x52\x8d\xb4\x30\x4b\x21\x7a\xe3\x1e\xcd\x6e\x77\x40\x1d\x20\x11\x7c\x91\xb0\xc7\xf0\x0d\x4a\xe6\x82\xe7\xb7\x69\x60\x39\xea\xf4\xfe\x9e\x88\xef\x76\x73\xb8\xbf\xe7\x4b\x78\x1a\xfd\x80\x39\xab\x6d\x5e\xb2\xdd\x6e\xa5\xbb\xe7\x08\xef\x30\x6f\x2c\x8e\x27\xf7\xf7\x28\x0c\xee\x76\xa6\xc9\x2a\x6e\xc7\xdd\x76\x1a\x97\xc5\x6e\x47\x3c\xb7\x7c\xee\x76\x10\x13\x51\x59\xe0\x1d\x3c\x8d\xbe\x47\xcd\x55\x61\xc0\xaf\x4f\x62\xb6\x48\x62\xc1\x17\xed\xbe\x43\x25\xc5\x8d\xd8\xe3\x25\x26\xc0\xf4\x40\x77\x7e\xe3\x58\x1d\x72\x7a\xc6\x0d\x56\x61\xcf\x7d\x8b\x07\xc3\x2d\xde\xe2\x36\x0d\xee\xef\x87\x7b\xdb\xd9\x9c\x09\x91\x31\xd2\x8b\x17\xad\xdf\xf4\x1b\x12\x4e\xd7\xdc\xb8\x9c\x6a\xd1\x71\xb0\x67\xfb\x23\xfd\xfa\xe8\xe6\xb2\xaa\x9e\xc1\xcd\xf5\xe0\xda\x3a\xe7\xf2\xaf\x8e\x5c\xfe\xe6\xec\xe2\x9a\x49\x14\xe0\xfe\x86\xa6\x62\xa2\x7b\x6e\xbd\x65\x70\x0d\x1c\x6f\x0a\xe9\x92\xee\x59\xeb\x2f\xfb\xe9\x1c\xd4\x1a\xf5\x52\xa8\xcd\x0c\x58\x63\xd5\x1c\x2a\x76\xd7\x07\xbc\x9b\xe9\x74\xc8\x37\xe5\x82\x2c\x13\xe8\xae\x17\x8d\xbf\x36\x68\xac\xe9\x2f\x13\x3f\xe5\xfe\xd2\x9d\x52\xa0\x34\x58\x1c\x69\x83\x4e\x24\xd5\xba\x55\x03\xd3\xf7\xca\x3c\xcb\xfb\x52\xa9\x3e\x86\x0c\xd9\x68\x49\x0f\xc2\x5d\xb0\x48\xac\xde\xaf\xbb\x48\x6c\xf1\x49\x31\x40\x53\x8e\xf7\x50\x08\xf0\x37\x1a\xc9\x5e\x23\x6a\x9f\x60\x10\x64\xc1\xbd\x26\xb1\x2d\x3e\xe3\x64\x02\x61\xc6\x0c\x7e\xcc\xf1\x2e\xd4\xef\x8f\x77\xaf\x9f\x7b\x7e\x89\x4c\xdb\x0c\x99\xfd\x18\x06\x96\x8d\x2c\x06\xf2\xbb\xbb\xf3\x73\x19\x68\x24\x5f\xa3\x36\xdc\x6e\x3f\x96\x03\x2c\xf6\x2c\xf8\xf7\x43\x16\x92\xd8\xea\xc7\xb1\x36\x7c\xf9\x42\xce\xfd\x7b\x39\xc9\xcd\xe2\xdf\xd5\x06\x0a\x85\x06\x6c\xc9\x0d\x50\x74\xfd\x3a\x89\xcb\x9b\x7e\x49\xbd\x78\x4f\x13\x4e\xa9\xb0\x74\xb9\x05\x70\x03\xba\x91\x2e\xf4\x2a\x09\xb6\xc4\xc3\x7c\xa4\x8d\xd2\x11\xbc\x57\x94\xd3\xad\x51\x5a\xa8\x98\xe0\x39\x57\x8d\x01\x96\x5b\xa5\x0d\x2c\xb5\xaa\x00\xef\x4a\xd6\x18\x4b\x84\xe8\xfa\x60\x6b\xc6\x85\xf3\x25\x67\x52\x50\x1a\x58\x9e\x37\x55\x43\x39\xa9\x5c\x01\x4a\xd5\xac\xca\x96\x17\xab\xc0\x07\x26\xa1\xe4\xaa\xe7\xc7\xd4\xac\x02\x66\x2d\xcb\x6f\xcd\x25\x74\xb7\x02\x30\x8d\x60\x39\x16\xb4\x2b\x57\x55\xa5\x24\xdc\xe8\x02\x6a\xa6\xed\x16\xcc\x61\x72\xc1\xf2\xdc\x45\xb9\x08\xde\xc8\xad\x92\x08\x25\x5b\x3b\x0e\xe1\xbd\xaf\x27\x88\xaf\xbf\xb0\x1c\x33\xa5\xfa\xd5\x50\xb1\x6d\x77\x5c\xcb\xfd\x86\xdb\x92\x7b\xf5\xd4\xa8\x2b\xda\x5a\x80\xe0\x15\xb7\x26\x4a\xe2\x7a\x7f\xa3\xee\x63\xb3\x08\x4b\xa5\xf9\x6f\x94\xd9\x88\xe1\xf5\x69\x8f\x2e\x97\xee\x6e\x74\x56\x17\xb8\xb4\x33\x78\xe1\xef\xc6\x63\x1c\xb7\x25\xd0\x39\x10\x77\x34\x5d\x69\x49\x01\x67\x06\x37\x3e\x9f\xf5\x89\x44\x61\x07\x1c\x
14\x47\x50\xf3\x87\xbe\x7e\x5d\xdf\xf5\x7c\xf4\x49\xf1\xb4\x27\x42\x08\x38\x54\xca\x9a\xf7\x6a\xbc\x84\x8a\xdd\x22\x30\x48\xd8\x51\x89\xdc\x32\xed\x0a\x2c\xee\x1a\x04\xb1\xdd\x20\xda\xaf\xc9\x75\xd3\x1f\x3c\x41\x2e\x57\xcf\xae\xa7\x1e\x91\xf4\x40\xe4\x9f\x5d\x4f\xb9\xb4\xea\xd9\xf5\x74\x7a\x37\xfd\xc8\x7f\xcf\xae\xa7\x4a\x3e\xbb\x9e\xda\x12\x9f\x5d\x4f\x9f\x5d\xdf\x0c\xb1\xec\x47\xba\xd4\x92\x56\xa1\xa1\xd3\x3a\x88\x07\x60\x99\x5e\xa1\x4d\x83\xff\x65\x99\x6a\xec\x2c\x13\x4c\xde\x06\x0b\xc7\x2e\x65\x1b\x0e\x05\xe7\x13\x54\xa8\x99\x21\x48\x10\xc7\x0e\x25\x6d\x33\xc4\xc0\xd8\x34\x5a\xab\x46\x52\x54\x04\x92\xd9\x79\xa8\x1c\x11\xca\x48\x31\x93\x28\xc9\x74\xbc\x78\xab\xea\x6d\xe8\x88\xb8\xed\x27\x6a\x34\x4d\x5d\x2b\x6d\xa3\xa1\x3a\x19\x15\x42\x02\x4d\xfc\x7a\xfa\xf2\xf5\xab\x47\xd9\x37\x94\x66\x3b\x19\x7a\x0e\x59\xa6\xd6\x08\x3e\xa9\xcf\xd4\x1d\x30\x59\xc0\x92\x6b\x04\xb6\x61\xdb\xaf\x92\xb8\x70\x25\xd8\xe7\xa3\x76\xd9\x7a\xd7\x3f\x15\x6c\x3b\x97\xbf\x84\xba\xc9\x04\x37\x25\x30\x90\xb8\x81\xc4\x58\xad\xe4\x6a\xe1\x46\x73\xaa\x49\xdd\x2b\xd4\xca\xd8\xc7\xcc\x8f\x55\x86\x45\x71\x06\x00\x5f\xca\xfe\x9b\xcd\x26\xea\x34\xe9\x8c\x5f\xa2\xa8\x63\xba\xfe\x1a\xc9\xed\x36\xf6\x6e\xa4\x64\xfc\x35\x2f\xd2\xeb\xd7\xd7\xaf\x5e\x5d\xbf\xf8\xb7\xd7\x2f\x5f\x5e\xbf\x7e\xf1\xf2\x21\x64\x90\x50\x9f\x09\x0c\x9f\x46\x7f\xab\xa8\x6c\xed\x73\x68\x8f\x97\x2e\x77\xa3\x08\x5d\x50\x0d\xa2\x83\x7f\x18\x43\x8d\xa4\x44\x24\x64\xe2\x6c\x0e\xf1\x09\x28\x72\x30\x7a\x84\xb3\xcf\x84\x56\x07\x1f\x42\x8a\x6a\x2c\x49\xd8\x55\xf3\x5c\xc9\x1e\x4e\x97\x60\x78\x55\x8b\x2d\xe4\x7b\xab\x9f\xc7\xd5\x83\x46\xf9\x5d\x58\x1d\x9a\xcd\x83\xcc\x45\xff\x4a\x15\x48\x51\xdf\x34\x26\xc7\xda\xb5\x79\x29\x92\xfe\x69\xfb\x1b\x93\x96\x4b\xec\x22\x6e\x04\xdf\x49\xb1\x85\xc6\x20\x2c\x95\x86\x02\xb3\x66\xb5\x72\x69\x82\x86\x5a\xf3\x35\xb3\xd8\x85\x59\xd3\xa2\xa2\x07\xc5\xa0\xb2\xa1\x94\x47\x0c\x32\x90\xbf\xa9\x06\x72\x26\xc1\x6a\x96\xdf\x7a\x4f\x69\xb4\x26\x4f\xa9\xd1\x4b\xd3\x07\xfa\x0c\x85\xda\xb8\x25\x5e\xee\x25\x47\xe1\xa2\xbe\x41\x84\x52\x6d\xa0\x6a\x72\xe7\x90\x14\xd5\x9d\x10\x1b\xc6\x2d\x34\xd2\x72\xe1\xf5\x69\x1b\x2d\x29\x47\xc0\x83\x28\x7d\x52\xfb\x25\x58\x2d\xde\x97\x78\x26\x25\xea\xab\x36\xd0\xf8\xd6\x2f\x87\x5a\x2b\x8b\x39\x19\x14\xd8\x8a\x71\x69\xc8\x22\x2e\x0f\xc0\xea\x23\xaa\xba\xfe\xa9\x7d\xd8\xb7\x28\xdd\x74\x1c\xc3\x5f\x85\xca\x98\x80\x35\x21\x3d\x13\x94\xce\x29\x28\x15\x89\x3e\xd0\x96\xb1\xcc\x36\x06\xd4\xd2\x8d\x7a\xce\x69\xff\x9a\x69\xb2\x20\x56\xb5\x85\xb4\x6d\xb0\xd1\x98\x41\xbd\x6e\xdb\x86\xf4\x4a\x95\xfb\xc1\x7c\xaf\xf5\x14\x7e\xfe\x65\xfe\xa4\x65\xe5\xcf\xb8\x74\x90\x20\x7c\x7b\x91\x6d\xc9\x2c\xe4\x1a\x99\x45\x03\xb9\x50\xa6\xd1\x9e\xc3\x42\xab\x1a\x88\xcb\x8e\x52\x47\x99\x26\x6a\x77\x5a\x47\x64\x5c\x32\x53\x4e\xda\xfe\xa0\x46\x67\xa5\x7e\xae\x1b\xbf\x20\xd4\x8d\x89\x00\x4f\xa7\x73\xe0\x49\x47\x37\x12\x28\x57\xb6\x9c\x03\x7f\xfe\xbc\x5f\x7c\xc1\x97\x30\xee\x56\xfc\xcc\x7f\x89\xec\x5d\x44\xa7\x40\x9a\xc2\xf0\x34\x77\x60\x4b\xc7\xd4\x82\xe7\x38\xe6\x97\x70\x35\x99\x77\xb3\x99\x46\x76\xdb\xbd\xb5\x76\xf4\xff\xb9\xbf\xbb\xf9\xa1\x66\x9c\xf2\x0f\x74\xe3\x6b\x7f\x03\x0c\x56\xdc\x58\x68\xb4\x80\xd6\x87\xbd\x09\x7a\x83\xb8\x75\x43\xad\x9c\xe0\xb2\x7d\x68\x31\xd5\x89\xe0\xc9\x44\x06\x65\x31\xfe\x8f\x1f\xbf\xfb\x36\x32\x56\x73\xb9\xe2\xcb\xed\xf8\xbe\xd1\x62\x06\x4f\xc7\xc1\xbf\x34\x5a\x04\x93\x9f\xa7\xbf\x44\x6b\x26\x1a\xbc\x74\xf6\x9e\xb9\xbf\x27\xa7\x5c\x42\xfb\x38\x83\xc3\x03\x77\x93\xc9\xfc\x7c\x9f\x64\xd0\xd6\xd1\x68\xd0\x8e\x69\x61\x0f\xfc\x63\x1d\x31\xa8\xd0\x96\xca\xb9\xae\xc6\x5c\x49\x89\xb9\x85\xa6\x56\xb2\x55\x09\x08\x65\xcc\x1e\
x88\xdd\x8a\xf4\x14\x14\xed\xfa\xd4\x05\xeb\xff\xc6\xec\x47\x95\xdf\xa2\x1d\x8f\xc7\x1b\x2e\x0b\xb5\x89\x84\xf2\x57\x6d\x44\x4e\xaa\x72\x25\x20\x4d\x53\x68\xa3\x68\x30\x81\xaf\x21\xd8\x18\x8a\xa7\x01\xcc\xe8\x91\x9e\x26\xf0\x1c\x8e\xb7\x97\x14\xef\x9f\x43\x10\xb3\x9a\x07\x13\xef\x0e\x9d\xe2\x95\xac\xd0\x18\xb6\xc2\x21\x83\xae\x32\xea\x41\x46\x72\x54\x66\x05\x29\x38\x03\xd5\x4c\x1b\xf4\x4b\x22\xaa\xc6\x3b\xb4\x11\x66\xdd\xb2\x34\x05\xd9\x08\xb1\x07\xa9\x77\x8a\x79\x07\xbf\x83\xe5\x91\x8f\x35\x5f\xa5\x29\x50\x69\x4a\x2a\x2e\xf6\x3b\xc9\xf8\xbe\x88\x9e\x44\x14\x17\xf6\x3b\x26\xf3\x21\x9a\x0f\xa8\x61\xf1\x7b\xe4\xb0\x38\xa6\x87\xc5\x03\x04\x5d\xcf\xe2\x31\x7a\xbe\xc7\x31\x20\xe7\x06\x1e\xa0\x26\x9b\x2a\x43\xfd\x18\x39\xdf\xb3\x68\xc9\x39\x55\xbf\x93\x76\xb0\xf7\x12\xae\x5e\x4d\x1e\xa0\x8e\x5a\xab\x07\x89\x4b\x65\xb7\xe3\x7b\xc1\xb6\x94\x33\xc1\xc8\xaa\xfa\xad\x6b\x31\x8c\x2e\x5d\xc4\x9d\x41\x4f\xe1\xd2\x35\x8f\x67\x30\x72\x6f\x34\xcf\x2b\x74\xbb\x5e\x4e\xa7\xd3\x4b\xe8\x3e\xbb\xfc\x89\x91\x13\xea\x06\x77\x0f\xf0\x63\x9a\x3c\xa7\xb8\xff\x39\x1c\xb5\x34\x7a\x9e\xda\xf7\xcf\xe0\xaa\x8f\x0d\x07\x6c\xc1\x1f\xfe\x00\x27\xb3\x87\x30\x8e\x63\xf8\x2f\x46\x65\xb8\x10\xae\x7b\xe0\x9a\x06\xfd\xfa\x8a\x1b\xe3\x8a\x71\x03\x85\x92\xd8\xee\xf9\xb4\x6b\xff\x84\xc7\x76\x19\x2c\x60\x7a\xcc\x20\x5d\x87\x83\xb0\x70\x26\x5a\x0c\xe8\x1e\x06\x82\x8b\xdd\xf0\xbc\x83\x9d\xbc\x42\xf8\x2a\x85\x20\x18\x6e\x3e\x59\x41\x0b\x7a\x62\x17\x06\xed\x7b\x6f\x8b\x71\x1b\x1d\xcf\xc5\xae\xc9\x25\xdc\x4c\xa7\xd3\xc9\x09\x13\xbb\xbd\x7a\xdf\xd4\x94\x36\x01\x93\x5b\x77\x25\xf6\xba\x75\x89\x23\xa5\x40\x74\xa5\x09\xc8\x95\x10\x3e\x67\x69\xb7\x92\x82\xdb\xe6\x49\x0a\xe1\xd5\xfc\x4c\x14\x1d\x68\x72\x20\xda\xb1\x79\xce\xe8\xfe\xd8\x44\x87\x3a\x3b\x5a\x1c\x5e\x1d\x18\xe5\xc0\x5e\xe7\x0d\x73\xd1\xf3\xcd\xf7\x1a\x3d\x32\xd7\xde\x5e\xc7\x3a\x1b\xf0\xef\xe9\x3c\xbf\xfa\x48\x31\xfa\xe9\xba\x31\xe5\xf8\x88\xd1\xc9\xfc\xd4\x36\xef\x2c\x6a\xca\x92\x15\x85\x2c\xb2\x05\x95\x02\x1a\x4f\x4c\xe2\x52\x75\x8d\xa1\x46\x59\xa0\xee\x52\x0a\x9f\xd9\x53\x02\x78\x60\x32\x5f\x55\x0e\xe1\x34\x90\xe8\x44\xb7\x73\xe0\xb0\xa0\x34\x0f\x78\x18\x0e\x64\x71\x79\x99\x92\x08\x00\x70\xe4\x09\x0e\xad\x07\x70\xa5\xc5\x28\x58\x6d\xb0\x80\x14\xfc\xa7\xf0\xf1\x24\x6a\x24\xbf\x1b\x4f\xc2\xf6\xfd\x98\x46\x37\x3f\xef\x6b\xc5\x8e\xf7\xe7\x29\x04\x89\xd5\xc0\x8b\x74\x14\xc0\xf3\x73\x7e\x48\xa1\x77\xb4\xd8\x73\x30\xdc\x0a\x90\xd8\x62\xe1\x9a\xa1\xbe\x68\xfb\x7b\x90\xb1\xfc\x76\xe5\xaa\xa1\x19\xe5\x5b\xe3\x13\xb2\x6c\xcd\x2c\xd3\x8e\xea\x64\x0e\xfb\xe5\x6d\xb5\x98\x93\x85\xe6\xe0\xcb\x52\xd7\x73\x85\xfe\x3b\x85\x7b\xcb\x94\x2e\x50\x87\x9a\x15\xbc\x31\x33\x78\x51\xdf\xcd\xff\xde\x7d\xc7\x71\x9d\xe1\x47\x59\xad\x35\x2e\x4e\x38\x6a\x5b\x8d\xcf\x21\x48\x62\x5a\xf0\x7b\x64\x7a\x61\x87\x9f\xe0\xe1\x4c\xff\x1b\xfa\x0f\xe4\xed\x78\xc5\x8b\x42\x20\x31\xbc\x27\x4f\x1e\x49\xf6\x1f\xfa\xd5\xe1\x91\xd0\x36\xbe\xf7\x7b\x76\x80\xc2\xe0\x23\x1b\xfa\x1e\xfa\x88\x00\x10\x92\xc8\xdc\xe9\xbc\xad\xb8\xdd\xb0\x1e\x39\x5d\xb4\x3f\xa8\x28\x1a\xed\x12\xae\x71\xd8\x02\xec\x12\x46\x86\x12\xc0\xc2\x8c\x26\x51\xd9\x54\x4c\xf2\xdf\x70\x4c\xc1\x69\xe2\x75\xe5\x9a\xf2\xc1\xe9\xbd\x7c\xc2\xcc\xbe\x5b\x3e\xea\x02\xdd\xa8\x55\xe2\xa8\xb3\xee\x8b\x7d\x81\x3f\x83\xe9\x7c\xf4\x89\x1a\x3a\x7f\x4a\x98\x31\x0d\xc3\x97\xb0\x8b\xc0\xa0\x15\x9d\xde\xcd\x65\x4c\x8f\x7c\x3b\xc3\x25\xe9\x52\x6d\xd2\xd1\xcd\xb4\x67\xd2\x1b\xda\xd9\x79\xd4\x62\xed\xc4\x18\xc4\x65\xe7\x9a\x0b\xb8\x99\x7e\x09\x6e\x7d\x4b\xe4\x48\x02\xab\x79\x8d\x05\xb0\xdc\xf2\x35\xfe\x3f\x08\xf2\x05\x94\xfc\xc9\x2c\x12\x0e\x3b\xe5\x39\x98\x1e\xf0\x4b\xb3\xbd\x6e\xff\x95
\xfc\x0d\x62\xa7\xe1\xe7\x10\x9c\x15\xe4\x41\x24\x1e\x2d\x3c\x72\xed\x87\xfd\xde\x7d\x65\x0a\x8e\x03\x0b\xa5\xbc\xfd\x17\xd2\x49\x54\xda\x4a\x8c\x83\xc4\xba\x9f\xca\x10\xcf\x3d\x05\x47\xc0\x0f\x1f\xe6\x75\xbb\xc3\x6a\x86\x8a\x78\x3c\x2a\xb6\x60\x90\xa1\xf4\x05\x59\x97\x8e\xc0\x6e\xff\x8b\xa2\x38\x86\x1f\x2d\xd3\x16\x18\xfc\xf4\x0e\x9a\xba\x60\xd6\x7f\xcf\xa1\x20\xe9\xbf\x97\x74\x3f\x39\xca\x98\x36\xb0\x54\x7a\xc3\x74\xd1\x36\x69\x6c\x89\x5b\xf7\x3d\xa7\xcb\xff\x0c\xda\x77\x74\x8b\xad\x99\x18\x9f\x14\x7f\x4f\xc7\xa3\x68\x68\xf2\xd1\x24\x42\x96\x97\xa7\x0b\x5d\xc4\xea\xcf\x4d\xe1\x5b\x57\x07\x8c\x9f\x8e\x6d\xc9\xcd\x24\x62\xd6\xea\xf1\xe8\x00\x0c\xa3\x09\xd9\xf5\x6a\x50\x97\xf5\xdb\x93\x03\xb7\x7a\x8c\xc6\x3e\xa3\xee\xb3\x81\x6e\x79\x6e\xcc\xd8\xe3\x6a\x74\x39\xa0\x7d\x08\xab\xd1\xb3\x51\x6f\xa8\xbd\x7b\xef\xe5\x48\xcf\x72\x72\x40\x7a\x44\x5e\x36\x3a\x39\x9e\x15\xc5\x5b\xf2\x9f\x71\x70\xc6\xd3\x8f\xd1\x31\xe9\x95\xed\xef\xeb\x47\xb5\xec\x7f\x9b\xf1\x80\x8a\x79\x31\x9a\x44\xa6\xc9\x7c\x83\x62\xfc\xb2\xaf\xc2\xba\x65\x0e\xbc\xc7\xa1\xe0\x24\xa1\xa0\x23\x0e\x93\x8a\xf0\x28\x09\x79\x24\x6a\xb4\x47\x7a\xa9\x76\x97\xa4\xf0\xe9\xa4\xef\x6f\x7d\x63\x28\xc3\xf2\xfd\xff\x0d\x66\xc6\xb5\x13\xa0\xc5\xbb\x6b\xe9\xf8\xd6\xcd\x9b\xef\xdf\x0d\xda\x37\xbd\x47\x8c\x1d\xf5\xfe\xd7\x80\xe7\x9a\x25\x67\x7f\x7e\xb8\xd9\x6c\xa2\x95\x52\x2b\xe1\x7f\x78\xd8\x77\x53\x62\x56\xf3\xe8\x83\x09\x80\x99\xad\xcc\xa1\xc0\x25\xea\xc5\x80\x7c\xdb\x62\x49\x62\xff\xc3\xb8\x24\xf6\xbf\xfd\xfd\xbf\x00\x00\x00\xff\xff\xb2\x1e\x6f\x68\x0c\x2c\x00\x00") func faucetHtmlBytes() ([]byte, error) { return bindataRead( @@ -85,7 +85,7 @@ func faucetHtml() (*asset, error) { } info := bindataFileInfo{name: "faucet.html", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xdb, 0xa2, 0x98, 0x44, 0x4b, 0x50, 0xf8, 0xa1, 0xac, 0x4a, 0x76, 0x2e, 0xcc, 0x3d, 0xcb, 0x81, 0x9e, 0x2a, 0xaa, 0x87, 0xf5, 0x9d, 0x53, 0x4, 0x8a, 0xdd, 0x5a, 0xfe, 0xd3, 0xc3, 0xf, 0x11}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc5, 0x8d, 0xb, 0x7a, 0xfd, 0x70, 0x68, 0x68, 0xd2, 0xd8, 0xf3, 0xf6, 0xac, 0x72, 0xed, 0xc2, 0x76, 0x18, 0x2d, 0x1, 0xe5, 0x3b, 0x55, 0xb, 0xce, 0xfc, 0xb6, 0xd5, 0x59, 0xc3, 0x94, 0x5b}} return a, nil } diff --git a/cmd/geth/accountcmd_test.go b/cmd/geth/accountcmd_test.go index 6213e5195d..9455eeda36 100644 --- a/cmd/geth/accountcmd_test.go +++ b/cmd/geth/accountcmd_test.go @@ -102,6 +102,7 @@ func TestAccountImport(t *testing.T) { }, } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { t.Parallel() importAccountWithExpect(t, test.key, test.output) @@ -178,11 +179,8 @@ Fatal: could not decrypt key with given password } func TestUnlockFlag(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") geth.Expect(` Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 !! Unsupported terminal, password will be echoed. 
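The `test := test` line added in `TestAccountImport` above is the standard rebinding idiom for parallel subtests: `t.Parallel()` lets the subtest body run after the range loop has advanced, so without a per-iteration copy every subtest could observe the last element (on Go versions prior to 1.22, which changed loop-variable scoping). A small self-contained sketch of the difference, unrelated to geth itself:
```
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"alpha", "beta", "gamma"}
	var wg sync.WaitGroup

	// Shared capture: every goroutine closes over the same loop variable, so
	// before Go 1.22 they are all likely to print the final element.
	for _, it := range items {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("shared capture:", it)
		}()
	}
	wg.Wait()

	// Rebinding: `it := it` gives each goroutine its own copy, which is
	// exactly what `test := test` does ahead of t.Run(...) with t.Parallel().
	for _, it := range items {
		it := it
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("rebound copy:", it)
		}()
	}
	wg.Wait()
}
```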
@@ -202,10 +200,9 @@ Password: {{.InputLine "foobar"}} } func TestUnlockFlagWrongPassword(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") + defer geth.ExpectExit() geth.Expect(` Unlocking account f466859ead1932d743d622cb74fc058882e8648a | Attempt 1/3 @@ -221,11 +218,9 @@ Fatal: Failed to unlock account f466859ead1932d743d622cb74fc058882e8648a (could // https://github.com/ethereum/go-ethereum/issues/1785 func TestUnlockFlagMultiIndex(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "0,2", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--unlock", "0,2", "js", "testdata/empty.js") + geth.Expect(` Unlocking account 0 | Attempt 1/3 !! Unsupported terminal, password will be echoed. @@ -248,11 +243,9 @@ Password: {{.InputLine "foobar"}} } func TestUnlockFlagPasswordFile(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--password", "testdata/passwords.txt", "--unlock", "0,2", - "js", "testdata/empty.js") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", "testdata/passwords.txt", "--unlock", "0,2", "js", "testdata/empty.js") + geth.ExpectExit() wantMessages := []string{ @@ -268,10 +261,9 @@ func TestUnlockFlagPasswordFile(t *testing.T) { } func TestUnlockFlagPasswordFileWrongPassword(t *testing.T) { - datadir := tmpDatadirWithKeystore(t) - geth := runGeth(t, - "--datadir", datadir, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--password", "testdata/wrong-passwords.txt", "--unlock", "0,2") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--password", + "testdata/wrong-passwords.txt", "--unlock", "0,2") defer geth.ExpectExit() geth.Expect(` Fatal: Failed to unlock account 0 (could not decrypt key with given password) @@ -280,9 +272,9 @@ Fatal: Failed to unlock account 0 (could not decrypt key with given password) func TestUnlockFlagAmbiguous(t *testing.T) { store := filepath.Join("..", "..", "accounts", "keystore", "testdata", "dupes") - geth := runGeth(t, - "--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "js", "testdata/empty.js") defer geth.ExpectExit() @@ -318,9 +310,10 @@ In order to avoid this warning, you need to remove the following duplicate key f func TestUnlockFlagAmbiguousWrongPassword(t *testing.T) { store := filepath.Join("..", "..", "accounts", "keystore", "testdata", 
"dupes") - geth := runGeth(t, - "--keystore", store, "--nat", "none", "--nodiscover", "--maxpeers", "0", "--port", "0", - "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + geth := runMinimalGeth(t, "--port", "0", "--ipcdisable", "--datadir", tmpDatadirWithKeystore(t), + "--unlock", "f466859ead1932d743d622cb74fc058882e8648a", "--keystore", + store, "--unlock", "f466859ead1932d743d622cb74fc058882e8648a") + defer geth.ExpectExit() // Helper for the expect template, returns absolute keystore path. diff --git a/cmd/geth/chaincmd.go b/cmd/geth/chaincmd.go index 6418f90957..ff9581fd88 100644 --- a/cmd/geth/chaincmd.go +++ b/cmd/geth/chaincmd.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "os" - "path/filepath" "runtime" "strconv" "sync/atomic" @@ -28,7 +27,6 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" @@ -159,28 +157,16 @@ The export-preimages command export hash preimages to an RLP encoded stream`, utils.CacheFlag, utils.SyncModeFlag, utils.FakePoWFlag, + utils.MainnetFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.TxLookupLimitFlag, utils.GoerliFlag, - utils.YoloV2Flag, - utils.LegacyTestnetFlag, + utils.YoloV3Flag, }, Category: "BLOCKCHAIN COMMANDS", Description: ` The first argument must be the directory containing the blockchain to download from`, - } - removedbCommand = cli.Command{ - Action: utils.MigrateFlags(removeDB), - Name: "removedb", - Usage: "Remove blockchain and state databases", - ArgsUsage: " ", - Flags: []cli.Flag{ - utils.DataDirFlag, - }, - Category: "BLOCKCHAIN COMMANDS", - Description: ` -Remove blockchain and state databases`, } dumpCommand = cli.Command{ Action: utils.MigrateFlags(dump), @@ -201,24 +187,6 @@ Remove blockchain and state databases`, The arguments are interpreted as block numbers or hashes. 
Use "ethereum dump 0" to dump the genesis block.`, } - inspectCommand = cli.Command{ - Action: utils.MigrateFlags(inspect), - Name: "inspect", - Usage: "Inspect the storage size for each type of data in the database", - ArgsUsage: " ", - Flags: []cli.Flag{ - utils.DataDirFlag, - utils.AncientFlag, - utils.CacheFlag, - utils.RopstenFlag, - utils.RinkebyFlag, - utils.GoerliFlag, - utils.YoloV2Flag, - utils.LegacyTestnetFlag, - utils.SyncModeFlag, - }, - Category: "BLOCKCHAIN COMMANDS", - } ) // initGenesis will initialise the given JSON format genesis file and writes it as @@ -321,17 +289,7 @@ func importChain(ctx *cli.Context) error { fmt.Printf("Import done in %v.\n\n", time.Since(start)) // Output pre-compaction stats mostly to see the import trashing - stats, err := db.Stat("leveldb.stats") - if err != nil { - utils.Fatalf("Failed to read database stats: %v", err) - } - fmt.Println(stats) - - ioStats, err := db.Stat("leveldb.iostats") - if err != nil { - utils.Fatalf("Failed to read database iostats: %v", err) - } - fmt.Println(ioStats) + showLeveldbStats(db) // Print the memory statistics used by the importing mem := new(runtime.MemStats) @@ -349,22 +307,12 @@ func importChain(ctx *cli.Context) error { // Compact the entire database to more accurately measure disk io and print the stats start = time.Now() fmt.Println("Compacting entire database...") - if err = db.Compact(nil, nil); err != nil { + if err := db.Compact(nil, nil); err != nil { utils.Fatalf("Compaction failed: %v", err) } fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) - stats, err = db.Stat("leveldb.stats") - if err != nil { - utils.Fatalf("Failed to read database stats: %v", err) - } - fmt.Println(stats) - - ioStats, err = db.Stat("leveldb.iostats") - if err != nil { - utils.Fatalf("Failed to read database iostats: %v", err) - } - fmt.Println(ioStats) + showLeveldbStats(db) return importErr } @@ -497,66 +445,6 @@ func copyDb(ctx *cli.Context) error { return nil } -func removeDB(ctx *cli.Context) error { - stack, config := makeConfigNode(ctx) - - // Remove the full node state database - path := stack.ResolvePath("chaindata") - if common.FileExist(path) { - confirmAndRemoveDB(path, "full node state database") - } else { - log.Info("Full node state database missing", "path", path) - } - // Remove the full node ancient database - path = config.Eth.DatabaseFreezer - switch { - case path == "": - path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") - case !filepath.IsAbs(path): - path = config.Node.ResolvePath(path) - } - if common.FileExist(path) { - confirmAndRemoveDB(path, "full node ancient database") - } else { - log.Info("Full node ancient database missing", "path", path) - } - // Remove the light node database - path = stack.ResolvePath("lightchaindata") - if common.FileExist(path) { - confirmAndRemoveDB(path, "light node database") - } else { - log.Info("Light node database missing", "path", path) - } - return nil -} - -// confirmAndRemoveDB prompts the user for a last confirmation and removes the -// folder if accepted. 
-func confirmAndRemoveDB(database string, kind string) { - confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) - switch { - case err != nil: - utils.Fatalf("%v", err) - case !confirm: - log.Info("Database deletion skipped", "path", database) - default: - start := time.Now() - filepath.Walk(database, func(path string, info os.FileInfo, err error) error { - // If we're at the top level folder, recurse into - if path == database { - return nil - } - // Delete all the files, but not subfolders - if !info.IsDir() { - os.Remove(path) - return nil - } - return filepath.SkipDir - }) - log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) - } -} - func dump(ctx *cli.Context) error { stack, _ := makeConfigNode(ctx) defer stack.Close() @@ -596,16 +484,6 @@ func dump(ctx *cli.Context) error { return nil } -func inspect(ctx *cli.Context) error { - node, _ := makeConfigNode(ctx) - defer node.Close() - - _, chainDb := utils.MakeChain(ctx, node, true) - defer chainDb.Close() - - return rawdb.InspectDatabase(chainDb) -} - // hashish returns true for strings that look like hashes. func hashish(x string) bool { _, err := strconv.Atoi(x) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index 6b51843aa4..6fc75363c6 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -20,16 +20,17 @@ import ( "bufio" "errors" "fmt" + "math/big" "os" "reflect" "unicode" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" "github.com/naoina/toml" @@ -41,7 +42,7 @@ var ( Name: "dumpconfig", Usage: "Show configuration values", ArgsUsage: "", - Flags: append(append(nodeFlags, rpcFlags...), whisperFlags...), + Flags: append(nodeFlags, rpcFlags...), Category: "MISCELLANEOUS COMMANDS", Description: `The dumpconfig command shows configuration values.`, } @@ -73,21 +74,11 @@ type ethstatsConfig struct { URL string `toml:",omitempty"` } -// whisper has been deprecated, but clients out there might still have [Shh] -// in their config, which will crash. Cut them some slack by keeping the -// config, and displaying a message that those config switches are ineffectual. -// To be removed circa Q1 2021 -- @gballet. -type whisperDeprecatedConfig struct { - MaxMessageSize uint32 `toml:",omitempty"` - MinimumAcceptedPOW float64 `toml:",omitempty"` - RestrictConnectionBetweenLightClients bool `toml:",omitempty"` -} - type gethConfig struct { - Eth eth.Config - Shh whisperDeprecatedConfig + Eth ethconfig.Config Node node.Config Ethstats ethstatsConfig + Metrics metrics.Config } func loadConfig(file string, cfg *gethConfig) error { @@ -119,8 +110,9 @@ func defaultNodeConfig() node.Config { func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { // Load defaults. cfg := gethConfig{ - Eth: eth.DefaultConfig, - Node: defaultNodeConfig(), + Eth: ethconfig.Defaults, + Node: defaultNodeConfig(), + Metrics: metrics.DefaultConfig, } // Load config file. 
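[Editorial aside, not part of the patch] For orientation before the next hunks: the Metrics field added to gethConfig above is populated from metrics.DefaultConfig first, and then selectively overridden by applyMetricConfig, which the patch introduces further down. The pattern is to touch a field only when the corresponding flag was explicitly set, so values loaded from a TOML config file are not clobbered by flag defaults. A minimal sketch of that pattern with gopkg.in/urfave/cli.v1 (the same CLI library the patch imports); demoConfig and the flag names are hypothetical:

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

// demoConfig stands in for a config section such as the new Metrics block.
type demoConfig struct {
	Enabled bool
	HTTP    string
}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{
		cli.BoolFlag{Name: "demo.enabled", Usage: "enable the demo subsystem"},
		cli.StringFlag{Name: "demo.http", Usage: "demo HTTP endpoint"},
	}
	app.Action = func(ctx *cli.Context) error {
		// Defaults first (in geth these would come from metrics.DefaultConfig
		// and, optionally, a TOML config file).
		cfg := demoConfig{Enabled: false, HTTP: "127.0.0.1:6060"}
		// Override a field only when the user explicitly passed the flag.
		if ctx.GlobalIsSet("demo.enabled") {
			cfg.Enabled = ctx.GlobalBool("demo.enabled")
		}
		if ctx.GlobalIsSet("demo.http") {
			cfg.HTTP = ctx.GlobalString("demo.http")
		}
		fmt.Printf("%+v\n", cfg)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

Run with only `--demo.http 0.0.0.0:6061`, the sketch changes the HTTP field while Enabled keeps its default, which is the behaviour applyMetricConfig relies on below.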
@@ -128,10 +120,6 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { if err := loadConfig(file, &cfg); err != nil { utils.Fatalf("%v", err) } - - if cfg.Shh != (whisperDeprecatedConfig{}) { - log.Warn("Deprecated whisper config detected. Whisper has been moved to github.com/ethereum/whisper") - } } // Apply flags. @@ -144,27 +132,19 @@ func makeConfigNode(ctx *cli.Context) (*node.Node, gethConfig) { if ctx.GlobalIsSet(utils.EthStatsURLFlag.Name) { cfg.Ethstats.URL = ctx.GlobalString(utils.EthStatsURLFlag.Name) } - utils.SetShhConfig(ctx, stack) + applyMetricConfig(ctx, &cfg) return stack, cfg } -// enableWhisper returns true in case one of the whisper flags is set. -func checkWhisper(ctx *cli.Context) { - for _, flag := range whisperFlags { - if ctx.GlobalIsSet(flag.GetName()) { - log.Warn("deprecated whisper flag detected. Whisper has been moved to github.com/ethereum/whisper") - } - } -} - // makeFullNode loads geth configuration and creates the Ethereum backend. func makeFullNode(ctx *cli.Context) (*node.Node, ethapi.Backend) { stack, cfg := makeConfigNode(ctx) - + if ctx.GlobalIsSet(utils.OverrideBerlinFlag.Name) { + cfg.Eth.OverrideBerlin = new(big.Int).SetUint64(ctx.GlobalUint64(utils.OverrideBerlinFlag.Name)) + } backend := utils.RegisterEthService(stack, &cfg.Eth) - checkWhisper(ctx) // Configure GraphQL if requested if ctx.GlobalIsSet(utils.GraphQLEnabledFlag.Name) { utils.RegisterGraphQLService(stack, backend, cfg.Node) @@ -204,3 +184,36 @@ func dumpConfig(ctx *cli.Context) error { return nil } + +func applyMetricConfig(ctx *cli.Context, cfg *gethConfig) { + if ctx.GlobalIsSet(utils.MetricsEnabledFlag.Name) { + cfg.Metrics.Enabled = ctx.GlobalBool(utils.MetricsEnabledFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnabledExpensiveFlag.Name) { + cfg.Metrics.EnabledExpensive = ctx.GlobalBool(utils.MetricsEnabledExpensiveFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsHTTPFlag.Name) { + cfg.Metrics.HTTP = ctx.GlobalString(utils.MetricsHTTPFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsPortFlag.Name) { + cfg.Metrics.Port = ctx.GlobalInt(utils.MetricsPortFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsEnableInfluxDBFlag.Name) { + cfg.Metrics.EnableInfluxDB = ctx.GlobalBool(utils.MetricsEnableInfluxDBFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBEndpointFlag.Name) { + cfg.Metrics.InfluxDBEndpoint = ctx.GlobalString(utils.MetricsInfluxDBEndpointFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBDatabaseFlag.Name) { + cfg.Metrics.InfluxDBDatabase = ctx.GlobalString(utils.MetricsInfluxDBDatabaseFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBUsernameFlag.Name) { + cfg.Metrics.InfluxDBUsername = ctx.GlobalString(utils.MetricsInfluxDBUsernameFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBPasswordFlag.Name) { + cfg.Metrics.InfluxDBPassword = ctx.GlobalString(utils.MetricsInfluxDBPasswordFlag.Name) + } + if ctx.GlobalIsSet(utils.MetricsInfluxDBTagsFlag.Name) { + cfg.Metrics.InfluxDBTags = ctx.GlobalString(utils.MetricsInfluxDBTagsFlag.Name) + } +} diff --git a/cmd/geth/consolecmd.go b/cmd/geth/consolecmd.go index cbecbe0a5f..9d8794eb15 100644 --- a/cmd/geth/consolecmd.go +++ b/cmd/geth/consolecmd.go @@ -19,10 +19,8 @@ package main import ( "fmt" "os" - "os/signal" "path/filepath" "strings" - "syscall" "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/console" @@ -38,12 +36,12 @@ var ( Action: utils.MigrateFlags(localConsole), Name: "console", Usage: "Start an interactive JavaScript environment", - Flags: 
append(append(append(nodeFlags, rpcFlags...), consoleFlags...), whisperFlags...), + Flags: append(append(nodeFlags, rpcFlags...), consoleFlags...), Category: "CONSOLE COMMANDS", Description: ` The Geth console is an interactive shell for the JavaScript runtime environment which exposes a node admin interface as well as the Ðapp JavaScript API. -See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`, +See https://geth.ethereum.org/docs/interface/javascript-console.`, } attachCommand = cli.Command{ @@ -56,7 +54,7 @@ See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console.`, Description: ` The Geth console is an interactive shell for the JavaScript runtime environment which exposes a node admin interface as well as the Ðapp JavaScript API. -See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console. +See https://geth.ethereum.org/docs/interface/javascript-console. This command allows to open a console on a running geth node.`, } @@ -69,7 +67,7 @@ This command allows to open a console on a running geth node.`, Category: "CONSOLE COMMANDS", Description: ` The JavaScript VM exposes a node admin interface as well as the Ðapp -JavaScript API. See https://github.com/ethereum/go-ethereum/wiki/JavaScript-Console`, +JavaScript API. See https://geth.ethereum.org/docs/interface/javascript-console`, } ) @@ -123,7 +121,7 @@ func remoteConsole(ctx *cli.Context) error { path = ctx.GlobalString(utils.DataDirFlag.Name) } if path != "" { - if ctx.GlobalBool(utils.LegacyTestnetFlag.Name) || ctx.GlobalBool(utils.RopstenFlag.Name) { + if ctx.GlobalBool(utils.RopstenFlag.Name) { // Maintain compatibility with older Geth configurations storing the // Ropsten database in `testnet` instead of `ropsten`. legacyPath := filepath.Join(path, "testnet") @@ -136,8 +134,8 @@ func remoteConsole(ctx *cli.Context) error { path = filepath.Join(path, "rinkeby") } else if ctx.GlobalBool(utils.GoerliFlag.Name) { path = filepath.Join(path, "goerli") - } else if ctx.GlobalBool(utils.YoloV2Flag.Name) { - path = filepath.Join(path, "yolo-v2") + } else if ctx.GlobalBool(utils.YoloV3Flag.Name) { + path = filepath.Join(path, "yolo-v3") } } endpoint = fmt.Sprintf("%s/geth.ipc", path) @@ -218,13 +216,10 @@ func ephemeralConsole(ctx *cli.Context) error { utils.Fatalf("Failed to execute %s: %v", file, err) } } - // Wait for pending callbacks, but stop for Ctrl-C. - abort := make(chan os.Signal, 1) - signal.Notify(abort, syscall.SIGINT, syscall.SIGTERM) go func() { - <-abort - os.Exit(0) + stack.Wait() + console.Stop(false) }() console.Stop(true) diff --git a/cmd/geth/consolecmd_test.go b/cmd/geth/consolecmd_test.go index 913b060361..c3f41b187c 100644 --- a/cmd/geth/consolecmd_test.go +++ b/cmd/geth/consolecmd_test.go @@ -35,16 +35,25 @@ const ( httpAPIs = "eth:1.0 net:1.0 rpc:1.0 web3:1.0" ) +// spawns geth with the given command line args, using a set of flags to minimise +// memory and disk IO. If the args don't set --datadir, the +// child g gets a temporary data directory. +func runMinimalGeth(t *testing.T, args ...string) *testgeth { + // --ropsten to make the 'writing genesis to disk' faster (no accounts) + // --networkid=1337 to avoid cache bump + // --syncmode=full to avoid allocating fast sync bloom + allArgs := []string{"--ropsten", "--networkid", "1337", "--syncmode=full", "--port", "0", + "--nat", "none", "--nodiscover", "--maxpeers", "0", "--cache", "64"} + return runGeth(t, append(allArgs, args...)...) 
+} + // Tests that a node embedded within a console can be started up properly and // then terminated by closing the input stream. func TestConsoleWelcome(t *testing.T) { coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" // Start a geth console, make sure it's cleaned up and terminate the console - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, - "console") + geth := runMinimalGeth(t, "--miner.etherbase", coinbase, "console") // Gather all the infos the welcome message needs to contain geth.SetTemplateFunc("goos", func() string { return runtime.GOOS }) @@ -73,10 +82,13 @@ To exit, press ctrl-d } // Tests that a console can be attached to a running node via various means. -func TestIPCAttachWelcome(t *testing.T) { +func TestAttachWelcome(t *testing.T) { + var ( + ipc string + httpPort string + wsPort string + ) // Configure the instance for IPC attachment - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - var ipc string if runtime.GOOS == "windows" { ipc = `\\.\pipe\geth` + strconv.Itoa(trulyRandInt(100000, 999999)) } else { @@ -84,51 +96,28 @@ func TestIPCAttachWelcome(t *testing.T) { defer os.RemoveAll(ws) ipc = filepath.Join(ws, "geth.ipc") } - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--ipcpath", ipc) - - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - waitForEndpoint(t, ipc, 3*time.Second) - testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) - -} - -func TestHTTPAttachWelcome(t *testing.T) { - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--http", "--http.port", port) - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - endpoint := "http://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) -} - -func TestWSAttachWelcome(t *testing.T) { - coinbase := "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182" - port := strconv.Itoa(trulyRandInt(1024, 65536)) // Yeah, sometimes this will fail, sorry :P - - geth := runGeth(t, - "--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", - "--etherbase", coinbase, "--ws", "--ws.port", port) - defer func() { - geth.Interrupt() - geth.ExpectExit() - }() - - endpoint := "ws://127.0.0.1:" + port - waitForEndpoint(t, endpoint, 3*time.Second) - testAttachWelcome(t, geth, endpoint, httpAPIs) + // And HTTP + WS attachment + p := trulyRandInt(1024, 65533) // Yeah, sometimes this will fail, sorry :P + httpPort = strconv.Itoa(p) + wsPort = strconv.Itoa(p + 1) + geth := runMinimalGeth(t, "--miner.etherbase", "0x8605cdbbdb6d264aa742e77020dcbc58fcdce182", + "--ipcpath", ipc, + "--http", "--http.port", httpPort, + "--ws", "--ws.port", wsPort) + t.Run("ipc", func(t *testing.T) { + waitForEndpoint(t, ipc, 3*time.Second) + testAttachWelcome(t, geth, "ipc:"+ipc, ipcAPIs) + }) + t.Run("http", func(t *testing.T) { + endpoint := "http://127.0.0.1:" + httpPort + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) + }) + t.Run("ws", func(t *testing.T) { + endpoint := "ws://127.0.0.1:" + wsPort + waitForEndpoint(t, endpoint, 3*time.Second) + testAttachWelcome(t, geth, endpoint, httpAPIs) + }) } func testAttachWelcome(t *testing.T, geth *testgeth, endpoint, apis string) 
{ diff --git a/cmd/geth/dao_test.go b/cmd/geth/dao_test.go index 6c36771e97..29b1a7f474 100644 --- a/cmd/geth/dao_test.go +++ b/cmd/geth/dao_test.go @@ -115,10 +115,10 @@ func testDAOForkBlockNewChain(t *testing.T, test int, genesis string, expectBloc if err := ioutil.WriteFile(json, []byte(genesis), 0600); err != nil { t.Fatalf("test %d: failed to write genesis file: %v", test, err) } - runGeth(t, "--datadir", datadir, "init", json).WaitExit() + runGeth(t, "--datadir", datadir, "--networkid", "1337", "init", json).WaitExit() } else { // Force chain initialization - args := []string{"--port", "0", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} + args := []string{"--port", "0", "--networkid", "1337", "--maxpeers", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--datadir", datadir} runGeth(t, append(args, []string{"--exec", "2+2", "console"}...)...).WaitExit() } // Retrieve the DAO config flag from the database diff --git a/cmd/geth/dbcmd.go b/cmd/geth/dbcmd.go new file mode 100644 index 0000000000..48478f613e --- /dev/null +++ b/cmd/geth/dbcmd.go @@ -0,0 +1,341 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "fmt" + "os" + "path/filepath" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/console/prompt" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/leveldb" + "github.com/ethereum/go-ethereum/log" + "github.com/syndtr/goleveldb/leveldb/opt" + "gopkg.in/urfave/cli.v1" +) + +var ( + removedbCommand = cli.Command{ + Action: utils.MigrateFlags(removeDB), + Name: "removedb", + Usage: "Remove blockchain and state databases", + ArgsUsage: "", + Flags: []cli.Flag{ + utils.DataDirFlag, + }, + Category: "DATABASE COMMANDS", + Description: ` +Remove blockchain and state databases`, + } + dbCommand = cli.Command{ + Name: "db", + Usage: "Low level database operations", + ArgsUsage: "", + Category: "DATABASE COMMANDS", + Subcommands: []cli.Command{ + dbInspectCmd, + dbStatCmd, + dbCompactCmd, + dbGetCmd, + dbDeleteCmd, + dbPutCmd, + }, + } + dbInspectCmd = cli.Command{ + Action: utils.MigrateFlags(inspect), + Name: "inspect", + ArgsUsage: " ", + + Usage: "Inspect the storage size for each type of data in the database", + Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`, + } + dbStatCmd = cli.Command{ + Action: dbStats, + Name: "stats", + Usage: "Print leveldb statistics", + } + dbCompactCmd = cli.Command{ + Action: dbCompact, + Name: "compact", + Usage: "Compact leveldb database. 
WARNING: May take a very long time", + Description: `This command performs a database compaction. +WARNING: This operation may take a very long time to finish, and may cause database +corruption if it is aborted during execution'!`, + } + dbGetCmd = cli.Command{ + Action: dbGet, + Name: "get", + Usage: "Show the value of a database key", + ArgsUsage: "", + Description: "This command looks up the specified database key from the database.", + } + dbDeleteCmd = cli.Command{ + Action: dbDelete, + Name: "delete", + Usage: "Delete a database key (WARNING: may corrupt your database)", + ArgsUsage: "", + Description: `This command deletes the specified database key from the database. +WARNING: This is a low-level operation which may cause database corruption!`, + } + dbPutCmd = cli.Command{ + Action: dbPut, + Name: "put", + Usage: "Set the value of a database key (WARNING: may corrupt your database)", + ArgsUsage: " ", + Description: `This command sets a given database key to the given value. +WARNING: This is a low-level operation which may cause database corruption!`, + } +) + +func removeDB(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + + // Remove the full node state database + path := stack.ResolvePath("chaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node state database") + } else { + log.Info("Full node state database missing", "path", path) + } + // Remove the full node ancient database + path = config.Eth.DatabaseFreezer + switch { + case path == "": + path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") + case !filepath.IsAbs(path): + path = config.Node.ResolvePath(path) + } + if common.FileExist(path) { + confirmAndRemoveDB(path, "full node ancient database") + } else { + log.Info("Full node ancient database missing", "path", path) + } + // Remove the light node database + path = stack.ResolvePath("lightchaindata") + if common.FileExist(path) { + confirmAndRemoveDB(path, "light node database") + } else { + log.Info("Light node database missing", "path", path) + } + return nil +} + +// confirmAndRemoveDB prompts the user for a last confirmation and removes the +// folder if accepted. 
+func confirmAndRemoveDB(database string, kind string) { + confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) + switch { + case err != nil: + utils.Fatalf("%v", err) + case !confirm: + log.Info("Database deletion skipped", "path", database) + default: + start := time.Now() + filepath.Walk(database, func(path string, info os.FileInfo, err error) error { + // If we're at the top level folder, recurse into + if path == database { + return nil + } + // Delete all the files, but not subfolders + if !info.IsDir() { + os.Remove(path) + return nil + } + return filepath.SkipDir + }) + log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +func inspect(ctx *cli.Context) error { + var ( + prefix []byte + start []byte + ) + if ctx.NArg() > 2 { + return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage) + } + if ctx.NArg() >= 1 { + if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil { + return fmt.Errorf("failed to hex-decode 'prefix': %v", err) + } else { + prefix = d + } + } + if ctx.NArg() >= 2 { + if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil { + return fmt.Errorf("failed to hex-decode 'start': %v", err) + } else { + start = d + } + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + _, chainDb := utils.MakeChain(ctx, stack, true) + defer chainDb.Close() + + return rawdb.InspectDatabase(chainDb, prefix, start) +} + +func showLeveldbStats(db ethdb.Stater) { + if stats, err := db.Stat("leveldb.stats"); err != nil { + log.Warn("Failed to read database stats", "error", err) + } else { + fmt.Println(stats) + } + if ioStats, err := db.Stat("leveldb.iostats"); err != nil { + log.Warn("Failed to read database iostats", "error", err) + } else { + fmt.Println(ioStats) + } +} + +func dbStats(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.ReadOnly = true + }) + if err != nil { + return err + } + showLeveldbStats(db) + err = db.Close() + if err != nil { + log.Info("Close err", "error", err) + } + return nil +} + +func dbCompact(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + cache := ctx.GlobalInt(utils.CacheFlag.Name) * ctx.GlobalInt(utils.CacheDatabaseFlag.Name) / 100 + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.OpenFilesCacheCapacity = utils.MakeDatabaseHandles() + options.BlockCacheCapacity = cache / 2 * opt.MiB + options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + }) + if err != nil { + return err + } + showLeveldbStats(db) + log.Info("Triggering compaction") + err = db.Compact(nil, nil) + if err != nil { + log.Info("Compact err", "error", err) + } + showLeveldbStats(db) + log.Info("Closing db") + err = db.Close() + if err != nil { + log.Info("Close err", "error", err) + } + log.Info("Exiting") + return err +} + +// dbGet shows the value of a given database key +func dbGet(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + path := stack.ResolvePath("chaindata") + db, err := leveldb.NewCustom(path, "", func(options *opt.Options) { + options.ReadOnly = true + }) + if err != nil { + return err + } + defer db.Close() + key, err := 
hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + data, err := db.Get(key) + if err != nil { + log.Info("Get operation failed", "error", err) + return err + } + fmt.Printf("key %#x:\n\t%#x\n", key, data) + return nil +} + +// dbDelete deletes a key from the database +func dbDelete(ctx *cli.Context) error { + if ctx.NArg() != 1 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack) + defer db.Close() + key, err := hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + if err = db.Delete(key); err != nil { + log.Info("Delete operation returned an error", "error", err) + return err + } + return nil +} + +// dbPut overwrite a value in the database +func dbPut(ctx *cli.Context) error { + if ctx.NArg() != 2 { + return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage) + } + stack, _ := makeConfigNode(ctx) + defer stack.Close() + db := utils.MakeChainDatabase(ctx, stack) + defer db.Close() + var ( + key []byte + value []byte + data []byte + err error + ) + key, err = hexutil.Decode(ctx.Args().Get(0)) + if err != nil { + log.Info("Could not decode the key", "error", err) + return err + } + value, err = hexutil.Decode(ctx.Args().Get(1)) + if err != nil { + log.Info("Could not decode the value", "error", err) + return err + } + data, err = db.Get(key) + if err == nil { + fmt.Printf("Previous value:\n%#x\n", data) + } + return db.Put(key, value) +} diff --git a/cmd/geth/genesis_test.go b/cmd/geth/genesis_test.go index ee3991acd1..cbc1b38374 100644 --- a/cmd/geth/genesis_test.go +++ b/cmd/geth/genesis_test.go @@ -81,10 +81,10 @@ func TestCustomGenesis(t *testing.T) { if err := ioutil.WriteFile(json, []byte(tt.genesis), 0600); err != nil { t.Fatalf("test %d: failed to write genesis file: %v", i, err) } - runGeth(t, "--nousb", "--datadir", datadir, "init", json).WaitExit() + runGeth(t, "--datadir", datadir, "init", json).WaitExit() // Query the custom genesis block - geth := runGeth(t, "--nousb", + geth := runGeth(t, "--networkid", "1337", "--syncmode=full", "--datadir", datadir, "--maxpeers", "0", "--port", "0", "--nodiscover", "--nat", "none", "--ipcdisable", "--exec", tt.query, "console") diff --git a/cmd/geth/les_test.go b/cmd/geth/les_test.go index 259d4a8067..053ce96aa3 100644 --- a/cmd/geth/les_test.go +++ b/cmd/geth/les_test.go @@ -2,7 +2,12 @@ package main import ( "context" + "fmt" + "os" "path/filepath" + "runtime" + "strings" + "sync/atomic" "testing" "time" @@ -95,24 +100,57 @@ func (g *gethrpc) waitSynced() { } } +// ipcEndpoint resolves an IPC endpoint based on a configured value, taking into +// account the set data folders as well as the designated platform we're currently +// running on. +func ipcEndpoint(ipcPath, datadir string) string { + // On windows we can only use plain top-level pipes + if runtime.GOOS == "windows" { + if strings.HasPrefix(ipcPath, `\\.\pipe\`) { + return ipcPath + } + return `\\.\pipe\` + ipcPath + } + // Resolve names into the data directory full paths otherwise + if filepath.Base(ipcPath) == ipcPath { + if datadir == "" { + return filepath.Join(os.TempDir(), ipcPath) + } + return filepath.Join(datadir, ipcPath) + } + return ipcPath +} + +// nextIPC ensures that each ipc pipe gets a unique name. 
+// On linux, it works well to use ipc pipes all over the filesystem (in datadirs), +// but windows require pipes to sit in "\\.\pipe\". Therefore, to run several +// nodes simultaneously, we need to distinguish between them, which we do by +// the pipe filename instead of folder. +var nextIPC = uint32(0) + func startGethWithIpc(t *testing.T, name string, args ...string) *gethrpc { - g := &gethrpc{name: name} - args = append([]string{"--networkid=42", "--port=0", "--nousb"}, args...) + ipcName := fmt.Sprintf("geth-%d.ipc", atomic.AddUint32(&nextIPC, 1)) + args = append([]string{"--networkid=42", "--port=0", "--ipcpath", ipcName}, args...) t.Logf("Starting %v with rpc: %v", name, args) - g.geth = runGeth(t, args...) + + g := &gethrpc{ + name: name, + geth: runGeth(t, args...), + } // wait before we can attach to it. TODO: probe for it properly time.Sleep(1 * time.Second) var err error - ipcpath := filepath.Join(g.geth.Datadir, "geth.ipc") - g.rpc, err = rpc.Dial(ipcpath) - if err != nil { - t.Fatalf("%v rpc connect: %v", name, err) + ipcpath := ipcEndpoint(ipcName, g.geth.Datadir) + if g.rpc, err = rpc.Dial(ipcpath); err != nil { + t.Fatalf("%v rpc connect to %v: %v", name, ipcpath, err) } return g } func initGeth(t *testing.T) string { - g := runGeth(t, "--nousb", "--networkid=42", "init", "./testdata/clique.json") + args := []string{"--networkid=42", "init", "./testdata/clique.json"} + t.Logf("Initializing geth: %v ", args) + g := runGeth(t, args...) datadir := g.Datadir g.WaitExit() return datadir @@ -120,15 +158,16 @@ func initGeth(t *testing.T) string { func startLightServer(t *testing.T) *gethrpc { datadir := initGeth(t) - runGeth(t, "--nousb", "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv").WaitExit() + t.Logf("Importing keys to geth") + runGeth(t, "--datadir", datadir, "--password", "./testdata/password.txt", "account", "import", "./testdata/key.prv", "--lightkdf").WaitExit() account := "0x02f0d131f1f97aef08aec6e3291b957d9efe7105" - server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1") + server := startGethWithIpc(t, "lightserver", "--allow-insecure-unlock", "--datadir", datadir, "--password", "./testdata/password.txt", "--unlock", account, "--mine", "--light.serve=100", "--light.maxpeers=1", "--nodiscover", "--nat=extip:127.0.0.1", "--verbosity=4") return server } func startClient(t *testing.T, name string) *gethrpc { datadir := initGeth(t) - return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1") + return startGethWithIpc(t, name, "--datadir", datadir, "--nodiscover", "--syncmode=light", "--nat=extip:127.0.0.1", "--verbosity=4") } func TestPriorityClient(t *testing.T) { @@ -151,7 +190,7 @@ func TestPriorityClient(t *testing.T) { prioCli := startClient(t, "prioCli") defer prioCli.killAndWait() // 3_000_000_000 once we move to Go 1.13 - tokens := 3000000000 + tokens := uint64(3000000000) lightServer.callRPC(nil, "les_addBalance", prioCli.getNodeInfo().ID, tokens) prioCli.addPeer(lightServer) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 38e48534dc..207c93c0d8 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -42,7 +42,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/node" gopsutil "github.com/shirou/gopsutil/mem" - cli 
"gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) const ( @@ -61,14 +61,15 @@ var ( utils.UnlockedAccountFlag, utils.PasswordFileFlag, utils.BootnodesFlag, - utils.LegacyBootnodesV4Flag, - utils.LegacyBootnodesV5Flag, utils.DataDirFlag, utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, utils.KeyStoreDirFlag, utils.ExternalSignerFlag, utils.NoUSBFlag, + utils.USBFlag, utils.SmartCardDaemonPathFlag, + utils.OverrideBerlinFlag, utils.EthashCacheDirFlag, utils.EthashCachesInMemoryFlag, utils.EthashCachesOnDiskFlag, @@ -94,17 +95,17 @@ var ( utils.SnapshotFlag, utils.TxLookupLimitFlag, utils.LightServeFlag, - utils.LegacyLightServFlag, utils.LightIngressFlag, utils.LightEgressFlag, utils.LightMaxPeersFlag, - utils.LegacyLightPeersFlag, utils.LightNoPruneFlag, utils.LightKDFFlag, utils.UltraLightServersFlag, utils.UltraLightFractionFlag, utils.UltraLightOnlyAnnounceFlag, + utils.LightNoSyncServeFlag, utils.WhitelistFlag, + utils.BloomFilterSizeFlag, utils.CacheFlag, utils.CacheDatabaseFlag, utils.CacheTrieFlag, @@ -113,22 +114,18 @@ var ( utils.CacheGCFlag, utils.CacheSnapshotFlag, utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, utils.ListenPortFlag, utils.MaxPeersFlag, utils.MaxPendingPeersFlag, utils.MiningEnabledFlag, utils.MinerThreadsFlag, - utils.LegacyMinerThreadsFlag, utils.MinerNotifyFlag, utils.MinerGasTargetFlag, - utils.LegacyMinerGasTargetFlag, utils.MinerGasLimitFlag, utils.MinerGasPriceFlag, - utils.LegacyMinerGasPriceFlag, utils.MinerEtherbaseFlag, - utils.LegacyMinerEtherbaseFlag, utils.MinerExtraDataFlag, - utils.LegacyMinerExtraDataFlag, utils.MinerRecommitIntervalFlag, utils.MinerNoVerfiyFlag, utils.NATFlag, @@ -138,22 +135,20 @@ var ( utils.NodeKeyFileFlag, utils.NodeKeyHexFlag, utils.DNSDiscoveryFlag, + utils.MainnetFlag, utils.DeveloperFlag, utils.DeveloperPeriodFlag, - utils.LegacyTestnetFlag, utils.RopstenFlag, utils.RinkebyFlag, utils.GoerliFlag, - utils.YoloV2Flag, + utils.YoloV3Flag, utils.VMEnableDebugFlag, utils.NetworkIdFlag, utils.EthStatsURLFlag, utils.FakePoWFlag, utils.NoCompactionFlag, utils.GpoBlocksFlag, - utils.LegacyGpoBlocksFlag, utils.GpoPercentileFlag, - utils.LegacyGpoPercentileFlag, utils.GpoMaxGasPriceFlag, utils.EWASMInterpreterFlag, utils.EVMInterpreterFlag, @@ -171,32 +166,24 @@ var ( utils.LegacyRPCPortFlag, utils.LegacyRPCCORSDomainFlag, utils.LegacyRPCVirtualHostsFlag, + utils.LegacyRPCApiFlag, utils.GraphQLEnabledFlag, utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, utils.HTTPApiFlag, - utils.LegacyRPCApiFlag, + utils.HTTPPathPrefixFlag, utils.WSEnabledFlag, utils.WSListenAddrFlag, - utils.LegacyWSListenAddrFlag, utils.WSPortFlag, - utils.LegacyWSPortFlag, utils.WSApiFlag, - utils.LegacyWSApiFlag, utils.WSAllowedOriginsFlag, - utils.LegacyWSAllowedOriginsFlag, + utils.WSPathPrefixFlag, utils.IPCDisabledFlag, utils.IPCPathFlag, utils.InsecureUnlockAllowedFlag, utils.RPCGlobalGasCapFlag, utils.RPCGlobalTxFeeCapFlag, - } - - whisperFlags = []cli.Flag{ - utils.WhisperEnabledFlag, - utils.WhisperMaxMessageSizeFlag, - utils.WhisperMinPOWFlag, - utils.WhisperRestrictConnectionBetweenLightClientsFlag, + utils.AllowUnprotectedTxs, } metricsFlags = []cli.Flag{ @@ -217,7 +204,7 @@ func init() { // Initialize the CLI app and start Geth app.Action = geth app.HideVersion = true // we have a command to print the version - app.Copyright = "Copyright 2013-2020 The go-ethereum Authors" + app.Copyright = "Copyright 2013-2021 The go-ethereum Authors" app.Commands = []cli.Command{ // See chaincmd.go: initCommand, @@ -229,7 +216,6 @@ func 
init() { removedbCommand, dumpCommand, dumpGenesisCommand, - inspectCommand, // See accountcmd.go: accountCommand, walletCommand, @@ -241,13 +227,16 @@ func init() { makecacheCommand, makedagCommand, versionCommand, + versionCheckCommand, licenseCommand, // See config.go dumpConfigCommand, - // See retesteth.go - retestethCommand, + // see dbcmd.go + dbCommand, // See cmd/utils/flags_legacy.go utils.ShowDeprecated, + // See snapshot.go + snapshotCommand, } sort.Sort(cli.CommandsByName(app.Commands)) @@ -255,8 +244,6 @@ func init() { app.Flags = append(app.Flags, rpcFlags...) app.Flags = append(app.Flags, consoleFlags...) app.Flags = append(app.Flags, debug.Flags...) - app.Flags = append(app.Flags, debug.DeprecatedFlags...) - app.Flags = append(app.Flags, whisperFlags...) app.Flags = append(app.Flags, metricsFlags...) app.Before = func(ctx *cli.Context) error { @@ -281,11 +268,6 @@ func main() { func prepare(ctx *cli.Context) { // If we're running a known preset, log it for convenience. switch { - case ctx.GlobalIsSet(utils.LegacyTestnetFlag.Name): - log.Info("Starting Geth on Ropsten testnet...") - log.Warn("The --testnet flag is ambiguous! Please specify one of --goerli, --rinkeby, or --ropsten.") - log.Warn("The generic --testnet flag is deprecated and will be removed in the future!") - case ctx.GlobalIsSet(utils.RopstenFlag.Name): log.Info("Starting Geth on Ropsten testnet...") @@ -295,6 +277,9 @@ func prepare(ctx *cli.Context) { case ctx.GlobalIsSet(utils.GoerliFlag.Name): log.Info("Starting Geth on Görli testnet...") + case ctx.GlobalIsSet(utils.YoloV3Flag.Name): + log.Info("Starting Geth on YOLOv3 testnet...") + case ctx.GlobalIsSet(utils.DeveloperFlag.Name): log.Info("Starting Geth in ephemeral dev mode...") @@ -304,7 +289,7 @@ func prepare(ctx *cli.Context) { // If we're a full node on mainnet without --cache specified, bump default cache allowance if ctx.GlobalString(utils.SyncModeFlag.Name) != "light" && !ctx.GlobalIsSet(utils.CacheFlag.Name) && !ctx.GlobalIsSet(utils.NetworkIdFlag.Name) { // Make sure we're not on any supported preconfigured testnet either - if !ctx.GlobalIsSet(utils.LegacyTestnetFlag.Name) && !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { + if !ctx.GlobalIsSet(utils.RopstenFlag.Name) && !ctx.GlobalIsSet(utils.RinkebyFlag.Name) && !ctx.GlobalIsSet(utils.GoerliFlag.Name) && !ctx.GlobalIsSet(utils.DeveloperFlag.Name) { // Nope, we're really on mainnet. Bump that cache up! 
log.Info("Bumping default cache on mainnet", "provided", ctx.GlobalInt(utils.CacheFlag.Name), "updated", 4096) ctx.GlobalSet(utils.CacheFlag.Name, strconv.Itoa(4096)) @@ -366,7 +351,7 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { debug.Memsize.Add("node", stack) // Start up the node itself - utils.StartNode(stack) + utils.StartNode(ctx, stack) // Unlock any account specifically requested unlockAccounts(ctx, stack) @@ -449,19 +434,11 @@ func startNode(ctx *cli.Context, stack *node.Node, backend ethapi.Backend) { if !ok { utils.Fatalf("Ethereum service not running: %v", err) } - // Set the gas price to the limits from the CLI and start mining gasprice := utils.GlobalBig(ctx, utils.MinerGasPriceFlag.Name) - if ctx.GlobalIsSet(utils.LegacyMinerGasPriceFlag.Name) && !ctx.GlobalIsSet(utils.MinerGasPriceFlag.Name) { - gasprice = utils.GlobalBig(ctx, utils.LegacyMinerGasPriceFlag.Name) - } ethBackend.TxPool().SetGasPrice(gasprice) // start mining threads := ctx.GlobalInt(utils.MinerThreadsFlag.Name) - if ctx.GlobalIsSet(utils.LegacyMinerThreadsFlag.Name) && !ctx.GlobalIsSet(utils.MinerThreadsFlag.Name) { - threads = ctx.GlobalInt(utils.LegacyMinerThreadsFlag.Name) - log.Warn("The flag --minerthreads is deprecated and will be removed in the future, please use --miner.threads") - } if err := ethBackend.StartMining(threads); err != nil { utils.Fatalf("Failed to start mining: %v", err) } diff --git a/cmd/geth/misccmd.go b/cmd/geth/misccmd.go index 0e7ee96513..b347d31d97 100644 --- a/cmd/geth/misccmd.go +++ b/cmd/geth/misccmd.go @@ -25,12 +25,23 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/params" "gopkg.in/urfave/cli.v1" ) var ( + VersionCheckUrlFlag = cli.StringFlag{ + Name: "check.url", + Usage: "URL to use when checking vulnerabilities", + Value: "https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json", + } + VersionCheckVersionFlag = cli.StringFlag{ + Name: "check.version", + Usage: "Version to check", + Value: fmt.Sprintf("Geth/v%v/%v-%v/%v", + params.VersionWithCommit(gitCommit, gitDate), + runtime.GOOS, runtime.GOARCH, runtime.Version()), + } makecacheCommand = cli.Command{ Action: utils.MigrateFlags(makecache), Name: "makecache", @@ -65,6 +76,21 @@ Regular users do not need to execute it. Category: "MISCELLANEOUS COMMANDS", Description: ` The output of this command is supposed to be machine-readable. +`, + } + versionCheckCommand = cli.Command{ + Action: utils.MigrateFlags(versionCheck), + Flags: []cli.Flag{ + VersionCheckUrlFlag, + VersionCheckVersionFlag, + }, + Name: "version-check", + Usage: "Checks (online) whether the current version suffers from any known security vulnerabilities", + ArgsUsage: "", + Category: "MISCELLANEOUS COMMANDS", + Description: ` +The version-check command fetches vulnerability-information from https://geth.ethereum.org/docs/vulnerabilities/vulnerabilities.json, +and displays information about any security vulnerabilities that affect the currently executing version. 
`, } licenseCommand = cli.Command{ @@ -116,7 +142,6 @@ func version(ctx *cli.Context) error { fmt.Println("Git Commit Date:", gitDate) } fmt.Println("Architecture:", runtime.GOARCH) - fmt.Println("Protocol Versions:", eth.ProtocolVersions) fmt.Println("Go Version:", runtime.Version()) fmt.Println("Operating System:", runtime.GOOS) fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH")) diff --git a/cmd/geth/retesteth.go b/cmd/geth/retesteth.go deleted file mode 100644 index debee1182b..0000000000 --- a/cmd/geth/retesteth.go +++ /dev/null @@ -1,916 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . - -package main - -import ( - "bytes" - "context" - "fmt" - "math/big" - "os" - "os/signal" - "time" - - "github.com/ethereum/go-ethereum/cmd/utils" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/consensus" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/consensus/misc" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" - "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" - - cli "gopkg.in/urfave/cli.v1" -) - -var ( - rpcPortFlag = cli.IntFlag{ - Name: "rpcport", - Usage: "HTTP-RPC server listening port", - Value: node.DefaultHTTPPort, - } - retestethCommand = cli.Command{ - Action: utils.MigrateFlags(retesteth), - Name: "retesteth", - Usage: "Launches geth in retesteth mode", - ArgsUsage: "", - Flags: []cli.Flag{rpcPortFlag}, - Category: "MISCELLANEOUS COMMANDS", - Description: `Launches geth in retesteth mode (no database, no network, only retesteth RPC interface)`, - } -) - -type RetestethTestAPI interface { - SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) - MineBlocks(ctx context.Context, number uint64) (bool, error) - ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) - ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) - RewindToBlock(ctx context.Context, number uint64) (bool, error) - GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) -} - -type RetestethEthAPI interface { - SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error) - BlockNumber(ctx context.Context) (uint64, error) - GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) - GetBlockByHash(ctx 
context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) - GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) - GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) - GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) -} - -type RetestethDebugAPI interface { - AccountRange(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - addressHash *math.HexOrDecimal256, maxResults uint64, - ) (AccountRangeResult, error) - StorageRangeAt(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - address common.Address, - begin *math.HexOrDecimal256, maxResults uint64, - ) (StorageRangeResult, error) -} - -type RetestWeb3API interface { - ClientVersion(ctx context.Context) (string, error) -} - -type RetestethAPI struct { - ethDb ethdb.Database - db state.Database - chainConfig *params.ChainConfig - author common.Address - extraData []byte - genesisHash common.Hash - engine *NoRewardEngine - blockchain *core.BlockChain - txMap map[common.Address]map[uint64]*types.Transaction // Sender -> Nonce -> Transaction - txSenders map[common.Address]struct{} // Set of transaction senders - blockInterval uint64 -} - -type ChainParams struct { - SealEngine string `json:"sealEngine"` - Params CParamsParams `json:"params"` - Genesis CParamsGenesis `json:"genesis"` - Accounts map[common.Address]CParamsAccount `json:"accounts"` -} - -type CParamsParams struct { - AccountStartNonce math.HexOrDecimal64 `json:"accountStartNonce"` - HomesteadForkBlock *math.HexOrDecimal64 `json:"homesteadForkBlock"` - EIP150ForkBlock *math.HexOrDecimal64 `json:"EIP150ForkBlock"` - EIP158ForkBlock *math.HexOrDecimal64 `json:"EIP158ForkBlock"` - DaoHardforkBlock *math.HexOrDecimal64 `json:"daoHardforkBlock"` - ByzantiumForkBlock *math.HexOrDecimal64 `json:"byzantiumForkBlock"` - ConstantinopleForkBlock *math.HexOrDecimal64 `json:"constantinopleForkBlock"` - ConstantinopleFixForkBlock *math.HexOrDecimal64 `json:"constantinopleFixForkBlock"` - IstanbulBlock *math.HexOrDecimal64 `json:"istanbulForkBlock"` - ChainID *math.HexOrDecimal256 `json:"chainID"` - MaximumExtraDataSize math.HexOrDecimal64 `json:"maximumExtraDataSize"` - TieBreakingGas bool `json:"tieBreakingGas"` - MinGasLimit math.HexOrDecimal64 `json:"minGasLimit"` - MaxGasLimit math.HexOrDecimal64 `json:"maxGasLimit"` - GasLimitBoundDivisor math.HexOrDecimal64 `json:"gasLimitBoundDivisor"` - MinimumDifficulty math.HexOrDecimal256 `json:"minimumDifficulty"` - DifficultyBoundDivisor math.HexOrDecimal256 `json:"difficultyBoundDivisor"` - DurationLimit math.HexOrDecimal256 `json:"durationLimit"` - BlockReward math.HexOrDecimal256 `json:"blockReward"` - NetworkID math.HexOrDecimal256 `json:"networkID"` -} - -type CParamsGenesis struct { - Nonce math.HexOrDecimal64 `json:"nonce"` - Difficulty *math.HexOrDecimal256 `json:"difficulty"` - MixHash *math.HexOrDecimal256 `json:"mixHash"` - Author common.Address `json:"author"` - Timestamp math.HexOrDecimal64 `json:"timestamp"` - ParentHash common.Hash `json:"parentHash"` - ExtraData hexutil.Bytes `json:"extraData"` - GasLimit math.HexOrDecimal64 `json:"gasLimit"` -} - -type CParamsAccount struct { - Balance *math.HexOrDecimal256 `json:"balance"` - Precompiled *CPAccountPrecompiled `json:"precompiled"` - Code hexutil.Bytes `json:"code"` - Storage map[string]string `json:"storage"` - Nonce 
*math.HexOrDecimal64 `json:"nonce"` -} - -type CPAccountPrecompiled struct { - Name string `json:"name"` - StartingBlock math.HexOrDecimal64 `json:"startingBlock"` - Linear *CPAPrecompiledLinear `json:"linear"` -} - -type CPAPrecompiledLinear struct { - Base uint64 `json:"base"` - Word uint64 `json:"word"` -} - -type AccountRangeResult struct { - AddressMap map[common.Hash]common.Address `json:"addressMap"` - NextKey common.Hash `json:"nextKey"` -} - -type StorageRangeResult struct { - Complete bool `json:"complete"` - Storage map[common.Hash]SRItem `json:"storage"` -} - -type SRItem struct { - Key string `json:"key"` - Value string `json:"value"` -} - -type NoRewardEngine struct { - inner consensus.Engine - rewardsOn bool -} - -func (e *NoRewardEngine) Author(header *types.Header) (common.Address, error) { - return e.inner.Author(header) -} - -func (e *NoRewardEngine) VerifyHeader(chain consensus.ChainHeaderReader, header *types.Header, seal bool) error { - return e.inner.VerifyHeader(chain, header, seal) -} - -func (e *NoRewardEngine) VerifyHeaders(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool) (chan<- struct{}, <-chan error) { - return e.inner.VerifyHeaders(chain, headers, seals) -} - -func (e *NoRewardEngine) VerifyUncles(chain consensus.ChainReader, block *types.Block) error { - return e.inner.VerifyUncles(chain, block) -} - -func (e *NoRewardEngine) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { - return e.inner.VerifySeal(chain, header) -} - -func (e *NoRewardEngine) Prepare(chain consensus.ChainHeaderReader, header *types.Header) error { - return e.inner.Prepare(chain, header) -} - -func (e *NoRewardEngine) accumulateRewards(config *params.ChainConfig, state *state.StateDB, header *types.Header, uncles []*types.Header) { - // Simply touch miner and uncle coinbase accounts - reward := big.NewInt(0) - for _, uncle := range uncles { - state.AddBalance(uncle.Coinbase, reward) - } - state.AddBalance(header.Coinbase, reward) -} - -func (e *NoRewardEngine) Finalize(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, - uncles []*types.Header) { - if e.rewardsOn { - e.inner.Finalize(chain, header, statedb, txs, uncles) - } else { - e.accumulateRewards(chain.Config(), statedb, header, uncles) - header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - } -} - -func (e *NoRewardEngine) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, statedb *state.StateDB, txs []*types.Transaction, - uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { - if e.rewardsOn { - return e.inner.FinalizeAndAssemble(chain, header, statedb, txs, uncles, receipts) - } else { - e.accumulateRewards(chain.Config(), statedb, header, uncles) - header.Root = statedb.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - - // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil - } -} - -func (e *NoRewardEngine) Seal(chain consensus.ChainHeaderReader, block *types.Block, results chan<- *types.Block, stop <-chan struct{}) error { - return e.inner.Seal(chain, block, results, stop) -} - -func (e *NoRewardEngine) SealHash(header *types.Header) common.Hash { - return e.inner.SealHash(header) -} - -func (e *NoRewardEngine) CalcDifficulty(chain consensus.ChainHeaderReader, time uint64, parent *types.Header) *big.Int { - return e.inner.CalcDifficulty(chain, time, parent) 
-} - -func (e *NoRewardEngine) APIs(chain consensus.ChainHeaderReader) []rpc.API { - return e.inner.APIs(chain) -} - -func (e *NoRewardEngine) Close() error { - return e.inner.Close() -} - -func (api *RetestethAPI) SetChainParams(ctx context.Context, chainParams ChainParams) (bool, error) { - // Clean up - if api.blockchain != nil { - api.blockchain.Stop() - } - if api.engine != nil { - api.engine.Close() - } - if api.ethDb != nil { - api.ethDb.Close() - } - ethDb := rawdb.NewMemoryDatabase() - accounts := make(core.GenesisAlloc) - for address, account := range chainParams.Accounts { - balance := big.NewInt(0) - if account.Balance != nil { - balance.Set((*big.Int)(account.Balance)) - } - var nonce uint64 - if account.Nonce != nil { - nonce = uint64(*account.Nonce) - } - if account.Precompiled == nil || account.Balance != nil { - storage := make(map[common.Hash]common.Hash) - for k, v := range account.Storage { - storage[common.HexToHash(k)] = common.HexToHash(v) - } - accounts[address] = core.GenesisAccount{ - Balance: balance, - Code: account.Code, - Nonce: nonce, - Storage: storage, - } - } - } - chainId := big.NewInt(1) - if chainParams.Params.ChainID != nil { - chainId.Set((*big.Int)(chainParams.Params.ChainID)) - } - var ( - homesteadBlock *big.Int - daoForkBlock *big.Int - eip150Block *big.Int - eip155Block *big.Int - eip158Block *big.Int - byzantiumBlock *big.Int - constantinopleBlock *big.Int - petersburgBlock *big.Int - istanbulBlock *big.Int - ) - if chainParams.Params.HomesteadForkBlock != nil { - homesteadBlock = big.NewInt(int64(*chainParams.Params.HomesteadForkBlock)) - } - if chainParams.Params.DaoHardforkBlock != nil { - daoForkBlock = big.NewInt(int64(*chainParams.Params.DaoHardforkBlock)) - } - if chainParams.Params.EIP150ForkBlock != nil { - eip150Block = big.NewInt(int64(*chainParams.Params.EIP150ForkBlock)) - } - if chainParams.Params.EIP158ForkBlock != nil { - eip158Block = big.NewInt(int64(*chainParams.Params.EIP158ForkBlock)) - eip155Block = eip158Block - } - if chainParams.Params.ByzantiumForkBlock != nil { - byzantiumBlock = big.NewInt(int64(*chainParams.Params.ByzantiumForkBlock)) - } - if chainParams.Params.ConstantinopleForkBlock != nil { - constantinopleBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleForkBlock)) - } - if chainParams.Params.ConstantinopleFixForkBlock != nil { - petersburgBlock = big.NewInt(int64(*chainParams.Params.ConstantinopleFixForkBlock)) - } - if constantinopleBlock != nil && petersburgBlock == nil { - petersburgBlock = big.NewInt(100000000000) - } - if chainParams.Params.IstanbulBlock != nil { - istanbulBlock = big.NewInt(int64(*chainParams.Params.IstanbulBlock)) - } - - genesis := &core.Genesis{ - Config: ¶ms.ChainConfig{ - ChainID: chainId, - HomesteadBlock: homesteadBlock, - DAOForkBlock: daoForkBlock, - DAOForkSupport: true, - EIP150Block: eip150Block, - EIP155Block: eip155Block, - EIP158Block: eip158Block, - ByzantiumBlock: byzantiumBlock, - ConstantinopleBlock: constantinopleBlock, - PetersburgBlock: petersburgBlock, - IstanbulBlock: istanbulBlock, - }, - Nonce: uint64(chainParams.Genesis.Nonce), - Timestamp: uint64(chainParams.Genesis.Timestamp), - ExtraData: chainParams.Genesis.ExtraData, - GasLimit: uint64(chainParams.Genesis.GasLimit), - Difficulty: big.NewInt(0).Set((*big.Int)(chainParams.Genesis.Difficulty)), - Mixhash: common.BigToHash((*big.Int)(chainParams.Genesis.MixHash)), - Coinbase: chainParams.Genesis.Author, - ParentHash: chainParams.Genesis.ParentHash, - Alloc: accounts, - } - chainConfig, genesisHash, 
err := core.SetupGenesisBlock(ethDb, genesis) - if err != nil { - return false, err - } - fmt.Printf("Chain config: %v\n", chainConfig) - - var inner consensus.Engine - switch chainParams.SealEngine { - case "NoProof", "NoReward": - inner = ethash.NewFaker() - case "Ethash": - inner = ethash.New(ethash.Config{ - CacheDir: "ethash", - CachesInMem: 2, - CachesOnDisk: 3, - CachesLockMmap: false, - DatasetsInMem: 1, - DatasetsOnDisk: 2, - DatasetsLockMmap: false, - }, nil, false) - default: - return false, fmt.Errorf("unrecognised seal engine: %s", chainParams.SealEngine) - } - engine := &NoRewardEngine{inner: inner, rewardsOn: chainParams.SealEngine != "NoReward"} - - blockchain, err := core.NewBlockChain(ethDb, nil, chainConfig, engine, vm.Config{}, nil, nil) - if err != nil { - return false, err - } - - api.chainConfig = chainConfig - api.genesisHash = genesisHash - api.author = chainParams.Genesis.Author - api.extraData = chainParams.Genesis.ExtraData - api.ethDb = ethDb - api.engine = engine - api.blockchain = blockchain - api.db = state.NewDatabase(api.ethDb) - api.txMap = make(map[common.Address]map[uint64]*types.Transaction) - api.txSenders = make(map[common.Address]struct{}) - api.blockInterval = 0 - return true, nil -} - -func (api *RetestethAPI) SendRawTransaction(ctx context.Context, rawTx hexutil.Bytes) (common.Hash, error) { - tx := new(types.Transaction) - if err := rlp.DecodeBytes(rawTx, tx); err != nil { - // Return nil is not by mistake - some tests include sending transaction where gasLimit overflows uint64 - return common.Hash{}, nil - } - signer := types.MakeSigner(api.chainConfig, big.NewInt(int64(api.currentNumber()))) - sender, err := types.Sender(signer, tx) - if err != nil { - return common.Hash{}, err - } - if nonceMap, ok := api.txMap[sender]; ok { - nonceMap[tx.Nonce()] = tx - } else { - nonceMap = make(map[uint64]*types.Transaction) - nonceMap[tx.Nonce()] = tx - api.txMap[sender] = nonceMap - } - api.txSenders[sender] = struct{}{} - return tx.Hash(), nil -} - -func (api *RetestethAPI) MineBlocks(ctx context.Context, number uint64) (bool, error) { - for i := 0; i < int(number); i++ { - if err := api.mineBlock(); err != nil { - return false, err - } - } - fmt.Printf("Mined %d blocks\n", number) - return true, nil -} - -func (api *RetestethAPI) currentNumber() uint64 { - if current := api.blockchain.CurrentBlock(); current != nil { - return current.NumberU64() - } - return 0 -} - -func (api *RetestethAPI) mineBlock() error { - number := api.currentNumber() - parentHash := rawdb.ReadCanonicalHash(api.ethDb, number) - parent := rawdb.ReadBlock(api.ethDb, parentHash, number) - var timestamp uint64 - if api.blockInterval == 0 { - timestamp = uint64(time.Now().Unix()) - } else { - timestamp = parent.Time() + api.blockInterval - } - gasLimit := core.CalcGasLimit(parent, 9223372036854775807, 9223372036854775807) - header := &types.Header{ - ParentHash: parent.Hash(), - Number: big.NewInt(int64(number + 1)), - GasLimit: gasLimit, - Extra: api.extraData, - Time: timestamp, - } - header.Coinbase = api.author - if api.engine != nil { - api.engine.Prepare(api.blockchain, header) - } - // If we are care about TheDAO hard-fork check whether to override the extra-data or not - if daoBlock := api.chainConfig.DAOForkBlock; daoBlock != nil { - // Check whether the block is among the fork extra-override range - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if header.Number.Cmp(daoBlock) >= 0 && header.Number.Cmp(limit) < 0 { - // Depending whether we support or oppose 
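The removed SendRawTransaction and mineBlock keep pending transactions in a per-sender map keyed by nonce and later drain them in nonce order from the account's current nonce, stopping at the first gap. A self-contained sketch of that bookkeeping, using simplified stand-in types rather than go-ethereum's:

```go
// Sketch of the per-sender nonce bookkeeping: SendRawTransaction stores
// transactions keyed by nonce, mineBlock later consumes them in order from the
// account's current nonce and stops at the first gap. tx/pool are stand-ins.
package main

import "fmt"

type tx struct {
	nonce uint64
	data  string
}

// pool maps sender -> nonce -> pending transaction.
type pool map[string]map[uint64]tx

func (p pool) add(sender string, t tx) {
	if p[sender] == nil {
		p[sender] = make(map[uint64]tx)
	}
	p[sender][t.nonce] = t
}

// drain returns the runnable transactions for sender, starting at stateNonce
// and stopping at the first missing nonce.
func (p pool) drain(sender string, stateNonce uint64) []tx {
	var out []tx
	for nonce := stateNonce; ; nonce++ {
		t, ok := p[sender][nonce]
		if !ok {
			break // gap in the nonces
		}
		out = append(out, t)
		delete(p[sender], nonce)
	}
	return out
}

func main() {
	p := make(pool)
	p.add("alice", tx{nonce: 0, data: "a0"})
	p.add("alice", tx{nonce: 1, data: "a1"})
	p.add("alice", tx{nonce: 3, data: "a3"}) // nonce 2 is missing

	fmt.Println(p.drain("alice", 0)) // [{0 a0} {1 a1}]; a3 waits for nonce 2
}
```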
the fork, override differently - if api.chainConfig.DAOForkSupport { - header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } else if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - header.Extra = []byte{} // If miner opposes, don't let it use the reserved extra-data - } - } - } - statedb, err := api.blockchain.StateAt(parent.Root()) - if err != nil { - return err - } - if api.chainConfig.DAOForkSupport && api.chainConfig.DAOForkBlock != nil && api.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { - misc.ApplyDAOHardFork(statedb) - } - gasPool := new(core.GasPool).AddGas(header.GasLimit) - txCount := 0 - var txs []*types.Transaction - var receipts []*types.Receipt - var blockFull = gasPool.Gas() < params.TxGas - for address := range api.txSenders { - if blockFull { - break - } - m := api.txMap[address] - for nonce := statedb.GetNonce(address); ; nonce++ { - if tx, ok := m[nonce]; ok { - // Try to apply transactions to the state - statedb.Prepare(tx.Hash(), common.Hash{}, txCount) - snap := statedb.Snapshot() - - receipt, err := core.ApplyTransaction( - api.chainConfig, - api.blockchain, - &api.author, - gasPool, - statedb, - header, tx, &header.GasUsed, *api.blockchain.GetVMConfig(), - ) - if err != nil { - statedb.RevertToSnapshot(snap) - break - } - txs = append(txs, tx) - receipts = append(receipts, receipt) - delete(m, nonce) - if len(m) == 0 { - // Last tx for the sender - delete(api.txMap, address) - delete(api.txSenders, address) - } - txCount++ - if gasPool.Gas() < params.TxGas { - blockFull = true - break - } - } else { - break // Gap in the nonces - } - } - } - block, err := api.engine.FinalizeAndAssemble(api.blockchain, header, statedb, txs, []*types.Header{}, receipts) - if err != nil { - return err - } - return api.importBlock(block) -} - -func (api *RetestethAPI) importBlock(block *types.Block) error { - if _, err := api.blockchain.InsertChain([]*types.Block{block}); err != nil { - return err - } - fmt.Printf("Imported block %d, head is %d\n", block.NumberU64(), api.currentNumber()) - return nil -} - -func (api *RetestethAPI) ModifyTimestamp(ctx context.Context, interval uint64) (bool, error) { - api.blockInterval = interval - return true, nil -} - -func (api *RetestethAPI) ImportRawBlock(ctx context.Context, rawBlock hexutil.Bytes) (common.Hash, error) { - block := new(types.Block) - if err := rlp.DecodeBytes(rawBlock, block); err != nil { - return common.Hash{}, err - } - fmt.Printf("Importing block %d with parent hash: %x, genesisHash: %x\n", block.NumberU64(), block.ParentHash(), api.genesisHash) - if err := api.importBlock(block); err != nil { - return common.Hash{}, err - } - return block.Hash(), nil -} - -func (api *RetestethAPI) RewindToBlock(ctx context.Context, newHead uint64) (bool, error) { - if err := api.blockchain.SetHead(newHead); err != nil { - return false, err - } - // When we rewind, the transaction pool should be cleaned out. 
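The DAO check at the start of this hunk overrides the header extra-data only for blocks in the window [daoForkBlock, daoForkBlock+params.DAOForkExtraRange). A stdlib-only sketch of that range test; the window length of 10 blocks is an assumption made for the illustration, not something taken from this diff.

```go
// Sketch of the DAO extra-data override window checked in the removed mineBlock.
// The extraRange value of 10 below is assumed for illustration.
package main

import (
	"fmt"
	"math/big"
)

func inDAOExtraWindow(number, daoBlock, extraRange *big.Int) bool {
	limit := new(big.Int).Add(daoBlock, extraRange)
	return number.Cmp(daoBlock) >= 0 && number.Cmp(limit) < 0
}

func main() {
	dao := big.NewInt(1_920_000)
	rng := big.NewInt(10)
	fmt.Println(inDAOExtraWindow(big.NewInt(1_920_000), dao, rng)) // true: extra-data is overridden
	fmt.Println(inDAOExtraWindow(big.NewInt(1_920_010), dao, rng)) // false: past the window
}
```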
- api.txMap = make(map[common.Address]map[uint64]*types.Transaction) - api.txSenders = make(map[common.Address]struct{}) - return true, nil -} - -var emptyListHash common.Hash = common.HexToHash("0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347") - -func (api *RetestethAPI) GetLogHash(ctx context.Context, txHash common.Hash) (common.Hash, error) { - receipt, _, _, _ := rawdb.ReadReceipt(api.ethDb, txHash, api.chainConfig) - if receipt == nil { - return emptyListHash, nil - } else { - if logListRlp, err := rlp.EncodeToBytes(receipt.Logs); err != nil { - return common.Hash{}, err - } else { - return common.BytesToHash(crypto.Keccak256(logListRlp)), nil - } - } -} - -func (api *RetestethAPI) BlockNumber(ctx context.Context) (uint64, error) { - return api.currentNumber(), nil -} - -func (api *RetestethAPI) GetBlockByNumber(ctx context.Context, blockNr math.HexOrDecimal64, fullTx bool) (map[string]interface{}, error) { - block := api.blockchain.GetBlockByNumber(uint64(blockNr)) - if block != nil { - response, err := RPCMarshalBlock(block, true, fullTx) - if err != nil { - return nil, err - } - response["author"] = response["miner"] - response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), uint64(blockNr))) - return response, err - } - return nil, fmt.Errorf("block %d not found", blockNr) -} - -func (api *RetestethAPI) GetBlockByHash(ctx context.Context, blockHash common.Hash, fullTx bool) (map[string]interface{}, error) { - block := api.blockchain.GetBlockByHash(blockHash) - if block != nil { - response, err := RPCMarshalBlock(block, true, fullTx) - if err != nil { - return nil, err - } - response["author"] = response["miner"] - response["totalDifficulty"] = (*hexutil.Big)(api.blockchain.GetTd(block.Hash(), block.Number().Uint64())) - return response, err - } - return nil, fmt.Errorf("block 0x%x not found", blockHash) -} - -func (api *RetestethAPI) AccountRange(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - addressHash *math.HexOrDecimal256, maxResults uint64, -) (AccountRangeResult, error) { - var ( - header *types.Header - block *types.Block - ) - if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { - blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) - header = api.blockchain.GetHeaderByHash(blockHash) - block = api.blockchain.GetBlockByHash(blockHash) - //fmt.Printf("Account range: %x, txIndex %d, start: %x, maxResults: %d\n", blockHash, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) - } else { - blockNumber := (*big.Int)(blockHashOrNumber).Uint64() - header = api.blockchain.GetHeaderByNumber(blockNumber) - block = api.blockchain.GetBlockByNumber(blockNumber) - //fmt.Printf("Account range: %d, txIndex %d, start: %x, maxResults: %d\n", blockNumber, txIndex, common.BigToHash((*big.Int)(addressHash)), maxResults) - } - parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) - var root common.Hash - var statedb *state.StateDB - var err error - if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { - root = header.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return AccountRangeResult{}, err - } - } else { - root = parentHeader.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return AccountRangeResult{}, err - } - // Recompute transactions up to the target index. 
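The emptyListHash constant used by the removed GetLogHash is simply the Keccak-256 hash of the RLP encoding of an empty list (the single byte 0xc0), i.e. what GetLogHash returns when a transaction has no receipt or no logs. A short sketch that recomputes it, assuming the go-ethereum module is available for the rlp and crypto packages:

```go
// Recompute the emptyListHash constant: Keccak-256 over the RLP encoding of an
// empty list (0xc0). Assumes the go-ethereum module is available.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	enc, err := rlp.EncodeToBytes([]*types.Log{}) // an empty log list encodes to 0xc0
	if err != nil {
		panic(err)
	}
	fmt.Printf("rlp(empty list) = %x\n", enc)                   // c0
	fmt.Printf("keccak256       = %x\n", crypto.Keccak256(enc)) // 1dcc4de8...40d49347
}
```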
- signer := types.MakeSigner(api.blockchain.Config(), block.Number()) - for idx, tx := range block.Transactions() { - // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) - context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) - // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return AccountRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - // Ensure any modifications are committed to the state - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - root = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) - if idx == int(txIndex) { - // This is to make sure root can be opened by OpenTrie - root, err = statedb.Commit(api.chainConfig.IsEIP158(block.Number())) - if err != nil { - return AccountRangeResult{}, err - } - break - } - } - } - accountTrie, err := statedb.Database().OpenTrie(root) - if err != nil { - return AccountRangeResult{}, err - } - it := trie.NewIterator(accountTrie.NodeIterator(common.BigToHash((*big.Int)(addressHash)).Bytes())) - result := AccountRangeResult{AddressMap: make(map[common.Hash]common.Address)} - for i := 0; i < int(maxResults) && it.Next(); i++ { - if preimage := accountTrie.GetKey(it.Key); preimage != nil { - result.AddressMap[common.BytesToHash(it.Key)] = common.BytesToAddress(preimage) - } - } - //fmt.Printf("Number of entries returned: %d\n", len(result.AddressMap)) - // Add the 'next key' so clients can continue downloading. - if it.Next() { - next := common.BytesToHash(it.Key) - result.NextKey = next - } - return result, nil -} - -func (api *RetestethAPI) GetBalance(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (*math.HexOrDecimal256, error) { - //fmt.Printf("GetBalance %x, block %d\n", address, blockNr) - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return nil, err - } - return (*math.HexOrDecimal256)(statedb.GetBalance(address)), nil -} - -func (api *RetestethAPI) GetCode(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (hexutil.Bytes, error) { - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return nil, err - } - return statedb.GetCode(address), nil -} - -func (api *RetestethAPI) GetTransactionCount(ctx context.Context, address common.Address, blockNr math.HexOrDecimal64) (uint64, error) { - header := api.blockchain.GetHeaderByNumber(uint64(blockNr)) - statedb, err := api.blockchain.StateAt(header.Root) - if err != nil { - return 0, err - } - return statedb.GetNonce(address), nil -} - -func (api *RetestethAPI) StorageRangeAt(ctx context.Context, - blockHashOrNumber *math.HexOrDecimal256, txIndex uint64, - address common.Address, - begin *math.HexOrDecimal256, maxResults uint64, -) (StorageRangeResult, error) { - var ( - header *types.Header - block *types.Block - ) - if (*big.Int)(blockHashOrNumber).Cmp(big.NewInt(math.MaxInt64)) > 0 { - blockHash := common.BigToHash((*big.Int)(blockHashOrNumber)) - header = api.blockchain.GetHeaderByHash(blockHash) - block = api.blockchain.GetBlockByHash(blockHash) - //fmt.Printf("Storage range: %x, txIndex %d, addr: %x, start: %x, maxResults: 
%d\n", - // blockHash, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) - } else { - blockNumber := (*big.Int)(blockHashOrNumber).Uint64() - header = api.blockchain.GetHeaderByNumber(blockNumber) - block = api.blockchain.GetBlockByNumber(blockNumber) - //fmt.Printf("Storage range: %d, txIndex %d, addr: %x, start: %x, maxResults: %d\n", - // blockNumber, txIndex, address, common.BigToHash((*big.Int)(begin)), maxResults) - } - parentHeader := api.blockchain.GetHeaderByHash(header.ParentHash) - var root common.Hash - var statedb *state.StateDB - var err error - if parentHeader == nil || int(txIndex) >= len(block.Transactions()) { - root = header.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return StorageRangeResult{}, err - } - } else { - root = parentHeader.Root - statedb, err = api.blockchain.StateAt(root) - if err != nil { - return StorageRangeResult{}, err - } - // Recompute transactions up to the target index. - signer := types.MakeSigner(api.blockchain.Config(), block.Number()) - for idx, tx := range block.Transactions() { - // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) - context := core.NewEVMContext(msg, block.Header(), api.blockchain, nil) - // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, statedb, api.blockchain.Config(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return StorageRangeResult{}, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - // Ensure any modifications are committed to the state - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - _ = statedb.IntermediateRoot(vmenv.ChainConfig().IsEIP158(block.Number())) - if idx == int(txIndex) { - // This is to make sure root can be opened by OpenTrie - _, err = statedb.Commit(vmenv.ChainConfig().IsEIP158(block.Number())) - if err != nil { - return StorageRangeResult{}, err - } - } - } - } - storageTrie := statedb.StorageTrie(address) - it := trie.NewIterator(storageTrie.NodeIterator(common.BigToHash((*big.Int)(begin)).Bytes())) - result := StorageRangeResult{Storage: make(map[common.Hash]SRItem)} - for i := 0; /*i < int(maxResults) && */ it.Next(); i++ { - if preimage := storageTrie.GetKey(it.Key); preimage != nil { - key := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(preimage)) - v, _, err := rlp.SplitString(it.Value) - if err != nil { - return StorageRangeResult{}, err - } - value := (*math.HexOrDecimal256)(big.NewInt(0).SetBytes(v)) - ks, _ := key.MarshalText() - vs, _ := value.MarshalText() - if len(ks)%2 != 0 { - ks = append(append(append([]byte{}, ks[:2]...), byte('0')), ks[2:]...) - } - if len(vs)%2 != 0 { - vs = append(append(append([]byte{}, vs[:2]...), byte('0')), vs[2:]...) 
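The byte-slicing at the end of StorageRangeAt pads odd-length hex strings such as "0x1" to an even number of nibbles ("0x01"), since math.HexOrDecimal256 marshals without leading zeros. The same padding in isolation:

```go
// The odd-nibble padding applied to marshalled storage keys and values:
// "0x1" becomes "0x01" so the client always sees an even number of hex digits.
package main

import "fmt"

func padOdd(s []byte) []byte {
	if len(s)%2 != 0 {
		// Keep the "0x" prefix and insert a '0' before the digits.
		return append(append(append([]byte{}, s[:2]...), '0'), s[2:]...)
	}
	return s
}

func main() {
	fmt.Println(string(padOdd([]byte("0x1"))))   // 0x01
	fmt.Println(string(padOdd([]byte("0x123")))) // 0x0123
	fmt.Println(string(padOdd([]byte("0x42"))))  // 0x42 (unchanged)
}
```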
- } - result.Storage[common.BytesToHash(it.Key)] = SRItem{ - Key: string(ks), - Value: string(vs), - } - } - } - if it.Next() { - result.Complete = false - } else { - result.Complete = true - } - return result, nil -} - -func (api *RetestethAPI) ClientVersion(ctx context.Context) (string, error) { - return "Geth-" + params.VersionWithCommit(gitCommit, gitDate), nil -} - -func retesteth(ctx *cli.Context) error { - log.Info("Welcome to retesteth!") - // register signer API with server - var ( - extapiURL string - ) - apiImpl := &RetestethAPI{} - var testApi RetestethTestAPI = apiImpl - var ethApi RetestethEthAPI = apiImpl - var debugApi RetestethDebugAPI = apiImpl - var web3Api RetestWeb3API = apiImpl - rpcAPI := []rpc.API{ - { - Namespace: "test", - Public: true, - Service: testApi, - Version: "1.0", - }, - { - Namespace: "eth", - Public: true, - Service: ethApi, - Version: "1.0", - }, - { - Namespace: "debug", - Public: true, - Service: debugApi, - Version: "1.0", - }, - { - Namespace: "web3", - Public: true, - Service: web3Api, - Version: "1.0", - }, - } - vhosts := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPVirtualHostsFlag.Name)) - cors := utils.SplitAndTrim(ctx.GlobalString(utils.HTTPCORSDomainFlag.Name)) - - // register apis and create handler stack - srv := rpc.NewServer() - err := node.RegisterApisFromWhitelist(rpcAPI, []string{"test", "eth", "debug", "web3"}, srv, false) - if err != nil { - utils.Fatalf("Could not register RPC apis: %w", err) - } - handler := node.NewHTTPHandlerStack(srv, cors, vhosts) - - // start http server - var RetestethHTTPTimeouts = rpc.HTTPTimeouts{ - ReadTimeout: 120 * time.Second, - WriteTimeout: 120 * time.Second, - IdleTimeout: 120 * time.Second, - } - httpEndpoint := fmt.Sprintf("%s:%d", ctx.GlobalString(utils.HTTPListenAddrFlag.Name), ctx.Int(rpcPortFlag.Name)) - httpServer, _, err := node.StartHTTPEndpoint(httpEndpoint, RetestethHTTPTimeouts, handler) - if err != nil { - utils.Fatalf("Could not start RPC api: %v", err) - } - extapiURL = fmt.Sprintf("http://%s", httpEndpoint) - log.Info("HTTP endpoint opened", "url", extapiURL) - - defer func() { - // Don't bother imposing a timeout here. - httpServer.Shutdown(context.Background()) - log.Info("HTTP endpoint closed", "url", httpEndpoint) - }() - - abortChan := make(chan os.Signal, 11) - signal.Notify(abortChan, os.Interrupt) - - sig := <-abortChan - log.Info("Exiting...", "signal", sig) - return nil -} diff --git a/cmd/geth/retesteth_copypaste.go b/cmd/geth/retesteth_copypaste.go deleted file mode 100644 index e2795af7f9..0000000000 --- a/cmd/geth/retesteth_copypaste.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of go-ethereum. -// -// go-ethereum is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// go-ethereum is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with go-ethereum. If not, see . 
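The removed retesteth entry point follows the usual go-ethereum pattern for exposing Go receivers over JSON-RPC: create an rpc.Server, register each service under a namespace, and serve it over HTTP. A minimal sketch of that pattern; CalcService and the listen address are made up for illustration, while rpc.NewServer, RegisterName and serving *rpc.Server over HTTP are existing go-ethereum APIs.

```go
// Minimal sketch of the JSON-RPC service registration pattern used above.
// CalcService is a hypothetical receiver; exported methods become RPC methods
// under the registered namespace, e.g. calc_add.
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/ethereum/go-ethereum/rpc"
)

type CalcService struct{}

func (s *CalcService) Add(ctx context.Context, a, b int) (int, error) { return a + b, nil }

func main() {
	srv := rpc.NewServer()
	if err := srv.RegisterName("calc", new(CalcService)); err != nil {
		log.Fatal(err)
	}
	// curl -X POST -H 'Content-Type: application/json' \
	//   --data '{"jsonrpc":"2.0","id":1,"method":"calc_add","params":[2,3]}' http://127.0.0.1:8545
	log.Fatal(http.ListenAndServe("127.0.0.1:8545", srv))
}
```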
- -package main - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" -) - -// RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction -type RPCTransaction struct { - BlockHash common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex hexutil.Uint `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` -} - -// newRPCTransaction returns a transaction that will serialize to the RPC -// representation, with the given location metadata set (if available). -func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction { - var signer types.Signer = types.FrontierSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } - from, _ := types.Sender(signer, tx) - v, r, s := tx.RawSignatureValues() - - result := &RPCTransaction{ - From: from, - Gas: hexutil.Uint64(tx.Gas()), - GasPrice: (*hexutil.Big)(tx.GasPrice()), - Hash: tx.Hash(), - Input: hexutil.Bytes(tx.Data()), - Nonce: hexutil.Uint64(tx.Nonce()), - To: tx.To(), - Value: (*hexutil.Big)(tx.Value()), - V: (*hexutil.Big)(v), - R: (*hexutil.Big)(r), - S: (*hexutil.Big)(s), - } - if blockHash != (common.Hash{}) { - result.BlockHash = blockHash - result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) - result.TransactionIndex = hexutil.Uint(index) - } - return result -} - -// newRPCTransactionFromBlockIndex returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockIndex(b *types.Block, index uint64) *RPCTransaction { - txs := b.Transactions() - if index >= uint64(len(txs)) { - return nil - } - return newRPCTransaction(txs[index], b.Hash(), b.NumberU64(), index) -} - -// newRPCTransactionFromBlockHash returns a transaction that will serialize to the RPC representation. -func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransaction { - for idx, tx := range b.Transactions() { - if tx.Hash() == hash { - return newRPCTransactionFromBlockIndex(b, uint64(idx)) - } - } - return nil -} - -// RPCMarshalBlock converts the given block to the RPC output which depends on fullTx. If inclTx is true transactions are -// returned. When fullTx is true the returned block contains full transaction details, otherwise it will only contain -// transaction hashes. 
-func RPCMarshalBlock(b *types.Block, inclTx bool, fullTx bool) (map[string]interface{}, error) { - head := b.Header() // copies the header once - fields := map[string]interface{}{ - "number": (*hexutil.Big)(head.Number), - "hash": b.Hash(), - "parentHash": head.ParentHash, - "nonce": head.Nonce, - "mixHash": head.MixDigest, - "sha3Uncles": head.UncleHash, - "logsBloom": head.Bloom, - "stateRoot": head.Root, - "miner": head.Coinbase, - "difficulty": (*hexutil.Big)(head.Difficulty), - "extraData": hexutil.Bytes(head.Extra), - "size": hexutil.Uint64(b.Size()), - "gasLimit": hexutil.Uint64(head.GasLimit), - "gasUsed": hexutil.Uint64(head.GasUsed), - "timestamp": hexutil.Uint64(head.Time), - "transactionsRoot": head.TxHash, - "receiptsRoot": head.ReceiptHash, - } - - if inclTx { - formatTx := func(tx *types.Transaction) (interface{}, error) { - return tx.Hash(), nil - } - if fullTx { - formatTx = func(tx *types.Transaction) (interface{}, error) { - return newRPCTransactionFromBlockHash(b, tx.Hash()), nil - } - } - txs := b.Transactions() - transactions := make([]interface{}, len(txs)) - var err error - for i, tx := range txs { - if transactions[i], err = formatTx(tx); err != nil { - return nil, err - } - } - fields["transactions"] = transactions - } - - uncles := b.Uncles() - uncleHashes := make([]common.Hash, len(uncles)) - for i, uncle := range uncles { - uncleHashes[i] = uncle.Hash() - } - fields["uncles"] = uncleHashes - - return fields, nil -} diff --git a/cmd/geth/run_test.go b/cmd/geth/run_test.go index f7b735b84c..527c38a657 100644 --- a/cmd/geth/run_test.go +++ b/cmd/geth/run_test.go @@ -70,12 +70,12 @@ func runGeth(t *testing.T, args ...string) *testgeth { tt := &testgeth{} tt.TestCmd = cmdtest.NewTestCmd(t, tt) for i, arg := range args { - switch { - case arg == "-datadir" || arg == "--datadir": + switch arg { + case "--datadir": if i < len(args)-1 { tt.Datadir = args[i+1] } - case arg == "-etherbase" || arg == "--etherbase": + case "--miner.etherbase": if i < len(args)-1 { tt.Etherbase = args[i+1] } @@ -84,7 +84,7 @@ func runGeth(t *testing.T, args ...string) *testgeth { if tt.Datadir == "" { tt.Datadir = tmpdir(t) tt.Cleanup = func() { os.RemoveAll(tt.Datadir) } - args = append([]string{"-datadir", tt.Datadir}, args...) + args = append([]string{"--datadir", tt.Datadir}, args...) // Remove the temporary datadir if something fails below. defer func() { if t.Failed() { diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go new file mode 100644 index 0000000000..b59c530c27 --- /dev/null +++ b/cmd/geth/snapshot.go @@ -0,0 +1,433 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . 
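The deleted RPC helper types wrap their numeric and byte fields in hexutil types because JSON-RPC encodes quantities and data as 0x-prefixed hex strings; hexutil.Uint64, hexutil.Big and hexutil.Bytes produce that encoding automatically when marshalled. A small demonstration, assuming the go-ethereum module is available:

```go
// Why the deleted RPC types use hexutil wrappers: they marshal to the
// 0x-prefixed hex strings expected on the JSON-RPC wire.
package main

import (
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	out, _ := json.Marshal(map[string]interface{}{
		"gas":   hexutil.Uint64(21000),
		"value": (*hexutil.Big)(big.NewInt(1000000000)),
		"input": hexutil.Bytes{0xde, 0xad, 0xbe, 0xef},
	})
	fmt.Println(string(out))
	// {"gas":"0x5208","input":"0xdeadbeef","value":"0x3b9aca00"}
}
```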
+ +package main + +import ( + "bytes" + "errors" + "time" + + "github.com/ethereum/go-ethereum/cmd/utils" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/pruner" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + cli "gopkg.in/urfave/cli.v1" +) + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = crypto.Keccak256(nil) +) + +var ( + snapshotCommand = cli.Command{ + Name: "snapshot", + Usage: "A set of commands based on the snapshot", + Category: "MISCELLANEOUS COMMANDS", + Description: "", + Subcommands: []cli.Command{ + { + Name: "prune-state", + Usage: "Prune stale ethereum state data based on the snapshot", + ArgsUsage: "", + Action: utils.MigrateFlags(pruneState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + utils.CacheTrieJournalFlag, + utils.BloomFilterSizeFlag, + }, + Description: ` +geth snapshot prune-state +will prune historical state data with the help of the state snapshot. +All trie nodes and contract codes that do not belong to the specified +version state will be deleted from the database. After pruning, only +two version states are available: genesis and the specific one. + +The default pruning target is the HEAD-127 state. + +WARNING: It's necessary to delete the trie clean cache after the pruning. +If you specify another directory for the trie clean cache via "--cache.trie.journal" +during the use of Geth, please also specify it here for correct deletion. Otherwise +the trie clean cache with default directory will be deleted. +`, + }, + { + Name: "verify-state", + Usage: "Recalculate state hash based on the snapshot for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(verifyState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot verify-state +will traverse the whole accounts and storages set based on the specified +snapshot and recalculate the root hash of state for verification. +In other words, this command does the snapshot to trie conversion. +`, + }, + { + Name: "traverse-state", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot traverse-state +will traverse the whole state from the given state root and will abort if any +referenced trie node or contract code is missing. This command can be used for +state integrity verification. The default checking target is the HEAD state. + +It's also usable without snapshot enabled. 
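The emptyRoot and emptyCode sentinels declared at the top of the new snapshot.go are derived values rather than magic numbers: emptyRoot is the Keccak-256 hash of the RLP encoding of an empty string (the root of an empty trie), and emptyCode is the Keccak-256 hash of empty input. A short sketch that recomputes both, assuming the go-ethereum module is available:

```go
// Recompute the two sentinels used by the snapshot commands.
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	emptyString, _ := rlp.EncodeToBytes("") // encodes to the single byte 0x80
	fmt.Printf("emptyRoot = %x\n", crypto.Keccak256(emptyString)) // 56e81f17...e363b421
	fmt.Printf("emptyCode = %x\n", crypto.Keccak256(nil))         // c5d24601...5a470
}
```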
+`, + }, + { + Name: "traverse-rawstate", + Usage: "Traverse the state with given root hash for verification", + ArgsUsage: "", + Action: utils.MigrateFlags(traverseRawState), + Category: "MISCELLANEOUS COMMANDS", + Flags: []cli.Flag{ + utils.DataDirFlag, + utils.RopstenFlag, + utils.RinkebyFlag, + utils.GoerliFlag, + }, + Description: ` +geth snapshot traverse-rawstate +will traverse the whole state from the given root and will abort if any referenced +trie node or contract code is missing. This command can be used for state integrity +verification. The default checking target is the HEAD state. It's basically identical +to traverse-state, but the check granularity is smaller. + +It's also usable without snapshot enabled. +`, + }, + }, + } +) + +func pruneState(ctx *cli.Context) error { + stack, config := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true) + defer chaindb.Close() + + pruner, err := pruner.NewPruner(chaindb, chain.CurrentBlock().Header(), stack.ResolvePath(""), stack.ResolvePath(config.Eth.TrieCleanCacheJournal), ctx.GlobalUint64(utils.BloomFilterSizeFlag.Name)) + if err != nil { + log.Error("Failed to open snapshot tree", "error", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var targetRoot common.Hash + if ctx.NArg() == 1 { + targetRoot, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + } + if err = pruner.Prune(targetRoot); err != nil { + log.Error("Failed to prune state", "error", err) + return err + } + return nil +} + +func verifyState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true) + defer chaindb.Close() + + snaptree, err := snapshot.New(chaindb, trie.NewDatabase(chaindb), 256, chain.CurrentBlock().Root(), false, false, false) + if err != nil { + log.Error("Failed to open snapshot tree", "error", err) + return err + } + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + var root = chain.CurrentBlock().Root() + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + } + if err := snaptree.Verify(root); err != nil { + log.Error("Failed to verfiy state", "error", err) + return err + } + log.Info("Verified the state") + return nil +} + +// traverseState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. 
+func traverseState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true) + defer chaindb.Close() + + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + // Use the HEAD root as the default + head := chain.CurrentBlock() + if head == nil { + log.Error("Head block is missing") + return errors.New("head block is missing") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = head.Root() + log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "error", err) + return err + } + var ( + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := trie.NewIterator(t.NodeIterator(nil)) + for accIter.Next() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.Value, &acc); err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return err + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return err + } + storageIter := trie.NewIterator(storageTrie.NodeIterator(nil)) + for storageIter.Next() { + slots += 1 + } + if storageIter.Err != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Err) + return storageIter.Err + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash)) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + if accIter.Err != nil { + log.Error("Failed to traverse state trie", "root", root, "error", accIter.Err) + return accIter.Err + } + log.Info("State is complete", "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// traverseRawState is a helper function used for pruning verification. +// Basically it just iterates the trie, ensure all nodes and associated +// contract codes are present. It's basically identical to traverseState +// but it will check each trie node. 
+func traverseRawState(ctx *cli.Context) error { + stack, _ := makeConfigNode(ctx) + defer stack.Close() + + chain, chaindb := utils.MakeChain(ctx, stack, true) + defer chaindb.Close() + + if ctx.NArg() > 1 { + log.Error("Too many arguments given") + return errors.New("too many arguments") + } + // Use the HEAD root as the default + head := chain.CurrentBlock() + if head == nil { + log.Error("Head block is missing") + return errors.New("head block is missing") + } + var ( + root common.Hash + err error + ) + if ctx.NArg() == 1 { + root, err = parseRoot(ctx.Args()[0]) + if err != nil { + log.Error("Failed to resolve state root", "error", err) + return err + } + log.Info("Start traversing the state", "root", root) + } else { + root = head.Root() + log.Info("Start traversing the state", "root", root, "number", head.NumberU64()) + } + triedb := trie.NewDatabase(chaindb) + t, err := trie.NewSecure(root, triedb) + if err != nil { + log.Error("Failed to open trie", "root", root, "error", err) + return err + } + var ( + nodes int + accounts int + slots int + codes int + lastReport time.Time + start = time.Now() + ) + accIter := t.NodeIterator(nil) + for accIter.Next(true) { + nodes += 1 + node := accIter.Hash() + + if node != (common.Hash{}) { + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(account)", "hash", node) + return errors.New("missing account") + } + } + // If it's a leaf node, yes we are touching an account, + // dig into the storage trie further. + if accIter.Leaf() { + accounts += 1 + var acc state.Account + if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { + log.Error("Invalid account encountered during traversal", "error", err) + return errors.New("invalid account") + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, triedb) + if err != nil { + log.Error("Failed to open storage trie", "root", acc.Root, "error", err) + return errors.New("missing storage trie") + } + storageIter := storageTrie.NodeIterator(nil) + for storageIter.Next(true) { + nodes += 1 + node := storageIter.Hash() + + // Check the present for non-empty hash node(embedded node doesn't + // have their own hash). + if node != (common.Hash{}) { + blob := rawdb.ReadTrieNode(chaindb, node) + if len(blob) == 0 { + log.Error("Missing trie node(storage)", "hash", node) + return errors.New("missing storage") + } + } + // Bump the counter if it's leaf node. 
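Both traversal commands hinge on the same decode step: every leaf in the account trie is an RLP-encoded state.Account, and its Root and CodeHash fields decide whether a storage trie or contract code must also be present in the database. A sketch of that round trip, assuming the go-ethereum module at this revision (where state.Account is the exported account type):

```go
// The decode step shared by traverse-state and traverse-rawstate.
package main

import (
	"bytes"
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

var emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

func main() {
	acc := state.Account{
		Nonce:    1,
		Balance:  big.NewInt(1000),
		Root:     emptyRoot,             // no storage slots
		CodeHash: crypto.Keccak256(nil), // no contract code
	}
	blob, _ := rlp.EncodeToBytes(acc)

	var decoded state.Account
	if err := rlp.DecodeBytes(blob, &decoded); err != nil {
		panic(err)
	}
	// A plain externally-owned account: the traversal needs nothing beyond the leaf.
	fmt.Println(decoded.Root == emptyRoot)                            // true
	fmt.Println(bytes.Equal(decoded.CodeHash, crypto.Keccak256(nil))) // true
}
```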
+ if storageIter.Leaf() { + slots += 1 + } + } + if storageIter.Error() != nil { + log.Error("Failed to traverse storage trie", "root", acc.Root, "error", storageIter.Error()) + return storageIter.Error() + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash)) + if len(code) == 0 { + log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey())) + return errors.New("missing code") + } + codes += 1 + } + if time.Since(lastReport) > time.Second*8 { + log.Info("Traversing state", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + lastReport = time.Now() + } + } + } + if accIter.Error() != nil { + log.Error("Failed to traverse state trie", "root", root, "error", accIter.Error()) + return accIter.Error() + } + log.Info("State is complete", "nodes", nodes, "accounts", accounts, "slots", slots, "codes", codes, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +func parseRoot(input string) (common.Hash, error) { + var h common.Hash + if err := h.UnmarshalText([]byte(input)); err != nil { + return h, err + } + return h, nil +} diff --git a/cmd/geth/testdata/vcheck/data.json b/cmd/geth/testdata/vcheck/data.json new file mode 100644 index 0000000000..e7ee2bf7e4 --- /dev/null +++ b/cmd/geth/testdata/vcheck/data.json @@ -0,0 +1,61 @@ +[ + { + "name": "CorruptedDAG", + "uid": "GETH-2020-01", + "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.2(1|2|3)-.*" + }, + { + "name": "GoCrash", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552" + ], + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": 
"Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "GethCrash", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Full details to be disclosed at a later date", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "check": "Geth\\/v1\\.9.(16|17).*$" + } +] diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 0000000000..f9066d4fe0 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 0000000000..a89a83d21a --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 0000000000..6fd33b19a3 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisig-sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/minisign.pub b/cmd/geth/testdata/vcheck/minisign.pub new file mode 100644 index 0000000000..183dce5f6b --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisign.pub @@ -0,0 +1,2 @@ +untrusted comment: minisign public key 284E00B52C269624 +RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp diff --git a/cmd/geth/testdata/vcheck/minisign.sec b/cmd/geth/testdata/vcheck/minisign.sec new file mode 100644 index 0000000000..5c50715b20 --- /dev/null +++ b/cmd/geth/testdata/vcheck/minisign.sec @@ -0,0 +1,2 @@ +untrusted comment: minisign encrypted secret key +RWRTY0Iyz8kmPMKrqk6DCtlO9a33akKiaOQG1aLolqDxs52qvPoAAAACAAAAAAAAAEAAAAAArEiggdvyn6+WzTprirLtgiYQoU+ihz/HyGgjhuF+Pz2ddMduyCO+xjCHeq+vgVVW039fbsI8hW6LRGJZLBKV5/jdxCXAVVQE7qTQ6xpEdO0z8Z731/pV1hlspQXG2PNd16NMtwd9dWw= diff 
--git a/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig b/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig new file mode 100644 index 0000000000..3d5fcacf9a --- /dev/null +++ b/cmd/geth/testdata/vcheck/signify-sigs/data.json.sig @@ -0,0 +1,2 @@ +untrusted comment: verify with ./signifykey.pub +RWSKLNhZb0KdAbhRUhW2LQZXdnwttu2SYhM9EuC4mMgOJB85h7/YIPupf8/ldTs4N8e9Y/fhgdY40q5LQpt5IFC62fq0v8U1/w8= diff --git a/cmd/geth/testdata/vcheck/signifykey.pub b/cmd/geth/testdata/vcheck/signifykey.pub new file mode 100644 index 0000000000..328f973ab4 --- /dev/null +++ b/cmd/geth/testdata/vcheck/signifykey.pub @@ -0,0 +1,2 @@ +untrusted comment: signify public key +RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/ diff --git a/cmd/geth/testdata/vcheck/signifykey.sec b/cmd/geth/testdata/vcheck/signifykey.sec new file mode 100644 index 0000000000..3279a2e58b --- /dev/null +++ b/cmd/geth/testdata/vcheck/signifykey.sec @@ -0,0 +1,2 @@ +untrusted comment: signify secret key +RWRCSwAAACpLQDLawSQCtI7eAVIvaiHzjTsTyJsfV5aKLNhZb0KdAWeICXJGa93/bHAcsY6jUh9I8RdEcDWEoGxmaXZC+IdVBPxDpkix9fBRGEUdKWHi3dOfqME0YRzErWI5AVg3cRw= diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 new file mode 100644 index 0000000000..f9066d4fe0 --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.1 @@ -0,0 +1,4 @@ +untrusted comment: signature from minisign secret key +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: timestamp:1605618622 file:vulnerabilities.json +osAPs4QPdDkmiWQxqeMIzYv/b+ZGxJ+19Sbrk1Cpq4t2gHBT+lqFtwL3OCzKWWyjGRTmHfsVGBYpzEdPRQ0/BQ== diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 new file mode 100644 index 0000000000..a89a83d21a --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.2 @@ -0,0 +1,4 @@ +untrusted comment: Here's a comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 new file mode 100644 index 0000000000..6fd33b19a3 --- /dev/null +++ b/cmd/geth/testdata/vcheck/sigs/vulnerabilities.json.minisig.3 @@ -0,0 +1,4 @@ +untrusted comment: One more (untrusted) comment +RWQkliYstQBOKFQFQTjmCd6TPw07VZyWFSB3v4+1BM1kv8eHLE5FDy2OkPEqtdaL53xftlrHoJQie0uCcovdlSV8kpyxiLrxEQ0= +trusted comment: Here's a trusted comment +3CnkIuz9MEDa7uNyGZAbKZhuirwfiqm7E1uQHrd2SiO4Y8+Akw9vs052AyKw0s5nhbYHCZE2IMQdHNjKwxEGAQ== diff --git a/cmd/geth/testdata/vcheck/vulnerabilities.json b/cmd/geth/testdata/vcheck/vulnerabilities.json new file mode 100644 index 0000000000..36509f95a9 --- /dev/null +++ b/cmd/geth/testdata/vcheck/vulnerabilities.json @@ -0,0 +1,70 @@ +[ + { + "name": "CorruptedDAG", + "uid": "GETH-2020-01", + "summary": "Mining nodes will generate erroneous PoW on epochs > `385`.", + "description": "A mining flaw could cause miners to erroneously calculate PoW, due to an index overflow, if DAG size is exceeding the maximum 32 bit unsigned value.\n\nThis occurred on the ETC chain on 2020-11-06. 
This is likely to trigger for ETH mainnet around block `11550000`/epoch `385`, slated to occur early January 2021.\n\nThis issue is relevant only for miners, non-mining nodes are unaffected, since non-mining nodes use a smaller verification cache instead of a full DAG.", + "links": [ + "https://github.com/ethereum/go-ethereum/pull/21793", + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/commit/567d41d9363706b4b13ce0903804e8acf214af49", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-v592-xf75-856p" + ], + "introduced": "v1.6.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Medium", + "CVE": "CVE-2020-26240", + "check": "Geth\\/v1\\.(6|7|8)\\..*|Geth\\/v1\\.9\\.\\d-.*|Geth\\/v1\\.9\\.1.*|Geth\\/v1\\.9\\.2(0|1|2|3)-.*" + }, + { + "name": "Denial of service due to Go CVE-2020-28362", + "uid": "GETH-2020-02", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing, due to an underlying bug in Go (CVE-2020-28362) versions < `1.15.5`, or `<1.14.12`", + "description": "The DoS issue can be used to crash all Geth nodes during block processing, the effects of which would be that a major part of the Ethereum network went offline.\n\nOutside of Go-Ethereum, the issue is most likely relevant for all forks of Geth (such as TurboGeth or ETC’s core-geth) which is built with versions of Go which contains the vulnerability.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://groups.google.com/g/golang-announce/c/NpBGTTmKzpM", + "https://github.com/golang/go/issues/42552", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-m6gx-rhvj-fh52" + ], + "introduced": "v0.0.0", + "fixed": "v1.9.24", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-28362", + "check": "Geth.*\\/go1\\.(11(.*)|12(.*)|13(.*)|14|14\\.(\\d|10|11|)|15|15\\.[0-4])$" + }, + { + "name": "ShallowCopy", + "uid": "GETH-2020-03", + "summary": "A consensus flaw in Geth, related to `datacopy` precompile", + "description": "Geth erroneously performed a 'shallow' copy when the precompiled `datacopy` (at `0x00...04`) was invoked. 
An attacker could deploy a contract that uses the shallow copy to corrupt the contents of the `RETURNDATA`, thus causing a consensus failure.", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-69v6-xc2j-r2jf" + ], + "introduced": "v1.9.7", + "fixed": "v1.9.17", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26241", + "check": "Geth\\/v1\\.9\\.(7|8|9|10|11|12|13|14|15|16).*$" + }, + { + "name": "GethCrash", + "uid": "GETH-2020-04", + "summary": "A denial-of-service issue can be used to crash Geth nodes during block processing", + "description": "Full details to be disclosed at a later date", + "links": [ + "https://blog.ethereum.org/2020/11/12/geth_security_release/", + "https://github.com/ethereum/go-ethereum/security/advisories/GHSA-jm5c-rv3w-w83m" + ], + "introduced": "v1.9.16", + "fixed": "v1.9.18", + "published": "2020-11-12", + "severity": "Critical", + "CVE": "CVE-2020-26242", + "check": "Geth\\/v1\\.9.(16|17).*$" + } +] diff --git a/cmd/geth/usage.go b/cmd/geth/usage.go index 237cb8d516..daea0afc4e 100644 --- a/cmd/geth/usage.go +++ b/cmd/geth/usage.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/cmd/utils" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/internal/flags" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) // AppHelpFlagGroups is the application flags, grouped by functionality. @@ -36,13 +36,15 @@ var AppHelpFlagGroups = []flags.FlagGroup{ configFileFlag, utils.DataDirFlag, utils.AncientFlag, + utils.MinFreeDiskSpaceFlag, utils.KeyStoreDirFlag, - utils.NoUSBFlag, + utils.USBFlag, utils.SmartCardDaemonPathFlag, utils.NetworkIdFlag, + utils.MainnetFlag, utils.GoerliFlag, utils.RinkebyFlag, - utils.YoloV2Flag, + utils.YoloV3Flag, utils.RopstenFlag, utils.SyncModeFlag, utils.ExitWhenSyncedFlag, @@ -65,6 +67,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.UltraLightFractionFlag, utils.UltraLightOnlyAnnounceFlag, utils.LightNoPruneFlag, + utils.LightNoSyncServeFlag, }, }, { @@ -114,6 +117,7 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.CacheGCFlag, utils.CacheSnapshotFlag, utils.CacheNoPrefetchFlag, + utils.CachePreimagesFlag, }, }, { @@ -134,18 +138,21 @@ var AppHelpFlagGroups = []flags.FlagGroup{ utils.HTTPListenAddrFlag, utils.HTTPPortFlag, utils.HTTPApiFlag, + utils.HTTPPathPrefixFlag, utils.HTTPCORSDomainFlag, utils.HTTPVirtualHostsFlag, utils.WSEnabledFlag, utils.WSListenAddrFlag, utils.WSPortFlag, utils.WSApiFlag, + utils.WSPathPrefixFlag, utils.WSAllowedOriginsFlag, utils.GraphQLEnabledFlag, utils.GraphQLCORSDomainFlag, utils.GraphQLVirtualHostsFlag, utils.RPCGlobalGasCapFlag, utils.RPCGlobalTxFeeCapFlag, + utils.AllowUnprotectedTxs, utils.JSpathFlag, utils.ExecFlag, utils.PreloadJSFlag, @@ -155,8 +162,6 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "NETWORKING", Flags: []cli.Flag{ utils.BootnodesFlag, - utils.LegacyBootnodesV4Flag, - utils.LegacyBootnodesV5Flag, utils.DNSDiscoveryFlag, utils.ListenPortFlag, utils.MaxPeersFlag, @@ -211,33 +216,23 @@ var AppHelpFlagGroups = []flags.FlagGroup{ Name: "METRICS AND STATS", Flags: metricsFlags, }, - { - Name: "WHISPER (deprecated)", - Flags: whisperFlags, - }, { Name: "ALIASED (deprecated)", - Flags: append([]cli.Flag{ + Flags: []cli.Flag{ + utils.NoUSBFlag, utils.LegacyRPCEnabledFlag, utils.LegacyRPCListenAddrFlag, utils.LegacyRPCPortFlag, utils.LegacyRPCCORSDomainFlag, utils.LegacyRPCVirtualHostsFlag, utils.LegacyRPCApiFlag, 
- utils.LegacyWSListenAddrFlag, - utils.LegacyWSPortFlag, - utils.LegacyWSAllowedOriginsFlag, - utils.LegacyWSApiFlag, - utils.LegacyGpoBlocksFlag, - utils.LegacyGpoPercentileFlag, - utils.LegacyGraphQLListenAddrFlag, - utils.LegacyGraphQLPortFlag, - }, debug.DeprecatedFlags...), + }, }, { Name: "MISC", Flags: []cli.Flag{ utils.SnapshotFlag, + utils.BloomFilterSizeFlag, cli.HelpFlag, }, }, diff --git a/cmd/geth/version_check.go b/cmd/geth/version_check.go new file mode 100644 index 0000000000..2101a69e98 --- /dev/null +++ b/cmd/geth/version_check.go @@ -0,0 +1,169 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strings" + + "github.com/ethereum/go-ethereum/log" + "github.com/jedisct1/go-minisign" + "gopkg.in/urfave/cli.v1" +) + +var gethPubKeys []string = []string{ + //@holiman, minisign public key FB1D084D39BAEC24 + "RWQk7Lo5TQgd+wxBNZM+Zoy+7UhhMHaWKzqoes9tvSbFLJYZhNTbrIjx", + //minisign public key 138B1CA303E51687 + "RWSHFuUDoxyLEzjszuWZI1xStS66QTyXFFZG18uDfO26CuCsbckX1e9J", + //minisign public key FD9813B2D2098484 + "RWSEhAnSshOY/b+GmaiDkObbCWefsAoavjoLcPjBo1xn71yuOH5I+Lts", +} + +type vulnJson struct { + Name string + Uid string + Summary string + Description string + Links []string + Introduced string + Fixed string + Published string + Severity string + Check string + CVE string +} + +func versionCheck(ctx *cli.Context) error { + url := ctx.String(VersionCheckUrlFlag.Name) + version := ctx.String(VersionCheckVersionFlag.Name) + log.Info("Checking vulnerabilities", "version", version, "url", url) + return checkCurrent(url, version) +} + +func checkCurrent(url, current string) error { + var ( + data []byte + sig []byte + err error + ) + if data, err = fetch(url); err != nil { + return fmt.Errorf("could not retrieve data: %w", err) + } + if sig, err = fetch(fmt.Sprintf("%v.minisig", url)); err != nil { + return fmt.Errorf("could not retrieve signature: %w", err) + } + if err = verifySignature(gethPubKeys, data, sig); err != nil { + return err + } + var vulns []vulnJson + if err = json.Unmarshal(data, &vulns); err != nil { + return err + } + allOk := true + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + if err != nil { + return err + } + if r.MatchString(current) { + allOk = false + fmt.Printf("## Vulnerable to %v (%v)\n\n", vuln.Uid, vuln.Name) + fmt.Printf("Severity: %v\n", vuln.Severity) + fmt.Printf("Summary : %v\n", vuln.Summary) + fmt.Printf("Fixed in: %v\n", vuln.Fixed) + if len(vuln.CVE) > 0 { + fmt.Printf("CVE: %v\n", vuln.CVE) + } + if len(vuln.Links) > 0 { + fmt.Printf("References:\n") + for _, ref := range vuln.Links { + fmt.Printf("\t- %v\n", ref) + } + } + fmt.Println() + } + } + if allOk { + fmt.Println("No vulnerabilities found") + } + return nil +} + +// fetch makes an HTTP request to the given 
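checkCurrent above matches the running client's version banner against the "check" regular expression of each vulnerability entry. The sketch below reuses the ShallowCopy check from the test data verbatim and a banner in the same format as version_check_test.go:

```go
// How the version check decides whether a client build is affected: match the
// full version banner against each vulnerability's "check" regexp.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// ShallowCopy check, copied from the vulnerability data above.
	check := regexp.MustCompile(`Geth\/v1\.9\.(7|8|9|10|11|12|13|14|15|16).*$`)

	banner := func(version string) string {
		return fmt.Sprintf("Geth/%v-unstable-15339cf1-20201204/linux-amd64/go1.15.4", version)
	}
	fmt.Println(check.MatchString(banner("v1.9.15"))) // true: within [v1.9.7, v1.9.17)
	fmt.Println(check.MatchString(banner("v1.9.17"))) // false: fixed release
}
```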
url and returns the response body +func fetch(url string) ([]byte, error) { + if filep := strings.TrimPrefix(url, "file://"); filep != url { + return ioutil.ReadFile(filep) + } + res, err := http.Get(url) + if err != nil { + return nil, err + } + defer res.Body.Close() + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return body, nil +} + +// verifySignature checks that the sigData is a valid signature of the given +// data, for pubkey GethPubkey +func verifySignature(pubkeys []string, data, sigdata []byte) error { + sig, err := minisign.DecodeSignature(string(sigdata)) + if err != nil { + return err + } + // find the used key + var key *minisign.PublicKey + for _, pubkey := range pubkeys { + pub, err := minisign.NewPublicKey(pubkey) + if err != nil { + // our pubkeys should be parseable + return err + } + if pub.KeyId != sig.KeyId { + continue + } + key = &pub + break + } + if key == nil { + log.Info("Signing key not trusted", "keyid", keyID(sig.KeyId), "error", err) + return errors.New("signature could not be verified") + } + if ok, err := key.Verify(data, sig); !ok || err != nil { + log.Info("Verification failed error", "keyid", keyID(key.KeyId), "error", err) + return errors.New("signature could not be verified") + } + return nil +} + +// keyID turns a binary minisign key ID into a hex string. +// Note: key IDs are printed in reverse byte order. +func keyID(id [8]byte) string { + var rev [8]byte + for i := range id { + rev[len(rev)-1-i] = id[i] + } + return fmt.Sprintf("%X", rev) +} diff --git a/cmd/geth/version_check_test.go b/cmd/geth/version_check_test.go new file mode 100644 index 0000000000..0f056d1967 --- /dev/null +++ b/cmd/geth/version_check_test.go @@ -0,0 +1,130 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of go-ethereum. +// +// go-ethereum is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// go-ethereum is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. +// +// You should have received a copy of the GNU General Public License +// along with go-ethereum. If not, see . + +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "regexp" + "strconv" + "strings" + "testing" +) + +func TestVerification(t *testing.T) { + // Signatures generated with `minisign` + t.Run("minisig", func(t *testing.T) { + // For this test, the pubkey is in testdata/minisign.pub + // (the privkey is `minisign.sec`, if we want to expand this test. Password 'test' ) + pub := "RWQkliYstQBOKOdtClfgC3IypIPX6TAmoEi7beZ4gyR3wsaezvqOMWsp" + testVerification(t, pub, "./testdata/vcheck/minisig-sigs/") + }) + // Signatures generated with `signify-openbsd` + t.Run("signify-openbsd", func(t *testing.T) { + t.Skip("This currently fails, minisign expects 4 lines of data, signify provides only 2") + // For this test, the pubkey is in testdata/signifykey.pub + // (the privkey is `signifykey.sec`, if we want to expand this test. 
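As a point of reference for the verification path introduced above (fetch the feed, fetch its .minisig, decode the signature, match a trusted key id, verify), the same go-minisign calls can be exercised standalone. A minimal sketch using the first trusted key listed above; the local file names are assumptions, not part of the change:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/jedisct1/go-minisign"
)

func main() {
	// Assumed local copies of the vulnerability feed and its detached signature.
	data, err := ioutil.ReadFile("vulnerabilities.json")
	if err != nil {
		log.Fatal(err)
	}
	sigData, err := ioutil.ReadFile("vulnerabilities.json.minisig")
	if err != nil {
		log.Fatal(err)
	}
	// One of the minisign public keys trusted by version_check.go above.
	pub, err := minisign.NewPublicKey("RWQk7Lo5TQgd+wxBNZM+Zoy+7UhhMHaWKzqoes9tvSbFLJYZhNTbrIjx")
	if err != nil {
		log.Fatal(err)
	}
	sig, err := minisign.DecodeSignature(string(sigData))
	if err != nil {
		log.Fatal(err)
	}
	// Verify reports whether sig is a valid signature of data under pub.
	ok, err := pub.Verify(data, sig)
	fmt.Println("verified:", ok, "err:", err)
}
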
Password 'test' ) + pub := "RWSKLNhZb0KdATtRT7mZC/bybI3t3+Hv/O2i3ye04Dq9fnT9slpZ1a2/" + testVerification(t, pub, "./testdata/vcheck/signify-sigs/") + }) +} + +func testVerification(t *testing.T, pubkey, sigdir string) { + // Data to verify + data, err := ioutil.ReadFile("./testdata/vcheck/data.json") + if err != nil { + t.Fatal(err) + } + // Signatures, with and without comments, both trusted and untrusted + files, err := ioutil.ReadDir(sigdir) + if err != nil { + t.Fatal(err) + } + for _, f := range files { + sig, err := ioutil.ReadFile(filepath.Join(sigdir, f.Name())) + if err != nil { + t.Fatal(err) + } + err = verifySignature([]string{pubkey}, data, sig) + if err != nil { + t.Fatal(err) + } + } +} + +func versionUint(v string) int { + mustInt := func(s string) int { + a, err := strconv.Atoi(s) + if err != nil { + panic(v) + } + return a + } + components := strings.Split(strings.TrimPrefix(v, "v"), ".") + a := mustInt(components[0]) + b := mustInt(components[1]) + c := mustInt(components[2]) + return a*100*100 + b*100 + c +} + +// TestMatching can be used to check that the regexps are correct +func TestMatching(t *testing.T) { + data, _ := ioutil.ReadFile("./testdata/vcheck/vulnerabilities.json") + var vulns []vulnJson + if err := json.Unmarshal(data, &vulns); err != nil { + t.Fatal(err) + } + check := func(version string) { + vFull := fmt.Sprintf("Geth/%v-unstable-15339cf1-20201204/linux-amd64/go1.15.4", version) + for _, vuln := range vulns { + r, err := regexp.Compile(vuln.Check) + vulnIntro := versionUint(vuln.Introduced) + vulnFixed := versionUint(vuln.Fixed) + current := versionUint(version) + if err != nil { + t.Fatal(err) + } + if vuln.Name == "Denial of service due to Go CVE-2020-28362" { + // this one is not tied to geth-versions + continue + } + if vulnIntro <= current && vulnFixed > current { + // Should be vulnerable + if !r.MatchString(vFull) { + t.Errorf("Should be vulnerable, version %v, intro: %v, fixed: %v %v %v", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vuln.Check) + } + } else { + if r.MatchString(vFull) { + t.Errorf("Should not be flagged vulnerable, version %v, intro: %v, fixed: %v %v %d %d %d", + version, vuln.Introduced, vuln.Fixed, vuln.Name, vulnIntro, current, vulnFixed) + } + } + + } + } + for major := 1; major < 2; major++ { + for minor := 0; minor < 30; minor++ { + for patch := 0; patch < 30; patch++ { + vShort := fmt.Sprintf("v%d.%d.%d", major, minor, patch) + check(vShort) + } + } + } +} diff --git a/cmd/puppeth/genesis.go b/cmd/puppeth/genesis.go index b3e1709dbf..ef1f977bf0 100644 --- a/cmd/puppeth/genesis.go +++ b/cmd/puppeth/genesis.go @@ -152,7 +152,7 @@ func newAlethGenesisSpec(network string, genesis *core.Genesis) (*alethGenesisSp spec.Genesis.Author = genesis.Coinbase spec.Genesis.Timestamp = (hexutil.Uint64)(genesis.Timestamp) spec.Genesis.ParentHash = genesis.ParentHash - spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) + spec.Genesis.ExtraData = genesis.ExtraData spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) for address, account := range genesis.Alloc { @@ -425,12 +425,12 @@ func newParityChainSpec(network string, genesis *core.Genesis, bootnodes []strin spec.Params.EIP98Transition = math.MaxInt64 spec.Genesis.Seal.Ethereum.Nonce = types.EncodeNonce(genesis.Nonce) - spec.Genesis.Seal.Ethereum.MixHash = (genesis.Mixhash[:]) + spec.Genesis.Seal.Ethereum.MixHash = genesis.Mixhash[:] spec.Genesis.Difficulty = (*hexutil.Big)(genesis.Difficulty) spec.Genesis.Author = genesis.Coinbase spec.Genesis.Timestamp = 
(hexutil.Uint64)(genesis.Timestamp) spec.Genesis.ParentHash = genesis.ParentHash - spec.Genesis.ExtraData = (hexutil.Bytes)(genesis.ExtraData) + spec.Genesis.ExtraData = genesis.ExtraData spec.Genesis.GasLimit = (hexutil.Uint64)(genesis.GasLimit) spec.Accounts = make(map[common.UnprefixedAddress]*parityChainSpecAccount) diff --git a/cmd/puppeth/module_faucet.go b/cmd/puppeth/module_faucet.go index 987bed14aa..88cb80ae4c 100644 --- a/cmd/puppeth/module_faucet.go +++ b/cmd/puppeth/module_faucet.go @@ -46,6 +46,7 @@ ENTRYPOINT [ \ "--faucet.name", "{{.FaucetName}}", "--faucet.amount", "{{.FaucetAmount}}", "--faucet.minutes", "{{.FaucetMinutes}}", "--faucet.tiers", "{{.FaucetTiers}}", \ "--account.json", "/account.json", "--account.pass", "/account.pass" \ {{if .CaptchaToken}}, "--captcha.token", "{{.CaptchaToken}}", "--captcha.secret", "{{.CaptchaSecret}}"{{end}}{{if .NoAuth}}, "--noauth"{{end}} \ + {{if .TwitterToken}}, "--twitter.token.v1", "{{.TwitterToken}}"{{end}} \ ]` // faucetComposefile is the docker-compose.yml file required to deploy and maintain @@ -71,6 +72,7 @@ services: - FAUCET_TIERS={{.FaucetTiers}} - CAPTCHA_TOKEN={{.CaptchaToken}} - CAPTCHA_SECRET={{.CaptchaSecret}} + - TWITTER_TOKEN={{.TwitterToken}} - NO_AUTH={{.NoAuth}}{{if .VHost}} - VIRTUAL_HOST={{.VHost}} - VIRTUAL_PORT=8080{{end}} @@ -103,6 +105,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config "FaucetMinutes": config.minutes, "FaucetTiers": config.tiers, "NoAuth": config.noauth, + "TwitterToken": config.twitterToken, }) files[filepath.Join(workdir, "Dockerfile")] = dockerfile.Bytes() @@ -120,6 +123,7 @@ func deployFaucet(client *sshClient, network string, bootnodes []string, config "FaucetMinutes": config.minutes, "FaucetTiers": config.tiers, "NoAuth": config.noauth, + "TwitterToken": config.twitterToken, }) files[filepath.Join(workdir, "docker-compose.yaml")] = composefile.Bytes() @@ -152,6 +156,7 @@ type faucetInfos struct { noauth bool captchaToken string captchaSecret string + twitterToken string } // Report converts the typed struct into a plain string->string map, containing @@ -165,6 +170,7 @@ func (info *faucetInfos) Report() map[string]string { "Funding cooldown (base tier)": fmt.Sprintf("%d mins", info.minutes), "Funding tiers": strconv.Itoa(info.tiers), "Captha protection": fmt.Sprintf("%v", info.captchaToken != ""), + "Using Twitter API": fmt.Sprintf("%v", info.twitterToken != ""), "Ethstats username": info.node.ethstats, } if info.noauth { @@ -243,5 +249,6 @@ func checkFaucet(client *sshClient, network string) (*faucetInfos, error) { captchaToken: infos.envvars["CAPTCHA_TOKEN"], captchaSecret: infos.envvars["CAPTCHA_SECRET"], noauth: infos.envvars["NO_AUTH"] == "true", + twitterToken: infos.envvars["TWITTER_TOKEN"], }, nil } diff --git a/cmd/puppeth/module_node.go b/cmd/puppeth/module_node.go index 5d9ef46523..3ea96870d4 100644 --- a/cmd/puppeth/module_node.go +++ b/cmd/puppeth/module_node.go @@ -94,7 +94,7 @@ func deployNode(client *sshClient, network string, bootnodes []string, config *n lightFlag := "" if config.peersLight > 0 { - lightFlag = fmt.Sprintf("--lightpeers=%d --lightserv=50", config.peersLight) + lightFlag = fmt.Sprintf("--light.maxpeers=%d --light.serve=50", config.peersLight) } dockerfile := new(bytes.Buffer) template.Must(template.New("").Parse(nodeDockerfile)).Execute(dockerfile, map[string]interface{}{ diff --git a/cmd/puppeth/wizard_faucet.go b/cmd/puppeth/wizard_faucet.go index 9f753ad68b..65d4e8b8ed 100644 --- a/cmd/puppeth/wizard_faucet.go +++ 
b/cmd/puppeth/wizard_faucet.go @@ -102,6 +102,21 @@ func (w *wizard) deployFaucet() { infos.captchaSecret = w.readPassword() } } + // Accessing the Twitter API requires a bearer token, request it + if infos.twitterToken != "" { + fmt.Println() + fmt.Println("Reuse previous Twitter API token (y/n)? (default = yes)") + if !w.readDefaultYesNo(true) { + infos.twitterToken = "" + } + } + if infos.twitterToken == "" { + // No previous twitter token (or old one discarded) + fmt.Println() + fmt.Println() + fmt.Printf("What is the Twitter API app Bearer token?\n") + infos.twitterToken = w.readString() + } // Figure out where the user wants to store the persistent data fmt.Println() if infos.node.datadir == "" { diff --git a/cmd/puppeth/wizard_genesis.go b/cmd/puppeth/wizard_genesis.go index 2d014e83bc..4f701fa1c3 100644 --- a/cmd/puppeth/wizard_genesis.go +++ b/cmd/puppeth/wizard_genesis.go @@ -236,8 +236,12 @@ func (w *wizard) manageGenesis() { w.conf.Genesis.Config.IstanbulBlock = w.readDefaultBigInt(w.conf.Genesis.Config.IstanbulBlock) fmt.Println() - fmt.Printf("Which block should YOLOv2 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV2Block) - w.conf.Genesis.Config.YoloV2Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV2Block) + fmt.Printf("Which block should Berlin come into effect? (default = %v)\n", w.conf.Genesis.Config.BerlinBlock) + w.conf.Genesis.Config.BerlinBlock = w.readDefaultBigInt(w.conf.Genesis.Config.BerlinBlock) + + fmt.Println() + fmt.Printf("Which block should YOLOv3 come into effect? (default = %v)\n", w.conf.Genesis.Config.YoloV3Block) + w.conf.Genesis.Config.YoloV3Block = w.readDefaultBigInt(w.conf.Genesis.Config.YoloV3Block) out, _ := json.MarshalIndent(w.conf.Genesis.Config, "", " ") fmt.Printf("Chain configuration updated:\n\n%s\n", out) @@ -259,7 +263,7 @@ func (w *wizard) manageGenesis() { // Export the native genesis spec used by puppeth and Geth gethJson := filepath.Join(folder, fmt.Sprintf("%s.json", w.network)) - if err := ioutil.WriteFile((gethJson), out, 0644); err != nil { + if err := ioutil.WriteFile(gethJson, out, 0644); err != nil { log.Error("Failed to save genesis file", "err", err) return } diff --git a/cmd/utils/cmd.go b/cmd/utils/cmd.go index 869cf90ea5..d4051e59ef 100644 --- a/cmd/utils/cmd.go +++ b/cmd/utils/cmd.go @@ -26,17 +26,20 @@ import ( "runtime" "strings" "syscall" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/debug" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/rlp" + "gopkg.in/urfave/cli.v1" ) const ( @@ -63,7 +66,7 @@ func Fatalf(format string, args ...interface{}) { os.Exit(1) } -func StartNode(stack *node.Node) { +func StartNode(ctx *cli.Context, stack *node.Node) { if err := stack.Start(); err != nil { Fatalf("Error starting protocol stack: %v", err) } @@ -71,6 +74,17 @@ func StartNode(stack *node.Node) { sigc := make(chan os.Signal, 1) signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM) defer signal.Stop(sigc) + + minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache + if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) { + minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name) + } else if ctx.GlobalIsSet(CacheFlag.Name) || 
ctx.GlobalIsSet(CacheGCFlag.Name) { + minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100 + } + if minFreeDiskSpace > 0 { + go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024) + } + <-sigc log.Info("Got interrupt, shutting down...") go stack.Close() @@ -85,6 +99,24 @@ func StartNode(stack *node.Node) { }() } +func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) { + for { + freeSpace, err := getFreeDiskSpace(path) + if err != nil { + log.Warn("Failed to get free disk space", "path", path, "err", err) + break + } + if freeSpace < freeDiskSpaceCritical { + log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace)) + sigc <- syscall.SIGTERM + break + } else if freeSpace < 2*freeDiskSpaceCritical { + log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical)) + } + time.Sleep(60 * time.Second) + } +} + func ImportChain(chain *core.BlockChain, fn string) error { // Watch for Ctrl-C while the import is running. // If a signal is received, the import will stop at the next batch. diff --git a/cmd/utils/diskusage.go b/cmd/utils/diskusage.go new file mode 100644 index 0000000000..da696de6bf --- /dev/null +++ b/cmd/utils/diskusage.go @@ -0,0 +1,42 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// +build !windows + +package utils + +import ( + "fmt" + + "golang.org/x/sys/unix" +) + +func getFreeDiskSpace(path string) (uint64, error) { + var stat unix.Statfs_t + if err := unix.Statfs(path, &stat); err != nil { + return 0, fmt.Errorf("failed to call Statfs: %v", err) + } + + // Available blocks * size per block = available space in bytes + var bavail = stat.Bavail + if stat.Bavail < 0 { + // FreeBSD can have a negative number of blocks available + // because of the grace limit. + bavail = 0 + } + //nolint:unconvert + return uint64(bavail) * uint64(stat.Bsize), nil +} diff --git a/cmd/utils/diskusage_windows.go b/cmd/utils/diskusage_windows.go new file mode 100644 index 0000000000..9bf7740b99 --- /dev/null +++ b/cmd/utils/diskusage_windows.go @@ -0,0 +1,38 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
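To make the new shutdown threshold concrete: with --datadir.minfreedisk unset, the critical level falls back to the default dirty-trie cache size, or to --cache scaled by --cache.gc when either of those flags is given. For example, --cache=4096 --cache.gc=25 yields 4096*25/100 = 1024 MB, with a warning once free space drops below twice that and a check every 60 seconds. A rough Unix-only sketch of the same free-space probe via golang.org/x/sys/unix (the threshold values here are assumptions for illustration):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// freeDiskSpace returns the bytes available to unprivileged users on the
// filesystem containing path, mirroring cmd/utils/diskusage.go above.
func freeDiskSpace(path string) (uint64, error) {
	var stat unix.Statfs_t
	if err := unix.Statfs(path, &stat); err != nil {
		return 0, err
	}
	return uint64(stat.Bavail) * uint64(stat.Bsize), nil
}

func main() {
	// Assumed example: --cache=4096 --cache.gc=25 => 1024 MB critical level.
	critical := uint64(4096*25/100) * 1024 * 1024

	free, err := freeDiskSpace(".")
	if err != nil {
		log.Fatal(err)
	}
	switch {
	case free < critical:
		fmt.Println("low disk space: geth would shut down gracefully")
	case free < 2*critical:
		fmt.Println("disk space running low: warning only")
	default:
		fmt.Printf("ok: %d MB free\n", free/1024/1024)
	}
}
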
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package utils + +import ( + "fmt" + + "golang.org/x/sys/windows" +) + +func getFreeDiskSpace(path string) (uint64, error) { + + cwd, err := windows.UTF16PtrFromString(path) + if err != nil { + return 0, fmt.Errorf("failed to call UTF16PtrFromString: %v", err) + } + + var freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes uint64 + if err := windows.GetDiskFreeSpaceEx(cwd, &freeBytesAvailableToCaller, &totalNumberOfBytes, &totalNumberOfFreeBytes); err != nil { + return 0, fmt.Errorf("failed to call GetDiskFreeSpaceEx: %v", err) + } + + return freeBytesAvailableToCaller, nil +} diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index e5ccfd7435..fc479f3987 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -44,7 +44,9 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/eth/tracers" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/graphql" @@ -58,13 +60,12 @@ import ( "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nat" "github.com/ethereum/go-ethereum/p2p/netutil" "github.com/ethereum/go-ethereum/params" pcsclite "github.com/gballet/go-libpcsclite" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) func init() { @@ -113,13 +114,21 @@ var ( Name: "datadir.ancient", Usage: "Data directory for ancient chain segments (default = inside chaindata)", } + MinFreeDiskSpaceFlag = DirectoryFlag{ + Name: "datadir.minfreedisk", + Usage: "Minimum free disk space in MB, once reached triggers auto shut down (default = --cache.gc converted to MB, 0 = disabled)", + } KeyStoreDirFlag = DirectoryFlag{ Name: "keystore", Usage: "Directory for the keystore (default = inside the datadir)", } NoUSBFlag = cli.BoolFlag{ Name: "nousb", - Usage: "Disables monitoring for and managing USB hardware wallets", + Usage: "Disables monitoring for and managing USB hardware wallets (deprecated)", + } + USBFlag = cli.BoolFlag{ + Name: "usb", + Usage: "Enable monitoring and management of USB hardware wallets", } SmartCardDaemonPathFlag = cli.StringFlag{ Name: "pcscdpath", @@ -129,15 +138,19 @@ var ( NetworkIdFlag = cli.Uint64Flag{ Name: "networkid", Usage: "Explicitly set network id (integer)(For testnets: use --ropsten, --rinkeby, --goerli instead)", - Value: eth.DefaultConfig.NetworkId, + Value: ethconfig.Defaults.NetworkId, + } + MainnetFlag = cli.BoolFlag{ + Name: "mainnet", + Usage: "Ethereum mainnet", } GoerliFlag = cli.BoolFlag{ Name: "goerli", Usage: "Görli network: pre-configured proof-of-authority test network", } - YoloV2Flag = cli.BoolFlag{ - Name: "yolov2", - Usage: "YOLOv2 network: pre-configured proof-of-authority shortlived test network.", + YoloV3Flag = cli.BoolFlag{ + Name: "yolov3", + Usage: "YOLOv3 
network: pre-configured proof-of-authority shortlived test network.", } RinkebyFlag = cli.BoolFlag{ Name: "rinkeby", @@ -184,10 +197,10 @@ var ( Name: "nocode", Usage: "Exclude contract code (save db lookups)", } - defaultSyncMode = eth.DefaultConfig.SyncMode + defaultSyncMode = ethconfig.Defaults.SyncMode SyncModeFlag = TextMarshalerFlag{ Name: "syncmode", - Usage: `Blockchain sync mode ("fast", "full", or "light")`, + Usage: `Blockchain sync mode ("fast", "full", "snap" or "light")`, Value: &defaultSyncMode, } GCModeFlag = cli.StringFlag{ @@ -195,14 +208,14 @@ var ( Usage: `Blockchain garbage collection mode ("full", "archive")`, Value: "full", } - SnapshotFlag = cli.BoolFlag{ + SnapshotFlag = cli.BoolTFlag{ Name: "snapshot", - Usage: `Enables snapshot-database mode -- experimental work in progress feature`, + Usage: `Enables snapshot-database mode (default = enable)`, } - TxLookupLimitFlag = cli.Int64Flag{ + TxLookupLimitFlag = cli.Uint64Flag{ Name: "txlookuplimit", - Usage: "Number of recent blocks to maintain transactions index by-hash for (default = index all blocks)", - Value: 0, + Usage: "Number of recent blocks to maintain transactions index for (default = about one year, 0 = entire chain)", + Value: ethconfig.Defaults.TxLookupLimit, } LightKDFFlag = cli.BoolFlag{ Name: "lightkdf", @@ -212,36 +225,45 @@ var ( Name: "whitelist", Usage: "Comma separated block number-to-hash mappings to enforce (=)", } + BloomFilterSizeFlag = cli.Uint64Flag{ + Name: "bloomfilter.size", + Usage: "Megabytes of memory allocated to bloom-filter for pruning", + Value: 2048, + } + OverrideBerlinFlag = cli.Uint64Flag{ + Name: "override.berlin", + Usage: "Manually specify Berlin fork-block, overriding the bundled setting", + } // Light server and client settings LightServeFlag = cli.IntFlag{ Name: "light.serve", Usage: "Maximum percentage of time allowed for serving LES requests (multi-threaded processing allows values over 100)", - Value: eth.DefaultConfig.LightServ, + Value: ethconfig.Defaults.LightServ, } LightIngressFlag = cli.IntFlag{ Name: "light.ingress", Usage: "Incoming bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)", - Value: eth.DefaultConfig.LightIngress, + Value: ethconfig.Defaults.LightIngress, } LightEgressFlag = cli.IntFlag{ Name: "light.egress", Usage: "Outgoing bandwidth limit for serving light clients (kilobytes/sec, 0 = unlimited)", - Value: eth.DefaultConfig.LightEgress, + Value: ethconfig.Defaults.LightEgress, } LightMaxPeersFlag = cli.IntFlag{ Name: "light.maxpeers", Usage: "Maximum number of light clients to serve, or light servers to attach to", - Value: eth.DefaultConfig.LightPeers, + Value: ethconfig.Defaults.LightPeers, } UltraLightServersFlag = cli.StringFlag{ Name: "ulc.servers", Usage: "List of trusted ultra-light servers", - Value: strings.Join(eth.DefaultConfig.UltraLightServers, ","), + Value: strings.Join(ethconfig.Defaults.UltraLightServers, ","), } UltraLightFractionFlag = cli.IntFlag{ Name: "ulc.fraction", Usage: "Minimum % of trusted ultra-light servers required to announce a new head", - Value: eth.DefaultConfig.UltraLightFraction, + Value: ethconfig.Defaults.UltraLightFraction, } UltraLightOnlyAnnounceFlag = cli.BoolFlag{ Name: "ulc.onlyannounce", @@ -251,6 +273,10 @@ var ( Name: "light.nopruning", Usage: "Disable ancient light chain data pruning", } + LightNoSyncServeFlag = cli.BoolFlag{ + Name: "light.nosyncserve", + Usage: "Enables serving light clients before syncing", + } // Ethash settings EthashCacheDirFlag = DirectoryFlag{ Name: 
"ethash.cachedir", @@ -259,12 +285,12 @@ var ( EthashCachesInMemoryFlag = cli.IntFlag{ Name: "ethash.cachesinmem", Usage: "Number of recent ethash caches to keep in memory (16MB each)", - Value: eth.DefaultConfig.Ethash.CachesInMem, + Value: ethconfig.Defaults.Ethash.CachesInMem, } EthashCachesOnDiskFlag = cli.IntFlag{ Name: "ethash.cachesondisk", Usage: "Number of recent ethash caches to keep on disk (16MB each)", - Value: eth.DefaultConfig.Ethash.CachesOnDisk, + Value: ethconfig.Defaults.Ethash.CachesOnDisk, } EthashCachesLockMmapFlag = cli.BoolFlag{ Name: "ethash.cacheslockmmap", @@ -273,17 +299,17 @@ var ( EthashDatasetDirFlag = DirectoryFlag{ Name: "ethash.dagdir", Usage: "Directory to store the ethash mining DAGs", - Value: DirectoryString(eth.DefaultConfig.Ethash.DatasetDir), + Value: DirectoryString(ethconfig.Defaults.Ethash.DatasetDir), } EthashDatasetsInMemoryFlag = cli.IntFlag{ Name: "ethash.dagsinmem", Usage: "Number of recent ethash mining DAGs to keep in memory (1+GB each)", - Value: eth.DefaultConfig.Ethash.DatasetsInMem, + Value: ethconfig.Defaults.Ethash.DatasetsInMem, } EthashDatasetsOnDiskFlag = cli.IntFlag{ Name: "ethash.dagsondisk", Usage: "Number of recent ethash mining DAGs to keep on disk (1+GB each)", - Value: eth.DefaultConfig.Ethash.DatasetsOnDisk, + Value: ethconfig.Defaults.Ethash.DatasetsOnDisk, } EthashDatasetsLockMmapFlag = cli.BoolFlag{ Name: "ethash.dagslockmmap", @@ -311,37 +337,37 @@ var ( TxPoolPriceLimitFlag = cli.Uint64Flag{ Name: "txpool.pricelimit", Usage: "Minimum gas price limit to enforce for acceptance into the pool", - Value: eth.DefaultConfig.TxPool.PriceLimit, + Value: ethconfig.Defaults.TxPool.PriceLimit, } TxPoolPriceBumpFlag = cli.Uint64Flag{ Name: "txpool.pricebump", Usage: "Price bump percentage to replace an already existing transaction", - Value: eth.DefaultConfig.TxPool.PriceBump, + Value: ethconfig.Defaults.TxPool.PriceBump, } TxPoolAccountSlotsFlag = cli.Uint64Flag{ Name: "txpool.accountslots", Usage: "Minimum number of executable transaction slots guaranteed per account", - Value: eth.DefaultConfig.TxPool.AccountSlots, + Value: ethconfig.Defaults.TxPool.AccountSlots, } TxPoolGlobalSlotsFlag = cli.Uint64Flag{ Name: "txpool.globalslots", Usage: "Maximum number of executable transaction slots for all accounts", - Value: eth.DefaultConfig.TxPool.GlobalSlots, + Value: ethconfig.Defaults.TxPool.GlobalSlots, } TxPoolAccountQueueFlag = cli.Uint64Flag{ Name: "txpool.accountqueue", Usage: "Maximum number of non-executable transaction slots permitted per account", - Value: eth.DefaultConfig.TxPool.AccountQueue, + Value: ethconfig.Defaults.TxPool.AccountQueue, } TxPoolGlobalQueueFlag = cli.Uint64Flag{ Name: "txpool.globalqueue", Usage: "Maximum number of non-executable transaction slots for all accounts", - Value: eth.DefaultConfig.TxPool.GlobalQueue, + Value: ethconfig.Defaults.TxPool.GlobalQueue, } TxPoolLifetimeFlag = cli.DurationFlag{ Name: "txpool.lifetime", Usage: "Maximum amount of time non-executable transaction are queued", - Value: eth.DefaultConfig.TxPool.Lifetime, + Value: ethconfig.Defaults.TxPool.Lifetime, } // Performance tuning settings CacheFlag = cli.IntFlag{ @@ -362,12 +388,12 @@ var ( CacheTrieJournalFlag = cli.StringFlag{ Name: "cache.trie.journal", Usage: "Disk journal directory for trie cache to survive node restarts", - Value: eth.DefaultConfig.TrieCleanCacheJournal, + Value: ethconfig.Defaults.TrieCleanCacheJournal, } CacheTrieRejournalFlag = cli.DurationFlag{ Name: "cache.trie.rejournal", Usage: "Time interval to 
regenerate the trie cache journal", - Value: eth.DefaultConfig.TrieCleanCacheRejournal, + Value: ethconfig.Defaults.TrieCleanCacheRejournal, } CacheGCFlag = cli.IntFlag{ Name: "cache.gc", @@ -383,6 +409,10 @@ var ( Name: "cache.noprefetch", Usage: "Disable heuristic state prefetch during block import (less CPU and disk IO, more time waiting for data)", } + CachePreimagesFlag = cli.BoolFlag{ + Name: "cache.preimages", + Usage: "Enable recording the SHA3/keccak preimages of trie keys", + } // Miner settings MiningEnabledFlag = cli.BoolFlag{ Name: "mine", @@ -400,17 +430,17 @@ var ( MinerGasTargetFlag = cli.Uint64Flag{ Name: "miner.gastarget", Usage: "Target gas floor for mined blocks", - Value: eth.DefaultConfig.Miner.GasFloor, + Value: ethconfig.Defaults.Miner.GasFloor, } MinerGasLimitFlag = cli.Uint64Flag{ Name: "miner.gaslimit", Usage: "Target gas ceiling for mined blocks", - Value: eth.DefaultConfig.Miner.GasCeil, + Value: ethconfig.Defaults.Miner.GasCeil, } MinerGasPriceFlag = BigFlag{ Name: "miner.gasprice", Usage: "Minimum gas price for mining a transaction", - Value: eth.DefaultConfig.Miner.GasPrice, + Value: ethconfig.Defaults.Miner.GasPrice, } MinerEtherbaseFlag = cli.StringFlag{ Name: "miner.etherbase", @@ -424,7 +454,7 @@ var ( MinerRecommitIntervalFlag = cli.DurationFlag{ Name: "miner.recommit", Usage: "Time interval to recreate the block being mined", - Value: eth.DefaultConfig.Miner.Recommit, + Value: ethconfig.Defaults.Miner.Recommit, } MinerNoVerfiyFlag = cli.BoolFlag{ Name: "miner.noverify", @@ -457,12 +487,12 @@ var ( RPCGlobalGasCapFlag = cli.Uint64Flag{ Name: "rpc.gascap", Usage: "Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)", - Value: eth.DefaultConfig.RPCGasCap, + Value: ethconfig.Defaults.RPCGasCap, } RPCGlobalTxFeeCapFlag = cli.Float64Flag{ Name: "rpc.txfeecap", Usage: "Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 = no cap)", - Value: eth.DefaultConfig.RPCTxFeeCap, + Value: ethconfig.Defaults.RPCTxFeeCap, } // Logging and debug settings EthStatsURLFlag = cli.StringFlag{ @@ -515,6 +545,11 @@ var ( Usage: "API's offered over the HTTP-RPC interface", Value: "", } + HTTPPathPrefixFlag = cli.StringFlag{ + Name: "http.rpcprefix", + Usage: "HTTP path path prefix on which JSON-RPC is served. Use '/' to serve on all paths.", + Value: "", + } GraphQLEnabledFlag = cli.BoolFlag{ Name: "graphql", Usage: "Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if an HTTP server is started as well.", @@ -553,6 +588,11 @@ var ( Usage: "Origins from which to accept websockets requests", Value: "", } + WSPathPrefixFlag = cli.StringFlag{ + Name: "ws.rpcprefix", + Usage: "HTTP path prefix on which JSON-RPC is served. 
Use '/' to serve on all paths.", + Value: "", + } ExecFlag = cli.StringFlag{ Name: "exec", Usage: "Execute JavaScript statement", @@ -561,6 +601,10 @@ var ( Name: "preload", Usage: "Comma separated list of JavaScript files to preload into the console", } + AllowUnprotectedTxs = cli.BoolFlag{ + Name: "rpc.allow-unprotected-txs", + Usage: "Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC", + } // Network Settings MaxPeersFlag = cli.IntFlag{ @@ -624,35 +668,17 @@ var ( GpoBlocksFlag = cli.IntFlag{ Name: "gpo.blocks", Usage: "Number of recent blocks to check for gas prices", - Value: eth.DefaultConfig.GPO.Blocks, + Value: ethconfig.Defaults.GPO.Blocks, } GpoPercentileFlag = cli.IntFlag{ Name: "gpo.percentile", Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices", - Value: eth.DefaultConfig.GPO.Percentile, + Value: ethconfig.Defaults.GPO.Percentile, } GpoMaxGasPriceFlag = cli.Int64Flag{ Name: "gpo.maxprice", Usage: "Maximum gas price will be recommended by gpo", - Value: eth.DefaultConfig.GPO.MaxPrice.Int64(), - } - WhisperEnabledFlag = cli.BoolFlag{ - Name: "shh", - Usage: "Enable Whisper", - } - WhisperMaxMessageSizeFlag = cli.IntFlag{ - Name: "shh.maxmessagesize", - Usage: "Max message size accepted", - Value: 1024 * 1024, - } - WhisperMinPOWFlag = cli.Float64Flag{ - Name: "shh.pow", - Usage: "Minimum POW accepted", - Value: 0.2, - } - WhisperRestrictConnectionBetweenLightClientsFlag = cli.BoolFlag{ - Name: "shh.restrict-light", - Usage: "Restrict connection between two whisper light clients", + Value: ethconfig.Defaults.GPO.MaxPrice.Int64(), } // Metrics flags @@ -672,12 +698,12 @@ var ( MetricsHTTPFlag = cli.StringFlag{ Name: "metrics.addr", Usage: "Enable stand-alone metrics HTTP server listening interface", - Value: "127.0.0.1", + Value: metrics.DefaultConfig.HTTP, } MetricsPortFlag = cli.IntFlag{ Name: "metrics.port", Usage: "Metrics HTTP server listening port", - Value: 6060, + Value: metrics.DefaultConfig.Port, } MetricsEnableInfluxDBFlag = cli.BoolFlag{ Name: "metrics.influxdb", @@ -686,22 +712,22 @@ var ( MetricsInfluxDBEndpointFlag = cli.StringFlag{ Name: "metrics.influxdb.endpoint", Usage: "InfluxDB API endpoint to report metrics to", - Value: "http://localhost:8086", + Value: metrics.DefaultConfig.InfluxDBEndpoint, } MetricsInfluxDBDatabaseFlag = cli.StringFlag{ Name: "metrics.influxdb.database", Usage: "InfluxDB database name to push reported metrics to", - Value: "geth", + Value: metrics.DefaultConfig.InfluxDBDatabase, } MetricsInfluxDBUsernameFlag = cli.StringFlag{ Name: "metrics.influxdb.username", Usage: "Username to authorize access to the database", - Value: "test", + Value: metrics.DefaultConfig.InfluxDBUsername, } MetricsInfluxDBPasswordFlag = cli.StringFlag{ Name: "metrics.influxdb.password", Usage: "Password to authorize access to the database", - Value: "test", + Value: metrics.DefaultConfig.InfluxDBPassword, } // Tags are part of every measurement sent to InfluxDB. Queries on tags are faster in InfluxDB. // For example `host` tag could be used so that we can group all nodes and average a measurement @@ -710,7 +736,7 @@ var ( MetricsInfluxDBTagsFlag = cli.StringFlag{ Name: "metrics.influxdb.tags", Usage: "Comma-separated InfluxDB tags (key/values) attached to all measurements", - Value: "host=localhost", + Value: metrics.DefaultConfig.InfluxDBTags, } EWASMInterpreterFlag = cli.StringFlag{ Name: "vm.ewasm", @@ -729,13 +755,9 @@ var ( // then a subdirectory of the specified datadir will be used. 
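The new --http.rpcprefix and --ws.rpcprefix flags above mount the JSON-RPC handler under a path instead of the root. As a sketch (the prefix value and the default port 8545 are assumptions), a client talking to a node started with `geth --http --http.rpcprefix=/node1` would dial the prefixed URL:

package main

import (
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes a local node started with: geth --http --http.rpcprefix=/node1
	client, err := rpc.Dial("http://127.0.0.1:8545/node1")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var version string
	// web3_clientVersion is served by the default "web3" HTTP API namespace.
	if err := client.Call(&version, "web3_clientVersion"); err != nil {
		log.Fatal(err)
	}
	fmt.Println("client:", version)
}
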
func MakeDataDir(ctx *cli.Context) string { if path := ctx.GlobalString(DataDirFlag.Name); path != "" { - if ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name) { + if ctx.GlobalBool(RopstenFlag.Name) { // Maintain compatibility with older Geth configurations storing the // Ropsten database in `testnet` instead of `ropsten`. - legacyPath := filepath.Join(path, "testnet") - if _, err := os.Stat(legacyPath); !os.IsNotExist(err) { - return legacyPath - } return filepath.Join(path, "ropsten") } if ctx.GlobalBool(RinkebyFlag.Name) { @@ -744,8 +766,8 @@ func MakeDataDir(ctx *cli.Context) string { if ctx.GlobalBool(GoerliFlag.Name) { return filepath.Join(path, "goerli") } - if ctx.GlobalBool(YoloV2Flag.Name) { - return filepath.Join(path, "yolo-v2") + if ctx.GlobalBool(YoloV3Flag.Name) { + return filepath.Join(path, "yolo-v3") } return path } @@ -791,20 +813,16 @@ func setNodeUserIdent(ctx *cli.Context, cfg *node.Config) { func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { urls := params.MainnetBootnodes switch { - case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name): - if ctx.GlobalIsSet(LegacyBootnodesV4Flag.Name) { - urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV4Flag.Name)) - } else { - urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) - } - case ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name): + case ctx.GlobalIsSet(BootnodesFlag.Name): + urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) + case ctx.GlobalBool(RopstenFlag.Name): urls = params.RopstenBootnodes case ctx.GlobalBool(RinkebyFlag.Name): urls = params.RinkebyBootnodes case ctx.GlobalBool(GoerliFlag.Name): urls = params.GoerliBootnodes - case ctx.GlobalBool(YoloV2Flag.Name): - urls = params.YoloV2Bootnodes + case ctx.GlobalBool(YoloV3Flag.Name): + urls = params.YoloV3Bootnodes case cfg.BootstrapNodes != nil: return // already set, don't apply defaults. } @@ -825,30 +843,18 @@ func setBootstrapNodes(ctx *cli.Context, cfg *p2p.Config) { // setBootstrapNodesV5 creates a list of bootstrap nodes from the command line // flags, reverting to pre-configured ones if none have been specified. func setBootstrapNodesV5(ctx *cli.Context, cfg *p2p.Config) { - urls := params.MainnetBootnodes + urls := params.V5Bootnodes switch { - case ctx.GlobalIsSet(BootnodesFlag.Name) || ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name): - if ctx.GlobalIsSet(LegacyBootnodesV5Flag.Name) { - urls = SplitAndTrim(ctx.GlobalString(LegacyBootnodesV5Flag.Name)) - } else { - urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) - } - case ctx.GlobalBool(RopstenFlag.Name): - urls = params.RopstenBootnodes - case ctx.GlobalBool(RinkebyFlag.Name): - urls = params.RinkebyBootnodes - case ctx.GlobalBool(GoerliFlag.Name): - urls = params.GoerliBootnodes - case ctx.GlobalBool(YoloV2Flag.Name): - urls = params.YoloV2Bootnodes + case ctx.GlobalIsSet(BootnodesFlag.Name): + urls = SplitAndTrim(ctx.GlobalString(BootnodesFlag.Name)) case cfg.BootstrapNodesV5 != nil: return // already set, don't apply defaults. 
} - cfg.BootstrapNodesV5 = make([]*discv5.Node, 0, len(urls)) + cfg.BootstrapNodesV5 = make([]*enode.Node, 0, len(urls)) for _, url := range urls { if url != "" { - node, err := discv5.ParseNode(url) + node, err := enode.Parse(enode.ValidSchemes, url) if err != nil { log.Error("Bootstrap URL invalid", "enode", url, "err", err) continue @@ -893,11 +899,11 @@ func SplitAndTrim(input string) (ret []string) { // command line flags, returning empty if the HTTP endpoint is disabled. func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalBool(LegacyRPCEnabledFlag.Name) && cfg.HTTPHost == "" { - log.Warn("The flag --rpc is deprecated and will be removed in the future, please use --http") + log.Warn("The flag --rpc is deprecated and will be removed June 2021, please use --http") cfg.HTTPHost = "127.0.0.1" if ctx.GlobalIsSet(LegacyRPCListenAddrFlag.Name) { cfg.HTTPHost = ctx.GlobalString(LegacyRPCListenAddrFlag.Name) - log.Warn("The flag --rpcaddr is deprecated and will be removed in the future, please use --http.addr") + log.Warn("The flag --rpcaddr is deprecated and will be removed June 2021, please use --http.addr") } } if ctx.GlobalBool(HTTPEnabledFlag.Name) && cfg.HTTPHost == "" { @@ -909,7 +915,7 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(LegacyRPCPortFlag.Name) { cfg.HTTPPort = ctx.GlobalInt(LegacyRPCPortFlag.Name) - log.Warn("The flag --rpcport is deprecated and will be removed in the future, please use --http.port") + log.Warn("The flag --rpcport is deprecated and will be removed June 2021, please use --http.port") } if ctx.GlobalIsSet(HTTPPortFlag.Name) { cfg.HTTPPort = ctx.GlobalInt(HTTPPortFlag.Name) @@ -917,7 +923,7 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(LegacyRPCCORSDomainFlag.Name) { cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(LegacyRPCCORSDomainFlag.Name)) - log.Warn("The flag --rpccorsdomain is deprecated and will be removed in the future, please use --http.corsdomain") + log.Warn("The flag --rpccorsdomain is deprecated and will be removed June 2021, please use --http.corsdomain") } if ctx.GlobalIsSet(HTTPCORSDomainFlag.Name) { cfg.HTTPCors = SplitAndTrim(ctx.GlobalString(HTTPCORSDomainFlag.Name)) @@ -925,7 +931,7 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(LegacyRPCApiFlag.Name) { cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(LegacyRPCApiFlag.Name)) - log.Warn("The flag --rpcapi is deprecated and will be removed in the future, please use --http.api") + log.Warn("The flag --rpcapi is deprecated and will be removed June 2021, please use --http.api") } if ctx.GlobalIsSet(HTTPApiFlag.Name) { cfg.HTTPModules = SplitAndTrim(ctx.GlobalString(HTTPApiFlag.Name)) @@ -933,11 +939,18 @@ func setHTTP(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(LegacyRPCVirtualHostsFlag.Name) { cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(LegacyRPCVirtualHostsFlag.Name)) - log.Warn("The flag --rpcvhosts is deprecated and will be removed in the future, please use --http.vhosts") + log.Warn("The flag --rpcvhosts is deprecated and will be removed June 2021, please use --http.vhosts") } if ctx.GlobalIsSet(HTTPVirtualHostsFlag.Name) { cfg.HTTPVirtualHosts = SplitAndTrim(ctx.GlobalString(HTTPVirtualHostsFlag.Name)) } + + if ctx.GlobalIsSet(HTTPPathPrefixFlag.Name) { + cfg.HTTPPathPrefix = ctx.GlobalString(HTTPPathPrefixFlag.Name) + } + if ctx.GlobalIsSet(AllowUnprotectedTxs.Name) { + cfg.AllowUnprotectedTxs = ctx.GlobalBool(AllowUnprotectedTxs.Name) + } } // setGraphQL creates the GraphQL 
listener interface string from the set @@ -956,37 +969,25 @@ func setGraphQL(ctx *cli.Context, cfg *node.Config) { func setWS(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalBool(WSEnabledFlag.Name) && cfg.WSHost == "" { cfg.WSHost = "127.0.0.1" - if ctx.GlobalIsSet(LegacyWSListenAddrFlag.Name) { - cfg.WSHost = ctx.GlobalString(LegacyWSListenAddrFlag.Name) - log.Warn("The flag --wsaddr is deprecated and will be removed in the future, please use --ws.addr") - } if ctx.GlobalIsSet(WSListenAddrFlag.Name) { cfg.WSHost = ctx.GlobalString(WSListenAddrFlag.Name) } } - if ctx.GlobalIsSet(LegacyWSPortFlag.Name) { - cfg.WSPort = ctx.GlobalInt(LegacyWSPortFlag.Name) - log.Warn("The flag --wsport is deprecated and will be removed in the future, please use --ws.port") - } if ctx.GlobalIsSet(WSPortFlag.Name) { cfg.WSPort = ctx.GlobalInt(WSPortFlag.Name) } - if ctx.GlobalIsSet(LegacyWSAllowedOriginsFlag.Name) { - cfg.WSOrigins = SplitAndTrim(ctx.GlobalString(LegacyWSAllowedOriginsFlag.Name)) - log.Warn("The flag --wsorigins is deprecated and will be removed in the future, please use --ws.origins") - } if ctx.GlobalIsSet(WSAllowedOriginsFlag.Name) { cfg.WSOrigins = SplitAndTrim(ctx.GlobalString(WSAllowedOriginsFlag.Name)) } - if ctx.GlobalIsSet(LegacyWSApiFlag.Name) { - cfg.WSModules = SplitAndTrim(ctx.GlobalString(LegacyWSApiFlag.Name)) - log.Warn("The flag --wsapi is deprecated and will be removed in the future, please use --ws.api") - } if ctx.GlobalIsSet(WSApiFlag.Name) { cfg.WSModules = SplitAndTrim(ctx.GlobalString(WSApiFlag.Name)) } + + if ctx.GlobalIsSet(WSPathPrefixFlag.Name) { + cfg.WSPathPrefix = ctx.GlobalString(WSPathPrefixFlag.Name) + } } // setIPC creates an IPC path configuration from the set command line flags, @@ -1002,11 +1003,7 @@ func setIPC(ctx *cli.Context, cfg *node.Config) { } // setLes configures the les server and ultra light client settings from the command line flags. 
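With the legacy --ws* aliases removed, setWS above reduces to the one pattern used throughout this file: only override a config field when its flag was set explicitly, otherwise keep the default (or the value loaded from a config file). A self-contained sketch of that pattern with gopkg.in/urfave/cli.v1; the flag name and default here are made up for illustration:

package main

import (
	"fmt"
	"os"

	"gopkg.in/urfave/cli.v1"
)

type config struct{ WSHost string }

var wsAddrFlag = cli.StringFlag{Name: "ws.addr", Usage: "WS-RPC server listening interface"}

func main() {
	app := cli.NewApp()
	app.Flags = []cli.Flag{wsAddrFlag}
	app.Action = func(ctx *cli.Context) error {
		cfg := config{WSHost: "127.0.0.1"} // assumed default, e.g. from a config file
		// Only override the configured value when the flag was set explicitly,
		// mirroring the setWS/setHTTP helpers above.
		if ctx.GlobalIsSet(wsAddrFlag.Name) {
			cfg.WSHost = ctx.GlobalString(wsAddrFlag.Name)
		}
		fmt.Println("ws host:", cfg.WSHost)
		return nil
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
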
-func setLes(ctx *cli.Context, cfg *eth.Config) { - if ctx.GlobalIsSet(LegacyLightServFlag.Name) { - cfg.LightServ = ctx.GlobalInt(LegacyLightServFlag.Name) - log.Warn("The flag --lightserv is deprecated and will be removed in the future, please use --light.serve") - } +func setLes(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.GlobalIsSet(LightServeFlag.Name) { cfg.LightServ = ctx.GlobalInt(LightServeFlag.Name) } @@ -1016,10 +1013,6 @@ func setLes(ctx *cli.Context, cfg *eth.Config) { if ctx.GlobalIsSet(LightEgressFlag.Name) { cfg.LightEgress = ctx.GlobalInt(LightEgressFlag.Name) } - if ctx.GlobalIsSet(LegacyLightPeersFlag.Name) { - cfg.LightPeers = ctx.GlobalInt(LegacyLightPeersFlag.Name) - log.Warn("The flag --lightpeers is deprecated and will be removed in the future, please use --light.maxpeers") - } if ctx.GlobalIsSet(LightMaxPeersFlag.Name) { cfg.LightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name) } @@ -1030,8 +1023,8 @@ func setLes(ctx *cli.Context, cfg *eth.Config) { cfg.UltraLightFraction = ctx.GlobalInt(UltraLightFractionFlag.Name) } if cfg.UltraLightFraction <= 0 && cfg.UltraLightFraction > 100 { - log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", eth.DefaultConfig.UltraLightFraction) - cfg.UltraLightFraction = eth.DefaultConfig.UltraLightFraction + log.Error("Ultra light fraction is invalid", "had", cfg.UltraLightFraction, "updated", ethconfig.Defaults.UltraLightFraction) + cfg.UltraLightFraction = ethconfig.Defaults.UltraLightFraction } if ctx.GlobalIsSet(UltraLightOnlyAnnounceFlag.Name) { cfg.UltraLightOnlyAnnounce = ctx.GlobalBool(UltraLightOnlyAnnounceFlag.Name) @@ -1039,11 +1032,14 @@ func setLes(ctx *cli.Context, cfg *eth.Config) { if ctx.GlobalIsSet(LightNoPruneFlag.Name) { cfg.LightNoPrune = ctx.GlobalBool(LightNoPruneFlag.Name) } + if ctx.GlobalIsSet(LightNoSyncServeFlag.Name) { + cfg.LightNoSyncServe = ctx.GlobalBool(LightNoSyncServeFlag.Name) + } } -// makeDatabaseHandles raises out the number of allowed file handles per process +// MakeDatabaseHandles raises out the number of allowed file handles per process // for Geth and returns half of the allowance to assign to the database. -func makeDatabaseHandles() int { +func MakeDatabaseHandles() int { limit, err := fdlimit.Maximum() if err != nil { Fatalf("Failed to retrieve file descriptor allowance: %v", err) @@ -1082,14 +1078,9 @@ func MakeAddress(ks *keystore.KeyStore, account string) (accounts.Account, error // setEtherbase retrieves the etherbase either from the directly specified // command line flags or from the keystore if CLI indexed. 
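MakeDatabaseHandles (now exported above) raises the process file-descriptor limit toward its hard maximum and hands half of the allowance to the database layer. The same idea, as a rough Unix-only sketch on top of the standard syscall package rather than geth's fdlimit helper:

package main

import (
	"fmt"
	"log"
	"syscall"
)

// databaseHandles bumps the soft RLIMIT_NOFILE up to the hard limit and
// returns half of it, roughly what MakeDatabaseHandles does via fdlimit.
// (fdlimit additionally papers over per-platform quirks, e.g. macOS caps.)
func databaseHandles() (int, error) {
	var limit syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	limit.Cur = limit.Max
	if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &limit); err != nil {
		return 0, err
	}
	return int(limit.Max / 2), nil
}

func main() {
	handles, err := databaseHandles()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("database handles:", handles)
}
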
-func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *eth.Config) { - // Extract the current etherbase, new flag overriding legacy one +func setEtherbase(ctx *cli.Context, ks *keystore.KeyStore, cfg *ethconfig.Config) { + // Extract the current etherbase var etherbase string - if ctx.GlobalIsSet(LegacyMinerEtherbaseFlag.Name) { - etherbase = ctx.GlobalString(LegacyMinerEtherbaseFlag.Name) - log.Warn("The flag --etherbase is deprecated and will be removed in the future, please use --miner.etherbase") - - } if ctx.GlobalIsSet(MinerEtherbaseFlag.Name) { etherbase = ctx.GlobalString(MinerEtherbaseFlag.Name) } @@ -1133,27 +1124,24 @@ func SetP2PConfig(ctx *cli.Context, cfg *p2p.Config) { setBootstrapNodesV5(ctx, cfg) lightClient := ctx.GlobalString(SyncModeFlag.Name) == "light" - lightServer := (ctx.GlobalInt(LegacyLightServFlag.Name) != 0 || ctx.GlobalInt(LightServeFlag.Name) != 0) + lightServer := (ctx.GlobalInt(LightServeFlag.Name) != 0) - lightPeers := ctx.GlobalInt(LegacyLightPeersFlag.Name) - if ctx.GlobalIsSet(LightMaxPeersFlag.Name) { - lightPeers = ctx.GlobalInt(LightMaxPeersFlag.Name) - } - if lightClient && !ctx.GlobalIsSet(LegacyLightPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) { + lightPeers := ctx.GlobalInt(LightMaxPeersFlag.Name) + if lightClient && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) { // dynamic default - for clients we use 1/10th of the default for servers lightPeers /= 10 } if ctx.GlobalIsSet(MaxPeersFlag.Name) { cfg.MaxPeers = ctx.GlobalInt(MaxPeersFlag.Name) - if lightServer && !ctx.GlobalIsSet(LegacyLightPeersFlag.Name) && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) { + if lightServer && !ctx.GlobalIsSet(LightMaxPeersFlag.Name) { cfg.MaxPeers += lightPeers } } else { if lightServer { cfg.MaxPeers += lightPeers } - if lightClient && (ctx.GlobalIsSet(LegacyLightPeersFlag.Name) || ctx.GlobalIsSet(LightMaxPeersFlag.Name)) && cfg.MaxPeers < lightPeers { + if lightClient && ctx.GlobalIsSet(LightMaxPeersFlag.Name) && cfg.MaxPeers < lightPeers { cfg.MaxPeers = lightPeers } } @@ -1221,8 +1209,11 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { if ctx.GlobalIsSet(LightKDFFlag.Name) { cfg.UseLightweightKDF = ctx.GlobalBool(LightKDFFlag.Name) } - if ctx.GlobalIsSet(NoUSBFlag.Name) { - cfg.NoUSB = ctx.GlobalBool(NoUSBFlag.Name) + if ctx.GlobalIsSet(NoUSBFlag.Name) || cfg.NoUSB { + log.Warn("Option nousb is deprecated and USB is deactivated by default. Use --usb to enable") + } + if ctx.GlobalIsSet(USBFlag.Name) { + cfg.USB = ctx.GlobalBool(USBFlag.Name) } if ctx.GlobalIsSet(InsecureUnlockAllowedFlag.Name) { cfg.InsecureUnlockAllowed = ctx.GlobalBool(InsecureUnlockAllowedFlag.Name) @@ -1255,7 +1246,7 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) { cfg.DataDir = ctx.GlobalString(DataDirFlag.Name) case ctx.GlobalBool(DeveloperFlag.Name): cfg.DataDir = "" // unless explicitly requested, use memory databases - case (ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name)) && cfg.DataDir == node.DefaultDataDir(): + case ctx.GlobalBool(RopstenFlag.Name) && cfg.DataDir == node.DefaultDataDir(): // Maintain compatibility with older Geth configurations storing the // Ropsten database in `testnet` instead of `ropsten`. 
legacyPath := filepath.Join(node.DefaultDataDir(), "testnet") @@ -1265,12 +1256,14 @@ func setDataDir(ctx *cli.Context, cfg *node.Config) { } else { cfg.DataDir = filepath.Join(node.DefaultDataDir(), "ropsten") } + + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "ropsten") case ctx.GlobalBool(RinkebyFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "rinkeby") case ctx.GlobalBool(GoerliFlag.Name) && cfg.DataDir == node.DefaultDataDir(): cfg.DataDir = filepath.Join(node.DefaultDataDir(), "goerli") - case ctx.GlobalBool(YoloV2Flag.Name) && cfg.DataDir == node.DefaultDataDir(): - cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v2") + case ctx.GlobalBool(YoloV3Flag.Name) && cfg.DataDir == node.DefaultDataDir(): + cfg.DataDir = filepath.Join(node.DefaultDataDir(), "yolo-v3") } } @@ -1278,20 +1271,12 @@ func setGPO(ctx *cli.Context, cfg *gasprice.Config, light bool) { // If we are running the light client, apply another group // settings for gas oracle. if light { - cfg.Blocks = eth.DefaultLightGPOConfig.Blocks - cfg.Percentile = eth.DefaultLightGPOConfig.Percentile - } - if ctx.GlobalIsSet(LegacyGpoBlocksFlag.Name) { - cfg.Blocks = ctx.GlobalInt(LegacyGpoBlocksFlag.Name) - log.Warn("The flag --gpoblocks is deprecated and will be removed in the future, please use --gpo.blocks") + cfg.Blocks = ethconfig.LightClientGPO.Blocks + cfg.Percentile = ethconfig.LightClientGPO.Percentile } if ctx.GlobalIsSet(GpoBlocksFlag.Name) { cfg.Blocks = ctx.GlobalInt(GpoBlocksFlag.Name) } - if ctx.GlobalIsSet(LegacyGpoPercentileFlag.Name) { - cfg.Percentile = ctx.GlobalInt(LegacyGpoPercentileFlag.Name) - log.Warn("The flag --gpopercentile is deprecated and will be removed in the future, please use --gpo.percentile") - } if ctx.GlobalIsSet(GpoPercentileFlag.Name) { cfg.Percentile = ctx.GlobalInt(GpoPercentileFlag.Name) } @@ -1343,7 +1328,7 @@ func setTxPool(ctx *cli.Context, cfg *core.TxPoolConfig) { } } -func setEthash(ctx *cli.Context, cfg *eth.Config) { +func setEthash(ctx *cli.Context, cfg *ethconfig.Config) { if ctx.GlobalIsSet(EthashCacheDirFlag.Name) { cfg.Ethash.CacheDir = ctx.GlobalString(EthashCacheDirFlag.Name) } @@ -1374,27 +1359,15 @@ func setMiner(ctx *cli.Context, cfg *miner.Config) { if ctx.GlobalIsSet(MinerNotifyFlag.Name) { cfg.Notify = strings.Split(ctx.GlobalString(MinerNotifyFlag.Name), ",") } - if ctx.GlobalIsSet(LegacyMinerExtraDataFlag.Name) { - cfg.ExtraData = []byte(ctx.GlobalString(LegacyMinerExtraDataFlag.Name)) - log.Warn("The flag --extradata is deprecated and will be removed in the future, please use --miner.extradata") - } if ctx.GlobalIsSet(MinerExtraDataFlag.Name) { cfg.ExtraData = []byte(ctx.GlobalString(MinerExtraDataFlag.Name)) } - if ctx.GlobalIsSet(LegacyMinerGasTargetFlag.Name) { - cfg.GasFloor = ctx.GlobalUint64(LegacyMinerGasTargetFlag.Name) - log.Warn("The flag --targetgaslimit is deprecated and will be removed in the future, please use --miner.gastarget") - } if ctx.GlobalIsSet(MinerGasTargetFlag.Name) { cfg.GasFloor = ctx.GlobalUint64(MinerGasTargetFlag.Name) } if ctx.GlobalIsSet(MinerGasLimitFlag.Name) { cfg.GasCeil = ctx.GlobalUint64(MinerGasLimitFlag.Name) } - if ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) { - cfg.GasPrice = GlobalBig(ctx, LegacyMinerGasPriceFlag.Name) - log.Warn("The flag --gasprice is deprecated and will be removed in the future, please use --miner.gasprice") - } if ctx.GlobalIsSet(MinerGasPriceFlag.Name) { cfg.GasPrice = GlobalBig(ctx, MinerGasPriceFlag.Name) } @@ -1406,7 +1379,7 @@ 
func setMiner(ctx *cli.Context, cfg *miner.Config) { } } -func setWhitelist(ctx *cli.Context, cfg *eth.Config) { +func setWhitelist(ctx *cli.Context, cfg *ethconfig.Config) { whitelist := ctx.GlobalString(WhitelistFlag.Name) if whitelist == "" { return @@ -1470,27 +1443,19 @@ func CheckExclusive(ctx *cli.Context, args ...interface{}) { } } -// SetShhConfig applies shh-related command line flags to the config. -func SetShhConfig(ctx *cli.Context, stack *node.Node) { - if ctx.GlobalIsSet(WhisperEnabledFlag.Name) || - ctx.GlobalIsSet(WhisperMaxMessageSizeFlag.Name) || - ctx.GlobalIsSet(WhisperMinPOWFlag.Name) || - ctx.GlobalIsSet(WhisperRestrictConnectionBetweenLightClientsFlag.Name) { - log.Warn("Whisper support has been deprecated and the code has been moved to github.com/ethereum/whisper") - } -} - // SetEthConfig applies eth-related command line flags to the config. -func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { +func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *ethconfig.Config) { // Avoid conflicting network flags - CheckExclusive(ctx, DeveloperFlag, LegacyTestnetFlag, RopstenFlag, RinkebyFlag, GoerliFlag, YoloV2Flag) - CheckExclusive(ctx, LegacyLightServFlag, LightServeFlag, SyncModeFlag, "light") + CheckExclusive(ctx, MainnetFlag, DeveloperFlag, RopstenFlag, RinkebyFlag, GoerliFlag, YoloV3Flag) + CheckExclusive(ctx, LightServeFlag, SyncModeFlag, "light") CheckExclusive(ctx, DeveloperFlag, ExternalSignerFlag) // Can't use both ephemeral unlocked and external signer - CheckExclusive(ctx, GCModeFlag, "archive", TxLookupLimitFlag) - // todo(rjl493456442) make it available for les server - // Ancient tx indices pruning is not available for les server now - // since light client relies on the server for transaction status query. - CheckExclusive(ctx, LegacyLightServFlag, LightServeFlag, TxLookupLimitFlag) + if ctx.GlobalString(GCModeFlag.Name) == "archive" && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 { + ctx.GlobalSet(TxLookupLimitFlag.Name, "0") + log.Warn("Disable transaction unindexing for archive node") + } + if ctx.GlobalIsSet(LightServeFlag.Name) && ctx.GlobalUint64(TxLookupLimitFlag.Name) != 0 { + log.Warn("LES server cannot serve old transaction status and cannot connect below les/4 protocol version if transaction lookup index is limited") + } var ks *keystore.KeyStore if keystores := stack.AccountManager().Backends(keystore.KeyStoreType); len(keystores) > 0 { ks = keystores[0].(*keystore.KeyStore) @@ -1512,7 +1477,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheDatabaseFlag.Name) { cfg.DatabaseCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 } - cfg.DatabaseHandles = makeDatabaseHandles() + cfg.DatabaseHandles = MakeDatabaseHandles() if ctx.GlobalIsSet(AncientFlag.Name) { cfg.DatabaseFreezer = ctx.GlobalString(AncientFlag.Name) } @@ -1526,6 +1491,12 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(CacheNoPrefetchFlag.Name) { cfg.NoPrefetch = ctx.GlobalBool(CacheNoPrefetchFlag.Name) } + // Read the value from the flag no matter if it's set or not. 
+ cfg.Preimages = ctx.GlobalBool(CachePreimagesFlag.Name) + if cfg.NoPruning && !cfg.Preimages { + cfg.Preimages = true + log.Info("Enabling recording of key preimages since archive mode is used") + } if ctx.GlobalIsSet(TxLookupLimitFlag.Name) { cfg.TxLookupLimit = ctx.GlobalUint64(TxLookupLimitFlag.Name) } @@ -1544,9 +1515,14 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheSnapshotFlag.Name) { cfg.SnapshotCache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheSnapshotFlag.Name) / 100 } - if !ctx.GlobalIsSet(SnapshotFlag.Name) { - cfg.TrieCleanCache += cfg.SnapshotCache - cfg.SnapshotCache = 0 // Disabled + if !ctx.GlobalBool(SnapshotFlag.Name) { + // If snap-sync is requested, this flag is also required + if cfg.SyncMode == downloader.SnapSync { + log.Info("Snap sync requested, enabling --snapshot") + } else { + cfg.TrieCleanCache += cfg.SnapshotCache + cfg.SnapshotCache = 0 // Disabled + } } if ctx.GlobalIsSet(DocRootFlag.Name) { cfg.DocRoot = ctx.GlobalString(DocRootFlag.Name) @@ -1574,18 +1550,25 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { if ctx.GlobalIsSet(RPCGlobalTxFeeCapFlag.Name) { cfg.RPCTxFeeCap = ctx.GlobalFloat64(RPCGlobalTxFeeCapFlag.Name) } - if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) { + if ctx.GlobalIsSet(NoDiscoverFlag.Name) { + cfg.EthDiscoveryURLs, cfg.SnapDiscoveryURLs = []string{}, []string{} + } else if ctx.GlobalIsSet(DNSDiscoveryFlag.Name) { urls := ctx.GlobalString(DNSDiscoveryFlag.Name) if urls == "" { - cfg.DiscoveryURLs = []string{} + cfg.EthDiscoveryURLs = []string{} } else { - cfg.DiscoveryURLs = SplitAndTrim(urls) + cfg.EthDiscoveryURLs = SplitAndTrim(urls) } } - // Override any default configs for hard coded networks. switch { - case ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name): + case ctx.GlobalBool(MainnetFlag.Name): + if !ctx.GlobalIsSet(NetworkIdFlag.Name) { + cfg.NetworkId = 1 + } + cfg.Genesis = core.DefaultGenesisBlock() + SetDNSDiscoveryDefaults(cfg, params.MainnetGenesisHash) + case ctx.GlobalBool(RopstenFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 3 } @@ -1603,11 +1586,11 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { } cfg.Genesis = core.DefaultGoerliGenesisBlock() SetDNSDiscoveryDefaults(cfg, params.GoerliGenesisHash) - case ctx.GlobalBool(YoloV2Flag.Name): + case ctx.GlobalBool(YoloV3Flag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { - cfg.NetworkId = 133519467574834 // "yolov2" + cfg.NetworkId = new(big.Int).SetBytes([]byte("yolov3x")).Uint64() // "yolov3x" } - cfg.Genesis = core.DefaultYoloV2GenesisBlock() + cfg.Genesis = core.DefaultYoloV3GenesisBlock() case ctx.GlobalBool(DeveloperFlag.Name): if !ctx.GlobalIsSet(NetworkIdFlag.Name) { cfg.NetworkId = 1337 @@ -1651,7 +1634,7 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { } chaindb.Close() } - if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) && !ctx.GlobalIsSet(LegacyMinerGasPriceFlag.Name) { + if !ctx.GlobalIsSet(MinerGasPriceFlag.Name) { cfg.Miner.GasPrice = big.NewInt(1) } default: @@ -1663,41 +1646,46 @@ func SetEthConfig(ctx *cli.Context, stack *node.Node, cfg *eth.Config) { // SetDNSDiscoveryDefaults configures DNS discovery with the given URL if // no URLs are set. 
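The YoloV3 network id above is no longer a hard-coded literal: it is the big-endian integer value of the ASCII bytes of "yolov3x", the same trick the old code documented for "yolov2" (133519467574834). A quick check of both values:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Big-endian interpretation of the ASCII bytes, as used for cfg.NetworkId above.
	v2 := new(big.Int).SetBytes([]byte("yolov2")).Uint64()
	v3 := new(big.Int).SetBytes([]byte("yolov3x")).Uint64()
	fmt.Println(v2) // 133519467574834  (matches the old yolov2 constant)
	fmt.Println(v3) // 34180983699157880
}
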
-func SetDNSDiscoveryDefaults(cfg *eth.Config, genesis common.Hash) { - if cfg.DiscoveryURLs != nil { +func SetDNSDiscoveryDefaults(cfg *ethconfig.Config, genesis common.Hash) { + if cfg.EthDiscoveryURLs != nil { return // already set through flags/config } - protocol := "all" if cfg.SyncMode == downloader.LightSync { protocol = "les" } if url := params.KnownDNSNetwork(genesis, protocol); url != "" { - cfg.DiscoveryURLs = []string{url} + cfg.EthDiscoveryURLs = []string{url} + } + if cfg.SyncMode == downloader.SnapSync { + if url := params.KnownDNSNetwork(genesis, "snap"); url != "" { + cfg.SnapDiscoveryURLs = []string{url} + } } } // RegisterEthService adds an Ethereum client to the stack. -func RegisterEthService(stack *node.Node, cfg *eth.Config) ethapi.Backend { +func RegisterEthService(stack *node.Node, cfg *ethconfig.Config) ethapi.Backend { if cfg.SyncMode == downloader.LightSync { backend, err := les.New(stack, cfg) if err != nil { Fatalf("Failed to register the Ethereum service: %v", err) } + stack.RegisterAPIs(tracers.APIs(backend.ApiBackend)) return backend.ApiBackend - } else { - backend, err := eth.New(stack, cfg) + } + backend, err := eth.New(stack, cfg) + if err != nil { + Fatalf("Failed to register the Ethereum service: %v", err) + } + if cfg.LightServ > 0 { + _, err := les.NewLesServer(stack, backend, cfg) if err != nil { - Fatalf("Failed to register the Ethereum service: %v", err) + Fatalf("Failed to create the LES server: %v", err) } - if cfg.LightServ > 0 { - _, err := les.NewLesServer(stack, backend, cfg) - if err != nil { - Fatalf("Failed to create the LES server: %v", err) - } - } - return backend.APIBackend } + stack.RegisterAPIs(tracers.APIs(backend.APIBackend)) + return backend.APIBackend } // RegisterEthStatsService configures the Ethereum Stats daemon and adds it to @@ -1764,7 +1752,7 @@ func SplitTagsFlag(tagsFlag string) map[string]string { func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { var ( cache = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheDatabaseFlag.Name) / 100 - handles = makeDatabaseHandles() + handles = MakeDatabaseHandles() err error chainDb ethdb.Database @@ -1785,14 +1773,14 @@ func MakeChainDatabase(ctx *cli.Context, stack *node.Node) ethdb.Database { func MakeGenesis(ctx *cli.Context) *core.Genesis { var genesis *core.Genesis switch { - case ctx.GlobalBool(LegacyTestnetFlag.Name) || ctx.GlobalBool(RopstenFlag.Name): + case ctx.GlobalBool(RopstenFlag.Name): genesis = core.DefaultRopstenGenesisBlock() case ctx.GlobalBool(RinkebyFlag.Name): genesis = core.DefaultRinkebyGenesisBlock() case ctx.GlobalBool(GoerliFlag.Name): genesis = core.DefaultGoerliGenesisBlock() - case ctx.GlobalBool(YoloV2Flag.Name): - genesis = core.DefaultYoloV2GenesisBlock() + case ctx.GlobalBool(YoloV3Flag.Name): + genesis = core.DefaultYoloV3GenesisBlock() case ctx.GlobalBool(DeveloperFlag.Name): Fatalf("Developer chains are ephemeral") } @@ -1814,14 +1802,14 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B engine = ethash.NewFaker() if !ctx.GlobalBool(FakePoWFlag.Name) { engine = ethash.New(ethash.Config{ - CacheDir: stack.ResolvePath(eth.DefaultConfig.Ethash.CacheDir), - CachesInMem: eth.DefaultConfig.Ethash.CachesInMem, - CachesOnDisk: eth.DefaultConfig.Ethash.CachesOnDisk, - CachesLockMmap: eth.DefaultConfig.Ethash.CachesLockMmap, - DatasetDir: stack.ResolvePath(eth.DefaultConfig.Ethash.DatasetDir), - DatasetsInMem: eth.DefaultConfig.Ethash.DatasetsInMem, - DatasetsOnDisk: 
eth.DefaultConfig.Ethash.DatasetsOnDisk, - DatasetsLockMmap: eth.DefaultConfig.Ethash.DatasetsLockMmap, + CacheDir: stack.ResolvePath(ethconfig.Defaults.Ethash.CacheDir), + CachesInMem: ethconfig.Defaults.Ethash.CachesInMem, + CachesOnDisk: ethconfig.Defaults.Ethash.CachesOnDisk, + CachesLockMmap: ethconfig.Defaults.Ethash.CachesLockMmap, + DatasetDir: stack.ResolvePath(ethconfig.Defaults.Ethash.DatasetDir), + DatasetsInMem: ethconfig.Defaults.Ethash.DatasetsInMem, + DatasetsOnDisk: ethconfig.Defaults.Ethash.DatasetsOnDisk, + DatasetsLockMmap: ethconfig.Defaults.Ethash.DatasetsLockMmap, }, nil, false) } } @@ -1829,14 +1817,19 @@ func MakeChain(ctx *cli.Context, stack *node.Node, readOnly bool) (chain *core.B Fatalf("--%s must be either 'full' or 'archive'", GCModeFlag.Name) } cache := &core.CacheConfig{ - TrieCleanLimit: eth.DefaultConfig.TrieCleanCache, + TrieCleanLimit: ethconfig.Defaults.TrieCleanCache, TrieCleanNoPrefetch: ctx.GlobalBool(CacheNoPrefetchFlag.Name), - TrieDirtyLimit: eth.DefaultConfig.TrieDirtyCache, + TrieDirtyLimit: ethconfig.Defaults.TrieDirtyCache, TrieDirtyDisabled: ctx.GlobalString(GCModeFlag.Name) == "archive", - TrieTimeLimit: eth.DefaultConfig.TrieTimeout, - SnapshotLimit: eth.DefaultConfig.SnapshotCache, + TrieTimeLimit: ethconfig.Defaults.TrieTimeout, + SnapshotLimit: ethconfig.Defaults.SnapshotCache, + Preimages: ctx.GlobalBool(CachePreimagesFlag.Name), + } + if cache.TrieDirtyDisabled && !cache.Preimages { + cache.Preimages = true + log.Info("Enabling recording of key preimages since archive mode is used") } - if !ctx.GlobalIsSet(SnapshotFlag.Name) { + if !ctx.GlobalBool(SnapshotFlag.Name) { cache.SnapshotLimit = 0 // Disabled } if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheTrieFlag.Name) { @@ -1868,9 +1861,8 @@ func MakeConsolePreloads(ctx *cli.Context) []string { // Otherwise resolve absolute paths and return them var preloads []string - assets := ctx.GlobalString(JSpathFlag.Name) for _, file := range strings.Split(ctx.GlobalString(PreloadJSFlag.Name), ",") { - preloads = append(preloads, common.AbsolutePath(assets, strings.TrimSpace(file))) + preloads = append(preloads, strings.TrimSpace(file)) } return preloads } diff --git a/cmd/utils/flags_legacy.go b/cmd/utils/flags_legacy.go index 1376d47c05..fb5fde6576 100644 --- a/cmd/utils/flags_legacy.go +++ b/cmd/utils/flags_legacy.go @@ -20,7 +20,6 @@ import ( "fmt" "strings" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/node" "gopkg.in/urfave/cli.v1" ) @@ -34,143 +33,39 @@ var ShowDeprecated = cli.Command{ Description: "Show flags that have been deprecated and will soon be removed", } -var DeprecatedFlags = []cli.Flag{ - LegacyTestnetFlag, - LegacyLightServFlag, - LegacyLightPeersFlag, - LegacyMinerThreadsFlag, - LegacyMinerGasTargetFlag, - LegacyMinerGasPriceFlag, - LegacyMinerEtherbaseFlag, - LegacyMinerExtraDataFlag, -} +var DeprecatedFlags = []cli.Flag{} var ( - // (Deprecated April 2018) - LegacyMinerThreadsFlag = cli.IntFlag{ - Name: "minerthreads", - Usage: "Number of CPU threads to use for mining (deprecated, use --miner.threads)", - Value: 0, - } - LegacyMinerGasTargetFlag = cli.Uint64Flag{ - Name: "targetgaslimit", - Usage: "Target gas floor for mined blocks (deprecated, use --miner.gastarget)", - Value: eth.DefaultConfig.Miner.GasFloor, - } - LegacyMinerGasPriceFlag = BigFlag{ - Name: "gasprice", - Usage: "Minimum gas price for mining a transaction (deprecated, use --miner.gasprice)", - Value: eth.DefaultConfig.Miner.GasPrice, - } - LegacyMinerEtherbaseFlag = 
cli.StringFlag{ - Name: "etherbase", - Usage: "Public address for block mining rewards (default = first account, deprecated, use --miner.etherbase)", - Value: "0", - } - LegacyMinerExtraDataFlag = cli.StringFlag{ - Name: "extradata", - Usage: "Block extra data set by the miner (default = client version, deprecated, use --miner.extradata)", - } - - // (Deprecated June 2019) - LegacyLightServFlag = cli.IntFlag{ - Name: "lightserv", - Usage: "Maximum percentage of time allowed for serving LES requests (deprecated, use --light.serve)", - Value: eth.DefaultConfig.LightServ, - } - LegacyLightPeersFlag = cli.IntFlag{ - Name: "lightpeers", - Usage: "Maximum number of light clients to serve, or light servers to attach to (deprecated, use --light.maxpeers)", - Value: eth.DefaultConfig.LightPeers, - } - - // (Deprecated April 2020) - LegacyTestnetFlag = cli.BoolFlag{ // TODO(q9f): Remove after Ropsten is discontinued. - Name: "testnet", - Usage: "Pre-configured test network (Deprecated: Please choose one of --goerli, --rinkeby, or --ropsten.)", - } - // (Deprecated May 2020, shown in aliased flags section) LegacyRPCEnabledFlag = cli.BoolFlag{ Name: "rpc", - Usage: "Enable the HTTP-RPC server (deprecated, use --http)", + Usage: "Enable the HTTP-RPC server (deprecated and will be removed June 2021, use --http)", } LegacyRPCListenAddrFlag = cli.StringFlag{ Name: "rpcaddr", - Usage: "HTTP-RPC server listening interface (deprecated, use --http.addr)", + Usage: "HTTP-RPC server listening interface (deprecated and will be removed June 2021, use --http.addr)", Value: node.DefaultHTTPHost, } LegacyRPCPortFlag = cli.IntFlag{ Name: "rpcport", - Usage: "HTTP-RPC server listening port (deprecated, use --http.port)", + Usage: "HTTP-RPC server listening port (deprecated and will be removed June 2021, use --http.port)", Value: node.DefaultHTTPPort, } LegacyRPCCORSDomainFlag = cli.StringFlag{ Name: "rpccorsdomain", - Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated, use --http.corsdomain)", + Usage: "Comma separated list of domains from which to accept cross origin requests (browser enforced) (deprecated and will be removed June 2021, use --http.corsdomain)", Value: "", } LegacyRPCVirtualHostsFlag = cli.StringFlag{ Name: "rpcvhosts", - Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. (deprecated, use --http.vhosts)", + Usage: "Comma separated list of virtual hostnames from which to accept requests (server enforced). Accepts '*' wildcard. 
(deprecated and will be removed June 2021, use --http.vhosts)", Value: strings.Join(node.DefaultConfig.HTTPVirtualHosts, ","), } LegacyRPCApiFlag = cli.StringFlag{ Name: "rpcapi", - Usage: "API's offered over the HTTP-RPC interface (deprecated, use --http.api)", - Value: "", - } - LegacyWSListenAddrFlag = cli.StringFlag{ - Name: "wsaddr", - Usage: "WS-RPC server listening interface (deprecated, use --ws.addr)", - Value: node.DefaultWSHost, - } - LegacyWSPortFlag = cli.IntFlag{ - Name: "wsport", - Usage: "WS-RPC server listening port (deprecated, use --ws.port)", - Value: node.DefaultWSPort, - } - LegacyWSApiFlag = cli.StringFlag{ - Name: "wsapi", - Usage: "API's offered over the WS-RPC interface (deprecated, use --ws.api)", + Usage: "API's offered over the HTTP-RPC interface (deprecated and will be removed June 2021, use --http.api)", Value: "", } - LegacyWSAllowedOriginsFlag = cli.StringFlag{ - Name: "wsorigins", - Usage: "Origins from which to accept websockets requests (deprecated, use --ws.origins)", - Value: "", - } - LegacyGpoBlocksFlag = cli.IntFlag{ - Name: "gpoblocks", - Usage: "Number of recent blocks to check for gas prices (deprecated, use --gpo.blocks)", - Value: eth.DefaultConfig.GPO.Blocks, - } - LegacyGpoPercentileFlag = cli.IntFlag{ - Name: "gpopercentile", - Usage: "Suggested gas price is the given percentile of a set of recent transaction gas prices (deprecated, use --gpo.percentile)", - Value: eth.DefaultConfig.GPO.Percentile, - } - LegacyBootnodesV4Flag = cli.StringFlag{ - Name: "bootnodesv4", - Usage: "Comma separated enode URLs for P2P v4 discovery bootstrap (light server, full nodes) (deprecated, use --bootnodes)", - Value: "", - } - LegacyBootnodesV5Flag = cli.StringFlag{ - Name: "bootnodesv5", - Usage: "Comma separated enode URLs for P2P v5 discovery bootstrap (light server, light nodes) (deprecated, use --bootnodes)", - Value: "", - } - - // (Deprecated July 2020, shown in aliased flags section) - LegacyGraphQLListenAddrFlag = cli.StringFlag{ - Name: "graphql.addr", - Usage: "GraphQL server listening interface (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)", - } - LegacyGraphQLPortFlag = cli.IntFlag{ - Name: "graphql.port", - Usage: "GraphQL server listening port (deprecated, graphql can only be enabled on the HTTP-RPC server endpoint, use --graphql)", - Value: node.DefaultHTTPPort, - } ) // showDeprecated displays deprecated flags that will be soon removed from the codebase. 
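// The deprecated --rpc* spellings retained above map one-to-one onto the
// --http* flags named in their usage strings; for illustration only
// (hypothetical invocation, addresses and API lists arbitrary):
//
//   geth --rpc  --rpcaddr 127.0.0.1  --rpcapi eth,net,web3        # deprecated spelling
//   geth --http --http.addr 127.0.0.1 --http.api eth,net,web3     # current spelling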
@@ -179,8 +74,7 @@ func showDeprecated(*cli.Context) { fmt.Println("The following flags are deprecated and will be removed in the future!") fmt.Println("--------------------------------------------------------------------") fmt.Println() - - for _, flag := range DeprecatedFlags { - fmt.Println(flag.String()) - } + // TODO remove when there are newly deprecated flags + fmt.Println("no deprecated flags to show at this time") + fmt.Println() } diff --git a/common/compiler/solidity.go b/common/compiler/solidity.go index b689f258a3..01de3d4c65 100644 --- a/common/compiler/solidity.go +++ b/common/compiler/solidity.go @@ -44,6 +44,20 @@ type solcOutput struct { Version string } +// solidity v.0.8 changes the way ABI, Devdoc and Userdoc are serialized +type solcOutputV8 struct { + Contracts map[string]struct { + BinRuntime string `json:"bin-runtime"` + SrcMapRuntime string `json:"srcmap-runtime"` + Bin, SrcMap, Metadata string + Abi interface{} + Devdoc interface{} + Userdoc interface{} + Hashes map[string]string + } + Version string +} + func (s *Solidity) makeArgs() []string { p := []string{ "--combined-json", "bin,bin-runtime,srcmap,srcmap-runtime,abi,userdoc,devdoc", @@ -125,7 +139,6 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro if err := cmd.Run(); err != nil { return nil, fmt.Errorf("solc: %v\n%s", err, stderr.Bytes()) } - return ParseCombinedJSON(stdout.Bytes(), source, s.Version, s.Version, strings.Join(s.makeArgs(), " ")) } @@ -141,7 +154,8 @@ func (s *Solidity) run(cmd *exec.Cmd, source string) (map[string]*Contract, erro func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) { var output solcOutput if err := json.Unmarshal(combinedJSON, &output); err != nil { - return nil, err + // Try to parse the output with the new solidity v.0.8.0 rules + return parseCombinedJSONV8(combinedJSON, source, languageVersion, compilerVersion, compilerOptions) } // Compilation succeeded, assemble and return the contracts. contracts := make(map[string]*Contract) @@ -176,3 +190,35 @@ func ParseCombinedJSON(combinedJSON []byte, source string, languageVersion strin } return contracts, nil } + +// parseCombinedJSONV8 parses the direct output of solc --combined-output +// and parses it using the rules from solidity v.0.8.0 and later. +func parseCombinedJSONV8(combinedJSON []byte, source string, languageVersion string, compilerVersion string, compilerOptions string) (map[string]*Contract, error) { + var output solcOutputV8 + if err := json.Unmarshal(combinedJSON, &output); err != nil { + return nil, err + } + // Compilation succeeded, assemble and return the contracts. 
+ contracts := make(map[string]*Contract) + for name, info := range output.Contracts { + contracts[name] = &Contract{ + Code: "0x" + info.Bin, + RuntimeCode: "0x" + info.BinRuntime, + Hashes: info.Hashes, + Info: ContractInfo{ + Source: source, + Language: "Solidity", + LanguageVersion: languageVersion, + CompilerVersion: compilerVersion, + CompilerOptions: compilerOptions, + SrcMap: info.SrcMap, + SrcMapRuntime: info.SrcMapRuntime, + AbiDefinition: info.Abi, + UserDoc: info.Userdoc, + DeveloperDoc: info.Devdoc, + Metadata: info.Metadata, + }, + } + } + return contracts, nil +} diff --git a/common/hexutil/json_test.go b/common/hexutil/json_test.go index 8a6b8643a1..ed7d6fad1a 100644 --- a/common/hexutil/json_test.go +++ b/common/hexutil/json_test.go @@ -88,7 +88,7 @@ func TestUnmarshalBytes(t *testing.T) { if !checkError(t, test.input, err, test.wantErr) { continue } - if !bytes.Equal(test.want.([]byte), []byte(v)) { + if !bytes.Equal(test.want.([]byte), v) { t.Errorf("input %s: value mismatch: got %x, want %x", test.input, &v, test.want) continue } diff --git a/common/mclock/mclock.go b/common/mclock/mclock.go index 3aca257cb3..c05738cf2b 100644 --- a/common/mclock/mclock.go +++ b/common/mclock/mclock.go @@ -20,15 +20,19 @@ package mclock import ( "time" - "github.com/aristanetworks/goarista/monotime" + _ "unsafe" // for go:linkname ) +//go:noescape +//go:linkname nanotime runtime.nanotime +func nanotime() int64 + // AbsTime represents absolute monotonic time. -type AbsTime time.Duration +type AbsTime int64 // Now returns the current absolute monotonic time. func Now() AbsTime { - return AbsTime(monotime.Now()) + return AbsTime(nanotime()) } // Add returns t + d as absolute time. @@ -74,7 +78,7 @@ type System struct{} // Now returns the current monotonic time. func (c System) Now() AbsTime { - return AbsTime(monotime.Now()) + return Now() } // Sleep blocks for the given duration. diff --git a/common/mclock/mclock.s b/common/mclock/mclock.s new file mode 100644 index 0000000000..99a7a878f0 --- /dev/null +++ b/common/mclock/mclock.s @@ -0,0 +1 @@ +// This file exists in order to be able to use go:linkname. diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go index 52403df464..c74faab7e6 100644 --- a/common/prque/lazyqueue.go +++ b/common/prque/lazyqueue.go @@ -48,7 +48,7 @@ type LazyQueue struct { } type ( - PriorityCallback func(data interface{}, now mclock.AbsTime) int64 // actual priority callback + PriorityCallback func(data interface{}) int64 // actual priority callback MaxPriorityCallback func(data interface{}, until mclock.AbsTime) int64 // estimated maximum priority callback ) @@ -139,11 +139,10 @@ func (q *LazyQueue) peekIndex() int { // Pop multiple times. Popped items are passed to the callback. MultiPop returns // when the callback returns false or there are no more items to pop. 
func (q *LazyQueue) MultiPop(callback func(data interface{}, priority int64) bool) { - now := q.clock.Now() nextIndex := q.peekIndex() for nextIndex != -1 { data := heap.Pop(q.queue[nextIndex]).(*item).value - heap.Push(q.popQueue, &item{data, q.priority(data, now)}) + heap.Push(q.popQueue, &item{data, q.priority(data)}) nextIndex = q.peekIndex() for q.popQueue.Len() != 0 && (nextIndex == -1 || q.queue[nextIndex].blocks[0][0].priority < q.popQueue.blocks[0][0].priority) { i := heap.Pop(q.popQueue).(*item) diff --git a/common/prque/lazyqueue_test.go b/common/prque/lazyqueue_test.go index be9491e24e..9a831d628b 100644 --- a/common/prque/lazyqueue_test.go +++ b/common/prque/lazyqueue_test.go @@ -40,7 +40,7 @@ type lazyItem struct { index int } -func testPriority(a interface{}, now mclock.AbsTime) int64 { +func testPriority(a interface{}) int64 { return a.(*lazyItem).p } diff --git a/common/prque/prque_test.go b/common/prque/prque_test.go new file mode 100644 index 0000000000..1cffcebad4 --- /dev/null +++ b/common/prque/prque_test.go @@ -0,0 +1,130 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). + +package prque + +import ( + "math/rand" + "testing" +) + +func TestPrque(t *testing.T) { + // Generate a batch of random data and a specific priority order + size := 16 * blockSize + prio := rand.Perm(size) + data := make([]int, size) + for i := 0; i < size; i++ { + data[i] = rand.Int() + } + queue := New(nil) + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], int64(prio[i])) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[int64]int) + for i := 0; i < size; i++ { + dict[int64(prio[i])] = data[i] + } + // Pop out the elements in priority order and verify them + prevPrio := int64(size + 1) + for !queue.Empty() { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + } +} + +func TestReset(t *testing.T) { + // Generate a batch of random data and a specific priority order + size := 16 * blockSize + prio := rand.Perm(size) + data := make([]int, size) + for i := 0; i < size; i++ { + data[i] = rand.Int() + } + queue := New(nil) + for rep := 0; rep < 2; rep++ { + // Fill a priority queue with the above data + for i := 0; i < size; i++ { + queue.Push(data[i], int64(prio[i])) + if queue.Size() != i+1 { + t.Errorf("queue size mismatch: have %v, want %v.", queue.Size(), i+1) + } + } + // Create a map the values to the priorities for easier verification + dict := make(map[int64]int) + for i := 0; i < size; i++ { + dict[int64(prio[i])] = data[i] + } + // Pop out half the elements in priority order and verify them + prevPrio := int64(size + 1) + for i := 0; i < size/2; i++ { + val, prio := queue.Pop() + if prio > prevPrio { + t.Errorf("invalid priority order: %v after %v.", prio, prevPrio) + } + prevPrio = prio + if val != 
dict[prio] { + t.Errorf("push/pop mismatch: have %v, want %v.", val, dict[prio]) + } + delete(dict, prio) + } + // Reset and ensure it's empty + queue.Reset() + if !queue.Empty() { + t.Errorf("priority queue not empty after reset: %v", queue) + } + } +} + +func BenchmarkPush(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := make([]int64, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Int63() + } + // Execute the benchmark + b.ResetTimer() + queue := New(nil) + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } +} + +func BenchmarkPop(b *testing.B) { + // Create some initial data + data := make([]int, b.N) + prio := make([]int64, b.N) + for i := 0; i < len(data); i++ { + data[i] = rand.Int() + prio[i] = rand.Int63() + } + queue := New(nil) + for i := 0; i < len(data); i++ { + queue.Push(data[i], prio[i]) + } + // Execute the benchmark + b.ResetTimer() + for !queue.Empty() { + queue.Pop() + } +} diff --git a/common/prque/sstack_test.go b/common/prque/sstack_test.go new file mode 100644 index 0000000000..2ff093579d --- /dev/null +++ b/common/prque/sstack_test.go @@ -0,0 +1,100 @@ +// CookieJar - A contestant's algorithm toolbox +// Copyright (c) 2013 Peter Szilagyi. All rights reserved. +// +// CookieJar is dual licensed: use of this source code is governed by a BSD +// license that can be found in the LICENSE file. Alternatively, the CookieJar +// toolbox may be used in accordance with the terms and conditions contained +// in a signed written agreement between you and the author(s). + +package prque + +import ( + "math/rand" + "sort" + "testing" +) + +func TestSstack(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item, size) + for i := 0; i < size; i++ { + data[i] = &item{rand.Int(), rand.Int63()} + } + stack := newSstack(nil) + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, stack.Pop().(*item)) + } + } + rest := []*item{} + for stack.Len() > 0 { + rest = append(rest, stack.Pop().(*item)) + } + // Make sure the contents of the resulting slices are ok + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + if i%2 == 1 && data[i] != rest[len(rest)-i/2-1] { + t.Errorf("push/pop mismatch: have %v, want %v.", rest[len(rest)-i/2-1], data[i]) + } + } + } +} + +func TestSstackSort(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item, size) + for i := 0; i < size; i++ { + data[i] = &item{rand.Int(), int64(i)} + } + // Push all the data into the stack + stack := newSstack(nil) + for _, val := range data { + stack.Push(val) + } + // Sort and pop the stack contents (should reverse the order) + sort.Sort(stack) + for _, val := range data { + out := stack.Pop() + if out != val { + t.Errorf("push/pop mismatch after sort: have %v, want %v.", out, val) + } + } +} + +func TestSstackReset(t *testing.T) { + // Create some initial data + size := 16 * blockSize + data := make([]*item, size) + for i := 0; i < size; i++ { + data[i] = &item{rand.Int(), rand.Int63()} + } + stack := newSstack(nil) + for rep := 0; rep < 2; rep++ { + // Push all the data into the stack, pop out every second + secs := []*item{} + for i := 0; i < size; i++ { + stack.Push(data[i]) + if i%2 == 0 { + secs = append(secs, 
stack.Pop().(*item)) + } + } + // Reset and verify both pulled and stack contents + stack.Reset() + if stack.Len() != 0 { + t.Errorf("stack not empty after reset: %v", stack) + } + for i := 0; i < size; i++ { + if i%2 == 0 && data[i] != secs[i/2] { + t.Errorf("push/pop mismatch: have %v, want %v.", secs[i/2], data[i]) + } + } + } +} diff --git a/common/types.go b/common/types.go index cdcc6c20ad..d920e8b1f1 100644 --- a/common/types.go +++ b/common/types.go @@ -17,6 +17,7 @@ package common import ( + "bytes" "database/sql/driver" "encoding/hex" "encoding/json" @@ -84,10 +85,34 @@ func (h Hash) String() string { return h.Hex() } -// Format implements fmt.Formatter, forcing the byte slice to be formatted as is, -// without going through the stringer interface used for logging. +// Format implements fmt.Formatter. +// Hash supports the %v, %s, %v, %x, %X and %d format verbs. func (h Hash) Format(s fmt.State, c rune) { - fmt.Fprintf(s, "%"+string(c), h[:]) + hexb := make([]byte, 2+len(h)*2) + copy(hexb, "0x") + hex.Encode(hexb[2:], h[:]) + + switch c { + case 'x', 'X': + if !s.Flag('#') { + hexb = hexb[2:] + } + if c == 'X' { + hexb = bytes.ToUpper(hexb) + } + fallthrough + case 'v', 's': + s.Write(hexb) + case 'q': + q := []byte{'"'} + s.Write(q) + s.Write(hexb) + s.Write(q) + case 'd': + fmt.Fprint(s, ([len(h)]byte)(h)) + default: + fmt.Fprintf(s, "%%!%c(hash=%x)", c, h) + } } // UnmarshalText parses a hash in hex syntax. @@ -208,39 +233,72 @@ func (a Address) Hash() Hash { return BytesToHash(a[:]) } // Hex returns an EIP55-compliant hex string representation of the address. func (a Address) Hex() string { - unchecksummed := hex.EncodeToString(a[:]) + return string(a.checksumHex()) +} + +// String implements fmt.Stringer. +func (a Address) String() string { + return a.Hex() +} + +func (a *Address) checksumHex() []byte { + buf := a.hex() + + // compute checksum sha := sha3.NewLegacyKeccak256() - sha.Write([]byte(unchecksummed)) + sha.Write(buf[2:]) hash := sha.Sum(nil) - - result := []byte(unchecksummed) - for i := 0; i < len(result); i++ { - hashByte := hash[i/2] + for i := 2; i < len(buf); i++ { + hashByte := hash[(i-2)/2] if i%2 == 0 { hashByte = hashByte >> 4 } else { hashByte &= 0xf } - if result[i] > '9' && hashByte > 7 { - result[i] -= 32 + if buf[i] > '9' && hashByte > 7 { + buf[i] -= 32 } } - return "0x" + string(result) + return buf[:] } -// String implements fmt.Stringer. -func (a Address) String() string { - return a.Hex() +func (a Address) hex() []byte { + var buf [len(a)*2 + 2]byte + copy(buf[:2], "0x") + hex.Encode(buf[2:], a[:]) + return buf[:] } -// Format implements fmt.Formatter, forcing the byte slice to be formatted as is, -// without going through the stringer interface used for logging. +// Format implements fmt.Formatter. +// Address supports the %v, %s, %v, %x, %X and %d format verbs. func (a Address) Format(s fmt.State, c rune) { - fmt.Fprintf(s, "%"+string(c), a[:]) + switch c { + case 'v', 's': + s.Write(a.checksumHex()) + case 'q': + q := []byte{'"'} + s.Write(q) + s.Write(a.checksumHex()) + s.Write(q) + case 'x', 'X': + // %x disables the checksum. + hex := a.hex() + if !s.Flag('#') { + hex = hex[2:] + } + if c == 'X' { + hex = bytes.ToUpper(hex) + } + s.Write(hex) + case 'd': + fmt.Fprint(s, ([len(a)]byte)(a)) + default: + fmt.Fprintf(s, "%%!%c(address=%x)", c, a) + } } // SetBytes sets the address to the value of b. -// If b is larger than len(a) it will panic. +// If b is larger than len(a), b will be cropped from the left. 
func (a *Address) SetBytes(b []byte) { if len(b) > len(a) { b = b[len(b)-AddressLength:] diff --git a/common/types_test.go b/common/types_test.go index fffd673c6e..318e985f87 100644 --- a/common/types_test.go +++ b/common/types_test.go @@ -17,8 +17,10 @@ package common import ( + "bytes" "database/sql/driver" "encoding/json" + "fmt" "math/big" "reflect" "strings" @@ -371,3 +373,167 @@ func TestAddress_Value(t *testing.T) { }) } } + +func TestAddress_Format(t *testing.T) { + b := []byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + } + var addr Address + addr.SetBytes(b) + + tests := []struct { + name string + out string + want string + }{ + { + name: "println", + out: fmt.Sprintln(addr), + want: "0xB26f2b342AAb24BCF63ea218c6A9274D30Ab9A15\n", + }, + { + name: "print", + out: fmt.Sprint(addr), + want: "0xB26f2b342AAb24BCF63ea218c6A9274D30Ab9A15", + }, + { + name: "printf-s", + out: func() string { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "%s", addr) + return buf.String() + }(), + want: "0xB26f2b342AAb24BCF63ea218c6A9274D30Ab9A15", + }, + { + name: "printf-q", + out: fmt.Sprintf("%q", addr), + want: `"0xB26f2b342AAb24BCF63ea218c6A9274D30Ab9A15"`, + }, + { + name: "printf-x", + out: fmt.Sprintf("%x", addr), + want: "b26f2b342aab24bcf63ea218c6a9274d30ab9a15", + }, + { + name: "printf-X", + out: fmt.Sprintf("%X", addr), + want: "B26F2B342AAB24BCF63EA218C6A9274D30AB9A15", + }, + { + name: "printf-#x", + out: fmt.Sprintf("%#x", addr), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15", + }, + { + name: "printf-v", + out: fmt.Sprintf("%v", addr), + want: "0xB26f2b342AAb24BCF63ea218c6A9274D30Ab9A15", + }, + // The original default formatter for byte slice + { + name: "printf-d", + out: fmt.Sprintf("%d", addr), + want: "[178 111 43 52 42 171 36 188 246 62 162 24 198 169 39 77 48 171 154 21]", + }, + // Invalid format char. 
+ { + name: "printf-t", + out: fmt.Sprintf("%t", addr), + want: "%!t(address=b26f2b342aab24bcf63ea218c6a9274d30ab9a15)", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.out != tt.want { + t.Errorf("%s does not render as expected:\n got %s\nwant %s", tt.name, tt.out, tt.want) + } + }) + } +} + +func TestHash_Format(t *testing.T) { + var hash Hash + hash.SetBytes([]byte{ + 0xb2, 0x6f, 0x2b, 0x34, 0x2a, 0xab, 0x24, 0xbc, 0xf6, 0x3e, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0xa2, 0x18, 0xc6, 0xa9, 0x27, 0x4d, 0x30, 0xab, 0x9a, 0x15, + 0x10, 0x00, + }) + + tests := []struct { + name string + out string + want string + }{ + { + name: "println", + out: fmt.Sprintln(hash), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000\n", + }, + { + name: "print", + out: fmt.Sprint(hash), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000", + }, + { + name: "printf-s", + out: func() string { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "%s", hash) + return buf.String() + }(), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000", + }, + { + name: "printf-q", + out: fmt.Sprintf("%q", hash), + want: `"0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000"`, + }, + { + name: "printf-x", + out: fmt.Sprintf("%x", hash), + want: "b26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000", + }, + { + name: "printf-X", + out: fmt.Sprintf("%X", hash), + want: "B26F2B342AAB24BCF63EA218C6A9274D30AB9A15A218C6A9274D30AB9A151000", + }, + { + name: "printf-#x", + out: fmt.Sprintf("%#x", hash), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000", + }, + { + name: "printf-#X", + out: fmt.Sprintf("%#X", hash), + want: "0XB26F2B342AAB24BCF63EA218C6A9274D30AB9A15A218C6A9274D30AB9A151000", + }, + { + name: "printf-v", + out: fmt.Sprintf("%v", hash), + want: "0xb26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000", + }, + // The original default formatter for byte slice + { + name: "printf-d", + out: fmt.Sprintf("%d", hash), + want: "[178 111 43 52 42 171 36 188 246 62 162 24 198 169 39 77 48 171 154 21 162 24 198 169 39 77 48 171 154 21 16 0]", + }, + // Invalid format char. + { + name: "printf-t", + out: fmt.Sprintf("%t", hash), + want: "%!t(hash=b26f2b342aab24bcf63ea218c6a9274d30ab9a15a218c6a9274d30ab9a151000)", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.out != tt.want { + t.Errorf("%s does not render as expected:\n got %s\nwant %s", tt.name, tt.out, tt.want) + } + }) + } +} diff --git a/consensus/clique/clique.go b/consensus/clique/clique.go index c05f84cc2e..c62e180faa 100644 --- a/consensus/clique/clique.go +++ b/consensus/clique/clique.go @@ -434,12 +434,6 @@ func (c *Clique) VerifyUncles(chain consensus.ChainReader, block *types.Block) e return nil } -// VerifySeal implements consensus.Engine, checking whether the signature contained -// in the header satisfies the consensus protocol requirements. -func (c *Clique) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { - return c.verifySeal(chain, header, nil) -} - // verifySeal checks whether the signature contained in the header satisfies the // consensus protocol requirements. 
The method accepts an optional list of parent // headers that aren't yet part of the local blockchain to generate the snapshots @@ -561,12 +555,11 @@ func (c *Clique) Finalize(chain consensus.ChainHeaderReader, header *types.Heade // FinalizeAndAssemble implements consensus.Engine, ensuring no uncles are set, // nor block rewards given, and returns the final block. func (c *Clique) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { - // No block rewards in PoA, so the state remains as is and uncles are dropped - header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) - header.UncleHash = types.CalcUncleHash(nil) + // Finalize block + c.Finalize(chain, header, state, txs, uncles) // Assemble and return the final block for sealing - return types.NewBlock(header, txs, nil, receipts, new(trie.Trie)), nil + return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)), nil } // Authorize injects a private key into the consensus engine to mint new blocks diff --git a/consensus/consensus.go b/consensus/consensus.go index f7a4d0ff0b..2a5aac945d 100644 --- a/consensus/consensus.go +++ b/consensus/consensus.go @@ -77,10 +77,6 @@ type Engine interface { // rules of a given engine. VerifyUncles(chain ChainReader, block *types.Block) error - // VerifySeal checks whether the crypto seal on a header is valid according to - // the consensus rules of the given engine. - VerifySeal(chain ChainHeaderReader, header *types.Header) error - // Prepare initializes the consensus fields of a block header according to the // rules of a particular engine. The changes are executed inline. Prepare(chain ChainHeaderReader, header *types.Header) error diff --git a/consensus/ethash/algorithm.go b/consensus/ethash/algorithm.go index 47d7e51b59..80379597e2 100644 --- a/consensus/ethash/algorithm.go +++ b/consensus/ethash/algorithm.go @@ -151,10 +151,12 @@ func generateCache(dest []uint32, epoch uint64, seed []byte) { logFn("Generated ethash verification cache", "elapsed", common.PrettyDuration(elapsed)) }() // Convert our destination slice to a byte buffer - header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest)) - header.Len *= 4 - header.Cap *= 4 - cache := *(*[]byte)(unsafe.Pointer(&header)) + var cache []byte + cacheHdr := (*reflect.SliceHeader)(unsafe.Pointer(&cache)) + dstHdr := (*reflect.SliceHeader)(unsafe.Pointer(&dest)) + cacheHdr.Data = dstHdr.Data + cacheHdr.Len = dstHdr.Len * 4 + cacheHdr.Cap = dstHdr.Cap * 4 // Calculate the number of theoretical rows (we'll store in one buffer nonetheless) size := uint64(len(cache)) @@ -283,10 +285,12 @@ func generateDataset(dest []uint32, epoch uint64, cache []uint32) { swapped := !isLittleEndian() // Convert our destination slice to a byte buffer - header := *(*reflect.SliceHeader)(unsafe.Pointer(&dest)) - header.Len *= 4 - header.Cap *= 4 - dataset := *(*[]byte)(unsafe.Pointer(&header)) + var dataset []byte + datasetHdr := (*reflect.SliceHeader)(unsafe.Pointer(&dataset)) + destHdr := (*reflect.SliceHeader)(unsafe.Pointer(&dest)) + datasetHdr.Data = destHdr.Data + datasetHdr.Len = destHdr.Len * 4 + datasetHdr.Cap = destHdr.Cap * 4 // Generate the dataset on many goroutines since it takes a while threads := runtime.NumCPU() diff --git a/consensus/ethash/algorithm_test.go b/consensus/ethash/algorithm_test.go index 51fb6b124d..663687b81c 100644 --- a/consensus/ethash/algorithm_test.go +++ 
b/consensus/ethash/algorithm_test.go @@ -731,7 +731,7 @@ func TestConcurrentDiskCacheGeneration(t *testing.T) { defer pend.Done() ethash := New(Config{cachedir, 0, 1, false, "", 0, 0, false, ModeNormal, nil}, nil, false) defer ethash.Close() - if err := ethash.VerifySeal(nil, block.Header()); err != nil { + if err := ethash.verifySeal(nil, block.Header(), false); err != nil { t.Errorf("proc %d: block verification failed: %v", idx, err) } }(i) diff --git a/consensus/ethash/consensus.go b/consensus/ethash/consensus.go index bdc02098af..011a5688ef 100644 --- a/consensus/ethash/consensus.go +++ b/consensus/ethash/consensus.go @@ -39,11 +39,11 @@ import ( // Ethash proof-of-work protocol constants. var ( - FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block - ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium - ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople - maxUncles = 2 // Maximum number of uncles allowed in a single block - allowedFutureBlockTime = 15 * time.Second // Max time from current time allowed for blocks, before they're considered future blocks + FrontierBlockReward = big.NewInt(5e+18) // Block reward in wei for successfully mining a block + ByzantiumBlockReward = big.NewInt(3e+18) // Block reward in wei for successfully mining a block upward from Byzantium + ConstantinopleBlockReward = big.NewInt(2e+18) // Block reward in wei for successfully mining a block upward from Constantinople + maxUncles = 2 // Maximum number of uncles allowed in a single block + allowedFutureBlockTimeSeconds = int64(15) // Max seconds from current time allowed for blocks, before they're considered future blocks // calcDifficultyEip2384 is the difficulty adjustment algorithm as specified by EIP 2384. // It offsets the bomb 4M blocks from Constantinople, so in total 9M blocks. 
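// A minimal sketch of the timestamp handling the hunks below adopt, assuming
// it lives in the ethash package and the caller samples unixNow once per
// verification batch: every header is judged against the same clock reading,
// using the allowedFutureBlockTimeSeconds constant introduced above, instead
// of calling time.Now() per header.
func isFutureHeader(header *types.Header, unixNow int64) bool {
	// header.Time is a uint64 unix timestamp; the allowance is a plain int64.
	return header.Time > uint64(unixNow+allowedFutureBlockTimeSeconds)
}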
@@ -102,7 +102,7 @@ func (ethash *Ethash) VerifyHeader(chain consensus.ChainHeaderReader, header *ty return consensus.ErrUnknownAncestor } // Sanity checks passed, do a proper verification - return ethash.verifyHeader(chain, header, parent, false, seal) + return ethash.verifyHeader(chain, header, parent, false, seal, time.Now().Unix()) } // VerifyHeaders is similar to VerifyHeader, but verifies a batch of headers @@ -126,15 +126,16 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [ // Create a task channel and spawn the verifiers var ( - inputs = make(chan int) - done = make(chan int, workers) - errors = make([]error, len(headers)) - abort = make(chan struct{}) + inputs = make(chan int) + done = make(chan int, workers) + errors = make([]error, len(headers)) + abort = make(chan struct{}) + unixNow = time.Now().Unix() ) for i := 0; i < workers; i++ { go func() { for index := range inputs { - errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index) + errors[index] = ethash.verifyHeaderWorker(chain, headers, seals, index, unixNow) done <- index } }() @@ -170,7 +171,7 @@ func (ethash *Ethash) VerifyHeaders(chain consensus.ChainHeaderReader, headers [ return abort, errorsOut } -func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int) error { +func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, headers []*types.Header, seals []bool, index int, unixNow int64) error { var parent *types.Header if index == 0 { parent = chain.GetHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) @@ -180,10 +181,7 @@ func (ethash *Ethash) verifyHeaderWorker(chain consensus.ChainHeaderReader, head if parent == nil { return consensus.ErrUnknownAncestor } - if chain.GetHeader(headers[index].Hash(), headers[index].Number.Uint64()) != nil { - return nil // known block - } - return ethash.verifyHeader(chain, headers[index], parent, false, seals[index]) + return ethash.verifyHeader(chain, headers[index], parent, false, seals[index], unixNow) } // VerifyUncles verifies that the given block's uncles conform to the consensus @@ -234,7 +232,7 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo if ancestors[uncle.ParentHash] == nil || uncle.ParentHash == block.ParentHash() { return errDanglingUncle } - if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true); err != nil { + if err := ethash.verifyHeader(chain, uncle, ancestors[uncle.ParentHash], true, true, time.Now().Unix()); err != nil { return err } } @@ -244,14 +242,14 @@ func (ethash *Ethash) VerifyUncles(chain consensus.ChainReader, block *types.Blo // verifyHeader checks whether a header conforms to the consensus rules of the // stock Ethereum ethash engine. // See YP section 4.3.4. 
"Block Header Validity" -func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool) error { +func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, parent *types.Header, uncle bool, seal bool, unixNow int64) error { // Ensure that the header's extra-data section is of a reasonable size if uint64(len(header.Extra)) > params.MaximumExtraDataSize { return fmt.Errorf("extra-data too long: %d > %d", len(header.Extra), params.MaximumExtraDataSize) } // Verify the header's timestamp if !uncle { - if header.Time > uint64(time.Now().Add(allowedFutureBlockTime).Unix()) { + if header.Time > uint64(unixNow+allowedFutureBlockTimeSeconds) { return consensus.ErrFutureBlock } } @@ -290,7 +288,7 @@ func (ethash *Ethash) verifyHeader(chain consensus.ChainHeaderReader, header, pa } // Verify the engine specific seal securing the block if seal { - if err := ethash.VerifySeal(chain, header); err != nil { + if err := ethash.verifySeal(chain, header, false); err != nil { return err } } @@ -485,11 +483,10 @@ func calcDifficultyFrontier(time uint64, parent *types.Header) *big.Int { return diff } -// VerifySeal implements consensus.Engine, checking whether the given block satisfies -// the PoW difficulty requirements. -func (ethash *Ethash) VerifySeal(chain consensus.ChainHeaderReader, header *types.Header) error { - return ethash.verifySeal(chain, header, false) -} +// Exported for fuzzing +var FrontierDifficultyCalulator = calcDifficultyFrontier +var HomesteadDifficultyCalulator = calcDifficultyHomestead +var DynamicDifficultyCalculator = makeDifficultyCalculator // verifySeal checks whether a block satisfies the PoW difficulty requirements, // either using the usual ethash cache for it, or alternatively using a full DAG @@ -579,12 +576,11 @@ func (ethash *Ethash) Finalize(chain consensus.ChainHeaderReader, header *types. // FinalizeAndAssemble implements consensus.Engine, accumulating the block and // uncle rewards, setting the final state and assembling the block. func (ethash *Ethash) FinalizeAndAssemble(chain consensus.ChainHeaderReader, header *types.Header, state *state.StateDB, txs []*types.Transaction, uncles []*types.Header, receipts []*types.Receipt) (*types.Block, error) { - // Accumulate any block and uncle rewards and commit the final state root - accumulateRewards(chain.Config(), state, header, uncles) - header.Root = state.IntermediateRoot(chain.Config().IsEIP158(header.Number)) + // Finalize block + ethash.Finalize(chain, header, state, txs, uncles) // Header seems complete, assemble into a block and return - return types.NewBlock(header, txs, uncles, receipts, new(trie.Trie)), nil + return types.NewBlock(header, txs, uncles, receipts, trie.NewStackTrie(nil)), nil } // SealHash returns the hash of a block prior to it being sealed. 
diff --git a/consensus/ethash/consensus_test.go b/consensus/ethash/consensus_test.go index 675737d9e1..6f6dc79fd8 100644 --- a/consensus/ethash/consensus_test.go +++ b/consensus/ethash/consensus_test.go @@ -17,12 +17,15 @@ package ethash import ( + "encoding/binary" "encoding/json" "math/big" + "math/rand" "os" "path/filepath" "testing" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" @@ -84,3 +87,102 @@ func TestCalcDifficulty(t *testing.T) { } } } + +func randSlice(min, max uint32) []byte { + var b = make([]byte, 4) + rand.Read(b) + a := binary.LittleEndian.Uint32(b) + size := min + a%(max-min) + out := make([]byte, size) + rand.Read(out) + return out +} + +func TestDifficultyCalculators(t *testing.T) { + rand.Seed(2) + for i := 0; i < 5000; i++ { + // 1 to 300 seconds diff + var timeDelta = uint64(1 + rand.Uint32()%3000) + diffBig := big.NewInt(0).SetBytes(randSlice(2, 10)) + if diffBig.Cmp(params.MinimumDifficulty) < 0 { + diffBig.Set(params.MinimumDifficulty) + } + //rand.Read(difficulty) + header := &types.Header{ + Difficulty: diffBig, + Number: new(big.Int).SetUint64(rand.Uint64() % 50_000_000), + Time: rand.Uint64() - timeDelta, + } + if rand.Uint32()&1 == 0 { + header.UncleHash = types.EmptyUncleHash + } + bombDelay := new(big.Int).SetUint64(rand.Uint64() % 50_000_000) + for i, pair := range []struct { + bigFn func(time uint64, parent *types.Header) *big.Int + u256Fn func(time uint64, parent *types.Header) *big.Int + }{ + {FrontierDifficultyCalulator, CalcDifficultyFrontierU256}, + {HomesteadDifficultyCalulator, CalcDifficultyHomesteadU256}, + {DynamicDifficultyCalculator(bombDelay), MakeDifficultyCalculatorU256(bombDelay)}, + } { + time := header.Time + timeDelta + want := pair.bigFn(time, header) + have := pair.u256Fn(time, header) + if want.BitLen() > 256 { + continue + } + if want.Cmp(have) != 0 { + t.Fatalf("pair %d: want %x have %x\nparent.Number: %x\np.Time: %x\nc.Time: %x\nBombdelay: %v\n", i, want, have, + header.Number, header.Time, time, bombDelay) + } + } + } +} + +func BenchmarkDifficultyCalculator(b *testing.B) { + x1 := makeDifficultyCalculator(big.NewInt(1000000)) + x2 := MakeDifficultyCalculatorU256(big.NewInt(1000000)) + h := &types.Header{ + ParentHash: common.Hash{}, + UncleHash: types.EmptyUncleHash, + Difficulty: big.NewInt(0xffffff), + Number: big.NewInt(500000), + Time: 1000000, + } + b.Run("big-frontier", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + calcDifficultyFrontier(1000014, h) + } + }) + b.Run("u256-frontier", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + CalcDifficultyFrontierU256(1000014, h) + } + }) + b.Run("big-homestead", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + calcDifficultyHomestead(1000014, h) + } + }) + b.Run("u256-homestead", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + CalcDifficultyHomesteadU256(1000014, h) + } + }) + b.Run("big-generic", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + x1(1000014, h) + } + }) + b.Run("u256-generic", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + x2(1000014, h) + } + }) +} diff --git a/consensus/ethash/difficulty.go b/consensus/ethash/difficulty.go new file mode 100644 index 0000000000..59c4ac7419 --- /dev/null +++ b/consensus/ethash/difficulty.go @@ -0,0 +1,193 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part 
of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package ethash + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/core/types" + "github.com/holiman/uint256" +) + +const ( + // frontierDurationLimit is for Frontier: + // The decision boundary on the blocktime duration used to determine + // whether difficulty should go up or down. + frontierDurationLimit = 13 + // minimumDifficulty The minimum that the difficulty may ever be. + minimumDifficulty = 131072 + // expDiffPeriod is the exponential difficulty period + expDiffPeriodUint = 100000 + // difficultyBoundDivisorBitShift is the bound divisor of the difficulty (2048), + // This constant is the right-shifts to use for the division. + difficultyBoundDivisor = 11 +) + +// CalcDifficultyFrontierU256 is the difficulty adjustment algorithm. It returns the +// difficulty that a new block should have when created at time given the parent +// block's time and difficulty. The calculation uses the Frontier rules. +func CalcDifficultyFrontierU256(time uint64, parent *types.Header) *big.Int { + /* + Algorithm + block_diff = pdiff + pdiff / 2048 * (1 if time - ptime < 13 else -1) + int(2^((num // 100000) - 2)) + + Where: + - pdiff = parent.difficulty + - ptime = parent.time + - time = block.timestamp + - num = block.number + */ + + pDiff := uint256.NewInt() + pDiff.SetFromBig(parent.Difficulty) // pDiff: pdiff + adjust := pDiff.Clone() + adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048 + + if time-parent.Time < frontierDurationLimit { + pDiff.Add(pDiff, adjust) + } else { + pDiff.Sub(pDiff, adjust) + } + if pDiff.LtUint64(minimumDifficulty) { + pDiff.SetUint64(minimumDifficulty) + } + // 'pdiff' now contains: + // pdiff + pdiff / 2048 * (1 if time - ptime < 13 else -1) + + if periodCount := (parent.Number.Uint64() + 1) / expDiffPeriodUint; periodCount > 1 { + // diff = diff + 2^(periodCount - 2) + expDiff := adjust.SetOne() + expDiff.Lsh(expDiff, uint(periodCount-2)) // expdiff: 2 ^ (periodCount -2) + pDiff.Add(pDiff, expDiff) + } + return pDiff.ToBig() +} + +// CalcDifficultyHomesteadU256 is the difficulty adjustment algorithm. It returns +// the difficulty that a new block should have when created at time given the +// parent block's time and difficulty. The calculation uses the Homestead rules. 
+func CalcDifficultyHomesteadU256(time uint64, parent *types.Header) *big.Int { + /* + https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.md + Algorithm: + block_diff = pdiff + pdiff / 2048 * max(1 - (time - ptime) / 10, -99) + 2 ^ int((num / 100000) - 2)) + + Our modification, to use unsigned ints: + block_diff = pdiff - pdiff / 2048 * max((time - ptime) / 10 - 1, 99) + 2 ^ int((num / 100000) - 2)) + + Where: + - pdiff = parent.difficulty + - ptime = parent.time + - time = block.timestamp + - num = block.number + */ + + pDiff := uint256.NewInt() + pDiff.SetFromBig(parent.Difficulty) // pDiff: pdiff + adjust := pDiff.Clone() + adjust.Rsh(adjust, difficultyBoundDivisor) // adjust: pDiff / 2048 + + x := (time - parent.Time) / 10 // (time - ptime) / 10) + var neg = true + if x == 0 { + x = 1 + neg = false + } else if x >= 100 { + x = 99 + } else { + x = x - 1 + } + z := new(uint256.Int).SetUint64(x) + adjust.Mul(adjust, z) // adjust: (pdiff / 2048) * max((time - ptime) / 10 - 1, 99) + if neg { + pDiff.Sub(pDiff, adjust) // pdiff - pdiff / 2048 * max((time - ptime) / 10 - 1, 99) + } else { + pDiff.Add(pDiff, adjust) // pdiff + pdiff / 2048 * max((time - ptime) / 10 - 1, 99) + } + if pDiff.LtUint64(minimumDifficulty) { + pDiff.SetUint64(minimumDifficulty) + } + // for the exponential factor, a.k.a "the bomb" + // diff = diff + 2^(periodCount - 2) + if periodCount := (1 + parent.Number.Uint64()) / expDiffPeriodUint; periodCount > 1 { + expFactor := adjust.Lsh(adjust.SetOne(), uint(periodCount-2)) + pDiff.Add(pDiff, expFactor) + } + return pDiff.ToBig() +} + +// MakeDifficultyCalculatorU256 creates a difficultyCalculator with the given bomb-delay. +// the difficulty is calculated with Byzantium rules, which differs from Homestead in +// how uncles affect the calculation +func MakeDifficultyCalculatorU256(bombDelay *big.Int) func(time uint64, parent *types.Header) *big.Int { + // Note, the calculations below looks at the parent number, which is 1 below + // the block number. 
Thus we remove one from the delay given + bombDelayFromParent := bombDelay.Uint64() - 1 + return func(time uint64, parent *types.Header) *big.Int { + /* + https://github.com/ethereum/EIPs/issues/100 + pDiff = parent.difficulty + BLOCK_DIFF_FACTOR = 9 + a = pDiff + (pDiff // BLOCK_DIFF_FACTOR) * adj_factor + b = min(parent.difficulty, MIN_DIFF) + child_diff = max(a,b ) + */ + x := (time - parent.Time) / 9 // (block_timestamp - parent_timestamp) // 9 + c := uint64(1) // if parent.unclehash == emptyUncleHashHash + if parent.UncleHash != types.EmptyUncleHash { + c = 2 + } + xNeg := x >= c + if xNeg { + // x is now _negative_ adjustment factor + x = x - c // - ( (t-p)/p -( 2 or 1) ) + } else { + x = c - x // (2 or 1) - (t-p)/9 + } + if x > 99 { + x = 99 // max(x, 99) + } + // parent_diff + (parent_diff / 2048 * max((2 if len(parent.uncles) else 1) - ((timestamp - parent.timestamp) // 9), -99)) + y := new(uint256.Int) + y.SetFromBig(parent.Difficulty) // y: p_diff + pDiff := y.Clone() // pdiff: p_diff + z := new(uint256.Int).SetUint64(x) //z : +-adj_factor (either pos or negative) + y.Rsh(y, difficultyBoundDivisor) // y: p__diff / 2048 + z.Mul(y, z) // z: (p_diff / 2048 ) * (+- adj_factor) + + if xNeg { + y.Sub(pDiff, z) // y: parent_diff + parent_diff/2048 * adjustment_factor + } else { + y.Add(pDiff, z) // y: parent_diff + parent_diff/2048 * adjustment_factor + } + // minimum difficulty can ever be (before exponential factor) + if y.LtUint64(minimumDifficulty) { + y.SetUint64(minimumDifficulty) + } + // calculate a fake block number for the ice-age delay + // Specification: https://eips.ethereum.org/EIPS/eip-1234 + var pNum = parent.Number.Uint64() + if pNum >= bombDelayFromParent { + if fakeBlockNumber := pNum - bombDelayFromParent; fakeBlockNumber >= 2*expDiffPeriodUint { + z.SetOne() + z.Lsh(z, uint(fakeBlockNumber/expDiffPeriodUint-2)) + y.Add(z, y) + } + } + return y.ToBig() + } +} diff --git a/consensus/ethash/ethash.go b/consensus/ethash/ethash.go index aa3f002c0d..550d99893d 100644 --- a/consensus/ethash/ethash.go +++ b/consensus/ethash/ethash.go @@ -33,7 +33,7 @@ import ( "time" "unsafe" - mmap "github.com/edsrzf/mmap-go" + "github.com/edsrzf/mmap-go" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" diff --git a/consensus/ethash/ethash_test.go b/consensus/ethash/ethash_test.go index fdfd81320f..2639707eb2 100644 --- a/consensus/ethash/ethash_test.go +++ b/consensus/ethash/ethash_test.go @@ -46,10 +46,10 @@ func TestTestMode(t *testing.T) { case block := <-results: header.Nonce = types.EncodeNonce(block.Nonce()) header.MixDigest = block.MixDigest() - if err := ethash.VerifySeal(nil, header); err != nil { + if err := ethash.verifySeal(nil, header, false); err != nil { t.Fatalf("unexpected verification error: %v", err) } - case <-time.NewTimer(2 * time.Second).C: + case <-time.NewTimer(4 * time.Second).C: t.Error("sealing result timeout") } } @@ -86,7 +86,7 @@ func verifyTest(wg *sync.WaitGroup, e *Ethash, workerIndex, epochs int) { block = 0 } header := &types.Header{Number: big.NewInt(block), Difficulty: big.NewInt(100)} - e.VerifySeal(nil, header) + e.verifySeal(nil, header, false) } } diff --git a/console/bridge.go b/console/bridge.go index 1a23269194..21ef0e8e7b 100644 --- a/console/bridge.go +++ b/console/bridge.go @@ -144,15 +144,14 @@ func (b *bridge) OpenWallet(call jsre.Call) (goja.Value, error) { if val, err = openWallet(goja.Null(), wallet, passwd); err != nil { if !strings.HasSuffix(err.Error(), 
scwallet.ErrPINNeeded.Error()) { return nil, err - } else { - // PIN input requested, fetch from the user and call open again - input, err := b.prompter.PromptPassword("Please enter current PIN: ") - if err != nil { - return nil, err - } - if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil { - return nil, err - } + } + // PIN input requested, fetch from the user and call open again + input, err := b.prompter.PromptPassword("Please enter current PIN: ") + if err != nil { + return nil, err + } + if val, err = openWallet(goja.Null(), wallet, call.VM.ToValue(input)); err != nil { + return nil, err } } diff --git a/console/console_test.go b/console/console_test.go index 68c03d108d..f6ab781410 100644 --- a/console/console_test.go +++ b/console/console_test.go @@ -31,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/console/prompt" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/internal/jsre" "github.com/ethereum/go-ethereum/miner" "github.com/ethereum/go-ethereum/node" @@ -85,7 +86,7 @@ type tester struct { // newTester creates a test environment based on which the console can operate. // Please ensure you call Close() on the returned tester to avoid leaks. -func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { +func newTester(t *testing.T, confOverride func(*ethconfig.Config)) *tester { // Create a temporary storage for the node keys and initialize it workspace, err := ioutil.TempDir("", "console-tester-") if err != nil { @@ -97,7 +98,7 @@ func newTester(t *testing.T, confOverride func(*eth.Config)) *tester { if err != nil { t.Fatalf("failed to create node: %v", err) } - ethConf := ð.Config{ + ethConf := ðconfig.Config{ Genesis: core.DeveloperGenesisBlock(15, common.Address{}), Miner: miner.Config{ Etherbase: common.HexToAddress(testAddress), diff --git a/contracts/checkpointoracle/oracle.go b/contracts/checkpointoracle/oracle.go index 1f273272ab..7f3127d0b8 100644 --- a/contracts/checkpointoracle/oracle.go +++ b/contracts/checkpointoracle/oracle.go @@ -65,7 +65,7 @@ func (oracle *CheckpointOracle) LookupCheckpointEvents(blockLogs [][]*types.Log, if err != nil { continue } - if event.Index == section && common.Hash(event.CheckpointHash) == hash { + if event.Index == section && event.CheckpointHash == hash { votes = append(votes, event) } } diff --git a/contracts/checkpointoracle/oracle_test.go b/contracts/checkpointoracle/oracle_test.go index 817954d11a..1218481929 100644 --- a/contracts/checkpointoracle/oracle_test.go +++ b/contracts/checkpointoracle/oracle_test.go @@ -178,7 +178,7 @@ func TestCheckpointRegister(t *testing.T) { contractBackend := backends.NewSimulatedBackend(core.GenesisAlloc{accounts[0].addr: {Balance: big.NewInt(1000000000)}, accounts[1].addr: {Balance: big.NewInt(1000000000)}, accounts[2].addr: {Balance: big.NewInt(1000000000)}}, 10000000) defer contractBackend.Close() - transactOpts := bind.NewKeyedTransactor(accounts[0].key) + transactOpts, _ := bind.NewKeyedTransactorWithChainID(accounts[0].key, big.NewInt(1337)) // 3 trusted signers, threshold 2 contractAddr, _, c, err := contract.DeployCheckpointOracle(transactOpts, contractBackend, []common.Address{accounts[0].addr, accounts[1].addr, accounts[2].addr}, sectionSize, processConfirms, big.NewInt(2)) diff --git a/core/bench_test.go b/core/bench_test.go index 0f4cabd837..85653ea5db 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -85,7 +85,7 @@ func 
genValueTx(nbytes int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, false, false, false) + gas, _ := IntrinsicGas(data, nil, false, false, false) tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, nil, data), types.HomesteadSigner{}, benchRootKey) gen.AddTx(tx) } diff --git a/core/blockchain.go b/core/blockchain.go index 1c8a7fe60a..d65ce4f048 100644 --- a/core/blockchain.go +++ b/core/blockchain.go @@ -89,7 +89,6 @@ const ( txLookupCacheLimit = 1024 maxFutureBlocks = 256 maxTimeFutureBlocks = 30 - badBlockLimit = 10 TriesInMemory = 128 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. @@ -129,6 +128,7 @@ type CacheConfig struct { TrieDirtyDisabled bool // Whether to disable trie write caching and GC altogether (archive node) TrieTimeLimit time.Duration // Time limit after which to flush the current in-memory trie to disk SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory + Preimages bool // Whether to store preimage of trie key to the disk SnapshotWait bool // Wait for snapshot construction on startup. TODO(karalabe): This is a dirty hack for testing, nuke it } @@ -202,12 +202,11 @@ type BlockChain struct { procInterrupt int32 // interrupt signaler for block processing engine consensus.Engine - validator Validator // Block and state validator interface - prefetcher Prefetcher // Block state prefetcher interface - processor Processor // Block transaction processor interface + validator Validator // Block and state validator interface + prefetcher Prefetcher + processor Processor // Block transaction processor interface vmConfig vm.Config - badBlocks *lru.Cache // Bad block cache shouldPreserve func(*types.Block) bool // Function used to determine whether should preserve the given block. terminateInsert func(common.Hash, uint64) bool // Testing hook used to terminate ancient receipt chain insertion. writeLegacyJournal bool // Testing flag used to flush the snapshot journal in legacy format. 
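The Preimages switch added to CacheConfig above is what the next hunk threads into the trie database via trie.Config. A minimal sketch of how a caller might opt in; the field names and the NewBlockChain call shape come from this diff, while the surrounding wiring (in-memory database, full-faker engine) is purely illustrative:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/consensus/ethash"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/params"
)

func main() {
	// In-memory database with a committed genesis, purely for illustration.
	db := rawdb.NewMemoryDatabase()
	new(core.Genesis).MustCommit(db)

	// Cache configuration mirroring the fields used by the tests in this diff,
	// with trie key preimage recording enabled through the new Preimages field.
	cacheConfig := &core.CacheConfig{
		TrieCleanLimit: 256,
		TrieDirtyLimit: 256,
		TrieTimeLimit:  5 * time.Minute,
		SnapshotLimit:  256,
		Preimages:      true,
	}
	chain, err := core.NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, ethash.NewFullFaker(), vm.Config{}, nil, nil)
	if err != nil {
		panic(err)
	}
	defer chain.Stop()
}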
@@ -226,14 +225,17 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par blockCache, _ := lru.New(blockCacheLimit) txLookupCache, _ := lru.New(txLookupCacheLimit) futureBlocks, _ := lru.New(maxFutureBlocks) - badBlocks, _ := lru.New(badBlockLimit) bc := &BlockChain{ - chainConfig: chainConfig, - cacheConfig: cacheConfig, - db: db, - triegc: prque.New(nil), - stateCache: state.NewDatabaseWithCache(db, cacheConfig.TrieCleanLimit, cacheConfig.TrieCleanJournal), + chainConfig: chainConfig, + cacheConfig: cacheConfig, + db: db, + triegc: prque.New(nil), + stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ + Cache: cacheConfig.TrieCleanLimit, + Journal: cacheConfig.TrieCleanJournal, + Preimages: cacheConfig.Preimages, + }), quit: make(chan struct{}), shouldPreserve: shouldPreserve, bodyCache: bodyCache, @@ -244,7 +246,6 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par futureBlocks: futureBlocks, engine: engine, vmConfig: vmConfig, - badBlocks: badBlocks, } bc.validator = NewBlockValidator(chainConfig, bc, engine) bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) @@ -371,7 +372,7 @@ func NewBlockChain(db ethdb.Database, cacheConfig *CacheConfig, chainConfig *par log.Warn("Enabling snapshot recovery", "chainhead", head.NumberU64(), "diskbase", *layer) recover = true } - bc.snaps = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, recover) + bc.snaps, _ = snapshot.New(bc.db, bc.stateCache.TrieDB(), bc.cacheConfig.SnapshotLimit, head.Root(), !bc.cacheConfig.SnapshotWait, true, recover) } // Take ownership of this particular state go bc.update() @@ -523,8 +524,13 @@ func (bc *BlockChain) SetHeadBeyondRoot(head uint64, root common.Hash) (uint64, if _, err := state.New(newHeadBlock.Root(), bc.stateCache, bc.snaps); err != nil { log.Trace("Block state missing, rewinding further", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash()) if pivot == nil || newHeadBlock.NumberU64() > *pivot { - newHeadBlock = bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) - continue + parent := bc.GetBlock(newHeadBlock.ParentHash(), newHeadBlock.NumberU64()-1) + if parent != nil { + newHeadBlock = parent + continue + } + log.Error("Missing block in the middle, aiming genesis", "number", newHeadBlock.NumberU64()-1, "hash", newHeadBlock.ParentHash()) + newHeadBlock = bc.genesisBlock } else { log.Trace("Rewind passed pivot, aiming genesis", "number", newHeadBlock.NumberU64(), "hash", newHeadBlock.Hash(), "pivot", *pivot) newHeadBlock = bc.genesisBlock @@ -654,12 +660,8 @@ func (bc *BlockChain) CurrentBlock() *types.Block { return bc.currentBlock.Load().(*types.Block) } -// Snapshot returns the blockchain snapshot tree. This method is mainly used for -// testing, to make it possible to verify the snapshot after execution. -// -// Warning: There are no guarantees about the safety of using the returned 'snap' if the -// blockchain is simultaneously importing blocks, so take care. -func (bc *BlockChain) Snapshot() *snapshot.Tree { +// Snapshots returns the blockchain snapshot tree. 
+func (bc *BlockChain) Snapshots() *snapshot.Tree { return bc.snaps } @@ -1803,6 +1805,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er return it.index, err } // No validation errors for the first block (or chain prefix skipped) + var activeState *state.StateDB + defer func() { + // The chain importer is starting and stopping trie prefetchers. If a bad + // block or other error is hit however, an early return may not properly + // terminate the background threads. This defer ensures that we clean up + // and dangling prefetcher, without defering each and holding on live refs. + if activeState != nil { + activeState.StopPrefetcher() + } + }() + for ; block != nil && err == nil || err == ErrKnownBlock; block, err = it.next() { // If the chain is terminating, stop processing blocks if bc.insertStopped() { @@ -1863,12 +1876,17 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er if err != nil { return it.index, err } + // Enable prefetching to pull in trie node paths while processing transactions + statedb.StartPrefetcher("chain") + activeState = statedb + // If we have a followup block, run that against the current state to pre-cache // transactions and probabilistically some of the account/storage trie nodes. var followupInterrupt uint32 if !bc.cacheConfig.TrieCleanNoPrefetch { if followup, err := it.peek(); followup != nil && err == nil { throwaway, _ := state.New(parent.Root, bc.stateCache, bc.snaps) + go func(start time.Time, followup *types.Block, throwaway *state.StateDB, interrupt *uint32) { bc.prefetcher.Prefetch(followup, throwaway, bc.vmConfig, &followupInterrupt) @@ -1894,8 +1912,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er storageUpdateTimer.Update(statedb.StorageUpdates) // Storage updates are complete, we can mark them snapshotAccountReadTimer.Update(statedb.SnapshotAccountReads) // Account reads are complete, we can mark them snapshotStorageReadTimer.Update(statedb.SnapshotStorageReads) // Storage reads are complete, we can mark them - - triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation + triehash := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation trieproc := statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates @@ -1923,7 +1940,6 @@ func (bc *BlockChain) insertChain(chain types.Blocks, verifySeals bool) (int, er if err != nil { return it.index, err } - // Update the metrics touched during block commit accountCommitTimer.Update(statedb.AccountCommits) // Account commits are complete, we can mark them storageCommitTimer.Update(statedb.StorageCommits) // Storage commits are complete, we can mark them @@ -2373,26 +2389,9 @@ func (bc *BlockChain) maintainTxIndex(ancients uint64) { } } -// BadBlocks returns a list of the last 'bad blocks' that the client has seen on the network -func (bc *BlockChain) BadBlocks() []*types.Block { - blocks := make([]*types.Block, 0, bc.badBlocks.Len()) - for _, hash := range bc.badBlocks.Keys() { - if blk, exist := bc.badBlocks.Peek(hash); exist { - block := blk.(*types.Block) - blocks = append(blocks, block) - } - } - return blocks -} - -// addBadBlock adds a bad block to the bad-block LRU cache -func (bc *BlockChain) addBadBlock(block *types.Block) { - bc.badBlocks.Add(block.Hash(), block) -} - // reportBlock logs a bad block error. 
func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, err error) { - bc.addBadBlock(block) + rawdb.WriteBadBlock(bc.db, block) var receiptString string for i, receipt := range receipts { @@ -2433,12 +2432,8 @@ func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i bc.wg.Add(1) defer bc.wg.Done() - - whFunc := func(header *types.Header) error { - _, err := bc.hc.WriteHeader(header) - return err - } - return bc.hc.InsertHeaderChain(chain, whFunc, start) + _, err := bc.hc.InsertHeaderChain(chain, start) + return 0, err } // CurrentHeader retrieves the current head header of the canonical chain. The diff --git a/core/blockchain_snapshot_test.go b/core/blockchain_snapshot_test.go index e8d3b2470a..96a5c7a8d4 100644 --- a/core/blockchain_snapshot_test.go +++ b/core/blockchain_snapshot_test.go @@ -28,27 +28,18 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/params" ) -// snapshotTest is a test case for snapshot recovery. It can be used for -// simulating these scenarios: -// (i) Geth restarts normally with valid legacy snapshot -// (ii) Geth restarts normally with valid new-format snapshot -// (iii) Geth restarts after the crash, with broken legacy snapshot -// (iv) Geth restarts after the crash, with broken new-format snapshot -// (v) Geth restarts normally, but it's requested to be rewound to a lower point via SetHead -// (vi) Geth restarts normally with a stale snapshot -type snapshotTest struct { - legacy bool // Flag whether the loaded snapshot is in legacy format - crash bool // Flag whether the Geth restarts from the previous crash - restartCrash int // Number of blocks to insert after the normal stop, then the crash happens - gapped int // Number of blocks to insert without enabling snapshot - setHead uint64 // Block number to set head back to - +// snapshotTestBasic wraps the common testing fields in the snapshot tests. 
+type snapshotTestBasic struct { + legacy bool // Wether write the snapshot journal in legacy format chainBlocks int // Number of blocks to generate for the canonical chain snapshotBlock uint64 // Block number of the relevant snapshot disk layer commitBlock uint64 // Block number for which to commit the state to disk @@ -58,56 +49,418 @@ type snapshotTest struct { expHeadFastBlock uint64 // Block number of the expected head fast sync block expHeadBlock uint64 // Block number of the expected head full block expSnapshotBottom uint64 // The block height corresponding to the snapshot disk layer + + // share fields, set in runtime + datadir string + db ethdb.Database + gendb ethdb.Database + engine consensus.Engine } -func (tt *snapshotTest) dump() string { +func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Block) { + // Create a temporary persistent database + datadir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("Failed to create temporary datadir: %v", err) + } + os.RemoveAll(datadir) + + db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") + if err != nil { + t.Fatalf("Failed to create persistent database: %v", err) + } + // Initialize a fresh chain + var ( + genesis = new(Genesis).MustCommit(db) + engine = ethash.NewFullFaker() + gendb = rawdb.NewMemoryDatabase() + + // Snapshot is enabled, the first snapshot is created from the Genesis. + // The snapshot memory allowance is 256MB, it means no snapshot flush + // will happen during the block insertion. + cacheConfig = defaultCacheConfig + ) + chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to create chain: %v", err) + } + blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, func(i int, b *BlockGen) {}) + + // Insert the blocks with configured settings. + var breakpoints []uint64 + if basic.commitBlock > basic.snapshotBlock { + breakpoints = append(breakpoints, basic.snapshotBlock, basic.commitBlock) + } else { + breakpoints = append(breakpoints, basic.commitBlock, basic.snapshotBlock) + } + var startPoint uint64 + for _, point := range breakpoints { + if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil { + t.Fatalf("Failed to import canonical chain start: %v", err) + } + startPoint = point + + if basic.commitBlock > 0 && basic.commitBlock == point { + chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil) + } + if basic.snapshotBlock > 0 && basic.snapshotBlock == point { + if basic.legacy { + // Here we commit the snapshot disk root to simulate + // committing the legacy snapshot. + rawdb.WriteSnapshotRoot(db, blocks[point-1].Root()) + } else { + // Flushing the entire snap tree into the disk, the + // relavant (a) snapshot root and (b) snapshot generator + // will be persisted atomically. + chain.snaps.Cap(blocks[point-1].Root(), 0) + diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root() + if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) { + t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot) + } + } + } + } + if _, err := chain.InsertChain(blocks[startPoint:]); err != nil { + t.Fatalf("Failed to import canonical chain tail: %v", err) + } + + // Set runtime fields + basic.datadir = datadir + basic.db = db + basic.gendb = gendb + basic.engine = engine + + // Ugly hack, notify the chain to flush the journal in legacy format + // if it's requested. 
+ if basic.legacy { + chain.writeLegacyJournal = true + } + return chain, blocks +} + +func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks []*types.Block) { + // Iterate over all the remaining blocks and ensure there are no gaps + verifyNoGaps(t, chain, true, blocks) + verifyCutoff(t, chain, true, blocks, basic.expCanonicalBlocks) + + if head := chain.CurrentHeader(); head.Number.Uint64() != basic.expHeadHeader { + t.Errorf("Head header mismatch: have %d, want %d", head.Number, basic.expHeadHeader) + } + if head := chain.CurrentFastBlock(); head.NumberU64() != basic.expHeadFastBlock { + t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadFastBlock) + } + if head := chain.CurrentBlock(); head.NumberU64() != basic.expHeadBlock { + t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), basic.expHeadBlock) + } + + // Check the disk layer, ensure they are matched + block := chain.GetBlockByNumber(basic.expSnapshotBottom) + if block == nil { + t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { + t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) + } + + // Check the snapshot, ensure it's integrated + if err := chain.snaps.Verify(block.Root()); err != nil { + t.Errorf("The disk layer is not integrated %v", err) + } +} + +func (basic *snapshotTestBasic) dump() string { buffer := new(strings.Builder) fmt.Fprint(buffer, "Chain:\n G") - for i := 0; i < tt.chainBlocks; i++ { + for i := 0; i < basic.chainBlocks; i++ { fmt.Fprintf(buffer, "->C%d", i+1) } fmt.Fprint(buffer, " (HEAD)\n\n") fmt.Fprintf(buffer, "Commit: G") - if tt.commitBlock > 0 { - fmt.Fprintf(buffer, ", C%d", tt.commitBlock) + if basic.commitBlock > 0 { + fmt.Fprintf(buffer, ", C%d", basic.commitBlock) } fmt.Fprint(buffer, "\n") fmt.Fprintf(buffer, "Snapshot: G") - if tt.snapshotBlock > 0 { - fmt.Fprintf(buffer, ", C%d", tt.snapshotBlock) + if basic.snapshotBlock > 0 { + fmt.Fprintf(buffer, ", C%d", basic.snapshotBlock) } fmt.Fprint(buffer, "\n") - if tt.crash { - fmt.Fprintf(buffer, "\nCRASH\n\n") - } else { - fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", tt.setHead) - } + //if crash { + // fmt.Fprintf(buffer, "\nCRASH\n\n") + //} else { + // fmt.Fprintf(buffer, "\nSetHead(%d)\n\n", basic.setHead) + //} fmt.Fprintf(buffer, "------------------------------\n\n") fmt.Fprint(buffer, "Expected in leveldb:\n G") - for i := 0; i < tt.expCanonicalBlocks; i++ { + for i := 0; i < basic.expCanonicalBlocks; i++ { fmt.Fprintf(buffer, "->C%d", i+1) } fmt.Fprintf(buffer, "\n\n") - fmt.Fprintf(buffer, "Expected head header : C%d\n", tt.expHeadHeader) - fmt.Fprintf(buffer, "Expected head fast block: C%d\n", tt.expHeadFastBlock) - if tt.expHeadBlock == 0 { + fmt.Fprintf(buffer, "Expected head header : C%d\n", basic.expHeadHeader) + fmt.Fprintf(buffer, "Expected head fast block: C%d\n", basic.expHeadFastBlock) + if basic.expHeadBlock == 0 { fmt.Fprintf(buffer, "Expected head block : G\n") } else { - fmt.Fprintf(buffer, "Expected head block : C%d\n", tt.expHeadBlock) + fmt.Fprintf(buffer, "Expected head block : C%d\n", basic.expHeadBlock) } - if tt.expSnapshotBottom == 0 { + if basic.expSnapshotBottom == 0 { fmt.Fprintf(buffer, "Expected snapshot disk : G\n") } else { - fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", tt.expSnapshotBottom) + fmt.Fprintf(buffer, "Expected snapshot disk : C%d\n", 
basic.expSnapshotBottom) } return buffer.String() } +func (basic *snapshotTestBasic) teardown() { + basic.db.Close() + basic.gendb.Close() + os.RemoveAll(basic.datadir) +} + +// snapshotTest is a test case type for normal snapshot recovery. +// It can be used for testing that restart Geth normally. +type snapshotTest struct { + snapshotTestBasic +} + +func (snaptest *snapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Restart the chain normally + chain.Stop() + newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer newchain.Stop() + + snaptest.verify(t, newchain, blocks) +} + +// crashSnapshotTest is a test case type for innormal snapshot recovery. +// It can be used for testing that restart Geth after the crash. +type crashSnapshotTest struct { + snapshotTestBasic +} + +func (snaptest *crashSnapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Pull the plug on the database, simulating a hard crash + db := chain.db + db.Close() + + // Start a new blockchain back up and see where the repair leads us + newdb, err := rawdb.NewLevelDBDatabaseWithFreezer(snaptest.datadir, 0, 0, snaptest.datadir, "") + if err != nil { + t.Fatalf("Failed to reopen persistent database: %v", err) + } + defer newdb.Close() + + // The interesting thing is: instead of starting the blockchain after + // the crash, we do restart twice here: one after the crash and one + // after the normal stop. It's used to ensure the broken snapshot + // can be detected all the time. + newchain, err := NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + newchain.Stop() + + newchain, err = NewBlockChain(newdb, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer newchain.Stop() + + snaptest.verify(t, newchain, blocks) +} + +// gappedSnapshotTest is a test type used to test this scenario: +// - have a complete snapshot +// - restart without enabling the snapshot +// - insert a few blocks +// - restart with enabling the snapshot again +type gappedSnapshotTest struct { + snapshotTestBasic + gapped int // Number of blocks to insert without enabling snapshot +} + +func (snaptest *gappedSnapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Insert blocks without enabling snapshot if gapping is required. 
+ chain.Stop() + gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, func(i int, b *BlockGen) {}) + + // Insert a few more blocks without enabling snapshot + var cacheConfig = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 0, + } + newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + newchain.InsertChain(gappedBlocks) + newchain.Stop() + + // Restart the chain with enabling the snapshot + newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer newchain.Stop() + + snaptest.verify(t, newchain, blocks) +} + +// setHeadSnapshotTest is the test type used to test this scenario: +// - have a complete snapshot +// - set the head to a lower point +// - restart +type setHeadSnapshotTest struct { + snapshotTestBasic + setHead uint64 // Block number to set head back to +} + +func (snaptest *setHeadSnapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Rewind the chain if setHead operation is required. + chain.SetHead(snaptest.setHead) + chain.Stop() + + newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer newchain.Stop() + + snaptest.verify(t, newchain, blocks) +} + +// restartCrashSnapshotTest is the test type used to test this scenario: +// - have a complete snapshot +// - restart chain +// - insert more blocks with enabling the snapshot +// - commit the snapshot +// - crash +// - restart again +type restartCrashSnapshotTest struct { + snapshotTestBasic + newBlocks int +} + +func (snaptest *restartCrashSnapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Firstly, stop the chain properly, with all snapshot journal + // and state committed. + chain.Stop() + + newchain, err := NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {}) + newchain.InsertChain(newBlocks) + + // Commit the entire snapshot into the disk if requested. Note only + // (a) snapshot root and (b) snapshot generator will be committed, + // the diff journal is not. 
+ newchain.Snapshots().Cap(newBlocks[len(newBlocks)-1].Root(), 0) + + // Simulate the blockchain crash + // Don't call chain.Stop here, so that no snapshot + // journal and latest state will be committed + + // Restart the chain after the crash + newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + defer newchain.Stop() + + snaptest.verify(t, newchain, blocks) +} + +// wipeCrashSnapshotTest is the test type used to test this scenario: +// - have a complete snapshot +// - restart, insert more blocks without enabling the snapshot +// - restart again with enabling the snapshot +// - crash +type wipeCrashSnapshotTest struct { + snapshotTestBasic + newBlocks int +} + +func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { + // It's hard to follow the test case, visualize the input + // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) + // fmt.Println(tt.dump()) + chain, blocks := snaptest.prepare(t) + + // Firstly, stop the chain properly, with all snapshot journal + // and state committed. + chain.Stop() + + config := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 0, + } + newchain, err := NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, func(i int, b *BlockGen) {}) + newchain.InsertChain(newBlocks) + newchain.Stop() + + // Restart the chain, the wiper should starts working + config = &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 256, + SnapshotWait: false, // Don't wait rebuild + } + newchain, err = NewBlockChain(snaptest.db, config, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + // Simulate the blockchain crash. + + newchain, err = NewBlockChain(snaptest.db, nil, params.AllEthashProtocolChanges, snaptest.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("Failed to recreate chain: %v", err) + } + snaptest.verify(t, newchain, blocks) +} + // Tests a Geth restart with valid snapshot. Before the shutdown, all snapshot // journal will be persisted correctly. In this case no snapshot recovery is // required. @@ -129,20 +482,21 @@ func TestRestartWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C8 // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: false, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 8, - expSnapshotBottom: 0, // Initial disk layer built from genesis - }) + test := &snapshotTest{ + snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 8, + expSnapshotBottom: 0, // Initial disk layer built from genesis + }, + } + test.test(t) + test.teardown() } // Tests a Geth restart with valid but "legacy" snapshot. 
Before the shutdown, @@ -166,20 +520,22 @@ func TestRestartWithLegacySnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C8 // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: false, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 8, - expSnapshotBottom: 0, // Initial disk layer built from genesis - }) + t.Skip("Legacy format testing is not supported") + test := &snapshotTest{ + snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 8, + expSnapshotBottom: 0, // Initial disk layer built from genesis + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -205,20 +561,21 @@ func TestNoCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }) + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. In this case the @@ -244,20 +601,21 @@ func TestLowCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C2 // Expected snapshot disk : C4 - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 2, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 2, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }) + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 2, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 2, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken snapshot. 
In this case @@ -283,20 +641,21 @@ func TestHighCommitCrashWithNewSnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : C4 - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 6, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 4, // Last committed disk layer, wait recovery - }) + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 6, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 4, // Last committed disk layer, wait recovery + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken and "legacy format" @@ -321,20 +680,22 @@ func TestNoCommitCrashWithLegacySnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 0, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD(genesis) - }) + t.Skip("Legacy format testing is not supported") + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD(genesis) + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken and "legacy format" @@ -359,20 +720,22 @@ func TestLowCommitCrashWithLegacySnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : C2 // Expected snapshot disk : C2 - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 2, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 2, - expSnapshotBottom: 2, // Rebuilt snapshot from the latest HEAD - }) + t.Skip("Legacy format testing is not supported") + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 2, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 2, + expSnapshotBottom: 2, // Rebuilt snapshot from the latest HEAD + }, + } + test.test(t) + test.teardown() } // Tests a Geth was crashed and restarts with a broken and "legacy format" @@ -402,20 +765,22 @@ func TestHighCommitCrashWithLegacySnapshot(t *testing.T) { // Expected head fast block: C8 // Expected head block : G // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: true, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 4, - commitBlock: 6, - expCanonicalBlocks: 8, - expHeadHeader: 8, - expHeadFastBlock: 8, - expHeadBlock: 0, - expSnapshotBottom: 0, // Rebuilt snapshot from the latest HEAD(genesis) - }) + t.Skip("Legacy format testing is not supported") + test := &crashSnapshotTest{ + snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 6, + expCanonicalBlocks: 8, + expHeadHeader: 8, + expHeadFastBlock: 8, + expHeadBlock: 0, + expSnapshotBottom: 0, // Rebuilt snapshot from the latest 
HEAD(genesis) + }, + } + test.test(t) + test.teardown() } // Tests a Geth was running with snapshot enabled. Then restarts without @@ -439,20 +804,22 @@ func TestGappedNewSnapshot(t *testing.T) { // Expected head fast block: C10 // Expected head block : C10 // Expected snapshot disk : C10 - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: false, - gapped: 2, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD - }) + test := &gappedSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD + }, + gapped: 2, + } + test.test(t) + test.teardown() } // Tests a Geth was running with leagcy snapshot enabled. Then restarts @@ -476,20 +843,23 @@ func TestGappedLegacySnapshot(t *testing.T) { // Expected head fast block: C10 // Expected head block : C10 // Expected snapshot disk : C10 - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: false, - gapped: 2, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 10, - expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD - }) + t.Skip("Legacy format testing is not supported") + test := &gappedSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, // Rebuilt snapshot from the latest HEAD + }, + gapped: 2, + } + test.test(t) + test.teardown() } // Tests the Geth was running with snapshot enabled and resetHead is applied. 
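The conversion pattern above, where each scenario embeds snapshotTestBasic, adds only its own knobs, and then calls test and teardown, is meant to be extended. A hypothetical extra scenario would follow the same shape; the expectation numbers below are made up for illustration and are not taken from this patch:

// Hypothetical: same shape as TestGappedNewSnapshot, but with a three block
// gap inserted while snapshots are disabled. Numbers are illustrative only.
func TestGappedThreeBlockSnapshot(t *testing.T) {
	test := &gappedSnapshotTest{
		snapshotTestBasic: snapshotTestBasic{
			legacy:             false,
			chainBlocks:        8,
			snapshotBlock:      0,
			commitBlock:        0,
			expCanonicalBlocks: 11,
			expHeadHeader:      11,
			expHeadFastBlock:   11,
			expHeadBlock:       11,
			expSnapshotBottom:  11, // Rebuilt snapshot from the latest HEAD
		},
		gapped: 3,
	}
	test.test(t)
	test.teardown()
}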
@@ -513,20 +883,22 @@ func TestSetHeadWithNewSnapshot(t *testing.T) { // Expected head fast block: C4 // Expected head block : C4 // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: false, - crash: false, - gapped: 0, - setHead: 4, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 4, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - expSnapshotBottom: 0, // The initial disk layer is built from the genesis - }) + test := &setHeadSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 4, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + expSnapshotBottom: 0, // The initial disk layer is built from the genesis + }, + setHead: 4, + } + test.test(t) + test.teardown() } // Tests the Geth was running with snapshot(legacy-format) enabled and resetHead @@ -550,20 +922,23 @@ func TestSetHeadWithLegacySnapshot(t *testing.T) { // Expected head fast block: C4 // Expected head block : C4 // Expected snapshot disk : G - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: false, - gapped: 0, - setHead: 4, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 4, - expHeadHeader: 4, - expHeadFastBlock: 4, - expHeadBlock: 4, - expSnapshotBottom: 0, // The initial disk layer is built from the genesis - }) + t.Skip("Legacy format testing is not supported") + test := &setHeadSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 4, + expHeadHeader: 4, + expHeadFastBlock: 4, + expHeadBlock: 4, + expSnapshotBottom: 0, // The initial disk layer is built from the genesis + }, + setHead: 4, + } + test.test(t) + test.teardown() } // Tests the Geth was running with snapshot(legacy-format) enabled and upgrades @@ -589,209 +964,60 @@ func TestRecoverSnapshotFromCrashWithLegacyDiffJournal(t *testing.T) { // Expected head fast block: C10 // Expected head block : C8 // Expected snapshot disk : C10 - testSnapshot(t, &snapshotTest{ - legacy: true, - crash: false, - restartCrash: 2, - gapped: 0, - setHead: 0, - chainBlocks: 8, - snapshotBlock: 0, - commitBlock: 0, - expCanonicalBlocks: 10, - expHeadHeader: 10, - expHeadFastBlock: 10, - expHeadBlock: 8, // The persisted state in the first running - expSnapshotBottom: 10, // The persisted disk layer in the second running - }) -} - -func testSnapshot(t *testing.T, tt *snapshotTest) { - // It's hard to follow the test case, visualize the input - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(true)))) - // fmt.Println(tt.dump()) - - // Create a temporary persistent database - datadir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("Failed to create temporary datadir: %v", err) + t.Skip("Legacy format testing is not supported") + test := &restartCrashSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: true, + chainBlocks: 8, + snapshotBlock: 0, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 8, // The persisted state in the first running + expSnapshotBottom: 10, // The persisted disk layer in the second running + }, + newBlocks: 2, } - os.RemoveAll(datadir) - - db, err := rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") - if err != nil { - t.Fatalf("Failed to create persistent database: %v", err) - } - defer db.Close() // Might double close, 
should be fine - - // Initialize a fresh chain - var ( - genesis = new(Genesis).MustCommit(db) - engine = ethash.NewFullFaker() - gendb = rawdb.NewMemoryDatabase() - - // Snapshot is enabled, the first snapshot is created from the Genesis. - // The snapshot memory allowance is 256MB, it means no snapshot flush - // will happen during the block insertion. - cacheConfig = defaultCacheConfig - ) - chain, err := NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to create chain: %v", err) - } - blocks, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, tt.chainBlocks, func(i int, b *BlockGen) {}) - - // Insert the blocks with configured settings. - var breakpoints []uint64 - if tt.commitBlock > tt.snapshotBlock { - breakpoints = append(breakpoints, tt.snapshotBlock, tt.commitBlock) - } else { - breakpoints = append(breakpoints, tt.commitBlock, tt.snapshotBlock) - } - var startPoint uint64 - for _, point := range breakpoints { - if _, err := chain.InsertChain(blocks[startPoint:point]); err != nil { - t.Fatalf("Failed to import canonical chain start: %v", err) - } - startPoint = point - - if tt.commitBlock > 0 && tt.commitBlock == point { - chain.stateCache.TrieDB().Commit(blocks[point-1].Root(), true, nil) - } - if tt.snapshotBlock > 0 && tt.snapshotBlock == point { - if tt.legacy { - // Here we commit the snapshot disk root to simulate - // committing the legacy snapshot. - rawdb.WriteSnapshotRoot(db, blocks[point-1].Root()) - } else { - chain.snaps.Cap(blocks[point-1].Root(), 0) - diskRoot, blockRoot := chain.snaps.DiskRoot(), blocks[point-1].Root() - if !bytes.Equal(diskRoot.Bytes(), blockRoot.Bytes()) { - t.Fatalf("Failed to flush disk layer change, want %x, got %x", blockRoot, diskRoot) - } - } - } - } - if _, err := chain.InsertChain(blocks[startPoint:]); err != nil { - t.Fatalf("Failed to import canonical chain tail: %v", err) - } - // Set the flag for writing legacy journal if necessary - if tt.legacy { - chain.writeLegacyJournal = true - } - // Pull the plug on the database, simulating a hard crash - if tt.crash { - db.Close() - - // Start a new blockchain back up and see where the repair leads us - db, err = rawdb.NewLevelDBDatabaseWithFreezer(datadir, 0, 0, datadir, "") - if err != nil { - t.Fatalf("Failed to reopen persistent database: %v", err) - } - defer db.Close() - - // The interesting thing is: instead of start the blockchain after - // the crash, we do restart twice here: one after the crash and one - // after the normal stop. It's used to ensure the broken snapshot - // can be detected all the time. - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - chain.Stop() - - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - } else if tt.gapped > 0 { - // Insert blocks without enabling snapshot if gapping is required. 
- chain.Stop() - gappedBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.gapped, func(i int, b *BlockGen) {}) - - // Insert a few more blocks without enabling snapshot - var cacheConfig = &CacheConfig{ - TrieCleanLimit: 256, - TrieDirtyLimit: 256, - TrieTimeLimit: 5 * time.Minute, - SnapshotLimit: 0, - } - chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - chain.InsertChain(gappedBlocks) - chain.Stop() - - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - } else if tt.setHead != 0 { - // Rewind the chain if setHead operation is required. - chain.SetHead(tt.setHead) - chain.Stop() - - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - } else if tt.restartCrash != 0 { - // Firstly, stop the chain properly, with all snapshot journal - // and state committed. - chain.Stop() - - // Restart chain, forcibly flush the disk layer journal with new format - newBlocks, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], engine, gendb, tt.restartCrash, func(i int, b *BlockGen) {}) - chain, err = NewBlockChain(db, cacheConfig, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - chain.InsertChain(newBlocks) - chain.Snapshot().Cap(newBlocks[len(newBlocks)-1].Root(), 0) - - // Simulate the blockchain crash - // Don't call chain.Stop here, so that no snapshot - // journal and latest state will be committed - - // Restart the chain after the crash - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - } else { - chain.Stop() - - // Restart the chain normally - chain, err = NewBlockChain(db, nil, params.AllEthashProtocolChanges, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("Failed to recreate chain: %v", err) - } - defer chain.Stop() - } - - // Iterate over all the remaining blocks and ensure there are no gaps - verifyNoGaps(t, chain, true, blocks) - verifyCutoff(t, chain, true, blocks, tt.expCanonicalBlocks) + test.test(t) + test.teardown() +} - if head := chain.CurrentHeader(); head.Number.Uint64() != tt.expHeadHeader { - t.Errorf("Head header mismatch: have %d, want %d", head.Number, tt.expHeadHeader) - } - if head := chain.CurrentFastBlock(); head.NumberU64() != tt.expHeadFastBlock { - t.Errorf("Head fast block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadFastBlock) - } - if head := chain.CurrentBlock(); head.NumberU64() != tt.expHeadBlock { - t.Errorf("Head block mismatch: have %d, want %d", head.NumberU64(), tt.expHeadBlock) - } - // Check the disk layer, ensure they are matched - block := chain.GetBlockByNumber(tt.expSnapshotBottom) - if block == nil { - t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", tt.expSnapshotBottom) - } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { - t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) +// Tests the Geth was running with a complete snapshot and 
then imports a few +// more new blocks on top without enabling the snapshot. After the restart, +// crash happens. Check everything is ok after the restart. +func TestRecoverSnapshotFromWipingCrash(t *testing.T) { + // Chain: + // G->C1->C2->C3->C4->C5->C6->C7->C8 (HEAD) + // + // Commit: G + // Snapshot: G + // + // SetHead(0) + // + // ------------------------------ + // + // Expected in leveldb: + // G->C1->C2->C3->C4->C5->C6->C7->C8->C9->C10 + // + // Expected head header : C10 + // Expected head fast block: C10 + // Expected head block : C8 + // Expected snapshot disk : C10 + test := &wipeCrashSnapshotTest{ + snapshotTestBasic: snapshotTestBasic{ + legacy: false, + chainBlocks: 8, + snapshotBlock: 4, + commitBlock: 0, + expCanonicalBlocks: 10, + expHeadHeader: 10, + expHeadFastBlock: 10, + expHeadBlock: 10, + expSnapshotBottom: 10, + }, + newBlocks: 2, } + test.test(t) + test.teardown() } diff --git a/core/blockchain_test.go b/core/blockchain_test.go index 7ec62b11dd..3e4757f8b6 100644 --- a/core/blockchain_test.go +++ b/core/blockchain_test.go @@ -17,6 +17,7 @@ package core import ( + "errors" "fmt" "io/ioutil" "math/big" @@ -468,7 +469,7 @@ func testBadHashes(t *testing.T, full bool) { _, err = blockchain.InsertHeaderChain(headers, 1) } - if err != ErrBlacklistedHash { + if !errors.Is(err, ErrBlacklistedHash) { t.Errorf("error mismatch: have: %v, want: %v", err, ErrBlacklistedHash) } } @@ -599,7 +600,7 @@ func TestFastVsFullChains(t *testing.T) { Alloc: GenesisAlloc{address: {Balance: funds}}, } genesis = gspec.MustCommit(gendb) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, 1024, func(i int, block *BlockGen) { block.SetCoinbase(common.Address{0x00}) @@ -682,12 +683,12 @@ func TestFastVsFullChains(t *testing.T) { } if fblock, arblock, anblock := fast.GetBlockByHash(hash), archive.GetBlockByHash(hash), ancient.GetBlockByHash(hash); fblock.Hash() != arblock.Hash() || anblock.Hash() != arblock.Hash() { t.Errorf("block #%d [%x]: block mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock, anblock, arblock) - } else if types.DeriveSha(fblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) || types.DeriveSha(anblock.Transactions(), new(trie.Trie)) != types.DeriveSha(arblock.Transactions(), new(trie.Trie)) { + } else if types.DeriveSha(fblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) || types.DeriveSha(anblock.Transactions(), trie.NewStackTrie(nil)) != types.DeriveSha(arblock.Transactions(), trie.NewStackTrie(nil)) { t.Errorf("block #%d [%x]: transactions mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Transactions(), anblock.Transactions(), arblock.Transactions()) } else if types.CalcUncleHash(fblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) || types.CalcUncleHash(anblock.Uncles()) != types.CalcUncleHash(arblock.Uncles()) { t.Errorf("block #%d [%x]: uncles mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, fblock.Uncles(), anblock, arblock.Uncles()) } - if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, 
new(trie.Trie)) != types.DeriveSha(areceipts, new(trie.Trie)) { + if freceipts, anreceipts, areceipts := rawdb.ReadReceipts(fastDb, hash, *rawdb.ReadHeaderNumber(fastDb, hash), fast.Config()), rawdb.ReadReceipts(ancientDb, hash, *rawdb.ReadHeaderNumber(ancientDb, hash), fast.Config()), rawdb.ReadReceipts(archiveDb, hash, *rawdb.ReadHeaderNumber(archiveDb, hash), fast.Config()); types.DeriveSha(freceipts, trie.NewStackTrie(nil)) != types.DeriveSha(areceipts, trie.NewStackTrie(nil)) { t.Errorf("block #%d [%x]: receipts mismatch: fastdb %v, ancientdb %v, archivedb %v", num, hash, freceipts, anreceipts, areceipts) } } @@ -838,7 +839,7 @@ func TestChainTxReorgs(t *testing.T) { }, } genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) // Create two transactions shared between the chains: @@ -943,7 +944,7 @@ func TestLogReorgs(t *testing.T) { code = common.Hex2Bytes("60606040525b7f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405180905060405180910390a15b600a8060416000396000f360606040526008565b00") gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) @@ -997,7 +998,7 @@ func TestLogRebirth(t *testing.T) { db = rawdb.NewMemoryDatabase() gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) engine = ethash.NewFaker() blockchain, _ = NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil) ) @@ -1061,7 +1062,7 @@ func TestSideLogRebirth(t *testing.T) { db = rawdb.NewMemoryDatabase() gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}} genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) ) @@ -1134,7 +1135,7 @@ func TestReorgSideEvent(t *testing.T) { Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(10000000000000)}}, } genesis = gspec.MustCommit(db) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) blockchain, _ := NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) @@ -1294,7 +1295,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) - tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID)) + tx, err = basicTx(types.LatestSigner(gspec.Config)) if err != nil { t.Fatal(err) } @@ -1306,7 +1307,7 @@ func TestEIP155Transition(t *testing.T) { } block.AddTx(tx) - tx, err = basicTx(types.NewEIP155Signer(gspec.Config.ChainID)) + tx, err = basicTx(types.LatestSigner(gspec.Config)) if err != nil { t.Fatal(err) } @@ -1344,7 +1345,7 @@ func TestEIP155Transition(t *testing.T) { } ) if i == 0 { - tx, err = basicTx(types.NewEIP155Signer(big.NewInt(2))) + tx, err = basicTx(types.LatestSigner(config)) if err != nil { t.Fatal(err) } @@ -1384,7 +1385,7 @@ func TestEIP161AccountRemoval(t *testing.T) { var ( tx *types.Transaction err error - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = 
types.LatestSigner(gspec.Config) ) switch i { case 0: @@ -2077,7 +2078,7 @@ func TestTransactionIndices(t *testing.T) { funds = big.NewInt(1000000000) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}} genesis = gspec.MustCommit(gendb) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) height := uint64(128) blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) { @@ -2204,7 +2205,7 @@ func TestSkipStaleTxIndicesInFastSync(t *testing.T) { funds = big.NewInt(1000000000) gspec = &Genesis{Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}} genesis = gspec.MustCommit(gendb) - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) height := uint64(128) blocks, receipts := GenerateChain(gspec.Config, genesis, ethash.NewFaker(), gendb, int(height), func(i int, block *BlockGen) { @@ -3029,3 +3030,81 @@ func TestInitThenFailCreateContract(t *testing.T) { } } } + +// TestEIP2718Transition tests that an EIP-2718 transaction will be accepted +// after the fork block has passed. This is verified by sending an EIP-2930 +// access list transaction, which specifies a single slot access, and then +// checking that the gas usage of a hot SLOAD and a cold SLOAD are calculated +// correctly. +func TestEIP2718Transition(t *testing.T) { + var ( + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") + + // Generate a canonical chain to act as the main dataset + engine = ethash.NewFaker() + db = rawdb.NewMemoryDatabase() + + // A sender who makes transactions, has some funds + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + address = crypto.PubkeyToAddress(key.PublicKey) + funds = big.NewInt(1000000000) + gspec = &Genesis{ + Config: params.YoloV3ChainConfig, + Alloc: GenesisAlloc{ + address: {Balance: funds}, + // The address 0xAAAA sloads 0x00 and 0x01 + aa: { + Code: []byte{ + byte(vm.PC), + byte(vm.PC), + byte(vm.SLOAD), + byte(vm.SLOAD), + }, + Nonce: 0, + Balance: big.NewInt(0), + }, + }, + } + genesis = gspec.MustCommit(db) + ) + + blocks, _ := GenerateChain(gspec.Config, genesis, engine, db, 1, func(i int, b *BlockGen) { + b.SetCoinbase(common.Address{1}) + + // One transaction to 0xAAAA + signer := types.LatestSigner(gspec.Config) + tx, _ := types.SignNewTx(key, signer, &types.AccessListTx{ + ChainID: gspec.Config.ChainID, + Nonce: 0, + To: &aa, + Gas: 30000, + GasPrice: big.NewInt(1), + AccessList: types.AccessList{{ + Address: aa, + StorageKeys: []common.Hash{{0}}, + }}, + }) + b.AddTx(tx) + }) + + // Import the canonical chain + diskdb := rawdb.NewMemoryDatabase() + gspec.MustCommit(diskdb) + + chain, err := NewBlockChain(diskdb, nil, gspec.Config, engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + + block := chain.GetBlockByNumber(1) + + // Expected gas is intrinsic + 2 * pc + hot load + cold load, since only one load is in the access list + expected := params.TxGas + params.TxAccessListAddressGas + params.TxAccessListStorageKeyGas + vm.GasQuickStep*2 + vm.WarmStorageReadCostEIP2929 + vm.ColdSloadCostEIP2929 + if block.GasUsed() != expected { + t.Fatalf("incorrect amount of gas spent: expected %d, got %d", expected, 
block.GasUsed()) + + } +} diff --git a/core/bloom_indexer.go b/core/bloom_indexer.go new file mode 100644 index 0000000000..856746a1c0 --- /dev/null +++ b/core/bloom_indexer.go @@ -0,0 +1,92 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/ethereum/go-ethereum/core/bloombits" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethdb" +) + +const ( + // bloomThrottling is the time to wait between processing two consecutive index + // sections. It's useful during chain upgrades to prevent disk overload. + bloomThrottling = 100 * time.Millisecond +) + +// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index +// for the Ethereum header bloom filters, permitting blazing fast filtering. +type BloomIndexer struct { + size uint64 // section size to generate bloombits for + db ethdb.Database // database instance to write index data and metadata into + gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index + section uint64 // Section is the section number being processed currently + head common.Hash // Head is the hash of the last header processed +} + +// NewBloomIndexer returns a chain indexer that generates bloom bits data for the +// canonical chain for fast logs filtering. +func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *ChainIndexer { + backend := &BloomIndexer{ + db: db, + size: size, + } + table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix)) + + return NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits") +} + +// Reset implements core.ChainIndexerBackend, starting a new bloombits index +// section. +func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { + gen, err := bloombits.NewGenerator(uint(b.size)) + b.gen, b.section, b.head = gen, section, common.Hash{} + return err +} + +// Process implements core.ChainIndexerBackend, adding a new header's bloom into +// the index. +func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error { + b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom) + b.head = header.Hash() + return nil +} + +// Commit implements core.ChainIndexerBackend, finalizing the bloom section and +// writing it out into the database. 
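// Illustrative sketch (not part of the diff): the gas expectation asserted at the end of
// TestEIP2718Transition above can be reproduced by hand. The numeric constants below are the
// EIP-2929/EIP-2930 values that params.TxGas, params.TxAccessListAddressGas,
// params.TxAccessListStorageKeyGas, vm.GasQuickStep, vm.WarmStorageReadCostEIP2929 and
// vm.ColdSloadCostEIP2929 are assumed to hold; treat them as an assumption, not a quote of
// the go-ethereum sources.
package main

import "fmt"

func main() {
	const (
		txGas             = 21000 // base cost of any transaction
		accessListAddress = 2400  // per address listed in the EIP-2930 access list
		accessListStorage = 1900  // per storage key listed in the access list
		gasQuickStep      = 2     // cost of PC, executed twice by the 0xAAAA contract
		warmStorageRead   = 100   // SLOAD of the slot pre-warmed via the access list
		coldSload         = 2100  // SLOAD of the slot that was not in the access list
	)
	expected := txGas + accessListAddress + accessListStorage + 2*gasQuickStep + warmStorageRead + coldSload
	fmt.Println(expected) // 27504
}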
+func (b *BloomIndexer) Commit() error { + batch := b.db.NewBatch() + for i := 0; i < types.BloomBitLength; i++ { + bits, err := b.gen.Bitset(uint(i)) + if err != nil { + return err + } + rawdb.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits)) + } + return batch.Write() +} + +// Prune returns an empty error since we don't support pruning here. +func (b *BloomIndexer) Prune(threshold uint64) error { + return nil +} diff --git a/core/bloombits/matcher_test.go b/core/bloombits/matcher_test.go index 91143e525e..923579221f 100644 --- a/core/bloombits/matcher_test.go +++ b/core/bloombits/matcher_test.go @@ -30,6 +30,7 @@ const testSectionSize = 4096 // Tests that wildcard filter rules (nil) can be specified and are handled well. func TestMatcherWildcards(t *testing.T) { + t.Parallel() matcher := NewMatcher(testSectionSize, [][][]byte{ {common.Address{}.Bytes(), common.Address{0x01}.Bytes()}, // Default address is not a wildcard {common.Hash{}.Bytes(), common.Hash{0x01}.Bytes()}, // Default hash is not a wildcard @@ -56,6 +57,7 @@ func TestMatcherWildcards(t *testing.T) { // Tests the matcher pipeline on a single continuous workflow without interrupts. func TestMatcherContinuous(t *testing.T) { + t.Parallel() testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, false, 75) testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, false, 81) testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, false, 36) @@ -64,6 +66,7 @@ func TestMatcherContinuous(t *testing.T) { // Tests the matcher pipeline on a constantly interrupted and resumed work pattern // with the aim of ensuring data items are requested only once. func TestMatcherIntermittent(t *testing.T) { + t.Parallel() testMatcherDiffBatches(t, [][]bloomIndexes{{{10, 20, 30}}}, 0, 100000, true, 75) testMatcherDiffBatches(t, [][]bloomIndexes{{{32, 3125, 100}}, {{40, 50, 10}}}, 0, 100000, true, 81) testMatcherDiffBatches(t, [][]bloomIndexes{{{4, 8, 11}, {7, 8, 17}}, {{9, 9, 12}, {15, 20, 13}}, {{18, 15, 15}, {12, 10, 4}}}, 0, 10000, true, 36) @@ -71,6 +74,7 @@ func TestMatcherIntermittent(t *testing.T) { // Tests the matcher pipeline on random input to hopefully catch anomalies. func TestMatcherRandom(t *testing.T) { + t.Parallel() for i := 0; i < 10; i++ { testMatcherBothModes(t, makeRandomIndexes([]int{1}, 50), 0, 10000, 0) testMatcherBothModes(t, makeRandomIndexes([]int{3}, 50), 0, 10000, 0) @@ -84,6 +88,7 @@ func TestMatcherRandom(t *testing.T) { // shifter from a multiple of 8. This is needed to cover an optimisation with // bitset matching https://github.com/ethereum/go-ethereum/issues/15309. func TestMatcherShifted(t *testing.T) { + t.Parallel() // Block 0 always matches in the tests, skip ahead of first 8 blocks with the // start to get a potential zero byte in the matcher bitset. @@ -97,6 +102,7 @@ func TestMatcherShifted(t *testing.T) { // Tests that matching on everything doesn't crash (special case internally). 
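// Illustrative sketch (not part of the diff): how BloomIndexer.Process above derives the bit
// index it hands to the bloombits generator. For a section size of 4096, block 8200 falls in
// section 2 at offset 8 (8200 - 2*4096). The helper below is hypothetical; go-ethereum does
// this arithmetic inline.
package main

import "fmt"

// sectionOffset returns the bloombits section a block belongs to and the block's
// offset inside that section.
func sectionOffset(blockNumber, sectionSize uint64) (section, offset uint64) {
	section = blockNumber / sectionSize
	offset = blockNumber - section*sectionSize
	return section, offset
}

func main() {
	sec, off := sectionOffset(8200, 4096)
	fmt.Println(sec, off) // 2 8
}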
func TestWildcardMatcher(t *testing.T) { + t.Parallel() testMatcherBothModes(t, nil, 0, 10000, 0) } diff --git a/core/bloombits/scheduler_test.go b/core/bloombits/scheduler_test.go index 70772e4ab9..707e8ea11d 100644 --- a/core/bloombits/scheduler_test.go +++ b/core/bloombits/scheduler_test.go @@ -35,6 +35,7 @@ func TestSchedulerMultiClientSingleFetcher(t *testing.T) { testScheduler(t, 10, func TestSchedulerMultiClientMultiFetcher(t *testing.T) { testScheduler(t, 10, 10, 5000) } func testScheduler(t *testing.T, clients int, fetchers int, requests int) { + t.Parallel() f := newScheduler(0) // Create a batch of handler goroutines that respond to bloom bit requests and @@ -88,10 +89,10 @@ func testScheduler(t *testing.T, clients int, fetchers int, requests int) { } close(in) }() - + b := new(big.Int) for j := 0; j < requests; j++ { bits := <-out - if want := new(big.Int).SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) { + if want := b.SetUint64(uint64(j)).Bytes(); !bytes.Equal(bits, want) { t.Errorf("vector %d: delivered content mismatch: have %x, want %x", j, bits, want) } } diff --git a/core/error.go b/core/error.go index 5a28be7e1c..197dd81567 100644 --- a/core/error.go +++ b/core/error.go @@ -16,7 +16,11 @@ package core -import "errors" +import ( + "errors" + + "github.com/ethereum/go-ethereum/core/types" +) var ( // ErrKnownBlock is returned when a block to import is already known locally. @@ -63,4 +67,8 @@ var ( // ErrIntrinsicGas is returned if the transaction is specified to use less gas // than required to start the invocation. ErrIntrinsicGas = errors.New("intrinsic gas too low") + + // ErrTxTypeNotSupported is returned if a transaction is not supported in the + // current network configuration. + ErrTxTypeNotSupported = types.ErrTxTypeNotSupported ) diff --git a/core/evm.go b/core/evm.go index 8abe5a0477..8f69d51499 100644 --- a/core/evm.go +++ b/core/evm.go @@ -35,8 +35,8 @@ type ChainContext interface { GetHeader(common.Hash, uint64) *types.Header } -// NewEVMContext creates a new context for use in the EVM. -func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author *common.Address) vm.Context { +// NewEVMBlockContext creates a new context for use in the EVM. +func NewEVMBlockContext(header *types.Header, chain ChainContext, author *common.Address) vm.BlockContext { // If we don't have an explicit author (i.e. not mining), extract from the header var beneficiary common.Address if author == nil { @@ -44,17 +44,23 @@ func NewEVMContext(msg Message, header *types.Header, chain ChainContext, author } else { beneficiary = *author } - return vm.Context{ + return vm.BlockContext{ CanTransfer: CanTransfer, Transfer: Transfer, GetHash: GetHashFn(header, chain), - Origin: msg.From(), Coinbase: beneficiary, BlockNumber: new(big.Int).Set(header.Number), Time: new(big.Int).SetUint64(header.Time), Difficulty: new(big.Int).Set(header.Difficulty), GasLimit: header.GasLimit, - GasPrice: new(big.Int).Set(msg.GasPrice()), + } +} + +// NewEVMTxContext creates a new transaction context for a single transaction. 
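// Illustrative sketch (not part of the diff): core/error.go above re-exports
// types.ErrTxTypeNotSupported as a sentinel, and the tests in this diff swap direct
// comparisons such as err == ErrBlacklistedHash for errors.Is, which keeps matching even when
// a sentinel is wrapped with fmt.Errorf and %w somewhere along the call path. The sentinel
// and wrapping below are made up for the example.
package main

import (
	"errors"
	"fmt"
)

var errBlacklisted = errors.New("blacklisted hash")

func insert() error {
	// A caller may add context while preserving the sentinel via %w.
	return fmt.Errorf("block 42: %w", errBlacklisted)
}

func main() {
	err := insert()
	fmt.Println(err == errBlacklisted)          // false: the wrapper is a different value
	fmt.Println(errors.Is(err, errBlacklisted)) // true: unwrapping finds the sentinel
}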
+func NewEVMTxContext(msg Message) vm.TxContext { + return vm.TxContext{ + Origin: msg.From(), + GasPrice: new(big.Int).Set(msg.GasPrice()), } } diff --git a/core/forkid/forkid.go b/core/forkid/forkid.go index c432858617..1bf3406828 100644 --- a/core/forkid/forkid.go +++ b/core/forkid/forkid.go @@ -84,6 +84,15 @@ func NewID(config *params.ChainConfig, genesis common.Hash, head uint64) ID { return ID{Hash: checksumToBytes(hash), Next: next} } +// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance. +func NewIDWithChain(chain Blockchain) ID { + return NewID( + chain.Config(), + chain.Genesis().Hash(), + chain.CurrentHeader().Number.Uint64(), + ) +} + // NewFilter creates a filter that returns if a fork ID should be rejected or not // based on the local chain's status. func NewFilter(chain Blockchain) Filter { diff --git a/core/forkid/forkid_test.go b/core/forkid/forkid_test.go index 888b553475..87d64ed67f 100644 --- a/core/forkid/forkid_test.go +++ b/core/forkid/forkid_test.go @@ -60,7 +60,7 @@ func TestCreation(t *testing.T) { {9069000, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // First Istanbul and first Muir Glacier block {9199999, ID{Hash: checksumToBytes(0x879d6e30), Next: 9200000}}, // Last Istanbul and first Muir Glacier block {9200000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // First Muir Glacier block - {10000000, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block + {12243999, ID{Hash: checksumToBytes(0xe029e991), Next: 0}}, // Future Muir Glacier block }, }, // Ropsten test cases @@ -81,7 +81,7 @@ func TestCreation(t *testing.T) { {6485846, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // First Istanbul block {7117116, ID{Hash: checksumToBytes(0x4bc66396), Next: 7117117}}, // Last Istanbul block {7117117, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // First Muir Glacier block - {7500000, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // Future + {9812188, ID{Hash: checksumToBytes(0x6727ef90), Next: 0}}, // Future Muir Glacier block }, }, // Rinkeby test cases @@ -101,7 +101,7 @@ func TestCreation(t *testing.T) { {4321234, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // First Petersburg block {5435344, ID{Hash: checksumToBytes(0xafec6b27), Next: 5435345}}, // Last Petersburg block {5435345, ID{Hash: checksumToBytes(0xcbdb8838), Next: 0}}, // First Istanbul block - {6000000, ID{Hash: checksumToBytes(0xcbdb8838), Next: 0}}, // Future Istanbul block + {8290927, ID{Hash: checksumToBytes(0xcbdb8838), Next: 0}}, // Future Istanbul block }, }, // Goerli test cases @@ -112,7 +112,7 @@ func TestCreation(t *testing.T) { {0, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Unsynced, last Frontier, Homestead, Tangerine, Spurious, Byzantium, Constantinople and first Petersburg block {1561650, ID{Hash: checksumToBytes(0xa3f5ab08), Next: 1561651}}, // Last Petersburg block {1561651, ID{Hash: checksumToBytes(0xc25efa5c), Next: 0}}, // First Istanbul block - {2000000, ID{Hash: checksumToBytes(0xc25efa5c), Next: 0}}, // Future Istanbul block + {4460643, ID{Hash: checksumToBytes(0xc25efa5c), Next: 0}}, // Future Istanbul block }, }, } @@ -185,11 +185,11 @@ func TestValidation(t *testing.T) { // Local is mainnet Petersburg, remote is Rinkeby Petersburg. {7987396, ID{Hash: checksumToBytes(0xafec6b27), Next: 0}, ErrLocalIncompatibleOrStale}, - // Local is mainnet Muir Glacier, far in the future. Remote announces Gopherium (non existing fork) + // Local is mainnet Istanbul, far in the future. 
Remote announces Gopherium (non existing fork) // at some future block 88888888, for itself, but past block for local. Local is incompatible. // // This case detects non-upgraded nodes with majority hash power (typical Ropsten mess). - {88888888, ID{Hash: checksumToBytes(0xe029e991), Next: 88888888}, ErrLocalIncompatibleOrStale}, + {88888888, ID{Hash: checksumToBytes(0x0eb440f6), Next: 88888888}, ErrLocalIncompatibleOrStale}, // Local is mainnet Byzantium. Remote is also in Byzantium, but announces Gopherium (non existing // fork) at block 7279999, before Petersburg. Local is incompatible. diff --git a/core/genesis.go b/core/genesis.go index 0535d7ee3a..e05e27fe17 100644 --- a/core/genesis.go +++ b/core/genesis.go @@ -153,6 +153,10 @@ func (e *GenesisMismatchError) Error() string { // // The returned chain configuration is never nil. func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig, common.Hash, error) { + return SetupGenesisBlockWithOverride(db, genesis, nil) +} + +func SetupGenesisBlockWithOverride(db ethdb.Database, genesis *Genesis, overrideBerlin *big.Int) (*params.ChainConfig, common.Hash, error) { if genesis != nil && genesis.Config == nil { return params.AllEthashProtocolChanges, common.Hash{}, errGenesisNoConfig } @@ -171,11 +175,10 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig } return genesis.Config, block.Hash(), nil } - // We have the genesis block in database(perhaps in ancient database) // but the corresponding state is missing. header := rawdb.ReadHeader(db, stored, 0) - if _, err := state.New(header.Root, state.NewDatabaseWithCache(db, 0, ""), nil); err != nil { + if _, err := state.New(header.Root, state.NewDatabaseWithConfig(db, nil), nil); err != nil { if genesis == nil { genesis = DefaultGenesisBlock() } @@ -190,7 +193,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig } return genesis.Config, block.Hash(), nil } - // Check whether the genesis block is already written. if genesis != nil { hash := genesis.ToBlock(nil).Hash() @@ -198,9 +200,11 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig return genesis.Config, hash, &GenesisMismatchError{stored, hash} } } - // Get the existing chain configuration. newcfg := genesis.configOrDefault(stored) + if overrideBerlin != nil { + newcfg.BerlinBlock = overrideBerlin + } if err := newcfg.CheckConfigForkOrder(); err != nil { return newcfg, common.Hash{}, err } @@ -216,7 +220,6 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis) (*params.ChainConfig if genesis == nil && stored != params.MainnetGenesisHash { return storedcfg, stored, nil } - // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. 
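// Illustrative sketch (not part of the diff): NewIDWithChain above feeds the chain config,
// genesis hash and head number into NewID. This sketch assumes the EIP-2124 scheme that fork
// ID hashes are built from: a CRC32 (IEEE) checksum of the genesis hash, folded with each
// past fork block number encoded as a big-endian uint64. The genesis hash and fork list here
// are placeholders, not a real chain's values.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// forkHash folds the activated fork block numbers into the genesis checksum.
func forkHash(genesis [32]byte, forks []uint64) uint32 {
	sum := crc32.ChecksumIEEE(genesis[:])
	for _, fork := range forks {
		var blob [8]byte
		binary.BigEndian.PutUint64(blob[:], fork)
		sum = crc32.Update(sum, crc32.IEEETable, blob[:])
	}
	return sum
}

func main() {
	var genesis [32]byte // placeholder genesis hash (all zeroes)
	fmt.Printf("%#x\n", forkHash(genesis, []uint64{1_150_000, 2_463_000}))
}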
height := rawdb.ReadHeaderNumber(db, rawdb.ReadHeadHeaderHash(db)) @@ -243,8 +246,8 @@ func (g *Genesis) configOrDefault(ghash common.Hash) *params.ChainConfig { return params.RinkebyChainConfig case ghash == params.GoerliGenesisHash: return params.GoerliChainConfig - case ghash == params.YoloV2GenesisHash: - return params.YoloV2ChainConfig + case ghash == params.YoloV3GenesisHash: + return params.YoloV3ChainConfig default: return params.AllEthashProtocolChanges } @@ -288,7 +291,7 @@ func (g *Genesis) ToBlock(db ethdb.Database) *types.Block { statedb.Commit(false) statedb.Database().TrieDB().Commit(root, true, nil) - return types.NewBlock(head, nil, nil, nil, new(trie.Trie)) + return types.NewBlock(head, nil, nil, nil, trie.NewStackTrie(nil)) } // Commit writes the block and state of a genesis specification to the database. @@ -380,15 +383,15 @@ func DefaultGoerliGenesisBlock() *Genesis { } } -func DefaultYoloV2GenesisBlock() *Genesis { - // TODO: Update with yolov2 values + regenerate alloc data +func DefaultYoloV3GenesisBlock() *Genesis { + // Full genesis: https://gist.github.com/holiman/c6ed9269dce28304ad176314caa75e97 return &Genesis{ - Config: params.YoloV2ChainConfig, - Timestamp: 0x5f91b932, - ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000008a37866fd3627c9205a37c8685666f32ec07bb1b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), + Config: params.YoloV3ChainConfig, + Timestamp: 0x6027dd2e, + ExtraData: hexutil.MustDecode("0x00000000000000000000000000000000000000000000000000000000000000001041afbcb359d5a8dc58c15b2ff51354ff8a217d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"), GasLimit: 0x47b760, Difficulty: big.NewInt(1), - Alloc: decodePrealloc(yoloV1AllocData), + Alloc: decodePrealloc(yoloV3AllocData), } } diff --git a/core/genesis_alloc.go b/core/genesis_alloc.go index 3e03d16407..5b0e933d7a 100644 --- a/core/genesis_alloc.go +++ b/core/genesis_alloc.go @@ -25,4 +25,4 @@ const mainnetAllocData = "\xfa\x04]X\u0793\r\x83b\x011\x8e\u0189\x9agT\x06\x908' const ropstenAllocData = "\xf9\x03\xa4\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x80\xc2\v\x80\xc2\f\x80\xc2\r\x80\xc2\x0e\x80\xc2\x0f\x80\xc2\x10\x80\xc2\x11\x80\xc2\x12\x80\xc2\x13\x80\xc2\x14\x80\xc2\x15\x80\xc2\x16\x80\xc2\x17\x80\xc2\x18\x80\xc2\x19\x80\xc2\x1a\x80\xc2\x1b\x80\xc2\x1c\x80\xc2\x1d\x80\xc2\x1e\x80\xc2\x1f\x80\xc2 
\x80\xc2!\x80\xc2\"\x80\xc2#\x80\xc2$\x80\xc2%\x80\xc2&\x80\xc2'\x80\xc2(\x80\xc2)\x80\xc2*\x80\xc2+\x80\xc2,\x80\xc2-\x80\xc2.\x80\xc2/\x80\xc20\x80\xc21\x80\xc22\x80\xc23\x80\xc24\x80\xc25\x80\xc26\x80\xc27\x80\xc28\x80\xc29\x80\xc2:\x80\xc2;\x80\xc2<\x80\xc2=\x80\xc2>\x80\xc2?\x80\xc2@\x80\xc2A\x80\xc2B\x80\xc2C\x80\xc2D\x80\xc2E\x80\xc2F\x80\xc2G\x80\xc2H\x80\xc2I\x80\xc2J\x80\xc2K\x80\xc2L\x80\xc2M\x80\xc2N\x80\xc2O\x80\xc2P\x80\xc2Q\x80\xc2R\x80\xc2S\x80\xc2T\x80\xc2U\x80\xc2V\x80\xc2W\x80\xc2X\x80\xc2Y\x80\xc2Z\x80\xc2[\x80\xc2\\\x80\xc2]\x80\xc2^\x80\xc2_\x80\xc2`\x80\xc2a\x80\xc2b\x80\xc2c\x80\xc2d\x80\xc2e\x80\xc2f\x80\xc2g\x80\xc2h\x80\xc2i\x80\xc2j\x80\xc2k\x80\xc2l\x80\xc2m\x80\xc2n\x80\xc2o\x80\xc2p\x80\xc2q\x80\xc2r\x80\xc2s\x80\xc2t\x80\xc2u\x80\xc2v\x80\xc2w\x80\xc2x\x80\xc2y\x80\xc2z\x80\xc2{\x80\xc2|\x80\xc2}\x80\xc2~\x80\xc2\u007f\x80\u00c1\x80\x80\u00c1\x81\x80\u00c1\x82\x80\u00c1\x83\x80\u00c1\x84\x80\u00c1\x85\x80\u00c1\x86\x80\u00c1\x87\x80\u00c1\x88\x80\u00c1\x89\x80\u00c1\x8a\x80\u00c1\x8b\x80\u00c1\x8c\x80\u00c1\x8d\x80\u00c1\x8e\x80\u00c1\x8f\x80\u00c1\x90\x80\u00c1\x91\x80\u00c1\x92\x80\u00c1\x93\x80\u00c1\x94\x80\u00c1\x95\x80\u00c1\x96\x80\u00c1\x97\x80\u00c1\x98\x80\u00c1\x99\x80\u00c1\x9a\x80\u00c1\x9b\x80\u00c1\x9c\x80\u00c1\x9d\x80\u00c1\x9e\x80\u00c1\x9f\x80\u00c1\xa0\x80\u00c1\xa1\x80\u00c1\xa2\x80\u00c1\xa3\x80\u00c1\xa4\x80\u00c1\xa5\x80\u00c1\xa6\x80\u00c1\xa7\x80\u00c1\xa8\x80\u00c1\xa9\x80\u00c1\xaa\x80\u00c1\xab\x80\u00c1\xac\x80\u00c1\xad\x80\u00c1\xae\x80\u00c1\xaf\x80\u00c1\xb0\x80\u00c1\xb1\x80\u00c1\xb2\x80\u00c1\xb3\x80\u00c1\xb4\x80\u00c1\xb5\x80\u00c1\xb6\x80\u00c1\xb7\x80\u00c1\xb8\x80\u00c1\xb9\x80\u00c1\xba\x80\u00c1\xbb\x80\u00c1\xbc\x80\u00c1\xbd\x80\u00c1\xbe\x80\u00c1\xbf\x80\u00c1\xc0\x80\u00c1\xc1\x80\u00c1\u0080\u00c1\u00c0\u00c1\u0100\u00c1\u0140\u00c1\u0180\u00c1\u01c0\u00c1\u0200\u00c1\u0240\u00c1\u0280\u00c1\u02c0\u00c1\u0300\u00c1\u0340\u00c1\u0380\u00c1\u03c0\u00c1\u0400\u00c1\u0440\u00c1\u0480\u00c1\u04c0\u00c1\u0500\u00c1\u0540\u00c1\u0580\u00c1\u05c0\u00c1\u0600\u00c1\u0640\u00c1\u0680\u00c1\u06c0\u00c1\u0700\u00c1\u0740\u00c1\u0780\u00c1\u07c0\u00c1\xe0\x80\u00c1\xe1\x80\u00c1\xe2\x80\u00c1\xe3\x80\u00c1\xe4\x80\u00c1\xe5\x80\u00c1\xe6\x80\u00c1\xe7\x80\u00c1\xe8\x80\u00c1\xe9\x80\u00c1\xea\x80\u00c1\xeb\x80\u00c1\xec\x80\u00c1\xed\x80\u00c1\xee\x80\u00c1\xef\x80\u00c1\xf0\x80\u00c1\xf1\x80\u00c1\xf2\x80\u00c1\xf3\x80\u00c1\xf4\x80\u00c1\xf5\x80\u00c1\xf6\x80\u00c1\xf7\x80\u00c1\xf8\x80\u00c1\xf9\x80\u00c1\xfa\x80\u00c1\xfb\x80\u00c1\xfc\x80\u00c1\xfd\x80\u00c1\xfe\x80\u00c1\xff\x80\u3507KT\xa8\xbd\x15)f\xd6?pk\xae\x1f\xfe\xb0A\x19!\xe5\x8d\f\x9f,\x9c\xd0Ft\xed\xea@\x00\x00\x00" const rinkebyAllocData = "\xf9\x03\xb7\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x941\xb9\x8d\x14\x00{\xde\xe67)\x80\x86\x98\x8a\v\xbd1\x18E#\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" const goerliAllocData = "\xf9\x04\x06\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xe0\x94L*\xe4\x82Y5\x05\xf0\x16<\xde\xfc\a>\x81\xc6<\xdaA\a\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe0\x94\xa8\xe8\xf1G2e\x8eKQ\xe8q\x191\x05:\x8ai\xba\xf2\xb1\x8a\x15-\x02\xc7\xe1J\xf6\x80\x00\x00\xe1\x94\u0665\x17\x9f\t\x1d\x85\x05\x1d<\x98'\x85\xef\xd1E\\\uc199\x8b\bE\x95\x16\x14\x01HJ\x00\x00\x00\xe1\x94\u08bdBX\xd2v\x887\xba\xa2j(\xfeq\xdc\a\x9f\x84\u01cbJG\xe3\xc1$H\xf4\xad\x00\x00\x00" -const yoloV1AllocData = "\xf9\x03\xb7\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x8a7\x86o\xd3b|\x92\x05\xa3|\x86\x85fo2\xec\a\xbb\x1b\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" +const yoloV3AllocData = "\xf9\x05o\u0080\x01\xc2\x01\x01\xc2\x02\x01\xc2\x03\x01\xc2\x04\x01\xc2\x05\x01\xc2\x06\x01\xc2\a\x01\xc2\b\x01\xc2\t\x01\xc2\n\x01\xc2\v\x01\xc2\f\x01\xc2\r\x01\xc2\x0e\x01\xc2\x0f\x01\xc2\x10\x01\xc2\x11\x01\xc2\x12\x01\xc2\x13\x01\xc2\x14\x01\xc2\x15\x01\xc2\x16\x01\xc2\x17\x01\xc2\x18\x01\xc2\x19\x01\xc2\x1a\x01\xc2\x1b\x01\xc2\x1c\x01\xc2\x1d\x01\xc2\x1e\x01\xc2\x1f\x01\xc2 
\x01\xc2!\x01\xc2\"\x01\xc2#\x01\xc2$\x01\xc2%\x01\xc2&\x01\xc2'\x01\xc2(\x01\xc2)\x01\xc2*\x01\xc2+\x01\xc2,\x01\xc2-\x01\xc2.\x01\xc2/\x01\xc20\x01\xc21\x01\xc22\x01\xc23\x01\xc24\x01\xc25\x01\xc26\x01\xc27\x01\xc28\x01\xc29\x01\xc2:\x01\xc2;\x01\xc2<\x01\xc2=\x01\xc2>\x01\xc2?\x01\xc2@\x01\xc2A\x01\xc2B\x01\xc2C\x01\xc2D\x01\xc2E\x01\xc2F\x01\xc2G\x01\xc2H\x01\xc2I\x01\xc2J\x01\xc2K\x01\xc2L\x01\xc2M\x01\xc2N\x01\xc2O\x01\xc2P\x01\xc2Q\x01\xc2R\x01\xc2S\x01\xc2T\x01\xc2U\x01\xc2V\x01\xc2W\x01\xc2X\x01\xc2Y\x01\xc2Z\x01\xc2[\x01\xc2\\\x01\xc2]\x01\xc2^\x01\xc2_\x01\xc2`\x01\xc2a\x01\xc2b\x01\xc2c\x01\xc2d\x01\xc2e\x01\xc2f\x01\xc2g\x01\xc2h\x01\xc2i\x01\xc2j\x01\xc2k\x01\xc2l\x01\xc2m\x01\xc2n\x01\xc2o\x01\xc2p\x01\xc2q\x01\xc2r\x01\xc2s\x01\xc2t\x01\xc2u\x01\xc2v\x01\xc2w\x01\xc2x\x01\xc2y\x01\xc2z\x01\xc2{\x01\xc2|\x01\xc2}\x01\xc2~\x01\xc2\u007f\x01\u00c1\x80\x01\u00c1\x81\x01\u00c1\x82\x01\u00c1\x83\x01\u00c1\x84\x01\u00c1\x85\x01\u00c1\x86\x01\u00c1\x87\x01\u00c1\x88\x01\u00c1\x89\x01\u00c1\x8a\x01\u00c1\x8b\x01\u00c1\x8c\x01\u00c1\x8d\x01\u00c1\x8e\x01\u00c1\x8f\x01\u00c1\x90\x01\u00c1\x91\x01\u00c1\x92\x01\u00c1\x93\x01\u00c1\x94\x01\u00c1\x95\x01\u00c1\x96\x01\u00c1\x97\x01\u00c1\x98\x01\u00c1\x99\x01\u00c1\x9a\x01\u00c1\x9b\x01\u00c1\x9c\x01\u00c1\x9d\x01\u00c1\x9e\x01\u00c1\x9f\x01\u00c1\xa0\x01\u00c1\xa1\x01\u00c1\xa2\x01\u00c1\xa3\x01\u00c1\xa4\x01\u00c1\xa5\x01\u00c1\xa6\x01\u00c1\xa7\x01\u00c1\xa8\x01\u00c1\xa9\x01\u00c1\xaa\x01\u00c1\xab\x01\u00c1\xac\x01\u00c1\xad\x01\u00c1\xae\x01\u00c1\xaf\x01\u00c1\xb0\x01\u00c1\xb1\x01\u00c1\xb2\x01\u00c1\xb3\x01\u00c1\xb4\x01\u00c1\xb5\x01\u00c1\xb6\x01\u00c1\xb7\x01\u00c1\xb8\x01\u00c1\xb9\x01\u00c1\xba\x01\u00c1\xbb\x01\u00c1\xbc\x01\u00c1\xbd\x01\u00c1\xbe\x01\u00c1\xbf\x01\u00c1\xc0\x01\u00c1\xc1\x01\u00c1\xc2\x01\u00c1\xc3\x01\u00c1\xc4\x01\u00c1\xc5\x01\u00c1\xc6\x01\u00c1\xc7\x01\u00c1\xc8\x01\u00c1\xc9\x01\u00c1\xca\x01\u00c1\xcb\x01\u00c1\xcc\x01\u00c1\xcd\x01\u00c1\xce\x01\u00c1\xcf\x01\u00c1\xd0\x01\u00c1\xd1\x01\u00c1\xd2\x01\u00c1\xd3\x01\u00c1\xd4\x01\u00c1\xd5\x01\u00c1\xd6\x01\u00c1\xd7\x01\u00c1\xd8\x01\u00c1\xd9\x01\u00c1\xda\x01\u00c1\xdb\x01\u00c1\xdc\x01\u00c1\xdd\x01\u00c1\xde\x01\u00c1\xdf\x01\u00c1\xe0\x01\u00c1\xe1\x01\u00c1\xe2\x01\u00c1\xe3\x01\u00c1\xe4\x01\u00c1\xe5\x01\u00c1\xe6\x01\u00c1\xe7\x01\u00c1\xe8\x01\u00c1\xe9\x01\u00c1\xea\x01\u00c1\xeb\x01\u00c1\xec\x01\u00c1\xed\x01\u00c1\xee\x01\u00c1\xef\x01\u00c1\xf0\x01\u00c1\xf1\x01\u00c1\xf2\x01\u00c1\xf3\x01\u00c1\xf4\x01\u00c1\xf5\x01\u00c1\xf6\x01\u00c1\xf7\x01\u00c1\xf8\x01\u00c1\xf9\x01\u00c1\xfa\x01\u00c1\xfb\x01\u00c1\xfc\x01\u00c1\xfd\x01\u00c1\xfe\x01\u00c1\xff\x01\xf6\x94\x0e\x89\xe2\xae\xdb\x1c\xfc\u06d4$\xd4\x1a\x1f!\x8fA2s\x81r\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x10A\xaf\xbc\xb3Y\u0568\xdcX\xc1[/\xf5\x13T\xff\x8a!}\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94`\xad\xc0\xf8\x9aA\xaf#|\xe75T\xed\xe1p\xd73\xec\x14\xe0\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94y\x9d2\x9e_X4\x19\x16|\xd7\"\x96$\x85\x92n3\x8fJ\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94|\xf5\xb7\x9b\xfe)\x1ag\xab\x02\xb3\x93\xe4V\xcc\xc4\xc2f\xf7S\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8a\x8e\xaf\xb1\xcfb\xbf\xbe\xb1t\x17i\xda\xe1\xa9\xddG\x99a\x92\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\x8b\xa1\xf1\tU\x1b\xd42\x800\x12dZ\xc16\xdd\xd6M\xbar\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xb0*.\xda\x1b1\u007f\xbd\x16v\x01(\x83k\n\u015bV\x0e\x9d\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf6\x94\xdf\n\x88\xb2\xb6\x8cg7\x13\xa8\xec\x82`\x03go'.5s\xa0\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" diff --git a/core/headerchain.go b/core/headerchain.go index f5a8e21cfc..dcd3644cd1 100644 --- a/core/headerchain.go +++ b/core/headerchain.go @@ -129,118 +129,193 @@ func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { return number } -// WriteHeader writes a header into the local chain, given that its parent is -// already known. If the total difficulty of the newly inserted header becomes -// greater than the current known TD, the canonical chain is re-routed. +type headerWriteResult struct { + status WriteStatus + ignored int + imported int + lastHash common.Hash + lastHeader *types.Header +} + +// WriteHeaders writes a chain of headers into the local chain, given that the parents +// are already known. If the total difficulty of the newly inserted chain becomes +// greater than the current known TD, the canonical chain is reorged. // // Note: This method is not concurrent-safe with inserting blocks simultaneously // into the chain, as side effects caused by reorganisations cannot be emulated // without the real blocks. Hence, writing headers directly should only be done // in two scenarios: pure-header mode of operation (light clients), or properly // separated header/block phases (non-archive clients). -func (hc *HeaderChain) WriteHeader(header *types.Header) (status WriteStatus, err error) { - // Cache some values to prevent constant recalculation +func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) { + if len(headers) == 0 { + return &headerWriteResult{}, nil + } + ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1) + if ptd == nil { + return &headerWriteResult{}, consensus.ErrUnknownAncestor + } var ( - hash = header.Hash() - number = header.Number.Uint64() + lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number + lastHash = headers[0].ParentHash // Last imported header hash + newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain + + lastHeader *types.Header + inserted []numberHash // Ephemeral lookup of number/hash for the chain + firstInserted = -1 // Index of the first non-ignored header ) - // Calculate the total difficulty of the header - ptd := hc.GetTd(header.ParentHash, number-1) - if ptd == nil { - return NonStatTy, consensus.ErrUnknownAncestor - } - head := hc.CurrentHeader().Number.Uint64() - localTd := hc.GetTd(hc.currentHeaderHash, head) - externTd := new(big.Int).Add(header.Difficulty, ptd) - - // Irrelevant of the canonical status, write the td and header to the database - // - // Note all the components of header(td, hash->number index and header) should - // be written atomically. 
- headerBatch := hc.chainDb.NewBatch() - rawdb.WriteTd(headerBatch, hash, number, externTd) - rawdb.WriteHeader(headerBatch, header) - if err := headerBatch.Write(); err != nil { - log.Crit("Failed to write header into disk", "err", err) + + batch := hc.chainDb.NewBatch() + for i, header := range headers { + var hash common.Hash + // The headers have already been validated at this point, so we already + // know that it's a contiguous chain, where + // headers[i].Hash() == headers[i+1].ParentHash + if i < len(headers)-1 { + hash = headers[i+1].ParentHash + } else { + hash = header.Hash() + } + number := header.Number.Uint64() + newTD.Add(newTD, header.Difficulty) + + // If the header is already known, skip it, otherwise store + if !hc.HasHeader(hash, number) { + // Irrelevant of the canonical status, write the TD and header to the database. + rawdb.WriteTd(batch, hash, number, newTD) + hc.tdCache.Add(hash, new(big.Int).Set(newTD)) + + rawdb.WriteHeader(batch, header) + inserted = append(inserted, numberHash{number, hash}) + hc.headerCache.Add(hash, header) + hc.numberCache.Add(hash, number) + if firstInserted < 0 { + firstInserted = i + } + } + lastHeader, lastHash, lastNumber = header, hash, number + } + + // Skip the slow disk write of all headers if interrupted. + if hc.procInterrupt() { + log.Debug("Premature abort during headers import") + return &headerWriteResult{}, errors.New("aborted") } + // Commit to disk! + if err := batch.Write(); err != nil { + log.Crit("Failed to write headers", "error", err) + } + batch.Reset() + + var ( + head = hc.CurrentHeader().Number.Uint64() + localTD = hc.GetTd(hc.currentHeaderHash, head) + status = SideStatTy + ) // If the total difficulty is higher than our known, add it to the canonical chain // Second clause in the if statement reduces the vulnerability to selfish mining. // Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf - reorg := externTd.Cmp(localTd) > 0 - if !reorg && externTd.Cmp(localTd) == 0 { - if header.Number.Uint64() < head { + reorg := newTD.Cmp(localTD) > 0 + if !reorg && newTD.Cmp(localTD) == 0 { + if lastNumber < head { reorg = true - } else if header.Number.Uint64() == head { + } else if lastNumber == head { reorg = mrand.Float64() < 0.5 } } + // If the parent of the (first) block is already the canon header, + // we don't have to go backwards to delete canon blocks, but + // simply pile them onto the existing chain + chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash if reorg { // If the header can be added into canonical chain, adjust the // header chain markers(canonical indexes and head header flag). // // Note all markers should be written atomically. - - // Delete any canonical number assignments above the new head - markerBatch := hc.chainDb.NewBatch() - for i := number + 1; ; i++ { - hash := rawdb.ReadCanonicalHash(hc.chainDb, i) - if hash == (common.Hash{}) { - break + markerBatch := batch // we can reuse the batch to keep allocs down + if !chainAlreadyCanon { + // Delete any canonical number assignments above the new head + for i := lastNumber + 1; ; i++ { + hash := rawdb.ReadCanonicalHash(hc.chainDb, i) + if hash == (common.Hash{}) { + break + } + rawdb.DeleteCanonicalHash(markerBatch, i) + } + // Overwrite any stale canonical number assignments, going + // backwards from the first header in this import + var ( + headHash = headers[0].ParentHash // inserted[0].parent? + headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ? 
+ headHeader = hc.GetHeader(headHash, headNumber) + ) + for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash { + rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber) + headHash = headHeader.ParentHash + headNumber = headHeader.Number.Uint64() - 1 + headHeader = hc.GetHeader(headHash, headNumber) + } + // If some of the older headers were already known, but obtained canon-status + // during this import batch, then we need to write that now + // Further down, we continue writing the staus for the ones that + // were not already known + for i := 0; i < firstInserted; i++ { + hash := headers[i].Hash() + num := headers[i].Number.Uint64() + rawdb.WriteCanonicalHash(markerBatch, hash, num) + rawdb.WriteHeadHeaderHash(markerBatch, hash) } - rawdb.DeleteCanonicalHash(markerBatch, i) } - - // Overwrite any stale canonical number assignments - var ( - headHash = header.ParentHash - headNumber = header.Number.Uint64() - 1 - headHeader = hc.GetHeader(headHash, headNumber) - ) - for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash { - rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber) - - headHash = headHeader.ParentHash - headNumber = headHeader.Number.Uint64() - 1 - headHeader = hc.GetHeader(headHash, headNumber) + // Extend the canonical chain with the new headers + for _, hn := range inserted { + rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number) + rawdb.WriteHeadHeaderHash(markerBatch, hn.hash) } - // Extend the canonical chain with the new header - rawdb.WriteCanonicalHash(markerBatch, hash, number) - rawdb.WriteHeadHeaderHash(markerBatch, hash) if err := markerBatch.Write(); err != nil { log.Crit("Failed to write header markers into disk", "err", err) } + markerBatch.Reset() // Last step update all in-memory head header markers - hc.currentHeaderHash = hash - hc.currentHeader.Store(types.CopyHeader(header)) - headHeaderGauge.Update(header.Number.Int64()) + hc.currentHeaderHash = lastHash + hc.currentHeader.Store(types.CopyHeader(lastHeader)) + headHeaderGauge.Update(lastHeader.Number.Int64()) + // Chain status is canonical since this insert was a reorg. + // Note that all inserts which have higher TD than existing are 'reorg'. status = CanonStatTy - } else { - status = SideStatTy } - hc.tdCache.Add(hash, externTd) - hc.headerCache.Add(hash, header) - hc.numberCache.Add(hash, number) - return -} -// WhCallback is a callback function for inserting individual headers. -// A callback is used for two reasons: first, in a LightChain, status should be -// processed and light chain events sent, while in a BlockChain this is not -// necessary since chain events are sent after inserting blocks. Second, the -// header writes should be protected by the parent chain mutex individually. 
-type WhCallback func(*types.Header) error + if len(inserted) == 0 { + status = NonStatTy + } + return &headerWriteResult{ + status: status, + ignored: len(headers) - len(inserted), + imported: len(inserted), + lastHash: lastHash, + lastHeader: lastHeader, + }, nil +} func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) { // Do a sanity check that the provided chain is actually ordered and linked for i := 1; i < len(chain); i++ { - if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != chain[i-1].Hash() { + if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 { + hash := chain[i].Hash() + parentHash := chain[i-1].Hash() // Chain broke ancestry, log a message (programming error) and skip insertion - log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(), - "parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", chain[i-1].Hash()) + log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash, + "parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash) return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number, - chain[i-1].Hash().Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4]) + parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4]) + } + // If the header is a banned one, straight out abort + if BadHashes[chain[i].ParentHash] { + return i - 1, ErrBlacklistedHash + } + // If it's the last header in the cunk, we need to check it too + if i == len(chain)-1 && BadHashes[chain[i].Hash()] { + return i, ErrBlacklistedHash } } @@ -263,16 +338,12 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) defer close(abort) // Iterate over the headers and ensure they all check out - for i, header := range chain { + for i := range chain { // If the chain is terminating, stop processing blocks if hc.procInterrupt() { log.Debug("Premature abort during headers verification") return 0, errors.New("aborted") } - // If the header is a banned one, straight out abort - if BadHashes[header.Hash()] { - return i, ErrBlacklistedHash - } // Otherwise wait for headers checks and ensure they pass if err := <-results; err != nil { return i, err @@ -282,55 +353,41 @@ func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) return 0, nil } -// InsertHeaderChain attempts to insert the given header chain in to the local -// chain, possibly creating a reorg. If an error is returned, it will return the -// index number of the failing header as well an error describing what went wrong. +// InsertHeaderChain inserts the given headers. // -// The verify parameter can be used to fine tune whether nonce verification -// should be done or not. The reason behind the optional check is because some -// of the header retrieval mechanisms already need to verfy nonces, as well as -// because nonces can be verified sparsely, not needing to check each. 
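// Illustrative sketch (not part of the diff): the reorg decision inside writeHeaders above,
// isolated for clarity. A freshly written header chain becomes canonical when its total
// difficulty exceeds the local head's; ties prefer the lower block number, with a coin flip
// at equal height to blunt selfish mining. The rand source here is only for the demo.
package main

import (
	"fmt"
	"math/big"
	"math/rand"
)

// shouldReorg mirrors the clause used when deciding whether the imported headers
// replace the current canonical chain.
func shouldReorg(newTD, localTD *big.Int, lastNumber, localHead uint64) bool {
	if newTD.Cmp(localTD) > 0 {
		return true
	}
	if newTD.Cmp(localTD) == 0 {
		if lastNumber < localHead {
			return true
		}
		if lastNumber == localHead {
			return rand.Float64() < 0.5
		}
	}
	return false
}

func main() {
	fmt.Println(shouldReorg(big.NewInt(300), big.NewInt(200), 10, 12)) // true: heavier chain wins
	fmt.Println(shouldReorg(big.NewInt(200), big.NewInt(200), 9, 12))  // true: same weight, shorter chain preferred
}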
-func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, writeHeader WhCallback, start time.Time) (int, error) { - // Collect some import statistics to report on - stats := struct{ processed, ignored int }{} - // All headers passed verification, import them into the database - for i, header := range chain { - // Short circuit insertion if shutting down - if hc.procInterrupt() { - log.Debug("Premature abort during headers import") - return i, errors.New("aborted") - } - // If the header's already known, skip it, otherwise store - hash := header.Hash() - if hc.HasHeader(hash, header.Number.Uint64()) { - externTd := hc.GetTd(hash, header.Number.Uint64()) - localTd := hc.GetTd(hc.currentHeaderHash, hc.CurrentHeader().Number.Uint64()) - if externTd == nil || externTd.Cmp(localTd) <= 0 { - stats.ignored++ - continue - } - } - if err := writeHeader(header); err != nil { - return i, err - } - stats.processed++ +// The validity of the headers is NOT CHECKED by this method, i.e. they need to be +// validated by ValidateHeaderChain before calling InsertHeaderChain. +// +// This insert is all-or-nothing. If this returns an error, no headers were written, +// otherwise they were all processed successfully. +// +// The returned 'write status' says if the inserted headers are part of the canonical chain +// or a side chain. +func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) { + if hc.procInterrupt() { + return 0, errors.New("aborted") } - // Report some public statistics so the user has a clue what's going on - last := chain[len(chain)-1] + res, err := hc.writeHeaders(chain) + // Report some public statistics so the user has a clue what's going on context := []interface{}{ - "count", stats.processed, "elapsed", common.PrettyDuration(time.Since(start)), - "number", last.Number, "hash", last.Hash(), + "count", res.imported, + "elapsed", common.PrettyDuration(time.Since(start)), } - if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { - context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) + if err != nil { + context = append(context, "err", err) } - if stats.ignored > 0 { - context = append(context, []interface{}{"ignored", stats.ignored}...) + if last := res.lastHeader; last != nil { + context = append(context, "number", last.Number, "hash", res.lastHash) + if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { + context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) + } + } + if res.ignored > 0 { + context = append(context, []interface{}{"ignored", res.ignored}...) } log.Info("Imported new block headers", context...) - - return 0, nil + return res.status, err } // GetBlockHashesFromHash retrieves a number of block hashes starting at a given @@ -369,9 +426,8 @@ func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, ma // in this case it is cheaper to just read the header if header := hc.GetHeader(hash, number); header != nil { return header.ParentHash, number - 1 - } else { - return common.Hash{}, 0 } + return common.Hash{}, 0 } for ancestor != 0 { if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash { diff --git a/core/headerchain_test.go b/core/headerchain_test.go new file mode 100644 index 0000000000..0aa25efd1f --- /dev/null +++ b/core/headerchain_test.go @@ -0,0 +1,115 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" +) + +func verifyUnbrokenCanonchain(hc *HeaderChain) error { + h := hc.CurrentHeader() + for { + canonHash := rawdb.ReadCanonicalHash(hc.chainDb, h.Number.Uint64()) + if exp := h.Hash(); canonHash != exp { + return fmt.Errorf("Canon hash chain broken, block %d got %x, expected %x", + h.Number, canonHash[:8], exp[:8]) + } + // Verify that we have the TD + if td := rawdb.ReadTd(hc.chainDb, canonHash, h.Number.Uint64()); td == nil { + return fmt.Errorf("Canon TD missing at block %d", h.Number) + } + if h.Number.Uint64() == 0 { + break + } + h = hc.GetHeader(h.ParentHash, h.Number.Uint64()-1) + } + return nil +} + +func testInsert(t *testing.T, hc *HeaderChain, chain []*types.Header, wantStatus WriteStatus, wantErr error) { + t.Helper() + + status, err := hc.InsertHeaderChain(chain, time.Now()) + if status != wantStatus { + t.Errorf("wrong write status from InsertHeaderChain: got %v, want %v", status, wantStatus) + } + // Always verify that the header chain is unbroken + if err := verifyUnbrokenCanonchain(hc); err != nil { + t.Fatal(err) + } + if !errors.Is(err, wantErr) { + t.Fatalf("unexpected error from InsertHeaderChain: %v", err) + } +} + +// This test checks status reporting of InsertHeaderChain. 
+func TestHeaderInsertion(t *testing.T) { + var ( + db = rawdb.NewMemoryDatabase() + genesis = new(Genesis).MustCommit(db) + ) + + hc, err := NewHeaderChain(db, params.AllEthashProtocolChanges, ethash.NewFaker(), func() bool { return false }) + if err != nil { + t.Fatal(err) + } + // chain A: G->A1->A2...A128 + chainA := makeHeaderChain(genesis.Header(), 128, ethash.NewFaker(), db, 10) + // chain B: G->A1->B2...B128 + chainB := makeHeaderChain(chainA[0], 128, ethash.NewFaker(), db, 10) + log.Root().SetHandler(log.StdoutHandler) + + // Inserting 64 headers on an empty chain, expecting + // 1 callbacks, 1 canon-status, 0 sidestatus, + testInsert(t, hc, chainA[:64], CanonStatTy, nil) + + // Inserting 64 identical headers, expecting + // 0 callbacks, 0 canon-status, 0 sidestatus, + testInsert(t, hc, chainA[:64], NonStatTy, nil) + + // Inserting the same some old, some new headers + // 1 callbacks, 1 canon, 0 side + testInsert(t, hc, chainA[32:96], CanonStatTy, nil) + + // Inserting side blocks, but not overtaking the canon chain + testInsert(t, hc, chainB[0:32], SideStatTy, nil) + + // Inserting more side blocks, but we don't have the parent + testInsert(t, hc, chainB[34:36], NonStatTy, consensus.ErrUnknownAncestor) + + // Inserting more sideblocks, overtaking the canon chain + testInsert(t, hc, chainB[32:97], CanonStatTy, nil) + + // Inserting more A-headers, taking back the canonicality + testInsert(t, hc, chainA[90:100], CanonStatTy, nil) + + // And B becomes canon again + testInsert(t, hc, chainB[97:107], CanonStatTy, nil) + + // And B becomes even longer + testInsert(t, hc, chainB[107:128], CanonStatTy, nil) +} diff --git a/core/rawdb/accessors_chain.go b/core/rawdb/accessors_chain.go index c948cdc7c6..461e1cbb17 100644 --- a/core/rawdb/accessors_chain.go +++ b/core/rawdb/accessors_chain.go @@ -20,6 +20,7 @@ import ( "bytes" "encoding/binary" "math/big" + "sort" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -702,6 +703,102 @@ func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number DeleteTd(db, hash, number) } +const badBlockToKeep = 10 + +type badBlock struct { + Header *types.Header + Body *types.Body +} + +// badBlockList implements the sort interface to allow sorting a list of +// bad blocks by their number in the reverse order. +type badBlockList []*badBlock + +func (s badBlockList) Len() int { return len(s) } +func (s badBlockList) Less(i, j int) bool { + return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64() +} +func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// ReadBadBlock retrieves the bad block with the corresponding block hash. +func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block { + blob, err := db.Get(badBlockKey) + if err != nil { + return nil + } + var badBlocks badBlockList + if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { + return nil + } + for _, bad := range badBlocks { + if bad.Header.Hash() == hash { + return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles) + } + } + return nil +} + +// ReadAllBadBlocks retrieves all the bad blocks in the database. +// All returned blocks are sorted in reverse order by number. 
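// A minimal usage sketch (the database handle and the logging call are only
// for illustration): a debugging caller can list the recorded bad blocks,
// newest first, roughly like
//
//	for _, block := range ReadAllBadBlocks(db) {
//		log.Info("Bad block", "number", block.NumberU64(), "hash", block.Hash())
//	}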
+func ReadAllBadBlocks(db ethdb.Reader) []*types.Block { + blob, err := db.Get(badBlockKey) + if err != nil { + return nil + } + var badBlocks badBlockList + if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { + return nil + } + var blocks []*types.Block + for _, bad := range badBlocks { + blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)) + } + return blocks +} + +// WriteBadBlock serializes the bad block into the database. If the cumulated +// bad blocks exceeds the limitation, the oldest will be dropped. +func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) { + blob, err := db.Get(badBlockKey) + if err != nil { + log.Warn("Failed to load old bad blocks", "error", err) + } + var badBlocks badBlockList + if len(blob) > 0 { + if err := rlp.DecodeBytes(blob, &badBlocks); err != nil { + log.Crit("Failed to decode old bad blocks", "error", err) + } + } + for _, b := range badBlocks { + if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() { + log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash()) + return + } + } + badBlocks = append(badBlocks, &badBlock{ + Header: block.Header(), + Body: block.Body(), + }) + sort.Sort(sort.Reverse(badBlocks)) + if len(badBlocks) > badBlockToKeep { + badBlocks = badBlocks[:badBlockToKeep] + } + data, err := rlp.EncodeToBytes(badBlocks) + if err != nil { + log.Crit("Failed to encode bad blocks", "err", err) + } + if err := db.Put(badBlockKey, data); err != nil { + log.Crit("Failed to write bad blocks", "err", err) + } +} + +// DeleteBadBlocks deletes all the bad blocks from the database +func DeleteBadBlocks(db ethdb.KeyValueWriter) { + if err := db.Delete(badBlockKey); err != nil { + log.Crit("Failed to delete bad blocks", "err", err) + } +} + // FindCommonAncestor returns the last common ancestor of two block headers func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header { for bn := b.Number.Uint64(); a.Number.Uint64() > bn; { diff --git a/core/rawdb/accessors_chain_test.go b/core/rawdb/accessors_chain_test.go index 074c24d8fe..a5804cd309 100644 --- a/core/rawdb/accessors_chain_test.go +++ b/core/rawdb/accessors_chain_test.go @@ -22,6 +22,7 @@ import ( "fmt" "io/ioutil" "math/big" + "math/rand" "os" "reflect" "testing" @@ -188,6 +189,75 @@ func TestPartialBlockStorage(t *testing.T) { } } +// Tests block storage and retrieval operations. 
+func TestBadBlockStorage(t *testing.T) { + db := NewMemoryDatabase() + + // Create a test block to move around the database and make sure it's really new + block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(1), + Extra: []byte("bad block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + if entry := ReadBadBlock(db, block.Hash()); entry != nil { + t.Fatalf("Non existent block returned: %v", entry) + } + // Write and verify the block in the database + WriteBadBlock(db, block) + if entry := ReadBadBlock(db, block.Hash()); entry == nil { + t.Fatalf("Stored block not found") + } else if entry.Hash() != block.Hash() { + t.Fatalf("Retrieved block mismatch: have %v, want %v", entry, block) + } + // Write one more bad block + blockTwo := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(2), + Extra: []byte("bad block two"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + WriteBadBlock(db, blockTwo) + + // Write the block one again, should be filtered out. + WriteBadBlock(db, block) + badBlocks := ReadAllBadBlocks(db) + if len(badBlocks) != 2 { + t.Fatalf("Failed to load all bad blocks") + } + + // Write a bunch of bad blocks, all the blocks are should sorted + // in reverse order. The extra blocks should be truncated. + for _, n := range rand.Perm(100) { + block := types.NewBlockWithHeader(&types.Header{ + Number: big.NewInt(int64(n)), + Extra: []byte("bad block"), + UncleHash: types.EmptyUncleHash, + TxHash: types.EmptyRootHash, + ReceiptHash: types.EmptyRootHash, + }) + WriteBadBlock(db, block) + } + badBlocks = ReadAllBadBlocks(db) + if len(badBlocks) != badBlockToKeep { + t.Fatalf("The number of persised bad blocks in incorrect %d", len(badBlocks)) + } + for i := 0; i < len(badBlocks)-1; i++ { + if badBlocks[i].NumberU64() < badBlocks[i+1].NumberU64() { + t.Fatalf("The bad blocks are not sorted #[%d](%d) < #[%d](%d)", i, i+1, badBlocks[i].NumberU64(), badBlocks[i+1].NumberU64()) + } + } + + // Delete all bad blocks + DeleteBadBlocks(db) + badBlocks = ReadAllBadBlocks(db) + if len(badBlocks) != 0 { + t.Fatalf("Failed to delete bad blocks") + } +} + // Tests block total difficulty storage and retrieval operations. 
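// The accessors under test follow the usual write/read pairing, e.g. (hash,
// number and the difficulty value are placeholders):
//
//	WriteTd(db, hash, number, big.NewInt(131072))
//	if td := ReadTd(db, hash, number); td == nil || td.Cmp(big.NewInt(131072)) != 0 {
//		// the stored total difficulty should round-trip unchanged
//	}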
func TestTdStorage(t *testing.T) { db := NewMemoryDatabase() diff --git a/core/rawdb/accessors_metadata.go b/core/rawdb/accessors_metadata.go index 14a302a127..079e335fa6 100644 --- a/core/rawdb/accessors_metadata.go +++ b/core/rawdb/accessors_metadata.go @@ -18,6 +18,7 @@ package rawdb import ( "encoding/json" + "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" @@ -30,7 +31,7 @@ import ( func ReadDatabaseVersion(db ethdb.KeyValueReader) *uint64 { var version uint64 - enc, _ := db.Get(databaseVerisionKey) + enc, _ := db.Get(databaseVersionKey) if len(enc) == 0 { return nil } @@ -47,7 +48,7 @@ func WriteDatabaseVersion(db ethdb.KeyValueWriter, version uint64) { if err != nil { log.Crit("Failed to encode database version", "err", err) } - if err = db.Put(databaseVerisionKey, enc); err != nil { + if err = db.Put(databaseVersionKey, enc); err != nil { log.Crit("Failed to store the database version", "err", err) } } @@ -79,3 +80,61 @@ func WriteChainConfig(db ethdb.KeyValueWriter, hash common.Hash, cfg *params.Cha log.Crit("Failed to store chain config", "err", err) } } + +// crashList is a list of unclean-shutdown-markers, for rlp-encoding to the +// database +type crashList struct { + Discarded uint64 // how many ucs have we deleted + Recent []uint64 // unix timestamps of 10 latest unclean shutdowns +} + +const crashesToKeep = 10 + +// PushUncleanShutdownMarker appends a new unclean shutdown marker and returns +// the previous data +// - a list of timestamps +// - a count of how many old unclean-shutdowns have been discarded +func PushUncleanShutdownMarker(db ethdb.KeyValueStore) ([]uint64, uint64, error) { + var uncleanShutdowns crashList + // Read old data + if data, err := db.Get(uncleanShutdownKey); err != nil { + log.Warn("Error reading unclean shutdown markers", "error", err) + } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { + return nil, 0, err + } + var discarded = uncleanShutdowns.Discarded + var previous = make([]uint64, len(uncleanShutdowns.Recent)) + copy(previous, uncleanShutdowns.Recent) + // Add a new (but cap it) + uncleanShutdowns.Recent = append(uncleanShutdowns.Recent, uint64(time.Now().Unix())) + if count := len(uncleanShutdowns.Recent); count > crashesToKeep+1 { + numDel := count - (crashesToKeep + 1) + uncleanShutdowns.Recent = uncleanShutdowns.Recent[numDel:] + uncleanShutdowns.Discarded += uint64(numDel) + } + // And save it again + data, _ := rlp.EncodeToBytes(uncleanShutdowns) + if err := db.Put(uncleanShutdownKey, data); err != nil { + log.Warn("Failed to write unclean-shutdown marker", "err", err) + return nil, 0, err + } + return previous, discarded, nil +} + +// PopUncleanShutdownMarker removes the last unclean shutdown marker +func PopUncleanShutdownMarker(db ethdb.KeyValueStore) { + var uncleanShutdowns crashList + // Read old data + if data, err := db.Get(uncleanShutdownKey); err != nil { + log.Warn("Error reading unclean shutdown markers", "error", err) + } else if err := rlp.DecodeBytes(data, &uncleanShutdowns); err != nil { + log.Error("Error decoding unclean shutdown markers", "error", err) // Should mos def _not_ happen + } + if l := len(uncleanShutdowns.Recent); l > 0 { + uncleanShutdowns.Recent = uncleanShutdowns.Recent[:l-1] + } + data, _ := rlp.EncodeToBytes(uncleanShutdowns) + if err := db.Put(uncleanShutdownKey, data); err != nil { + log.Warn("Failed to clear unclean-shutdown marker", "err", err) + } +} diff --git a/core/rawdb/accessors_snapshot.go b/core/rawdb/accessors_snapshot.go index 
5bd48ad5fa..c3616ba3aa 100644 --- a/core/rawdb/accessors_snapshot.go +++ b/core/rawdb/accessors_snapshot.go @@ -175,3 +175,24 @@ func DeleteSnapshotRecoveryNumber(db ethdb.KeyValueWriter) { log.Crit("Failed to remove snapshot recovery number", "err", err) } } + +// ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown. +func ReadSnapshotSyncStatus(db ethdb.KeyValueReader) []byte { + data, _ := db.Get(snapshotSyncStatusKey) + return data +} + +// WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown. +func WriteSnapshotSyncStatus(db ethdb.KeyValueWriter, status []byte) { + if err := db.Put(snapshotSyncStatusKey, status); err != nil { + log.Crit("Failed to store snapshot sync status", "err", err) + } +} + +// DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last +// shutdown +func DeleteSnapshotSyncStatus(db ethdb.KeyValueWriter) { + if err := db.Delete(snapshotSyncStatusKey); err != nil { + log.Crit("Failed to remove snapshot sync status", "err", err) + } +} diff --git a/core/rawdb/chain_iterator.go b/core/rawdb/chain_iterator.go index 393b72c26c..862a549540 100644 --- a/core/rawdb/chain_iterator.go +++ b/core/rawdb/chain_iterator.go @@ -243,13 +243,13 @@ func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan } } } - // If there exists uncommitted data, flush them. - if batch.ValueSize() > 0 { - WriteTxIndexTail(batch, lastNum) // Also write the tail there - if err := batch.Write(); err != nil { - log.Crit("Failed writing batch to db", "error", err) - return - } + // Flush the new indexing tail and the last committed data. It can also happen + // that the last batch is empty because nothing to index, but the tail has to + // be flushed anyway. + WriteTxIndexTail(batch, lastNum) + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return } select { case <-interrupt: @@ -334,13 +334,13 @@ func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt ch } } } - // Commit the last batch if there exists uncommitted data - if batch.ValueSize() > 0 { - WriteTxIndexTail(batch, nextNum) - if err := batch.Write(); err != nil { - log.Crit("Failed writing batch to db", "error", err) - return - } + // Flush the new indexing tail and the last committed data. It can also happen + // that the last batch is empty because nothing to unindex, but the tail has to + // be flushed anyway. + WriteTxIndexTail(batch, nextNum) + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return } select { case <-interrupt: diff --git a/core/rawdb/database.go b/core/rawdb/database.go index b1ac3e9587..91171ef92c 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -270,8 +270,8 @@ func (s *stat) Count() string { // InspectDatabase traverses the entire database and checks the size // of all different categories of data. 
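// With the extended signature below, passing nil for both keyPrefix and
// keyStart keeps the old whole-database behaviour, while a prefix narrows the
// traversal, e.g. (a sketch using the headerPrefix defined in schema.go):
//
//	_ = InspectDatabase(db, nil, nil)          // inspect everything
//	_ = InspectDatabase(db, headerPrefix, nil) // inspect only 'h'-prefixed entries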
-func InspectDatabase(db ethdb.Database) error { - it := db.NewIterator(nil, nil) +func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { + it := db.NewIterator(keyPrefix, keyStart) defer it.Release() var ( @@ -307,8 +307,9 @@ func InspectDatabase(db ethdb.Database) error { bloomTrieNodes stat // Meta- and unaccounted data - metadata stat - unaccounted stat + metadata stat + unaccounted stat + shutdownInfo stat // Totals total common.StorageSize @@ -335,7 +336,7 @@ func InspectDatabase(db ethdb.Database) error { hashNumPairings.Add(size) case len(key) == common.HashLength: tries.Add(size) - case bytes.HasPrefix(key, codePrefix) && len(key) == len(codePrefix)+common.HashLength: + case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength: codes.Add(size) case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength): txLookups.Add(size) @@ -347,15 +348,28 @@ func InspectDatabase(db ethdb.Database) error { preimages.Add(size) case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength): bloomBits.Add(size) + case bytes.HasPrefix(key, BloomBitsIndexPrefix): + bloomBits.Add(size) case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength: cliqueSnaps.Add(size) - case bytes.HasPrefix(key, []byte("cht-")) && len(key) == 4+common.HashLength: + case bytes.HasPrefix(key, []byte("cht-")) || + bytes.HasPrefix(key, []byte("chtIndexV2-")) || + bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie chtTrieNodes.Add(size) - case bytes.HasPrefix(key, []byte("blt-")) && len(key) == 4+common.HashLength: + case bytes.HasPrefix(key, []byte("blt-")) || + bytes.HasPrefix(key, []byte("bltIndex-")) || + bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub bloomTrieNodes.Add(size) + case bytes.Equal(key, uncleanShutdownKey): + shutdownInfo.Add(size) default: var accounted bool - for _, meta := range [][]byte{databaseVerisionKey, headHeaderKey, headBlockKey, headFastBlockKey, fastTrieProgressKey} { + for _, meta := range [][]byte{ + databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, lastPivotKey, + fastTrieProgressKey, snapshotRootKey, snapshotJournalKey, snapshotGeneratorKey, + snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey, uncleanShutdownKey, + badBlockKey, + } { if bytes.Equal(key, meta) { metadata.Add(size) accounted = true @@ -402,6 +416,7 @@ func InspectDatabase(db ethdb.Database) error { {"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()}, {"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()}, {"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()}, + {"Key-Value store", "Shutdown metadata", shutdownInfo.Size(), shutdownInfo.Count()}, {"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()}, {"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()}, {"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()}, diff --git a/core/rawdb/freezer_table.go b/core/rawdb/freezer_table.go index b9d8a274a8..cd273222b1 100644 --- a/core/rawdb/freezer_table.go +++ b/core/rawdb/freezer_table.go @@ -103,6 +103,11 @@ type freezerTable struct { lock sync.RWMutex // Mutex protecting the data file descriptors } +// NewFreezerTable opens the given path as a freezer table. 
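// A minimal sketch of the intended use (path, table name and item index are
// placeholders; Retrieve and Close are existing freezerTable methods):
//
//	table, err := NewFreezerTable("/data/ancient", "headers", false)
//	if err != nil {
//		return err
//	}
//	defer table.Close()
//	blob, _ := table.Retrieve(0) // raw (possibly snappy-decoded) blob of item 0
//	_ = blob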
+func NewFreezerTable(path, name string, disableSnappy bool) (*freezerTable, error) { + return newTable(path, name, metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, disableSnappy) +} + // newTable opens a freezer table with default settings - 2G files func newTable(path string, name string, readMeter metrics.Meter, writeMeter metrics.Meter, sizeGauge metrics.Gauge, disableSnappy bool) (*freezerTable, error) { return newCustomTable(path, name, readMeter, writeMeter, sizeGauge, 2*1000*1000*1000, disableSnappy) diff --git a/core/rawdb/schema.go b/core/rawdb/schema.go index dbc5025d5d..0b411057f8 100644 --- a/core/rawdb/schema.go +++ b/core/rawdb/schema.go @@ -27,8 +27,8 @@ import ( // The fields below define the low level database schema prefixing. var ( - // databaseVerisionKey tracks the current database version. - databaseVerisionKey = []byte("DatabaseVersion") + // databaseVersionKey tracks the current database version. + databaseVersionKey = []byte("DatabaseVersion") // headHeaderKey tracks the latest known header's hash. headHeaderKey = []byte("LastHeader") @@ -57,12 +57,21 @@ var ( // snapshotRecoveryKey tracks the snapshot recovery marker across restarts. snapshotRecoveryKey = []byte("SnapshotRecovery") + // snapshotSyncStatusKey tracks the snapshot sync status across restarts. + snapshotSyncStatusKey = []byte("SnapshotSyncStatus") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. txIndexTailKey = []byte("TransactionIndexTail") // fastTxLookupLimitKey tracks the transaction lookup limit during fast sync. fastTxLookupLimitKey = []byte("FastTransactionLookupLimit") + // badBlockKey tracks the list of bad blocks seen by local + badBlockKey = []byte("InvalidBlock") + + // uncleanShutdownKey tracks the list of local crashes + uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db + // Data item prefixes (use single byte to avoid mixing data types, avoid `i`, used for indexes). headerPrefix = []byte("h") // headerPrefix + num (uint64 big endian) + hash -> header headerTDSuffix = []byte("t") // headerPrefix + num (uint64 big endian) + hash + headerTDSuffix -> td @@ -76,7 +85,7 @@ var ( bloomBitsPrefix = []byte("B") // bloomBitsPrefix + bit (uint16 big endian) + section (uint64 big endian) + hash -> bloom bits SnapshotAccountPrefix = []byte("a") // SnapshotAccountPrefix + account hash -> account trie value SnapshotStoragePrefix = []byte("o") // SnapshotStoragePrefix + account hash + storage hash -> storage trie value - codePrefix = []byte("c") // codePrefix + code hash -> account code + CodePrefix = []byte("c") // CodePrefix + code hash -> account code preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db @@ -200,16 +209,16 @@ func preimageKey(hash common.Hash) []byte { return append(preimagePrefix, hash.Bytes()...) } -// codeKey = codePrefix + hash +// codeKey = CodePrefix + hash func codeKey(hash common.Hash) []byte { - return append(codePrefix, hash.Bytes()...) + return append(CodePrefix, hash.Bytes()...) } // IsCodeKey reports whether the given byte slice is the key of contract code, // if so return the raw code hash as well. 
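// For example, a raw iteration over the key-value store can pick out contract
// code entries like this (a sketch; `it` is assumed to be an ethdb.Iterator):
//
//	if ok, codeHash := IsCodeKey(it.Key()); ok {
//		// it.Value() holds the bytecode stored under CodePrefix + codeHash
//		_ = codeHash
//	}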
func IsCodeKey(key []byte) (bool, []byte) { - if bytes.HasPrefix(key, codePrefix) && len(key) == common.HashLength+len(codePrefix) { - return true, key[len(codePrefix):] + if bytes.HasPrefix(key, CodePrefix) && len(key) == common.HashLength+len(CodePrefix) { + return true, key[len(CodePrefix):] } return false, nil } diff --git a/core/state/database.go b/core/state/database.go index a9342f5179..1a06e33409 100644 --- a/core/state/database.go +++ b/core/state/database.go @@ -104,18 +104,18 @@ type Trie interface { // NewDatabase creates a backing store for state. The returned database is safe for // concurrent use, but does not retain any recent trie nodes in memory. To keep some -// historical state in memory, use the NewDatabaseWithCache constructor. +// historical state in memory, use the NewDatabaseWithConfig constructor. func NewDatabase(db ethdb.Database) Database { - return NewDatabaseWithCache(db, 0, "") + return NewDatabaseWithConfig(db, nil) } -// NewDatabaseWithCache creates a backing store for state. The returned database +// NewDatabaseWithConfig creates a backing store for state. The returned database // is safe for concurrent use and retains a lot of collapsed RLP trie nodes in a // large memory cache. -func NewDatabaseWithCache(db ethdb.Database, cache int, journal string) Database { +func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ - db: trie.NewDatabaseWithCache(db, cache, journal), + db: trie.NewDatabaseWithConfig(db, config), codeSizeCache: csc, codeCache: fastcache.New(codeCacheSize), } @@ -129,12 +129,20 @@ type cachingDB struct { // OpenTrie opens the main account trie at a specific root hash. func (db *cachingDB) OpenTrie(root common.Hash) (Trie, error) { - return trie.NewSecure(root, db.db) + tr, err := trie.NewSecure(root, db.db) + if err != nil { + return nil, err + } + return tr, nil } // OpenStorageTrie opens the storage trie of an account. func (db *cachingDB) OpenStorageTrie(addrHash, root common.Hash) (Trie, error) { - return trie.NewSecure(root, db.db) + tr, err := trie.NewSecure(root, db.db) + if err != nil { + return nil, err + } + return tr, nil } // CopyTrie returns an independent copy of the given trie. diff --git a/core/state/dump.go b/core/state/dump.go index 9bb946d14b..b25da714fd 100644 --- a/core/state/dump.go +++ b/core/state/dump.go @@ -138,7 +138,7 @@ func (s *StateDB) DumpToCollector(c DumpCollector, excludeCode, excludeStorage, account.SecureKey = it.Key } addr := common.BytesToAddress(addrBytes) - obj := newObject(nil, addr, data) + obj := newObject(s, addr, data) if !excludeCode { account.Code = common.Bytes2Hex(obj.Code(s.db)) } diff --git a/core/state/pruner/bloom.go b/core/state/pruner/bloom.go new file mode 100644 index 0000000000..4aeeb176e8 --- /dev/null +++ b/core/state/pruner/bloom.go @@ -0,0 +1,132 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pruner + +import ( + "encoding/binary" + "errors" + "os" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/log" + bloomfilter "github.com/holiman/bloomfilter/v2" +) + +// stateBloomHasher is a wrapper around a byte blob to satisfy the interface API +// requirements of the bloom library used. It's used to convert a trie hash or +// contract code hash into a 64 bit mini hash. +type stateBloomHasher []byte + +func (f stateBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } +func (f stateBloomHasher) Sum(b []byte) []byte { panic("not implemented") } +func (f stateBloomHasher) Reset() { panic("not implemented") } +func (f stateBloomHasher) BlockSize() int { panic("not implemented") } +func (f stateBloomHasher) Size() int { return 8 } +func (f stateBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) } + +// stateBloom is a bloom filter used during the state convesion(snapshot->state). +// The keys of all generated entries will be recorded here so that in the pruning +// stage the entries belong to the specific version can be avoided for deletion. +// +// The false-positive is allowed here. The "false-positive" entries means they +// actually don't belong to the specific version but they are not deleted in the +// pruning. The downside of the false-positive allowance is we may leave some "dangling" +// nodes in the disk. But in practice the it's very unlike the dangling node is +// state root. So in theory this pruned state shouldn't be visited anymore. Another +// potential issue is for fast sync. If we do another fast sync upon the pruned +// database, it's problematic which will stop the expansion during the syncing. +// TODO address it @rjl493456442 @holiman @karalabe. +// +// After the entire state is generated, the bloom filter should be persisted into +// the disk. It indicates the whole generation procedure is finished. +type stateBloom struct { + bloom *bloomfilter.Filter +} + +// newStateBloomWithSize creates a brand new state bloom for state generation. +// The bloom filter will be created by the passing bloom filter size. According +// to the https://hur.st/bloomfilter/?n=600000000&p=&m=2048MB&k=4, the parameters +// are picked so that the false-positive rate for mainnet is low enough. +func newStateBloomWithSize(size uint64) (*stateBloom, error) { + bloom, err := bloomfilter.New(size*1024*1024*8, 4) + if err != nil { + return nil, err + } + log.Info("Initialized state bloom", "size", common.StorageSize(float64(bloom.M()/8))) + return &stateBloom{bloom: bloom}, nil +} + +// NewStateBloomFromDisk loads the state bloom from the given file. +// In this case the assumption is held the bloom filter is complete. +func NewStateBloomFromDisk(filename string) (*stateBloom, error) { + bloom, _, err := bloomfilter.ReadFile(filename) + if err != nil { + return nil, err + } + return &stateBloom{bloom: bloom}, nil +} + +// Commit flushes the bloom filter content into the disk and marks the bloom +// as complete. 
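// The expected flow inside this package is roughly (size, file names and the
// nodeHash variable are placeholders; the .tmp suffix mirrors
// stateBloomFileTempSuffix):
//
//	bloom, _ := newStateBloomWithSize(256) // 256 MB filter
//	_ = bloom.Put(nodeHash.Bytes(), nil)   // mark every live trie node / code hash
//	if err := bloom.Commit("statebloom.bf.gz", "statebloom.bf.gz.tmp"); err != nil {
//		// an incomplete temporary file is left behind; the bloom is not usable yet
//	}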
+func (bloom *stateBloom) Commit(filename, tempname string) error { + // Write the bloom out into a temporary file + _, err := bloom.bloom.WriteFile(tempname) + if err != nil { + return err + } + // Ensure the file is synced to disk + f, err := os.Open(tempname) + if err != nil { + return err + } + if err := f.Sync(); err != nil { + f.Close() + return err + } + f.Close() + + // Move the teporary file into it's final location + return os.Rename(tempname, filename) +} + +// Put implements the KeyValueWriter interface. But here only the key is needed. +func (bloom *stateBloom) Put(key []byte, value []byte) error { + // If the key length is not 32bytes, ensure it's contract code + // entry with new scheme. + if len(key) != common.HashLength { + isCode, codeKey := rawdb.IsCodeKey(key) + if !isCode { + return errors.New("invalid entry") + } + bloom.bloom.Add(stateBloomHasher(codeKey)) + return nil + } + bloom.bloom.Add(stateBloomHasher(key)) + return nil +} + +// Delete removes the key from the key-value data store. +func (bloom *stateBloom) Delete(key []byte) error { panic("not supported") } + +// Contain is the wrapper of the underlying contains function which +// reports whether the key is contained. +// - If it says yes, the key may be contained +// - If it says no, the key is definitely not contained. +func (bloom *stateBloom) Contain(key []byte) (bool, error) { + return bloom.bloom.Contains(stateBloomHasher(key)), nil +} diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go new file mode 100644 index 0000000000..530a348540 --- /dev/null +++ b/core/state/pruner/pruner.go @@ -0,0 +1,543 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package pruner + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "math" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +const ( + // stateBloomFilePrefix is the filename prefix of state bloom filter. + stateBloomFilePrefix = "statebloom" + + // stateBloomFilePrefix is the filename suffix of state bloom filter. + stateBloomFileSuffix = "bf.gz" + + // stateBloomFileTempSuffix is the filename suffix of state bloom filter + // while it is being written out to detect write aborts. + stateBloomFileTempSuffix = ".tmp" + + // rangeCompactionThreshold is the minimal deleted entry number for + // triggering range compaction. 
It's a quite arbitrary number but just + // to avoid triggering range compaction because of small deletion. + rangeCompactionThreshold = 100000 +) + +var ( + // emptyRoot is the known root hash of an empty trie. + emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + + // emptyCode is the known hash of the empty EVM bytecode. + emptyCode = crypto.Keccak256(nil) +) + +// Pruner is an offline tool to prune the stale state with the +// help of the snapshot. The workflow of pruner is very simple: +// +// - iterate the snapshot, reconstruct the relevant state +// - iterate the database, delete all other state entries which +// don't belong to the target state and the genesis state +// +// It can take several hours(around 2 hours for mainnet) to finish +// the whole pruning work. It's recommended to run this offline tool +// periodically in order to release the disk usage and improve the +// disk read performance to some extent. +type Pruner struct { + db ethdb.Database + stateBloom *stateBloom + datadir string + trieCachePath string + headHeader *types.Header + snaptree *snapshot.Tree +} + +// NewPruner creates the pruner instance. +func NewPruner(db ethdb.Database, headHeader *types.Header, datadir, trieCachePath string, bloomSize uint64) (*Pruner, error) { + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, false) + if err != nil { + return nil, err // The relevant snapshot(s) might not exist + } + // Sanitize the bloom filter size if it's too small. + if bloomSize < 256 { + log.Warn("Sanitizing bloomfilter size", "provided(MB)", bloomSize, "updated(MB)", 256) + bloomSize = 256 + } + stateBloom, err := newStateBloomWithSize(bloomSize) + if err != nil { + return nil, err + } + return &Pruner{ + db: db, + stateBloom: stateBloom, + datadir: datadir, + trieCachePath: trieCachePath, + headHeader: headHeader, + snaptree: snaptree, + }, nil +} + +func prune(maindb ethdb.Database, stateBloom *stateBloom, middleStateRoots map[common.Hash]struct{}, start time.Time) error { + // Delete all stale trie nodes in the disk. With the help of state bloom + // the trie nodes(and codes) belong to the active state will be filtered + // out. A very small part of stale tries will also be filtered because of + // the false-positive rate of bloom filter. But the assumption is held here + // that the false-positive is low enough(~0.05%). The probablity of the + // dangling node is the state root is super low. So the dangling nodes in + // theory will never ever be visited again. 
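// Put differently, for every iterated key the decision below is:
//   - skip it if it is neither a trie node nor contract code (wrong length/prefix),
//   - keep it if the state bloom reports it as part of the target state,
//   - delete it if it belongs to a middle state root or to no retained state at all.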
+ var ( + count int + size common.StorageSize + pstart = time.Now() + logged = time.Now() + batch = maindb.NewBatch() + iter = maindb.NewIterator(nil, nil) + ) + for iter.Next() { + key := iter.Key() + + // All state entries don't belong to specific state and genesis are deleted here + // - trie node + // - legacy contract code + // - new-scheme contract code + isCode, codeKey := rawdb.IsCodeKey(key) + if len(key) == common.HashLength || isCode { + checkKey := key + if isCode { + checkKey = codeKey + } + if _, exist := middleStateRoots[common.BytesToHash(checkKey)]; exist { + log.Debug("Forcibly delete the middle state roots", "hash", common.BytesToHash(checkKey)) + } else { + if ok, err := stateBloom.Contain(checkKey); err != nil { + return err + } else if ok { + continue + } + } + count += 1 + size += common.StorageSize(len(key) + len(iter.Value())) + batch.Delete(key) + + var eta time.Duration // Realistically will never remain uninited + if done := binary.BigEndian.Uint64(key[:8]); done > 0 { + var ( + left = math.MaxUint64 - binary.BigEndian.Uint64(key[:8]) + speed = done/uint64(time.Since(pstart)/time.Millisecond+1) + 1 // +1s to avoid division by zero + ) + eta = time.Duration(left/speed) * time.Millisecond + } + if time.Since(logged) > 8*time.Second { + log.Info("Pruning state data", "nodes", count, "size", size, + "elapsed", common.PrettyDuration(time.Since(pstart)), "eta", common.PrettyDuration(eta)) + logged = time.Now() + } + // Recreate the iterator after every batch commit in order + // to allow the underlying compactor to delete the entries. + if batch.ValueSize() >= ethdb.IdealBatchSize { + batch.Write() + batch.Reset() + + iter.Release() + iter = maindb.NewIterator(nil, key) + } + } + } + if batch.ValueSize() > 0 { + batch.Write() + batch.Reset() + } + iter.Release() + log.Info("Pruned state data", "nodes", count, "size", size, "elapsed", common.PrettyDuration(time.Since(pstart))) + + // Start compactions, will remove the deleted data from the disk immediately. + // Note for small pruning, the compaction is skipped. + if count >= rangeCompactionThreshold { + cstart := time.Now() + for b := 0x00; b <= 0xf0; b += 0x10 { + var ( + start = []byte{byte(b)} + end = []byte{byte(b + 0x10)} + ) + if b == 0xf0 { + end = nil + } + log.Info("Compacting database", "range", fmt.Sprintf("%#x-%#x", start, end), "elapsed", common.PrettyDuration(time.Since(cstart))) + if err := maindb.Compact(start, end); err != nil { + log.Error("Database compaction failed", "error", err) + return err + } + } + log.Info("Database compaction finished", "elapsed", common.PrettyDuration(time.Since(cstart))) + } + log.Info("State pruning successful", "pruned", size, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// Prune deletes all historical state nodes except the nodes belong to the +// specified state version. If user doesn't specify the state version, use +// the bottom-most snapshot diff layer as the target. +func (p *Pruner) Prune(root common.Hash) error { + // If the state bloom filter is already committed previously, + // reuse it for pruning instead of generating a new one. It's + // mandatory because a part of state may already be deleted, + // the recovery procedure is necessary. + _, stateBloomRoot, err := findBloomFilter(p.datadir) + if err != nil { + return err + } + if stateBloomRoot != (common.Hash{}) { + return RecoverPruning(p.datadir, p.db, p.trieCachePath) + } + // If the target state root is not specified, use the HEAD-127 as the + // target. 
The reason for picking it is: + // - in most of the normal cases, the related state is available + // - the probability of this layer being reorg is very low + var layers []snapshot.Snapshot + if root == (common.Hash{}) { + // Retrieve all snapshot layers from the current HEAD. + // In theory there are 128 difflayers + 1 disk layer present, + // so 128 diff layers are expected to be returned. + layers = p.snaptree.Snapshots(p.headHeader.Root, 128, true) + if len(layers) != 128 { + // Reject if the accumulated diff layers are less than 128. It + // means in most of normal cases, there is no associated state + // with bottom-most diff layer. + return fmt.Errorf("snapshot not old enough yet: need %d more blocks", 128-len(layers)) + } + // Use the bottom-most diff layer as the target + root = layers[len(layers)-1].Root() + } + // Ensure the root is really present. The weak assumption + // is the presence of root can indicate the presence of the + // entire trie. + if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 { + // The special case is for clique based networks(rinkeby, goerli + // and some other private networks), it's possible that two + // consecutive blocks will have same root. In this case snapshot + // difflayer won't be created. So HEAD-127 may not paired with + // head-127 layer. Instead the paired layer is higher than the + // bottom-most diff layer. Try to find the bottom-most snapshot + // layer with state available. + // + // Note HEAD and HEAD-1 is ignored. Usually there is the associated + // state available, but we don't want to use the topmost state + // as the pruning target. + var found bool + for i := len(layers) - 2; i >= 2; i-- { + if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 { + root = layers[i].Root() + found = true + log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i) + break + } + } + if !found { + if len(layers) > 0 { + return errors.New("no snapshot paired state") + } + return fmt.Errorf("associated state[%x] is not present", root) + } + } else { + if len(layers) > 0 { + log.Info("Selecting bottom-most difflayer as the pruning target", "root", root, "height", p.headHeader.Number.Uint64()-127) + } else { + log.Info("Selecting user-specified state as the pruning target", "root", root) + } + } + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(p.trieCachePath) + + // All the state roots of the middle layer should be forcibly pruned, + // otherwise the dangling state will be left. + middleRoots := make(map[common.Hash]struct{}) + for _, layer := range layers { + if layer.Root() == root { + break + } + middleRoots[layer.Root()] = struct{}{} + } + // Traverse the target state, re-construct the whole state trie and + // commit to the given bloom filter. + start := time.Now() + if err := snapshot.GenerateTrie(p.snaptree, root, p.db, p.stateBloom); err != nil { + return err + } + // Traverse the genesis, put all genesis state entries into the + // bloom filter too. 
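// The genesis state is always retained: marking its trie nodes and contract
// code in the bloom here is what prevents the sweep from deleting them.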
+ if err := extractGenesis(p.db, p.stateBloom); err != nil { + return err + } + filterName := bloomFilterName(p.datadir, root) + + log.Info("Writing state bloom to disk", "name", filterName) + if err := p.stateBloom.Commit(filterName, filterName+stateBloomFileTempSuffix); err != nil { + return err + } + log.Info("State bloom filter committed", "name", filterName) + + if err := prune(p.db, p.stateBloom, middleRoots, start); err != nil { + return err + } + // Pruning is done, now drop the "useless" layers from the snapshot. + // Firstly, flushing the target layer into the disk. After that all + // diff layers below the target will all be merged into the disk. + if err := p.snaptree.Cap(root, 0); err != nil { + return err + } + // Secondly, flushing the snapshot journal into the disk. All diff + // layers upon the target layer are dropped silently. Eventually the + // entire snapshot tree is converted into a single disk layer with + // the pruning target as the root. + if _, err := p.snaptree.Journal(root); err != nil { + return err + } + // Delete the state bloom, it marks the entire pruning procedure is + // finished. If any crashes or manual exit happens before this, + // `RecoverPruning` will pick it up in the next restarts to redo all + // the things. + os.RemoveAll(filterName) + return nil +} + +// RecoverPruning will resume the pruning procedure during the system restart. +// This function is used in this case: user tries to prune state data, but the +// system was interrupted midway because of crash or manual-kill. In this case +// if the bloom filter for filtering active state is already constructed, the +// pruning can be resumed. What's more if the bloom filter is constructed, the +// pruning **has to be resumed**. Otherwise a lot of dangling nodes may be left +// in the disk. +func RecoverPruning(datadir string, db ethdb.Database, trieCachePath string) error { + stateBloomPath, stateBloomRoot, err := findBloomFilter(datadir) + if err != nil { + return err + } + if stateBloomPath == "" { + return nil // nothing to recover + } + headHeader, err := getHeadHeader(db) + if err != nil { + return err + } + // Initialize the snapshot tree in recovery mode to handle this special case: + // - Users run the `prune-state` command multiple times + // - Neither these `prune-state` running is finished(e.g. interrupted manually) + // - The state bloom filter is already generated, a part of state is deleted, + // so that resuming the pruning here is mandatory + // - The state HEAD is rewound already because of multiple incomplete `prune-state` + // In this case, even the state HEAD is not exactly matched with snapshot, it + // still feasible to recover the pruning correctly. + snaptree, err := snapshot.New(db, trie.NewDatabase(db), 256, headHeader.Root, false, false, true) + if err != nil { + return err // The relevant snapshot(s) might not exist + } + stateBloom, err := NewStateBloomFromDisk(stateBloomPath) + if err != nil { + return err + } + log.Info("Loaded state bloom filter", "path", stateBloomPath) + + // Before start the pruning, delete the clean trie cache first. + // It's necessary otherwise in the next restart we will hit the + // deleted state root in the "clean cache" so that the incomplete + // state is picked for usage. + deleteCleanTrieCache(trieCachePath) + + // All the state roots of the middle layers should be forcibly pruned, + // otherwise the dangling state will be left. 
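// "Middle layers" are the diff layers sitting between the current head and the
// recorded bloom root; their roots are collected below so the sweep deletes
// them even if the bloom filter happens to report them as contained.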
+ var ( + found bool + layers = snaptree.Snapshots(headHeader.Root, 128, true) + middleRoots = make(map[common.Hash]struct{}) + ) + for _, layer := range layers { + if layer.Root() == stateBloomRoot { + found = true + break + } + middleRoots[layer.Root()] = struct{}{} + } + if !found { + log.Error("Pruning target state is not existent") + return errors.New("non-existent target state") + } + if err := prune(db, stateBloom, middleRoots, time.Now()); err != nil { + return err + } + // Pruning is done, now drop the "useless" layers from the snapshot. + // Firstly, flushing the target layer into the disk. After that all + // diff layers below the target will all be merged into the disk. + if err := snaptree.Cap(stateBloomRoot, 0); err != nil { + return err + } + // Secondly, flushing the snapshot journal into the disk. All diff + // layers upon are dropped silently. Eventually the entire snapshot + // tree is converted into a single disk layer with the pruning target + // as the root. + if _, err := snaptree.Journal(stateBloomRoot); err != nil { + return err + } + // Delete the state bloom, it marks the entire pruning procedure is + // finished. If any crashes or manual exit happens before this, + // `RecoverPruning` will pick it up in the next restarts to redo all + // the things. + os.RemoveAll(stateBloomPath) + return nil +} + +// extractGenesis loads the genesis state and commits all the state entries +// into the given bloomfilter. +func extractGenesis(db ethdb.Database, stateBloom *stateBloom) error { + genesisHash := rawdb.ReadCanonicalHash(db, 0) + if genesisHash == (common.Hash{}) { + return errors.New("missing genesis hash") + } + genesis := rawdb.ReadBlock(db, genesisHash, 0) + if genesis == nil { + return errors.New("missing genesis block") + } + t, err := trie.NewSecure(genesis.Root(), trie.NewDatabase(db)) + if err != nil { + return err + } + accIter := t.NodeIterator(nil) + for accIter.Next(true) { + hash := accIter.Hash() + + // Embedded nodes don't have hash. + if hash != (common.Hash{}) { + stateBloom.Put(hash.Bytes(), nil) + } + // If it's a leaf node, yes we are touching an account, + // dig into the storage trie further. 
+ if accIter.Leaf() { + var acc state.Account + if err := rlp.DecodeBytes(accIter.LeafBlob(), &acc); err != nil { + return err + } + if acc.Root != emptyRoot { + storageTrie, err := trie.NewSecure(acc.Root, trie.NewDatabase(db)) + if err != nil { + return err + } + storageIter := storageTrie.NodeIterator(nil) + for storageIter.Next(true) { + hash := storageIter.Hash() + if hash != (common.Hash{}) { + stateBloom.Put(hash.Bytes(), nil) + } + } + if storageIter.Error() != nil { + return storageIter.Error() + } + } + if !bytes.Equal(acc.CodeHash, emptyCode) { + stateBloom.Put(acc.CodeHash, nil) + } + } + } + return accIter.Error() +} + +func bloomFilterName(datadir string, hash common.Hash) string { + return filepath.Join(datadir, fmt.Sprintf("%s.%s.%s", stateBloomFilePrefix, hash.Hex(), stateBloomFileSuffix)) +} + +func isBloomFilter(filename string) (bool, common.Hash) { + filename = filepath.Base(filename) + if strings.HasPrefix(filename, stateBloomFilePrefix) && strings.HasSuffix(filename, stateBloomFileSuffix) { + return true, common.HexToHash(filename[len(stateBloomFilePrefix)+1 : len(filename)-len(stateBloomFileSuffix)-1]) + } + return false, common.Hash{} +} + +func findBloomFilter(datadir string) (string, common.Hash, error) { + var ( + stateBloomPath string + stateBloomRoot common.Hash + ) + if err := filepath.Walk(datadir, func(path string, info os.FileInfo, err error) error { + if info != nil && !info.IsDir() { + ok, root := isBloomFilter(path) + if ok { + stateBloomPath = path + stateBloomRoot = root + } + } + return nil + }); err != nil { + return "", common.Hash{}, err + } + return stateBloomPath, stateBloomRoot, nil +} + +func getHeadHeader(db ethdb.Database) (*types.Header, error) { + headHeaderHash := rawdb.ReadHeadBlockHash(db) + if headHeaderHash == (common.Hash{}) { + return nil, errors.New("empty head block hash") + } + headHeaderNumber := rawdb.ReadHeaderNumber(db, headHeaderHash) + if headHeaderNumber == nil { + return nil, errors.New("empty head block number") + } + headHeader := rawdb.ReadHeader(db, headHeaderHash, *headHeaderNumber) + if headHeader == nil { + return nil, errors.New("empty head header") + } + return headHeader, nil +} + +const warningLog = ` + +WARNING! + +The clean trie cache is not found. Please delete it by yourself after the +pruning. Remember don't start the Geth without deleting the clean trie cache +otherwise the entire database may be damaged! + +Check the command description "geth snapshot prune-state --help" for more details. +` + +func deleteCleanTrieCache(path string) { + if _, err := os.Stat(path); os.IsNotExist(err) { + log.Warn(warningLog) + return + } + os.RemoveAll(path) + log.Info("Deleted trie clean cache", "path", path) +} diff --git a/core/state/snapshot/conversion.go b/core/state/snapshot/conversion.go index dee9ff0bf2..bb87ecddf1 100644 --- a/core/state/snapshot/conversion.go +++ b/core/state/snapshot/conversion.go @@ -18,12 +18,17 @@ package snapshot import ( "bytes" + "encoding/binary" + "errors" "fmt" + "math" + "runtime" "sync" "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" @@ -38,46 +43,56 @@ type trieKV struct { type ( // trieGeneratorFn is the interface of trie generation which can // be implemented by different trie algorithm. 
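// Any function matching the new signature can be plugged in as a generator;
// it consumes leaves from `in`, optionally persists nodes through `db` (nil
// means hash-only), and sends the resulting root on `out`, e.g. the stack-trie
// based stackTrieGenerate defined later in this file.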
- trieGeneratorFn func(in chan (trieKV), out chan (common.Hash)) + trieGeneratorFn func(db ethdb.KeyValueWriter, in chan (trieKV), out chan (common.Hash)) // leafCallbackFn is the callback invoked at the leaves of the trie, // returns the subtrie root with the specified subtrie identifier. - leafCallbackFn func(hash common.Hash, stat *generateStats) common.Hash + leafCallbackFn func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) ) // GenerateAccountTrieRoot takes an account iterator and reproduces the root hash. func GenerateAccountTrieRoot(it AccountIterator) (common.Hash, error) { - return generateTrieRoot(it, common.Hash{}, stdGenerate, nil, &generateStats{start: time.Now()}, true) + return generateTrieRoot(nil, it, common.Hash{}, stackTrieGenerate, nil, newGenerateStats(), true) } // GenerateStorageTrieRoot takes a storage iterator and reproduces the root hash. func GenerateStorageTrieRoot(account common.Hash, it StorageIterator) (common.Hash, error) { - return generateTrieRoot(it, account, stdGenerate, nil, &generateStats{start: time.Now()}, true) + return generateTrieRoot(nil, it, account, stackTrieGenerate, nil, newGenerateStats(), true) } -// VerifyState takes the whole snapshot tree as the input, traverses all the accounts -// as well as the corresponding storages and compares the re-computed hash with the -// original one(state root and the storage root). -func VerifyState(snaptree *Tree, root common.Hash) error { +// GenerateTrie takes the whole snapshot tree as the input, traverses all the +// accounts as well as the corresponding storages and regenerate the whole state +// (account trie + all storage tries). +func GenerateTrie(snaptree *Tree, root common.Hash, src ethdb.Database, dst ethdb.KeyValueWriter) error { + // Traverse all state by snapshot, re-generate the whole state trie acctIt, err := snaptree.AccountIterator(root, common.Hash{}) if err != nil { - return err + return err // The required snapshot might not exist. } defer acctIt.Release() - got, err := generateTrieRoot(acctIt, common.Hash{}, stdGenerate, func(account common.Hash, stat *generateStats) common.Hash { - storageIt, err := snaptree.StorageIterator(root, account, common.Hash{}) + got, err := generateTrieRoot(dst, acctIt, common.Hash{}, stackTrieGenerate, func(dst ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + // Migrate the code first, commit the contract code into the tmp db. + if codeHash != emptyCode { + code := rawdb.ReadCode(src, codeHash) + if len(code) == 0 { + return common.Hash{}, errors.New("failed to read contract code") + } + rawdb.WriteCode(dst, codeHash, code) + } + // Then migrate all storage trie nodes into the tmp db. + storageIt, err := snaptree.StorageIterator(root, accountHash, common.Hash{}) if err != nil { - return common.Hash{} + return common.Hash{}, err } defer storageIt.Release() - hash, err := generateTrieRoot(storageIt, account, stdGenerate, nil, stat, false) + hash, err := generateTrieRoot(dst, storageIt, accountHash, stackTrieGenerate, nil, stat, false) if err != nil { - return common.Hash{} + return common.Hash{}, err } - return hash - }, &generateStats{start: time.Now()}, true) + return hash, nil + }, newGenerateStats(), true) if err != nil { return err @@ -91,23 +106,64 @@ func VerifyState(snaptree *Tree, root common.Hash) error { // generateStats is a collection of statistics gathered by the trie generator // for logging purposes. 
type generateStats struct { - accounts uint64 - slots uint64 - curAccount common.Hash - curSlot common.Hash - start time.Time - lock sync.RWMutex + head common.Hash + start time.Time + + accounts uint64 // Number of accounts done (including those being crawled) + slots uint64 // Number of storage slots done (including those being crawled) + + slotsStart map[common.Hash]time.Time // Start time for account slot crawling + slotsHead map[common.Hash]common.Hash // Slot head for accounts being crawled + + lock sync.RWMutex } -// progress records the progress trie generator made recently. -func (stat *generateStats) progress(accounts, slots uint64, curAccount common.Hash, curSlot common.Hash) { +// newGenerateStats creates a new generator stats. +func newGenerateStats() *generateStats { + return &generateStats{ + slotsStart: make(map[common.Hash]time.Time), + slotsHead: make(map[common.Hash]common.Hash), + start: time.Now(), + } +} + +// progressAccounts updates the generator stats for the account range. +func (stat *generateStats) progressAccounts(account common.Hash, done uint64) { stat.lock.Lock() defer stat.lock.Unlock() - stat.accounts += accounts - stat.slots += slots - stat.curAccount = curAccount - stat.curSlot = curSlot + stat.accounts += done + stat.head = account +} + +// finishAccounts updates the gemerator stats for the finished account range. +func (stat *generateStats) finishAccounts(done uint64) { + stat.lock.Lock() + defer stat.lock.Unlock() + + stat.accounts += done +} + +// progressContract updates the generator stats for a specific in-progress contract. +func (stat *generateStats) progressContract(account common.Hash, slot common.Hash, done uint64) { + stat.lock.Lock() + defer stat.lock.Unlock() + + stat.slots += done + stat.slotsHead[account] = slot + if _, ok := stat.slotsStart[account]; !ok { + stat.slotsStart[account] = time.Now() + } +} + +// finishContract updates the generator stats for a specific just-finished contract. +func (stat *generateStats) finishContract(account common.Hash, done uint64) { + stat.lock.Lock() + defer stat.lock.Unlock() + + stat.slots += done + delete(stat.slotsHead, account) + delete(stat.slotsStart, account) } // report prints the cumulative progress statistic smartly. @@ -115,22 +171,39 @@ func (stat *generateStats) report() { stat.lock.RLock() defer stat.lock.RUnlock() - var ctx []interface{} - if stat.curSlot != (common.Hash{}) { - ctx = append(ctx, []interface{}{ - "in", stat.curAccount, - "at", stat.curSlot, - }...) - } else { - ctx = append(ctx, []interface{}{"at", stat.curAccount}...) + ctx := []interface{}{ + "accounts", stat.accounts, + "slots", stat.slots, + "elapsed", common.PrettyDuration(time.Since(stat.start)), } - // Add the usual measurements - ctx = append(ctx, []interface{}{"accounts", stat.accounts}...) - if stat.slots != 0 { - ctx = append(ctx, []interface{}{"slots", stat.slots}...) 
+ if stat.accounts > 0 { + // If there's progress on the account trie, estimate the time to finish crawling it + if done := binary.BigEndian.Uint64(stat.head[:8]) / stat.accounts; done > 0 { + var ( + left = (math.MaxUint64 - binary.BigEndian.Uint64(stat.head[:8])) / stat.accounts + speed = done/uint64(time.Since(stat.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero + eta = time.Duration(left/speed) * time.Millisecond + ) + // If there are large contract crawls in progress, estimate their finish time + for acc, head := range stat.slotsHead { + start := stat.slotsStart[acc] + if done := binary.BigEndian.Uint64(head[:8]); done > 0 { + var ( + left = math.MaxUint64 - binary.BigEndian.Uint64(head[:8]) + speed = done/uint64(time.Since(start)/time.Millisecond+1) + 1 // +1s to avoid division by zero + ) + // Override the ETA if larger than the largest until now + if slotETA := time.Duration(left/speed) * time.Millisecond; eta < slotETA { + eta = slotETA + } + } + } + ctx = append(ctx, []interface{}{ + "eta", common.PrettyDuration(eta), + }...) + } } - ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...) - log.Info("Generating trie hash from snapshot", ctx...) + log.Info("Iterating state snapshot", ctx...) } // reportDone prints the last log when the whole generation is finished. @@ -144,13 +217,32 @@ func (stat *generateStats) reportDone() { ctx = append(ctx, []interface{}{"slots", stat.slots}...) } ctx = append(ctx, []interface{}{"elapsed", common.PrettyDuration(time.Since(stat.start))}...) - log.Info("Generated trie hash from snapshot", ctx...) + log.Info("Iterated snapshot", ctx...) +} + +// runReport periodically prints the progress information. +func runReport(stats *generateStats, stop chan bool) { + timer := time.NewTimer(0) + defer timer.Stop() + + for { + select { + case <-timer.C: + stats.report() + timer.Reset(time.Second * 8) + case success := <-stop: + if success { + stats.reportDone() + } + return + } + } } // generateTrieRoot generates the trie hash based on the snapshot iterator. // It can be used for generating account trie, storage trie or even the // whole state which connects the accounts and the corresponding storages. -func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { +func generateTrieRoot(db ethdb.KeyValueWriter, it Iterator, account common.Hash, generatorFn trieGeneratorFn, leafCallback leafCallbackFn, stats *generateStats, report bool) (common.Hash, error) { var ( in = make(chan trieKV) // chan to pass leaves out = make(chan common.Hash, 1) // chan to collect result @@ -161,46 +253,43 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato wg.Add(1) go func() { defer wg.Done() - generatorFn(in, out) + generatorFn(db, in, out) }() - // Spin up a go-routine for progress logging if report && stats != nil { wg.Add(1) go func() { defer wg.Done() - - timer := time.NewTimer(0) - defer timer.Stop() - - for { - select { - case <-timer.C: - stats.report() - timer.Reset(time.Second * 8) - case success := <-stoplog: - if success { - stats.reportDone() - } - return - } - } + runReport(stats, stoplog) }() } + // Create a semaphore to assign tasks and collect results through. We'll pre- + // fill it with nils, thus using the same channel for both limiting concurrent + // processing and gathering results. 
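// Concretely: a leaf-callback goroutine is only spawned after a slot has been
// received from `results`, and it hands the slot back (nil or an error) when
// done; stop() drains all slots again, so every in-flight sub-task is
// accounted for exactly once before the final root is returned.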
+ threads := runtime.NumCPU() + results := make(chan error, threads) + for i := 0; i < threads; i++ { + results <- nil // fill the semaphore + } // stop is a helper function to shutdown the background threads // and return the re-generated trie hash. - stop := func(success bool) common.Hash { + stop := func(fail error) (common.Hash, error) { close(in) result := <-out - stoplog <- success + for i := 0; i < threads; i++ { + if err := <-results; err != nil && fail == nil { + fail = err + } + } + stoplog <- fail == nil + wg.Wait() - return result + return result, fail } var ( logged = time.Now() processed = uint64(0) leaf trieKV - last common.Hash ) // Start to feed leaves for it.Next() { @@ -212,26 +301,35 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato if leafCallback == nil { fullData, err = FullAccountRLP(it.(AccountIterator).Account()) if err != nil { - stop(false) - return common.Hash{}, err + return stop(err) } } else { + // Wait until the semaphore allows us to continue, aborting if + // a sub-task failed + if err := <-results; err != nil { + results <- nil // stop will drain the results, add a noop back for this error we just consumed + return stop(err) + } + // Fetch the next account and process it concurrently account, err := FullAccount(it.(AccountIterator).Account()) if err != nil { - stop(false) - return common.Hash{}, err - } - // Apply the leaf callback. Normally the callback is used to traverse - // the storage trie and re-generate the subtrie root. - subroot := leafCallback(it.Hash(), stats) - if !bytes.Equal(account.Root, subroot.Bytes()) { - stop(false) - return common.Hash{}, fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot) + return stop(err) } + go func(hash common.Hash) { + subroot, err := leafCallback(db, hash, common.BytesToHash(account.CodeHash), stats) + if err != nil { + results <- err + return + } + if !bytes.Equal(account.Root, subroot.Bytes()) { + results <- fmt.Errorf("invalid subroot(%x), want %x, got %x", it.Hash(), account.Root, subroot) + return + } + results <- nil + }(it.Hash()) fullData, err = rlp.EncodeToBytes(account) if err != nil { - stop(false) - return common.Hash{}, err + return stop(err) } } leaf = trieKV{it.Hash(), fullData} @@ -240,36 +338,38 @@ func generateTrieRoot(it Iterator, account common.Hash, generatorFn trieGenerato } in <- leaf - // Accumulate the generaation statistic if it's required. + // Accumulate the generation statistic if it's required. processed++ if time.Since(logged) > 3*time.Second && stats != nil { if account == (common.Hash{}) { - stats.progress(processed, 0, it.Hash(), common.Hash{}) + stats.progressAccounts(it.Hash(), processed) } else { - stats.progress(0, processed, account, it.Hash()) + stats.progressContract(account, it.Hash(), processed) } logged, processed = time.Now(), 0 } - last = it.Hash() } // Commit the last part statistic. 
if processed > 0 && stats != nil { if account == (common.Hash{}) { - stats.progress(processed, 0, last, common.Hash{}) + stats.finishAccounts(processed) } else { - stats.progress(0, processed, account, last) + stats.finishContract(account, processed) } } - result := stop(true) - return result, nil + return stop(nil) } -// stdGenerate is a very basic hexary trie builder which uses the same Trie -// as the rest of geth, with no enhancements or optimizations -func stdGenerate(in chan (trieKV), out chan (common.Hash)) { - t, _ := trie.New(common.Hash{}, trie.NewDatabase(memorydb.New())) +func stackTrieGenerate(db ethdb.KeyValueWriter, in chan trieKV, out chan common.Hash) { + t := trie.NewStackTrie(db) for leaf := range in { t.TryUpdate(leaf.key[:], leaf.value) } - out <- t.Hash() + var root common.Hash + if db == nil { + root = t.Hash() + } else { + root, _ = t.Commit() + } + out <- root } diff --git a/core/state/snapshot/difflayer.go b/core/state/snapshot/difflayer.go index 0aef6cf570..9c86a679d1 100644 --- a/core/state/snapshot/difflayer.go +++ b/core/state/snapshot/difflayer.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" - "github.com/steakknife/bloomfilter" + bloomfilter "github.com/holiman/bloomfilter/v2" ) var ( @@ -44,7 +44,7 @@ var ( // aggregatorItemLimit is an approximate number of items that will end up // in the agregator layer before it's flushed out to disk. A plain account // weighs around 14B (+hash), a storage slot 32B (+hash), a deleted slot - // 0B (+hash). Slots are mostly set/unset in lockstep, so thet average at + // 0B (+hash). Slots are mostly set/unset in lockstep, so that average at // 16B (+hash). All in all, the average entry seems to be 15+32=47B. Use a // smaller number to be on the safe side. aggregatorItemLimit = aggregatorMemoryLimit / 42 @@ -114,9 +114,9 @@ type diffLayer struct { // deleted, all data in other set belongs to the "new" A. destructSet map[common.Hash]struct{} // Keyed markers for deleted (and potentially) recreated accounts accountList []common.Hash // List of account for iteration. If it exists, it's sorted, otherwise it's nil - accountData map[common.Hash][]byte // Keyed accounts for direct retrival (nil means deleted) + accountData map[common.Hash][]byte // Keyed accounts for direct retrieval (nil means deleted) storageList map[common.Hash][]common.Hash // List of storage slots for iterated retrievals, one per account. Any existing lists are sorted if non-nil - storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrival. one per account (nil means deleted) + storageData map[common.Hash]map[common.Hash][]byte // Keyed storage slots for direct retrieval. 
one per account (nil means deleted) diffed *bloomfilter.Filter // Bloom filter tracking all the diffed items up to the disk layer @@ -191,19 +191,15 @@ func newDiffLayer(parent snapshot, root common.Hash, destructs map[common.Hash]s if blob == nil { panic(fmt.Sprintf("account %#x nil", accountHash)) } + // Determine memory size and track the dirty writes + dl.memory += uint64(common.HashLength + len(blob)) + snapshotDirtyAccountWriteMeter.Mark(int64(len(blob))) } for accountHash, slots := range storage { if slots == nil { panic(fmt.Sprintf("storage %#x nil", accountHash)) } - } - // Determine memory size and track the dirty writes - for _, data := range accounts { - dl.memory += uint64(common.HashLength + len(data)) - snapshotDirtyAccountWriteMeter.Mark(int64(len(data))) - } - // Determine memory size and track the dirty writes - for _, slots := range storage { + // Determine memory size and track the dirty writes for _, data := range slots { dl.memory += uint64(common.HashLength + len(data)) snapshotDirtyStorageWriteMeter.Mark(int64(len(data))) @@ -482,7 +478,7 @@ func (dl *diffLayer) flatten() snapshot { } } -// AccountList returns a sorted list of all accounts in this difflayer, including +// AccountList returns a sorted list of all accounts in this diffLayer, including // the deleted ones. // // Note, the returned slice is not a copy, so do not modify it. @@ -513,7 +509,7 @@ func (dl *diffLayer) AccountList() []common.Hash { return dl.accountList } -// StorageList returns a sorted list of all storage slot hashes in this difflayer +// StorageList returns a sorted list of all storage slot hashes in this diffLayer // for the given account. If the whole storage is destructed in this layer, then // an additional flag *destructed = true* will be returned, otherwise the flag is // false. Besides, the returned list will include the hash of deleted storage slot. diff --git a/core/state/snapshot/difflayer_test.go b/core/state/snapshot/difflayer_test.go index 31636ee133..919af5fa86 100644 --- a/core/state/snapshot/difflayer_test.go +++ b/core/state/snapshot/difflayer_test.go @@ -314,7 +314,7 @@ func BenchmarkSearchSlot(b *testing.B) { // With accountList and sorting // BenchmarkFlatten-6 50 29890856 ns/op // -// Without sorting and tracking accountlist +// Without sorting and tracking accountList // BenchmarkFlatten-6 300 5511511 ns/op func BenchmarkFlatten(b *testing.B) { fill := func(parent snapshot) *diffLayer { diff --git a/core/state/snapshot/disklayer.go b/core/state/snapshot/disklayer.go index e8f2bc853f..7cbf6e293d 100644 --- a/core/state/snapshot/disklayer.go +++ b/core/state/snapshot/disklayer.go @@ -31,7 +31,7 @@ import ( // diskLayer is a low level persistent snapshot built on top of a key-value store. 
type diskLayer struct { diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot - triedb *trie.Database // Trie node cache for reconstuction purposes + triedb *trie.Database // Trie node cache for reconstruction purposes cache *fastcache.Cache // Cache to avoid hitting the disk for direct access root common.Hash // Root hash of the base snapshot diff --git a/core/state/snapshot/disklayer_test.go b/core/state/snapshot/disklayer_test.go index 40ff5ade4c..6beb944e07 100644 --- a/core/state/snapshot/disklayer_test.go +++ b/core/state/snapshot/disklayer_test.go @@ -482,7 +482,7 @@ func TestDiskGeneratorPersistence(t *testing.T) { if !bytes.Equal(generator.Marker, genMarker) { t.Fatalf("Generator marker is not matched") } - // Test senario 2, the disk layer is fully generated + // Test scenario 2, the disk layer is fully generated // Modify or delete some accounts, flatten everything onto disk if err := snaps.Update(diffTwoRoot, diffRoot, nil, map[common.Hash][]byte{ accThree: accThree.Bytes(), diff --git a/core/state/snapshot/generate.go b/core/state/snapshot/generate.go index 92c7640c40..2b41dd5513 100644 --- a/core/state/snapshot/generate.go +++ b/core/state/snapshot/generate.go @@ -101,18 +101,26 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i wiper = wipeSnapshot(diskdb, true) } // Create a new disk layer with an initialized state marker at zero - rawdb.WriteSnapshotRoot(diskdb, root) - + var ( + stats = &generatorStats{wiping: wiper, start: time.Now()} + batch = diskdb.NewBatch() + genMarker = []byte{} // Initialized but empty! + ) + rawdb.WriteSnapshotRoot(batch, root) + journalProgress(batch, genMarker, stats) + if err := batch.Write(); err != nil { + log.Crit("Failed to write initialized state marker", "error", err) + } base := &diskLayer{ diskdb: diskdb, triedb: triedb, root: root, cache: fastcache.New(cache * 1024 * 1024), - genMarker: []byte{}, // Initialized but empty! + genMarker: genMarker, genPending: make(chan struct{}), genAbort: make(chan chan *generatorStats), } - go base.generate(&generatorStats{wiping: wiper, start: time.Now()}) + go base.generate(stats) log.Debug("Start snapshot generation", "root", root) return base } @@ -137,10 +145,12 @@ func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorSta panic(err) // Cannot happen, here to catch dev errors } var logstr string - switch len(marker) { - case 0: + switch { + case marker == nil: logstr = "done" - case common.HashLength: + case bytes.Equal(marker, []byte{}): + logstr = "empty" + case len(marker) == common.HashLength: logstr = fmt.Sprintf("%#x", marker) default: logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:]) @@ -151,7 +161,7 @@ func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorSta // generate is a background thread that iterates over the state and storage tries, // constructing the state snapshot. All the arguments are purely for statistics -// gethering and logging, since the method surfs the blocks as they arrive, often +// gathering and logging, since the method surfs the blocks as they arrive, often // being restarted. 
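The `journalProgress` change above distinguishes four marker shapes: `nil` (generation finished), non-nil but empty (initialized, nothing generated yet), a 32-byte account hash, and a 64-byte account-plus-slot position. A small standalone helper that decodes a marker the same way (illustrative, not part of the patch):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// describeMarker mirrors the interpretation used by journalProgress: the
// generator marker is either nil (done), empty (freshly initialized), an
// account hash, or an account hash followed by a storage slot hash.
func describeMarker(marker []byte) string {
	switch {
	case marker == nil:
		return "done"
	case bytes.Equal(marker, []byte{}):
		return "empty"
	case len(marker) == common.HashLength:
		return fmt.Sprintf("account %#x", marker)
	default:
		return fmt.Sprintf("account %#x, slot %#x",
			marker[:common.HashLength], marker[common.HashLength:])
	}
}

func main() {
	acc := common.HexToHash("0xabc1").Bytes()
	slot := common.HexToHash("0xdef2").Bytes()
	fmt.Println(describeMarker(nil))
	fmt.Println(describeMarker([]byte{}))
	fmt.Println(describeMarker(acc))
	fmt.Println(describeMarker(append(acc, slot...)))
}
```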
func (dl *diskLayer) generate(stats *generatorStats) { // If a database wipe is in operation, wait until it's done @@ -241,7 +251,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { if acc.Root != emptyRoot { storeTrie, err := trie.NewSecure(acc.Root, dl.triedb) if err != nil { - log.Error("Generator failed to access storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err) + log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err) abort := <-dl.genAbort abort <- stats return @@ -281,6 +291,10 @@ func (dl *diskLayer) generate(stats *generatorStats) { abort <- stats return } + if time.Since(logged) > 8*time.Second { + stats.Log("Generating state snapshot", dl.root, append(accountHash[:], storeIt.Key...)) + logged = time.Now() + } } } if err := storeIt.Err; err != nil { @@ -303,13 +317,12 @@ func (dl *diskLayer) generate(stats *generatorStats) { abort <- stats return } - // Snapshot fully generated, set the marker to nil - if batch.ValueSize() > 0 { - // Ensure the generator entry is in sync with the data - journalProgress(batch, nil, stats) + // Snapshot fully generated, set the marker to nil. + // Note even there is nothing to commit, persist the + // generator anyway to mark the snapshot is complete. + journalProgress(batch, nil, stats) + batch.Write() - batch.Write() - } log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots, "storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start))) diff --git a/core/state/snapshot/iterator.go b/core/state/snapshot/iterator.go index 5f943fea9f..1d9340bbad 100644 --- a/core/state/snapshot/iterator.go +++ b/core/state/snapshot/iterator.go @@ -133,7 +133,7 @@ func (it *diffAccountIterator) Hash() common.Hash { // Account returns the RLP encoded slim account the iterator is currently at. // This method may _fail_, if the underlying layer has been flattened between -// the call to Next and Acccount. That type of error will set it.Err. +// the call to Next and Account. That type of error will set it.Err. // This method assumes that flattening does not delete elements from // the accountdata mapping (writing nil into it is fine though), and will panic // if elements have been deleted. @@ -243,7 +243,7 @@ type diffStorageIterator struct { } // StorageIterator creates a storage iterator over a single diff layer. -// Execept the storage iterator is returned, there is an additional flag +// Except the storage iterator is returned, there is an additional flag // "destructed" returned. If it's true then it means the whole storage is // destructed in this layer(maybe recreated too), don't bother deeper layer // for storage retrieval. diff --git a/core/state/snapshot/iterator_binary.go b/core/state/snapshot/iterator_binary.go index f82f750029..22184b2545 100644 --- a/core/state/snapshot/iterator_binary.go +++ b/core/state/snapshot/iterator_binary.go @@ -37,7 +37,7 @@ type binaryIterator struct { } // initBinaryAccountIterator creates a simplistic iterator to step over all the -// accounts in a slow, but eaily verifiable way. Note this function is used for +// accounts in a slow, but easily verifiable way. Note this function is used for // initialization, use `newBinaryAccountIterator` as the API. 
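Several error paths in `generate` above use the generator's abort handshake: the caller sends a reply channel over `genAbort` and the generator answers with its current stats on that channel before exiting. A stripped-down sketch of that request/response-over-channels pattern (types simplified for illustration):

```go
package main

import (
	"fmt"
	"time"
)

type stats struct{ items int }

// worker loops until it receives a reply channel on abort, then hands back its
// progress on that channel and exits, mirroring diskLayer.generate's shutdown.
func worker(abort chan chan *stats) {
	s := &stats{}
	for {
		select {
		case reply := <-abort:
			reply <- s
			return
		default:
			s.items++
			time.Sleep(time.Millisecond)
		}
	}
}

func main() {
	abort := make(chan chan *stats)
	go worker(abort)

	time.Sleep(50 * time.Millisecond)

	reply := make(chan *stats)
	abort <- reply                                    // ask the worker to stop...
	fmt.Println("items processed:", (<-reply).items) // ...and collect its stats
}
```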
func (dl *diffLayer) initBinaryAccountIterator() Iterator { parent, ok := dl.parent.(*diffLayer) @@ -62,7 +62,7 @@ func (dl *diffLayer) initBinaryAccountIterator() Iterator { } // initBinaryStorageIterator creates a simplistic iterator to step over all the -// storage slots in a slow, but eaily verifiable way. Note this function is used +// storage slots in a slow, but easily verifiable way. Note this function is used // for initialization, use `newBinaryStorageIterator` as the API. func (dl *diffLayer) initBinaryStorageIterator(account common.Hash) Iterator { parent, ok := dl.parent.(*diffLayer) @@ -199,14 +199,14 @@ func (it *binaryIterator) Release() { } // newBinaryAccountIterator creates a simplistic account iterator to step over -// all the accounts in a slow, but eaily verifiable way. +// all the accounts in a slow, but easily verifiable way. func (dl *diffLayer) newBinaryAccountIterator() AccountIterator { iter := dl.initBinaryAccountIterator() return iter.(AccountIterator) } // newBinaryStorageIterator creates a simplistic account iterator to step over -// all the storage slots in a slow, but eaily verifiable way. +// all the storage slots in a slow, but easily verifiable way. func (dl *diffLayer) newBinaryStorageIterator(account common.Hash) StorageIterator { iter := dl.initBinaryStorageIterator(account) return iter.(StorageIterator) diff --git a/core/state/snapshot/iterator_fast.go b/core/state/snapshot/iterator_fast.go index 291d52900d..48069b8fcf 100644 --- a/core/state/snapshot/iterator_fast.go +++ b/core/state/snapshot/iterator_fast.go @@ -75,7 +75,7 @@ type fastIterator struct { fail error } -// newFastIterator creates a new hierarhical account or storage iterator with one +// newFastIterator creates a new hierarchical account or storage iterator with one // element per diff layer. The returned combo iterator can be used to walk over // the entire snapshot diff stack simultaneously. func newFastIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash, accountIterator bool) (*fastIterator, error) { @@ -335,14 +335,14 @@ func (fi *fastIterator) Debug() { fmt.Println() } -// newFastAccountIterator creates a new hierarhical account iterator with one +// newFastAccountIterator creates a new hierarchical account iterator with one // element per diff layer. The returned combo iterator can be used to walk over // the entire snapshot diff stack simultaneously. func newFastAccountIterator(tree *Tree, root common.Hash, seek common.Hash) (AccountIterator, error) { return newFastIterator(tree, root, common.Hash{}, seek, true) } -// newFastStorageIterator creates a new hierarhical storage iterator with one +// newFastStorageIterator creates a new hierarchical storage iterator with one // element per diff layer. The returned combo iterator can be used to walk over // the entire snapshot diff stack simultaneously. 
func newFastStorageIterator(tree *Tree, root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) { diff --git a/core/state/snapshot/journal.go b/core/state/snapshot/journal.go index 178ba08902..d7e454cceb 100644 --- a/core/state/snapshot/journal.go +++ b/core/state/snapshot/journal.go @@ -441,6 +441,6 @@ func (dl *diffLayer) LegacyJournal(buffer *bytes.Buffer) (common.Hash, error) { if err := rlp.Encode(buffer, storage); err != nil { return common.Hash{}, err } - log.Debug("Legacy journalled disk layer", "root", dl.root, "parent", dl.parent.Root()) + log.Debug("Legacy journalled diff layer", "root", dl.root, "parent", dl.parent.Root()) return base, nil } diff --git a/core/state/snapshot/snapshot.go b/core/state/snapshot/snapshot.go index 60b4158b56..aa5f5900b0 100644 --- a/core/state/snapshot/snapshot.go +++ b/core/state/snapshot/snapshot.go @@ -178,7 +178,7 @@ type Tree struct { // store, on a background thread. If the memory layers from the journal is not // continuous with disk layer or the journal is missing, all diffs will be discarded // iff it's in "recovery" mode, otherwise rebuild is mandatory. -func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree { +func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) { // Create a new, empty snapshot tree snap := &Tree{ diskdb: diskdb, @@ -192,16 +192,19 @@ func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root comm // Attempt to load a previously persisted snapshot and rebuild one if failed head, err := loadSnapshot(diskdb, triedb, cache, root, recovery) if err != nil { - log.Warn("Failed to load snapshot, regenerating", "err", err) - snap.Rebuild(root) - return snap + if rebuild { + log.Warn("Failed to load snapshot, regenerating", "err", err) + snap.Rebuild(root) + return snap, nil + } + return nil, err // Bail out the error, don't rebuild automatically. } // Existing snapshot loaded, seed all the layers for head != nil { snap.layers[head.Root()] = head head = head.Parent() } - return snap + return snap, nil } // waitBuild blocks until the snapshot finishes rebuilding. This method is meant @@ -234,6 +237,39 @@ func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot { return t.layers[blockRoot] } +// Snapshots returns all visited layers from the topmost layer with specific +// root and traverses downward. The layer amount is limited by the given number. +// If nodisk is set, then disk layer is excluded. +func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot { + t.lock.RLock() + defer t.lock.RUnlock() + + if limits == 0 { + return nil + } + layer := t.layers[root] + if layer == nil { + return nil + } + var ret []Snapshot + for { + if _, isdisk := layer.(*diskLayer); isdisk && nodisk { + break + } + ret = append(ret, layer) + limits -= 1 + if limits == 0 { + break + } + parent := layer.Parent() + if parent == nil { + break + } + layer = parent + } + return ret +} + // Update adds a new snapshot into the tree, if that can be linked to an existing // old parent. It is disallowed to insert a disk layer (the origin of all). 
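The snapshot.go hunk above reworks `New` to surface the load error (regenerating only when `rebuild` is set) and adds `Snapshots` for walking a bounded number of layers from a head root. A hedged usage sketch, assuming a `diskdb`, `triedb` and `headRoot` are already available (those names are placeholders, not from the patch):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/trie"
)

// recentLayers shows the reworked constructor and the new Snapshots accessor:
// New now returns the load error instead of silently regenerating (unless
// rebuild is set), and Snapshots returns up to `limit` layers below headRoot,
// optionally skipping the disk layer at the bottom.
func recentLayers(diskdb ethdb.KeyValueStore, triedb *trie.Database, headRoot common.Hash) ([]snapshot.Snapshot, error) {
	snaps, err := snapshot.New(diskdb, triedb, 256, headRoot, true /* async */, false /* rebuild */, false /* recovery */)
	if err != nil {
		return nil, err // journal missing or out of sync, the caller decides what to do
	}
	// Grab at most 128 diff layers, excluding the disk layer.
	return snaps.Snapshots(headRoot, 128, true), nil
}
```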
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error { @@ -264,6 +300,12 @@ func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs m // Cap traverses downwards the snapshot tree from a head block hash until the // number of allowed layers are crossed. All layers beyond the permitted number // are flattened downwards. +// +// Note, the final diff layer count in general will be one more than the amount +// requested. This happens because the bottom-most diff layer is the accumulator +// which may or may not overflow and cascade to disk. Since this last layer's +// survival is only known *after* capping, we need to omit it from the count if +// we want to ensure that *at least* the requested number of diff layers remain. func (t *Tree) Cap(root common.Hash, layers int) error { // Retrieve the head snapshot to cap from snap := t.Snapshot(root) @@ -288,10 +330,7 @@ func (t *Tree) Cap(root common.Hash, layers int) error { // Flattening the bottom-most diff layer requires special casing since there's // no child to rewire to the grandparent. In that case we can fake a temporary // child for the capping and then remove it. - var persisted *diskLayer - - switch layers { - case 0: + if layers == 0 { // If full commit was requested, flatten the diffs and merge onto disk diff.lock.RLock() base := diffToDisk(diff.flatten().(*diffLayer)) @@ -300,33 +339,9 @@ func (t *Tree) Cap(root common.Hash, layers int) error { // Replace the entire snapshot tree with the flat base t.layers = map[common.Hash]snapshot{base.root: base} return nil - - case 1: - // If full flattening was requested, flatten the diffs but only merge if the - // memory limit was reached - var ( - bottom *diffLayer - base *diskLayer - ) - diff.lock.RLock() - bottom = diff.flatten().(*diffLayer) - if bottom.memory >= aggregatorMemoryLimit { - base = diffToDisk(bottom) - } - diff.lock.RUnlock() - - // If all diff layers were removed, replace the entire snapshot tree - if base != nil { - t.layers = map[common.Hash]snapshot{base.root: base} - return nil - } - // Merge the new aggregated layer into the snapshot tree, clean stales below - t.layers[bottom.root] = bottom - - default: - // Many layers requested to be retained, cap normally - persisted = t.cap(diff, layers) } + persisted := t.cap(diff, layers) + // Remove any layer that is stale or links into a stale layer children := make(map[common.Hash][]common.Hash) for root, snap := range t.layers { @@ -368,10 +383,16 @@ func (t *Tree) Cap(root common.Hash, layers int) error { // crossed. All diffs beyond the permitted number are flattened downwards. If the // layer limit is reached, memory cap is also enforced (but not before). // -// The method returns the new disk layer if diffs were persistend into it. +// The method returns the new disk layer if diffs were persisted into it. +// +// Note, the final diff layer count in general will be one more than the amount +// requested. This happens because the bottom-most diff layer is the accumulator +// which may or may not overflow and cascade to disk. Since this last layer's +// survival is only known *after* capping, we need to omit it from the count if +// we want to ensure that *at least* the requested number of diff layers remain. 
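The note added to `Cap` above is subtle enough to deserve a caller-side sketch: requesting `layers` diff layers generally leaves `layers+1` in the tree, because the bottom-most accumulator only collapses to disk once it outgrows its memory allowance, while a zero limit flattens everything. The helper below is illustrative only:

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// capToDepth keeps roughly 128 diff layers below head; the accumulator beneath
// them may survive as an extra layer until it overflows its memory budget.
func capToDepth(snaps *snapshot.Tree, head common.Hash) error {
	if err := snaps.Cap(head, 128); err != nil {
		return err
	}
	// A full flatten onto the disk layer would instead be requested with:
	//   snaps.Cap(head, 0)
	return nil
}
```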
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer { // Dive until we run out of layers or reach the persistent database - for ; layers > 2; layers-- { + for i := 0; i < layers-1; i++ { // If we still have diff layers below, continue down if parent, ok := diff.parent.(*diffLayer); ok { diff = parent @@ -647,7 +668,7 @@ func (t *Tree) Rebuild(root common.Hash) { panic(fmt.Sprintf("unknown layer type: %T", layer)) } } - // Start generating a new snapshot from scratch on a backgroung thread. The + // Start generating a new snapshot from scratch on a background thread. The // generator will run a wiper first if there's not one running right now. log.Info("Rebuilding state snapshot") t.layers = map[common.Hash]snapshot{ @@ -681,6 +702,38 @@ func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek commo return newFastStorageIterator(t, root, account, seek) } +// Verify iterates the whole state(all the accounts as well as the corresponding storages) +// with the specific root and compares the re-computed hash with the original one. +func (t *Tree) Verify(root common.Hash) error { + acctIt, err := t.AccountIterator(root, common.Hash{}) + if err != nil { + return err + } + defer acctIt.Release() + + got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) { + storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}) + if err != nil { + return common.Hash{}, err + } + defer storageIt.Release() + + hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false) + if err != nil { + return common.Hash{}, err + } + return hash, nil + }, newGenerateStats(), true) + + if err != nil { + return err + } + if got != root { + return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root) + } + return nil +} + // disklayer is an internal helper function to return the disk layer. // The lock of snapTree is assumed to be held already. func (t *Tree) disklayer() *diskLayer { diff --git a/core/state/snapshot/snapshot_test.go b/core/state/snapshot/snapshot_test.go index ca4fa0a055..4b787cfe2e 100644 --- a/core/state/snapshot/snapshot_test.go +++ b/core/state/snapshot/snapshot_test.go @@ -17,6 +17,7 @@ package snapshot import ( + "encoding/binary" "fmt" "math/big" "math/rand" @@ -161,57 +162,10 @@ func TestDiskLayerExternalInvalidationPartialFlatten(t *testing.T) { defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) aggregatorMemoryLimit = 0 - if err := snaps.Cap(common.HexToHash("0x03"), 2); err != nil { - t.Fatalf("failed to merge diff layer onto disk: %v", err) - } - // Since the base layer was modified, ensure that data retrievald on the external reference fail - if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { - t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) - } - if slot, err := ref.Storage(common.HexToHash("0xa1"), common.HexToHash("0xb1")); err != ErrSnapshotStale { - t.Errorf("stale reference returned storage slot: %#x (err: %v)", slot, err) - } - if n := len(snaps.layers); n != 2 { - t.Errorf("post-cap layer count mismatch: have %d, want %d", n, 2) - fmt.Println(snaps.layers) - } -} - -// Tests that if a diff layer becomes stale, no active external references will -// be returned with junk data. 
This version of the test flattens every diff layer -// to check internal corner case around the bottom-most memory accumulator. -func TestDiffLayerExternalInvalidationFullFlatten(t *testing.T) { - // Create an empty base layer and a snapshot tree out of it - base := &diskLayer{ - diskdb: rawdb.NewMemoryDatabase(), - root: common.HexToHash("0x01"), - cache: fastcache.New(1024 * 500), - } - snaps := &Tree{ - layers: map[common.Hash]snapshot{ - base.root: base, - }, - } - // Commit two diffs on top and retrieve a reference to the bottommost - accounts := map[common.Hash][]byte{ - common.HexToHash("0xa1"): randomAccount(), - } - if err := snaps.Update(common.HexToHash("0x02"), common.HexToHash("0x01"), nil, accounts, nil); err != nil { - t.Fatalf("failed to create a diff layer: %v", err) - } - if err := snaps.Update(common.HexToHash("0x03"), common.HexToHash("0x02"), nil, accounts, nil); err != nil { - t.Fatalf("failed to create a diff layer: %v", err) - } - if n := len(snaps.layers); n != 3 { - t.Errorf("pre-cap layer count mismatch: have %d, want %d", n, 3) - } - ref := snaps.Snapshot(common.HexToHash("0x02")) - - // Flatten the diff layer into the bottom accumulator if err := snaps.Cap(common.HexToHash("0x03"), 1); err != nil { - t.Fatalf("failed to flatten diff layer into accumulator: %v", err) + t.Fatalf("failed to merge accumulator onto disk: %v", err) } - // Since the accumulator diff layer was modified, ensure that data retrievald on the external reference fail + // Since the base layer was modified, ensure that data retrievald on the external reference fail if acc, err := ref.Account(common.HexToHash("0x01")); err != ErrSnapshotStale { t.Errorf("stale reference returned account: %#x (err: %v)", acc, err) } @@ -266,7 +220,7 @@ func TestDiffLayerExternalInvalidationPartialFlatten(t *testing.T) { t.Errorf("layers modified, got %d exp %d", got, exp) } // Flatten the diff layer into the bottom accumulator - if err := snaps.Cap(common.HexToHash("0x04"), 2); err != nil { + if err := snaps.Cap(common.HexToHash("0x04"), 1); err != nil { t.Fatalf("failed to flatten diff layer into accumulator: %v", err) } // Since the accumulator diff layer was modified, ensure that data retrievald on the external reference fail @@ -369,3 +323,103 @@ func TestPostCapBasicDataAccess(t *testing.T) { t.Error("expected error capping the disk layer, got none") } } + +// TestSnaphots tests the functionality for retrieveing the snapshot +// with given head root and the desired depth. 
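The `Verify` method added to snapshot.go a little earlier recomputes the whole state root from the snapshot iterators and compares it with the expected one. A hedged sketch of a caller (the surrounding setup is assumed, not shown in the patch):

```go
package example

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
)

// verifyState re-hashes every account and storage slot reachable from root via
// the snapshot layers and fails loudly if the recomputed root diverges.
func verifyState(snaps *snapshot.Tree, root common.Hash) {
	if err := snaps.Verify(root); err != nil {
		log.Fatalf("state snapshot is corrupted: %v", err)
	}
	log.Printf("state snapshot verified for root %x", root)
}
```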
+func TestSnaphots(t *testing.T) { + // setAccount is a helper to construct a random account entry and assign it to + // an account slot in a snapshot + setAccount := func(accKey string) map[common.Hash][]byte { + return map[common.Hash][]byte{ + common.HexToHash(accKey): randomAccount(), + } + } + makeRoot := func(height uint64) common.Hash { + var buffer [8]byte + binary.BigEndian.PutUint64(buffer[:], height) + return common.BytesToHash(buffer[:]) + } + // Create a starting base layer and a snapshot tree out of it + base := &diskLayer{ + diskdb: rawdb.NewMemoryDatabase(), + root: makeRoot(1), + cache: fastcache.New(1024 * 500), + } + snaps := &Tree{ + layers: map[common.Hash]snapshot{ + base.root: base, + }, + } + // Construct the snapshots with 129 layers, flattening whatever's above that + var ( + last = common.HexToHash("0x01") + head common.Hash + ) + for i := 0; i < 129; i++ { + head = makeRoot(uint64(i + 2)) + snaps.Update(head, last, nil, setAccount(fmt.Sprintf("%d", i+2)), nil) + last = head + snaps.Cap(head, 128) // 130 layers (128 diffs + 1 accumulator + 1 disk) + } + var cases = []struct { + headRoot common.Hash + limit int + nodisk bool + expected int + expectBottom common.Hash + }{ + {head, 0, false, 0, common.Hash{}}, + {head, 64, false, 64, makeRoot(129 + 2 - 64)}, + {head, 128, false, 128, makeRoot(3)}, // Normal diff layers, no accumulator + {head, 129, true, 129, makeRoot(2)}, // All diff layers, including accumulator + {head, 130, false, 130, makeRoot(1)}, // All diff layers + disk layer + } + for i, c := range cases { + layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk) + if len(layers) != c.expected { + t.Errorf("non-overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers)) + } + if len(layers) == 0 { + continue + } + bottommost := layers[len(layers)-1] + if bottommost.Root() != c.expectBottom { + t.Errorf("non-overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root()) + } + } + // Above we've tested the normal capping, which leaves the accumulator live. + // Test that if the bottommost accumulator diff layer overflows the allowed + // memory limit, the snapshot tree gets capped to one less layer. 
+ // Commit the diff layer onto the disk and ensure it's persisted + defer func(memcap uint64) { aggregatorMemoryLimit = memcap }(aggregatorMemoryLimit) + aggregatorMemoryLimit = 0 + + snaps.Cap(head, 128) // 129 (128 diffs + 1 overflown accumulator + 1 disk) + + cases = []struct { + headRoot common.Hash + limit int + nodisk bool + expected int + expectBottom common.Hash + }{ + {head, 0, false, 0, common.Hash{}}, + {head, 64, false, 64, makeRoot(129 + 2 - 64)}, + {head, 128, false, 128, makeRoot(3)}, // All diff layers, accumulator was flattened + {head, 129, true, 128, makeRoot(3)}, // All diff layers, accumulator was flattened + {head, 130, false, 129, makeRoot(2)}, // All diff layers + disk layer + } + for i, c := range cases { + layers := snaps.Snapshots(c.headRoot, c.limit, c.nodisk) + if len(layers) != c.expected { + t.Errorf("overflow test %d: returned snapshot layers are mismatched, want %v, got %v", i, c.expected, len(layers)) + } + if len(layers) == 0 { + continue + } + bottommost := layers[len(layers)-1] + if bottommost.Root() != c.expectBottom { + t.Errorf("overflow test %d: snapshot mismatch, want %v, get %v", i, c.expectBottom, bottommost.Root()) + } + } +} diff --git a/core/state/state_object.go b/core/state/state_object.go index d0d3b4513e..f93f47d5f5 100644 --- a/core/state/state_object.go +++ b/core/state/state_object.go @@ -157,11 +157,20 @@ func (s *stateObject) touch() { func (s *stateObject) getTrie(db Database) Trie { if s.trie == nil { - var err error - s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root) - if err != nil { - s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{}) - s.setError(fmt.Errorf("can't create storage trie: %v", err)) + // Try fetching from prefetcher first + // We don't prefetch empty tries + if s.data.Root != emptyRoot && s.db.prefetcher != nil { + // When the miner is creating the pending state, there is no + // prefetcher + s.trie = s.db.prefetcher.trie(s.data.Root) + } + if s.trie == nil { + var err error + s.trie, err = db.OpenStorageTrie(s.addrHash, s.data.Root) + if err != nil { + s.trie, _ = db.OpenStorageTrie(s.addrHash, common.Hash{}) + s.setError(fmt.Errorf("can't create storage trie: %v", err)) + } } } return s.trie @@ -197,12 +206,24 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If no live objects are available, attempt to use snapshots var ( - enc []byte - err error + enc []byte + err error + meter *time.Duration ) + readStart := time.Now() + if metrics.EnabledExpensive { + // If the snap is 'under construction', the first lookup may fail. If that + // happens, we don't want to double-count the time elapsed. Thus this + // dance with the metering. 
+ defer func() { + if meter != nil { + *meter += time.Since(readStart) + } + }() + } if s.db.snap != nil { if metrics.EnabledExpensive { - defer func(start time.Time) { s.db.SnapshotStorageReads += time.Since(start) }(time.Now()) + meter = &s.db.SnapshotStorageReads } // If the object was destructed in *this* block (and potentially resurrected), // the storage has been cleared out, and we should *not* consult the previous @@ -217,8 +238,14 @@ func (s *stateObject) GetCommittedState(db Database, key common.Hash) common.Has } // If snapshot unavailable or reading from it failed, load from the database if s.db.snap == nil || err != nil { + if meter != nil { + // If we already spent time checking the snapshot, account for it + // and reset the readStart + *meter += time.Since(readStart) + readStart = time.Now() + } if metrics.EnabledExpensive { - defer func(start time.Time) { s.db.StorageReads += time.Since(start) }(time.Now()) + meter = &s.db.StorageReads } if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil { s.setError(err) @@ -282,9 +309,16 @@ func (s *stateObject) setState(key, value common.Hash) { // finalise moves all dirty storage slots into the pending area to be hashed or // committed later. It is invoked at the end of every transaction. -func (s *stateObject) finalise() { +func (s *stateObject) finalise(prefetch bool) { + slotsToPrefetch := make([][]byte, 0, len(s.dirtyStorage)) for key, value := range s.dirtyStorage { s.pendingStorage[key] = value + if value != s.originStorage[key] { + slotsToPrefetch = append(slotsToPrefetch, common.CopyBytes(key[:])) // Copy needed for closure + } + } + if s.db.prefetcher != nil && prefetch && len(slotsToPrefetch) > 0 && s.data.Root != emptyRoot { + s.db.prefetcher.prefetch(s.data.Root, slotsToPrefetch) } if len(s.dirtyStorage) > 0 { s.dirtyStorage = make(Storage) @@ -295,7 +329,7 @@ func (s *stateObject) finalise() { // It will return nil if the trie has not been loaded and no changes have been made func (s *stateObject) updateTrie(db Database) Trie { // Make sure all dirty slots are finalized into the pending storage area - s.finalise() + s.finalise(false) // Don't prefetch any more, pull directly if need be if len(s.pendingStorage) == 0 { return s.trie } @@ -303,18 +337,13 @@ func (s *stateObject) updateTrie(db Database) Trie { if metrics.EnabledExpensive { defer func(start time.Time) { s.db.StorageUpdates += time.Since(start) }(time.Now()) } - // Retrieve the snapshot storage map for the object + // The snapshot storage map for the object var storage map[common.Hash][]byte - if s.db.snap != nil { - // Retrieve the old storage map, if available, create a new one otherwise - storage = s.db.snapStorage[s.addrHash] - if storage == nil { - storage = make(map[common.Hash][]byte) - s.db.snapStorage[s.addrHash] = storage - } - } // Insert all the pending updates into the trie tr := s.getTrie(db) + hasher := s.db.hasher + + usedStorage := make([][]byte, 0, len(s.pendingStorage)) for key, value := range s.pendingStorage { // Skip noop changes, persist actual changes if value == s.originStorage[key] { @@ -331,9 +360,20 @@ func (s *stateObject) updateTrie(db Database) Trie { s.setError(tr.TryUpdate(key[:], v)) } // If state snapshotting is active, cache the data til commit - if storage != nil { - storage[crypto.Keccak256Hash(key[:])] = v // v will be nil if value is 0x00 + if s.db.snap != nil { + if storage == nil { + // Retrieve the old storage map, if available, create a new one otherwise + if storage = s.db.snapStorage[s.addrHash]; storage == 
nil { + storage = make(map[common.Hash][]byte) + s.db.snapStorage[s.addrHash] = storage + } + } + storage[crypto.HashData(hasher, key[:])] = v // v will be nil if value is 0x00 } + usedStorage = append(usedStorage, common.CopyBytes(key[:])) // Copy needed for closure + } + if s.db.prefetcher != nil { + s.db.prefetcher.used(s.data.Root, usedStorage) } if len(s.pendingStorage) > 0 { s.pendingStorage = make(Storage) diff --git a/core/state/state_test.go b/core/state/state_test.go index 0dc4c0ad63..22e93d7a95 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -41,7 +41,9 @@ func newStateTest() *stateTest { } func TestDump(t *testing.T) { - s := newStateTest() + db := rawdb.NewMemoryDatabase() + sdb, _ := New(common.Hash{}, NewDatabaseWithConfig(db, nil), nil) + s := &stateTest{db: db, state: sdb} // generate a few entries obj1 := s.state.GetOrNewStateObject(toAddr([]byte{0x01})) @@ -168,7 +170,7 @@ func TestSnapshot2(t *testing.T) { state.setStateObject(so0) root, _ := state.Commit(false) - state.Reset(root) + state, _ = New(root, state.db, state.snaps) // and one with deleted == true so1 := state.getStateObject(stateobjaddr1) diff --git a/core/state/statedb.go b/core/state/statedb.go index fe30f595ed..2e5d6e47c8 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -62,8 +62,11 @@ func (n *proofList) Delete(key []byte) error { // * Contracts // * Accounts type StateDB struct { - db Database - trie Trie + db Database + prefetcher *triePrefetcher + originalRoot common.Hash // The pre-state root, before any changes were made + trie Trie + hasher crypto.KeccakState snaps *snapshot.Tree snap snapshot.Snapshot @@ -125,6 +128,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) sdb := &StateDB{ db: db, trie: tr, + originalRoot: root, snaps: snaps, stateObjects: make(map[common.Address]*stateObject), stateObjectsPending: make(map[common.Address]struct{}), @@ -133,6 +137,7 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) preimages: make(map[common.Hash][]byte), journal: newJournal(), accessList: newAccessList(), + hasher: crypto.NewKeccakState(), } if sdb.snaps != nil { if sdb.snap = sdb.snaps.Snapshot(root); sdb.snap != nil { @@ -144,6 +149,28 @@ func New(root common.Hash, db Database, snaps *snapshot.Tree) (*StateDB, error) return sdb, nil } +// StartPrefetcher initializes a new trie prefetcher to pull in nodes from the +// state trie concurrently while the state is mutated so that when we reach the +// commit phase, most of the needed data is already hot. +func (s *StateDB) StartPrefetcher(namespace string) { + if s.prefetcher != nil { + s.prefetcher.close() + s.prefetcher = nil + } + if s.snap != nil { + s.prefetcher = newTriePrefetcher(s.db, s.originalRoot, namespace) + } +} + +// StopPrefetcher terminates a running prefetcher and reports any leftover stats +// from the gathered metrics. +func (s *StateDB) StopPrefetcher() { + if s.prefetcher != nil { + s.prefetcher.close() + s.prefetcher = nil + } +} + // setError remembers the first non-nil error it is called with. func (s *StateDB) setError(err error) { if s.dbErr == nil { @@ -155,37 +182,6 @@ func (s *StateDB) Error() error { return s.dbErr } -// Reset clears out all ephemeral state objects from the state db, but keeps -// the underlying state trie to avoid reloading data for the next operations. 
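`StartPrefetcher` and `StopPrefetcher` above give callers explicit control over the trie prefetcher's lifetime; the call sites themselves are not part of this hunk. The following is therefore only a hypothetical sketch of bracketing block processing with them (the `process` callback and the "miner" namespace are invented):

```go
package example

import (
	"github.com/ethereum/go-ethereum/core/state"
)

// withPrefetcher runs a block-processing callback with a trie prefetcher
// active, making sure it is torn down (and its metrics reported) afterwards.
func withPrefetcher(statedb *state.StateDB, process func(*state.StateDB) error) error {
	statedb.StartPrefetcher("miner") // namespace only affects the metrics prefix
	defer statedb.StopPrefetcher()

	return process(statedb)
}
```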
-func (s *StateDB) Reset(root common.Hash) error { - tr, err := s.db.OpenTrie(root) - if err != nil { - return err - } - s.trie = tr - s.stateObjects = make(map[common.Address]*stateObject) - s.stateObjectsPending = make(map[common.Address]struct{}) - s.stateObjectsDirty = make(map[common.Address]struct{}) - s.thash = common.Hash{} - s.bhash = common.Hash{} - s.txIndex = 0 - s.logs = make(map[common.Hash][]*types.Log) - s.logSize = 0 - s.preimages = make(map[common.Hash][]byte) - s.clearJournalAndRefund() - - if s.snaps != nil { - s.snapAccounts, s.snapDestructs, s.snapStorage = nil, nil, nil - if s.snap = s.snaps.Snapshot(root); s.snap != nil { - s.snapDestructs = make(map[common.Hash]struct{}) - s.snapAccounts = make(map[common.Hash][]byte) - s.snapStorage = make(map[common.Hash]map[common.Hash][]byte) - } - } - s.accessList = newAccessList() - return nil -} - func (s *StateDB) AddLog(log *types.Log) { s.journal.append(addLogChange{txhash: s.thash}) @@ -314,14 +310,19 @@ func (s *StateDB) GetState(addr common.Address, hash common.Hash) common.Hash { return common.Hash{} } -// GetProof returns the MerkleProof for a given Account -func (s *StateDB) GetProof(a common.Address) ([][]byte, error) { +// GetProof returns the Merkle proof for a given account. +func (s *StateDB) GetProof(addr common.Address) ([][]byte, error) { + return s.GetProofByHash(crypto.Keccak256Hash(addr.Bytes())) +} + +// GetProofByHash returns the Merkle proof for a given account. +func (s *StateDB) GetProofByHash(addrHash common.Hash) ([][]byte, error) { var proof proofList - err := s.trie.Prove(crypto.Keccak256(a.Bytes()), 0, &proof) - return [][]byte(proof), err + err := s.trie.Prove(addrHash[:], 0, &proof) + return proof, err } -// GetStorageProof returns the StorageProof for given key +// GetStorageProof returns the Merkle proof for given storage slot. func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, error) { var proof proofList trie := s.StorageTrie(a) @@ -329,7 +330,18 @@ func (s *StateDB) GetStorageProof(a common.Address, key common.Hash) ([][]byte, return proof, errors.New("storage trie for requested address does not exist") } err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) - return [][]byte(proof), err + return proof, err +} + +// GetStorageProofByHash returns the Merkle proof for given storage slot. +func (s *StateDB) GetStorageProofByHash(a common.Address, key common.Hash) ([][]byte, error) { + var proof proofList + trie := s.StorageTrie(a) + if trie == nil { + return proof, errors.New("storage trie for requested address does not exist") + } + err := trie.Prove(crypto.Keccak256(key.Bytes()), 0, &proof) + return proof, err } // GetCommittedState retrieves a value from the given account's committed storage trie. 
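The proof helpers above now come in two flavours: by address, which hashes internally, and by pre-hashed key. A short sketch retrieving an account proof via the pre-hashed variant plus a storage proof for one slot (the address and slot are illustrative):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/crypto"
)

// proofs fetches a Merkle proof for an account and for one of its storage slots.
func proofs(statedb *state.StateDB, addr common.Address, slot common.Hash) ([][]byte, [][]byte, error) {
	// GetProof(addr) is equivalent to GetProofByHash(keccak256(addr)).
	accountProof, err := statedb.GetProofByHash(crypto.Keccak256Hash(addr.Bytes()))
	if err != nil {
		return nil, nil, err
	}
	storageProof, err := statedb.GetStorageProof(addr, slot)
	if err != nil {
		return nil, nil, err
	}
	return accountProof, storageProof, nil
}
```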
@@ -516,7 +528,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *stateObject { defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now()) } var acc *snapshot.Account - if acc, err = s.snap.Account(crypto.Keccak256Hash(addr.Bytes())); err == nil { + if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil { if acc == nil { return nil } @@ -659,6 +671,7 @@ func (s *StateDB) Copy() *StateDB { logSize: s.logSize, preimages: make(map[common.Hash][]byte, len(s.preimages)), journal: newJournal(), + hasher: crypto.NewKeccakState(), } // Copy the dirty states, logs, and preimages for addr := range s.journal.dirties { @@ -708,6 +721,38 @@ func (s *StateDB) Copy() *StateDB { // However, it doesn't cost us much to copy an empty list, so we do it anyway // to not blow up if we ever decide copy it in the middle of a transaction state.accessList = s.accessList.Copy() + + // If there's a prefetcher running, make an inactive copy of it that can + // only access data but does not actively preload (since the user will not + // know that they need to explicitly terminate an active copy). + if s.prefetcher != nil { + state.prefetcher = s.prefetcher.copy() + } + if s.snaps != nil { + // In order for the miner to be able to use and make additions + // to the snapshot tree, we need to copy that aswell. + // Otherwise, any block mined by ourselves will cause gaps in the tree, + // and force the miner to operate trie-backed only + state.snaps = s.snaps + state.snap = s.snap + // deep copy needed + state.snapDestructs = make(map[common.Hash]struct{}) + for k, v := range s.snapDestructs { + state.snapDestructs[k] = v + } + state.snapAccounts = make(map[common.Hash][]byte) + for k, v := range s.snapAccounts { + state.snapAccounts[k] = v + } + state.snapStorage = make(map[common.Hash]map[common.Hash][]byte) + for k, v := range s.snapStorage { + temp := make(map[common.Hash][]byte) + for kk, vv := range v { + temp[kk] = vv + } + state.snapStorage[k] = temp + } + } return state } @@ -744,6 +789,7 @@ func (s *StateDB) GetRefund() uint64 { // the journal as well as the refunds. Finalise, however, will not push any updates // into the tries just yet. Only IntermediateRoot or Commit will do that. func (s *StateDB) Finalise(deleteEmptyObjects bool) { + addressesToPrefetch := make([][]byte, 0, len(s.journal.dirties)) for addr := range s.journal.dirties { obj, exist := s.stateObjects[addr] if !exist { @@ -768,10 +814,18 @@ func (s *StateDB) Finalise(deleteEmptyObjects bool) { delete(s.snapStorage, obj.addrHash) // Clear out any previously updated storage data (may be recreated via a ressurrect) } } else { - obj.finalise() + obj.finalise(true) // Prefetch slots in the background } s.stateObjectsPending[addr] = struct{}{} s.stateObjectsDirty[addr] = struct{}{} + + // At this point, also ship the address off to the precacher. The precacher + // will start loading tries, and when the change is eventually committed, + // the commit-phase will be a lot faster + addressesToPrefetch = append(addressesToPrefetch, common.CopyBytes(addr[:])) // Copy needed for closure + } + if s.prefetcher != nil && len(addressesToPrefetch) > 0 { + s.prefetcher.prefetch(s.originalRoot, addressesToPrefetch) } // Invalidate journal because reverting across transactions is not allowed. 
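The switch from `crypto.Keccak256Hash` to `crypto.HashData` earlier in this hunk reuses a single `KeccakState` per StateDB instead of allocating a fresh hasher on every lookup. A minimal illustration that the two call styles produce the same digest:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	data := []byte("hello snapshot")

	// One-shot helper: allocates a hasher internally on every call.
	h1 := crypto.Keccak256Hash(data)

	// Reusable hasher: the KeccakState can live on a long-lived struct
	// (as StateDB now does) and be fed many inputs without reallocation.
	hasher := crypto.NewKeccakState()
	h2 := crypto.HashData(hasher, data)

	fmt.Println(h1 == h2) // true
}
```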
s.clearJournalAndRefund() @@ -784,14 +838,49 @@ func (s *StateDB) IntermediateRoot(deleteEmptyObjects bool) common.Hash { // Finalise all the dirty storage states and write them into the tries s.Finalise(deleteEmptyObjects) + // If there was a trie prefetcher operating, it gets aborted and irrevocably + // modified after we start retrieving tries. Remove it from the statedb after + // this round of use. + // + // This is weird pre-byzantium since the first tx runs with a prefetcher and + // the remainder without, but pre-byzantium even the initial prefetcher is + // useless, so no sleep lost. + prefetcher := s.prefetcher + if s.prefetcher != nil { + defer func() { + s.prefetcher.close() + s.prefetcher = nil + }() + } + // Although naively it makes sense to retrieve the account trie and then do + // the contract storage and account updates sequentially, that short circuits + // the account prefetcher. Instead, let's process all the storage updates + // first, giving the account prefeches just a few more milliseconds of time + // to pull useful data from disk. + for addr := range s.stateObjectsPending { + if obj := s.stateObjects[addr]; !obj.deleted { + obj.updateRoot(s.db) + } + } + // Now we're about to start to write changes to the trie. The trie is so far + // _untouched_. We can check with the prefetcher, if it can give us a trie + // which has the same root, but also has some content loaded into it. + if prefetcher != nil { + if trie := prefetcher.trie(s.originalRoot); trie != nil { + s.trie = trie + } + } + usedAddrs := make([][]byte, 0, len(s.stateObjectsPending)) for addr := range s.stateObjectsPending { - obj := s.stateObjects[addr] - if obj.deleted { + if obj := s.stateObjects[addr]; obj.deleted { s.deleteStateObject(obj) } else { - obj.updateRoot(s.db) s.updateStateObject(obj) } + usedAddrs = append(usedAddrs, common.CopyBytes(addr[:])) // Copy needed for closure + } + if prefetcher != nil { + prefetcher.used(s.originalRoot, usedAddrs) } if len(s.stateObjectsPending) > 0 { s.stateObjectsPending = make(map[common.Address]struct{}) @@ -894,6 +983,32 @@ func (s *StateDB) Commit(deleteEmptyObjects bool) (common.Hash, error) { return root, err } +// PrepareAccessList handles the preparatory steps for executing a state transition with +// regards to both EIP-2929 and EIP-2930: +// +// - Add sender to access list (2929) +// - Add destination to access list (2929) +// - Add precompiles to access list (2929) +// - Add the contents of the optional tx access list (2930) +// +// This method should only be called if Yolov3/Berlin/2929+2930 is applicable at the current number. 
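`PrepareAccessList`, documented above and defined just below, seeds the access list before executing a Berlin-style transaction. A hedged caller sketch with a hand-built `types.AccessList` and a placeholder precompile set (both invented for the example; real callers would use the active fork's precompile list):

```go
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
)

// warmAccessList pre-warms the statedb access list for a transaction from
// sender to `to`, as EIP-2929 requires, plus the optional EIP-2930 entries.
func warmAccessList(statedb *state.StateDB, sender common.Address, to *common.Address) {
	precompiles := []common.Address{
		common.BytesToAddress([]byte{1}), // e.g. ecrecover; placeholder set
	}
	txList := types.AccessList{{
		Address:     common.HexToAddress("0xdeadbeef00000000000000000000000000000000"),
		StorageKeys: []common.Hash{common.HexToHash("0x01")},
	}}
	statedb.PrepareAccessList(sender, to, precompiles, txList)
}
```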
+func (s *StateDB) PrepareAccessList(sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) { + s.AddAddressToAccessList(sender) + if dst != nil { + s.AddAddressToAccessList(*dst) + // If it's a create-tx, the destination will be added inside evm.create + } + for _, addr := range precompiles { + s.AddAddressToAccessList(addr) + } + for _, el := range list { + s.AddAddressToAccessList(el.Address) + for _, key := range el.StorageKeys { + s.AddSlotToAccessList(el.Address, key) + } + } +} + // AddAddressToAccessList adds the given address to the access list func (s *StateDB) AddAddressToAccessList(addr common.Address) { if s.accessList.AddAddress(addr) { diff --git a/core/state/statedb_test.go b/core/state/statedb_test.go index 70d01ff3dd..220e28525c 100644 --- a/core/state/statedb_test.go +++ b/core/state/statedb_test.go @@ -474,7 +474,7 @@ func TestTouchDelete(t *testing.T) { s := newStateTest() s.state.GetOrNewStateObject(common.Address{}) root, _ := s.state.Commit(false) - s.state.Reset(root) + s.state, _ = New(root, s.state.db, s.state.snaps) snapshot := s.state.Snapshot() s.state.AddBalance(common.Address{}, new(big.Int)) @@ -676,7 +676,7 @@ func TestDeleteCreateRevert(t *testing.T) { state.SetBalance(addr, big.NewInt(1)) root, _ := state.Commit(false) - state.Reset(root) + state, _ = New(root, state.db, state.snaps) // Simulate self-destructing in one transaction, then create-reverting in another state.Suicide(addr) @@ -688,7 +688,7 @@ func TestDeleteCreateRevert(t *testing.T) { // Commit the entire state and make sure we don't crash and have the correct state root, _ = state.Commit(true) - state.Reset(root) + state, _ = New(root, state.db, state.snaps) if state.getStateObject(addr) != nil { t.Fatalf("self-destructed contract came alive") diff --git a/core/state/sync_test.go b/core/state/sync_test.go index deb4b52b4c..9c4867093d 100644 --- a/core/state/sync_test.go +++ b/core/state/sync_test.go @@ -62,7 +62,8 @@ func makeTestState() (Database, common.Hash, []*testAccount) { } if i%5 == 0 { for j := byte(0); j < 5; j++ { - obj.SetState(db, crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}), crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j})) + hash := crypto.Keccak256Hash([]byte{i, i, i, i, i, j, j}) + obj.SetState(db, hash, hash) } } state.updateStateObject(obj) @@ -401,15 +402,14 @@ func TestIncompleteStateSync(t *testing.T) { // Create a random state to copy srcDb, srcRoot, srcAccounts := makeTestState() - // isCode reports whether the hash is contract code hash. - isCode := func(hash common.Hash) bool { - for _, acc := range srcAccounts { - if hash == crypto.Keccak256Hash(acc.code) { - return true - } + // isCodeLookup to save some hashing + var isCode = make(map[common.Hash]struct{}) + for _, acc := range srcAccounts { + if len(acc.code) > 0 { + isCode[crypto.Keccak256Hash(acc.code)] = struct{}{} } - return false } + isCode[common.BytesToHash(emptyCodeHash)] = struct{}{} checkTrieConsistency(srcDb.TrieDB().DiskDB().(ethdb.Database), srcRoot) // Create a destination state and sync with the scheduler @@ -447,15 +447,13 @@ func TestIncompleteStateSync(t *testing.T) { batch.Write() for _, result := range results { added = append(added, result.Hash) - } - // Check that all known sub-tries added so far are complete or missing entirely. - for _, hash := range added { - if isCode(hash) { + // Check that all known sub-tries added so far are complete or missing entirely. 
+ if _, ok := isCode[result.Hash]; ok { continue } // Can't use checkStateConsistency here because subtrie keys may have odd // length and crash in LeafKey. - if err := checkTrieConsistency(dstDb, hash); err != nil { + if err := checkTrieConsistency(dstDb, result.Hash); err != nil { t.Fatalf("state inconsistent: %v", err) } } @@ -466,9 +464,9 @@ func TestIncompleteStateSync(t *testing.T) { // Sanity check that removing any node from the database is detected for _, node := range added[1:] { var ( - key = node.Bytes() - code = isCode(node) - val []byte + key = node.Bytes() + _, code = isCode[node] + val []byte ) if code { val = rawdb.ReadCode(dstDb, node) diff --git a/core/state/trie_prefetcher.go b/core/state/trie_prefetcher.go new file mode 100644 index 0000000000..ac5e95c5c2 --- /dev/null +++ b/core/state/trie_prefetcher.go @@ -0,0 +1,334 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package state + +import ( + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" +) + +var ( + // triePrefetchMetricsPrefix is the prefix under which to publis the metrics. + triePrefetchMetricsPrefix = "trie/prefetch/" +) + +// triePrefetcher is an active prefetcher, which receives accounts or storage +// items and does trie-loading of them. The goal is to get as much useful content +// into the caches as possible. +// +// Note, the prefetcher's API is not thread safe. 
+type triePrefetcher struct { + db Database // Database to fetch trie nodes through + root common.Hash // Root hash of theaccount trie for metrics + fetches map[common.Hash]Trie // Partially or fully fetcher tries + fetchers map[common.Hash]*subfetcher // Subfetchers for each trie + + deliveryMissMeter metrics.Meter + accountLoadMeter metrics.Meter + accountDupMeter metrics.Meter + accountSkipMeter metrics.Meter + accountWasteMeter metrics.Meter + storageLoadMeter metrics.Meter + storageDupMeter metrics.Meter + storageSkipMeter metrics.Meter + storageWasteMeter metrics.Meter +} + +// newTriePrefetcher +func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePrefetcher { + prefix := triePrefetchMetricsPrefix + namespace + p := &triePrefetcher{ + db: db, + root: root, + fetchers: make(map[common.Hash]*subfetcher), // Active prefetchers use the fetchers map + + deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), + accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), + accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), + accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), + accountWasteMeter: metrics.GetOrRegisterMeter(prefix+"/account/waste", nil), + storageLoadMeter: metrics.GetOrRegisterMeter(prefix+"/storage/load", nil), + storageDupMeter: metrics.GetOrRegisterMeter(prefix+"/storage/dup", nil), + storageSkipMeter: metrics.GetOrRegisterMeter(prefix+"/storage/skip", nil), + storageWasteMeter: metrics.GetOrRegisterMeter(prefix+"/storage/waste", nil), + } + return p +} + +// close iterates over all the subfetchers, aborts any that were left spinning +// and reports the stats to the metrics subsystem. +func (p *triePrefetcher) close() { + for _, fetcher := range p.fetchers { + fetcher.abort() // safe to do multiple times + + if metrics.Enabled { + if fetcher.root == p.root { + p.accountLoadMeter.Mark(int64(len(fetcher.seen))) + p.accountDupMeter.Mark(int64(fetcher.dups)) + p.accountSkipMeter.Mark(int64(len(fetcher.tasks))) + + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) + } + p.accountWasteMeter.Mark(int64(len(fetcher.seen))) + } else { + p.storageLoadMeter.Mark(int64(len(fetcher.seen))) + p.storageDupMeter.Mark(int64(fetcher.dups)) + p.storageSkipMeter.Mark(int64(len(fetcher.tasks))) + + for _, key := range fetcher.used { + delete(fetcher.seen, string(key)) + } + p.storageWasteMeter.Mark(int64(len(fetcher.seen))) + } + } + } + // Clear out all fetchers (will crash on a second call, deliberate) + p.fetchers = nil +} + +// copy creates a deep-but-inactive copy of the trie prefetcher. Any trie data +// already loaded will be copied over, but no goroutines will be started. This +// is mostly used in the miner which creates a copy of it's actively mutated +// state to be sealed while it may further mutate the state. 
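Taken together with the statedb.go hunks earlier, the prefetcher's lifecycle is: schedule keys with `prefetch`, retrieve a warmed trie with `trie`, report what was actually touched with `used`, and `close` to flush metrics, with `copy` yielding an inactive snapshot of whatever was loaded. Since these helpers are unexported, the sketch below simply restates that flow as a hypothetical caller inside the `state` package (the function name and key values are made up):

```go
package state

import "github.com/ethereum/go-ethereum/common"

// prefetchAndReuse sketches the intended call sequence around a triePrefetcher,
// mirroring what Finalise and IntermediateRoot do: warm the account trie in the
// background, then swap the preloaded trie in before hashing.
func prefetchAndReuse(db Database, root common.Hash, keys [][]byte) Trie {
	p := newTriePrefetcher(db, root, "example") // "example" is just a metrics namespace
	defer p.close()                             // aborts fetchers and reports metrics

	p.prefetch(root, keys) // schedule account keys while execution continues

	// ... transaction execution would happen here ...

	if tr := p.trie(root); tr != nil {
		p.used(root, keys) // record which keys turned out to be useful
		return tr          // a trie with (hopefully) hot nodes
	}
	return nil
}
```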
+func (p *triePrefetcher) copy() *triePrefetcher { + copy := &triePrefetcher{ + db: p.db, + root: p.root, + fetches: make(map[common.Hash]Trie), // Active prefetchers use the fetches map + + deliveryMissMeter: p.deliveryMissMeter, + accountLoadMeter: p.accountLoadMeter, + accountDupMeter: p.accountDupMeter, + accountSkipMeter: p.accountSkipMeter, + accountWasteMeter: p.accountWasteMeter, + storageLoadMeter: p.storageLoadMeter, + storageDupMeter: p.storageDupMeter, + storageSkipMeter: p.storageSkipMeter, + storageWasteMeter: p.storageWasteMeter, + } + // If the prefetcher is already a copy, duplicate the data + if p.fetches != nil { + for root, fetch := range p.fetches { + copy.fetches[root] = p.db.CopyTrie(fetch) + } + return copy + } + // Otherwise we're copying an active fetcher, retrieve the current states + for root, fetcher := range p.fetchers { + copy.fetches[root] = fetcher.peek() + } + return copy +} + +// prefetch schedules a batch of trie items to prefetch. +func (p *triePrefetcher) prefetch(root common.Hash, keys [][]byte) { + // If the prefetcher is an inactive one, bail out + if p.fetches != nil { + return + } + // Active fetcher, schedule the retrievals + fetcher := p.fetchers[root] + if fetcher == nil { + fetcher = newSubfetcher(p.db, root) + p.fetchers[root] = fetcher + } + fetcher.schedule(keys) +} + +// trie returns the trie matching the root hash, or nil if the prefetcher doesn't +// have it. +func (p *triePrefetcher) trie(root common.Hash) Trie { + // If the prefetcher is inactive, return from existing deep copies + if p.fetches != nil { + trie := p.fetches[root] + if trie == nil { + p.deliveryMissMeter.Mark(1) + return nil + } + return p.db.CopyTrie(trie) + } + // Otherwise the prefetcher is active, bail if no trie was prefetched for this root + fetcher := p.fetchers[root] + if fetcher == nil { + p.deliveryMissMeter.Mark(1) + return nil + } + // Interrupt the prefetcher if it's by any chance still running and return + // a copy of any pre-loaded trie. + fetcher.abort() // safe to do multiple times + + trie := fetcher.peek() + if trie == nil { + p.deliveryMissMeter.Mark(1) + return nil + } + return trie +} + +// used marks a batch of state items used to allow creating statistics as to +// how useful or wasteful the prefetcher is. +func (p *triePrefetcher) used(root common.Hash, used [][]byte) { + if fetcher := p.fetchers[root]; fetcher != nil { + fetcher.used = used + } +} + +// subfetcher is a trie fetcher goroutine responsible for pulling entries for a +// single trie. It is spawned when a new root is encountered and lives until the +// main prefetcher is paused and either all requested items are processed or if +// the trie being worked on is retrieved from the prefetcher. +type subfetcher struct { + db Database // Database to load trie nodes through + root common.Hash // Root hash of the trie to prefetch + trie Trie // Trie being populated with nodes + + tasks [][]byte // Items queued up for retrieval + lock sync.Mutex // Lock protecting the task queue + + wake chan struct{} // Wake channel if a new task is scheduled + stop chan struct{} // Channel to interrupt processing + term chan struct{} // Channel to signal iterruption + copy chan chan Trie // Channel to request a copy of the current trie + + seen map[string]struct{} // Tracks the entries already loaded + dups int // Number of duplicate preload tasks + used [][]byte // Tracks the entries used in the end +} + +// newSubfetcher creates a goroutine to prefetch state items belonging to a +// particular root hash. 
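The constructor that follows wires up the channels declared above. The load-bearing detail is the one-slot wake channel: any number of schedule calls made while the loop is busy coalesce into a single wake-up and never block. A standalone sketch of that pattern, illustrative only:

type wakeSignal struct {
	wake chan struct{} // capacity 1, matching subfetcher.wake
}

func newWakeSignal() *wakeSignal {
	return &wakeSignal{wake: make(chan struct{}, 1)}
}

// notify never blocks: if a wake-up is already pending, the new one is simply
// dropped, which is exactly what schedule relies on.
func (w *wakeSignal) notify() {
	select {
	case w.wake <- struct{}{}:
	default:
	}
}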
+func newSubfetcher(db Database, root common.Hash) *subfetcher { + sf := &subfetcher{ + db: db, + root: root, + wake: make(chan struct{}, 1), + stop: make(chan struct{}), + term: make(chan struct{}), + copy: make(chan chan Trie), + seen: make(map[string]struct{}), + } + go sf.loop() + return sf +} + +// schedule adds a batch of trie keys to the queue to prefetch. +func (sf *subfetcher) schedule(keys [][]byte) { + // Append the tasks to the current queue + sf.lock.Lock() + sf.tasks = append(sf.tasks, keys...) + sf.lock.Unlock() + + // Notify the prefetcher, it's fine if it's already terminated + select { + case sf.wake <- struct{}{}: + default: + } +} + +// peek tries to retrieve a deep copy of the fetcher's trie in whatever form it +// is currently. +func (sf *subfetcher) peek() Trie { + ch := make(chan Trie) + select { + case sf.copy <- ch: + // Subfetcher still alive, return copy from it + return <-ch + + case <-sf.term: + // Subfetcher already terminated, return a copy directly + if sf.trie == nil { + return nil + } + return sf.db.CopyTrie(sf.trie) + } +} + +// abort interrupts the subfetcher immediately. It is safe to call abort multiple +// times but it is not thread safe. +func (sf *subfetcher) abort() { + select { + case <-sf.stop: + default: + close(sf.stop) + } + <-sf.term +} + +// loop waits for new tasks to be scheduled and keeps loading them until it runs +// out of tasks or its underlying trie is retrieved for committing. +func (sf *subfetcher) loop() { + // No matter how the loop stops, signal anyone waiting that it's terminated + defer close(sf.term) + + // Start by opening the trie and stop processing if it fails + trie, err := sf.db.OpenTrie(sf.root) + if err != nil { + log.Warn("Trie prefetcher failed opening trie", "root", sf.root, "err", err) + return + } + sf.trie = trie + + // Trie opened successfully, keep prefetching items + for { + select { + case <-sf.wake: + // Subfetcher was woken up, retrieve any tasks to avoid spinning the lock + sf.lock.Lock() + tasks := sf.tasks + sf.tasks = nil + sf.lock.Unlock() + + // Prefetch any tasks until the loop is interrupted + for i, task := range tasks { + select { + case <-sf.stop: + // If termination is requested, add any leftover back and return + sf.lock.Lock() + sf.tasks = append(sf.tasks, tasks[i:]...) + sf.lock.Unlock() + return + + case ch := <-sf.copy: + // Somebody wants a copy of the current trie, grant them + ch <- sf.db.CopyTrie(sf.trie) + + default: + // No termination request yet, prefetch the next entry + taskid := string(task) + if _, ok := sf.seen[taskid]; ok { + sf.dups++ + } else { + sf.trie.TryGet(task) + sf.seen[taskid] = struct{}{} + } + } + } + + case ch := <-sf.copy: + // Somebody wants a copy of the current trie, grant them + ch <- sf.db.CopyTrie(sf.trie) + + case <-sf.stop: + // Termination is requested, abort and leave remaining tasks + return + } + } +} diff --git a/core/state_prefetcher.go b/core/state_prefetcher.go index 1c550fa8bc..05394321f7 100644 --- a/core/state_prefetcher.go +++ b/core/state_prefetcher.go @@ -19,7 +19,6 @@ package core import ( "sync/atomic" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" @@ -50,8 +49,11 @@ func newStatePrefetcher(config *params.ChainConfig, bc *BlockChain, engine conse // only goal is to pre-cache transaction signatures and state trie nodes. 
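The hunks below rebuild both the prefetcher and the block processor around a single EVM per block that is reset for every transaction, instead of allocating a fresh EVM per transaction. A minimal sketch of that reuse pattern using only names from this diff; the wrapper function itself is hypothetical:

func executeWithSharedEVM(config *params.ChainConfig, bc ChainContext, statedb *state.StateDB, header *types.Header, txs types.Transactions, gaspool *GasPool) {
	blockContext := NewEVMBlockContext(header, bc, nil)
	evm := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, vm.Config{})
	signer := types.MakeSigner(config, header.Number)

	for _, tx := range txs {
		msg, err := tx.AsMessage(signer)
		if err != nil {
			return // invalid block, bail out
		}
		// Only the transaction context changes between messages; the block
		// context and the EVM object itself are reused.
		evm.Reset(NewEVMTxContext(msg), statedb)
		if _, err := ApplyMessage(evm, msg, gaspool); err != nil {
			return
		}
	}
}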
func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, cfg vm.Config, interrupt *uint32) { var ( - header = block.Header() - gaspool = new(GasPool).AddGas(block.GasLimit()) + header = block.Header() + gaspool = new(GasPool).AddGas(block.GasLimit()) + blockContext = NewEVMBlockContext(header, p.bc, nil) + evm = vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) + signer = types.MakeSigner(p.config, header.Number) ) // Iterate over and process the individual transactions byzantium := p.config.IsByzantium(block.Number()) @@ -60,9 +62,13 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c if interrupt != nil && atomic.LoadUint32(interrupt) == 1 { return } - // Block precaching permitted to continue, execute the transaction + // Convert the transaction into an executable message and pre-cache its sender + msg, err := tx.AsMessage(signer) + if err != nil { + return // Also invalid block, bail out + } statedb.Prepare(tx.Hash(), block.Hash(), i) - if err := precacheTransaction(p.config, p.bc, nil, gaspool, statedb, header, tx, cfg); err != nil { + if err := precacheTransaction(msg, p.config, gaspool, statedb, header, evm); err != nil { return // Ugh, something went horribly wrong, bail out } // If we're pre-byzantium, pre-load trie nodes for the intermediate root @@ -79,16 +85,10 @@ func (p *statePrefetcher) Prefetch(block *types.Block, statedb *state.StateDB, c // precacheTransaction attempts to apply a transaction to the given state database // and uses the input parameters for its environment. The goal is not to execute // the transaction successfully, rather to warm up touched data slots. -func precacheTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gaspool *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, cfg vm.Config) error { - // Convert the transaction into an executable message and pre-cache its sender - msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) - if err != nil { - return err - } - // Create the EVM and execute the transaction - context := NewEVMContext(msg, header, bc, author) - vm := vm.NewEVM(context, statedb, config, cfg) - - _, err = ApplyMessage(vm, msg, gaspool) +func precacheTransaction(msg types.Message, config *params.ChainConfig, gaspool *GasPool, statedb *state.StateDB, header *types.Header, evm *vm.EVM) error { + // Update the evm with the new transaction context. 
+ evm.Reset(NewEVMTxContext(msg), statedb) + // Add addresses to access list if applicable + _, err := ApplyMessage(evm, msg, gaspool) return err } diff --git a/core/state_processor.go b/core/state_processor.go index ac6046b717..40a953f0d4 100644 --- a/core/state_processor.go +++ b/core/state_processor.go @@ -17,6 +17,8 @@ package core import ( + "fmt" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/misc" @@ -65,13 +67,19 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { misc.ApplyDAOHardFork(statedb) } + blockContext := NewEVMBlockContext(header, p.bc, nil) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) // Iterate over and process the individual transactions for i, tx := range block.Transactions() { - statedb.Prepare(tx.Hash(), block.Hash(), i) - receipt, err := ApplyTransaction(p.config, p.bc, nil, gp, statedb, header, tx, usedGas, cfg) + msg, err := tx.AsMessage(types.MakeSigner(p.config, header.Number)) if err != nil { return nil, nil, 0, err } + statedb.Prepare(tx.Hash(), block.Hash(), i) + receipt, err := applyTransaction(msg, p.config, p.bc, nil, gp, statedb, header, tx, usedGas, vmenv) + if err != nil { + return nil, nil, 0, fmt.Errorf("could not apply tx %d [%v]: %w", i, tx.Hash().Hex(), err) + } receipts = append(receipts, receipt) allLogs = append(allLogs, receipt.Logs...) } @@ -81,38 +89,18 @@ func (p *StateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg return receipts, allLogs, *usedGas, nil } -// ApplyTransaction attempts to apply a transaction to the given state database -// and uses the input parameters for its environment. It returns the receipt -// for the transaction, gas used and an error if the transaction failed, -// indicating the block was invalid. -func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { - msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) - if err != nil { - return nil, err - } - // Create a new context to be used in the EVM environment - context := NewEVMContext(msg, header, bc, author) - // Create a new environment which holds all relevant information - // about the transaction and calling mechanisms. - vmenv := vm.NewEVM(context, statedb, config, cfg) - - if config.IsYoloV2(header.Number) { - statedb.AddAddressToAccessList(msg.From()) - if dst := msg.To(); dst != nil { - statedb.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create - } - for _, addr := range vmenv.ActivePrecompiles() { - statedb.AddAddressToAccessList(addr) - } - } +func applyTransaction(msg types.Message, config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, evm *vm.EVM) (*types.Receipt, error) { + // Create a new context to be used in the EVM environment. + txContext := NewEVMTxContext(msg) + evm.Reset(txContext, statedb) - // Apply the transaction to the current state (included in the env) - result, err := ApplyMessage(vmenv, msg, gp) + // Apply the transaction to the current state (included in the env). 
+ result, err := ApplyMessage(evm, msg, gp) if err != nil { return nil, err } - // Update the state with pending changes + + // Update the state with pending changes. var root []byte if config.IsByzantium(header.Number) { statedb.Finalise(true) @@ -121,21 +109,42 @@ func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *commo } *usedGas += result.UsedGas - // Create a new receipt for the transaction, storing the intermediate root and gas used by the tx - // based on the eip phase, we're passing whether the root touch-delete accounts. - receipt := types.NewReceipt(root, result.Failed(), *usedGas) + // Create a new receipt for the transaction, storing the intermediate root and gas used + // by the tx. + receipt := &types.Receipt{Type: tx.Type(), PostState: root, CumulativeGasUsed: *usedGas} + if result.Failed() { + receipt.Status = types.ReceiptStatusFailed + } else { + receipt.Status = types.ReceiptStatusSuccessful + } receipt.TxHash = tx.Hash() receipt.GasUsed = result.UsedGas - // if the transaction created a contract, store the creation address in the receipt. + + // If the transaction created a contract, store the creation address in the receipt. if msg.To() == nil { - receipt.ContractAddress = crypto.CreateAddress(vmenv.Context.Origin, tx.Nonce()) + receipt.ContractAddress = crypto.CreateAddress(evm.TxContext.Origin, tx.Nonce()) } - // Set the receipt logs and create a bloom for filtering + + // Set the receipt logs and create the bloom filter. receipt.Logs = statedb.GetLogs(tx.Hash()) receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) receipt.BlockHash = statedb.BlockHash() receipt.BlockNumber = header.Number receipt.TransactionIndex = uint(statedb.TxIndex()) - return receipt, err } + +// ApplyTransaction attempts to apply a transaction to the given state database +// and uses the input parameters for its environment. It returns the receipt +// for the transaction, gas used and an error if the transaction failed, +// indicating the block was invalid. +func ApplyTransaction(config *params.ChainConfig, bc ChainContext, author *common.Address, gp *GasPool, statedb *state.StateDB, header *types.Header, tx *types.Transaction, usedGas *uint64, cfg vm.Config) (*types.Receipt, error) { + msg, err := tx.AsMessage(types.MakeSigner(config, header.Number)) + if err != nil { + return nil, err + } + // Create a new context to be used in the EVM environment + blockContext := NewEVMBlockContext(header, bc, author) + vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, config, cfg) + return applyTransaction(msg, config, bc, author, gp, statedb, header, tx, usedGas, vmenv) +} diff --git a/core/state_processor_test.go b/core/state_processor_test.go new file mode 100644 index 0000000000..5976ecc3d4 --- /dev/null +++ b/core/state_processor_test.go @@ -0,0 +1,152 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package core + +import ( + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +// TestStateProcessorErrors tests the output from the 'core' errors +// as defined in core/error.go. These errors are generated when the +// blockchain imports bad blocks, meaning blocks which have valid headers but +// contain invalid transactions +func TestStateProcessorErrors(t *testing.T) { + var ( + signer = types.HomesteadSigner{} + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: params.TestChainConfig, + } + genesis = gspec.MustCommit(db) + blockchain, _ = NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + ) + defer blockchain.Stop() + var makeTx = func(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) + return tx + } + for i, tt := range []struct { + txs []*types.Transaction + want string + }{ + { + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + }, + want: "could not apply tx 1 [0x36bfa6d14f1cd35a1be8cc2322982a595fabc0e799f09c1de3bad7bd5b1f7626]: nonce too low: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 0 state: 1", + }, + { + txs: []*types.Transaction{ + makeTx(100, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + }, + want: "could not apply tx 0 [0x51cd272d41ef6011d8138e18bf4043797aca9b713c7d39a97563f9bbe6bdbe6f]: nonce too high: address 0x71562b71999873DB5b286dF957af199Ec94617F7, tx: 100 state: 0", + }, + { + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), 21000000, nil, nil), + }, + want: "could not apply tx 0 [0x54c58b530824b0bb84b7a98183f08913b5d74e1cebc368515ef3c65edf8eb56a]: gas limit reached", + }, + { + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(1), params.TxGas, nil, nil), + }, + want: "could not apply tx 0 [0x3094b17498940d92b13baccf356ce8bfd6f221e926abc903d642fa1466c5b50e]: insufficient funds for transfer: address 0x71562b71999873DB5b286dF957af199Ec94617F7", + }, + { + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(0xffffff), nil), + }, + want: "could not apply tx 0 [0xaa3f7d86802b1f364576d9071bf231e31d61b392d306831ac9cf706ff5371ce0]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 0 want 352321515000", + }, + { + txs: []*types.Transaction{ + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + makeTx(1, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + makeTx(2, common.Address{}, big.NewInt(0), params.TxGas, nil, nil), + makeTx(3, common.Address{}, big.NewInt(0), params.TxGas-1000, big.NewInt(0), nil), + }, + want: 
"could not apply tx 3 [0x836fab5882205362680e49b311a20646de03b630920f18ec6ee3b111a2cf6835]: intrinsic gas too low: have 20000, want 21000", + }, + // The last 'core' error is ErrGasUintOverflow: "gas uint64 overflow", but in order to + // trigger that one, we'd have to allocate a _huge_ chunk of data, such that the + // multiplication len(data) +gas_per_byte overflows uint64. Not testable at the moment + } { + block := GenerateBadBlock(genesis, ethash.NewFaker(), tt.txs) + _, err := blockchain.InsertChain(types.Blocks{block}) + if err == nil { + t.Fatal("block imported without errors") + } + if have, want := err.Error(), tt.want; have != want { + t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } + } +} + +// GenerateBadBlock constructs a "block" which contains the transactions. The transactions are not expected to be +// valid, and no proper post-state can be made. But from the perspective of the blockchain, the block is sufficiently +// valid to be considered for import: +// - valid pow (fake), ancestry, difficulty, gaslimit etc +func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Transactions) *types.Block { + header := &types.Header{ + ParentHash: parent.Hash(), + Coinbase: parent.Coinbase(), + Difficulty: engine.CalcDifficulty(&fakeChainReader{params.TestChainConfig}, parent.Time()+10, &types.Header{ + Number: parent.Number(), + Time: parent.Time(), + Difficulty: parent.Difficulty(), + UncleHash: parent.UncleHash(), + }), + GasLimit: CalcGasLimit(parent, parent.GasLimit(), parent.GasLimit()), + Number: new(big.Int).Add(parent.Number(), common.Big1), + Time: parent.Time() + 10, + UncleHash: types.EmptyUncleHash, + } + var receipts []*types.Receipt + + // The post-state result doesn't need to be correct (this is a bad block), but we do need something there + // Preferably something unique. So let's use a combo of blocknum + txhash + hasher := sha3.NewLegacyKeccak256() + hasher.Write(header.Number.Bytes()) + var cumulativeGas uint64 + for _, tx := range txs { + txh := tx.Hash() + hasher.Write(txh[:]) + receipt := types.NewReceipt(nil, false, cumulativeGas+tx.Gas()) + receipt.TxHash = tx.Hash() + receipt.GasUsed = tx.Gas() + receipts = append(receipts, receipt) + cumulativeGas += tx.Gas() + } + header.Root = common.BytesToHash(hasher.Sum(nil)) + // Assemble and return the final block for sealing + return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil)) +} diff --git a/core/state_transition.go b/core/state_transition.go index 9a9bf475e9..d511e40bd6 100644 --- a/core/state_transition.go +++ b/core/state_transition.go @@ -17,10 +17,12 @@ package core import ( + "fmt" "math" "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" ) @@ -66,6 +68,7 @@ type Message interface { Nonce() uint64 CheckNonce() bool Data() []byte + AccessList() types.AccessList } // ExecutionResult includes all output after executing given evm @@ -104,10 +107,10 @@ func (result *ExecutionResult) Revert() []byte { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. 
-func IntrinsicGas(data []byte, contractCreation, isHomestead bool, isEIP2028 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 - if contractCreation && isHomestead { + if isContractCreation && isHomestead { gas = params.TxGasContractCreation } else { gas = params.TxGas @@ -137,6 +140,10 @@ func IntrinsicGas(data []byte, contractCreation, isHomestead bool, isEIP2028 boo } gas += z * params.TxDataZeroGas } + if accessList != nil { + gas += uint64(len(accessList)) * params.TxAccessListAddressGas + gas += uint64(accessList.StorageKeys()) * params.TxAccessListStorageKeyGas + } return gas, nil } @@ -174,8 +181,8 @@ func (st *StateTransition) to() common.Address { func (st *StateTransition) buyGas() error { mgval := new(big.Int).Mul(new(big.Int).SetUint64(st.msg.Gas()), st.gasPrice) - if st.state.GetBalance(st.msg.From()).Cmp(mgval) < 0 { - return ErrInsufficientFunds + if have, want := st.state.GetBalance(st.msg.From()), mgval; have.Cmp(want) < 0 { + return fmt.Errorf("%w: address %v have %v want %v", ErrInsufficientFunds, st.msg.From().Hex(), have, want) } if err := st.gp.SubGas(st.msg.Gas()); err != nil { return err @@ -190,11 +197,13 @@ func (st *StateTransition) buyGas() error { func (st *StateTransition) preCheck() error { // Make sure this transaction's nonce is correct. if st.msg.CheckNonce() { - nonce := st.state.GetNonce(st.msg.From()) - if nonce < st.msg.Nonce() { - return ErrNonceTooHigh - } else if nonce > st.msg.Nonce() { - return ErrNonceTooLow + stNonce := st.state.GetNonce(st.msg.From()) + if msgNonce := st.msg.Nonce(); stNonce < msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooHigh, + st.msg.From().Hex(), msgNonce, stNonce) + } else if stNonce > msgNonce { + return fmt.Errorf("%w: address %v, tx: %d state: %d", ErrNonceTooLow, + st.msg.From().Hex(), msgNonce, stNonce) } } return st.buyGas() @@ -230,24 +239,30 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } msg := st.msg sender := vm.AccountRef(msg.From()) - homestead := st.evm.ChainConfig().IsHomestead(st.evm.BlockNumber) - istanbul := st.evm.ChainConfig().IsIstanbul(st.evm.BlockNumber) + homestead := st.evm.ChainConfig().IsHomestead(st.evm.Context.BlockNumber) + istanbul := st.evm.ChainConfig().IsIstanbul(st.evm.Context.BlockNumber) contractCreation := msg.To() == nil // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(st.data, contractCreation, homestead, istanbul) + gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, homestead, istanbul) if err != nil { return nil, err } if st.gas < gas { - return nil, ErrIntrinsicGas + return nil, fmt.Errorf("%w: have %d, want %d", ErrIntrinsicGas, st.gas, gas) } st.gas -= gas // Check clause 6 - if msg.Value().Sign() > 0 && !st.evm.CanTransfer(st.state, msg.From(), msg.Value()) { - return nil, ErrInsufficientFundsForTransfer + if msg.Value().Sign() > 0 && !st.evm.Context.CanTransfer(st.state, msg.From(), msg.Value()) { + return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From().Hex()) + } + + // Set up the initial access list. 
+ if st.evm.ChainConfig().IsBerlin(st.evm.Context.BlockNumber) { + st.state.PrepareAccessList(msg.From(), msg.To(), st.evm.ActivePrecompiles(), msg.AccessList()) } + var ( ret []byte vmerr error // vm errors do not effect consensus and are therefore not assigned to err @@ -260,7 +275,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ret, st.gas, vmerr = st.evm.Call(sender, st.to(), st.data, st.gas, st.value) } st.refundGas() - st.state.AddBalance(st.evm.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) + st.state.AddBalance(st.evm.Context.Coinbase, new(big.Int).Mul(new(big.Int).SetUint64(st.gasUsed()), st.gasPrice)) return &ExecutionResult{ UsedGas: st.gasUsed(), diff --git a/core/tx_list.go b/core/tx_list.go index cdd3df14c5..894640d570 100644 --- a/core/tx_list.go +++ b/core/tx_list.go @@ -24,7 +24,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" ) // nonceHeap is a heap.Interface implementation over 64bit unsigned integers for @@ -439,24 +438,29 @@ func (h *priceHeap) Pop() interface{} { } // txPricedList is a price-sorted heap to allow operating on transactions pool -// contents in a price-incrementing way. +// contents in a price-incrementing way. It's built opon the all transactions +// in txpool but only interested in the remote part. It means only remote transactions +// will be considered for tracking, sorting, eviction, etc. type txPricedList struct { - all *txLookup // Pointer to the map of all transactions - items *priceHeap // Heap of prices of all the stored transactions - stales int // Number of stale price points to (re-heap trigger) + all *txLookup // Pointer to the map of all transactions + remotes *priceHeap // Heap of prices of all the stored **remote** transactions + stales int // Number of stale price points to (re-heap trigger) } // newTxPricedList creates a new price-sorted transaction heap. func newTxPricedList(all *txLookup) *txPricedList { return &txPricedList{ - all: all, - items: new(priceHeap), + all: all, + remotes: new(priceHeap), } } // Put inserts a new transaction into the heap. -func (l *txPricedList) Put(tx *types.Transaction) { - heap.Push(l.items, tx) +func (l *txPricedList) Put(tx *types.Transaction, local bool) { + if local { + return + } + heap.Push(l.remotes, tx) } // Removed notifies the prices transaction list that an old transaction dropped @@ -465,121 +469,95 @@ func (l *txPricedList) Put(tx *types.Transaction) { func (l *txPricedList) Removed(count int) { // Bump the stale counter, but exit if still too low (< 25%) l.stales += count - if l.stales <= len(*l.items)/4 { + if l.stales <= len(*l.remotes)/4 { return } // Seems we've reached a critical number of stale transactions, reheap - reheap := make(priceHeap, 0, l.all.Count()) - - l.stales, l.items = 0, &reheap - l.all.Range(func(hash common.Hash, tx *types.Transaction) bool { - *l.items = append(*l.items, tx) - return true - }) - heap.Init(l.items) + l.Reheap() } // Cap finds all the transactions below the given price threshold, drops them // from the priced list and returns them for further removal from the entire pool. -func (l *txPricedList) Cap(threshold *big.Int, local *accountSet) types.Transactions { +// +// Note: only remote transactions will be considered for eviction. 
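For context, this is how the pool consumes Cap once the gas price floor moves, mirroring SetGasPrice in tx_pool.go later in this diff; the helper name is hypothetical:

func setPriceFloorSketch(pool *TxPool, price *big.Int) {
	pool.gasPrice = price
	// Evict every remote transaction now priced below the floor; locals never
	// enter the remote heap, so they survive without any special casing.
	for _, tx := range pool.priced.Cap(price) {
		pool.removeTx(tx.Hash(), false)
	}
}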
+func (l *txPricedList) Cap(threshold *big.Int) types.Transactions { drop := make(types.Transactions, 0, 128) // Remote underpriced transactions to drop - save := make(types.Transactions, 0, 64) // Local underpriced transactions to keep - - for len(*l.items) > 0 { + for len(*l.remotes) > 0 { // Discard stale transactions if found during cleanup - tx := heap.Pop(l.items).(*types.Transaction) - if l.all.Get(tx.Hash()) == nil { + cheapest := (*l.remotes)[0] + if l.all.GetRemote(cheapest.Hash()) == nil { // Removed or migrated + heap.Pop(l.remotes) l.stales-- continue } // Stop the discards if we've reached the threshold - if tx.GasPriceIntCmp(threshold) >= 0 { - save = append(save, tx) + if cheapest.GasPriceIntCmp(threshold) >= 0 { break } - // Non stale transaction found, discard unless local - if local.containsTx(tx) { - save = append(save, tx) - } else { - drop = append(drop, tx) - } - } - for _, tx := range save { - heap.Push(l.items, tx) + heap.Pop(l.remotes) + drop = append(drop, cheapest) } return drop } // Underpriced checks whether a transaction is cheaper than (or as cheap as) the -// lowest priced transaction currently being tracked. -func (l *txPricedList) Underpriced(tx *types.Transaction, local *accountSet) bool { - // Local transactions cannot be underpriced - if local.containsTx(tx) { - return false - } +// lowest priced (remote) transaction currently being tracked. +func (l *txPricedList) Underpriced(tx *types.Transaction) bool { // Discard stale price points if found at the heap start - for len(*l.items) > 0 { - head := []*types.Transaction(*l.items)[0] - if l.all.Get(head.Hash()) == nil { + for len(*l.remotes) > 0 { + head := []*types.Transaction(*l.remotes)[0] + if l.all.GetRemote(head.Hash()) == nil { // Removed or migrated l.stales-- - heap.Pop(l.items) + heap.Pop(l.remotes) continue } break } // Check if the transaction is underpriced or not - if len(*l.items) == 0 { - log.Error("Pricing query for empty pool") // This cannot happen, print to catch programming errors - return false + if len(*l.remotes) == 0 { + return false // There is no remote transaction at all. } - cheapest := []*types.Transaction(*l.items)[0] + // If the remote transaction is even cheaper than the + // cheapest one tracked locally, reject it. + cheapest := []*types.Transaction(*l.remotes)[0] return cheapest.GasPriceCmp(tx) >= 0 } // Discard finds a number of most underpriced transactions, removes them from the // priced list and returns them for further removal from the entire pool. -func (l *txPricedList) Discard(slots int, local *accountSet) types.Transactions { - // If we have some local accountset, those will not be discarded - if !local.empty() { - // In case the list is filled to the brim with 'local' txs, we do this - // little check to avoid unpacking / repacking the heap later on, which - // is very expensive - discardable := 0 - for _, tx := range *l.items { - if !local.containsTx(tx) { - discardable++ - } - if discardable >= slots { - break - } - } - if slots > discardable { - slots = discardable - } - } - if slots == 0 { - return nil - } - drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop - save := make(types.Transactions, 0, len(*l.items)-slots) // Local underpriced transactions to keep - - for len(*l.items) > 0 && slots > 0 { +// +// Note local transaction won't be considered for eviction. 
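The force flag and the extra boolean return below determine how the pool reacts to overflow: a local submission may evict remotes unconditionally, while a remote one must fit or be rejected. A condensed sketch of the caller side, mirroring add in tx_pool.go later in this diff; the helper name is hypothetical:

func makeRoomSketch(pool *TxPool, tx *types.Transaction, isLocal bool) error {
	needed := pool.all.Slots() - int(pool.config.GlobalSlots+pool.config.GlobalQueue) + numSlots(tx)
	drop, ok := pool.priced.Discard(needed, isLocal)
	if !ok && !isLocal {
		return ErrTxPoolOverflow // nothing evictable without touching locals
	}
	for _, old := range drop {
		pool.removeTx(old.Hash(), false)
	}
	return nil
}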
+func (l *txPricedList) Discard(slots int, force bool) (types.Transactions, bool) { + drop := make(types.Transactions, 0, slots) // Remote underpriced transactions to drop + for len(*l.remotes) > 0 && slots > 0 { // Discard stale transactions if found during cleanup - tx := heap.Pop(l.items).(*types.Transaction) - if l.all.Get(tx.Hash()) == nil { + tx := heap.Pop(l.remotes).(*types.Transaction) + if l.all.GetRemote(tx.Hash()) == nil { // Removed or migrated l.stales-- continue } - // Non stale transaction found, discard unless local - if local.containsTx(tx) { - save = append(save, tx) - } else { - drop = append(drop, tx) - slots -= numSlots(tx) + // Non stale transaction found, discard it + drop = append(drop, tx) + slots -= numSlots(tx) + } + // If we still can't make enough room for the new transaction + if slots > 0 && !force { + for _, tx := range drop { + heap.Push(l.remotes, tx) } + return nil, false } - for _, tx := range save { - heap.Push(l.items, tx) - } - return drop + return drop, true +} + +// Reheap forcibly rebuilds the heap based on the current remote transaction set. +func (l *txPricedList) Reheap() { + reheap := make(priceHeap, 0, l.all.RemoteCount()) + + l.stales, l.remotes = 0, &reheap + l.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool { + *l.remotes = append(*l.remotes, tx) + return true + }, false, true) // Only iterate remotes + heap.Init(l.remotes) } diff --git a/core/tx_pool.go b/core/tx_pool.go index e3ffe103cf..4c1bd809fd 100644 --- a/core/tx_pool.go +++ b/core/tx_pool.go @@ -63,6 +63,10 @@ var ( // configured for the transaction pool. ErrUnderpriced = errors.New("transaction underpriced") + // ErrTxPoolOverflow is returned if the transaction pool is full and can't accpet + // another remote transaction. + ErrTxPoolOverflow = errors.New("txpool is full") + // ErrReplaceUnderpriced is returned if a transaction is attempted to be replaced // with a different one without the required price bump. ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") @@ -105,6 +109,7 @@ var ( validTxMeter = metrics.NewRegisteredMeter("txpool/valid", nil) invalidTxMeter = metrics.NewRegisteredMeter("txpool/invalid", nil) underpricedTxMeter = metrics.NewRegisteredMeter("txpool/underpriced", nil) + overflowedTxMeter = metrics.NewRegisteredMeter("txpool/overflowed", nil) pendingGauge = metrics.NewRegisteredGauge("txpool/pending", nil) queuedGauge = metrics.NewRegisteredGauge("txpool/queued", nil) @@ -224,6 +229,7 @@ type TxPool struct { mu sync.RWMutex istanbul bool // Fork indicator whether we are in the istanbul stage. + eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. 
currentState *state.StateDB // Current state in the blockchain head pendingNonces *txNoncer // Pending state tracking virtual nonces @@ -263,7 +269,7 @@ func NewTxPool(config TxPoolConfig, chainconfig *params.ChainConfig, chain block config: config, chainconfig: chainconfig, chain: chain, - signer: types.NewEIP155Signer(chainconfig.ChainID), + signer: types.LatestSigner(chainconfig), pending: make(map[common.Address]*txList), queue: make(map[common.Address]*txList), beats: make(map[common.Address]time.Time), @@ -421,7 +427,7 @@ func (pool *TxPool) SetGasPrice(price *big.Int) { defer pool.mu.Unlock() pool.gasPrice = price - for _, tx := range pool.priced.Cap(price, pool.locals) { + for _, tx := range pool.priced.Cap(price) { pool.removeTx(tx.Hash(), false) } log.Info("Transaction pool price threshold updated", "price", price) @@ -517,6 +523,10 @@ func (pool *TxPool) local() map[common.Address]types.Transactions { // validateTx checks whether a transaction is valid according to the consensus // rules and adheres to some heuristic limits of the local node (price and size). func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { + // Accept only legacy transactions until EIP-2718/2930 activates. + if !pool.eip2718 && tx.Type() != types.LegacyTxType { + return ErrTxTypeNotSupported + } // Reject transactions over defined size to prevent DOS attacks if uint64(tx.Size()) > txMaxSize { return ErrOversizedData @@ -530,13 +540,12 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if pool.currentMaxGas < tx.Gas() { return ErrGasLimit } - // Make sure the transaction is signed properly + // Make sure the transaction is signed properly. from, err := types.Sender(pool.signer, tx) if err != nil { return ErrInvalidSender } // Drop non-local transactions under our own minimal accepted gas price - local = local || pool.locals.contains(from) // account may be local even if the transaction arrived from the network if !local && tx.GasPriceIntCmp(pool.gasPrice) < 0 { return ErrUnderpriced } @@ -550,7 +559,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { return ErrInsufficientFunds } // Ensure the transaction has more gas than the basic tx fee. - intrGas, err := IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul) + intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) if err != nil { return err } @@ -575,22 +584,36 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e knownTxMeter.Mark(1) return false, ErrAlreadyKnown } + // Make the local flag. If it's from local source or it's from the network but + // the sender is marked as local previously, treat it as the local transaction. 
+ isLocal := local || pool.locals.containsTx(tx) + // If the transaction fails basic validation, discard it - if err := pool.validateTx(tx, local); err != nil { + if err := pool.validateTx(tx, isLocal); err != nil { log.Trace("Discarding invalid transaction", "hash", hash, "err", err) invalidTxMeter.Mark(1) return false, err } // If the transaction pool is full, discard underpriced transactions - if uint64(pool.all.Count()) >= pool.config.GlobalSlots+pool.config.GlobalQueue { + if uint64(pool.all.Count()+numSlots(tx)) > pool.config.GlobalSlots+pool.config.GlobalQueue { // If the new transaction is underpriced, don't accept it - if !local && pool.priced.Underpriced(tx, pool.locals) { + if !isLocal && pool.priced.Underpriced(tx) { log.Trace("Discarding underpriced transaction", "hash", hash, "price", tx.GasPrice()) underpricedTxMeter.Mark(1) return false, ErrUnderpriced } - // New transaction is better than our worse ones, make room for it - drop := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), pool.locals) + // New transaction is better than our worse ones, make room for it. + // If it's a local transaction, forcibly discard all available transactions. + // Otherwise if we can't make enough room for new one, abort the operation. + drop, success := pool.priced.Discard(pool.all.Slots()-int(pool.config.GlobalSlots+pool.config.GlobalQueue)+numSlots(tx), isLocal) + + // Special case, we still can't make the room for the new remote one. + if !isLocal && !success { + log.Trace("Discarding overflown transaction", "hash", hash) + overflowedTxMeter.Mark(1) + return false, ErrTxPoolOverflow + } + // Kick out the underpriced remote transactions. for _, tx := range drop { log.Trace("Discarding freshly underpriced transaction", "hash", tx.Hash(), "price", tx.GasPrice()) underpricedTxMeter.Mark(1) @@ -612,8 +635,8 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e pool.priced.Removed(1) pendingReplaceMeter.Mark(1) } - pool.all.Add(tx) - pool.priced.Put(tx) + pool.all.Add(tx, isLocal) + pool.priced.Put(tx, isLocal) pool.journalTx(from, tx) pool.queueTxEvent(tx) log.Trace("Pooled new executable transaction", "hash", hash, "from", from, "to", tx.To()) @@ -623,18 +646,17 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e return old != nil, nil } // New transaction isn't replacing a pending one, push into queue - replaced, err = pool.enqueueTx(hash, tx) + replaced, err = pool.enqueueTx(hash, tx, isLocal, true) if err != nil { return false, err } // Mark local addresses and journal local transactions - if local { - if !pool.locals.contains(from) { - log.Info("Setting new local account", "address", from) - pool.locals.add(from) - } + if local && !pool.locals.contains(from) { + log.Info("Setting new local account", "address", from) + pool.locals.add(from) + pool.priced.Removed(pool.all.RemoteToLocals(pool.locals)) // Migrate the remotes if it's marked as local first time. } - if local || pool.locals.contains(from) { + if isLocal { localGauge.Inc(1) } pool.journalTx(from, tx) @@ -646,7 +668,7 @@ func (pool *TxPool) add(tx *types.Transaction, local bool) (replaced bool, err e // enqueueTx inserts a new transaction into the non-executable transaction queue. // // Note, this method assumes the pool lock is held! 
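The signature change below adds two flags to enqueueTx; the call sites later in this diff take only two shapes, roughly:

pool.enqueueTx(hash, tx, isLocal, true)  // genuinely new transaction: also insert it into the lookup set and price heap
pool.enqueueTx(hash, tx, false, false)   // internal shuffle (demotion, reorg): the transaction is already tracked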
-func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, error) { +func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction, local bool, addAll bool) (bool, error) { // Try to insert the transaction into the future queue from, _ := types.Sender(pool.signer, tx) // already validated if pool.queue[from] == nil { @@ -667,9 +689,14 @@ func (pool *TxPool) enqueueTx(hash common.Hash, tx *types.Transaction) (bool, er // Nothing was replaced, bump the queued counter queuedGauge.Inc(1) } - if pool.all.Get(hash) == nil { - pool.all.Add(tx) - pool.priced.Put(tx) + // If the transaction isn't in lookup set but it's expected to be there, + // show the error log. + if pool.all.Get(hash) == nil && !addAll { + log.Error("Missing transaction in lookup set, please report the issue", "hash", hash) + } + if addAll { + pool.all.Add(tx, local) + pool.priced.Put(tx, local) } // If we never record the heartbeat, do it right now. if _, exist := pool.beats[from]; !exist { @@ -718,11 +745,6 @@ func (pool *TxPool) promoteTx(addr common.Address, hash common.Hash, tx *types.T // Nothing was replaced, bump the pending counter pendingGauge.Inc(1) } - // Failsafe to work around direct pending inserts (tests) - if pool.all.Get(hash) == nil { - pool.all.Add(tx) - pool.priced.Put(tx) - } // Set the potentially new pending nonce and notify any subsystems of the new tx pool.pendingNonces.set(addr, tx.Nonce()+1) @@ -904,7 +926,8 @@ func (pool *TxPool) removeTx(hash common.Hash, outofbound bool) { } // Postpone any invalidated transactions for _, tx := range invalids { - pool.enqueueTx(tx.Hash(), tx) + // Internal shuffle shouldn't touch the lookup set. + pool.enqueueTx(tx.Hash(), tx, false, false) } // Update the account nonce if needed pool.pendingNonces.setIfLower(addr, tx.Nonce()) @@ -1119,44 +1142,45 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // head from the chain. // If that is the case, we don't have the lost transactions any more, and // there's nothing to add - if newNum < oldNum { - // If the reorg ended up on a lower number, it's indicative of setHead being the cause - log.Debug("Skipping transaction reset caused by setHead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - } else { + if newNum >= oldNum { // If we reorged to a same or higher number, then it's not a case of setHead log.Warn("Transaction pool reset with missing oldhead", "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - } - return - } - for rem.NumberU64() > add.NumberU64() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) return } - } - for add.NumberU64() > rem.NumberU64() { - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return + // If the reorg ended up on a lower number, it's indicative of setHead being the cause + log.Debug("Skipping transaction reset caused by setHead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + // We still need to update the current state s.th. 
the lost transactions can be readded by the user + } else { + for rem.NumberU64() > add.NumberU64() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } } - } - for rem.Hash() != add.Hash() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return + for add.NumberU64() > rem.NumberU64() { + included = append(included, add.Transactions()...) + if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + return + } } - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return + for rem.Hash() != add.Hash() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } + included = append(included, add.Transactions()...) + if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + return + } } + reinject = types.TxDifference(discarded, included) } - reinject = types.TxDifference(discarded, included) } } // Initialize the internal state to the current head @@ -1180,6 +1204,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { // Update all fork indicator by next pending block number. next := new(big.Int).Add(newHead.Number, big.NewInt(1)) pool.istanbul = pool.chainconfig.IsIstanbul(next) + pool.eip2718 = pool.chainconfig.IsBerlin(next) } // promoteExecutables moves transactions that have become processable from the @@ -1408,7 +1433,9 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range invalids { hash := tx.Hash() log.Trace("Demoting pending transaction", "hash", hash) - pool.enqueueTx(hash, tx) + + // Internal shuffle shouldn't touch the lookup set. + pool.enqueueTx(hash, tx, false, false) } pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) if pool.locals.contains(addr) { @@ -1420,7 +1447,9 @@ func (pool *TxPool) demoteUnexecutables() { for _, tx := range gapped { hash := tx.Hash() log.Error("Demoting invalidated transaction", "hash", hash) - pool.enqueueTx(hash, tx) + + // Internal shuffle shouldn't touch the lookup set. + pool.enqueueTx(hash, tx, false, false) } pendingGauge.Dec(int64(len(gapped))) // This might happen in a reorg, so log it to the metering @@ -1519,8 +1548,8 @@ func (as *accountSet) merge(other *accountSet) { as.cache = nil } -// txLookup is used internally by TxPool to track transactions while allowing lookup without -// mutex contention. +// txLookup is used internally by TxPool to track transactions while allowing +// lookup without mutex contention. 
// // Note, although this type is properly protected against concurrent access, it // is **not** a type that should ever be mutated or even exposed outside of the @@ -1528,27 +1557,43 @@ func (as *accountSet) merge(other *accountSet) { // internal mechanisms. The sole purpose of the type is to permit out-of-bound // peeking into the pool in TxPool.Get without having to acquire the widely scoped // TxPool.mu mutex. +// +// This lookup set combines the notion of "local transactions", which is useful +// to build upper-level structure. type txLookup struct { - all map[common.Hash]*types.Transaction - slots int - lock sync.RWMutex + slots int + lock sync.RWMutex + locals map[common.Hash]*types.Transaction + remotes map[common.Hash]*types.Transaction } // newTxLookup returns a new txLookup structure. func newTxLookup() *txLookup { return &txLookup{ - all: make(map[common.Hash]*types.Transaction), + locals: make(map[common.Hash]*types.Transaction), + remotes: make(map[common.Hash]*types.Transaction), } } -// Range calls f on each key and value present in the map. -func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction) bool) { +// Range calls f on each key and value present in the map. The callback passed +// should return the indicator whether the iteration needs to be continued. +// Callers need to specify which set (or both) to be iterated. +func (t *txLookup) Range(f func(hash common.Hash, tx *types.Transaction, local bool) bool, local bool, remote bool) { t.lock.RLock() defer t.lock.RUnlock() - for key, value := range t.all { - if !f(key, value) { - break + if local { + for key, value := range t.locals { + if !f(key, value, true) { + return + } + } + } + if remote { + for key, value := range t.remotes { + if !f(key, value, false) { + return + } } } } @@ -1558,15 +1603,50 @@ func (t *txLookup) Get(hash common.Hash) *types.Transaction { t.lock.RLock() defer t.lock.RUnlock() - return t.all[hash] + if tx := t.locals[hash]; tx != nil { + return tx + } + return t.remotes[hash] +} + +// GetLocal returns a transaction if it exists in the lookup, or nil if not found. +func (t *txLookup) GetLocal(hash common.Hash) *types.Transaction { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.locals[hash] } -// Count returns the current number of items in the lookup. +// GetRemote returns a transaction if it exists in the lookup, or nil if not found. +func (t *txLookup) GetRemote(hash common.Hash) *types.Transaction { + t.lock.RLock() + defer t.lock.RUnlock() + + return t.remotes[hash] +} + +// Count returns the current number of transactions in the lookup. func (t *txLookup) Count() int { t.lock.RLock() defer t.lock.RUnlock() - return len(t.all) + return len(t.locals) + len(t.remotes) +} + +// LocalCount returns the current number of local transactions in the lookup. +func (t *txLookup) LocalCount() int { + t.lock.RLock() + defer t.lock.RUnlock() + + return len(t.locals) +} + +// RemoteCount returns the current number of remote transactions in the lookup. +func (t *txLookup) RemoteCount() int { + t.lock.RLock() + defer t.lock.RUnlock() + + return len(t.remotes) } // Slots returns the current number of slots used in the lookup. @@ -1578,14 +1658,18 @@ func (t *txLookup) Slots() int { } // Add adds a transaction to the lookup. 
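The locals/remotes split above shows up in every accessor; a hypothetical helper using the new Range selectors to walk only the remote set:

func countCheapRemotes(pool *TxPool, threshold *big.Int) int {
	cheap := 0
	pool.all.Range(func(hash common.Hash, tx *types.Transaction, local bool) bool {
		if tx.GasPriceIntCmp(threshold) < 0 {
			cheap++
		}
		return true // keep iterating
	}, false, true) // skip locals, visit remotes
	return cheap
}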
-func (t *txLookup) Add(tx *types.Transaction) { +func (t *txLookup) Add(tx *types.Transaction, local bool) { t.lock.Lock() defer t.lock.Unlock() t.slots += numSlots(tx) slotsGauge.Update(int64(t.slots)) - t.all[tx.Hash()] = tx + if local { + t.locals[tx.Hash()] = tx + } else { + t.remotes[tx.Hash()] = tx + } } // Remove removes a transaction from the lookup. @@ -1593,10 +1677,36 @@ func (t *txLookup) Remove(hash common.Hash) { t.lock.Lock() defer t.lock.Unlock() - t.slots -= numSlots(t.all[hash]) + tx, ok := t.locals[hash] + if !ok { + tx, ok = t.remotes[hash] + } + if !ok { + log.Error("No transaction found to be deleted", "hash", hash) + return + } + t.slots -= numSlots(tx) slotsGauge.Update(int64(t.slots)) - delete(t.all, hash) + delete(t.locals, hash) + delete(t.remotes, hash) +} + +// RemoteToLocals migrates the transactions belongs to the given locals to locals +// set. The assumption is held the locals set is thread-safe to be used. +func (t *txLookup) RemoteToLocals(locals *accountSet) int { + t.lock.Lock() + defer t.lock.Unlock() + + var migrated int + for hash, tx := range t.remotes { + if locals.containsTx(tx) { + t.locals[hash] = tx + delete(t.remotes, hash) + migrated += 1 + } + } + return migrated } // numSlots calculates the number of slots needed for a single transaction. diff --git a/core/tx_pool_test.go b/core/tx_pool_test.go index 4fca734e65..5d555f5a9c 100644 --- a/core/tx_pool_test.go +++ b/core/tx_pool_test.go @@ -55,7 +55,7 @@ type testBlockChain struct { func (bc *testBlockChain) CurrentBlock() *types.Block { return types.NewBlock(&types.Header{ GasLimit: bc.gasLimit, - }, nil, nil, nil, new(trie.Trie)) + }, nil, nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { @@ -107,10 +107,11 @@ func validateTxPoolInternals(pool *TxPool) error { if total := pool.all.Count(); total != pending+queued { return fmt.Errorf("total transaction count %d != %d pending + %d queued", total, pending, queued) } - if priced := pool.priced.items.Len() - pool.priced.stales; priced != pending+queued { - return fmt.Errorf("total priced transaction count %d != %d pending + %d queued", priced, pending, queued) + pool.priced.Reheap() + priced, remote := pool.priced.remotes.Len(), pool.all.RemoteCount() + if priced != remote { + return fmt.Errorf("total priced transaction count %d != %d", priced, remote) } - // Ensure the next nonce to assign is the correct one for addr, txs := range pool.pending { // Find the last transaction @@ -242,7 +243,7 @@ func TestInvalidTransactions(t *testing.T) { from, _ := deriveSender(tx) pool.currentState.AddBalance(from, big.NewInt(1)) - if err := pool.AddRemote(tx); err != ErrInsufficientFunds { + if err := pool.AddRemote(tx); !errors.Is(err, ErrInsufficientFunds) { t.Error("expected", ErrInsufficientFunds) } @@ -255,7 +256,7 @@ func TestInvalidTransactions(t *testing.T) { pool.currentState.SetNonce(from, 1) pool.currentState.AddBalance(from, big.NewInt(0xffffffffffffff)) tx = transaction(0, 100000, key) - if err := pool.AddRemote(tx); err != ErrNonceTooLow { + if err := pool.AddRemote(tx); !errors.Is(err, ErrNonceTooLow) { t.Error("expected", ErrNonceTooLow) } @@ -280,7 +281,7 @@ func TestTransactionQueue(t *testing.T) { pool.currentState.AddBalance(from, big.NewInt(1000)) <-pool.requestReset(nil, nil) - pool.enqueueTx(tx.Hash(), tx) + pool.enqueueTx(tx.Hash(), tx, false, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if len(pool.pending) != 1 { t.Error("expected valid 
txs to be 1 is", len(pool.pending)) @@ -289,7 +290,7 @@ func TestTransactionQueue(t *testing.T) { tx = transaction(1, 100, key) from, _ = deriveSender(tx) pool.currentState.SetNonce(from, 2) - pool.enqueueTx(tx.Hash(), tx) + pool.enqueueTx(tx.Hash(), tx, false, true) <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { @@ -313,9 +314,9 @@ func TestTransactionQueue2(t *testing.T) { pool.currentState.AddBalance(from, big.NewInt(1000)) pool.reset(nil, nil) - pool.enqueueTx(tx1.Hash(), tx1) - pool.enqueueTx(tx2.Hash(), tx2) - pool.enqueueTx(tx3.Hash(), tx3) + pool.enqueueTx(tx1.Hash(), tx1, false, true) + pool.enqueueTx(tx2.Hash(), tx2, false, true) + pool.enqueueTx(tx3.Hash(), tx3, false, true) pool.promoteExecutables([]common.Address{from}) if len(pool.pending) != 1 { @@ -488,12 +489,21 @@ func TestTransactionDropping(t *testing.T) { tx11 = transaction(11, 200, key) tx12 = transaction(12, 300, key) ) + pool.all.Add(tx0, false) + pool.priced.Put(tx0, false) pool.promoteTx(account, tx0.Hash(), tx0) + + pool.all.Add(tx1, false) + pool.priced.Put(tx1, false) pool.promoteTx(account, tx1.Hash(), tx1) + + pool.all.Add(tx2, false) + pool.priced.Put(tx2, false) pool.promoteTx(account, tx2.Hash(), tx2) - pool.enqueueTx(tx10.Hash(), tx10) - pool.enqueueTx(tx11.Hash(), tx11) - pool.enqueueTx(tx12.Hash(), tx12) + + pool.enqueueTx(tx10.Hash(), tx10, false, true) + pool.enqueueTx(tx11.Hash(), tx11, false, true) + pool.enqueueTx(tx12.Hash(), tx12, false, true) // Check that pre and post validations leave the pool as is if pool.pending[account].Len() != 3 { @@ -1139,7 +1149,7 @@ func TestTransactionAllowedTxSize(t *testing.T) { t.Fatalf("expected rejection on slightly oversize transaction") } // Try adding a transaction of random not allowed size - if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(int(10*txMaxSize))))); err == nil { + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentMaxGas, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { t.Fatalf("expected rejection on oversize transaction") } // Run some sanity checks on the pool internals @@ -1964,7 +1974,7 @@ func benchmarkFuturePromotion(b *testing.B, size int) { for i := 0; i < size; i++ { tx := transaction(uint64(1+i), 100000, key) - pool.enqueueTx(tx.Hash(), tx) + pool.enqueueTx(tx.Hash(), tx, false, true) } // Benchmark the speed of pool validation b.ResetTimer() @@ -2007,3 +2017,38 @@ func benchmarkPoolBatchInsert(b *testing.B, size int, local bool) { } } } + +func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { + // Allocate keys for testing + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + + remoteKey, _ := crypto.GenerateKey() + remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) + + locals := make([]*types.Transaction, 4096+1024) // Occupy all slots + for i := 0; i < len(locals); i++ { + locals[i] = transaction(uint64(i), 100000, key) + } + remotes := make([]*types.Transaction, 1000) + for i := 0; i < len(remotes); i++ { + remotes[i] = pricedTransaction(uint64(i), 100000, big.NewInt(2), remoteKey) // Higher gasprice + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + pool, _ := setupTxPool() + pool.currentState.AddBalance(account, big.NewInt(100000000)) + for _, local := range locals { + pool.AddLocal(local) + } + b.StartTimer() + // Assign 
a high enough balance for testing + pool.currentState.AddBalance(remoteAddr, big.NewInt(100000000)) + for i := 0; i < len(remotes); i++ { + pool.AddRemotes([]*types.Transaction{remotes[i]}) + } + pool.Stop() + } +} diff --git a/core/types/access_list_tx.go b/core/types/access_list_tx.go new file mode 100644 index 0000000000..65ee95adf6 --- /dev/null +++ b/core/types/access_list_tx.go @@ -0,0 +1,115 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +//go:generate gencodec -type AccessTuple -out gen_access_tuple.go + +// AccessList is an EIP-2930 access list. +type AccessList []AccessTuple + +// AccessTuple is the element type of an access list. +type AccessTuple struct { + Address common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` +} + +// StorageKeys returns the total number of storage keys in the access list. +func (al AccessList) StorageKeys() int { + sum := 0 + for _, tuple := range al { + sum += len(tuple.StorageKeys) + } + return sum +} + +// AccessListTx is the data of EIP-2930 access list transactions. +type AccessListTx struct { + ChainID *big.Int // destination chain ID + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + AccessList AccessList // EIP-2930 access list + V, R, S *big.Int // signature values +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *AccessListTx) copy() TxData { + cpy := &AccessListTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are copied below. + AccessList: make(AccessList, len(tx.AccessList)), + Value: new(big.Int), + ChainID: new(big.Int), + GasPrice: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + copy(cpy.AccessList, tx.AccessList) + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.ChainID != nil { + cpy.ChainID.Set(tx.ChainID) + } + if tx.GasPrice != nil { + cpy.GasPrice.Set(tx.GasPrice) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. 
+ +func (tx *AccessListTx) txType() byte { return AccessListTxType } +func (tx *AccessListTx) chainID() *big.Int { return tx.ChainID } +func (tx *AccessListTx) protected() bool { return true } +func (tx *AccessListTx) accessList() AccessList { return tx.AccessList } +func (tx *AccessListTx) data() []byte { return tx.Data } +func (tx *AccessListTx) gas() uint64 { return tx.Gas } +func (tx *AccessListTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *AccessListTx) value() *big.Int { return tx.Value } +func (tx *AccessListTx) nonce() uint64 { return tx.Nonce } +func (tx *AccessListTx) to() *common.Address { return tx.To } + +func (tx *AccessListTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *AccessListTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.ChainID, tx.V, tx.R, tx.S = chainID, v, r, s +} diff --git a/core/types/block.go b/core/types/block.go index 8096ebb755..553db003bb 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -23,15 +23,12 @@ import ( "io" "math/big" "reflect" - "sync" "sync/atomic" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" ) var ( @@ -131,22 +128,6 @@ func (h *Header) SanityCheck() error { return nil } -// hasherPool holds LegacyKeccak hashers. -var hasherPool = sync.Pool{ - New: func() interface{} { - return sha3.NewLegacyKeccak256() - }, -} - -func rlpHash(x interface{}) (h common.Hash) { - sha := hasherPool.Get().(crypto.KeccakState) - defer hasherPool.Put(sha) - sha.Reset() - rlp.Encode(sha, x) - sha.Read(h[:]) - return h -} - // EmptyBody returns true if there is no additional 'body' to complete the header // that is: no transactions and no uncles. func (h *Header) EmptyBody() bool { @@ -221,7 +202,7 @@ type storageblock struct { // The values of TxHash, UncleHash, ReceiptHash and Bloom in header // are ignored and set to values derived from the given txs, uncles // and receipts. 
-func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher Hasher) *Block { +func NewBlock(header *Header, txs []*Transaction, uncles []*Header, receipts []*Receipt, hasher TrieHasher) *Block { b := &Block{header: CopyHeader(header), td: new(big.Int)} // TODO: panic if len(txs) != len(receipts) diff --git a/core/types/block_test.go b/core/types/block_test.go index 4dfdcf9545..63904f882c 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -59,6 +59,66 @@ func TestBlockEncoding(t *testing.T) { tx1, _ = tx1.WithSignature(HomesteadSigner{}, common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100")) check("len(Transactions)", len(block.Transactions()), 1) check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash()) + ourBlockEnc, err := rlp.EncodeToBytes(&block) + if err != nil { + t.Fatal("encode error: ", err) + } + if !bytes.Equal(ourBlockEnc, blockEnc) { + t.Errorf("encoded block mismatch:\ngot: %x\nwant: %x", ourBlockEnc, blockEnc) + } +} + +func TestEIP2718BlockEncoding(t *testing.T) { + blockEnc := common.FromHex("f90319f90211a00000000000000000000000000000000000000000000000000000000000000000a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a0ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017a0e6e49996c7ec59f7a23d22b83239a60151512c65613bf84a0d7da336399ebc4aa0cafe75574d59780665a97fbfd11365c7545aa8f1abf4e5e12e8243334ef7286bb901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000083020000820200832fefd882a410845506eb0796636f6f6c65737420626c6f636b206f6e20636861696ea0bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff49888a13a5a8c8f2bb1c4f90101f85f800a82c35094095e7baea6a6c7c4c2dfeb977efac326af552d870a801ba09bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094fa08a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b1b89e01f89b01800a8301e24194095e7baea6a6c7c4c2dfeb977efac326af552d878080f838f7940000000000000000000000000000000000000001e1a0000000000000000000000000000000000000000000000000000000000000000001a03dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335a0476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef14c0") + var block Block + if err := rlp.DecodeBytes(blockEnc, &block); err != nil { + t.Fatal("decode error: ", err) + } + + check := func(f string, got, want interface{}) { + if !reflect.DeepEqual(got, want) { + t.Errorf("%s mismatch: got %v, want %v", f, got, want) + } + } + check("Difficulty", block.Difficulty(), big.NewInt(131072)) + check("GasLimit", block.GasLimit(), uint64(3141592)) + check("GasUsed", block.GasUsed(), uint64(42000)) + check("Coinbase", block.Coinbase(), common.HexToAddress("8888f1f195afa192cfee860698584c030f4c9db1")) + check("MixDigest", block.MixDigest(), common.HexToHash("bd4472abb6659ebe3ee06ee4d7b72a00a9f4d001caca51342001075469aff498")) + check("Root", block.Root(), 
common.HexToHash("ef1552a40b7165c3cd773806b9e0c165b75356e0314bf0706f279c729f51e017")) + check("Nonce", block.Nonce(), uint64(0xa13a5a8c8f2bb1c4)) + check("Time", block.Time(), uint64(1426516743)) + check("Size", block.Size(), common.StorageSize(len(blockEnc))) + + // Create legacy tx. + to := common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87") + tx1 := NewTx(&LegacyTx{ + Nonce: 0, + To: &to, + Value: big.NewInt(10), + Gas: 50000, + GasPrice: big.NewInt(10), + }) + sig := common.Hex2Bytes("9bea4c4daac7c7c52e093e6a4c35dbbcf8856f1af7b059ba20253e70848d094f8a8fae537ce25ed8cb5af9adac3f141af69bd515bd2ba031522df09b97dd72b100") + tx1, _ = tx1.WithSignature(HomesteadSigner{}, sig) + + // Create ACL tx. + addr := common.HexToAddress("0x0000000000000000000000000000000000000001") + tx2 := NewTx(&AccessListTx{ + ChainID: big.NewInt(1), + Nonce: 0, + To: &to, + Gas: 123457, + GasPrice: big.NewInt(10), + AccessList: AccessList{{Address: addr, StorageKeys: []common.Hash{{0}}}}, + }) + sig2 := common.Hex2Bytes("3dbacc8d0259f2508625e97fdfc57cd85fdd16e5821bc2c10bdd1a52649e8335476e10695b183a87b0aa292a7f4b78ef0c3fbe62aa2c42c84e1d9c3da159ef1401") + tx2, _ = tx2.WithSignature(NewEIP2930Signer(big.NewInt(1)), sig2) + + check("len(Transactions)", len(block.Transactions()), 2) + check("Transactions[0].Hash", block.Transactions()[0].Hash(), tx1.Hash()) + check("Transactions[1].Hash", block.Transactions()[1].Hash(), tx2.Hash()) + check("Transactions[1].Type()", block.Transactions()[1].Type(), uint8(AccessListTxType)) ourBlockEnc, err := rlp.EncodeToBytes(&block) if err != nil { @@ -121,7 +181,7 @@ func makeBenchBlock() *Block { key, _ = crypto.GenerateKey() txs = make([]*Transaction, 70) receipts = make([]*Receipt, len(txs)) - signer = NewEIP155Signer(params.TestChainConfig.ChainID) + signer = LatestSigner(params.TestChainConfig) uncles = make([]*Header, 3) ) header := &Header{ diff --git a/core/types/derive_sha.go b/core/types/derive_sha.go deleted file mode 100644 index 51a10f3f3d..0000000000 --- a/core/types/derive_sha.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package types - -import ( - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/rlp" -) - -type DerivableList interface { - Len() int - GetRlp(i int) []byte -} - -// Hasher is the tool used to calculate the hash of derivable list. -type Hasher interface { - Reset() - Update([]byte, []byte) - Hash() common.Hash -} - -func DeriveSha(list DerivableList, hasher Hasher) common.Hash { - hasher.Reset() - - // StackTrie requires values to be inserted in increasing - // hash order, which is not the order that `list` provides - // hashes in. This insertion sequence ensures that the - // order is correct. 
- - var buf []byte - for i := 1; i < list.Len() && i <= 0x7f; i++ { - buf = rlp.AppendUint64(buf[:0], uint64(i)) - hasher.Update(buf, list.GetRlp(i)) - } - if list.Len() > 0 { - buf = rlp.AppendUint64(buf[:0], 0) - hasher.Update(buf, list.GetRlp(0)) - } - for i := 0x80; i < list.Len(); i++ { - buf = rlp.AppendUint64(buf[:0], uint64(i)) - hasher.Update(buf, list.GetRlp(i)) - } - return hasher.Hash() -} diff --git a/core/types/gen_access_tuple.go b/core/types/gen_access_tuple.go new file mode 100644 index 0000000000..fc48a84cc0 --- /dev/null +++ b/core/types/gen_access_tuple.go @@ -0,0 +1,43 @@ +// Code generated by github.com/fjl/gencodec. DO NOT EDIT. + +package types + +import ( + "encoding/json" + "errors" + + "github.com/ethereum/go-ethereum/common" +) + +// MarshalJSON marshals as JSON. +func (a AccessTuple) MarshalJSON() ([]byte, error) { + type AccessTuple struct { + Address common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` + } + var enc AccessTuple + enc.Address = a.Address + enc.StorageKeys = a.StorageKeys + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (a *AccessTuple) UnmarshalJSON(input []byte) error { + type AccessTuple struct { + Address *common.Address `json:"address" gencodec:"required"` + StorageKeys []common.Hash `json:"storageKeys" gencodec:"required"` + } + var dec AccessTuple + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + if dec.Address == nil { + return errors.New("missing required field 'address' for AccessTuple") + } + a.Address = *dec.Address + if dec.StorageKeys == nil { + return errors.New("missing required field 'storageKeys' for AccessTuple") + } + a.StorageKeys = dec.StorageKeys + return nil +} diff --git a/core/types/gen_receipt_json.go b/core/types/gen_receipt_json.go index 790ed65b58..bb892f85be 100644 --- a/core/types/gen_receipt_json.go +++ b/core/types/gen_receipt_json.go @@ -16,6 +16,7 @@ var _ = (*receiptMarshaling)(nil) // MarshalJSON marshals as JSON. func (r Receipt) MarshalJSON() ([]byte, error) { type Receipt struct { + Type hexutil.Uint64 `json:"type,omitempty"` PostState hexutil.Bytes `json:"root"` Status hexutil.Uint64 `json:"status"` CumulativeGasUsed hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` @@ -29,6 +30,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { TransactionIndex hexutil.Uint `json:"transactionIndex"` } var enc Receipt + enc.Type = hexutil.Uint64(r.Type) enc.PostState = r.PostState enc.Status = hexutil.Uint64(r.Status) enc.CumulativeGasUsed = hexutil.Uint64(r.CumulativeGasUsed) @@ -46,6 +48,7 @@ func (r Receipt) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals from JSON. func (r *Receipt) UnmarshalJSON(input []byte) error { type Receipt struct { + Type *hexutil.Uint64 `json:"type,omitempty"` PostState *hexutil.Bytes `json:"root"` Status *hexutil.Uint64 `json:"status"` CumulativeGasUsed *hexutil.Uint64 `json:"cumulativeGasUsed" gencodec:"required"` @@ -62,6 +65,9 @@ func (r *Receipt) UnmarshalJSON(input []byte) error { if err := json.Unmarshal(input, &dec); err != nil { return err } + if dec.Type != nil { + r.Type = uint8(*dec.Type) + } if dec.PostState != nil { r.PostState = *dec.PostState } diff --git a/core/types/gen_tx_json.go b/core/types/gen_tx_json.go deleted file mode 100644 index e676058ecc..0000000000 --- a/core/types/gen_tx_json.go +++ /dev/null @@ -1,101 +0,0 @@ -// Code generated by github.com/fjl/gencodec. DO NOT EDIT. 
- -package types - -import ( - "encoding/json" - "errors" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -var _ = (*txdataMarshaling)(nil) - -// MarshalJSON marshals as JSON. -func (t txdata) MarshalJSON() ([]byte, error) { - type txdata struct { - AccountNonce hexutil.Uint64 `json:"nonce" gencodec:"required"` - Price *hexutil.Big `json:"gasPrice" gencodec:"required"` - GasLimit hexutil.Uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` - Amount *hexutil.Big `json:"value" gencodec:"required"` - Payload hexutil.Bytes `json:"input" gencodec:"required"` - V *hexutil.Big `json:"v" gencodec:"required"` - R *hexutil.Big `json:"r" gencodec:"required"` - S *hexutil.Big `json:"s" gencodec:"required"` - Hash *common.Hash `json:"hash" rlp:"-"` - } - var enc txdata - enc.AccountNonce = hexutil.Uint64(t.AccountNonce) - enc.Price = (*hexutil.Big)(t.Price) - enc.GasLimit = hexutil.Uint64(t.GasLimit) - enc.Recipient = t.Recipient - enc.Amount = (*hexutil.Big)(t.Amount) - enc.Payload = t.Payload - enc.V = (*hexutil.Big)(t.V) - enc.R = (*hexutil.Big)(t.R) - enc.S = (*hexutil.Big)(t.S) - enc.Hash = t.Hash - return json.Marshal(&enc) -} - -// UnmarshalJSON unmarshals from JSON. -func (t *txdata) UnmarshalJSON(input []byte) error { - type txdata struct { - AccountNonce *hexutil.Uint64 `json:"nonce" gencodec:"required"` - Price *hexutil.Big `json:"gasPrice" gencodec:"required"` - GasLimit *hexutil.Uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` - Amount *hexutil.Big `json:"value" gencodec:"required"` - Payload *hexutil.Bytes `json:"input" gencodec:"required"` - V *hexutil.Big `json:"v" gencodec:"required"` - R *hexutil.Big `json:"r" gencodec:"required"` - S *hexutil.Big `json:"s" gencodec:"required"` - Hash *common.Hash `json:"hash" rlp:"-"` - } - var dec txdata - if err := json.Unmarshal(input, &dec); err != nil { - return err - } - if dec.AccountNonce == nil { - return errors.New("missing required field 'nonce' for txdata") - } - t.AccountNonce = uint64(*dec.AccountNonce) - if dec.Price == nil { - return errors.New("missing required field 'gasPrice' for txdata") - } - t.Price = (*big.Int)(dec.Price) - if dec.GasLimit == nil { - return errors.New("missing required field 'gas' for txdata") - } - t.GasLimit = uint64(*dec.GasLimit) - if dec.Recipient != nil { - t.Recipient = dec.Recipient - } - if dec.Amount == nil { - return errors.New("missing required field 'value' for txdata") - } - t.Amount = (*big.Int)(dec.Amount) - if dec.Payload == nil { - return errors.New("missing required field 'input' for txdata") - } - t.Payload = *dec.Payload - if dec.V == nil { - return errors.New("missing required field 'v' for txdata") - } - t.V = (*big.Int)(dec.V) - if dec.R == nil { - return errors.New("missing required field 'r' for txdata") - } - t.R = (*big.Int)(dec.R) - if dec.S == nil { - return errors.New("missing required field 's' for txdata") - } - t.S = (*big.Int)(dec.S) - if dec.Hash != nil { - t.Hash = dec.Hash - } - return nil -} diff --git a/core/types/hashing.go b/core/types/hashing.go new file mode 100644 index 0000000000..71efb25a9a --- /dev/null +++ b/core/types/hashing.go @@ -0,0 +1,112 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "bytes" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "golang.org/x/crypto/sha3" +) + +// hasherPool holds LegacyKeccak256 hashers for rlpHash. +var hasherPool = sync.Pool{ + New: func() interface{} { return sha3.NewLegacyKeccak256() }, +} + +// deriveBufferPool holds temporary encoder buffers for DeriveSha and TX encoding. +var encodeBufferPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + +func rlpHash(x interface{}) (h common.Hash) { + sha := hasherPool.Get().(crypto.KeccakState) + defer hasherPool.Put(sha) + sha.Reset() + rlp.Encode(sha, x) + sha.Read(h[:]) + return h +} + +// prefixedRlpHash writes the prefix into the hasher before rlp-encoding the +// given interface. It's used for typed transactions. +func prefixedRlpHash(prefix byte, x interface{}) (h common.Hash) { + sha := hasherPool.Get().(crypto.KeccakState) + defer hasherPool.Put(sha) + sha.Reset() + sha.Write([]byte{prefix}) + rlp.Encode(sha, x) + sha.Read(h[:]) + return h +} + +// TrieHasher is the tool used to calculate the hash of derivable list. +// This is internal, do not use. +type TrieHasher interface { + Reset() + Update([]byte, []byte) + Hash() common.Hash +} + +// DerivableList is the input to DeriveSha. +// It is implemented by the 'Transactions' and 'Receipts' types. +// This is internal, do not use these methods. +type DerivableList interface { + Len() int + EncodeIndex(int, *bytes.Buffer) +} + +func encodeForDerive(list DerivableList, i int, buf *bytes.Buffer) []byte { + buf.Reset() + list.EncodeIndex(i, buf) + // It's really unfortunate that we need to do perform this copy. + // StackTrie holds onto the values until Hash is called, so the values + // written to it must not alias. + return common.CopyBytes(buf.Bytes()) +} + +// DeriveSha creates the tree hashes of transactions and receipts in a block header. +func DeriveSha(list DerivableList, hasher TrieHasher) common.Hash { + hasher.Reset() + + valueBuf := encodeBufferPool.Get().(*bytes.Buffer) + defer encodeBufferPool.Put(valueBuf) + + // StackTrie requires values to be inserted in increasing hash order, which is not the + // order that `list` provides hashes in. This insertion sequence ensures that the + // order is correct. 
+ var indexBuf []byte + for i := 1; i < list.Len() && i <= 0x7f; i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := encodeForDerive(list, i, valueBuf) + hasher.Update(indexBuf, value) + } + if list.Len() > 0 { + indexBuf = rlp.AppendUint64(indexBuf[:0], 0) + value := encodeForDerive(list, 0, valueBuf) + hasher.Update(indexBuf, value) + } + for i := 0x80; i < list.Len(); i++ { + indexBuf = rlp.AppendUint64(indexBuf[:0], uint64(i)) + value := encodeForDerive(list, i, valueBuf) + hasher.Update(indexBuf, value) + } + return hasher.Hash() +} diff --git a/core/types/hashing_test.go b/core/types/hashing_test.go new file mode 100644 index 0000000000..a948b10ef6 --- /dev/null +++ b/core/types/hashing_test.go @@ -0,0 +1,212 @@ +package types_test + +import ( + "bytes" + "fmt" + "io" + "math/big" + mrand "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +func TestDeriveSha(t *testing.T) { + txs, err := genTxs(0) + if err != nil { + t.Fatal(err) + } + for len(txs) < 1000 { + exp := types.DeriveSha(txs, new(trie.Trie)) + got := types.DeriveSha(txs, trie.NewStackTrie(nil)) + if !bytes.Equal(got[:], exp[:]) { + t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) + } + newTxs, err := genTxs(uint64(len(txs) + 1)) + if err != nil { + t.Fatal(err) + } + txs = append(txs, newTxs...) + } +} + +// TestEIP2718DeriveSha tests that the input to the DeriveSha function is correct. +func TestEIP2718DeriveSha(t *testing.T) { + for _, tc := range []struct { + rlpData string + exp string + }{ + { + rlpData: "0xb8a701f8a486796f6c6f763380843b9aca008262d4948a8eafb1cf62bfbeb1741769dae1a9dd479961928080f838f7940000000000000000000000000000000000001337e1a0000000000000000000000000000000000000000000000000000000000000000080a0775101f92dcca278a56bfe4d613428624a1ebfc3cd9e0bcc1de80c41455b9021a06c9deac205afe7b124907d4ba54a9f46161498bd3990b90d175aac12c9a40ee9", + exp: "01 01f8a486796f6c6f763380843b9aca008262d4948a8eafb1cf62bfbeb1741769dae1a9dd479961928080f838f7940000000000000000000000000000000000001337e1a0000000000000000000000000000000000000000000000000000000000000000080a0775101f92dcca278a56bfe4d613428624a1ebfc3cd9e0bcc1de80c41455b9021a06c9deac205afe7b124907d4ba54a9f46161498bd3990b90d175aac12c9a40ee9\n80 01f8a486796f6c6f763380843b9aca008262d4948a8eafb1cf62bfbeb1741769dae1a9dd479961928080f838f7940000000000000000000000000000000000001337e1a0000000000000000000000000000000000000000000000000000000000000000080a0775101f92dcca278a56bfe4d613428624a1ebfc3cd9e0bcc1de80c41455b9021a06c9deac205afe7b124907d4ba54a9f46161498bd3990b90d175aac12c9a40ee9\n", + }, + } { + d := &hashToHumanReadable{} + var t1, t2 types.Transaction + rlp.DecodeBytes(common.FromHex(tc.rlpData), &t1) + rlp.DecodeBytes(common.FromHex(tc.rlpData), &t2) + txs := types.Transactions{&t1, &t2} + types.DeriveSha(txs, d) + if tc.exp != string(d.data) { + t.Fatalf("Want\n%v\nhave:\n%v", tc.exp, string(d.data)) + } + } +} + +func BenchmarkDeriveSha200(b *testing.B) { + txs, err := genTxs(200) + if err != nil { + b.Fatal(err) + } + var exp common.Hash + var got common.Hash + b.Run("std_trie", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + exp = types.DeriveSha(txs, new(trie.Trie)) + } + }) + + b.Run("stack_trie", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < 
b.N; i++ { + got = types.DeriveSha(txs, trie.NewStackTrie(nil)) + } + }) + if got != exp { + b.Errorf("got %x exp %x", got, exp) + } +} + +func TestFuzzDeriveSha(t *testing.T) { + // increase this for longer runs -- it's set to quite low for travis + rndSeed := mrand.Int() + for i := 0; i < 10; i++ { + seed := rndSeed + i + exp := types.DeriveSha(newDummy(i), new(trie.Trie)) + got := types.DeriveSha(newDummy(i), trie.NewStackTrie(nil)) + if !bytes.Equal(got[:], exp[:]) { + printList(newDummy(seed)) + t.Fatalf("seed %d: got %x exp %x", seed, got, exp) + } + } +} + +// TestDerivableList contains testcases found via fuzzing +func TestDerivableList(t *testing.T) { + type tcase []string + tcs := []tcase{ + { + "0xc041", + }, + { + "0xf04cf757812428b0763112efb33b6f4fad7deb445e", + "0xf04cf757812428b0763112efb33b6f4fad7deb445e", + }, + { + "0xca410605310cdc3bb8d4977ae4f0143df54a724ed873457e2272f39d66e0460e971d9d", + "0x6cd850eca0a7ac46bb1748d7b9cb88aa3bd21c57d852c28198ad8fa422c4595032e88a4494b4778b36b944fe47a52b8c5cd312910139dfcb4147ab8e972cc456bcb063f25dd78f54c4d34679e03142c42c662af52947d45bdb6e555751334ace76a5080ab5a0256a1d259855dfc5c0b8023b25befbb13fd3684f9f755cbd3d63544c78ee2001452dd54633a7593ade0b183891a0a4e9c7844e1254005fbe592b1b89149a502c24b6e1dca44c158aebedf01beae9c30cabe16a", + "0x14abd5c47c0be87b0454596baad2", + "0xca410605310cdc3bb8d4977ae4f0143df54a724ed873457e2272f39d66e0460e971d9d", + }, + } + for i, tc := range tcs[1:] { + exp := types.DeriveSha(flatList(tc), new(trie.Trie)) + got := types.DeriveSha(flatList(tc), trie.NewStackTrie(nil)) + if !bytes.Equal(got[:], exp[:]) { + t.Fatalf("case %d: got %x exp %x", i, got, exp) + } + } +} + +func genTxs(num uint64) (types.Transactions, error) { + key, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") + if err != nil { + return nil, err + } + var addr = crypto.PubkeyToAddress(key.PublicKey) + newTx := func(i uint64) (*types.Transaction, error) { + signer := types.NewEIP155Signer(big.NewInt(18)) + utx := types.NewTransaction(i, addr, new(big.Int), 0, new(big.Int).SetUint64(10000000), nil) + tx, err := types.SignTx(utx, signer, key) + return tx, err + } + var txs types.Transactions + for i := uint64(0); i < num; i++ { + tx, err := newTx(i) + if err != nil { + return nil, err + } + txs = append(txs, tx) + } + return txs, nil +} + +type dummyDerivableList struct { + len int + seed int +} + +func newDummy(seed int) *dummyDerivableList { + d := &dummyDerivableList{} + src := mrand.NewSource(int64(seed)) + // don't use lists longer than 4K items + d.len = int(src.Int63() & 0x0FFF) + d.seed = seed + return d +} + +func (d *dummyDerivableList) Len() int { + return d.len +} + +func (d *dummyDerivableList) EncodeIndex(i int, w *bytes.Buffer) { + src := mrand.NewSource(int64(d.seed + i)) + // max item size 256, at least 1 byte per item + size := 1 + src.Int63()&0x00FF + io.CopyN(w, mrand.New(src), size) +} + +func printList(l types.DerivableList) { + fmt.Printf("list length: %d\n", l.Len()) + fmt.Printf("{\n") + for i := 0; i < l.Len(); i++ { + var buf bytes.Buffer + l.EncodeIndex(i, &buf) + fmt.Printf("\"0x%x\",\n", buf.Bytes()) + } + fmt.Printf("},\n") +} + +type flatList []string + +func (f flatList) Len() int { + return len(f) +} +func (f flatList) EncodeIndex(i int, w *bytes.Buffer) { + w.Write(hexutil.MustDecode(f[i])) +} + +type hashToHumanReadable struct { + data []byte +} + +func (d *hashToHumanReadable) Reset() { + d.data = make([]byte, 0) +} + +func (d *hashToHumanReadable) Update(i []byte, i2 
[]byte) { + l := fmt.Sprintf("%x %x\n", i, i2) + d.data = append(d.data, []byte(l)...) +} + +func (d *hashToHumanReadable) Hash() common.Hash { + return common.Hash{} +} diff --git a/core/types/legacy_tx.go b/core/types/legacy_tx.go new file mode 100644 index 0000000000..41ad44f379 --- /dev/null +++ b/core/types/legacy_tx.go @@ -0,0 +1,111 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package types + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" +) + +// LegacyTx is the transaction data of regular Ethereum transactions. +type LegacyTx struct { + Nonce uint64 // nonce of sender account + GasPrice *big.Int // wei per gas + Gas uint64 // gas limit + To *common.Address `rlp:"nil"` // nil means contract creation + Value *big.Int // wei amount + Data []byte // contract invocation input data + V, R, S *big.Int // signature values +} + +// NewTransaction creates an unsigned legacy transaction. +// Deprecated: use NewTx instead. +func NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { + return NewTx(&LegacyTx{ + Nonce: nonce, + To: &to, + Value: amount, + Gas: gasLimit, + GasPrice: gasPrice, + Data: data, + }) +} + +// NewContractCreation creates an unsigned legacy transaction. +// Deprecated: use NewTx instead. +func NewContractCreation(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { + return NewTx(&LegacyTx{ + Nonce: nonce, + Value: amount, + Gas: gasLimit, + GasPrice: gasPrice, + Data: data, + }) +} + +// copy creates a deep copy of the transaction data and initializes all fields. +func (tx *LegacyTx) copy() TxData { + cpy := &LegacyTx{ + Nonce: tx.Nonce, + To: tx.To, // TODO: copy pointed-to address + Data: common.CopyBytes(tx.Data), + Gas: tx.Gas, + // These are initialized below. + Value: new(big.Int), + GasPrice: new(big.Int), + V: new(big.Int), + R: new(big.Int), + S: new(big.Int), + } + if tx.Value != nil { + cpy.Value.Set(tx.Value) + } + if tx.GasPrice != nil { + cpy.GasPrice.Set(tx.GasPrice) + } + if tx.V != nil { + cpy.V.Set(tx.V) + } + if tx.R != nil { + cpy.R.Set(tx.R) + } + if tx.S != nil { + cpy.S.Set(tx.S) + } + return cpy +} + +// accessors for innerTx. 
+ +func (tx *LegacyTx) txType() byte { return LegacyTxType } +func (tx *LegacyTx) chainID() *big.Int { return deriveChainId(tx.V) } +func (tx *LegacyTx) accessList() AccessList { return nil } +func (tx *LegacyTx) data() []byte { return tx.Data } +func (tx *LegacyTx) gas() uint64 { return tx.Gas } +func (tx *LegacyTx) gasPrice() *big.Int { return tx.GasPrice } +func (tx *LegacyTx) value() *big.Int { return tx.Value } +func (tx *LegacyTx) nonce() uint64 { return tx.Nonce } +func (tx *LegacyTx) to() *common.Address { return tx.To } + +func (tx *LegacyTx) rawSignatureValues() (v, r, s *big.Int) { + return tx.V, tx.R, tx.S +} + +func (tx *LegacyTx) setSignatureValues(chainID, v, r, s *big.Int) { + tx.V, tx.R, tx.S = v, r, s +} diff --git a/core/types/receipt.go b/core/types/receipt.go index a96c7525ef..48f4aef06a 100644 --- a/core/types/receipt.go +++ b/core/types/receipt.go @@ -38,6 +38,8 @@ var ( receiptStatusSuccessfulRLP = []byte{0x01} ) +var errEmptyTypedReceipt = errors.New("empty typed receipt bytes") + const ( // ReceiptStatusFailed is the status code of a transaction if execution failed. ReceiptStatusFailed = uint64(0) @@ -49,6 +51,7 @@ const ( // Receipt represents the results of a transaction. type Receipt struct { // Consensus fields: These fields are defined by the Yellow Paper + Type uint8 `json:"type,omitempty"` PostState []byte `json:"root"` Status uint64 `json:"status"` CumulativeGasUsed uint64 `json:"cumulativeGasUsed" gencodec:"required"` @@ -69,6 +72,7 @@ type Receipt struct { } type receiptMarshaling struct { + Type hexutil.Uint64 PostState hexutil.Bytes Status hexutil.Uint64 CumulativeGasUsed hexutil.Uint64 @@ -114,8 +118,13 @@ type v3StoredReceiptRLP struct { } // NewReceipt creates a barebone transaction receipt, copying the init fields. +// Deprecated: create receipts using a struct literal instead. func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt { - r := &Receipt{PostState: common.CopyBytes(root), CumulativeGasUsed: cumulativeGasUsed} + r := &Receipt{ + Type: LegacyTxType, + PostState: common.CopyBytes(root), + CumulativeGasUsed: cumulativeGasUsed, + } if failed { r.Status = ReceiptStatusFailed } else { @@ -127,21 +136,65 @@ func NewReceipt(root []byte, failed bool, cumulativeGasUsed uint64) *Receipt { // EncodeRLP implements rlp.Encoder, and flattens the consensus fields of a receipt // into an RLP stream. If no post state is present, byzantium fork is assumed. func (r *Receipt) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs}) + data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} + if r.Type == LegacyTxType { + return rlp.Encode(w, data) + } + // It's an EIP-2718 typed TX receipt. + if r.Type != AccessListTxType { + return ErrTxTypeNotSupported + } + buf := encodeBufferPool.Get().(*bytes.Buffer) + defer encodeBufferPool.Put(buf) + buf.Reset() + buf.WriteByte(r.Type) + if err := rlp.Encode(buf, data); err != nil { + return err + } + return rlp.Encode(w, buf.Bytes()) } // DecodeRLP implements rlp.Decoder, and loads the consensus fields of a receipt // from an RLP stream. func (r *Receipt) DecodeRLP(s *rlp.Stream) error { - var dec receiptRLP - if err := s.Decode(&dec); err != nil { - return err - } - if err := r.setStatus(dec.PostStateOrStatus); err != nil { + kind, _, err := s.Kind() + switch { + case err != nil: return err + case kind == rlp.List: + // It's a legacy receipt. 
+ var dec receiptRLP + if err := s.Decode(&dec); err != nil { + return err + } + r.Type = LegacyTxType + return r.setFromRLP(dec) + case kind == rlp.String: + // It's an EIP-2718 typed tx receipt. + b, err := s.Bytes() + if err != nil { + return err + } + if len(b) == 0 { + return errEmptyTypedReceipt + } + r.Type = b[0] + if r.Type == AccessListTxType { + var dec receiptRLP + if err := rlp.DecodeBytes(b[1:], &dec); err != nil { + return err + } + return r.setFromRLP(dec) + } + return ErrTxTypeNotSupported + default: + return rlp.ErrExpectedList } - r.CumulativeGasUsed, r.Bloom, r.Logs = dec.CumulativeGasUsed, dec.Bloom, dec.Logs - return nil +} + +func (r *Receipt) setFromRLP(data receiptRLP) error { + r.CumulativeGasUsed, r.Bloom, r.Logs = data.CumulativeGasUsed, data.Bloom, data.Logs + return r.setStatus(data.PostStateOrStatus) } func (r *Receipt) setStatus(postStateOrStatus []byte) error { @@ -172,7 +225,6 @@ func (r *Receipt) statusEncoding() []byte { // to approximate and limit the memory consumption of various caches. func (r *Receipt) Size() common.StorageSize { size := common.StorageSize(unsafe.Sizeof(*r)) + common.StorageSize(len(r.PostState)) - size += common.StorageSize(len(r.Logs)) * common.StorageSize(unsafe.Sizeof(Log{})) for _, log := range r.Logs { size += common.StorageSize(len(log.Topics)*common.HashLength + len(log.Data)) @@ -277,19 +329,27 @@ func decodeV3StoredReceiptRLP(r *ReceiptForStorage, blob []byte) error { return nil } -// Receipts is a wrapper around a Receipt array to implement DerivableList. +// Receipts implements DerivableList for receipts. type Receipts []*Receipt // Len returns the number of receipts in this list. -func (r Receipts) Len() int { return len(r) } - -// GetRlp returns the RLP encoding of one receipt from the list. -func (r Receipts) GetRlp(i int) []byte { - bytes, err := rlp.EncodeToBytes(r[i]) - if err != nil { - panic(err) +func (rs Receipts) Len() int { return len(rs) } + +// EncodeIndex encodes the i'th receipt to w. +func (rs Receipts) EncodeIndex(i int, w *bytes.Buffer) { + r := rs[i] + data := &receiptRLP{r.statusEncoding(), r.CumulativeGasUsed, r.Bloom, r.Logs} + switch r.Type { + case LegacyTxType: + rlp.Encode(w, data) + case AccessListTxType: + w.WriteByte(AccessListTxType) + rlp.Encode(w, data) + default: + // For unsupported types, write nothing. Since this is for + // DeriveSha, the error will be caught matching the derived hash + // to the block. 
} - return bytes } // DeriveFields fills the receipts with their computed fields based on consensus @@ -302,7 +362,8 @@ func (r Receipts) DeriveFields(config *params.ChainConfig, hash common.Hash, num return errors.New("transaction and receipt count mismatch") } for i := 0; i < len(r); i++ { - // The transaction hash can be retrieved from the transaction itself + // The transaction type and hash can be retrieved from the transaction itself + r[i].Type = txs[i].Type() r[i].TxHash = txs[i].Hash() // block location fields diff --git a/core/types/receipt_test.go b/core/types/receipt_test.go index 806b3dd2ab..22a316c237 100644 --- a/core/types/receipt_test.go +++ b/core/types/receipt_test.go @@ -29,6 +29,15 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +func TestDecodeEmptyTypedReceipt(t *testing.T) { + input := []byte{0x80} + var r Receipt + err := rlp.DecodeBytes(input, &r) + if err != errEmptyTypedReceipt { + t.Fatal("wrong error:", err) + } +} + func TestLegacyReceiptDecoding(t *testing.T) { tests := []struct { name string @@ -154,9 +163,29 @@ func encodeAsV3StoredReceiptRLP(want *Receipt) ([]byte, error) { // Tests that receipt data can be correctly derived from the contextual infos func TestDeriveFields(t *testing.T) { // Create a few transactions to have receipts for + to2 := common.HexToAddress("0x2") + to3 := common.HexToAddress("0x3") txs := Transactions{ - NewContractCreation(1, big.NewInt(1), 1, big.NewInt(1), nil), - NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, big.NewInt(2), nil), + NewTx(&LegacyTx{ + Nonce: 1, + Value: big.NewInt(1), + Gas: 1, + GasPrice: big.NewInt(1), + }), + NewTx(&LegacyTx{ + To: &to2, + Nonce: 2, + Value: big.NewInt(2), + Gas: 2, + GasPrice: big.NewInt(2), + }), + NewTx(&AccessListTx{ + To: &to3, + Nonce: 3, + Value: big.NewInt(3), + Gas: 3, + GasPrice: big.NewInt(3), + }), } // Create the corresponding receipts receipts := Receipts{ @@ -182,6 +211,18 @@ func TestDeriveFields(t *testing.T) { ContractAddress: common.BytesToAddress([]byte{0x02, 0x22, 0x22}), GasUsed: 2, }, + &Receipt{ + Type: AccessListTxType, + PostState: common.Hash{3}.Bytes(), + CumulativeGasUsed: 6, + Logs: []*Log{ + {Address: common.BytesToAddress([]byte{0x33})}, + {Address: common.BytesToAddress([]byte{0x03, 0x33})}, + }, + TxHash: txs[2].Hash(), + ContractAddress: common.BytesToAddress([]byte{0x03, 0x33, 0x33}), + GasUsed: 3, + }, } // Clear all the computed fields and re-derive them number := big.NewInt(1) @@ -196,6 +237,9 @@ func TestDeriveFields(t *testing.T) { logIndex := uint(0) for i := range receipts { + if receipts[i].Type != txs[i].Type() { + t.Errorf("receipts[%d].Type = %d, want %d", i, receipts[i].Type, txs[i].Type()) + } if receipts[i].TxHash != txs[i].Hash() { t.Errorf("receipts[%d].TxHash = %s, want %s", i, receipts[i].TxHash.String(), txs[i].Hash().String()) } @@ -243,6 +287,34 @@ func TestDeriveFields(t *testing.T) { } } +// TestTypedReceiptEncodingDecoding reproduces a flaw that existed in the receipt +// rlp decoder, which failed due to a shadowing error. 
+func TestTypedReceiptEncodingDecoding(t *testing.T) { + var payload = common.FromHex("f9043eb9010c01f90108018262d4b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010c01f901080182cd14b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010d01f901090183013754b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0b9010d01f90109018301a194b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0") + check := func(bundle []*Receipt) { + t.Helper() + for i, receipt := range bundle { + if got, want := receipt.Type, uint8(1); got != want { + t.Fatalf("bundle %d: got %x, want %x", i, got, want) + } + } + } + { + var bundle []*Receipt + rlp.DecodeBytes(payload, &bundle) + check(bundle) + } + { + var bundle []*Receipt + r := bytes.NewReader(payload) + s := rlp.NewStream(r, uint64(len(payload))) + if err := s.Decode(&bundle); err != nil { + t.Fatal(err) + } + check(bundle) + } +} + func clearComputedFieldsOnReceipts(t *testing.T, receipts Receipts) { t.Helper() diff --git a/core/types/transaction.go b/core/types/transaction.go index ec19744881..49127630ae 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -17,6 +17,7 @@ package types import ( + "bytes" "container/heap" "errors" "io" @@ -25,20 +26,28 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) -//go:generate gencodec -type txdata -field-override txdataMarshaling -out gen_tx_json.go - var ( - ErrInvalidSig = errors.New("invalid transaction v, r, s values") + ErrInvalidSig = errors.New("invalid transaction v, r, s values") + ErrUnexpectedProtection = 
errors.New("transaction type does not supported EIP-155 protected signatures") + ErrInvalidTxType = errors.New("transaction type not valid in this context") + ErrTxTypeNotSupported = errors.New("transaction type not supported") + errEmptyTypedTx = errors.New("empty typed transaction bytes") +) + +// Transaction types. +const ( + LegacyTxType = iota + AccessListTxType ) +// Transaction is an Ethereum transaction. type Transaction struct { - data txdata // Consensus contents of a transaction - time time.Time // Time first seen locally (spam avoidance) + inner TxData // Consensus contents of a transaction + time time.Time // Time first seen locally (spam avoidance) // caches hash atomic.Value @@ -46,205 +55,280 @@ type Transaction struct { from atomic.Value } -type txdata struct { - AccountNonce uint64 `json:"nonce" gencodec:"required"` - Price *big.Int `json:"gasPrice" gencodec:"required"` - GasLimit uint64 `json:"gas" gencodec:"required"` - Recipient *common.Address `json:"to" rlp:"nil"` // nil means contract creation - Amount *big.Int `json:"value" gencodec:"required"` - Payload []byte `json:"input" gencodec:"required"` +// NewTx creates a new transaction. +func NewTx(inner TxData) *Transaction { + tx := new(Transaction) + tx.setDecoded(inner.copy(), 0) + return tx +} + +// TxData is the underlying data of a transaction. +// +// This is implemented by LegacyTx and AccessListTx. +type TxData interface { + txType() byte // returns the type ID + copy() TxData // creates a deep copy and initializes all fields - // Signature values - V *big.Int `json:"v" gencodec:"required"` - R *big.Int `json:"r" gencodec:"required"` - S *big.Int `json:"s" gencodec:"required"` + chainID() *big.Int + accessList() AccessList + data() []byte + gas() uint64 + gasPrice() *big.Int + value() *big.Int + nonce() uint64 + to() *common.Address - // This is only used when marshaling to JSON. - Hash *common.Hash `json:"hash" rlp:"-"` + rawSignatureValues() (v, r, s *big.Int) + setSignatureValues(chainID, v, r, s *big.Int) } -type txdataMarshaling struct { - AccountNonce hexutil.Uint64 - Price *hexutil.Big - GasLimit hexutil.Uint64 - Amount *hexutil.Big - Payload hexutil.Bytes - V *hexutil.Big - R *hexutil.Big - S *hexutil.Big +// EncodeRLP implements rlp.Encoder +func (tx *Transaction) EncodeRLP(w io.Writer) error { + if tx.Type() == LegacyTxType { + return rlp.Encode(w, tx.inner) + } + // It's an EIP-2718 typed TX envelope. + buf := encodeBufferPool.Get().(*bytes.Buffer) + defer encodeBufferPool.Put(buf) + buf.Reset() + if err := tx.encodeTyped(buf); err != nil { + return err + } + return rlp.Encode(w, buf.Bytes()) } -func NewTransaction(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - return newTransaction(nonce, &to, amount, gasLimit, gasPrice, data) +// encodeTyped writes the canonical encoding of a typed transaction to w. +func (tx *Transaction) encodeTyped(w *bytes.Buffer) error { + w.WriteByte(tx.Type()) + return rlp.Encode(w, tx.inner) } -func NewContractCreation(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - return newTransaction(nonce, nil, amount, gasLimit, gasPrice, data) +// MarshalBinary returns the canonical encoding of the transaction. +// For legacy transactions, it returns the RLP encoding. For EIP-2718 typed +// transactions, it returns the type and payload. 
+func (tx *Transaction) MarshalBinary() ([]byte, error) { + if tx.Type() == LegacyTxType { + return rlp.EncodeToBytes(tx.inner) + } + var buf bytes.Buffer + err := tx.encodeTyped(&buf) + return buf.Bytes(), err } -func newTransaction(nonce uint64, to *common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *Transaction { - if len(data) > 0 { - data = common.CopyBytes(data) +// DecodeRLP implements rlp.Decoder +func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { + kind, size, err := s.Kind() + switch { + case err != nil: + return err + case kind == rlp.List: + // It's a legacy transaction. + var inner LegacyTx + err := s.Decode(&inner) + if err == nil { + tx.setDecoded(&inner, int(rlp.ListSize(size))) + } + return err + case kind == rlp.String: + // It's an EIP-2718 typed TX envelope. + var b []byte + if b, err = s.Bytes(); err != nil { + return err + } + inner, err := tx.decodeTyped(b) + if err == nil { + tx.setDecoded(inner, len(b)) + } + return err + default: + return rlp.ErrExpectedList } - d := txdata{ - AccountNonce: nonce, - Recipient: to, - Payload: data, - Amount: new(big.Int), - GasLimit: gasLimit, - Price: new(big.Int), - V: new(big.Int), - R: new(big.Int), - S: new(big.Int), +} + +// UnmarshalBinary decodes the canonical encoding of transactions. +// It supports legacy RLP transactions and EIP2718 typed transactions. +func (tx *Transaction) UnmarshalBinary(b []byte) error { + if len(b) > 0 && b[0] > 0x7f { + // It's a legacy transaction. + var data LegacyTx + err := rlp.DecodeBytes(b, &data) + if err != nil { + return err + } + tx.setDecoded(&data, len(b)) + return nil } - if amount != nil { - d.Amount.Set(amount) + // It's an EIP2718 typed transaction envelope. + inner, err := tx.decodeTyped(b) + if err != nil { + return err } - if gasPrice != nil { - d.Price.Set(gasPrice) + tx.setDecoded(inner, len(b)) + return nil +} + +// decodeTyped decodes a typed transaction from the canonical format. +func (tx *Transaction) decodeTyped(b []byte) (TxData, error) { + if len(b) == 0 { + return nil, errEmptyTypedTx } - return &Transaction{ - data: d, - time: time.Now(), + switch b[0] { + case AccessListTxType: + var inner AccessListTx + err := rlp.DecodeBytes(b[1:], &inner) + return &inner, err + default: + return nil, ErrTxTypeNotSupported } } -// ChainId returns which chain id this transaction was signed for (if at all) -func (tx *Transaction) ChainId() *big.Int { - return deriveChainId(tx.data.V) +// setDecoded sets the inner transaction and size after decoding. +func (tx *Transaction) setDecoded(inner TxData, size int) { + tx.inner = inner + tx.time = time.Now() + if size > 0 { + tx.size.Store(common.StorageSize(size)) + } } -// Protected returns whether the transaction is protected from replay protection. -func (tx *Transaction) Protected() bool { - return isProtectedV(tx.data.V) +func sanityCheckSignature(v *big.Int, r *big.Int, s *big.Int, maybeProtected bool) error { + if isProtectedV(v) && !maybeProtected { + return ErrUnexpectedProtection + } + + var plainV byte + if isProtectedV(v) { + chainID := deriveChainId(v).Uint64() + plainV = byte(v.Uint64() - 35 - 2*chainID) + } else if maybeProtected { + // Only EIP-155 signatures can be optionally protected. Since + // we determined this v value is not protected, it must be a + // raw 27 or 28. + plainV = byte(v.Uint64() - 27) + } else { + // If the signature is not optionally protected, we assume it + // must already be equal to the recovery id. 
+ plainV = byte(v.Uint64()) + } + if !crypto.ValidateSignatureValues(plainV, r, s, false) { + return ErrInvalidSig + } + + return nil } func isProtectedV(V *big.Int) bool { if V.BitLen() <= 8 { v := V.Uint64() - return v != 27 && v != 28 + return v != 27 && v != 28 && v != 1 && v != 0 } // anything not 27 or 28 is considered protected return true } -// EncodeRLP implements rlp.Encoder -func (tx *Transaction) EncodeRLP(w io.Writer) error { - return rlp.Encode(w, &tx.data) -} - -// DecodeRLP implements rlp.Decoder -func (tx *Transaction) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - err := s.Decode(&tx.data) - if err == nil { - tx.size.Store(common.StorageSize(rlp.ListSize(size))) - tx.time = time.Now() +// Protected says whether the transaction is replay-protected. +func (tx *Transaction) Protected() bool { + switch tx := tx.inner.(type) { + case *LegacyTx: + return tx.V != nil && isProtectedV(tx.V) + default: + return true } - return err } -// MarshalJSON encodes the web3 RPC transaction format. -func (tx *Transaction) MarshalJSON() ([]byte, error) { - hash := tx.Hash() - data := tx.data - data.Hash = &hash - return data.MarshalJSON() +// Type returns the transaction type. +func (tx *Transaction) Type() uint8 { + return tx.inner.txType() } -// UnmarshalJSON decodes the web3 RPC transaction format. -func (tx *Transaction) UnmarshalJSON(input []byte) error { - var dec txdata - if err := dec.UnmarshalJSON(input); err != nil { - return err - } - withSignature := dec.V.Sign() != 0 || dec.R.Sign() != 0 || dec.S.Sign() != 0 - if withSignature { - var V byte - if isProtectedV(dec.V) { - chainID := deriveChainId(dec.V).Uint64() - V = byte(dec.V.Uint64() - 35 - 2*chainID) - } else { - V = byte(dec.V.Uint64() - 27) - } - if !crypto.ValidateSignatureValues(V, dec.R, dec.S, false) { - return ErrInvalidSig - } - } - *tx = Transaction{ - data: dec, - time: time.Now(), - } - return nil +// ChainId returns the EIP155 chain ID of the transaction. The return value will always be +// non-nil. For legacy transactions which are not replay-protected, the return value is +// zero. +func (tx *Transaction) ChainId() *big.Int { + return tx.inner.chainID() } -func (tx *Transaction) Data() []byte { return common.CopyBytes(tx.data.Payload) } -func (tx *Transaction) Gas() uint64 { return tx.data.GasLimit } -func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.data.Price) } -func (tx *Transaction) GasPriceCmp(other *Transaction) int { - return tx.data.Price.Cmp(other.data.Price) -} -func (tx *Transaction) GasPriceIntCmp(other *big.Int) int { - return tx.data.Price.Cmp(other) -} -func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.data.Amount) } -func (tx *Transaction) Nonce() uint64 { return tx.data.AccountNonce } -func (tx *Transaction) CheckNonce() bool { return true } +// Data returns the input data of the transaction. +func (tx *Transaction) Data() []byte { return tx.inner.data() } + +// AccessList returns the access list of the transaction. +func (tx *Transaction) AccessList() AccessList { return tx.inner.accessList() } + +// Gas returns the gas limit of the transaction. +func (tx *Transaction) Gas() uint64 { return tx.inner.gas() } + +// GasPrice returns the gas price of the transaction. +func (tx *Transaction) GasPrice() *big.Int { return new(big.Int).Set(tx.inner.gasPrice()) } + +// Value returns the ether amount of the transaction. 
+func (tx *Transaction) Value() *big.Int { return new(big.Int).Set(tx.inner.value()) } + +// Nonce returns the sender account nonce of the transaction. +func (tx *Transaction) Nonce() uint64 { return tx.inner.nonce() } // To returns the recipient address of the transaction. -// It returns nil if the transaction is a contract creation. +// For contract-creation transactions, To returns nil. func (tx *Transaction) To() *common.Address { - if tx.data.Recipient == nil { + // Copy the pointed-to address. + ito := tx.inner.to() + if ito == nil { return nil } - to := *tx.data.Recipient - return &to + cpy := *ito + return &cpy +} + +// Cost returns gas * gasPrice + value. +func (tx *Transaction) Cost() *big.Int { + total := new(big.Int).Mul(tx.GasPrice(), new(big.Int).SetUint64(tx.Gas())) + total.Add(total, tx.Value()) + return total +} + +// RawSignatureValues returns the V, R, S signature values of the transaction. +// The return values should not be modified by the caller. +func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { + return tx.inner.rawSignatureValues() +} + +// GasPriceCmp compares the gas prices of two transactions. +func (tx *Transaction) GasPriceCmp(other *Transaction) int { + return tx.inner.gasPrice().Cmp(other.GasPrice()) } -// Hash hashes the RLP encoding of tx. -// It uniquely identifies the transaction. +// GasPriceIntCmp compares the gas price of the transaction against the given price. +func (tx *Transaction) GasPriceIntCmp(other *big.Int) int { + return tx.inner.gasPrice().Cmp(other) +} + +// Hash returns the transaction hash. func (tx *Transaction) Hash() common.Hash { if hash := tx.hash.Load(); hash != nil { return hash.(common.Hash) } - v := rlpHash(tx) - tx.hash.Store(v) - return v + + var h common.Hash + if tx.Type() == LegacyTxType { + h = rlpHash(tx.inner) + } else { + h = prefixedRlpHash(tx.Type(), tx.inner) + } + tx.hash.Store(h) + return h } // Size returns the true RLP encoded storage size of the transaction, either by -// encoding and returning it, or returning a previsouly cached value. +// encoding and returning it, or returning a previously cached value. func (tx *Transaction) Size() common.StorageSize { if size := tx.size.Load(); size != nil { return size.(common.StorageSize) } c := writeCounter(0) - rlp.Encode(&c, &tx.data) + rlp.Encode(&c, &tx.inner) tx.size.Store(common.StorageSize(c)) return common.StorageSize(c) } -// AsMessage returns the transaction as a core.Message. -// -// AsMessage requires a signer to derive the sender. -// -// XXX Rename message to something less arbitrary? -func (tx *Transaction) AsMessage(s Signer) (Message, error) { - msg := Message{ - nonce: tx.data.AccountNonce, - gasLimit: tx.data.GasLimit, - gasPrice: new(big.Int).Set(tx.data.Price), - to: tx.data.Recipient, - amount: tx.data.Amount, - data: tx.data.Payload, - checkNonce: true, - } - - var err error - msg.from, err = Sender(s, tx) - return msg, err -} - // WithSignature returns a new transaction with the given signature. // This signature needs to be in the [R || S || V] format where V is 0 or 1. 
func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, error) { @@ -252,40 +336,27 @@ func (tx *Transaction) WithSignature(signer Signer, sig []byte) (*Transaction, e if err != nil { return nil, err } - cpy := &Transaction{ - data: tx.data, - time: tx.time, - } - cpy.data.R, cpy.data.S, cpy.data.V = r, s, v - return cpy, nil + cpy := tx.inner.copy() + cpy.setSignatureValues(signer.ChainID(), v, r, s) + return &Transaction{inner: cpy, time: tx.time}, nil } -// Cost returns amount + gasprice * gaslimit. -func (tx *Transaction) Cost() *big.Int { - total := new(big.Int).Mul(tx.data.Price, new(big.Int).SetUint64(tx.data.GasLimit)) - total.Add(total, tx.data.Amount) - return total -} - -// RawSignatureValues returns the V, R, S signature values of the transaction. -// The return values should not be modified by the caller. -func (tx *Transaction) RawSignatureValues() (v, r, s *big.Int) { - return tx.data.V, tx.data.R, tx.data.S -} - -// Transactions is a Transaction slice type for basic sorting. +// Transactions implements DerivableList for transactions. type Transactions []*Transaction // Len returns the length of s. func (s Transactions) Len() int { return len(s) } -// Swap swaps the i'th and the j'th element in s. -func (s Transactions) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// GetRlp implements Rlpable and returns the i'th element of s in rlp. -func (s Transactions) GetRlp(i int) []byte { - enc, _ := rlp.EncodeToBytes(s[i]) - return enc +// EncodeIndex encodes the i'th transaction to w. Note that this does not check for errors +// because we assume that *Transaction will only ever contain valid txs that were either +// constructed by decoding or via public API in this package. +func (s Transactions) EncodeIndex(i int, w *bytes.Buffer) { + tx := s[i] + if tx.Type() == LegacyTxType { + rlp.Encode(w, tx.inner) + } else { + tx.encodeTyped(w) + } } // TxDifference returns a new set which is the difference between a and b. 
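The hunk above wires Transactions into the DerivableList machinery via EncodeIndex. As a quick illustration (an editorial sketch, not part of the patch; it assumes the usual github.com/ethereum/go-ethereum import path, and encodeAll is a hypothetical helper), a consumer of the new API could collect the per-transaction encodings like this, getting plain RLP for legacy transactions and the type-prefixed envelope for typed ones:

package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// encodeAll collects what Transactions.EncodeIndex writes for each element:
// legacy txs come out as plain RLP, typed txs with their one-byte type prefix.
func encodeAll(txs types.Transactions) [][]byte {
	out := make([][]byte, txs.Len())
	for i := range out {
		var buf bytes.Buffer
		txs.EncodeIndex(i, &buf)
		out[i] = buf.Bytes()
	}
	return out
}

func main() {
	fmt.Println(len(encodeAll(types.Transactions{}))) // 0 for an empty list
}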
@@ -312,7 +383,7 @@ func TxDifference(a, b Transactions) Transactions { type TxByNonce Transactions func (s TxByNonce) Len() int { return len(s) } -func (s TxByNonce) Less(i, j int) bool { return s[i].data.AccountNonce < s[j].data.AccountNonce } +func (s TxByNonce) Less(i, j int) bool { return s[i].Nonce() < s[j].Nonce() } func (s TxByNonce) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // TxByPriceAndTime implements both the sort and the heap interface, making it useful @@ -323,7 +394,7 @@ func (s TxByPriceAndTime) Len() int { return len(s) } func (s TxByPriceAndTime) Less(i, j int) bool { // If the prices are equal, use the time the transaction was first seen for // deterministic sorting - cmp := s[i].data.Price.Cmp(s[j].data.Price) + cmp := s[i].GasPrice().Cmp(s[j].GasPrice()) if cmp == 0 { return s[i].time.Before(s[j].time) } @@ -361,13 +432,13 @@ func NewTransactionsByPriceAndNonce(signer Signer, txs map[common.Address]Transa // Initialize a price and received time based heap with the head transactions heads := make(TxByPriceAndTime, 0, len(txs)) for from, accTxs := range txs { - heads = append(heads, accTxs[0]) // Ensure the sender address is from the signer - acc, _ := Sender(signer, accTxs[0]) - txs[acc] = accTxs[1:] - if from != acc { + if acc, _ := Sender(signer, accTxs[0]); acc != from { delete(txs, from) + continue } + heads = append(heads, accTxs[0]) + txs[from] = accTxs[1:] } heap.Init(&heads) @@ -416,10 +487,11 @@ type Message struct { gasLimit uint64 gasPrice *big.Int data []byte + accessList AccessList checkNonce bool } -func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte, checkNonce bool) Message { +func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte, accessList AccessList, checkNonce bool) Message { return Message{ from: from, to: to, @@ -428,15 +500,35 @@ func NewMessage(from common.Address, to *common.Address, nonce uint64, amount *b gasLimit: gasLimit, gasPrice: gasPrice, data: data, + accessList: accessList, checkNonce: checkNonce, } } -func (m Message) From() common.Address { return m.from } -func (m Message) To() *common.Address { return m.to } -func (m Message) GasPrice() *big.Int { return m.gasPrice } -func (m Message) Value() *big.Int { return m.amount } -func (m Message) Gas() uint64 { return m.gasLimit } -func (m Message) Nonce() uint64 { return m.nonce } -func (m Message) Data() []byte { return m.data } -func (m Message) CheckNonce() bool { return m.checkNonce } +// AsMessage returns the transaction as a core.Message. 
+func (tx *Transaction) AsMessage(s Signer) (Message, error) { + msg := Message{ + nonce: tx.Nonce(), + gasLimit: tx.Gas(), + gasPrice: new(big.Int).Set(tx.GasPrice()), + to: tx.To(), + amount: tx.Value(), + data: tx.Data(), + accessList: tx.AccessList(), + checkNonce: true, + } + + var err error + msg.from, err = Sender(s, tx) + return msg, err +} + +func (m Message) From() common.Address { return m.from } +func (m Message) To() *common.Address { return m.to } +func (m Message) GasPrice() *big.Int { return m.gasPrice } +func (m Message) Value() *big.Int { return m.amount } +func (m Message) Gas() uint64 { return m.gasLimit } +func (m Message) Nonce() uint64 { return m.nonce } +func (m Message) Data() []byte { return m.data } +func (m Message) AccessList() AccessList { return m.accessList } +func (m Message) CheckNonce() bool { return m.checkNonce } diff --git a/core/types/transaction_marshalling.go b/core/types/transaction_marshalling.go new file mode 100644 index 0000000000..184a17d5b5 --- /dev/null +++ b/core/types/transaction_marshalling.go @@ -0,0 +1,187 @@ +package types + +import ( + "encoding/json" + "errors" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +// txJSON is the JSON representation of transactions. +type txJSON struct { + Type hexutil.Uint64 `json:"type"` + + // Common transaction fields: + Nonce *hexutil.Uint64 `json:"nonce"` + GasPrice *hexutil.Big `json:"gasPrice"` + Gas *hexutil.Uint64 `json:"gas"` + Value *hexutil.Big `json:"value"` + Data *hexutil.Bytes `json:"input"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` + To *common.Address `json:"to"` + + // Access list transaction fields: + ChainID *hexutil.Big `json:"chainId,omitempty"` + AccessList *AccessList `json:"accessList,omitempty"` + + // Only used for encoding: + Hash common.Hash `json:"hash"` +} + +// MarshalJSON marshals as JSON with a hash. +func (t *Transaction) MarshalJSON() ([]byte, error) { + var enc txJSON + // These are set for all tx types. + enc.Hash = t.Hash() + enc.Type = hexutil.Uint64(t.Type()) + + // Other fields are set conditionally depending on tx type. + switch tx := t.inner.(type) { + case *LegacyTx: + enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) + enc.Gas = (*hexutil.Uint64)(&tx.Gas) + enc.GasPrice = (*hexutil.Big)(tx.GasPrice) + enc.Value = (*hexutil.Big)(tx.Value) + enc.Data = (*hexutil.Bytes)(&tx.Data) + enc.To = t.To() + enc.V = (*hexutil.Big)(tx.V) + enc.R = (*hexutil.Big)(tx.R) + enc.S = (*hexutil.Big)(tx.S) + case *AccessListTx: + enc.ChainID = (*hexutil.Big)(tx.ChainID) + enc.AccessList = &tx.AccessList + enc.Nonce = (*hexutil.Uint64)(&tx.Nonce) + enc.Gas = (*hexutil.Uint64)(&tx.Gas) + enc.GasPrice = (*hexutil.Big)(tx.GasPrice) + enc.Value = (*hexutil.Big)(tx.Value) + enc.Data = (*hexutil.Bytes)(&tx.Data) + enc.To = t.To() + enc.V = (*hexutil.Big)(tx.V) + enc.R = (*hexutil.Big)(tx.R) + enc.S = (*hexutil.Big)(tx.S) + } + return json.Marshal(&enc) +} + +// UnmarshalJSON unmarshals from JSON. +func (t *Transaction) UnmarshalJSON(input []byte) error { + var dec txJSON + if err := json.Unmarshal(input, &dec); err != nil { + return err + } + + // Decode / verify fields according to transaction type. 
+ var inner TxData + switch dec.Type { + case LegacyTxType: + var itx LegacyTx + inner = &itx + if dec.To != nil { + itx.To = dec.To + } + if dec.Nonce == nil { + return errors.New("missing required field 'nonce' in transaction") + } + itx.Nonce = uint64(*dec.Nonce) + if dec.GasPrice == nil { + return errors.New("missing required field 'gasPrice' in transaction") + } + itx.GasPrice = (*big.Int)(dec.GasPrice) + if dec.Gas == nil { + return errors.New("missing required field 'gas' in transaction") + } + itx.Gas = uint64(*dec.Gas) + if dec.Value == nil { + return errors.New("missing required field 'value' in transaction") + } + itx.Value = (*big.Int)(dec.Value) + if dec.Data == nil { + return errors.New("missing required field 'input' in transaction") + } + itx.Data = *dec.Data + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + itx.V = (*big.Int)(dec.V) + if dec.R == nil { + return errors.New("missing required field 'r' in transaction") + } + itx.R = (*big.Int)(dec.R) + if dec.S == nil { + return errors.New("missing required field 's' in transaction") + } + itx.S = (*big.Int)(dec.S) + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { + if err := sanityCheckSignature(itx.V, itx.R, itx.S, true); err != nil { + return err + } + } + + case AccessListTxType: + var itx AccessListTx + inner = &itx + // Access list is optional for now. + if dec.AccessList != nil { + itx.AccessList = *dec.AccessList + } + if dec.ChainID == nil { + return errors.New("missing required field 'chainId' in transaction") + } + itx.ChainID = (*big.Int)(dec.ChainID) + if dec.To != nil { + itx.To = dec.To + } + if dec.Nonce == nil { + return errors.New("missing required field 'nonce' in transaction") + } + itx.Nonce = uint64(*dec.Nonce) + if dec.GasPrice == nil { + return errors.New("missing required field 'gasPrice' in transaction") + } + itx.GasPrice = (*big.Int)(dec.GasPrice) + if dec.Gas == nil { + return errors.New("missing required field 'gas' in transaction") + } + itx.Gas = uint64(*dec.Gas) + if dec.Value == nil { + return errors.New("missing required field 'value' in transaction") + } + itx.Value = (*big.Int)(dec.Value) + if dec.Data == nil { + return errors.New("missing required field 'input' in transaction") + } + itx.Data = *dec.Data + if dec.V == nil { + return errors.New("missing required field 'v' in transaction") + } + itx.V = (*big.Int)(dec.V) + if dec.R == nil { + return errors.New("missing required field 'r' in transaction") + } + itx.R = (*big.Int)(dec.R) + if dec.S == nil { + return errors.New("missing required field 's' in transaction") + } + itx.S = (*big.Int)(dec.S) + withSignature := itx.V.Sign() != 0 || itx.R.Sign() != 0 || itx.S.Sign() != 0 + if withSignature { + if err := sanityCheckSignature(itx.V, itx.R, itx.S, false); err != nil { + return err + } + } + + default: + return ErrTxTypeNotSupported + } + + // Now set the inner transaction. + t.setDecoded(inner, 0) + + // TODO: check hash here? + return nil +} diff --git a/core/types/transaction_signing.go b/core/types/transaction_signing.go index 842fedbd03..b4594cb90b 100644 --- a/core/types/transaction_signing.go +++ b/core/types/transaction_signing.go @@ -27,9 +27,7 @@ import ( "github.com/ethereum/go-ethereum/params" ) -var ( - ErrInvalidChainId = errors.New("invalid chain id for signer") -) +var ErrInvalidChainId = errors.New("invalid chain id for signer") // sigCache is used to cache the derived sender and contains // the signer used to derive it. 
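To show how the JSON codec added in core/types/transaction_marshalling.go is meant to be used, here is a small editorial sketch (not part of the patch; it assumes the standard go-ethereum import paths and mirrors the encodeDecodeJSON helper in the tests further down): sign an EIP-2930 transaction, marshal it, and unmarshal it back.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"math/big"

	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}
	signer := types.NewEIP2930Signer(big.NewInt(1))
	tx, err := types.SignNewTx(key, signer, &types.AccessListTx{
		ChainID:  big.NewInt(1),
		Nonce:    1,
		Gas:      21000,
		GasPrice: big.NewInt(1),
		Value:    big.NewInt(0),
	})
	if err != nil {
		log.Fatal(err)
	}
	blob, err := json.Marshal(tx) // emits "type", "chainId", "accessList", v/r/s, ...
	if err != nil {
		log.Fatal(err)
	}
	var decoded types.Transaction
	if err := json.Unmarshal(blob, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.Type() == tx.Type(), decoded.Hash() == tx.Hash())
}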
@@ -42,6 +40,8 @@ type sigCache struct { func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { var signer Signer switch { + case config.IsBerlin(blockNumber): + signer = NewEIP2930Signer(config.ChainID) case config.IsEIP155(blockNumber): signer = NewEIP155Signer(config.ChainID) case config.IsHomestead(blockNumber): @@ -52,7 +52,40 @@ func MakeSigner(config *params.ChainConfig, blockNumber *big.Int) Signer { return signer } -// SignTx signs the transaction using the given signer and private key +// LatestSigner returns the 'most permissive' Signer available for the given chain +// configuration. Specifically, this enables support of EIP-155 replay protection and +// EIP-2930 access list transactions when their respective forks are scheduled to occur at +// any block number in the chain config. +// +// Use this in transaction-handling code where the current block number is unknown. If you +// have the current block number available, use MakeSigner instead. +func LatestSigner(config *params.ChainConfig) Signer { + if config.ChainID != nil { + if config.BerlinBlock != nil || config.YoloV3Block != nil { + return NewEIP2930Signer(config.ChainID) + } + if config.EIP155Block != nil { + return NewEIP155Signer(config.ChainID) + } + } + return HomesteadSigner{} +} + +// LatestSignerForChainID returns the 'most permissive' Signer available. Specifically, +// this enables support for EIP-155 replay protection and all implemented EIP-2718 +// transaction types if chainID is non-nil. +// +// Use this in transaction-handling code where the current block number and fork +// configuration are unknown. If you have a ChainConfig, use LatestSigner instead. +// If you have a ChainConfig and know the current block number, use MakeSigner instead. +func LatestSignerForChainID(chainID *big.Int) Signer { + if chainID == nil { + return HomesteadSigner{} + } + return NewEIP2930Signer(chainID) +} + +// SignTx signs the transaction using the given signer and private key. func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, error) { h := s.Hash(tx) sig, err := crypto.Sign(h[:], prv) @@ -62,6 +95,27 @@ func SignTx(tx *Transaction, s Signer, prv *ecdsa.PrivateKey) (*Transaction, err return tx.WithSignature(s, sig) } +// SignNewTx creates a transaction and signs it. +func SignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) (*Transaction, error) { + tx := NewTx(txdata) + h := s.Hash(tx) + sig, err := crypto.Sign(h[:], prv) + if err != nil { + return nil, err + } + return tx.WithSignature(s, sig) +} + +// MustSignNewTx creates a transaction and signs it. +// This panics if the transaction cannot be signed. +func MustSignNewTx(prv *ecdsa.PrivateKey, s Signer, txdata TxData) *Transaction { + tx, err := SignNewTx(prv, s, txdata) + if err != nil { + panic(err) + } + return tx +} + // Sender returns the address derived from the signature (V, R, S) using secp256k1 // elliptic curve and an error if it failed deriving or upon an incorrect // signature. @@ -88,21 +142,128 @@ func Sender(signer Signer, tx *Transaction) (common.Address, error) { return addr, nil } -// Signer encapsulates transaction signature handling. Note that this interface is not a -// stable API and may change at any time to accommodate new protocol rules. +// Signer encapsulates transaction signature handling. The name of this type is slightly +// misleading because Signers don't actually sign, they're just for validating and +// processing of signatures. 
+// +// Note that this interface is not a stable API and may change at any time to accommodate +// new protocol rules. type Signer interface { // Sender returns the sender address of the transaction. Sender(tx *Transaction) (common.Address, error) + // SignatureValues returns the raw R, S, V values corresponding to the // given signature. SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) - // Hash returns the hash to be signed. + ChainID() *big.Int + + // Hash returns 'signature hash', i.e. the transaction hash that is signed by the + // private key. This hash does not uniquely identify the transaction. Hash(tx *Transaction) common.Hash + // Equal returns true if the given signer is the same as the receiver. Equal(Signer) bool } -// EIP155Transaction implements Signer using the EIP155 rules. +type eip2930Signer struct{ EIP155Signer } + +// NewEIP2930Signer returns a signer that accepts EIP-2930 access list transactions, +// EIP-155 replay protected transactions, and legacy Homestead transactions. +func NewEIP2930Signer(chainId *big.Int) Signer { + return eip2930Signer{NewEIP155Signer(chainId)} +} + +func (s eip2930Signer) ChainID() *big.Int { + return s.chainId +} + +func (s eip2930Signer) Equal(s2 Signer) bool { + x, ok := s2.(eip2930Signer) + return ok && x.chainId.Cmp(s.chainId) == 0 +} + +func (s eip2930Signer) Sender(tx *Transaction) (common.Address, error) { + V, R, S := tx.RawSignatureValues() + switch tx.Type() { + case LegacyTxType: + if !tx.Protected() { + return HomesteadSigner{}.Sender(tx) + } + V = new(big.Int).Sub(V, s.chainIdMul) + V.Sub(V, big8) + case AccessListTxType: + // ACL txs are defined to use 0 and 1 as their recovery id, add + // 27 to become equivalent to unprotected Homestead signatures. + V = new(big.Int).Add(V, big.NewInt(27)) + default: + return common.Address{}, ErrTxTypeNotSupported + } + if tx.ChainId().Cmp(s.chainId) != 0 { + return common.Address{}, ErrInvalidChainId + } + return recoverPlain(s.Hash(tx), R, S, V, true) +} + +func (s eip2930Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { + switch txdata := tx.inner.(type) { + case *LegacyTx: + R, S, V = decodeSignature(sig) + if s.chainId.Sign() != 0 { + V = big.NewInt(int64(sig[64] + 35)) + V.Add(V, s.chainIdMul) + } + case *AccessListTx: + // Check that chain ID of tx matches the signer. We also accept ID zero here, + // because it indicates that the chain ID was not specified in the tx. + if txdata.ChainID.Sign() != 0 && txdata.ChainID.Cmp(s.chainId) != 0 { + return nil, nil, nil, ErrInvalidChainId + } + R, S, _ = decodeSignature(sig) + V = big.NewInt(int64(sig[64])) + default: + return nil, nil, nil, ErrTxTypeNotSupported + } + return R, S, V, nil +} + +// Hash returns the hash to be signed by the sender. +// It does not uniquely identify the transaction. 
+func (s eip2930Signer) Hash(tx *Transaction) common.Hash { + switch tx.Type() { + case LegacyTxType: + return rlpHash([]interface{}{ + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + s.chainId, uint(0), uint(0), + }) + case AccessListTxType: + return prefixedRlpHash( + tx.Type(), + []interface{}{ + s.chainId, + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), + tx.AccessList(), + }) + default: + // This _should_ not happen, but in case someone sends in a bad + // json struct via RPC, it's probably more prudent to return an + // empty hash instead of killing the node with a panic + //panic("Unsupported transaction type: %d", tx.typ) + return common.Hash{} + } +} + +// EIP155Signer implements Signer using the EIP-155 rules. This accepts transactions which +// are replay-protected as well as unprotected homestead transactions. type EIP155Signer struct { chainId, chainIdMul *big.Int } @@ -117,6 +278,10 @@ func NewEIP155Signer(chainId *big.Int) EIP155Signer { } } +func (s EIP155Signer) ChainID() *big.Int { + return s.chainId +} + func (s EIP155Signer) Equal(s2 Signer) bool { eip155, ok := s2.(EIP155Signer) return ok && eip155.chainId.Cmp(s.chainId) == 0 @@ -125,24 +290,28 @@ func (s EIP155Signer) Equal(s2 Signer) bool { var big8 = big.NewInt(8) func (s EIP155Signer) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } if !tx.Protected() { return HomesteadSigner{}.Sender(tx) } if tx.ChainId().Cmp(s.chainId) != 0 { return common.Address{}, ErrInvalidChainId } - V := new(big.Int).Sub(tx.data.V, s.chainIdMul) + V, R, S := tx.RawSignatureValues() + V = new(big.Int).Sub(V, s.chainIdMul) V.Sub(V, big8) - return recoverPlain(s.Hash(tx), tx.data.R, tx.data.S, V, true) + return recoverPlain(s.Hash(tx), R, S, V, true) } // SignatureValues returns signature values. This signature // needs to be in the [R || S || V] format where V is 0 or 1. func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big.Int, err error) { - R, S, V, err = HomesteadSigner{}.SignatureValues(tx, sig) - if err != nil { - return nil, nil, nil, err + if tx.Type() != LegacyTxType { + return nil, nil, nil, ErrTxTypeNotSupported } + R, S, V = decodeSignature(sig) if s.chainId.Sign() != 0 { V = big.NewInt(int64(sig[64] + 35)) V.Add(V, s.chainIdMul) @@ -154,12 +323,12 @@ func (s EIP155Signer) SignatureValues(tx *Transaction, sig []byte) (R, S, V *big // It does not uniquely identify the transaction. func (s EIP155Signer) Hash(tx *Transaction) common.Hash { return rlpHash([]interface{}{ - tx.data.AccountNonce, - tx.data.Price, - tx.data.GasLimit, - tx.data.Recipient, - tx.data.Amount, - tx.data.Payload, + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), s.chainId, uint(0), uint(0), }) } @@ -168,6 +337,10 @@ func (s EIP155Signer) Hash(tx *Transaction) common.Hash { // homestead rules. 
type HomesteadSigner struct{ FrontierSigner } +func (s HomesteadSigner) ChainID() *big.Int { + return nil +} + func (s HomesteadSigner) Equal(s2 Signer) bool { _, ok := s2.(HomesteadSigner) return ok @@ -180,25 +353,39 @@ func (hs HomesteadSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v } func (hs HomesteadSigner) Sender(tx *Transaction) (common.Address, error) { - return recoverPlain(hs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, true) + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } + v, r, s := tx.RawSignatureValues() + return recoverPlain(hs.Hash(tx), r, s, v, true) } type FrontierSigner struct{} +func (s FrontierSigner) ChainID() *big.Int { + return nil +} + func (s FrontierSigner) Equal(s2 Signer) bool { _, ok := s2.(FrontierSigner) return ok } +func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) { + if tx.Type() != LegacyTxType { + return common.Address{}, ErrTxTypeNotSupported + } + v, r, s := tx.RawSignatureValues() + return recoverPlain(fs.Hash(tx), r, s, v, false) +} + // SignatureValues returns signature values. This signature // needs to be in the [R || S || V] format where V is 0 or 1. func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v *big.Int, err error) { - if len(sig) != crypto.SignatureLength { - panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength)) + if tx.Type() != LegacyTxType { + return nil, nil, nil, ErrTxTypeNotSupported } - r = new(big.Int).SetBytes(sig[:32]) - s = new(big.Int).SetBytes(sig[32:64]) - v = new(big.Int).SetBytes([]byte{sig[64] + 27}) + r, s, v = decodeSignature(sig) return r, s, v, nil } @@ -206,17 +393,23 @@ func (fs FrontierSigner) SignatureValues(tx *Transaction, sig []byte) (r, s, v * // It does not uniquely identify the transaction. func (fs FrontierSigner) Hash(tx *Transaction) common.Hash { return rlpHash([]interface{}{ - tx.data.AccountNonce, - tx.data.Price, - tx.data.GasLimit, - tx.data.Recipient, - tx.data.Amount, - tx.data.Payload, + tx.Nonce(), + tx.GasPrice(), + tx.Gas(), + tx.To(), + tx.Value(), + tx.Data(), }) } -func (fs FrontierSigner) Sender(tx *Transaction) (common.Address, error) { - return recoverPlain(fs.Hash(tx), tx.data.R, tx.data.S, tx.data.V, false) +func decodeSignature(sig []byte) (r, s, v *big.Int) { + if len(sig) != crypto.SignatureLength { + panic(fmt.Sprintf("wrong size for signature: got %d, want %d", len(sig), crypto.SignatureLength)) + } + r = new(big.Int).SetBytes(sig[:32]) + s = new(big.Int).SetBytes(sig[32:64]) + v = new(big.Int).SetBytes([]byte{sig[64] + 27}) + return r, s, v } func recoverPlain(sighash common.Hash, R, S, Vb *big.Int, homestead bool) (common.Address, error) { diff --git a/core/types/transaction_test.go b/core/types/transaction_test.go index 159cb0c4c4..3cece9c235 100644 --- a/core/types/transaction_test.go +++ b/core/types/transaction_test.go @@ -20,7 +20,9 @@ import ( "bytes" "crypto/ecdsa" "encoding/json" + "fmt" "math/big" + "reflect" "testing" "time" @@ -32,6 +34,8 @@ import ( // The values in those tests are from the Transaction Tests // at github.com/ethereum/tests. 
var ( + testAddr = common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b") + emptyTx = NewTransaction( 0, common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87"), @@ -41,7 +45,7 @@ var ( rightvrsTx, _ = NewTransaction( 3, - common.HexToAddress("b94f5374fce5edbc8e2a8697c15331677e6ebf0b"), + testAddr, big.NewInt(10), 2000, big.NewInt(1), @@ -50,8 +54,32 @@ var ( HomesteadSigner{}, common.Hex2Bytes("98ff921201554726367d2be8c804a7ff89ccf285ebc57dff8ae4c44b9c19ac4a8887321be575c8095f789dd4c743dfe42c1820f9231f98a962b210e3ac2452a301"), ) + + emptyEip2718Tx = NewTx(&AccessListTx{ + ChainID: big.NewInt(1), + Nonce: 3, + To: &testAddr, + Value: big.NewInt(10), + Gas: 25000, + GasPrice: big.NewInt(1), + Data: common.FromHex("5544"), + }) + + signedEip2718Tx, _ = emptyEip2718Tx.WithSignature( + NewEIP2930Signer(big.NewInt(1)), + common.Hex2Bytes("c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b266032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d3752101"), + ) ) +func TestDecodeEmptyTypedTx(t *testing.T) { + input := []byte{0x80} + var tx Transaction + err := rlp.DecodeBytes(input, &tx) + if err != errEmptyTypedTx { + t.Fatal("wrong error:", err) + } +} + func TestTransactionSigHash(t *testing.T) { var homestead HomesteadSigner if homestead.Hash(emptyTx) != common.HexToHash("c775b99e7ad12f50d819fcd602390467e28141316969f4b57f0626f74fe3b386") { @@ -73,10 +101,121 @@ func TestTransactionEncode(t *testing.T) { } } +func TestEIP2718TransactionSigHash(t *testing.T) { + s := NewEIP2930Signer(big.NewInt(1)) + if s.Hash(emptyEip2718Tx) != common.HexToHash("49b486f0ec0a60dfbbca2d30cb07c9e8ffb2a2ff41f29a1ab6737475f6ff69f3") { + t.Errorf("empty EIP-2718 transaction hash mismatch, got %x", s.Hash(emptyEip2718Tx)) + } + if s.Hash(signedEip2718Tx) != common.HexToHash("49b486f0ec0a60dfbbca2d30cb07c9e8ffb2a2ff41f29a1ab6737475f6ff69f3") { + t.Errorf("signed EIP-2718 transaction hash mismatch, got %x", s.Hash(signedEip2718Tx)) + } +} + +// This test checks signature operations on access list transactions. +func TestEIP2930Signer(t *testing.T) { + + var ( + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + keyAddr = crypto.PubkeyToAddress(key.PublicKey) + signer1 = NewEIP2930Signer(big.NewInt(1)) + signer2 = NewEIP2930Signer(big.NewInt(2)) + tx0 = NewTx(&AccessListTx{Nonce: 1}) + tx1 = NewTx(&AccessListTx{ChainID: big.NewInt(1), Nonce: 1}) + tx2, _ = SignNewTx(key, signer2, &AccessListTx{ChainID: big.NewInt(2), Nonce: 1}) + ) + + tests := []struct { + tx *Transaction + signer Signer + wantSignerHash common.Hash + wantSenderErr error + wantSignErr error + wantHash common.Hash // after signing + }{ + { + tx: tx0, + signer: signer1, + wantSignerHash: common.HexToHash("846ad7672f2a3a40c1f959cd4a8ad21786d620077084d84c8d7c077714caa139"), + wantSenderErr: ErrInvalidChainId, + wantHash: common.HexToHash("1ccd12d8bbdb96ea391af49a35ab641e219b2dd638dea375f2bc94dd290f2549"), + }, + { + tx: tx1, + signer: signer1, + wantSenderErr: ErrInvalidSig, + wantSignerHash: common.HexToHash("846ad7672f2a3a40c1f959cd4a8ad21786d620077084d84c8d7c077714caa139"), + wantHash: common.HexToHash("1ccd12d8bbdb96ea391af49a35ab641e219b2dd638dea375f2bc94dd290f2549"), + }, + { + // This checks what happens when trying to sign an unsigned tx for the wrong chain. 
+ tx: tx1, + signer: signer2, + wantSenderErr: ErrInvalidChainId, + wantSignerHash: common.HexToHash("367967247499343401261d718ed5aa4c9486583e4d89251afce47f4a33c33362"), + wantSignErr: ErrInvalidChainId, + }, + { + // This checks what happens when trying to re-sign a signed tx for the wrong chain. + tx: tx2, + signer: signer1, + wantSenderErr: ErrInvalidChainId, + wantSignerHash: common.HexToHash("846ad7672f2a3a40c1f959cd4a8ad21786d620077084d84c8d7c077714caa139"), + wantSignErr: ErrInvalidChainId, + }, + } + + for i, test := range tests { + sigHash := test.signer.Hash(test.tx) + if sigHash != test.wantSignerHash { + t.Errorf("test %d: wrong sig hash: got %x, want %x", i, sigHash, test.wantSignerHash) + } + sender, err := Sender(test.signer, test.tx) + if err != test.wantSenderErr { + t.Errorf("test %d: wrong Sender error %q", i, err) + } + if err == nil && sender != keyAddr { + t.Errorf("test %d: wrong sender address %x", i, sender) + } + signedTx, err := SignTx(test.tx, test.signer, key) + if err != test.wantSignErr { + t.Fatalf("test %d: wrong SignTx error %q", i, err) + } + if signedTx != nil { + if signedTx.Hash() != test.wantHash { + t.Errorf("test %d: wrong tx hash after signing: got %x, want %x", i, signedTx.Hash(), test.wantHash) + } + } + } +} + +func TestEIP2718TransactionEncode(t *testing.T) { + // RLP representation + { + have, err := rlp.EncodeToBytes(signedEip2718Tx) + if err != nil { + t.Fatalf("encode error: %v", err) + } + want := common.FromHex("b86601f8630103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544c001a0c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b2660a032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d37521") + if !bytes.Equal(have, want) { + t.Errorf("encoded RLP mismatch, got %x", have) + } + } + // Binary representation + { + have, err := signedEip2718Tx.MarshalBinary() + if err != nil { + t.Fatalf("encode error: %v", err) + } + want := common.FromHex("01f8630103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544c001a0c9519f4f2b30335884581971573fadf60c6204f59a911df35ee8a540456b2660a032f1e8e2c5dd761f9e4f88f41c8310aeaba26a8bfcdacfedfa12ec3862d37521") + if !bytes.Equal(have, want) { + t.Errorf("encoded RLP mismatch, got %x", have) + } + } +} + func decodeTx(data []byte) (*Transaction, error) { var tx Transaction t, err := &tx, rlp.Decode(bytes.NewReader(data), &tx) - return t, err } @@ -219,50 +358,125 @@ func TestTransactionTimeSort(t *testing.T) { } } -// TestTransactionJSON tests serializing/de-serializing to/from JSON. -func TestTransactionJSON(t *testing.T) { +// TestTransactionCoding tests serializing/de-serializing to/from rlp and JSON. +func TestTransactionCoding(t *testing.T) { key, err := crypto.GenerateKey() if err != nil { t.Fatalf("could not generate key: %v", err) } - signer := NewEIP155Signer(common.Big1) - - transactions := make([]*Transaction, 0, 50) - for i := uint64(0); i < 25; i++ { - var tx *Transaction - switch i % 2 { + var ( + signer = NewEIP2930Signer(common.Big1) + addr = common.HexToAddress("0x0000000000000000000000000000000000000001") + recipient = common.HexToAddress("095e7baea6a6c7c4c2dfeb977efac326af552d87") + accesses = AccessList{{Address: addr, StorageKeys: []common.Hash{{0}}}} + ) + for i := uint64(0); i < 500; i++ { + var txdata TxData + switch i % 5 { case 0: - tx = NewTransaction(i, common.Address{1}, common.Big0, 1, common.Big2, []byte("abcdef")) + // Legacy tx. 
+ txdata = &LegacyTx{ + Nonce: i, + To: &recipient, + Gas: 1, + GasPrice: big.NewInt(2), + Data: []byte("abcdef"), + } case 1: - tx = NewContractCreation(i, common.Big0, 1, common.Big2, []byte("abcdef")) + // Legacy tx contract creation. + txdata = &LegacyTx{ + Nonce: i, + Gas: 1, + GasPrice: big.NewInt(2), + Data: []byte("abcdef"), + } + case 2: + // Tx with non-zero access list. + txdata = &AccessListTx{ + ChainID: big.NewInt(1), + Nonce: i, + To: &recipient, + Gas: 123457, + GasPrice: big.NewInt(10), + AccessList: accesses, + Data: []byte("abcdef"), + } + case 3: + // Tx with empty access list. + txdata = &AccessListTx{ + ChainID: big.NewInt(1), + Nonce: i, + To: &recipient, + Gas: 123457, + GasPrice: big.NewInt(10), + Data: []byte("abcdef"), + } + case 4: + // Contract creation with access list. + txdata = &AccessListTx{ + ChainID: big.NewInt(1), + Nonce: i, + Gas: 123457, + GasPrice: big.NewInt(10), + AccessList: accesses, + } } - transactions = append(transactions, tx) - - signedTx, err := SignTx(tx, signer, key) + tx, err := SignNewTx(key, signer, txdata) if err != nil { t.Fatalf("could not sign transaction: %v", err) } - - transactions = append(transactions, signedTx) - } - - for _, tx := range transactions { - data, err := json.Marshal(tx) + // RLP + parsedTx, err := encodeDecodeBinary(tx) if err != nil { - t.Fatalf("json.Marshal failed: %v", err) + t.Fatal(err) } + assertEqual(parsedTx, tx) - var parsedTx *Transaction - if err := json.Unmarshal(data, &parsedTx); err != nil { - t.Fatalf("json.Unmarshal failed: %v", err) + // JSON + parsedTx, err = encodeDecodeJSON(tx) + if err != nil { + t.Fatal(err) } + assertEqual(parsedTx, tx) + } +} - // compare nonce, price, gaslimit, recipient, amount, payload, V, R, S - if tx.Hash() != parsedTx.Hash() { - t.Errorf("parsed tx differs from original tx, want %v, got %v", tx, parsedTx) - } - if tx.ChainId().Cmp(parsedTx.ChainId()) != 0 { - t.Errorf("invalid chain id, want %d, got %d", tx.ChainId(), parsedTx.ChainId()) +func encodeDecodeJSON(tx *Transaction) (*Transaction, error) { + data, err := json.Marshal(tx) + if err != nil { + return nil, fmt.Errorf("json encoding failed: %v", err) + } + var parsedTx = &Transaction{} + if err := json.Unmarshal(data, &parsedTx); err != nil { + return nil, fmt.Errorf("json decoding failed: %v", err) + } + return parsedTx, nil +} + +func encodeDecodeBinary(tx *Transaction) (*Transaction, error) { + data, err := tx.MarshalBinary() + if err != nil { + return nil, fmt.Errorf("rlp encoding failed: %v", err) + } + var parsedTx = &Transaction{} + if err := parsedTx.UnmarshalBinary(data); err != nil { + return nil, fmt.Errorf("rlp decoding failed: %v", err) + } + return parsedTx, nil +} + +func assertEqual(orig *Transaction, cpy *Transaction) error { + // compare nonce, price, gaslimit, recipient, amount, payload, V, R, S + if want, got := orig.Hash(), cpy.Hash(); want != got { + return fmt.Errorf("parsed tx differs from original tx, want %v, got %v", want, got) + } + if want, got := orig.ChainId(), cpy.ChainId(); want.Cmp(got) != 0 { + return fmt.Errorf("invalid chain id, want %d, got %d", want, got) + } + if orig.AccessList() != nil { + if !reflect.DeepEqual(orig.AccessList(), cpy.AccessList()) { + return fmt.Errorf("access list wrong!") } } + return nil } diff --git a/core/vm/contracts.go b/core/vm/contracts.go index 35faa7b83d..4e99a51618 100644 --- a/core/vm/contracts.go +++ b/core/vm/contracts.go @@ -58,7 +58,7 @@ var PrecompiledContractsByzantium = map[common.Address]PrecompiledContract{ 
common.BytesToAddress([]byte{2}): &sha256hash{}, common.BytesToAddress([]byte{3}): &ripemd160hash{}, common.BytesToAddress([]byte{4}): &dataCopy{}, - common.BytesToAddress([]byte{5}): &bigModExp{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: false}, common.BytesToAddress([]byte{6}): &bn256AddByzantium{}, common.BytesToAddress([]byte{7}): &bn256ScalarMulByzantium{}, common.BytesToAddress([]byte{8}): &bn256PairingByzantium{}, @@ -71,25 +71,30 @@ var PrecompiledContractsIstanbul = map[common.Address]PrecompiledContract{ common.BytesToAddress([]byte{2}): &sha256hash{}, common.BytesToAddress([]byte{3}): &ripemd160hash{}, common.BytesToAddress([]byte{4}): &dataCopy{}, - common.BytesToAddress([]byte{5}): &bigModExp{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: false}, common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, common.BytesToAddress([]byte{9}): &blake2F{}, } -// PrecompiledContractsYoloV2 contains the default set of pre-compiled Ethereum -// contracts used in the Yolo v2 test release. -var PrecompiledContractsYoloV2 = map[common.Address]PrecompiledContract{ - common.BytesToAddress([]byte{1}): &ecrecover{}, - common.BytesToAddress([]byte{2}): &sha256hash{}, - common.BytesToAddress([]byte{3}): &ripemd160hash{}, - common.BytesToAddress([]byte{4}): &dataCopy{}, - common.BytesToAddress([]byte{5}): &bigModExp{}, - common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, - common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, - common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, - common.BytesToAddress([]byte{9}): &blake2F{}, +// PrecompiledContractsBerlin contains the default set of pre-compiled Ethereum +// contracts used in the Berlin release. +var PrecompiledContractsBerlin = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: true}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + common.BytesToAddress([]byte{9}): &blake2F{}, +} + +// PrecompiledContractsBLS contains the set of pre-compiled Ethereum +// contracts specified in EIP-2537. These are exported for testing purposes. 
+var PrecompiledContractsBLS = map[common.Address]PrecompiledContract{
 	common.BytesToAddress([]byte{10}): &bls12381G1Add{},
 	common.BytesToAddress([]byte{11}): &bls12381G1Mul{},
 	common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{},
@@ -102,7 +107,7 @@ var PrecompiledContractsYoloV2 = map[common.Address]PrecompiledContract{
 }

 var (
-	PrecompiledAddressesYoloV2    []common.Address
+	PrecompiledAddressesBerlin    []common.Address
 	PrecompiledAddressesIstanbul  []common.Address
 	PrecompiledAddressesByzantium []common.Address
 	PrecompiledAddressesHomestead []common.Address
@@ -118,8 +123,8 @@ func init() {
 	for k := range PrecompiledContractsIstanbul {
 		PrecompiledAddressesIstanbul = append(PrecompiledAddressesIstanbul, k)
 	}
-	for k := range PrecompiledContractsYoloV2 {
-		PrecompiledAddressesYoloV2 = append(PrecompiledAddressesYoloV2, k)
+	for k := range PrecompiledContractsBerlin {
+		PrecompiledAddressesBerlin = append(PrecompiledAddressesBerlin, k)
 	}
 }
@@ -222,14 +227,19 @@ func (c *dataCopy) Run(in []byte) ([]byte, error) {
 }

 // bigModExp implements a native big integer exponential modular operation.
-type bigModExp struct{}
+type bigModExp struct {
+	eip2565 bool
+}

 var (
 	big0      = big.NewInt(0)
 	big1      = big.NewInt(1)
+	big3      = big.NewInt(3)
 	big4      = big.NewInt(4)
+	big7      = big.NewInt(7)
 	big8      = big.NewInt(8)
 	big16     = big.NewInt(16)
+	big20     = big.NewInt(20)
 	big32     = big.NewInt(32)
 	big64     = big.NewInt(64)
 	big96     = big.NewInt(96)
@@ -239,6 +249,34 @@ var (
 	big199680 = big.NewInt(199680)
 )

+// modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198
+//
+// def mult_complexity(x):
+//    if x <= 64: return x ** 2
+//    elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072
+//    else: return x ** 2 // 16 + 480 * x - 199680
+//
+// where x is max(length_of_MODULUS, length_of_BASE)
+func modexpMultComplexity(x *big.Int) *big.Int {
+	switch {
+	case x.Cmp(big64) <= 0:
+		x.Mul(x, x) // x ** 2
+	case x.Cmp(big1024) <= 0:
+		// (x ** 2 // 4 ) + ( 96 * x - 3072)
+		x = new(big.Int).Add(
+			new(big.Int).Div(new(big.Int).Mul(x, x), big4),
+			new(big.Int).Sub(new(big.Int).Mul(big96, x), big3072),
+		)
+	default:
+		// (x ** 2 // 16) + (480 * x - 199680)
+		x = new(big.Int).Add(
+			new(big.Int).Div(new(big.Int).Mul(x, x), big16),
+			new(big.Int).Sub(new(big.Int).Mul(big480, x), big199680),
+		)
+	}
+	return x
+}
+
 // RequiredGas returns the gas required to execute the pre-compiled contract.
 func (c *bigModExp) RequiredGas(input []byte) uint64 {
 	var (
@@ -273,25 +311,36 @@ func (c *bigModExp) RequiredGas(input []byte) uint64 {
 		adjExpLen.Mul(big8, adjExpLen)
 	}
 	adjExpLen.Add(adjExpLen, big.NewInt(int64(msb)))
-	// Calculate the gas cost of the operation
 	gas := new(big.Int).Set(math.BigMax(modLen, baseLen))
-	switch {
-	case gas.Cmp(big64) <= 0:
+	if c.eip2565 {
+		// EIP-2565 has three changes
+		// 1. Different multComplexity (inlined here)
+		// in EIP-2565 (https://eips.ethereum.org/EIPS/eip-2565):
+		//
+		// def mult_complexity(x):
+		//    ceiling(x/8)^2
+		//
+		// where x is max(length_of_MODULUS, length_of_BASE)
+		gas = gas.Add(gas, big7)
+		gas = gas.Div(gas, big8)
 		gas.Mul(gas, gas)
-	case gas.Cmp(big1024) <= 0:
-		gas = new(big.Int).Add(
-			new(big.Int).Div(new(big.Int).Mul(gas, gas), big4),
-			new(big.Int).Sub(new(big.Int).Mul(big96, gas), big3072),
-		)
-	default:
-		gas = new(big.Int).Add(
-			new(big.Int).Div(new(big.Int).Mul(gas, gas), big16),
-			new(big.Int).Sub(new(big.Int).Mul(big480, gas), big199680),
-		)
+
+		gas.Mul(gas, math.BigMax(adjExpLen, big1))
+		// 2.
Different divisor (`GQUADDIVISOR`) (3) + gas.Div(gas, big3) + if gas.BitLen() > 64 { + return math.MaxUint64 + } + // 3. Minimum price of 200 gas + if gas.Uint64() < 200 { + return 200 + } + return gas.Uint64() } + gas = modexpMultComplexity(gas) gas.Mul(gas, math.BigMax(adjExpLen, big1)) - gas.Div(gas, new(big.Int).SetUint64(params.ModExpQuadCoeffDiv)) + gas.Div(gas, big20) if gas.BitLen() > 64 { return math.MaxUint64 diff --git a/core/vm/contracts_test.go b/core/vm/contracts_test.go index ed0d675a69..30d9b49f71 100644 --- a/core/vm/contracts_test.go +++ b/core/vm/contracts_test.go @@ -43,7 +43,29 @@ type precompiledFailureTest struct { Name string } -var allPrecompiles = PrecompiledContractsYoloV2 +// allPrecompiles does not map to the actual set of precompiles, as it also contains +// repriced versions of precompiles at certain slots +var allPrecompiles = map[common.Address]PrecompiledContract{ + common.BytesToAddress([]byte{1}): &ecrecover{}, + common.BytesToAddress([]byte{2}): &sha256hash{}, + common.BytesToAddress([]byte{3}): &ripemd160hash{}, + common.BytesToAddress([]byte{4}): &dataCopy{}, + common.BytesToAddress([]byte{5}): &bigModExp{eip2565: false}, + common.BytesToAddress([]byte{0xf5}): &bigModExp{eip2565: true}, + common.BytesToAddress([]byte{6}): &bn256AddIstanbul{}, + common.BytesToAddress([]byte{7}): &bn256ScalarMulIstanbul{}, + common.BytesToAddress([]byte{8}): &bn256PairingIstanbul{}, + common.BytesToAddress([]byte{9}): &blake2F{}, + common.BytesToAddress([]byte{10}): &bls12381G1Add{}, + common.BytesToAddress([]byte{11}): &bls12381G1Mul{}, + common.BytesToAddress([]byte{12}): &bls12381G1MultiExp{}, + common.BytesToAddress([]byte{13}): &bls12381G2Add{}, + common.BytesToAddress([]byte{14}): &bls12381G2Mul{}, + common.BytesToAddress([]byte{15}): &bls12381G2MultiExp{}, + common.BytesToAddress([]byte{16}): &bls12381Pairing{}, + common.BytesToAddress([]byte{17}): &bls12381MapG1{}, + common.BytesToAddress([]byte{18}): &bls12381MapG2{}, +} // EIP-152 test vectors var blake2FMalformedInputTests = []precompiledFailureTest{ @@ -213,6 +235,9 @@ func BenchmarkPrecompiledIdentity(bench *testing.B) { func TestPrecompiledModExp(t *testing.T) { testJson("modexp", "05", t) } func BenchmarkPrecompiledModExp(b *testing.B) { benchJson("modexp", "05", b) } +func TestPrecompiledModExpEip2565(t *testing.T) { testJson("modexp_eip2565", "f5", t) } +func BenchmarkPrecompiledModExpEip2565(b *testing.B) { benchJson("modexp_eip2565", "f5", b) } + // Tests the sample inputs from the elliptic curve addition EIP 213. 
func TestPrecompiledBn256Add(t *testing.T) { testJson("bn256Add", "06", t) } func BenchmarkPrecompiledBn256Add(b *testing.B) { benchJson("bn256Add", "06", b) } diff --git a/core/vm/evm.go b/core/vm/evm.go index 8f6e603aee..7346d76e5b 100644 --- a/core/vm/evm.go +++ b/core/vm/evm.go @@ -46,8 +46,8 @@ type ( // configuration func (evm *EVM) ActivePrecompiles() []common.Address { switch { - case evm.chainRules.IsYoloV2: - return PrecompiledAddressesYoloV2 + case evm.chainRules.IsBerlin: + return PrecompiledAddressesBerlin case evm.chainRules.IsIstanbul: return PrecompiledAddressesIstanbul case evm.chainRules.IsByzantium: @@ -60,8 +60,8 @@ func (evm *EVM) ActivePrecompiles() []common.Address { func (evm *EVM) precompile(addr common.Address) (PrecompiledContract, bool) { var precompiles map[common.Address]PrecompiledContract switch { - case evm.chainRules.IsYoloV2: - precompiles = PrecompiledContractsYoloV2 + case evm.chainRules.IsBerlin: + precompiles = PrecompiledContractsBerlin case evm.chainRules.IsIstanbul: precompiles = PrecompiledContractsIstanbul case evm.chainRules.IsByzantium: @@ -91,9 +91,9 @@ func run(evm *EVM, contract *Contract, input []byte, readOnly bool) ([]byte, err return nil, errors.New("no compatible interpreter") } -// Context provides the EVM with auxiliary information. Once provided +// BlockContext provides the EVM with auxiliary information. Once provided // it shouldn't be modified. -type Context struct { +type BlockContext struct { // CanTransfer returns whether the account contains // sufficient ether to transfer the value CanTransfer CanTransferFunc @@ -102,10 +102,6 @@ type Context struct { // GetHash returns the hash corresponding to n GetHash GetHashFunc - // Message information - Origin common.Address // Provides information for ORIGIN - GasPrice *big.Int // Provides information for GASPRICE - // Block information Coinbase common.Address // Provides information for COINBASE GasLimit uint64 // Provides information for GASLIMIT @@ -114,6 +110,14 @@ type Context struct { Difficulty *big.Int // Provides information for DIFFICULTY } +// TxContext provides the EVM with information about a transaction. +// All fields can change between transactions. +type TxContext struct { + // Message information + Origin common.Address // Provides information for ORIGIN + GasPrice *big.Int // Provides information for GASPRICE +} + // EVM is the Ethereum Virtual Machine base object and provides // the necessary tools to run a contract on the given state with // the provided context. It should be noted that any error @@ -125,7 +129,8 @@ type Context struct { // The EVM should never be reused and is not thread safe. type EVM struct { // Context provides auxiliary blockchain related information - Context + Context BlockContext + TxContext // StateDB gives access to the underlying state StateDB StateDB // Depth is the current call stack @@ -153,17 +158,18 @@ type EVM struct { // NewEVM returns a new EVM. The returned EVM is not thread safe and should // only ever be used *once*. 
-func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
+func NewEVM(blockCtx BlockContext, txCtx TxContext, statedb StateDB, chainConfig *params.ChainConfig, vmConfig Config) *EVM {
 	evm := &EVM{
-		Context:      ctx,
+		Context:      blockCtx,
+		TxContext:    txCtx,
 		StateDB:      statedb,
 		vmConfig:     vmConfig,
 		chainConfig:  chainConfig,
-		chainRules:   chainConfig.Rules(ctx.BlockNumber),
+		chainRules:   chainConfig.Rules(blockCtx.BlockNumber),
 		interpreters: make([]Interpreter, 0, 1),
 	}
-	if chainConfig.IsEWASM(ctx.BlockNumber) {
+	if chainConfig.IsEWASM(blockCtx.BlockNumber) {
 		// to be implemented by EVM-C and Wagon PRs.
 		// if vmConfig.EWASMInterpreter != "" {
 		//  extIntOpts := strings.Split(vmConfig.EWASMInterpreter, ":")
@@ -187,6 +193,13 @@ func NewEVM(ctx Context, statedb StateDB, chainConfig *params.ChainConfig, vmCon
 	return evm
 }

+// Reset resets the EVM with a new transaction context.
+// This is not threadsafe and should only be done very cautiously.
+func (evm *EVM) Reset(txCtx TxContext, statedb StateDB) {
+	evm.TxContext = txCtx
+	evm.StateDB = statedb
+}
+
 // Cancel cancels any running EVM operation. This may be called concurrently and
 // it's safe to be called multiple times.
 func (evm *EVM) Cancel() {
@@ -233,7 +246,7 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas
 		}
 		evm.StateDB.CreateAccount(addr)
 	}
-	evm.Transfer(evm.StateDB, caller.Address(), addr, value)
+	evm.Context.Transfer(evm.StateDB, caller.Address(), addr, value)

 	// Capture the tracer start/end events in debug mode
 	if evm.vmConfig.Debug && evm.depth == 0 {
@@ -426,14 +439,14 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	if evm.depth > int(params.CallCreateDepth) {
 		return nil, common.Address{}, gas, ErrDepth
 	}
-	if !evm.CanTransfer(evm.StateDB, caller.Address(), value) {
+	if !evm.Context.CanTransfer(evm.StateDB, caller.Address(), value) {
 		return nil, common.Address{}, gas, ErrInsufficientBalance
 	}
 	nonce := evm.StateDB.GetNonce(caller.Address())
 	evm.StateDB.SetNonce(caller.Address(), nonce+1)
 	// We add this to the access list _before_ taking a snapshot. Even if the creation fails,
 	// the access-list change should not be rolled back
-	if evm.chainRules.IsYoloV2 {
+	if evm.chainRules.IsBerlin {
 		evm.StateDB.AddAddressToAccessList(address)
 	}
 	// Ensure there's no existing contract already at the designated address
@@ -447,7 +460,7 @@ func (evm *EVM) create(caller ContractRef, codeAndHash *codeAndHash, gas uint64,
 	if evm.chainRules.IsEIP158 {
 		evm.StateDB.SetNonce(address, 1)
 	}
-	evm.Transfer(evm.StateDB, caller.Address(), address, value)
+	evm.Context.Transfer(evm.StateDB, caller.Address(), address, value)

 	// Initialise a new contract and set the code that is to be used by the EVM.
 	// The contract is a scoped environment for this execution context only.
@@ -512,7 +525,7 @@ func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.I
 // instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) { codeAndHash := &codeAndHash{code: code} - contractAddr = crypto.CreateAddress2(caller.Address(), common.Hash(salt.Bytes32()), codeAndHash.Hash().Bytes()) + contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes()) return evm.create(caller, codeAndHash, gas, endowment, contractAddr) } diff --git a/core/vm/gas_table.go b/core/vm/gas_table.go index 01249a5388..944b6cf0a5 100644 --- a/core/vm/gas_table.go +++ b/core/vm/gas_table.go @@ -96,7 +96,7 @@ var ( func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { var ( y, x = stack.Back(1), stack.Back(0) - current = evm.StateDB.GetState(contract.Address(), common.Hash(x.Bytes32())) + current = evm.StateDB.GetState(contract.Address(), x.Bytes32()) ) // The legacy gas metering only takes into consideration the current state // Legacy rules should be applied if we are in Petersburg (removal of EIP-1283) @@ -135,7 +135,7 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi if current == value { // noop (1) return params.NetSstoreNoopGas, nil } - original := evm.StateDB.GetCommittedState(contract.Address(), common.Hash(x.Bytes32())) + original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) if original == current { if original == (common.Hash{}) { // create slot (2.1.1) return params.NetSstoreInitGas, nil @@ -183,14 +183,14 @@ func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m // Gas sentry honoured, do the actual gas calculation based on the stored value var ( y, x = stack.Back(1), stack.Back(0) - current = evm.StateDB.GetState(contract.Address(), common.Hash(x.Bytes32())) + current = evm.StateDB.GetState(contract.Address(), x.Bytes32()) ) value := common.Hash(y.Bytes32()) if current == value { // noop (1) return params.SloadGasEIP2200, nil } - original := evm.StateDB.GetCommittedState(contract.Address(), common.Hash(x.Bytes32())) + original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) if original == current { if original == (common.Hash{}) { // create slot (2.1.1) return params.SstoreSetGasEIP2200, nil diff --git a/core/vm/gas_table_test.go b/core/vm/gas_table_test.go index 419c903062..6cd126c9b4 100644 --- a/core/vm/gas_table_test.go +++ b/core/vm/gas_table_test.go @@ -87,11 +87,11 @@ func TestEIP2200(t *testing.T) { statedb.SetState(address, common.Hash{}, common.BytesToHash([]byte{tt.original})) statedb.Finalise(true) // Push the state into the "original" slot - vmctx := Context{ + vmctx := BlockContext{ CanTransfer: func(StateDB, common.Address, *big.Int) bool { return true }, Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, } - vmenv := NewEVM(vmctx, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}}) + vmenv := NewEVM(vmctx, TxContext{}, statedb, params.AllEthashProtocolChanges, Config{ExtraEips: []int{2200}}) _, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, tt.gaspool, new(big.Int)) if err != tt.failure { diff --git a/core/vm/instructions.go b/core/vm/instructions.go index adf44b7f48..1137505292 100644 --- a/core/vm/instructions.go +++ b/core/vm/instructions.go @@ -341,7 +341,7 @@ func opReturnDataCopy(pc *uint64, interpreter *EVMInterpreter, callContext *call func opExtCodeSize(pc *uint64, interpreter 
*EVMInterpreter, callContext *callCtx) ([]byte, error) { slot := callContext.stack.peek() - slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(common.Address(slot.Bytes20())))) + slot.SetUint64(uint64(interpreter.evm.StateDB.GetCodeSize(slot.Bytes20()))) return nil, nil } @@ -438,14 +438,14 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) return nil, nil } var upper, lower uint64 - upper = interpreter.evm.BlockNumber.Uint64() + upper = interpreter.evm.Context.BlockNumber.Uint64() if upper < 257 { lower = 0 } else { lower = upper - 256 } if num64 >= lower && num64 < upper { - num.SetBytes(interpreter.evm.GetHash(num64).Bytes()) + num.SetBytes(interpreter.evm.Context.GetHash(num64).Bytes()) } else { num.Clear() } @@ -453,30 +453,30 @@ func opBlockhash(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) } func opCoinbase(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Coinbase.Bytes())) + callContext.stack.push(new(uint256.Int).SetBytes(interpreter.evm.Context.Coinbase.Bytes())) return nil, nil } func opTimestamp(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v, _ := uint256.FromBig(interpreter.evm.Time) + v, _ := uint256.FromBig(interpreter.evm.Context.Time) callContext.stack.push(v) return nil, nil } func opNumber(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v, _ := uint256.FromBig(interpreter.evm.BlockNumber) + v, _ := uint256.FromBig(interpreter.evm.Context.BlockNumber) callContext.stack.push(v) return nil, nil } func opDifficulty(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - v, _ := uint256.FromBig(interpreter.evm.Difficulty) + v, _ := uint256.FromBig(interpreter.evm.Context.Difficulty) callContext.stack.push(v) return nil, nil } func opGasLimit(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { - callContext.stack.push(new(uint256.Int).SetUint64(interpreter.evm.GasLimit)) + callContext.stack.push(new(uint256.Int).SetUint64(interpreter.evm.Context.GasLimit)) return nil, nil } @@ -517,7 +517,7 @@ func opSstore(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([] loc := callContext.stack.pop() val := callContext.stack.pop() interpreter.evm.StateDB.SetState(callContext.contract.Address(), - common.Hash(loc.Bytes32()), common.Hash(val.Bytes32())) + loc.Bytes32(), val.Bytes32()) return nil, nil } @@ -817,7 +817,7 @@ func opStop(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]by func opSuicide(pc *uint64, interpreter *EVMInterpreter, callContext *callCtx) ([]byte, error) { beneficiary := callContext.stack.pop() balance := interpreter.evm.StateDB.GetBalance(callContext.contract.Address()) - interpreter.evm.StateDB.AddBalance(common.Address(beneficiary.Bytes20()), balance) + interpreter.evm.StateDB.AddBalance(beneficiary.Bytes20(), balance) interpreter.evm.StateDB.Suicide(callContext.contract.Address()) return nil, nil } @@ -832,7 +832,7 @@ func makeLog(size int) executionFunc { mStart, mSize := stack.pop(), stack.pop() for i := 0; i < size; i++ { addr := stack.pop() - topics[i] = common.Hash(addr.Bytes32()) + topics[i] = addr.Bytes32() } d := callContext.memory.GetCopy(int64(mStart.Uint64()), int64(mSize.Uint64())) @@ -842,7 +842,7 @@ func makeLog(size int) executionFunc { Data: d, // This is a non-consensus field, but assigned here because // core/state doesn't know the 
current block number. - BlockNumber: interpreter.evm.BlockNumber.Uint64(), + BlockNumber: interpreter.evm.Context.BlockNumber.Uint64(), }) return nil, nil diff --git a/core/vm/instructions_test.go b/core/vm/instructions_test.go index 0b6fb1f486..985d5a5156 100644 --- a/core/vm/instructions_test.go +++ b/core/vm/instructions_test.go @@ -92,7 +92,7 @@ func init() { func testTwoOperandOp(t *testing.T, tests []TwoOperandTestcase, opFn executionFunc, name string) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() rstack = newReturnStack() pc = uint64(0) @@ -192,7 +192,7 @@ func TestSAR(t *testing.T) { func TestAddMod(t *testing.T) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack = newstack() evmInterpreter = NewEVMInterpreter(env, env.vmConfig) pc = uint64(0) @@ -231,7 +231,7 @@ func TestAddMod(t *testing.T) { // getResult is a convenience function to generate the expected values func getResult(args []*twoOperandParams, opFn executionFunc) []TwoOperandTestcase { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() pc = uint64(0) interpreter = env.interpreter.(*EVMInterpreter) @@ -281,7 +281,7 @@ func TestJsonTestcases(t *testing.T) { func opBenchmark(bench *testing.B, op executionFunc, args ...string) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() evmInterpreter = NewEVMInterpreter(env, env.vmConfig) ) @@ -515,7 +515,7 @@ func BenchmarkOpIsZero(b *testing.B) { func TestOpMstore(t *testing.T) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.vmConfig) @@ -539,7 +539,7 @@ func TestOpMstore(t *testing.T) { func BenchmarkOpMstore(bench *testing.B) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.vmConfig) @@ -560,7 +560,7 @@ func BenchmarkOpMstore(bench *testing.B) { func BenchmarkOpSHA3(bench *testing.B) { var ( - env = NewEVM(Context{}, nil, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, nil, params.TestChainConfig, Config{}) stack, rstack = newstack(), newReturnStack() mem = NewMemory() evmInterpreter = NewEVMInterpreter(env, env.vmConfig) diff --git a/core/vm/interface.go b/core/vm/interface.go index fb5bbca48f..ad9b05d666 100644 --- a/core/vm/interface.go +++ b/core/vm/interface.go @@ -57,6 +57,7 @@ type StateDB interface { // is defined according to EIP161 (balance = nonce = code = 0). 
Empty(common.Address) bool + PrepareAccessList(sender common.Address, dest *common.Address, precompiles []common.Address, txAccesses types.AccessList) AddressInAccessList(addr common.Address) bool SlotInAccessList(addr common.Address, slot common.Hash) (addressOk bool, slotOk bool) // AddAddressToAccessList adds the given address to the access list. This operation is safe to perform diff --git a/core/vm/interpreter.go b/core/vm/interpreter.go index bffc5013a6..0084b7d071 100644 --- a/core/vm/interpreter.go +++ b/core/vm/interpreter.go @@ -99,8 +99,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { if cfg.JumpTable[STOP] == nil { var jt JumpTable switch { - case evm.chainRules.IsYoloV2: - jt = yoloV2InstructionSet + case evm.chainRules.IsBerlin: + jt = berlinInstructionSet case evm.chainRules.IsIstanbul: jt = istanbulInstructionSet case evm.chainRules.IsConstantinople: diff --git a/core/vm/jump_table.go b/core/vm/jump_table.go index 83fb2c1ed6..d831f9300f 100644 --- a/core/vm/jump_table.go +++ b/core/vm/jump_table.go @@ -56,24 +56,23 @@ var ( byzantiumInstructionSet = newByzantiumInstructionSet() constantinopleInstructionSet = newConstantinopleInstructionSet() istanbulInstructionSet = newIstanbulInstructionSet() - yoloV2InstructionSet = newYoloV2InstructionSet() + berlinInstructionSet = newBerlinInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. type JumpTable [256]*operation -// newYoloV2InstructionSet creates an instructionset containing -// - "EIP-2315: Simple Subroutines" -// - "EIP-2929: Gas cost increases for state access opcodes" -func newYoloV2InstructionSet() JumpTable { +// newBerlinInstructionSet returns the frontier, homestead, byzantium, +// contantinople, istanbul, petersburg and berlin instructions. +func newBerlinInstructionSet() JumpTable { instructionSet := newIstanbulInstructionSet() enable2315(&instructionSet) // Subroutines - https://eips.ethereum.org/EIPS/eip-2315 enable2929(&instructionSet) // Access lists for trie accesses https://eips.ethereum.org/EIPS/eip-2929 return instructionSet } -// newIstanbulInstructionSet returns the frontier, homestead -// byzantium, contantinople and petersburg instructions. +// newIstanbulInstructionSet returns the frontier, homestead, byzantium, +// contantinople, istanbul and petersburg instructions. func newIstanbulInstructionSet() JumpTable { instructionSet := newConstantinopleInstructionSet() @@ -84,7 +83,7 @@ func newIstanbulInstructionSet() JumpTable { return instructionSet } -// newConstantinopleInstructionSet returns the frontier, homestead +// newConstantinopleInstructionSet returns the frontier, homestead, // byzantium and contantinople instructions. 
func newConstantinopleInstructionSet() JumpTable { instructionSet := newByzantiumInstructionSet() diff --git a/core/vm/logger_test.go b/core/vm/logger_test.go index e287f0c7aa..bf7d5358f8 100644 --- a/core/vm/logger_test.go +++ b/core/vm/logger_test.go @@ -51,7 +51,7 @@ func (*dummyStatedb) GetRefund() uint64 { return 1337 } func TestStoreCapture(t *testing.T) { var ( - env = NewEVM(Context{}, &dummyStatedb{}, params.TestChainConfig, Config{}) + env = NewEVM(BlockContext{}, TxContext{}, &dummyStatedb{}, params.TestChainConfig, Config{}) logger = NewStructLogger(nil) mem = NewMemory() stack = newstack() diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go index 41b0549c51..191953ce5e 100644 --- a/core/vm/operations_acl.go +++ b/core/vm/operations_acl.go @@ -73,7 +73,7 @@ func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m // return params.SloadGasEIP2200, nil return cost + WarmStorageReadCostEIP2929, nil // SLOAD_GAS } - original := evm.StateDB.GetCommittedState(contract.Address(), common.Hash(x.Bytes32())) + original := evm.StateDB.GetCommittedState(contract.Address(), x.Bytes32()) if original == current { if original == (common.Hash{}) { // create slot (2.1.1) return cost + params.SstoreSetGasEIP2200, nil diff --git a/core/vm/runtime/env.go b/core/vm/runtime/env.go index 38ee448904..6c4c72eeac 100644 --- a/core/vm/runtime/env.go +++ b/core/vm/runtime/env.go @@ -22,18 +22,20 @@ import ( ) func NewEnv(cfg *Config) *vm.EVM { - context := vm.Context{ + txContext := vm.TxContext{ + Origin: cfg.Origin, + GasPrice: cfg.GasPrice, + } + blockContext := vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, GetHash: cfg.GetHashFn, - Origin: cfg.Origin, Coinbase: cfg.Coinbase, BlockNumber: cfg.BlockNumber, Time: cfg.Time, Difficulty: cfg.Difficulty, GasLimit: cfg.GasLimit, - GasPrice: cfg.GasPrice, } - return vm.NewEVM(context, cfg.State, cfg.ChainConfig, cfg.EVMConfig) + return vm.NewEVM(blockContext, txContext, cfg.State, cfg.ChainConfig, cfg.EVMConfig) } diff --git a/core/vm/runtime/runtime.go b/core/vm/runtime/runtime.go index d99e8f3b2b..9cb69e1c76 100644 --- a/core/vm/runtime/runtime.go +++ b/core/vm/runtime/runtime.go @@ -65,7 +65,8 @@ func setDefaults(cfg *Config) { PetersburgBlock: new(big.Int), IstanbulBlock: new(big.Int), MuirGlacierBlock: new(big.Int), - YoloV2Block: nil, + BerlinBlock: new(big.Int), + YoloV3Block: nil, } } @@ -113,13 +114,8 @@ func Execute(code, input []byte, cfg *Config) ([]byte, *state.StateDB, error) { vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) ) - if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) { - cfg.State.AddAddressToAccessList(cfg.Origin) - cfg.State.AddAddressToAccessList(address) - for _, addr := range vmenv.ActivePrecompiles() { - cfg.State.AddAddressToAccessList(addr) - cfg.State.AddAddressToAccessList(addr) - } + if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { + cfg.State.PrepareAccessList(cfg.Origin, &address, vmenv.ActivePrecompiles(), nil) } cfg.State.CreateAccount(address) // set the receiver's (the executing contract) code for execution. 
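The Execute hunk above (and the Create and Call hunks that follow) replace the hand-rolled AddAddressToAccessList loops with a single PrepareAccessList call, matching the method added to the StateDB interface in core/vm/interface.go. A minimal sketch of the behaviour that call is expected to provide under EIP-2929/EIP-2930, assuming an AddSlotToAccessList helper alongside the AddAddressToAccessList method shown in that interface; this is an illustrative sketch, not necessarily the exact go-ethereum implementation:

	package accesslist

	import (
		"github.com/ethereum/go-ethereum/common"
		"github.com/ethereum/go-ethereum/core/types"
	)

	// accessListState is the minimal slice of the vm.StateDB interface needed
	// for the transaction warm-up (assumed method set, for illustration only).
	type accessListState interface {
		AddAddressToAccessList(addr common.Address)
		AddSlotToAccessList(addr common.Address, slot common.Hash)
	}

	// prepareAccessList marks everything a transaction touches up front as "warm":
	// the sender, the destination (if any), all active precompiles, and every
	// address/slot pair in the optional EIP-2930 access list.
	func prepareAccessList(db accessListState, sender common.Address, dst *common.Address, precompiles []common.Address, list types.AccessList) {
		db.AddAddressToAccessList(sender)
		if dst != nil {
			db.AddAddressToAccessList(*dst)
		}
		for _, addr := range precompiles {
			db.AddAddressToAccessList(addr)
		}
		for _, el := range list {
			db.AddAddressToAccessList(el.Address)
			for _, key := range el.StorageKeys {
				db.AddSlotToAccessList(el.Address, key)
			}
		}
	}

With this in place, callers such as runtime.Execute only need the single PrepareAccessList invocation shown in the hunk above, rather than repeating the per-address bookkeeping at every call site.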
@@ -150,11 +146,8 @@ func Create(input []byte, cfg *Config) ([]byte, common.Address, uint64, error) { vmenv = NewEnv(cfg) sender = vm.AccountRef(cfg.Origin) ) - if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) { - cfg.State.AddAddressToAccessList(cfg.Origin) - for _, addr := range vmenv.ActivePrecompiles() { - cfg.State.AddAddressToAccessList(addr) - } + if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { + cfg.State.PrepareAccessList(cfg.Origin, nil, vmenv.ActivePrecompiles(), nil) } // Call the code with the given configuration. @@ -178,12 +171,9 @@ func Call(address common.Address, input []byte, cfg *Config) ([]byte, uint64, er vmenv := NewEnv(cfg) sender := cfg.State.GetOrNewStateObject(cfg.Origin) - if cfg.ChainConfig.IsYoloV2(vmenv.BlockNumber) { - cfg.State.AddAddressToAccessList(cfg.Origin) - cfg.State.AddAddressToAccessList(address) - for _, addr := range vmenv.ActivePrecompiles() { - cfg.State.AddAddressToAccessList(addr) - } + statedb := cfg.State + if cfg.ChainConfig.IsBerlin(vmenv.Context.BlockNumber) { + statedb.PrepareAccessList(cfg.Origin, &address, vmenv.ActivePrecompiles(), nil) } // Call the code with the given configuration. diff --git a/core/vm/runtime/runtime_test.go b/core/vm/runtime/runtime_test.go index b185258dad..af69e3333f 100644 --- a/core/vm/runtime/runtime_test.go +++ b/core/vm/runtime/runtime_test.go @@ -500,7 +500,7 @@ func DisabledTestEipExampleCases(t *testing.T) { { code := []byte{ - byte(vm.PUSH9), 0x00, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, (4 + 8), + byte(vm.PUSH9), 0x00, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, 4 + 8, byte(vm.JUMPSUB), byte(vm.STOP), byte(vm.BEGINSUB), @@ -516,7 +516,7 @@ func DisabledTestEipExampleCases(t *testing.T) { // out the trace. { code := []byte{ - byte(vm.PUSH9), 0x01, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, (4 + 8), + byte(vm.PUSH9), 0x01, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, 4 + 8, byte(vm.JUMPSUB), byte(vm.STOP), byte(vm.BEGINSUB), diff --git a/core/vm/testdata/precompiles/modexp_eip2565.json b/core/vm/testdata/precompiles/modexp_eip2565.json new file mode 100644 index 0000000000..c55441439e --- /dev/null +++ b/core/vm/testdata/precompiles/modexp_eip2565.json @@ -0,0 +1,121 @@ +[ + { + "Input": "00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002003fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "Expected": "0000000000000000000000000000000000000000000000000000000000000001", + "Name": "eip_example1", + "Gas": 1360, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000020fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2efffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", + "Expected": "0000000000000000000000000000000000000000000000000000000000000000", + "Name": "eip_example2", + "Gas": 1360, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb502fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + "Expected": "60008f1614cc01dcfb6bfb09c625cf90b47d4468db81b5f8b7a39d42f332eab9b2da8f2d95311648a8f243f4bb13cfb3d8f7f2a3c014122ebb3ed41b02783adc", + "Name": "nagydani-1-square", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb503fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + "Expected": "4834a46ba565db27903b1c720c9d593e84e4cbd6ad2e64b31885d944f68cd801f92225a8961c952ddf2797fa4701b330c85c4b363798100b921a1a22a46a7fec", + "Name": "nagydani-1-qube", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000040e09ad9675465c53a109fac66a445c91b292d2bb2c5268addb30cd82f80fcb0033ff97c80a5fc6f39193ae969c6ede6710a6b7ac27078a06d90ef1c72e5c85fb5010001fc9e1f6beb81516545975218075ec2af118cd8798df6e08a147c60fd6095ac2bb02c2908cf4dd7c81f11c289e4bce98f3553768f392a80ce22bf5c4f4a248c6b", + "Expected": "c36d804180c35d4426b57b50c5bfcca5c01856d104564cd513b461d3c8b8409128a5573e416d0ebe38f5f736766d9dc27143e4da981dfa4d67f7dc474cbee6d2", + "Name": "nagydani-1-pow0x10001", + "Gas": 341, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5102e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + "Expected": "981dd99c3b113fae3e3eaa9435c0dc96779a23c12a53d1084b4f67b0b053a27560f627b873e3f16ad78f28c94f14b6392def26e4d8896c5e3c984e50fa0b3aa44f1da78b913187c6128baa9340b1e9c9a0fd02cb78885e72576da4a8f7e5a113e173a7a2889fde9d407bd9f06eb05bc8fc7b4229377a32941a02bf4edcc06d70", + "Name": "nagydani-2-square", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf5103e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + "Expected": "d89ceb68c32da4f6364978d62aaa40d7b09b59ec61eb3c0159c87ec3a91037f7dc6967594e530a69d049b64adfa39c8fa208ea970cfe4b7bcd359d345744405afe1cbf761647e32b3184c7fbe87cee8c6c7ff3b378faba6c68b83b6889cb40f1603ee68c56b4c03d48c595c826c041112dc941878f8c5be828154afd4a16311f", + "Name": "nagydani-2-qube", + "Gas": 200, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000080cad7d991a00047dd54d3399b6b0b937c718abddef7917c75b6681f40cc15e2be0003657d8d4c34167b2f0bbbca0ccaa407c2a6a07d50f1517a8f22979ce12a81dcaf707cc0cebfc0ce2ee84ee7f77c38b9281b9822a8d3de62784c089c9b18dcb9a2a5eecbede90ea788a862a9ddd9d609c2c52972d63e289e28f6a590ffbf51010001e6d893b80aeed5e6e9ce9afa8a5d5675c93a32ac05554cb20e9951b2c140e3ef4e433068cf0fb73bc9f33af1853f64aa27a0028cbf570d7ac9048eae5dc7b28c87c31e5810f1e7fa2cda6adf9f1076dbc1ec1238560071e7efc4e9565c49be9e7656951985860a558a754594115830bcdb421f741408346dd5997bb01c287087", + "Expected": "ad85e8ef13fd1dd46eae44af8b91ad1ccae5b7a1c92944f92a19f21b0b658139e0cabe9c1f679507c2de354bf2c91ebd965d1e633978a830d517d2f6f8dd5fd58065d58559de7e2334a878f8ec6992d9b9e77430d4764e863d77c0f87beede8f2f7f2ab2e7222f85cc9d98b8467f4bb72e87ef2882423ebdb6daf02dddac6db2", + "Name": "nagydani-2-pow0x10001", + "Gas": 1365, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb02d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "Expected": 
"affc7507ea6d84751ec6b3f0d7b99dbcc263f33330e450d1b3ff0bc3d0874320bf4edd57debd587306988157958cb3cfd369cc0c9c198706f635c9e0f15d047df5cb44d03e2727f26b083c4ad8485080e1293f171c1ed52aef5993a5815c35108e848c951cf1e334490b4a539a139e57b68f44fee583306f5b85ffa57206b3ee5660458858534e5386b9584af3c7f67806e84c189d695e5eb96e1272d06ec2df5dc5fabc6e94b793718c60c36be0a4d031fc84cd658aa72294b2e16fc240aef70cb9e591248e38bd49c5a554d1afa01f38dab72733092f7555334bbef6c8c430119840492380aa95fa025dcf699f0a39669d812b0c6946b6091e6e235337b6f8", + "Name": "nagydani-3-square", + "Gas": 341, + "NoBenchmark": false + }, + { + "Input": "000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb03d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "Expected": "1b280ecd6a6bf906b806d527c2a831e23b238f89da48449003a88ac3ac7150d6a5e9e6b3be4054c7da11dd1e470ec29a606f5115801b5bf53bc1900271d7c3ff3cd5ed790d1c219a9800437a689f2388ba1a11d68f6a8e5b74e9a3b1fac6ee85fc6afbac599f93c391f5dc82a759e3c6c0ab45ce3f5d25d9b0c1bf94cf701ea6466fc9a478dacc5754e593172b5111eeba88557048bceae401337cd4c1182ad9f700852bc8c99933a193f0b94cf1aedbefc48be3bc93ef5cb276d7c2d5462ac8bb0c8fe8923a1db2afe1c6b90d59c534994a6a633f0ead1d638fdc293486bb634ff2c8ec9e7297c04241a61c37e3ae95b11d53343d4ba2b4cc33d2cfa7eb705e", + "Name": "nagydani-3-qube", + "Gas": 341, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000100c9130579f243e12451760976261416413742bd7c91d39ae087f46794062b8c239f2a74abf3918605a0e046a7890e049475ba7fbb78f5de6490bd22a710cc04d30088179a919d86c2da62cf37f59d8f258d2310d94c24891be2d7eeafaa32a8cb4b0cfe5f475ed778f45907dc8916a73f03635f233f7a77a00a3ec9ca6761a5bbd558a2318ecd0caa1c5016691523e7e1fa267dd35e70c66e84380bdcf7c0582f540174e572c41f81e93da0b757dff0b0fe23eb03aa19af0bdec3afb474216febaacb8d0381e631802683182b0fe72c28392539850650b70509f54980241dc175191a35d967288b532a7a8223ce2440d010615f70df269501944d4ec16fe4a3cb010001d7a85909174757835187cb52e71934e6c07ef43b4c46fc30bbcd0bc72913068267c54a4aabebb493922492820babdeb7dc9b1558fcf7bd82c37c82d3147e455b623ab0efa752fe0b3a67ca6e4d126639e645a0bf417568adbb2a6a4eef62fa1fa29b2a5a43bebea1f82193a7dd98eb483d09bb595af1fa9c97c7f41f5649d976aee3e5e59e2329b43b13bea228d4a93f16ba139ccb511de521ffe747aa2eca664f7c9e33da59075cc335afcd2bf3ae09765f01ab5a7c3e3938ec168b74724b5074247d200d9970382f683d6059b94dbc336603d1dfee714e4b447ac2fa1d99ecb4961da2854e03795ed758220312d101e1e3d87d5313a6d052aebde75110363d", + "Expected": "37843d7c67920b5f177372fa56e2a09117df585f81df8b300fba245b1175f488c99476019857198ed459ed8d9799c377330e49f4180c4bf8e8f66240c64f65ede93d601f957b95b83efdee1e1bfde74169ff77002eaf078c71815a9220c80b2e3b3ff22c2f358111d816ebf83c2999026b6de50bfc711ff68705d2f40b753424aefc9f70f08d908b5a20276ad613b4ab4309a3ea72f0c17ea9df6b3367d44fb3acab11c333909e02e81ea2ed404a712d3ea96bba87461720e2d98723e7acd0520ac1a5212dbedcd8dc0c1abf61d4719e319ff4758a774790b8d463cdfe131d1b2dcfee52d002694e98e720cb6ae7ccea353bc503269ba35f0f63bf8d7b672a76", + "Name": "nagydani-3-pow0x10001", + "Gas": 5461, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8102df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "Expected": "8a5aea5f50dcc03dc7a7a272b5aeebc040554dbc1ffe36753c4fc75f7ed5f6c2cc0de3a922bf96c78bf0643a73025ad21f45a4a5cadd717612c511ab2bff1190fe5f1ae05ba9f8fe3624de1de2a817da6072ddcdb933b50216811dbe6a9ca79d3a3c6b3a476b079fd0d05f04fb154e2dd3e5cb83b148a006f2bcbf0042efb2ae7b916ea81b27aac25c3bf9a8b6d35440062ad8eae34a83f3ffa2cc7b40346b62174a4422584f72f95316f6b2bee9ff232ba9739301c97c99a9ded26c45d72676eb856ad6ecc81d36a6de36d7f9dafafee11baa43a4b0d5e4ecffa7b9b7dcefd58c397dd373e6db4acd2b2c02717712e6289bed7c813b670c4a0c6735aa7f3b0f1ce556eae9fcc94b501b2c8781ba50a8c6220e8246371c3c7359fe4ef9da786ca7d98256754ca4e496be0a9174bedbecb384bdf470779186d6a833f068d2838a88d90ef3ad48ff963b67c39cc5a3ee123baf7bf3125f64e77af7f30e105d72c4b9b5b237ed251e4c122c6d8c1405e736299c3afd6db16a28c6a9cfa68241e53de4cd388271fe534a6a9b0dbea6171d170db1b89858468885d08fecbd54c8e471c3e25d48e97ba450b96d0d87e00ac732aaa0d3ce4309c1064bd8a4c0808a97e0143e43a24cfa847635125cd41c13e0574487963e9d725c01375db99c31da67b4cf65eff555f0c0ac416c727ff8d438ad7c42030551d68c2e7adda0abb1ca7c10", + "Name": "nagydani-4-square", + "Gas": 1365, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b8103df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "Expected": "5a2664252aba2d6e19d9600da582cdd1f09d7a890ac48e6b8da15ae7c6ff1856fc67a841ac2314d283ffa3ca81a0ecf7c27d89ef91a5a893297928f5da0245c99645676b481b7e20a566ee6a4f2481942bee191deec5544600bb2441fd0fb19e2ee7d801ad8911c6b7750affec367a4b29a22942c0f5f4744a4e77a8b654da2a82571037099e9c6d930794efe5cdca73c7b6c0844e386bdca8ea01b3d7807146bb81365e2cdc6475f8c23e0ff84463126189dc9789f72bbce2e3d2d114d728a272f1345122de23df54c922ec7a16e5c2a8f84da8871482bd258c20a7c09bbcd64c7a96a51029bbfe848736a6ba7bf9d931a9b7de0bcaf3635034d4958b20ae9ab3a95a147b0421dd5f7ebff46c971010ebfc4adbbe0ad94d5498c853e7142c450d8c71de4b2f84edbf8acd2e16d00c8115b150b1c30e553dbb82635e781379fe2a56360420ff7e9f70cc64c00aba7e26ed13c7c19622865ae07248daced36416080f35f8cc157a857ed70ea4f347f17d1bee80fa038abd6e39b1ba06b97264388b21364f7c56e192d4b62d9b161405f32ab1e2594e86243e56fcf2cb30d21adef15b9940f91af681da24328c883d892670c6aa47940867a81830a82b82716895db810df1b834640abefb7db2092dd92912cb9a735175bc447be40a503cf22dfe565b4ed7a3293ca0dfd63a507430b323ee248ec82e843b673c97ad730728cebc", + "Name": "nagydani-4-qube", + "Gas": 1365, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000200db34d0e438249c0ed685c949cc28776a05094e1c48691dc3f2dca5fc3356d2a0663bd376e4712839917eb9a19c670407e2c377a2de385a3ff3b52104f7f1f4e0c7bf7717fb913896693dc5edbb65b760ef1b00e42e9d8f9af17352385e1cd742c9b006c0f669995cb0bb21d28c0aced2892267637b6470d8cee0ab27fc5d42658f6e88240c31d6774aa60a7ebd25cd48b56d0da11209f1928e61005c6eb709f3e8e0aaf8d9b10f7d7e296d772264dc76897ccdddadc91efa91c1903b7232a9e4c3b941917b99a3bc0c26497dedc897c25750af60237aa67934a26a2bc491db3dcc677491944bc1f51d3e5d76b8d846a62db03dedd61ff508f91a56d71028125035c3a44cbb041497c83bf3e4ae2a9613a401cc721c547a2afa3b16a2969933d3626ed6d8a7428648f74122fd3f2a02a20758f7f693892c8fd798b39abac01d18506c45e71432639e9f9505719ee822f62ccbf47f6850f096ff77b5afaf4be7d772025791717dbe5abf9b3f40cff7d7aab6f67e38f62faf510747276e20a42127e7500c444f9ed92baf65ade9e836845e39c4316d9dce5f8e2c8083e2c0acbb95296e05e51aab13b6b8f53f06c9c4276e12b0671133218cc3ea907da3bd9a367096d9202128d14846cc2e20d56fc8473ecb07cecbfb8086919f3971926e7045b853d85a69d026195c70f9f7a823536e2a8f4b3e12e94d9b53a934353451094b81010001df3143a0057457d75e8c708b6337a6f5a4fd1a06727acf9fb93e2993c62f3378b37d56c85e7b1e00f0145ebf8e4095bd723166293c60b6ac1252291ef65823c9e040ddad14969b3b340a4ef714db093a587c37766d68b8d6b5016e741587e7e6bf7e763b44f0247e64bae30f994d248bfd20541a333e5b225ef6a61199e301738b1e688f70ec1d7fb892c183c95dc543c3e12adf8a5e8b9ca9d04f9445cced3ab256f29e998e69efaa633a7b60e1db5a867924ccab0a171d9d6e1098dfa15acde9553de599eaa56490c8f411e4985111f3d40bddfc5e301edb01547b01a886550a61158f7e2033c59707789bf7c854181d0c2e2a42a93cf09209747d7082e147eb8544de25c3eb14f2e35559ea0c0f5877f2f3fc92132c0ae9da4e45b2f6c866a224ea6d1f28c05320e287750fbc647368d41116e528014cc1852e5531d53e4af938374daba6cee4baa821ed07117253bb3601ddd00d59a3d7fb2ef1f5a2fbba7c429f0cf9a5b3462410fd833a69118f8be9c559b1000cc608fd877fb43f8e65c2d1302622b944462579056874b387208d90623fcdaf93920ca7a9e4ba64ea208758222ad868501cc2c345e2d3a5ea2a17e5069248138c8a79c0251185d29ee73e5afab5354769142d2bf0cb6712727aa6bf84a6245fcdae66e4938d84d1b9dd09a884818622080ff5f98942fb20acd7e0c916c2d5ea7ce6f7e173315384518f", + "Expected": "bed8b970c4a34849fc6926b08e40e20b21c15ed68d18f228904878d4370b56322d0da5789da0318768a374758e6375bfe4641fca5285ec7171828922160f48f5ca7efbfee4d5148612c38ad683ae4e3c3a053d2b7c098cf2b34f2cb19146eadd53c86b2d7ccf3d83b2c370bfb840913ee3879b1057a6b4e07e110b6bcd5e958bc71a14798c91d518cc70abee264b0d25a4110962a764b364ac0b0dd1ee8abc8426d775ec0f22b7e47b32576afaf1b5a48f64573ed1c5c29f50ab412188d9685307323d990802b81dacc06c6e05a1e901830ba9fcc67688dc29c5e27bde0a6e845ca925f5454b6fb3747edfaa2a5820838fb759eadf57f7cb5cec57fc213ddd8a4298fa079c3c0f472b07fb15aa6a7f0a3780bd296ff6a62e58ef443870b02260bd4fd2bbc98255674b8e1f1f9f8d33c7170b0ebbea4523b695911abbf26e41885344823bd0587115fdd83b721a4e8457a31c9a84b3d3520a07e0e35df7f48e5a9d534d0ec7feef1ff74de6a11e7f93eab95175b6ce22c68d78a642ad642837897ec11349205d8593ac19300207572c38d29ca5dfa03bc14cdbc32153c80e5cc3e739403d34c75915e49beb43094cc6dcafb3665b305ddec9286934ae66ec6b777ca528728c851318eb0f207b39f1caaf96db6eeead6b55ed08f451939314577d42bcc9f97c0b52d0234f88fd07e4c1d7780fdebc025cfffcb572cb27a8c33963", + "Name": "nagydani-4-pow0x10001", + "Gas": 21845, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf02e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe
0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "Expected": "d61fe4e3f32ac260915b5b03b78a86d11bfc41d973fce5b0cc59035cf8289a8a2e3878ea15fa46565b0d806e2f85b53873ea20ed653869b688adf83f3ef444535bf91598ff7e80f334fb782539b92f39f55310cc4b35349ab7b278346eda9bc37c0d8acd3557fae38197f412f8d9e57ce6a76b7205c23564cab06e5615be7c6f05c3d05ec690cba91da5e89d55b152ff8dd2157dc5458190025cf94b1ad98f7cbe64e9482faba95e6b33844afc640892872b44a9932096508f4a782a4805323808f23e54b6ff9b841dbfa87db3505ae4f687972c18ea0f0d0af89d36c1c2a5b14560c153c3fee406f5cf15cfd1c0bb45d767426d465f2f14c158495069d0c5955a00150707862ecaae30624ebacdd8ac33e4e6aab3ff90b6ba445a84689386b9e945d01823a65874444316e83767290fcff630d2477f49d5d8ffdd200e08ee1274270f86ed14c687895f6caf5ce528bd970c20d2408a9ba66216324c6a011ac4999098362dbd98a038129a2d40c8da6ab88318aa3046cb660327cc44236d9e5d2163bd0959062195c51ed93d0088b6f92051fc99050ece2538749165976233697ab4b610385366e5ce0b02ad6b61c168ecfbedcdf74278a38de340fd7a5fead8e588e294795f9b011e2e60377a89e25c90e145397cdeabc60fd32444a6b7642a611a83c464d8b8976666351b4865c37b02e6dc21dbcdf5f930341707b618cc0f03c3122646b3385c9df9f2ec730eec9d49e7dfc9153b6e6289da8c4f0ebea9ccc1b751948e3bb7171c9e4d57423b0eeeb79095c030cb52677b3f7e0b45c30f645391f3f9c957afa549c4e0b2465b03c67993cd200b1af01035962edbc4c9e89b31c82ac121987d6529dafdeef67a132dc04b6dc68e77f22862040b75e2ceb9ff16da0fca534e6db7bd12fa7b7f51b6c08c1e23dfcdb7acbd2da0b51c87ffbced065a612e9b1c8bba9b7e2d8d7a2f04fcc4aaf355b60d764879a76b5e16762d5f2f55d585d0c8e82df6940960cddfb72c91dfa71f6b4e1c6ca25dfc39a878e998a663c04fe29d5e83b9586d047b4d7ff70a9f0d44f127e7d741685ca75f11629128d916a0ffef4be586a30c4b70389cc746e84ebf177c01ee8a4511cfbb9d1ecf7f7b33c7dd8177896e10bbc82f838dcd6db7ac67de62bf46b6a640fb580c5d1d2708f3862e3d2b645d0d18e49ef088053e3a220adc0e033c2afcfe61c90e32151152eb3caaf746c5e377d541cafc6cbb0cc0fa48b5caf1728f2e1957f5addfc234f1a9d89e40d49356c9172d0561a695fce6dab1d412321bbf407f63766ffd7b6b3d79bcfa07991c5a9709849c1008689e3b47c50d613980bec239fb64185249d055b30375ccb4354d71fe4d05648fbf6c80634dfc3575f2f24abb714c1e4c95e8896763bf4316e954c7ad19e5780ab7a040ca6fb9271f90a8b22ae738daf6cb", + "Name": "nagydani-5-square", + "Gas": 5461, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf03e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f3bbe
0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "Expected": "5f9c70ec884926a89461056ad20ac4c30155e817f807e4d3f5bb743d789c83386762435c3627773fa77da5144451f2a8aad8adba88e0b669f5377c5e9bad70e45c86fe952b613f015a9953b8a5de5eaee4566acf98d41e327d93a35bd5cef4607d025e58951167957df4ff9b1627649d3943805472e5e293d3efb687cfd1e503faafeb2840a3e3b3f85d016051a58e1c9498aab72e63b748d834b31eb05d85dcde65e27834e266b85c75cc4ec0135135e0601cb93eeeb6e0010c8ceb65c4c319623c5e573a2c8c9fbbf7df68a930beb412d3f4dfd146175484f45d7afaa0d2e60684af9b34730f7c8438465ad3e1d0c3237336722f2aa51095bd5759f4b8ab4dda111b684aa3dac62a761722e7ae43495b7709933512c81c4e3c9133a51f7ce9f2b51fcec064f65779666960b4e45df3900f54311f5613e8012dd1b8efd359eda31a778264c72aa8bb419d862734d769076bce2810011989a45374e5c5d8729fec21427f0bf397eacbb4220f603cf463a4b0c94efd858ffd9768cd60d6ce68d755e0fbad007ce5c2223d70c7018345a102e4ab3c60a13a9e7794303156d4c2063e919f2153c13961fb324c80b240742f47773a7a8e25b3e3fb19b00ce839346c6eb3c732fbc6b888df0b1fe0a3d07b053a2e9402c267b2d62f794d8a2840526e3ade15ce2264496ccd7519571dfde47f7a4bb16292241c20b2be59f3f8fb4f6383f232d838c5a22d8c95b6834d9d2ca493f5a505ebe8899503b0e8f9b19e6e2dd81c1628b80016d02097e0134de51054c4e7674824d4d758760fc52377d2cad145e259aa2ffaf54139e1a66b1e0c1c191e32ac59474c6b526f5b3ba07d3e5ec286eddf531fcd5292869be58c9f22ef91026159f7cf9d05ef66b4299f4da48cc1635bf2243051d342d378a22c83390553e873713c0454ce5f3234397111ac3fe3207b86f0ed9fc025c81903e1748103692074f83824fda6341be4f95ff00b0a9a208c267e12fa01825054cc0513629bf3dbb56dc5b90d4316f87654a8be18227978ea0a8a522760cad620d0d14fd38920fb7321314062914275a5f99f677145a6979b156bd82ecd36f23f8e1273cc2759ecc0b2c69d94dad5211d1bed939dd87ed9e07b91d49713a6e16ade0a98aea789f04994e318e4ff2c8a188cd8d43aeb52c6daa3bc29b4af50ea82a247c5cd67b573b34cbadcc0a376d3bbd530d50367b42705d870f2e27a8197ef46070528bfe408360faa2ebb8bf76e9f388572842bcb119f4d84ee34ae31f5cc594f23705a49197b181fb78ed1ec99499c690f843a4d0cf2e226d118e9372271054fbabdcc5c92ae9fefaef0589cd0e722eaf30c1703ec4289c7fd81beaa8a455ccee5298e31e2080c10c366a6fcf56f7d13582ad0bcad037c612b710fc595b70fbefaaca23623b60c6c39b11beb8e5843b6b3dac60f", + "Name": "nagydani-5-qube", + "Gas": 5461, + "NoBenchmark": false + }, + { + "Input": 
"000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000000000000000000000000000000000400c5a1611f8be90071a43db23cc2fe01871cc4c0e8ab5743f6378e4fef77f7f6db0095c0727e20225beb665645403453e325ad5f9aeb9ba99bf3c148f63f9c07cf4fe8847ad5242d6b7d4499f93bd47056ddab8f7dee878fc2314f344dbee2a7c41a5d3db91eff372c730c2fdd3a141a4b61999e36d549b9870cf2f4e632c4d5df5f024f81c028000073a0ed8847cfb0593d36a47142f578f05ccbe28c0c06aeb1b1da027794c48db880278f79ba78ae64eedfea3c07d10e0562668d839749dc95f40467d15cf65b9cfc52c7c4bcef1cda3596dd52631aac942f146c7cebd46065131699ce8385b0db1874336747ee020a5698a3d1a1082665721e769567f579830f9d259cec1a836845109c21cf6b25da572512bf3c42fd4b96e43895589042ab60dd41f497db96aec102087fe784165bb45f942859268fd2ff6c012d9d00c02ba83eace047cc5f7b2c392c2955c58a49f0338d6fc58749c9db2155522ac17914ec216ad87f12e0ee95574613942fa615898c4d9e8a3be68cd6afa4e7a003dedbdf8edfee31162b174f965b20ae752ad89c967b3068b6f722c16b354456ba8e280f987c08e0a52d40a2e8f3a59b94d590aeef01879eb7a90b3ee7d772c839c85519cbeaddc0c193ec4874a463b53fcaea3271d80ebfb39b33489365fc039ae549a17a9ff898eea2f4cb27b8dbee4c17b998438575b2b8d107e4a0d66ba7fca85b41a58a8d51f191a35c856dfbe8aef2b00048a694bbccff832d23c8ca7a7ff0b6c0b3011d00b97c86c0628444d267c951d9e4fb8f83e154b8f74fb51aa16535e498235c5597dac9606ed0be3173a3836baa4e7d756ffe1e2879b415d3846bccd538c05b847785699aefde3e305decb600cd8fb0e7d8de5efc26971a6ad4e6d7a2d91474f1023a0ac4b78dc937da0ce607a45974d2cac1c33a2631ff7fe6144a3b2e5cf98b531a9627dea92c1dc82204d09db0439b6a11dd64b484e1263aa45fd9539b6020b55e3baece3986a8bffc1003406348f5c61265099ed43a766ee4f93f5f9c5abbc32a0fd3ac2b35b87f9ec26037d88275bd7dd0a54474995ee34ed3727f3f97c48db544b1980193a4b76a8a3ddab3591ce527f16d91882e67f0103b5cda53f7da54d489fc4ac08b6ab358a5a04aa9daa16219d50bd672a7cb804ed769d218807544e5993f1c27427104b349906a0b654df0bf69328afd3013fbe430155339c39f236df5557bf92f1ded7ff609a8502f49064ec3d1dbfb6c15d3a4c11a4f8acd12278cbf68acd5709463d12e3338a6eddb8c112f199645e23154a8e60879d2a654e3ed9296aa28f134168619691cd2c6b9e2eba4438381676173fc63c2588a3c5910dc149cf3760f0aa9fa9c3f5faa9162b0bf1aac9dd32b706a60ef53cbdb394b6b40222b5bc80eea82ba8958386672564cae3794f977871ab62337cf010001e30049201ec12937e7ce79d0f55d9c810e20acf52212aca1d3888949e0e4830aad88d804161230eb89d4d329cc83570fe257217d2119134048dd2ed167646975fc7d77136919a049ea74cf08ddd2b896890bb24a0ba18094a22baa351bf29ad96c66bbb1a598f2ca391749620e62d61c3561a7d3653ccc8892c7b99baaf76bf836e2991cb06d6bc0514568ff0d1ec8bb4b3d6984f5eaefb17d3ea2893722375d3ddb8e389a8eef7d7d198f8e687d6a513983df906099f9a2d23f4f9dec6f8ef2f11fc0a21fac45353b94e00486f5e17d386af42502d09db33cf0cf28310e049c07e88682aeeb00cb833c5174266e62407a57583f1f88b304b7c6e0c84bbe1c0fd423072d37a5bd0aacf764229e5c7cd02473460ba3645cd8e8ae144065bf02d0dd238593d8e230354f67e0b2f23012c23274f80e3ee31e35e2606a4a3f31d94ab755e6d163cff52cbb36b6d0cc67ffc512aeed1dce4d7a0d70ce82f2baba12e8d514dc92a056f994adfb17b5b9712bd5186f27a2fda1f7039c5df2c8587fdc62f5627580c13234b55be4df3056050e2d1ef3218f0dd66cb05265fe1acfb0989d8213f2c19d1735a7cf3fa65d88dad5af52dc2bba22b7abf46c3bc77b5091baab9e8f0ddc4d5e581037de91a9f8dcbc69309be29cc815cf19a20a7585b8b3073edf51fc9baeb3e509b97fa4ecfd621e0fd57bd61cac1b895c03248ff12bdbc57509250df3517e8a3fe1d776836b34ab352b973d932ef708b14f7418f9eceb1d87667e61e3e758649cb083f01b133d37ab2f5afa96d6c84bcacf4efc3851ad308c1e7d9113624fce29fab460ab9d2a48d92cdb281103a5250ad44cb2ff6e67ac670c02fdafb3e0f1353953d6d7d5646ca1568dea55275a050ec501b7c6250444f7219f1ba7521ba3b93d089727ca5f
3bbe0d6c1300b423377004954c5628fdb65770b18ced5c9b23a4a5a6d6ef25fe01b4ce278de0bcc4ed86e28a0a68818ffa40970128cf2c38740e80037984428c1bd5113f40ff47512ee6f4e4d8f9b8e8e1b3040d2928d003bd1c1329dc885302fbce9fa81c23b4dc49c7c82d29b52957847898676c89aa5d32b5b0e1c0d5a2b79a19d67562f407f19425687971a957375879d90c5f57c857136c17106c9ab1b99d80e69c8c954ed386493368884b55c939b8d64d26f643e800c56f90c01079d7c534e3b2b7ae352cefd3016da55f6a85eb803b85e2304915fd2001f77c74e28746293c46e4f5f0fd49cf988aafd0026b8e7a3bab2da5cdce1ea26c2e29ec03f4807fac432662b2d6c060be1c7be0e5489de69d0a6e03a4b9117f9244b34a0f1ecba89884f781c6320412413a00c4980287409a2a78c2cd7e65cecebbe4ec1c28cac4dd95f6998e78fc6f1392384331c9436aa10e10e2bf8ad2c4eafbcf276aa7bae64b74428911b3269c749338b0fc5075ad", + "Expected": "5a0eb2bdf0ac1cae8e586689fa16cd4b07dfdedaec8a110ea1fdb059dd5253231b6132987598dfc6e11f86780428982d50cf68f67ae452622c3b336b537ef3298ca645e8f89ee39a26758206a5a3f6409afc709582f95274b57b71fae5c6b74619ae6f089a5393c5b79235d9caf699d23d88fb873f78379690ad8405e34c19f5257d596580c7a6a7206a3712825afe630c76b31cdb4a23e7f0632e10f14f4e282c81a66451a26f8df2a352b5b9f607a7198449d1b926e27036810368e691a74b91c61afa73d9d3b99453e7c8b50fd4f09c039a2f2feb5c419206694c31b92df1d9586140cb3417b38d0c503c7b508cc2ed12e813a1c795e9829eb39ee78eeaf360a169b491a1d4e419574e712402de9d48d54c1ae5e03739b7156615e8267e1fb0a897f067afd11fb33f6e24182d7aaaaa18fe5bc1982f20d6b871e5a398f0f6f718181d31ec225cfa9a0a70124ed9a70031bdf0c1c7829f708b6e17d50419ef361cf77d99c85f44607186c8d683106b8bd38a49b5d0fb503b397a83388c5678dcfcc737499d84512690701ed621a6f0172aecf037184ddf0f2453e4053024018e5ab2e30d6d5363b56e8b41509317c99042f517247474ab3abc848e00a07f69c254f46f2a05cf6ed84e5cc906a518fdcfdf2c61ce731f24c5264f1a25fc04934dc28aec112134dd523f70115074ca34e3807aa4cb925147f3a0ce152d323bd8c675ace446d0fd1ae30c4b57f0eb2c23884bc18f0964c0114796c5b6d080c3d89175665fbf63a6381a6a9da39ad070b645c8bb1779506da14439a9f5b5d481954764ea114fac688930bc68534d403cff4210673b6a6ff7ae416b7cd41404c3d3f282fcd193b86d0f54d0006c2a503b40d5c3930da980565b8f9630e9493a79d1c03e74e5f93ac8e4dc1a901ec5e3b3e57049124c7b72ea345aa359e782285d9e6a5c144a378111dd02c40855ff9c2be9b48425cb0b2fd62dc8678fd151121cf26a65e917d65d8e0dacfae108eb5508b601fb8ffa370be1f9a8b749a2d12eeab81f41079de87e2d777994fa4d28188c579ad327f9957fb7bdecec5c680844dd43cb57cf87aeb763c003e65011f73f8c63442df39a92b946a6bd968a1c1e4d5fa7d88476a68bd8e20e5b70a99259c7d3f85fb1b65cd2e93972e6264e74ebf289b8b6979b9b68a85cd5b360c1987f87235c3c845d62489e33acf85d53fa3561fe3a3aee18924588d9c6eba4edb7a4d106b31173e42929f6f0c48c80ce6a72d54eca7c0fe870068b7a7c89c63cdda593f5b32d3cb4ea8a32c39f00ab449155757172d66763ed9527019d6de6c9f2416aa6203f4d11c9ebee1e1d3845099e55504446448027212616167eb36035726daa7698b075286f5379cd3e93cb3e0cf4f9cb8d017facbb5550ed32d5ec5400ae57e47e2bf78d1eaeff9480cc765ceff39db500", + "Name": "nagydani-5-pow0x10001", + "Gas": 87381, + "NoBenchmark": false + } +] \ No newline at end of file diff --git a/crypto/bls12381/arithmetic_fallback.go b/crypto/bls12381/arithmetic_fallback.go index 19fb589104..91cabf4f3d 100644 --- a/crypto/bls12381/arithmetic_fallback.go +++ b/crypto/bls12381/arithmetic_fallback.go @@ -207,7 +207,7 @@ func lsubAssign(z, x *fe) { z[2], b = bits.Sub64(z[2], x[2], b) z[3], b = bits.Sub64(z[3], x[3], b) z[4], b = bits.Sub64(z[4], x[4], b) - z[5], b = bits.Sub64(z[5], x[5], b) + z[5], _ = bits.Sub64(z[5], x[5], b) } func neg(z *fe, x *fe) { diff --git a/crypto/bls12381/bls12_381_test.go b/crypto/bls12381/bls12_381_test.go index 51523c9ee7..6bf5834105 100644 --- a/crypto/bls12381/bls12_381_test.go 
+++ b/crypto/bls12381/bls12_381_test.go @@ -5,7 +5,7 @@ import ( "math/big" ) -var fuz int = 10 +var fuz = 10 func randScalar(max *big.Int) *big.Int { a, _ := rand.Int(rand.Reader, max) diff --git a/crypto/bls12381/fp_test.go b/crypto/bls12381/fp_test.go index 14bb4d7d65..97528d9db3 100644 --- a/crypto/bls12381/fp_test.go +++ b/crypto/bls12381/fp_test.go @@ -1393,6 +1393,15 @@ func BenchmarkMultiplication(t *testing.B) { } } +func BenchmarkInverse(t *testing.B) { + a, _ := new(fe).rand(rand.Reader) + b, _ := new(fe).rand(rand.Reader) + t.ResetTimer() + for i := 0; i < t.N; i++ { + inverse(a, b) + } +} + func padBytes(in []byte, size int) []byte { out := make([]byte, size) if len(in) > size { diff --git a/crypto/bls12381/g1.go b/crypto/bls12381/g1.go index 63d38f90c2..d853823cd2 100644 --- a/crypto/bls12381/g1.go +++ b/crypto/bls12381/g1.go @@ -266,9 +266,8 @@ func (g *G1) Add(r, p1, p2 *PointG1) *PointG1 { if t[1].equal(t[3]) { if t[0].equal(t[2]) { return g.Double(r, p1) - } else { - return r.Zero() } + return r.Zero() } sub(t[1], t[1], t[3]) double(t[4], t[1]) diff --git a/crypto/bls12381/g2.go b/crypto/bls12381/g2.go index 1d1a3258f7..fa110e3edf 100644 --- a/crypto/bls12381/g2.go +++ b/crypto/bls12381/g2.go @@ -287,9 +287,8 @@ func (g *G2) Add(r, p1, p2 *PointG2) *PointG2 { if t[1].equal(t[3]) { if t[0].equal(t[2]) { return g.Double(r, p1) - } else { - return r.Zero() } + return r.Zero() } g.f.sub(t[1], t[1], t[3]) g.f.double(t[4], t[1]) diff --git a/crypto/bn256/bn256_fuzz.go b/crypto/bn256/bn256_fuzz.go deleted file mode 100644 index 6aa1421170..0000000000 --- a/crypto/bn256/bn256_fuzz.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2018 Péter Szilágyi. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be found -// in the LICENSE file. - -// +build gofuzz - -package bn256 - -import ( - "bytes" - "math/big" - - cloudflare "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" - google "github.com/ethereum/go-ethereum/crypto/bn256/google" -) - -// FuzzAdd fuzzez bn256 addition between the Google and Cloudflare libraries. -func FuzzAdd(data []byte) int { - // Ensure we have enough data in the first place - if len(data) != 128 { - return 0 - } - // Ensure both libs can parse the first curve point - xc := new(cloudflare.G1) - _, errc := xc.Unmarshal(data[:64]) - - xg := new(google.G1) - _, errg := xg.Unmarshal(data[:64]) - - if (errc == nil) != (errg == nil) { - panic("parse mismatch") - } else if errc != nil { - return 0 - } - // Ensure both libs can parse the second curve point - yc := new(cloudflare.G1) - _, errc = yc.Unmarshal(data[64:]) - - yg := new(google.G1) - _, errg = yg.Unmarshal(data[64:]) - - if (errc == nil) != (errg == nil) { - panic("parse mismatch") - } else if errc != nil { - return 0 - } - // Add the two points and ensure they result in the same output - rc := new(cloudflare.G1) - rc.Add(xc, yc) - - rg := new(google.G1) - rg.Add(xg, yg) - - if !bytes.Equal(rc.Marshal(), rg.Marshal()) { - panic("add mismatch") - } - return 0 -} - -// FuzzMul fuzzez bn256 scalar multiplication between the Google and Cloudflare -// libraries. 
-func FuzzMul(data []byte) int { - // Ensure we have enough data in the first place - if len(data) != 96 { - return 0 - } - // Ensure both libs can parse the curve point - pc := new(cloudflare.G1) - _, errc := pc.Unmarshal(data[:64]) - - pg := new(google.G1) - _, errg := pg.Unmarshal(data[:64]) - - if (errc == nil) != (errg == nil) { - panic("parse mismatch") - } else if errc != nil { - return 0 - } - // Add the two points and ensure they result in the same output - rc := new(cloudflare.G1) - rc.ScalarMult(pc, new(big.Int).SetBytes(data[64:])) - - rg := new(google.G1) - rg.ScalarMult(pg, new(big.Int).SetBytes(data[64:])) - - if !bytes.Equal(rc.Marshal(), rg.Marshal()) { - panic("scalar mul mismatch") - } - return 0 -} - -func FuzzPair(data []byte) int { - // Ensure we have enough data in the first place - if len(data) != 192 { - return 0 - } - // Ensure both libs can parse the curve point - pc := new(cloudflare.G1) - _, errc := pc.Unmarshal(data[:64]) - - pg := new(google.G1) - _, errg := pg.Unmarshal(data[:64]) - - if (errc == nil) != (errg == nil) { - panic("parse mismatch") - } else if errc != nil { - return 0 - } - // Ensure both libs can parse the twist point - tc := new(cloudflare.G2) - _, errc = tc.Unmarshal(data[64:]) - - tg := new(google.G2) - _, errg = tg.Unmarshal(data[64:]) - - if (errc == nil) != (errg == nil) { - panic("parse mismatch") - } else if errc != nil { - return 0 - } - // Pair the two points and ensure thet result in the same output - if cloudflare.PairingCheck([]*cloudflare.G1{pc}, []*cloudflare.G2{tc}) != google.PairingCheck([]*google.G1{pg}, []*google.G2{tg}) { - panic("pair mismatch") - } - return 0 -} diff --git a/crypto/bn256/cloudflare/bn256.go b/crypto/bn256/cloudflare/bn256.go index 38822a76bf..4f607af2ad 100644 --- a/crypto/bn256/cloudflare/bn256.go +++ b/crypto/bn256/cloudflare/bn256.go @@ -9,8 +9,13 @@ // // This package specifically implements the Optimal Ate pairing over a 256-bit // Barreto-Naehrig curve as described in -// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible -// with the implementation described in that paper. +// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is not +// compatible with the implementation described in that paper, as different +// parameters are chosen. +// +// (This package previously claimed to operate at a 128-bit security level. +// However, recent improvements in attacks mean that is no longer true. See +// https://moderncrypto.org/mail-archive/curves/2016/000740.html.) package bn256 import ( @@ -23,7 +28,7 @@ import ( func randomK(r io.Reader) (k *big.Int, err error) { for { k, err = rand.Int(r, Order) - if k.Sign() > 0 || err != nil { + if err != nil || k.Sign() > 0 { return } } diff --git a/crypto/bn256/google/bn256.go b/crypto/bn256/google/bn256.go index e0402e51f0..0a9d5cd35d 100644 --- a/crypto/bn256/google/bn256.go +++ b/crypto/bn256/google/bn256.go @@ -12,8 +12,9 @@ // // This package specifically implements the Optimal Ate pairing over a 256-bit // Barreto-Naehrig curve as described in -// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is compatible -// with the implementation described in that paper. +// http://cryptojedi.org/papers/dclxvi-20100714.pdf. Its output is not +// compatible with the implementation described in that paper, as different +// parameters are chosen. // // (This package previously claimed to operate at a 128-bit security level. // However, recent improvements in attacks mean that is no longer true. 
See diff --git a/crypto/bn256/google/constants.go b/crypto/bn256/google/constants.go index ab649d7f3f..2990bd9512 100644 --- a/crypto/bn256/google/constants.go +++ b/crypto/bn256/google/constants.go @@ -13,13 +13,16 @@ func bigFromBase10(s string) *big.Int { return n } -// u is the BN parameter that determines the prime: 1868033³. +// u is the BN parameter that determines the prime. var u = bigFromBase10("4965661367192848881") -// p is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1. +// P is a prime over which we form a basic field: 36u⁴+36u³+24u²+6u+1. var P = bigFromBase10("21888242871839275222246405745257275088696311157297823662689037894645226208583") // Order is the number of elements in both G₁ and G₂: 36u⁴+36u³+18u²+6u+1. +// Needs to be highly 2-adic for efficient SNARK key and proof generation. +// Order - 1 = 2^28 * 3^2 * 13 * 29 * 983 * 11003 * 237073 * 405928799 * 1670836401704629 * 13818364434197438864469338081. +// Refer to https://eprint.iacr.org/2013/879.pdf and https://eprint.iacr.org/2013/507.pdf for more information on these parameters. var Order = bigFromBase10("21888242871839275222246405745257275088548364400416034343698204186575808495617") // xiToPMinus1Over6 is ξ^((p-1)/6) where ξ = i+9. diff --git a/crypto/crypto.go b/crypto/crypto.go index a4a49136a8..40969a2895 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -60,10 +60,23 @@ type KeccakState interface { Read([]byte) (int, error) } +// NewKeccakState creates a new KeccakState +func NewKeccakState() KeccakState { + return sha3.NewLegacyKeccak256().(KeccakState) +} + +// HashData hashes the provided data using the KeccakState and returns a 32 byte hash +func HashData(kh KeccakState, data []byte) (h common.Hash) { + kh.Reset() + kh.Write(data) + kh.Read(h[:]) + return h +} + // Keccak256 calculates and returns the Keccak256 hash of the input data. func Keccak256(data ...[]byte) []byte { b := make([]byte, 32) - d := sha3.NewLegacyKeccak256().(KeccakState) + d := NewKeccakState() for _, b := range data { d.Write(b) } @@ -74,7 +87,7 @@ func Keccak256(data ...[]byte) []byte { // Keccak256Hash calculates and returns the Keccak256 hash of the input data, // converting it to an internal Hash data structure. func Keccak256Hash(data ...[]byte) (h common.Hash) { - d := sha3.NewLegacyKeccak256().(KeccakState) + d := NewKeccakState() for _, b := range data { d.Write(b) } diff --git a/crypto/crypto_test.go b/crypto/crypto_test.go index f71ae8232a..f9b0d3e834 100644 --- a/crypto/crypto_test.go +++ b/crypto/crypto_test.go @@ -42,6 +42,13 @@ func TestKeccak256Hash(t *testing.T) { checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := Keccak256Hash(in); return h[:] }, msg, exp) } +func TestKeccak256Hasher(t *testing.T) { + msg := []byte("abc") + exp, _ := hex.DecodeString("4e03657aea45a94fc7d47ba826c8d667c0d1e6e33a64a036ec44f58fa12d6c45") + hasher := NewKeccakState() + checkhash(t, "Sha3-256-array", func(in []byte) []byte { h := HashData(hasher, in); return h[:] }, msg, exp) +} + func TestToECDSAErrors(t *testing.T) { if _, err := HexToECDSA("0000000000000000000000000000000000000000000000000000000000000000"); err == nil { t.Fatal("HexToECDSA should've returned error") diff --git a/crypto/secp256k1/curve.go b/crypto/secp256k1/curve.go index 5409ee1d2c..8f83cccad9 100644 --- a/crypto/secp256k1/curve.go +++ b/crypto/secp256k1/curve.go @@ -116,6 +116,10 @@ func (BitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool { // affineFromJacobian reverses the Jacobian transform. See the comment at the // top of the file. 
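The NewKeccakState and HashData helpers added to crypto/crypto.go above exist so callers can reuse a single Keccak-256 hasher across many inputs instead of allocating a fresh sha3 state per call, as Keccak256Hash does. A small usage sketch against the API exactly as added in this diff (the input blobs are placeholder data for illustration):

	package main

	import (
		"fmt"

		"github.com/ethereum/go-ethereum/crypto"
	)

	func main() {
		// One KeccakState is reset and reused for every input, so hashing a
		// batch of blobs does not construct a new hasher each time.
		hasher := crypto.NewKeccakState()
		for _, blob := range [][]byte{[]byte("abc"), []byte("another blob")} {
			h := crypto.HashData(hasher, blob) // returns a common.Hash
			fmt.Printf("%x\n", h)
		}
	}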
func (BitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) { + if z.Sign() == 0 { + return new(big.Int), new(big.Int) + } + zinv := new(big.Int).ModInverse(z, BitCurve.P) zinvsq := new(big.Int).Mul(zinv, zinv) diff --git a/crypto/secp256k1/dummy.go b/crypto/secp256k1/dummy.go new file mode 100644 index 0000000000..c0f2ee52c0 --- /dev/null +++ b/crypto/secp256k1/dummy.go @@ -0,0 +1,20 @@ +// +build dummy + +// This file is part of a workaround for `go mod vendor` which won't vendor +// C files if there's no Go file in the same directory. +// This would prevent the crypto/secp256k1/libsecp256k1/include/secp256k1.h file to be vendored. +// +// This Go file imports the c directory where there is another dummy.go file which +// is the second part of this workaround. +// +// These two files combined make it so `go mod vendor` behaves correctly. +// +// See this issue for reference: https://github.com/golang/go/issues/26366 + +package secp256k1 + +import ( + _ "github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/include" + _ "github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src" + _ "github.com/ethereum/go-ethereum/crypto/secp256k1/libsecp256k1/src/modules/recovery" +) diff --git a/crypto/secp256k1/libsecp256k1/contrib/dummy.go b/crypto/secp256k1/libsecp256k1/contrib/dummy.go new file mode 100644 index 0000000000..fda594be99 --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/contrib/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package contrib diff --git a/crypto/secp256k1/libsecp256k1/dummy.go b/crypto/secp256k1/libsecp256k1/dummy.go new file mode 100644 index 0000000000..379b16992f --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package libsecp256k1 diff --git a/crypto/secp256k1/libsecp256k1/include/dummy.go b/crypto/secp256k1/libsecp256k1/include/dummy.go new file mode 100644 index 0000000000..5af540c73c --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/include/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package include diff --git a/crypto/secp256k1/libsecp256k1/src/dummy.go b/crypto/secp256k1/libsecp256k1/src/dummy.go new file mode 100644 index 0000000000..65868f38a8 --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/src/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package src diff --git a/crypto/secp256k1/libsecp256k1/src/modules/dummy.go b/crypto/secp256k1/libsecp256k1/src/modules/dummy.go new file mode 100644 index 0000000000..3c7a696439 --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/src/modules/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. 
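For context on the affineFromJacobian guard added just above: in Jacobian coordinates z == 0 encodes the point at infinity, which has no affine representation. Without the early return, big.Int.ModInverse(0, P) yields nil (zero has no inverse modulo P) and the subsequent multiplications would panic on a nil operand. A small standalone illustration of that failure mode; the prime is the secp256k1 field prime, written out here only because the sketch does not import the package:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // The secp256k1 field prime (the value of BitCurve.P in the patched package).
        p, _ := new(big.Int).SetString("fffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f", 16)

        z := new(big.Int) // z == 0: the Jacobian encoding of the point at infinity

        if zinv := new(big.Int).ModInverse(z, p); zinv == nil {
            // This is exactly the case the new guard short-circuits: the old code
            // carried the nil result into Mul and panicked instead of returning (0, 0).
            fmt.Println("0 has no modular inverse; return (0, 0) instead")
        }
    }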
+package module diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/dummy.go b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/dummy.go new file mode 100644 index 0000000000..b6fc38327e --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package ecdh diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/dummy.go b/crypto/secp256k1/libsecp256k1/src/modules/recovery/dummy.go new file mode 100644 index 0000000000..b9491f0cb9 --- /dev/null +++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/dummy.go @@ -0,0 +1,7 @@ +// +build dummy + +// Package c contains only a C file. +// +// This Go file is part of a workaround for `go mod vendor`. +// Please see the file crypto/secp256k1/dummy.go for more information. +package recovery diff --git a/crypto/signify/signify.go b/crypto/signify/signify.go new file mode 100644 index 0000000000..e280f87268 --- /dev/null +++ b/crypto/signify/signify.go @@ -0,0 +1,100 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// signFile reads the contents of an input file and signs it (in armored format) +// with the key provided, placing the signature into the output file. + +package signify + +import ( + "bytes" + "crypto/ed25519" + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "strings" + "time" +) + +var ( + errInvalidKeyHeader = errors.New("incorrect key header") + errInvalidKeyLength = errors.New("invalid, key length != 104") +) + +func parsePrivateKey(key string) (k ed25519.PrivateKey, header []byte, keyNum []byte, err error) { + keydata, err := base64.StdEncoding.DecodeString(key) + if err != nil { + return nil, nil, nil, err + } + if len(keydata) != 104 { + return nil, nil, nil, errInvalidKeyLength + } + if string(keydata[:2]) != "Ed" { + return nil, nil, nil, errInvalidKeyHeader + } + return keydata[40:], keydata[:2], keydata[32:40], nil +} + +// SignFile creates a signature of the input file. +// +// This accepts base64 keys in the format created by the 'signify' tool. +// The signature is written to the 'output' file. +func SignFile(input string, output string, key string, untrustedComment string, trustedComment string) error { + // Pre-check comments and ensure they're set to something. 
+ if strings.IndexByte(untrustedComment, '\n') >= 0 { + return errors.New("untrusted comment must not contain newline") + } + if strings.IndexByte(trustedComment, '\n') >= 0 { + return errors.New("trusted comment must not contain newline") + } + if untrustedComment == "" { + untrustedComment = "verify with " + input + ".pub" + } + if trustedComment == "" { + trustedComment = fmt.Sprintf("timestamp:%d", time.Now().Unix()) + } + + filedata, err := ioutil.ReadFile(input) + if err != nil { + return err + } + skey, header, keyNum, err := parsePrivateKey(key) + if err != nil { + return err + } + + // Create the main data signature. + rawSig := ed25519.Sign(skey, filedata) + var dataSig []byte + dataSig = append(dataSig, header...) + dataSig = append(dataSig, keyNum...) + dataSig = append(dataSig, rawSig...) + + // Create the comment signature. + var commentSigInput []byte + commentSigInput = append(commentSigInput, rawSig...) + commentSigInput = append(commentSigInput, []byte(trustedComment)...) + commentSig := ed25519.Sign(skey, commentSigInput) + + // Create the output file. + var out = new(bytes.Buffer) + fmt.Fprintln(out, "untrusted comment:", untrustedComment) + fmt.Fprintln(out, base64.StdEncoding.EncodeToString(dataSig)) + fmt.Fprintln(out, "trusted comment:", trustedComment) + fmt.Fprintln(out, base64.StdEncoding.EncodeToString(commentSig)) + return ioutil.WriteFile(output, out.Bytes(), 0644) +} diff --git a/crypto/signify/signify_fuzz.go b/crypto/signify/signify_fuzz.go new file mode 100644 index 0000000000..f9167900ad --- /dev/null +++ b/crypto/signify/signify_fuzz.go @@ -0,0 +1,150 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
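To make the new signify package above easier to picture: SignFile writes a four-line signature file (untrusted comment, Ed25519 signature over the data, trusted comment, and a second signature binding the trusted comment to the first), which the tests below verify with both signify-openbsd and go-minisign. A rough usage sketch; the key strings are placeholders for a pair generated with `signify -G`, and the file names are arbitrary examples:

    package main

    import (
        "log"

        "github.com/ethereum/go-ethereum/crypto/signify"
        "github.com/jedisct1/go-minisign"
    )

    func main() {
        // Placeholders: a signify-generated key pair supplied by the caller.
        secKey := "..." // base64 secret key (104 bytes once decoded, "Ed" header)
        pubKey := "..." // matching base64 public key

        // Sign the artifact, writing the signature next to it. Empty comments
        // fall back to the defaults chosen inside SignFile.
        if err := signify.SignFile("geth-linux-amd64.tar.gz", "geth-linux-amd64.tar.gz.sig", secKey, "", ""); err != nil {
            log.Fatal(err)
        }

        // Verify the result with go-minisign, mirroring the unit tests below.
        sig, err := minisign.NewSignatureFromFile("geth-linux-amd64.tar.gz.sig")
        if err != nil {
            log.Fatal(err)
        }
        pk, err := minisign.NewPublicKey(pubKey)
        if err != nil {
            log.Fatal(err)
        }
        valid, err := pk.VerifyFromFile("geth-linux-amd64.tar.gz", sig)
        if err != nil {
            log.Fatal(err)
        }
        log.Println("signature valid:", valid)
    }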
+ +// +build gofuzz + +package signify + +import ( + "bufio" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + + fuzz "github.com/google/gofuzz" + "github.com/jedisct1/go-minisign" +) + +func Fuzz(data []byte) int { + if len(data) < 32 { + return -1 + } + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + panic(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + testSecKey, testPubKey := createKeyPair() + // Create message + tmpFile.Write(data) + if err = tmpFile.Close(); err != nil { + panic(err) + } + // Fuzz comments + var untrustedComment string + var trustedComment string + f := fuzz.NewFromGoFuzz(data) + f.Fuzz(&untrustedComment) + f.Fuzz(&trustedComment) + fmt.Printf("untrusted: %v\n", untrustedComment) + fmt.Printf("trusted: %v\n", trustedComment) + + err = SignifySignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, untrustedComment, trustedComment) + if err != nil { + panic(err) + } + defer os.Remove(tmpFile.Name() + ".sig") + + signify := "signify" + path := os.Getenv("SIGNIFY") + if path != "" { + signify = path + } + + _, err := exec.LookPath(signify) + if err != nil { + panic(err) + } + + // Write the public key into the file to pass it as + // an argument to signify-openbsd + pubKeyFile, err := ioutil.TempFile("", "") + if err != nil { + panic(err) + } + defer os.Remove(pubKeyFile.Name()) + defer pubKeyFile.Close() + pubKeyFile.WriteString("untrusted comment: signify public key\n") + pubKeyFile.WriteString(testPubKey) + pubKeyFile.WriteString("\n") + + cmd := exec.Command(signify, "-V", "-p", pubKeyFile.Name(), "-x", tmpFile.Name()+".sig", "-m", tmpFile.Name()) + if output, err := cmd.CombinedOutput(); err != nil { + panic(fmt.Sprintf("could not verify the file: %v, output: \n%s", err, output)) + } + + // Verify the signature using a golang library + sig, err := minisign.NewSignatureFromFile(tmpFile.Name() + ".sig") + if err != nil { + panic(err) + } + + pKey, err := minisign.NewPublicKey(testPubKey) + if err != nil { + panic(err) + } + + valid, err := pKey.VerifyFromFile(tmpFile.Name(), sig) + if err != nil { + panic(err) + } + if !valid { + panic("invalid signature") + } + return 1 +} + +func getKey(fileS string) (string, error) { + file, err := os.Open(fileS) + if err != nil { + log.Fatal(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + // Discard the first line + scanner.Scan() + scanner.Scan() + return scanner.Text(), scanner.Err() +} + +func createKeyPair() (string, string) { + // Create key and put it in correct format + tmpKey, err := ioutil.TempFile("", "") + if err != nil { + panic(err) + } + defer os.Remove(tmpKey.Name()) + defer os.Remove(tmpKey.Name() + ".pub") + defer os.Remove(tmpKey.Name() + ".sec") + cmd := exec.Command("signify", "-G", "-n", "-p", tmpKey.Name()+".pub", "-s", tmpKey.Name()+".sec") + if output, err := cmd.CombinedOutput(); err != nil { + panic(fmt.Sprintf("could not verify the file: %v, output: \n%s", err, output)) + } + secKey, err := getKey(tmpKey.Name() + ".sec") + if err != nil { + panic(err) + } + pubKey, err := getKey(tmpKey.Name() + ".pub") + if err != nil { + panic(err) + } + return secKey, pubKey +} diff --git a/crypto/signify/signify_test.go b/crypto/signify/signify_test.go new file mode 100644 index 0000000000..615d4e6527 --- /dev/null +++ b/crypto/signify/signify_test.go @@ -0,0 +1,154 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// signFile reads the contents of an input file and signs it (in armored format) +// with the key provided, placing the signature into the output file. + +package signify + +import ( + "io/ioutil" + "math/rand" + "os" + "testing" + "time" + + "github.com/jedisct1/go-minisign" +) + +var ( + testSecKey = "RWRCSwAAAABVN5lr2JViGBN8DhX3/Qb/0g0wBdsNAR/APRW2qy9Fjsfr12sK2cd3URUFis1jgzQzaoayK8x4syT4G3Gvlt9RwGIwUYIQW/0mTeI+ECHu1lv5U4Wa2YHEPIesVPyRm5M=" + testPubKey = "RWTAPRW2qy9FjsBiMFGCEFv9Jk3iPhAh7tZb+VOFmtmBxDyHrFT8kZuT" +) + +func TestSignify(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + rand.Seed(time.Now().UnixNano()) + + data := make([]byte, 1024) + rand.Read(data) + tmpFile.Write(data) + + if err = tmpFile.Close(); err != nil { + t.Fatal(err) + } + + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "clé", "croissants") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name() + ".sig") + + // Verify the signature using a golang library + sig, err := minisign.NewSignatureFromFile(tmpFile.Name() + ".sig") + if err != nil { + t.Fatal(err) + } + + pKey, err := minisign.NewPublicKey(testPubKey) + if err != nil { + t.Fatal(err) + } + + valid, err := pKey.VerifyFromFile(tmpFile.Name(), sig) + if err != nil { + t.Fatal(err) + } + if !valid { + t.Fatal("invalid signature") + } +} + +func TestSignifyTrustedCommentTooManyLines(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + rand.Seed(time.Now().UnixNano()) + + data := make([]byte, 1024) + rand.Read(data) + tmpFile.Write(data) + + if err = tmpFile.Close(); err != nil { + t.Fatal(err) + } + + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "", "crois\nsants") + if err == nil || err.Error() == "" { + t.Fatalf("should have errored on a multi-line trusted comment, got %v", err) + } + defer os.Remove(tmpFile.Name() + ".sig") +} + +func TestSignifyTrustedCommentTooManyLinesLF(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + rand.Seed(time.Now().UnixNano()) + + data := make([]byte, 1024) + rand.Read(data) + tmpFile.Write(data) + + if err = tmpFile.Close(); err != nil { + t.Fatal(err) + } + + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "crois\rsants", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name() + ".sig") +} + +func TestSignifyTrustedCommentEmpty(t *testing.T) { + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name()) + defer tmpFile.Close() + + rand.Seed(time.Now().UnixNano()) + + data := make([]byte, 1024) + 
rand.Read(data) + tmpFile.Write(data) + + if err = tmpFile.Close(); err != nil { + t.Fatal(err) + } + + err = SignFile(tmpFile.Name(), tmpFile.Name()+".sig", testSecKey, "", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmpFile.Name() + ".sig") +} diff --git a/eth/api.go b/eth/api.go index fd35656476..53ef91392b 100644 --- a/eth/api.go +++ b/eth/api.go @@ -67,12 +67,12 @@ func (api *PublicEthereumAPI) Hashrate() hexutil.Uint64 { } // ChainId is the EIP-155 replay-protection chain id for the current ethereum chain config. -func (api *PublicEthereumAPI) ChainId() hexutil.Uint64 { - chainID := new(big.Int) +func (api *PublicEthereumAPI) ChainId() (hexutil.Uint64, error) { + // if current block is at or past the EIP-155 replay-protection fork block, return chainID from config if config := api.e.blockchain.Config(); config.IsEIP155(api.e.blockchain.CurrentBlock().Number()) { - chainID = config.ChainID + return (hexutil.Uint64)(config.ChainID.Uint64()), nil } - return (hexutil.Uint64)(chainID.Uint64()) + return hexutil.Uint64(0), fmt.Errorf("chain not synced beyond EIP-155 replay-protection fork block") } // PublicMinerAPI provides an API to control the miner. @@ -331,22 +331,29 @@ type BadBlockArgs struct { // GetBadBlocks returns a list of the last 'bad blocks' that the client has seen on the network // and returns them as a JSON list of block-hashes func (api *PrivateDebugAPI) GetBadBlocks(ctx context.Context) ([]*BadBlockArgs, error) { - blocks := api.eth.BlockChain().BadBlocks() - results := make([]*BadBlockArgs, len(blocks)) - - var err error - for i, block := range blocks { - results[i] = &BadBlockArgs{ - Hash: block.Hash(), - } + var ( + err error + blocks = rawdb.ReadAllBadBlocks(api.eth.chainDb) + results = make([]*BadBlockArgs, 0, len(blocks)) + ) + for _, block := range blocks { + var ( + blockRlp string + blockJSON map[string]interface{} + ) if rlpBytes, err := rlp.EncodeToBytes(block); err != nil { - results[i].RLP = err.Error() // Hacky, but hey, it works + blockRlp = err.Error() // Hacky, but hey, it works } else { - results[i].RLP = fmt.Sprintf("0x%x", rlpBytes) + blockRlp = fmt.Sprintf("0x%x", rlpBytes) } - if results[i].Block, err = ethapi.RPCMarshalBlock(block, true, true); err != nil { - results[i].Block = map[string]interface{}{"error": err.Error()} + if blockJSON, err = ethapi.RPCMarshalBlock(block, true, true); err != nil { + blockJSON = map[string]interface{}{"error": err.Error()} } + results = append(results, &BadBlockArgs{ + Hash: block.Hash(), + RLP: blockRlp, + Block: blockJSON, + }) } return results, nil } @@ -419,10 +426,11 @@ func (api *PrivateDebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, c if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, err := api.computeTxEnv(block, txIndex, 0) + _, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } + defer release() st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) diff --git a/eth/api_backend.go b/eth/api_backend.go index 0e91691d8f..2569972e52 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -41,9 +41,10 @@ import ( // EthAPIBackend implements ethapi.Backend for full nodes type EthAPIBackend struct { - extRPCEnabled bool - eth *Ethereum - gpo *gasprice.Oracle + extRPCEnabled bool + allowUnprotectedTxs bool + eth *Ethereum + gpo *gasprice.Oracle } // ChainConfig 
returns the active chain configuration. @@ -56,7 +57,7 @@ func (b *EthAPIBackend) CurrentBlock() *types.Block { } func (b *EthAPIBackend) SetHead(number uint64) { - b.eth.protocolManager.downloader.Cancel() + b.eth.handler.downloader.Cancel() b.eth.blockchain.SetHead(number) } @@ -194,8 +195,9 @@ func (b *EthAPIBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { vmError := func() error { return nil } - context := core.NewEVMContext(msg, header, b.eth.BlockChain(), nil) - return vm.NewEVM(context, state, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(header, b.eth.BlockChain(), nil) + return vm.NewEVM(context, txContext, state, b.eth.blockchain.Config(), *b.eth.blockchain.GetVMConfig()), vmError, nil } func (b *EthAPIBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { @@ -271,10 +273,6 @@ func (b *EthAPIBackend) Downloader() *downloader.Downloader { return b.eth.Downloader() } -func (b *EthAPIBackend) ProtocolVersion() int { - return b.eth.EthVersion() -} - func (b *EthAPIBackend) SuggestPrice(ctx context.Context) (*big.Int, error) { return b.gpo.SuggestPrice(ctx) } @@ -295,6 +293,10 @@ func (b *EthAPIBackend) ExtRPCEnabled() bool { return b.extRPCEnabled } +func (b *EthAPIBackend) UnprotectedAllowed() bool { + return b.allowUnprotectedTxs +} + func (b *EthAPIBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } @@ -329,3 +331,15 @@ func (b *EthAPIBackend) Miner() *miner.Miner { func (b *EthAPIBackend) StartMining(threads int) error { return b.eth.StartMining(threads) } + +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { + return b.eth.stateAtBlock(block, reexec) +} + +func (b *EthAPIBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { + return b.eth.statesInRange(fromBlock, toBlock, reexec) +} + +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { + return b.eth.stateAtTransaction(block, txIndex, reexec) +} diff --git a/eth/api_test.go b/eth/api_test.go index 42f71e261e..b44eed40bc 100644 --- a/eth/api_test.go +++ b/eth/api_test.go @@ -57,8 +57,10 @@ func (h resultHash) Swap(i, j int) { h[i], h[j] = h[j], h[i] } func (h resultHash) Less(i, j int) bool { return bytes.Compare(h[i].Bytes(), h[j].Bytes()) < 0 } func TestAccountRange(t *testing.T) { + t.Parallel() + var ( - statedb = state.NewDatabase(rawdb.NewMemoryDatabase()) + statedb = state.NewDatabaseWithConfig(rawdb.NewMemoryDatabase(), nil) state, _ = state.New(common.Hash{}, statedb, nil) addrs = [AccountRangeMaxResults * 2]common.Address{} m = map[common.Address]bool{} @@ -126,6 +128,8 @@ func TestAccountRange(t *testing.T) { } func TestEmptyAccountRange(t *testing.T) { + t.Parallel() + var ( statedb = state.NewDatabase(rawdb.NewMemoryDatabase()) state, _ = state.New(common.Hash{}, statedb, nil) @@ -142,6 +146,8 @@ func TestEmptyAccountRange(t *testing.T) { } func TestStorageRangeAt(t *testing.T) { + t.Parallel() + // Create a state where account 0x010000... has a few storage entries. 
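Regarding the ChainId change in eth/api.go above: eth_chainId now returns an error while the local head has not yet passed the EIP-155 fork block, instead of silently reporting a zero chain ID, so callers must be prepared for the call to fail on a node that is still syncing early blocks. A client-side sketch, assuming the standard ethclient wrapper and a local HTTP endpoint:

    package main

    import (
        "context"
        "log"

        "github.com/ethereum/go-ethereum/ethclient"
    )

    func main() {
        client, err := ethclient.Dial("http://localhost:8545") // assumed local geth HTTP endpoint
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // With this change eth_chainId errors until the head is past the
        // EIP-155 fork block, rather than returning chain ID 0.
        chainID, err := client.ChainID(context.Background())
        if err != nil {
            log.Printf("chain ID not available yet (node not synced past the EIP-155 fork): %v", err)
            return
        }
        log.Println("chain ID:", chainID)
    }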
var ( state, _ = state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) diff --git a/eth/backend.go b/eth/backend.go index 3fd027137c..76ce5137f4 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -24,21 +24,25 @@ import ( "runtime" "sync" "sync/atomic" + "time" "github.com/ethereum/go-ethereum/accounts" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/consensus/clique" - "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state/pruner" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" @@ -47,21 +51,25 @@ import ( "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" ) +// Config contains the configuration options of the ETH protocol. +// Deprecated: use ethconfig.Config instead. +type Config = ethconfig.Config + // Ethereum implements the Ethereum full node service. 
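On the `type Config = ethconfig.Config` line above: this is a type alias, so existing code that refers to eth.Config keeps compiling unchanged while the definition now lives in the new eth/ethconfig package. A minimal compatibility sketch; the helper name, the NetworkId value and the omitted node setup are illustrative only:

    package example

    import (
        "github.com/ethereum/go-ethereum/eth"
        "github.com/ethereum/go-ethereum/eth/ethconfig"
        "github.com/ethereum/go-ethereum/node"
    )

    // newBackend shows that pre-existing code written against eth.Config still
    // compiles: the alias makes eth.Config and ethconfig.Config the same type.
    func newBackend(stack *node.Node) (*eth.Ethereum, error) {
        var cfg eth.Config = ethconfig.Defaults
        cfg.NetworkId = 1337 // hypothetical tweak for a private network
        return eth.New(stack, &cfg)
    }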
type Ethereum struct { - config *Config + config *ethconfig.Config // Handlers - txPool *core.TxPool - blockchain *core.BlockChain - protocolManager *ProtocolManager - dialCandidates enode.Iterator + txPool *core.TxPool + blockchain *core.BlockChain + handler *handler + ethDialCandidates enode.Iterator + snapDialCandidates enode.Iterator // DB interfaces chainDb ethdb.Database // Block chain database @@ -90,7 +98,7 @@ type Ethereum struct { // New creates a new Ethereum object (including the // initialisation of the common Ethereum object) -func New(stack *node.Node, config *Config) (*Ethereum, error) { +func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { // Ensure configuration values are compatible and sane if config.SyncMode == downloader.LightSync { return nil, errors.New("can't run eth.Ethereum in light sync mode, use les.LightEthereum") @@ -99,8 +107,8 @@ func New(stack *node.Node, config *Config) (*Ethereum, error) { return nil, fmt.Errorf("invalid sync mode %d", config.SyncMode) } if config.Miner.GasPrice == nil || config.Miner.GasPrice.Cmp(common.Big0) <= 0 { - log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", DefaultConfig.Miner.GasPrice) - config.Miner.GasPrice = new(big.Int).Set(DefaultConfig.Miner.GasPrice) + log.Warn("Sanitizing invalid miner gas price", "provided", config.Miner.GasPrice, "updated", ethconfig.Defaults.Miner.GasPrice) + config.Miner.GasPrice = new(big.Int).Set(ethconfig.Defaults.Miner.GasPrice) } if config.NoPruning && config.TrieDirtyCache > 0 { if config.SnapshotCache > 0 { @@ -118,24 +126,27 @@ func New(stack *node.Node, config *Config) (*Ethereum, error) { if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin) if _, ok := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !ok { return nil, genesisErr } log.Info("Initialised chain configuration", "config", chainConfig) + if err := pruner.RecoverPruning(stack.ResolvePath(""), chainDb, stack.ResolvePath(config.TrieCleanCacheJournal)); err != nil { + log.Error("Failed to recover state", "error", err) + } eth := &Ethereum{ config: config, chainDb: chainDb, eventMux: stack.EventMux(), accountManager: stack.AccountManager(), - engine: CreateConsensusEngine(stack, chainConfig, &config.Ethash, config.Miner.Notify, config.Miner.Noverify, chainDb), + engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, config.Miner.Notify, config.Miner.Noverify, chainDb), closeBloomHandler: make(chan struct{}), networkID: config.NetworkId, gasPrice: config.Miner.GasPrice, etherbase: config.Miner.Etherbase, bloomRequests: make(chan chan *bloombits.Retrieval), - bloomIndexer: NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), + bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocks, params.BloomConfirms), p2pServer: stack.Server(), } @@ -144,7 +155,7 @@ func New(stack *node.Node, config *Config) (*Ethereum, error) { if bcVersion != nil { dbVer = fmt.Sprintf("%d", *bcVersion) } - log.Info("Initialising Ethereum protocol", "versions", ProtocolVersions, "network", config.NetworkId, "dbversion", dbVer) + log.Info("Initialising Ethereum protocol", "network", config.NetworkId, "dbversion", dbVer) if !config.SkipBcVersionCheck { if bcVersion != nil && *bcVersion > core.BlockChainVersion { @@ -169,6 +180,7 @@ func New(stack 
*node.Node, config *Config) (*Ethereum, error) { TrieDirtyDisabled: config.NoPruning, TrieTimeLimit: config.TrieTimeout, SnapshotLimit: config.SnapshotCache, + Preimages: config.Preimages, } ) eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, eth.shouldPreserve, &config.TxLookupLimit) @@ -194,31 +206,60 @@ func New(stack *node.Node, config *Config) (*Ethereum, error) { if checkpoint == nil { checkpoint = params.TrustedCheckpoints[genesisHash] } - if eth.protocolManager, err = NewProtocolManager(chainConfig, checkpoint, config.SyncMode, config.NetworkId, eth.eventMux, eth.txPool, eth.engine, eth.blockchain, chainDb, cacheLimit, config.Whitelist); err != nil { + if eth.handler, err = newHandler(&handlerConfig{ + Database: chainDb, + Chain: eth.blockchain, + TxPool: eth.txPool, + Network: config.NetworkId, + Sync: config.SyncMode, + BloomCache: uint64(cacheLimit), + EventMux: eth.eventMux, + Checkpoint: checkpoint, + Whitelist: config.Whitelist, + }); err != nil { return nil, err } eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, eth.isLocalBlock) eth.miner.SetExtra(makeExtraData(config.Miner.ExtraData)) - eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), eth, nil} + eth.APIBackend = &EthAPIBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, eth, nil} + if eth.APIBackend.allowUnprotectedTxs { + log.Info("Unprotected transactions allowed") + } gpoParams := config.GPO if gpoParams.Default == nil { gpoParams.Default = config.Miner.GasPrice } eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) - eth.dialCandidates, err = eth.setupDiscovery(&stack.Config().P2P) + eth.ethDialCandidates, err = setupDiscovery(eth.config.EthDiscoveryURLs) + if err != nil { + return nil, err + } + eth.snapDialCandidates, err = setupDiscovery(eth.config.SnapDiscoveryURLs) if err != nil { return nil, err } - // Start the RPC service - eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, eth.NetVersion()) + eth.netRPCService = ethapi.NewPublicNetAPI(eth.p2pServer, config.NetworkId) // Register the backend on the node stack.RegisterAPIs(eth.APIs()) stack.RegisterProtocols(eth.Protocols()) stack.RegisterLifecycle(eth) + // Check for unclean shutdown + if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil { + log.Error("Could not update unclean-shutdown-marker list", "error", err) + } else { + if discards > 0 { + log.Warn("Old unclean shutdowns found", "count", discards) + } + for _, tstamp := range uncleanShutdowns { + t := time.Unix(int64(tstamp), 0) + log.Warn("Unclean shutdown detected", "booted", t, + "age", common.PrettyAge(t)) + } + } return eth, nil } @@ -239,39 +280,6 @@ func makeExtraData(extra []byte) []byte { return extra } -// CreateConsensusEngine creates the required type of consensus engine instance for an Ethereum service -func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { - // If proof-of-authority is requested, set it up - if chainConfig.Clique != nil { - return clique.New(chainConfig.Clique, db) - } - // Otherwise assume proof-of-work - switch config.PowMode { - case ethash.ModeFake: - log.Warn("Ethash used in fake mode") - return ethash.NewFaker() - case ethash.ModeTest: - log.Warn("Ethash used in test mode") - return ethash.NewTester(nil, noverify) - case ethash.ModeShared: - log.Warn("Ethash used in shared 
mode") - return ethash.NewShared() - default: - engine := ethash.New(ethash.Config{ - CacheDir: stack.ResolvePath(config.CacheDir), - CachesInMem: config.CachesInMem, - CachesOnDisk: config.CachesOnDisk, - CachesLockMmap: config.CachesLockMmap, - DatasetDir: config.DatasetDir, - DatasetsInMem: config.DatasetsInMem, - DatasetsOnDisk: config.DatasetsOnDisk, - DatasetsLockMmap: config.DatasetsLockMmap, - }, notify, noverify) - engine.SetThreads(-1) // Disable CPU mining - return engine - } -} - // APIs return the collection of RPC services the ethereum package offers. // NOTE, some of these services probably need to be moved to somewhere else. func (s *Ethereum) APIs() []rpc.API { @@ -295,7 +303,7 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: downloader.NewPublicDownloaderAPI(s.protocolManager.downloader, s.eventMux), + Service: downloader.NewPublicDownloaderAPI(s.handler.downloader, s.eventMux), Public: true, }, { Namespace: "miner", @@ -305,7 +313,7 @@ func (s *Ethereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: filters.NewPublicFilterAPI(s.APIBackend, false), + Service: filters.NewPublicFilterAPI(s.APIBackend, false, 5*time.Minute), Public: true, }, { Namespace: "admin", @@ -458,7 +466,7 @@ func (s *Ethereum) StartMining(threads int) error { } // If mining is started, we can disable the transaction rejection mechanism // introduced to speed sync times. - atomic.StoreUint32(&s.protocolManager.acceptTxs, 1) + atomic.StoreUint32(&s.handler.acceptTxs, 1) go s.miner.Start(eb) } @@ -489,21 +497,17 @@ func (s *Ethereum) EventMux() *event.TypeMux { return s.eventMux } func (s *Ethereum) Engine() consensus.Engine { return s.engine } func (s *Ethereum) ChainDb() ethdb.Database { return s.chainDb } func (s *Ethereum) IsListening() bool { return true } // Always listening -func (s *Ethereum) EthVersion() int { return int(ProtocolVersions[0]) } -func (s *Ethereum) NetVersion() uint64 { return s.networkID } -func (s *Ethereum) Downloader() *downloader.Downloader { return s.protocolManager.downloader } -func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.protocolManager.acceptTxs) == 1 } +func (s *Ethereum) Downloader() *downloader.Downloader { return s.handler.downloader } +func (s *Ethereum) Synced() bool { return atomic.LoadUint32(&s.handler.acceptTxs) == 1 } func (s *Ethereum) ArchiveMode() bool { return s.config.NoPruning } func (s *Ethereum) BloomIndexer() *core.ChainIndexer { return s.bloomIndexer } // Protocols returns all the currently configured // network protocols to start. func (s *Ethereum) Protocols() []p2p.Protocol { - protos := make([]p2p.Protocol, len(ProtocolVersions)) - for i, vsn := range ProtocolVersions { - protos[i] = s.protocolManager.makeProtocol(vsn) - protos[i].Attributes = []enr.Entry{s.currentEthEntry()} - protos[i].DialCandidates = s.dialCandidates + protos := eth.MakeProtocols((*ethHandler)(s.handler), s.networkID, s.ethDialCandidates) + if s.config.SnapshotCache > 0 { + protos = append(protos, snap.MakeProtocols((*snapHandler)(s.handler), s.snapDialCandidates)...) } return protos } @@ -511,7 +515,7 @@ func (s *Ethereum) Protocols() []p2p.Protocol { // Start implements node.Lifecycle, starting all internal goroutines needed by the // Ethereum protocol implementation. 
func (s *Ethereum) Start() error { - s.startEthEntryUpdate(s.p2pServer.LocalNode()) + eth.StartENRUpdater(s.blockchain, s.p2pServer.LocalNode()) // Start the bloom bits servicing goroutines s.startBloomHandlers(params.BloomBitsBlocks) @@ -525,7 +529,7 @@ func (s *Ethereum) Start() error { maxPeers -= s.config.LightPeers } // Start the networking layer and the light server if requested - s.protocolManager.Start(maxPeers) + s.handler.Start(maxPeers) return nil } @@ -533,7 +537,7 @@ func (s *Ethereum) Start() error { // Ethereum protocol. func (s *Ethereum) Stop() error { // Stop all the peer-related stuff first. - s.protocolManager.Stop() + s.handler.Stop() // Then stop everything else. s.bloomIndexer.Close() @@ -542,7 +546,9 @@ func (s *Ethereum) Stop() error { s.miner.Stop() s.blockchain.Stop() s.engine.Close() + rawdb.PopUncleanShutdownMarker(s.chainDb) s.chainDb.Close() s.eventMux.Stop() + return nil } diff --git a/eth/bloombits.go b/eth/bloombits.go index bd34bd7b69..0cb7050d23 100644 --- a/eth/bloombits.go +++ b/eth/bloombits.go @@ -17,16 +17,10 @@ package eth import ( - "context" "time" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/bitutil" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethdb" ) const ( @@ -78,66 +72,3 @@ func (eth *Ethereum) startBloomHandlers(sectionSize uint64) { }() } } - -const ( - // bloomThrottling is the time to wait between processing two consecutive index - // sections. It's useful during chain upgrades to prevent disk overload. - bloomThrottling = 100 * time.Millisecond -) - -// BloomIndexer implements a core.ChainIndexer, building up a rotated bloom bits index -// for the Ethereum header bloom filters, permitting blazing fast filtering. -type BloomIndexer struct { - size uint64 // section size to generate bloombits for - db ethdb.Database // database instance to write index data and metadata into - gen *bloombits.Generator // generator to rotate the bloom bits crating the bloom index - section uint64 // Section is the section number being processed currently - head common.Hash // Head is the hash of the last header processed -} - -// NewBloomIndexer returns a chain indexer that generates bloom bits data for the -// canonical chain for fast logs filtering. -func NewBloomIndexer(db ethdb.Database, size, confirms uint64) *core.ChainIndexer { - backend := &BloomIndexer{ - db: db, - size: size, - } - table := rawdb.NewTable(db, string(rawdb.BloomBitsIndexPrefix)) - - return core.NewChainIndexer(db, table, backend, size, confirms, bloomThrottling, "bloombits") -} - -// Reset implements core.ChainIndexerBackend, starting a new bloombits index -// section. -func (b *BloomIndexer) Reset(ctx context.Context, section uint64, lastSectionHead common.Hash) error { - gen, err := bloombits.NewGenerator(uint(b.size)) - b.gen, b.section, b.head = gen, section, common.Hash{} - return err -} - -// Process implements core.ChainIndexerBackend, adding a new header's bloom into -// the index. -func (b *BloomIndexer) Process(ctx context.Context, header *types.Header) error { - b.gen.AddBloom(uint(header.Number.Uint64()-b.section*b.size), header.Bloom) - b.head = header.Hash() - return nil -} - -// Commit implements core.ChainIndexerBackend, finalizing the bloom section and -// writing it out into the database. 
-func (b *BloomIndexer) Commit() error { - batch := b.db.NewBatch() - for i := 0; i < types.BloomBitLength; i++ { - bits, err := b.gen.Bitset(uint(i)) - if err != nil { - return err - } - rawdb.WriteBloomBits(batch, uint(i), b.section, b.head, bitutil.CompressBytes(bits)) - } - return batch.Write() -} - -// Prune returns an empty error since we don't support pruning here. -func (b *BloomIndexer) Prune(threshold uint64) error { - return nil -} diff --git a/eth/discovery.go b/eth/discovery.go index 48f6159017..855ce3b0e1 100644 --- a/eth/discovery.go +++ b/eth/discovery.go @@ -19,7 +19,6 @@ package eth import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" @@ -64,11 +63,12 @@ func (eth *Ethereum) currentEthEntry() *ethEntry { eth.blockchain.CurrentHeader().Number.Uint64())} } -// setupDiscovery creates the node discovery source for the eth protocol. -func (eth *Ethereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) { - if cfg.NoDiscovery || len(eth.config.DiscoveryURLs) == 0 { +// setupDiscovery creates the node discovery source for the `eth` and `snap` +// protocols. +func setupDiscovery(urls []string) (enode.Iterator, error) { + if len(urls) == 0 { return nil, nil } client := dnsdisc.NewClient(dnsdisc.Config{}) - return client.NewIterator(eth.config.DiscoveryURLs...) + return client.NewIterator(urls...) } diff --git a/eth/downloader/api.go b/eth/downloader/api.go index 57ff3d71af..2024d23dea 100644 --- a/eth/downloader/api.go +++ b/eth/downloader/api.go @@ -20,7 +20,7 @@ import ( "context" "sync" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/rpc" ) diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go index 686c1ace14..5ddd2f9848 100644 --- a/eth/downloader/downloader.go +++ b/eth/downloader/downloader.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" @@ -38,7 +39,6 @@ import ( ) var ( - MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly @@ -89,7 +89,8 @@ var ( errCancelContentProcessing = errors.New("content processing canceled (requested)") errCanceled = errors.New("syncing canceled (requested)") errNoSyncActive = errors.New("no sync active") - errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 63)") + errTooOld = errors.New("peer's protocol version too old") + errNoAncestorFound = errors.New("no common ancestor found") ) type Downloader struct { @@ -131,20 +132,22 @@ type Downloader struct { ancientLimit uint64 // The maximum block number which can be regarded as ancient data. 
// Channels - headerCh chan dataPack // [eth/62] Channel receiving inbound block headers - bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies - receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts - bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks - receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks - headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks + headerCh chan dataPack // Channel receiving inbound block headers + bodyCh chan dataPack // Channel receiving inbound block bodies + receiptCh chan dataPack // Channel receiving inbound receipts + bodyWakeCh chan bool // Channel to signal the block body fetcher of new tasks + receiptWakeCh chan bool // Channel to signal the receipt fetcher of new tasks + headerProcCh chan []*types.Header // Channel to feed the header processor new tasks // State sync pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root pivotLock sync.RWMutex // Lock protecting pivot header reads from updates + snapSync bool // Whether to run state sync over the snap protocol + SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now stateSyncStart chan *stateSync trackStateReq chan *stateReq - stateCh chan dataPack // [eth/63] Channel receiving inbound node state data + stateCh chan dataPack // Channel receiving inbound node state data // Cancellation and termination cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) @@ -237,6 +240,7 @@ func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, headerProcCh: make(chan []*types.Header, 1), quitCh: make(chan struct{}), stateCh: make(chan dataPack), + SnapSyncer: snap.NewSyncer(stateDb, stateBloom), stateSyncStart: make(chan *stateSync), syncStatsState: stateSyncStats{ processed: rawdb.ReadFastTrieProgress(stateDb), @@ -286,19 +290,16 @@ func (d *Downloader) Synchronising() bool { return atomic.LoadInt32(&d.synchronising) > 0 } -// SyncBloomContains tests if the syncbloom filter contains the given hash: -// - false: the bloom definitely does not contain hash -// - true: the bloom maybe contains hash -// -// While the bloom is being initialized (or is closed), all queries will return true. -func (d *Downloader) SyncBloomContains(hash []byte) bool { - return d.stateBloom == nil || d.stateBloom.Contains(hash) -} - // RegisterPeer injects a new download peer into the set of block source to be // used for fetching hashes and blocks from. -func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { - logger := log.New("peer", id) +func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { + var logger log.Logger + if len(id) < 16 { + // Tests use short IDs, don't choke on them + logger = log.New("peer", id) + } else { + logger = log.New("peer", id[:8]) + } logger.Trace("Registering sync peer") if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { logger.Error("Failed to register sync peer", "err", err) @@ -310,7 +311,7 @@ func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { } // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 
-func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error { +func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) } @@ -319,7 +320,13 @@ func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) e // the queue. func (d *Downloader) UnregisterPeer(id string) error { // Unregister the peer from the active peer set and revoke any fetch tasks - logger := log.New("peer", id) + var logger log.Logger + if len(id) < 16 { + // Tests use short IDs, don't choke on them + logger = log.New("peer", id) + } else { + logger = log.New("peer", id[:8]) + } logger.Trace("Unregistering sync peer") if err := d.peers.Unregister(id); err != nil { logger.Error("Failed to unregister sync peer", "err", err) @@ -339,7 +346,6 @@ func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode case nil, errBusy, errCanceled: return err } - if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { @@ -381,6 +387,16 @@ func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode if mode == FullSync && d.stateBloom != nil { d.stateBloom.Close() } + // If snap sync was requested, create the snap scheduler and switch to fast + // sync mode. Long term we could drop fast sync or merge the two together, + // but until snap becomes prevalent, we should support both. TODO(karalabe). + if mode == SnapSync { + if !d.snapSync { + log.Warn("Enabling snapshot sync prototype") + d.snapSync = true + } + mode = FastSync + } // Reset the queue, peer set and wake channels to clean any internal leftover state d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) d.peers.Reset() @@ -443,8 +459,8 @@ func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.I d.mux.Post(DoneEvent{latest}) } }() - if p.version < 63 { - return errTooOld + if p.version < 64 { + return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, 64) } mode := d.getMode() @@ -798,6 +814,26 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) } } + ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) + if err == nil { + return ancestor, nil + } + // The returned error was not nil. + // If the error returned does not reflect that a common ancestor was not found, return it. + // If the error reflects that a common ancestor was not found, continue to binary search, + // where the error value will be reassigned. 
+ if !errors.Is(err, errNoAncestorFound) { + return 0, err + } + + ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) + if err != nil { + return 0, err + } + return ancestor, nil +} + +func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) { from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) @@ -878,6 +914,12 @@ func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) p.log.Debug("Found common ancestor", "number", number, "hash", hash) return number, nil } + return 0, errNoAncestorFound +} + +func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) { + hash := common.Hash{} + // Ancestor not found, we need to binary search over our chain start, end := uint64(0), remoteHeight if floor > 0 { @@ -1345,7 +1387,7 @@ func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) case err == nil: peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) default: - peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err) + peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err) } } // Blocks assembled, try to update the progress @@ -1721,7 +1763,7 @@ func (d *Downloader) processFastSyncContent() error { }() closeOnErr := func(s *stateSync) { - if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled { + if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled { d.queue.Close() // wake up Results } } @@ -1910,27 +1952,53 @@ func (d *Downloader) commitPivotBlock(result *fetchResult) error { // DeliverHeaders injects a new batch of block headers received from a remote // node into the download schedule. -func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { - return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) +func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error { + return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) } // DeliverBodies injects a new batch of block bodies received from a remote node. -func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) { - return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) +func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error { + return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) } // DeliverReceipts injects a new batch of receipts received from a remote node. -func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { - return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) +func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error { + return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) } // DeliverNodeData injects a new batch of node state data received from a remote node. 
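One detail of the downloader changes above worth spelling out: errTooOld is now returned wrapped through fmt.Errorf's %w verb so the message can carry the advertised and required protocol versions, while the existing errors.Is(err, errTooOld) check in Synchronise keeps matching. A small standalone illustration of that wrapping behaviour; the locally redeclared sentinel and the checkVersion helper are stand-ins, not the downloader's own code:

    package main

    import (
        "errors"
        "fmt"
    )

    var errTooOld = errors.New("peer's protocol version too old")

    func checkVersion(version uint) error {
        if version < 64 {
            // %w wraps the sentinel, so callers can still match it with
            // errors.Is while the message carries the concrete versions.
            return fmt.Errorf("%w: advertized %d < required %d", errTooOld, version, 64)
        }
        return nil
    }

    func main() {
        err := checkVersion(63)
        fmt.Println(err)                       // peer's protocol version too old: advertized 63 < required 64
        fmt.Println(errors.Is(err, errTooOld)) // true: wrapping preserves the sentinel
    }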
-func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { - return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) +func (d *Downloader) DeliverNodeData(id string, data [][]byte) error { + return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) +} + +// DeliverSnapPacket is invoked from a peer's message handler when it transmits a +// data packet for the local node to consume. +func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { + switch packet := packet.(type) { + case *snap.AccountRangePacket: + hashes, accounts, err := packet.Unpack() + if err != nil { + return err + } + return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof) + + case *snap.StorageRangesPacket: + hashset, slotset := packet.Unpack() + return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof) + + case *snap.ByteCodesPacket: + return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes) + + case *snap.TrieNodesPacket: + return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes) + + default: + return fmt.Errorf("unexpected snap packet type: %T", packet) + } } // deliver injects a new batch of data received from a remote node. -func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { +func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { // Update the delivery metrics for both good and failed deliveries inMeter.Mark(int64(packet.Items())) defer func() { diff --git a/eth/downloader/downloader_test.go b/eth/downloader/downloader_test.go index 7645f04e4f..2917116144 100644 --- a/eth/downloader/downloader_test.go +++ b/eth/downloader/downloader_test.go @@ -390,7 +390,7 @@ func (dl *downloadTester) Rollback(hashes []common.Hash) { } // newPeer registers a new block download source into the downloader. -func (dl *downloadTester) newPeer(id string, version int, chain *testChain) error { +func (dl *downloadTester) newPeer(id string, version uint, chain *testChain) error { dl.lock.Lock() defer dl.lock.Unlock() @@ -515,20 +515,18 @@ func assertOwnForkedChain(t *testing.T, tester *downloadTester, common int, leng } } -// Tests that simple synchronization against a canonical chain works correctly. -// In this test common ancestor lookup should be short circuited and not require -// binary searching. 
-func TestCanonicalSynchronisation63Full(t *testing.T) { testCanonicalSynchronisation(t, 63, FullSync) } -func TestCanonicalSynchronisation63Fast(t *testing.T) { testCanonicalSynchronisation(t, 63, FastSync) } -func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonicalSynchronisation(t, 64, FullSync) } -func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonicalSynchronisation(t, 64, FastSync) } -func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonicalSynchronisation(t, 65, FullSync) } -func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonicalSynchronisation(t, 65, FastSync) } -func TestCanonicalSynchronisation65Light(t *testing.T) { - testCanonicalSynchronisation(t, 65, LightSync) -} +func TestCanonicalSynchronisation64Full(t *testing.T) { testCanonSync(t, 64, FullSync) } +func TestCanonicalSynchronisation64Fast(t *testing.T) { testCanonSync(t, 64, FastSync) } + +func TestCanonicalSynchronisation65Full(t *testing.T) { testCanonSync(t, 65, FullSync) } +func TestCanonicalSynchronisation65Fast(t *testing.T) { testCanonSync(t, 65, FastSync) } +func TestCanonicalSynchronisation65Light(t *testing.T) { testCanonSync(t, 65, LightSync) } + +func TestCanonicalSynchronisation66Full(t *testing.T) { testCanonSync(t, 66, FullSync) } +func TestCanonicalSynchronisation66Fast(t *testing.T) { testCanonSync(t, 66, FastSync) } +func TestCanonicalSynchronisation66Light(t *testing.T) { testCanonSync(t, 66, LightSync) } -func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { +func testCanonSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -547,14 +545,16 @@ func testCanonicalSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Tests that if a large batch of blocks are being downloaded, it is throttled // until the cached blocks are retrieved. 
-func TestThrottling63Full(t *testing.T) { testThrottling(t, 63, FullSync) } -func TestThrottling63Fast(t *testing.T) { testThrottling(t, 63, FastSync) } func TestThrottling64Full(t *testing.T) { testThrottling(t, 64, FullSync) } func TestThrottling64Fast(t *testing.T) { testThrottling(t, 64, FastSync) } + func TestThrottling65Full(t *testing.T) { testThrottling(t, 65, FullSync) } func TestThrottling65Fast(t *testing.T) { testThrottling(t, 65, FastSync) } -func testThrottling(t *testing.T, protocol int, mode SyncMode) { +func TestThrottling66Full(t *testing.T) { testThrottling(t, 66, FullSync) } +func TestThrottling66Fast(t *testing.T) { testThrottling(t, 66, FastSync) } + +func testThrottling(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -569,7 +569,7 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { <-proceed } // Start a synchronisation concurrently - errc := make(chan error) + errc := make(chan error, 1) go func() { errc <- tester.sync("peer", nil, mode) }() @@ -588,14 +588,15 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { time.Sleep(25 * time.Millisecond) tester.lock.Lock() + tester.downloader.queue.lock.Lock() + tester.downloader.queue.resultCache.lock.Lock() { - tester.downloader.queue.resultCache.lock.Lock() cached = tester.downloader.queue.resultCache.countCompleted() - tester.downloader.queue.resultCache.lock.Unlock() frozen = int(atomic.LoadUint32(&blocked)) retrieved = len(tester.ownBlocks) - } + tester.downloader.queue.resultCache.lock.Unlock() + tester.downloader.queue.lock.Unlock() tester.lock.Unlock() if cached == blockCacheMaxItems || @@ -632,15 +633,18 @@ func testThrottling(t *testing.T, protocol int, mode SyncMode) { // Tests that simple synchronization against a forked chain works correctly. In // this test common ancestor lookup should *not* be short circuited, and a full // binary search should be executed. -func TestForkedSync63Full(t *testing.T) { testForkedSync(t, 63, FullSync) } -func TestForkedSync63Fast(t *testing.T) { testForkedSync(t, 63, FastSync) } -func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) } -func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) } +func TestForkedSync64Full(t *testing.T) { testForkedSync(t, 64, FullSync) } +func TestForkedSync64Fast(t *testing.T) { testForkedSync(t, 64, FastSync) } + func TestForkedSync65Full(t *testing.T) { testForkedSync(t, 65, FullSync) } func TestForkedSync65Fast(t *testing.T) { testForkedSync(t, 65, FastSync) } func TestForkedSync65Light(t *testing.T) { testForkedSync(t, 65, LightSync) } -func testForkedSync(t *testing.T, protocol int, mode SyncMode) { +func TestForkedSync66Full(t *testing.T) { testForkedSync(t, 66, FullSync) } +func TestForkedSync66Fast(t *testing.T) { testForkedSync(t, 66, FastSync) } +func TestForkedSync66Light(t *testing.T) { testForkedSync(t, 66, LightSync) } + +func testForkedSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -665,15 +669,18 @@ func testForkedSync(t *testing.T, protocol int, mode SyncMode) { // Tests that synchronising against a much shorter but much heavyer fork works // corrently and is not dropped. 
-func TestHeavyForkedSync63Full(t *testing.T) { testHeavyForkedSync(t, 63, FullSync) } -func TestHeavyForkedSync63Fast(t *testing.T) { testHeavyForkedSync(t, 63, FastSync) } -func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) } -func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) } +func TestHeavyForkedSync64Full(t *testing.T) { testHeavyForkedSync(t, 64, FullSync) } +func TestHeavyForkedSync64Fast(t *testing.T) { testHeavyForkedSync(t, 64, FastSync) } + func TestHeavyForkedSync65Full(t *testing.T) { testHeavyForkedSync(t, 65, FullSync) } func TestHeavyForkedSync65Fast(t *testing.T) { testHeavyForkedSync(t, 65, FastSync) } func TestHeavyForkedSync65Light(t *testing.T) { testHeavyForkedSync(t, 65, LightSync) } -func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { +func TestHeavyForkedSync66Full(t *testing.T) { testHeavyForkedSync(t, 66, FullSync) } +func TestHeavyForkedSync66Fast(t *testing.T) { testHeavyForkedSync(t, 66, FastSync) } +func TestHeavyForkedSync66Light(t *testing.T) { testHeavyForkedSync(t, 66, LightSync) } + +func testHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -700,15 +707,18 @@ func testHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head, ensuring that malicious peers cannot waste resources by feeding // long dead chains. -func TestBoundedForkedSync63Full(t *testing.T) { testBoundedForkedSync(t, 63, FullSync) } -func TestBoundedForkedSync63Fast(t *testing.T) { testBoundedForkedSync(t, 63, FastSync) } -func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) } -func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) } +func TestBoundedForkedSync64Full(t *testing.T) { testBoundedForkedSync(t, 64, FullSync) } +func TestBoundedForkedSync64Fast(t *testing.T) { testBoundedForkedSync(t, 64, FastSync) } + func TestBoundedForkedSync65Full(t *testing.T) { testBoundedForkedSync(t, 65, FullSync) } func TestBoundedForkedSync65Fast(t *testing.T) { testBoundedForkedSync(t, 65, FastSync) } func TestBoundedForkedSync65Light(t *testing.T) { testBoundedForkedSync(t, 65, LightSync) } -func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) { +func TestBoundedForkedSync66Full(t *testing.T) { testBoundedForkedSync(t, 66, FullSync) } +func TestBoundedForkedSync66Fast(t *testing.T) { testBoundedForkedSync(t, 66, FastSync) } +func TestBoundedForkedSync66Light(t *testing.T) { testBoundedForkedSync(t, 66, LightSync) } + +func testBoundedForkedSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -734,15 +744,18 @@ func testBoundedForkedSync(t *testing.T, protocol int, mode SyncMode) { // Tests that chain forks are contained within a certain interval of the current // chain head for short but heavy forks too. These are a bit special because they // take different ancestor lookup paths. 
-func TestBoundedHeavyForkedSync63Full(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FullSync) } -func TestBoundedHeavyForkedSync63Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 63, FastSync) } -func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) } -func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) } +func TestBoundedHeavyForkedSync64Full(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FullSync) } +func TestBoundedHeavyForkedSync64Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 64, FastSync) } + func TestBoundedHeavyForkedSync65Full(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FullSync) } func TestBoundedHeavyForkedSync65Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 65, FastSync) } func TestBoundedHeavyForkedSync65Light(t *testing.T) { testBoundedHeavyForkedSync(t, 65, LightSync) } -func testBoundedHeavyForkedSync(t *testing.T, protocol int, mode SyncMode) { +func TestBoundedHeavyForkedSync66Full(t *testing.T) { testBoundedHeavyForkedSync(t, 66, FullSync) } +func TestBoundedHeavyForkedSync66Fast(t *testing.T) { testBoundedHeavyForkedSync(t, 66, FastSync) } +func TestBoundedHeavyForkedSync66Light(t *testing.T) { testBoundedHeavyForkedSync(t, 66, LightSync) } + +func testBoundedHeavyForkedSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -786,15 +799,18 @@ func TestInactiveDownloader63(t *testing.T) { } // Tests that a canceled download wipes all previously accumulated state. -func TestCancel63Full(t *testing.T) { testCancel(t, 63, FullSync) } -func TestCancel63Fast(t *testing.T) { testCancel(t, 63, FastSync) } -func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) } -func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) } +func TestCancel64Full(t *testing.T) { testCancel(t, 64, FullSync) } +func TestCancel64Fast(t *testing.T) { testCancel(t, 64, FastSync) } + func TestCancel65Full(t *testing.T) { testCancel(t, 65, FullSync) } func TestCancel65Fast(t *testing.T) { testCancel(t, 65, FastSync) } func TestCancel65Light(t *testing.T) { testCancel(t, 65, LightSync) } -func testCancel(t *testing.T, protocol int, mode SyncMode) { +func TestCancel66Full(t *testing.T) { testCancel(t, 66, FullSync) } +func TestCancel66Fast(t *testing.T) { testCancel(t, 66, FastSync) } +func TestCancel66Light(t *testing.T) { testCancel(t, 66, LightSync) } + +func testCancel(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -819,15 +835,18 @@ func testCancel(t *testing.T, protocol int, mode SyncMode) { } // Tests that synchronisation from multiple peers works as intended (multi thread sanity test). 
-func TestMultiSynchronisation63Full(t *testing.T) { testMultiSynchronisation(t, 63, FullSync) } -func TestMultiSynchronisation63Fast(t *testing.T) { testMultiSynchronisation(t, 63, FastSync) } -func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) } -func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) } +func TestMultiSynchronisation64Full(t *testing.T) { testMultiSynchronisation(t, 64, FullSync) } +func TestMultiSynchronisation64Fast(t *testing.T) { testMultiSynchronisation(t, 64, FastSync) } + func TestMultiSynchronisation65Full(t *testing.T) { testMultiSynchronisation(t, 65, FullSync) } func TestMultiSynchronisation65Fast(t *testing.T) { testMultiSynchronisation(t, 65, FastSync) } func TestMultiSynchronisation65Light(t *testing.T) { testMultiSynchronisation(t, 65, LightSync) } -func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { +func TestMultiSynchronisation66Full(t *testing.T) { testMultiSynchronisation(t, 66, FullSync) } +func TestMultiSynchronisation66Fast(t *testing.T) { testMultiSynchronisation(t, 66, FastSync) } +func TestMultiSynchronisation66Light(t *testing.T) { testMultiSynchronisation(t, 66, LightSync) } + +func testMultiSynchronisation(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -849,15 +868,18 @@ func testMultiSynchronisation(t *testing.T, protocol int, mode SyncMode) { // Tests that synchronisations behave well in multi-version protocol environments // and not wreak havoc on other nodes in the network. -func TestMultiProtoSynchronisation63Full(t *testing.T) { testMultiProtoSync(t, 63, FullSync) } -func TestMultiProtoSynchronisation63Fast(t *testing.T) { testMultiProtoSync(t, 63, FastSync) } -func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) } -func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) } +func TestMultiProtoSynchronisation64Full(t *testing.T) { testMultiProtoSync(t, 64, FullSync) } +func TestMultiProtoSynchronisation64Fast(t *testing.T) { testMultiProtoSync(t, 64, FastSync) } + func TestMultiProtoSynchronisation65Full(t *testing.T) { testMultiProtoSync(t, 65, FullSync) } func TestMultiProtoSynchronisation65Fast(t *testing.T) { testMultiProtoSync(t, 65, FastSync) } func TestMultiProtoSynchronisation65Light(t *testing.T) { testMultiProtoSync(t, 65, LightSync) } -func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { +func TestMultiProtoSynchronisation66Full(t *testing.T) { testMultiProtoSync(t, 66, FullSync) } +func TestMultiProtoSynchronisation66Fast(t *testing.T) { testMultiProtoSync(t, 66, FastSync) } +func TestMultiProtoSynchronisation66Light(t *testing.T) { testMultiProtoSync(t, 66, LightSync) } + +func testMultiProtoSync(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -867,9 +889,9 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { chain := testChainBase.shorten(blockCacheMaxItems - 15) // Create peers of every type - tester.newPeer("peer 63", 63, chain) tester.newPeer("peer 64", 64, chain) tester.newPeer("peer 65", 65, chain) + tester.newPeer("peer 66", 66, chain) // Synchronise with the requested peer and make sure all blocks were retrieved if err := tester.sync(fmt.Sprintf("peer %d", protocol), nil, mode); err != nil { @@ -878,7 +900,7 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { assertOwnChain(t, tester, chain.len()) // Check that no 
peers have been dropped off - for _, version := range []int{63, 64, 65} { + for _, version := range []int{64, 65, 66} { peer := fmt.Sprintf("peer %d", version) if _, ok := tester.peers[peer]; !ok { t.Errorf("%s dropped", peer) @@ -888,15 +910,18 @@ func testMultiProtoSync(t *testing.T, protocol int, mode SyncMode) { // Tests that if a block is empty (e.g. header only), no body request should be // made, and instead the header should be assembled into a whole block in itself. -func TestEmptyShortCircuit63Full(t *testing.T) { testEmptyShortCircuit(t, 63, FullSync) } -func TestEmptyShortCircuit63Fast(t *testing.T) { testEmptyShortCircuit(t, 63, FastSync) } -func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) } -func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) } +func TestEmptyShortCircuit64Full(t *testing.T) { testEmptyShortCircuit(t, 64, FullSync) } +func TestEmptyShortCircuit64Fast(t *testing.T) { testEmptyShortCircuit(t, 64, FastSync) } + func TestEmptyShortCircuit65Full(t *testing.T) { testEmptyShortCircuit(t, 65, FullSync) } func TestEmptyShortCircuit65Fast(t *testing.T) { testEmptyShortCircuit(t, 65, FastSync) } func TestEmptyShortCircuit65Light(t *testing.T) { testEmptyShortCircuit(t, 65, LightSync) } -func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { +func TestEmptyShortCircuit66Full(t *testing.T) { testEmptyShortCircuit(t, 66, FullSync) } +func TestEmptyShortCircuit66Fast(t *testing.T) { testEmptyShortCircuit(t, 66, FastSync) } +func TestEmptyShortCircuit66Light(t *testing.T) { testEmptyShortCircuit(t, 66, LightSync) } + +func testEmptyShortCircuit(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -942,15 +967,18 @@ func testEmptyShortCircuit(t *testing.T, protocol int, mode SyncMode) { // Tests that headers are enqueued continuously, preventing malicious nodes from // stalling the downloader by feeding gapped header chains. -func TestMissingHeaderAttack63Full(t *testing.T) { testMissingHeaderAttack(t, 63, FullSync) } -func TestMissingHeaderAttack63Fast(t *testing.T) { testMissingHeaderAttack(t, 63, FastSync) } -func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) } -func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) } +func TestMissingHeaderAttack64Full(t *testing.T) { testMissingHeaderAttack(t, 64, FullSync) } +func TestMissingHeaderAttack64Fast(t *testing.T) { testMissingHeaderAttack(t, 64, FastSync) } + func TestMissingHeaderAttack65Full(t *testing.T) { testMissingHeaderAttack(t, 65, FullSync) } func TestMissingHeaderAttack65Fast(t *testing.T) { testMissingHeaderAttack(t, 65, FastSync) } func TestMissingHeaderAttack65Light(t *testing.T) { testMissingHeaderAttack(t, 65, LightSync) } -func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { +func TestMissingHeaderAttack66Full(t *testing.T) { testMissingHeaderAttack(t, 66, FullSync) } +func TestMissingHeaderAttack66Fast(t *testing.T) { testMissingHeaderAttack(t, 66, FastSync) } +func TestMissingHeaderAttack66Light(t *testing.T) { testMissingHeaderAttack(t, 66, LightSync) } + +func testMissingHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -974,15 +1002,18 @@ func testMissingHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Tests that if requested headers are shifted (i.e. first is missing), the queue // detects the invalid numbering. 
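Before the shifted-header tests continue below, the empty short-circuit case exercised above is worth a small sketch. The `isEmptyBody` helper is illustrative, not upstream code: it only shows the header-level check that makes a body request unnecessary.

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/core/types"
)

// isEmptyBody illustrates the short-circuit condition: a header committing to
// the empty transaction root and the empty uncle hash describes a block with
// no body, so the downloader can assemble the block from the header alone.
func isEmptyBody(h *types.Header) bool {
	return h.TxHash == types.EmptyRootHash && h.UncleHash == types.EmptyUncleHash
}

func main() {
	fmt.Println(isEmptyBody(&types.Header{
		TxHash:    types.EmptyRootHash,
		UncleHash: types.EmptyUncleHash,
	})) // true
}
```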
-func TestShiftedHeaderAttack63Full(t *testing.T) { testShiftedHeaderAttack(t, 63, FullSync) } -func TestShiftedHeaderAttack63Fast(t *testing.T) { testShiftedHeaderAttack(t, 63, FastSync) } -func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) } -func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) } +func TestShiftedHeaderAttack64Full(t *testing.T) { testShiftedHeaderAttack(t, 64, FullSync) } +func TestShiftedHeaderAttack64Fast(t *testing.T) { testShiftedHeaderAttack(t, 64, FastSync) } + func TestShiftedHeaderAttack65Full(t *testing.T) { testShiftedHeaderAttack(t, 65, FullSync) } func TestShiftedHeaderAttack65Fast(t *testing.T) { testShiftedHeaderAttack(t, 65, FastSync) } func TestShiftedHeaderAttack65Light(t *testing.T) { testShiftedHeaderAttack(t, 65, LightSync) } -func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { +func TestShiftedHeaderAttack66Full(t *testing.T) { testShiftedHeaderAttack(t, 66, FullSync) } +func TestShiftedHeaderAttack66Fast(t *testing.T) { testShiftedHeaderAttack(t, 66, FastSync) } +func TestShiftedHeaderAttack66Light(t *testing.T) { testShiftedHeaderAttack(t, 66, LightSync) } + +func testShiftedHeaderAttack(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1011,11 +1042,11 @@ func testShiftedHeaderAttack(t *testing.T, protocol int, mode SyncMode) { // Tests that upon detecting an invalid header, the recent ones are rolled back // for various failure scenarios. Afterwards a full sync is attempted to make // sure no state was corrupted. -func TestInvalidHeaderRollback63Fast(t *testing.T) { testInvalidHeaderRollback(t, 63, FastSync) } func TestInvalidHeaderRollback64Fast(t *testing.T) { testInvalidHeaderRollback(t, 64, FastSync) } func TestInvalidHeaderRollback65Fast(t *testing.T) { testInvalidHeaderRollback(t, 65, FastSync) } +func TestInvalidHeaderRollback66Fast(t *testing.T) { testInvalidHeaderRollback(t, 66, FastSync) } -func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { +func testInvalidHeaderRollback(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1103,15 +1134,18 @@ func testInvalidHeaderRollback(t *testing.T, protocol int, mode SyncMode) { // Tests that a peer advertising a high TD doesn't get to stall the downloader // afterwards by not sending any useful hashes. 
-func TestHighTDStarvationAttack63Full(t *testing.T) { testHighTDStarvationAttack(t, 63, FullSync) } -func TestHighTDStarvationAttack63Fast(t *testing.T) { testHighTDStarvationAttack(t, 63, FastSync) } -func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) } -func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) } +func TestHighTDStarvationAttack64Full(t *testing.T) { testHighTDStarvationAttack(t, 64, FullSync) } +func TestHighTDStarvationAttack64Fast(t *testing.T) { testHighTDStarvationAttack(t, 64, FastSync) } + func TestHighTDStarvationAttack65Full(t *testing.T) { testHighTDStarvationAttack(t, 65, FullSync) } func TestHighTDStarvationAttack65Fast(t *testing.T) { testHighTDStarvationAttack(t, 65, FastSync) } func TestHighTDStarvationAttack65Light(t *testing.T) { testHighTDStarvationAttack(t, 65, LightSync) } -func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { +func TestHighTDStarvationAttack66Full(t *testing.T) { testHighTDStarvationAttack(t, 66, FullSync) } +func TestHighTDStarvationAttack66Fast(t *testing.T) { testHighTDStarvationAttack(t, 66, FastSync) } +func TestHighTDStarvationAttack66Light(t *testing.T) { testHighTDStarvationAttack(t, 66, LightSync) } + +func testHighTDStarvationAttack(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1125,11 +1159,11 @@ func testHighTDStarvationAttack(t *testing.T, protocol int, mode SyncMode) { } // Tests that misbehaving peers are disconnected, whilst behaving ones are not. -func TestBlockHeaderAttackerDropping63(t *testing.T) { testBlockHeaderAttackerDropping(t, 63) } func TestBlockHeaderAttackerDropping64(t *testing.T) { testBlockHeaderAttackerDropping(t, 64) } func TestBlockHeaderAttackerDropping65(t *testing.T) { testBlockHeaderAttackerDropping(t, 65) } +func TestBlockHeaderAttackerDropping66(t *testing.T) { testBlockHeaderAttackerDropping(t, 66) } -func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { +func testBlockHeaderAttackerDropping(t *testing.T, protocol uint) { t.Parallel() // Define the disconnection requirement for individual hash fetch errors @@ -1179,15 +1213,18 @@ func testBlockHeaderAttackerDropping(t *testing.T, protocol int) { // Tests that synchronisation progress (origin block number, current block number // and highest block number) is tracked and updated correctly. 
-func TestSyncProgress63Full(t *testing.T) { testSyncProgress(t, 63, FullSync) } -func TestSyncProgress63Fast(t *testing.T) { testSyncProgress(t, 63, FastSync) } -func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) } -func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) } +func TestSyncProgress64Full(t *testing.T) { testSyncProgress(t, 64, FullSync) } +func TestSyncProgress64Fast(t *testing.T) { testSyncProgress(t, 64, FastSync) } + func TestSyncProgress65Full(t *testing.T) { testSyncProgress(t, 65, FullSync) } func TestSyncProgress65Fast(t *testing.T) { testSyncProgress(t, 65, FastSync) } func TestSyncProgress65Light(t *testing.T) { testSyncProgress(t, 65, LightSync) } -func testSyncProgress(t *testing.T, protocol int, mode SyncMode) { +func TestSyncProgress66Full(t *testing.T) { testSyncProgress(t, 66, FullSync) } +func TestSyncProgress66Fast(t *testing.T) { testSyncProgress(t, 66, FastSync) } +func TestSyncProgress66Light(t *testing.T) { testSyncProgress(t, 66, LightSync) } + +func testSyncProgress(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1263,21 +1300,24 @@ func checkProgress(t *testing.T, d *Downloader, stage string, want ethereum.Sync // Tests that synchronisation progress (origin block number and highest block // number) is tracked and updated correctly in case of a fork (or manual head // revertal). -func TestForkedSyncProgress63Full(t *testing.T) { testForkedSyncProgress(t, 63, FullSync) } -func TestForkedSyncProgress63Fast(t *testing.T) { testForkedSyncProgress(t, 63, FastSync) } -func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) } -func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) } +func TestForkedSyncProgress64Full(t *testing.T) { testForkedSyncProgress(t, 64, FullSync) } +func TestForkedSyncProgress64Fast(t *testing.T) { testForkedSyncProgress(t, 64, FastSync) } + func TestForkedSyncProgress65Full(t *testing.T) { testForkedSyncProgress(t, 65, FullSync) } func TestForkedSyncProgress65Fast(t *testing.T) { testForkedSyncProgress(t, 65, FastSync) } func TestForkedSyncProgress65Light(t *testing.T) { testForkedSyncProgress(t, 65, LightSync) } -func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { +func TestForkedSyncProgress66Full(t *testing.T) { testForkedSyncProgress(t, 66, FullSync) } +func TestForkedSyncProgress66Fast(t *testing.T) { testForkedSyncProgress(t, 66, FastSync) } +func TestForkedSyncProgress66Light(t *testing.T) { testForkedSyncProgress(t, 66, LightSync) } + +func testForkedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() defer tester.terminate() - chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHashFetch) - chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHashFetch) + chainA := testChainForkLightA.shorten(testChainBase.len() + MaxHeaderFetch) + chainB := testChainForkLightB.shorten(testChainBase.len() + MaxHeaderFetch) // Set a sync init hook to catch progress changes starting := make(chan struct{}) @@ -1339,15 +1379,18 @@ func testForkedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Tests that if synchronisation is aborted due to some failure, then the progress // origin is not updated in the next sync cycle, as it should be considered the // continuation of the previous sync and not a new instance. 
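The progress tests above assert on the downloader's reported origin, current and highest block numbers after each stage; the failed-progress variants follow below. A consumer-side sketch in the same spirit (the `reportProgress` helper is illustrative and not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

// reportProgress prints the same three numbers that checkProgress compares in
// the tests above, obtained through the downloader's public Progress method.
func reportProgress(d *downloader.Downloader) {
	p := d.Progress()
	fmt.Printf("origin=%d current=%d highest=%d\n",
		p.StartingBlock, p.CurrentBlock, p.HighestBlock)
}

func main() {}
```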
-func TestFailedSyncProgress63Full(t *testing.T) { testFailedSyncProgress(t, 63, FullSync) } -func TestFailedSyncProgress63Fast(t *testing.T) { testFailedSyncProgress(t, 63, FastSync) } -func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) } -func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) } +func TestFailedSyncProgress64Full(t *testing.T) { testFailedSyncProgress(t, 64, FullSync) } +func TestFailedSyncProgress64Fast(t *testing.T) { testFailedSyncProgress(t, 64, FastSync) } + func TestFailedSyncProgress65Full(t *testing.T) { testFailedSyncProgress(t, 65, FullSync) } func TestFailedSyncProgress65Fast(t *testing.T) { testFailedSyncProgress(t, 65, FastSync) } func TestFailedSyncProgress65Light(t *testing.T) { testFailedSyncProgress(t, 65, LightSync) } -func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) { +func TestFailedSyncProgress66Full(t *testing.T) { testFailedSyncProgress(t, 66, FullSync) } +func TestFailedSyncProgress66Fast(t *testing.T) { testFailedSyncProgress(t, 66, FastSync) } +func TestFailedSyncProgress66Light(t *testing.T) { testFailedSyncProgress(t, 66, LightSync) } + +func testFailedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1412,15 +1455,18 @@ func testFailedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // Tests that if an attacker fakes a chain height, after the attack is detected, // the progress height is successfully reduced at the next sync invocation. -func TestFakedSyncProgress63Full(t *testing.T) { testFakedSyncProgress(t, 63, FullSync) } -func TestFakedSyncProgress63Fast(t *testing.T) { testFakedSyncProgress(t, 63, FastSync) } -func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } -func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } +func TestFakedSyncProgress64Full(t *testing.T) { testFakedSyncProgress(t, 64, FullSync) } +func TestFakedSyncProgress64Fast(t *testing.T) { testFakedSyncProgress(t, 64, FastSync) } + func TestFakedSyncProgress65Full(t *testing.T) { testFakedSyncProgress(t, 65, FullSync) } func TestFakedSyncProgress65Fast(t *testing.T) { testFakedSyncProgress(t, 65, FastSync) } func TestFakedSyncProgress65Light(t *testing.T) { testFakedSyncProgress(t, 65, LightSync) } -func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { +func TestFakedSyncProgress66Full(t *testing.T) { testFakedSyncProgress(t, 66, FullSync) } +func TestFakedSyncProgress66Fast(t *testing.T) { testFakedSyncProgress(t, 66, FastSync) } +func TestFakedSyncProgress66Light(t *testing.T) { testFakedSyncProgress(t, 66, LightSync) } + +func testFakedSyncProgress(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() tester := newTester() @@ -1489,31 +1535,20 @@ func testFakedSyncProgress(t *testing.T, protocol int, mode SyncMode) { // This test reproduces an issue where unexpected deliveries would // block indefinitely if they arrived at the right time. 
-func TestDeliverHeadersHang(t *testing.T) { - t.Parallel() +func TestDeliverHeadersHang64Full(t *testing.T) { testDeliverHeadersHang(t, 64, FullSync) } +func TestDeliverHeadersHang64Fast(t *testing.T) { testDeliverHeadersHang(t, 64, FastSync) } - testCases := []struct { - protocol int - syncMode SyncMode - }{ - {63, FullSync}, - {63, FastSync}, - {64, FullSync}, - {64, FastSync}, - {64, LightSync}, - {65, FullSync}, - {65, FastSync}, - {65, LightSync}, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("protocol %d mode %v", tc.protocol, tc.syncMode), func(t *testing.T) { - t.Parallel() - testDeliverHeadersHang(t, tc.protocol, tc.syncMode) - }) - } -} +func TestDeliverHeadersHang65Full(t *testing.T) { testDeliverHeadersHang(t, 65, FullSync) } +func TestDeliverHeadersHang65Fast(t *testing.T) { testDeliverHeadersHang(t, 65, FastSync) } +func TestDeliverHeadersHang65Light(t *testing.T) { testDeliverHeadersHang(t, 65, LightSync) } + +func TestDeliverHeadersHang66Full(t *testing.T) { testDeliverHeadersHang(t, 66, FullSync) } +func TestDeliverHeadersHang66Fast(t *testing.T) { testDeliverHeadersHang(t, 66, FastSync) } +func TestDeliverHeadersHang66Light(t *testing.T) { testDeliverHeadersHang(t, 66, LightSync) } + +func testDeliverHeadersHang(t *testing.T, protocol uint, mode SyncMode) { + t.Parallel() -func testDeliverHeadersHang(t *testing.T, protocol int, mode SyncMode) { master := newTester() defer master.terminate() chain := testChainBase.shorten(15) @@ -1664,15 +1699,18 @@ func TestRemoteHeaderRequestSpan(t *testing.T) { // Tests that peers below a pre-configured checkpoint block are prevented from // being fast-synced from, avoiding potential cheap eclipse attacks. -func TestCheckpointEnforcement63Full(t *testing.T) { testCheckpointEnforcement(t, 63, FullSync) } -func TestCheckpointEnforcement63Fast(t *testing.T) { testCheckpointEnforcement(t, 63, FastSync) } -func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) } -func TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) } +func TestCheckpointEnforcement64Full(t *testing.T) { testCheckpointEnforcement(t, 64, FullSync) } +func TestCheckpointEnforcement64Fast(t *testing.T) { testCheckpointEnforcement(t, 64, FastSync) } + func TestCheckpointEnforcement65Full(t *testing.T) { testCheckpointEnforcement(t, 65, FullSync) } func TestCheckpointEnforcement65Fast(t *testing.T) { testCheckpointEnforcement(t, 65, FastSync) } func TestCheckpointEnforcement65Light(t *testing.T) { testCheckpointEnforcement(t, 65, LightSync) } -func testCheckpointEnforcement(t *testing.T, protocol int, mode SyncMode) { +func TestCheckpointEnforcement66Full(t *testing.T) { testCheckpointEnforcement(t, 66, FullSync) } +func TestCheckpointEnforcement66Fast(t *testing.T) { testCheckpointEnforcement(t, 66, FastSync) } +func TestCheckpointEnforcement66Light(t *testing.T) { testCheckpointEnforcement(t, 66, LightSync) } + +func testCheckpointEnforcement(t *testing.T, protocol uint, mode SyncMode) { t.Parallel() // Create a new tester with a particular hard coded checkpoint block diff --git a/eth/downloader/modes.go b/eth/downloader/modes.go index d866ceabce..3ea14d22d7 100644 --- a/eth/downloader/modes.go +++ b/eth/downloader/modes.go @@ -24,7 +24,8 @@ type SyncMode uint32 const ( FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks - FastSync // Quickly download the headers, full sync only at the chain head + FastSync // Quickly download the headers, full sync 
only at the chain + SnapSync // Download the chain and the state via compact snapshots LightSync // Download only the headers and terminate afterwards ) @@ -39,6 +40,8 @@ func (mode SyncMode) String() string { return "full" case FastSync: return "fast" + case SnapSync: + return "snap" case LightSync: return "light" default: @@ -52,6 +55,8 @@ func (mode SyncMode) MarshalText() ([]byte, error) { return []byte("full"), nil case FastSync: return []byte("fast"), nil + case SnapSync: + return []byte("snap"), nil case LightSync: return []byte("light"), nil default: @@ -65,6 +70,8 @@ func (mode *SyncMode) UnmarshalText(text []byte) error { *mode = FullSync case "fast": *mode = FastSync + case "snap": + *mode = SnapSync case "light": *mode = LightSync default: diff --git a/eth/downloader/peer.go b/eth/downloader/peer.go index c6671436f9..7852569d8e 100644 --- a/eth/downloader/peer.go +++ b/eth/downloader/peer.go @@ -29,6 +29,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) @@ -69,7 +70,7 @@ type peerConnection struct { peer Peer - version int // Eth protocol version number to switch strategies + version uint // Eth protocol version number to switch strategies log log.Logger // Contextual logger to add extra infos to peer logs lock sync.RWMutex } @@ -112,7 +113,7 @@ func (w *lightPeerWrapper) RequestNodeData([]common.Hash) error { } // newPeerConnection creates a new downloader peer. -func newPeerConnection(id string, version int, peer Peer, logger log.Logger) *peerConnection { +func newPeerConnection(id string, version uint, peer Peer, logger log.Logger) *peerConnection { return &peerConnection{ id: id, lacking: make(map[common.Hash]struct{}), @@ -457,7 +458,7 @@ func (ps *peerSet) HeaderIdlePeers() ([]*peerConnection, int) { defer p.lock.RUnlock() return p.headerThroughput } - return ps.idlePeers(63, 65, idle, throughput) + return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput) } // BodyIdlePeers retrieves a flat list of all the currently body-idle peers within @@ -471,7 +472,7 @@ func (ps *peerSet) BodyIdlePeers() ([]*peerConnection, int) { defer p.lock.RUnlock() return p.blockThroughput } - return ps.idlePeers(63, 65, idle, throughput) + return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput) } // ReceiptIdlePeers retrieves a flat list of all the currently receipt-idle peers @@ -485,7 +486,7 @@ func (ps *peerSet) ReceiptIdlePeers() ([]*peerConnection, int) { defer p.lock.RUnlock() return p.receiptThroughput } - return ps.idlePeers(63, 65, idle, throughput) + return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput) } // NodeDataIdlePeers retrieves a flat list of all the currently node-data-idle @@ -499,13 +500,13 @@ func (ps *peerSet) NodeDataIdlePeers() ([]*peerConnection, int) { defer p.lock.RUnlock() return p.stateThroughput } - return ps.idlePeers(63, 65, idle, throughput) + return ps.idlePeers(eth.ETH64, eth.ETH66, idle, throughput) } // idlePeers retrieves a flat list of all currently idle peers satisfying the // protocol version constraints, using the provided function to check idleness. // The resulting set of peers are sorted by their measure throughput. 
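The modes.go hunks above add a `SnapSync` constant and a "snap" entry to the text codecs; the peer-set signature change they feed into continues below. A small round-trip sketch of the new mode name, much as the TOML config decoder would exercise it:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
)

// Round-trip the new "snap" name through SyncMode's text codecs added above.
func main() {
	var mode downloader.SyncMode
	if err := mode.UnmarshalText([]byte("snap")); err != nil {
		panic(err)
	}
	fmt.Println(mode == downloader.SnapSync) // true

	text, _ := mode.MarshalText()
	fmt.Printf("%s\n", text) // snap
}
```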
-func (ps *peerSet) idlePeers(minProtocol, maxProtocol int, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { +func (ps *peerSet) idlePeers(minProtocol, maxProtocol uint, idleCheck func(*peerConnection) bool, throughput func(*peerConnection) float64) ([]*peerConnection, int) { ps.lock.RLock() defer ps.lock.RUnlock() diff --git a/eth/downloader/queue.go b/eth/downloader/queue.go index d2ec8ba694..ac7edc2c68 100644 --- a/eth/downloader/queue.go +++ b/eth/downloader/queue.go @@ -113,24 +113,24 @@ type queue struct { mode SyncMode // Synchronisation mode to decide on the block parts to schedule for fetching // Headers are "special", they download in batches, supported by a skeleton chain - headerHead common.Hash // [eth/62] Hash of the last queued header to verify order - headerTaskPool map[uint64]*types.Header // [eth/62] Pending header retrieval tasks, mapping starting indexes to skeleton headers - headerTaskQueue *prque.Prque // [eth/62] Priority queue of the skeleton indexes to fetch the filling headers for - headerPeerMiss map[string]map[uint64]struct{} // [eth/62] Set of per-peer header batches known to be unavailable - headerPendPool map[string]*fetchRequest // [eth/62] Currently pending header retrieval operations - headerResults []*types.Header // [eth/62] Result cache accumulating the completed headers - headerProced int // [eth/62] Number of headers already processed from the results - headerOffset uint64 // [eth/62] Number of the first header in the result cache - headerContCh chan bool // [eth/62] Channel to notify when header download finishes + headerHead common.Hash // Hash of the last queued header to verify order + headerTaskPool map[uint64]*types.Header // Pending header retrieval tasks, mapping starting indexes to skeleton headers + headerTaskQueue *prque.Prque // Priority queue of the skeleton indexes to fetch the filling headers for + headerPeerMiss map[string]map[uint64]struct{} // Set of per-peer header batches known to be unavailable + headerPendPool map[string]*fetchRequest // Currently pending header retrieval operations + headerResults []*types.Header // Result cache accumulating the completed headers + headerProced int // Number of headers already processed from the results + headerOffset uint64 // Number of the first header in the result cache + headerContCh chan bool // Channel to notify when header download finishes // All data retrievals below are based on an already assembles header chain - blockTaskPool map[common.Hash]*types.Header // [eth/62] Pending block (body) retrieval tasks, mapping hashes to headers - blockTaskQueue *prque.Prque // [eth/62] Priority queue of the headers to fetch the blocks (bodies) for - blockPendPool map[string]*fetchRequest // [eth/62] Currently pending block (body) retrieval operations + blockTaskPool map[common.Hash]*types.Header // Pending block (body) retrieval tasks, mapping hashes to headers + blockTaskQueue *prque.Prque // Priority queue of the headers to fetch the blocks (bodies) for + blockPendPool map[string]*fetchRequest // Currently pending block (body) retrieval operations - receiptTaskPool map[common.Hash]*types.Header // [eth/63] Pending receipt retrieval tasks, mapping hashes to headers - receiptTaskQueue *prque.Prque // [eth/63] Priority queue of the headers to fetch the receipts for - receiptPendPool map[string]*fetchRequest // [eth/63] Currently pending receipt retrieval operations + receiptTaskPool map[common.Hash]*types.Header // Pending receipt retrieval 
tasks, mapping hashes to headers + receiptTaskQueue *prque.Prque // Priority queue of the headers to fetch the receipts for + receiptPendPool map[string]*fetchRequest // Currently pending receipt retrieval operations resultCache *resultStore // Downloaded but not yet delivered fetch results resultSize common.StorageSize // Approximate size of a block (exponential moving average) @@ -690,6 +690,13 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh q.lock.Lock() defer q.lock.Unlock() + var logger log.Logger + if len(id) < 16 { + // Tests use short IDs, don't choke on them + logger = log.New("peer", id) + } else { + logger = log.New("peer", id[:16]) + } // Short circuit if the data was never requested request := q.headerPendPool[id] if request == nil { @@ -704,10 +711,10 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh accepted := len(headers) == MaxHeaderFetch if accepted { if headers[0].Number.Uint64() != request.From { - log.Trace("First header broke chain ordering", "peer", id, "number", headers[0].Number, "hash", headers[0].Hash(), request.From) + logger.Trace("First header broke chain ordering", "number", headers[0].Number, "hash", headers[0].Hash(), "expected", request.From) accepted = false } else if headers[len(headers)-1].Hash() != target { - log.Trace("Last header broke skeleton structure ", "peer", id, "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target) + logger.Trace("Last header broke skeleton structure ", "number", headers[len(headers)-1].Number, "hash", headers[len(headers)-1].Hash(), "expected", target) accepted = false } } @@ -716,12 +723,12 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh for i, header := range headers[1:] { hash := header.Hash() if want := request.From + 1 + uint64(i); header.Number.Uint64() != want { - log.Warn("Header broke chain ordering", "peer", id, "number", header.Number, "hash", hash, "expected", want) + logger.Warn("Header broke chain ordering", "number", header.Number, "hash", hash, "expected", want) accepted = false break } if parentHash != header.ParentHash { - log.Warn("Header broke chain ancestry", "peer", id, "number", header.Number, "hash", hash) + logger.Warn("Header broke chain ancestry", "number", header.Number, "hash", hash) accepted = false break } @@ -731,7 +738,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh } // If the batch of headers wasn't accepted, mark as unavailable if !accepted { - log.Trace("Skeleton filling not accepted", "peer", id, "from", request.From) + logger.Trace("Skeleton filling not accepted", "from", request.From) miss := q.headerPeerMiss[id] if miss == nil { @@ -758,7 +765,7 @@ func (q *queue) DeliverHeaders(id string, headers []*types.Header, headerProcCh select { case headerProcCh <- process: - log.Trace("Pre-scheduled new headers", "peer", id, "count", len(process), "from", process[0].Number) + logger.Trace("Pre-scheduled new headers", "count", len(process), "from", process[0].Number) q.headerProced += len(process) default: } @@ -886,9 +893,6 @@ func (q *queue) deliver(id string, taskPool map[common.Hash]*types.Header, return accepted, nil } // If none of the data was good, it's a stale delivery - if errors.Is(failure, errInvalidChain) { - return accepted, failure - } if accepted > 0 { return accepted, fmt.Errorf("partial failure: %v", failure) } diff --git a/eth/downloader/queue_test.go b/eth/downloader/queue_test.go index 
aedfba4565..f43ad67a41 100644 --- a/eth/downloader/queue_test.go +++ b/eth/downloader/queue_test.go @@ -97,6 +97,9 @@ func dummyPeer(id string) *peerConnection { } func TestBasics(t *testing.T) { + numOfBlocks := len(emptyChain.blocks) + numOfReceipts := len(emptyChain.blocks) / 2 + q := newQueue(10, 10) if !q.Idle() { t.Errorf("new queue should be idle") @@ -135,6 +138,12 @@ func TestBasics(t *testing.T) { t.Fatalf("expected header %d, got %d", exp, got) } } + if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { + t.Errorf("expected block task queue to be %d, got %d", exp, got) + } + if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { + t.Errorf("expected receipt task queue to be %d, got %d", exp, got) + } { peer := dummyPeer("peer-2") fetchReq, _, throttle := q.ReserveBodies(peer, 50) @@ -148,8 +157,12 @@ func TestBasics(t *testing.T) { t.Fatalf("should have no fetches, got %d", len(fetchReq.Headers)) } } - //fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size()) - //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size()) + if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { + t.Errorf("expected block task queue to be %d, got %d", exp, got) + } + if exp, got := q.receiptTaskQueue.Size(), numOfReceipts; exp != got { + t.Errorf("expected receipt task queue to be %d, got %d", exp, got) + } { // The receipt delivering peer should not be affected // by the throttling of body deliveries @@ -168,12 +181,20 @@ func TestBasics(t *testing.T) { } } - //fmt.Printf("blockTaskQueue len: %d\n", q.blockTaskQueue.Size()) - //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size()) - //fmt.Printf("processable: %d\n", q.resultCache.countCompleted()) + if exp, got := q.blockTaskQueue.Size(), numOfBlocks-10; exp != got { + t.Errorf("expected block task queue to be %d, got %d", exp, got) + } + if exp, got := q.receiptTaskQueue.Size(), numOfReceipts-5; exp != got { + t.Errorf("expected receipt task queue to be %d, got %d", exp, got) + } + if got, exp := q.resultCache.countCompleted(), 0; got != exp { + t.Errorf("wrong processable count, got %d, exp %d", got, exp) + } } func TestEmptyBlocks(t *testing.T) { + numOfBlocks := len(emptyChain.blocks) + q := newQueue(10, 10) q.Prepare(1, FastSync) @@ -208,13 +229,12 @@ func TestEmptyBlocks(t *testing.T) { } } - if q.blockTaskQueue.Size() != len(emptyChain.blocks)-10 { - t.Errorf("expected block task queue to be 0, got %d", q.blockTaskQueue.Size()) + if q.blockTaskQueue.Size() != numOfBlocks-10 { + t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) } if q.receiptTaskQueue.Size() != 0 { - t.Errorf("expected receipt task queue to be 0, got %d", q.receiptTaskQueue.Size()) + t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) } - //fmt.Printf("receiptTaskQueue len: %d\n", q.receiptTaskQueue.Size()) { peer := dummyPeer("peer-3") fetchReq, _, _ := q.ReserveReceipts(peer, 50) @@ -224,6 +244,12 @@ func TestEmptyBlocks(t *testing.T) { t.Fatal("there should be no body fetch tasks remaining") } } + if q.blockTaskQueue.Size() != numOfBlocks-10 { + t.Errorf("expected block task queue to be %d, got %d", numOfBlocks-10, q.blockTaskQueue.Size()) + } + if q.receiptTaskQueue.Size() != 0 { + t.Errorf("expected receipt task queue to be %d, got %d", 0, q.receiptTaskQueue.Size()) + } if got, exp := q.resultCache.countCompleted(), 10; got != exp { t.Errorf("wrong processable count, got %d, exp %d", got, exp) } diff --git 
a/eth/downloader/statesync.go b/eth/downloader/statesync.go index 6745aa54ac..6231588ad2 100644 --- a/eth/downloader/statesync.go +++ b/eth/downloader/statesync.go @@ -18,13 +18,13 @@ package downloader import ( "fmt" - "hash" "sync" "time" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/trie" @@ -101,8 +101,16 @@ func (d *Downloader) runStateSync(s *stateSync) *stateSync { finished []*stateReq // Completed or failed requests timeout = make(chan *stateReq) // Timed out active requests ) - // Run the state sync. log.Trace("State sync starting", "root", s.root) + + defer func() { + // Cancel active request timers on exit. Also set peers to idle so they're + // available for the next sync. + for _, req := range active { + req.timer.Stop() + req.peer.SetNodeDataIdle(int(req.nItems), time.Now()) + } + }() go s.run() defer s.Cancel() @@ -252,8 +260,9 @@ func (d *Downloader) spindownStateSync(active map[string]*stateReq, finished []* type stateSync struct { d *Downloader // Downloader instance to access and manage current peerset - sched *trie.Sync // State trie sync scheduler defining the tasks - keccak hash.Hash // Keccak256 hasher to verify deliveries with + root common.Hash // State root currently being synced + sched *trie.Sync // State trie sync scheduler defining the tasks + keccak crypto.KeccakState // Keccak256 hasher to verify deliveries with trieTasks map[common.Hash]*trieTask // Set of trie node tasks currently queued for retrieval codeTasks map[common.Hash]*codeTask // Set of byte code tasks currently queued for retrieval @@ -268,8 +277,6 @@ type stateSync struct { cancelOnce sync.Once // Ensures cancel only ever gets called once done chan struct{} // Channel to signal termination completion err error // Any error hit during sync (set before completion) - - root common.Hash } // trieTask represents a single trie node download task, containing a set of @@ -290,15 +297,15 @@ type codeTask struct { func newStateSync(d *Downloader, root common.Hash) *stateSync { return &stateSync{ d: d, + root: root, sched: state.NewStateSync(root, d.stateDB, d.stateBloom), - keccak: sha3.NewLegacyKeccak256(), + keccak: sha3.NewLegacyKeccak256().(crypto.KeccakState), trieTasks: make(map[common.Hash]*trieTask), codeTasks: make(map[common.Hash]*codeTask), deliver: make(chan *stateReq), cancel: make(chan struct{}), done: make(chan struct{}), started: make(chan struct{}), - root: root, } } @@ -306,7 +313,12 @@ func newStateSync(d *Downloader, root common.Hash) *stateSync { // it finishes, and finally notifying any goroutines waiting for the loop to // finish. func (s *stateSync) run() { - s.err = s.loop() + close(s.started) + if s.d.snapSync { + s.err = s.d.SnapSyncer.Sync(s.root, s.cancel) + } else { + s.err = s.loop() + } close(s.done) } @@ -318,7 +330,9 @@ func (s *stateSync) Wait() error { // Cancel cancels the sync and waits until it has shut down. func (s *stateSync) Cancel() error { - s.cancelOnce.Do(func() { close(s.cancel) }) + s.cancelOnce.Do(func() { + close(s.cancel) + }) return s.Wait() } @@ -329,7 +343,6 @@ func (s *stateSync) Cancel() error { // pushed here async. The reason is to decouple processing from data receipt // and timeouts. 
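The statesync hunks above retype the hasher as `crypto.KeccakState`, so the digest can be read straight out of the sponge in `processNodeData` in the following hunk. A hedged sketch, assuming a geth version that defines `crypto.KeccakState`; it is not upstream code, only a comparison of the two ways to obtain the same Keccak256 digest:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"golang.org/x/crypto/sha3"
)

// Reading the digest out of the sponge via Read avoids the extra allocation
// that Sum makes for its return slice, while producing the same hash.
func main() {
	blob := []byte("node data")

	h := sha3.NewLegacyKeccak256().(crypto.KeccakState)
	h.Write(blob)

	var viaRead [32]byte
	h.Read(viaRead[:]) // drain the digest in place

	viaHelper := crypto.Keccak256(blob) // conventional one-shot path
	fmt.Println(bytes.Equal(viaRead[:], viaHelper)) // true
}
```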
func (s *stateSync) loop() (err error) { - close(s.started) // Listen for new peer events to assign tasks to them newPeer := make(chan *peerConnection, 1024) peerSub := s.d.peers.SubscribeNewPeers(newPeer) @@ -577,7 +590,7 @@ func (s *stateSync) processNodeData(blob []byte) (common.Hash, error) { res := trie.SyncResult{Data: blob} s.keccak.Reset() s.keccak.Write(blob) - s.keccak.Sum(res.Hash[:0]) + s.keccak.Read(res.Hash[:]) err := s.sched.Process(res) return res.Hash, err } diff --git a/eth/config.go b/eth/ethconfig/config.go similarity index 62% rename from eth/config.go rename to eth/ethconfig/config.go index 0d99c2a3f1..5d0eece067 100644 --- a/eth/config.go +++ b/eth/ethconfig/config.go @@ -14,7 +14,8 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package eth +// Package ethconfig contains the configuration of the ETH and LES protocols. +package ethconfig import ( "math/big" @@ -25,30 +26,35 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/clique" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/gasprice" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" ) -// DefaultFullGPOConfig contains default gasprice oracle settings for full node. -var DefaultFullGPOConfig = gasprice.Config{ +// FullNodeGPO contains default gasprice oracle settings for full node. +var FullNodeGPO = gasprice.Config{ Blocks: 20, Percentile: 60, MaxPrice: gasprice.DefaultMaxPrice, } -// DefaultLightGPOConfig contains default gasprice oracle settings for light client. -var DefaultLightGPOConfig = gasprice.Config{ +// LightClientGPO contains default gasprice oracle settings for light client. +var LightClientGPO = gasprice.Config{ Blocks: 2, Percentile: 60, MaxPrice: gasprice.DefaultMaxPrice, } -// DefaultConfig contains default settings for use on the Ethereum main net. -var DefaultConfig = Config{ +// Defaults contains default settings for use on the Ethereum main net. 
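With eth/config.go becoming the ethconfig package, the default values and gas-price-oracle presets are renamed; the `Defaults` value itself follows in the next hunk. A consumer-side sketch of the new names, hedged to the field set shown in this diff:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
)

// Callers now start from ethconfig.Defaults and pick one of the oracle
// presets (FullNodeGPO or LightClientGPO) instead of the old eth.DefaultConfig
// and DefaultFullGPOConfig names. Availability depends on the geth version.
func main() {
	cfg := ethconfig.Defaults
	cfg.SyncMode = downloader.FastSync
	cfg.GPO = ethconfig.LightClientGPO // e.g. tune the oracle for a light client

	fmt.Println(cfg.NetworkId, cfg.SyncMode)
}
```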
+var Defaults = Config{ SyncMode: downloader.FastSync, Ethash: ethash.Config{ CacheDir: "ethash", @@ -60,6 +66,7 @@ var DefaultConfig = Config{ DatasetsLockMmap: false, }, NetworkId: 1, + TxLookupLimit: 2350000, LightPeers: 100, UltraLightFraction: 75, DatabaseCache: 512, @@ -77,7 +84,7 @@ var DefaultConfig = Config{ }, TxPool: core.DefaultTxPoolConfig, RPCGasCap: 25000000, - GPO: DefaultFullGPOConfig, + GPO: FullNodeGPO, RPCTxFeeCap: 1, // 1 ether } @@ -89,21 +96,22 @@ func init() { } } if runtime.GOOS == "darwin" { - DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "Library", "Ethash") + Defaults.Ethash.DatasetDir = filepath.Join(home, "Library", "Ethash") } else if runtime.GOOS == "windows" { localappdata := os.Getenv("LOCALAPPDATA") if localappdata != "" { - DefaultConfig.Ethash.DatasetDir = filepath.Join(localappdata, "Ethash") + Defaults.Ethash.DatasetDir = filepath.Join(localappdata, "Ethash") } else { - DefaultConfig.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Ethash") + Defaults.Ethash.DatasetDir = filepath.Join(home, "AppData", "Local", "Ethash") } } else { - DefaultConfig.Ethash.DatasetDir = filepath.Join(home, ".ethash") + Defaults.Ethash.DatasetDir = filepath.Join(home, ".ethash") } } //go:generate gencodec -type Config -formats toml -out gen_config.go +// Config contains configuration options for of the ETH and LES protocols. type Config struct { // The genesis block, which is inserted if the database is empty. // If nil, the Ethereum main net block is used. @@ -115,7 +123,8 @@ type Config struct { // This can be set to list of enrtree:// URLs which will be queried for // for nodes to connect to. - DiscoveryURLs []string + EthDiscoveryURLs []string + SnapDiscoveryURLs []string NoPruning bool // Whether to disable pruning and flush everything to disk NoPrefetch bool // Whether to disable prefetching and only load state on demand @@ -126,11 +135,13 @@ type Config struct { Whitelist map[uint64]common.Hash `toml:"-"` // Light client options - LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests - LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers - LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers - LightPeers int `toml:",omitempty"` // Maximum number of LES client peers - LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning + LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests + LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers + LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers + LightPeers int `toml:",omitempty"` // Maximum number of LES client peers + LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning + LightNoSyncServe bool `toml:",omitempty"` // Whether to serve light clients before syncing + SyncFromCheckpoint bool `toml:",omitempty"` // Whether to sync the header chain from the configured checkpoint // Ultra Light client options UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers @@ -149,6 +160,7 @@ type Config struct { TrieDirtyCache int TrieTimeout time.Duration SnapshotCache int + Preimages bool // Mining options Miner miner.Config @@ -186,4 +198,40 @@ type Config struct { // CheckpointOracle is the configuration for checkpoint oracle. 
CheckpointOracle *params.CheckpointOracleConfig `toml:",omitempty"` + + // Berlin block override (TODO: remove after the fork) + OverrideBerlin *big.Int `toml:",omitempty"` +} + +// CreateConsensusEngine creates a consensus engine for the given chain configuration. +func CreateConsensusEngine(stack *node.Node, chainConfig *params.ChainConfig, config *ethash.Config, notify []string, noverify bool, db ethdb.Database) consensus.Engine { + // If proof-of-authority is requested, set it up + if chainConfig.Clique != nil { + return clique.New(chainConfig.Clique, db) + } + // Otherwise assume proof-of-work + switch config.PowMode { + case ethash.ModeFake: + log.Warn("Ethash used in fake mode") + return ethash.NewFaker() + case ethash.ModeTest: + log.Warn("Ethash used in test mode") + return ethash.NewTester(nil, noverify) + case ethash.ModeShared: + log.Warn("Ethash used in shared mode") + return ethash.NewShared() + default: + engine := ethash.New(ethash.Config{ + CacheDir: stack.ResolvePath(config.CacheDir), + CachesInMem: config.CachesInMem, + CachesOnDisk: config.CachesOnDisk, + CachesLockMmap: config.CachesLockMmap, + DatasetDir: config.DatasetDir, + DatasetsInMem: config.DatasetsInMem, + DatasetsOnDisk: config.DatasetsOnDisk, + DatasetsLockMmap: config.DatasetsLockMmap, + }, notify, noverify) + engine.SetThreads(-1) // Disable CPU mining + return engine + } } diff --git a/eth/gen_config.go b/eth/ethconfig/gen_config.go similarity index 88% rename from eth/gen_config.go rename to eth/ethconfig/gen_config.go index 0093439d14..ca93b2ad00 100644 --- a/eth/gen_config.go +++ b/eth/ethconfig/gen_config.go @@ -1,6 +1,6 @@ // Code generated by github.com/fjl/gencodec. DO NOT EDIT. -package eth +package ethconfig import ( "time" @@ -20,7 +20,8 @@ func (c Config) MarshalTOML() (interface{}, error) { Genesis *core.Genesis `toml:",omitempty"` NetworkId uint64 SyncMode downloader.SyncMode - DiscoveryURLs []string + EthDiscoveryURLs []string + SnapDiscoveryURLs []string NoPruning bool NoPrefetch bool TxLookupLimit uint64 `toml:",omitempty"` @@ -30,6 +31,8 @@ func (c Config) MarshalTOML() (interface{}, error) { LightEgress int `toml:",omitempty"` LightPeers int `toml:",omitempty"` LightNoPrune bool `toml:",omitempty"` + LightNoSyncServe bool `toml:",omitempty"` + SyncFromCheckpoint bool `toml:",omitempty"` UltraLightServers []string `toml:",omitempty"` UltraLightFraction int `toml:",omitempty"` UltraLightOnlyAnnounce bool `toml:",omitempty"` @@ -43,6 +46,7 @@ func (c Config) MarshalTOML() (interface{}, error) { TrieDirtyCache int TrieTimeout time.Duration SnapshotCache int + Preimages bool Miner miner.Config Ethash ethash.Config TxPool core.TxPoolConfig @@ -60,7 +64,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.Genesis = c.Genesis enc.NetworkId = c.NetworkId enc.SyncMode = c.SyncMode - enc.DiscoveryURLs = c.DiscoveryURLs + enc.EthDiscoveryURLs = c.EthDiscoveryURLs + enc.SnapDiscoveryURLs = c.SnapDiscoveryURLs enc.NoPruning = c.NoPruning enc.NoPrefetch = c.NoPrefetch enc.TxLookupLimit = c.TxLookupLimit @@ -70,6 +75,8 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.LightEgress = c.LightEgress enc.LightPeers = c.LightPeers enc.LightNoPrune = c.LightNoPrune + enc.LightNoSyncServe = c.LightNoSyncServe + enc.SyncFromCheckpoint = c.SyncFromCheckpoint enc.UltraLightServers = c.UltraLightServers enc.UltraLightFraction = c.UltraLightFraction enc.UltraLightOnlyAnnounce = c.UltraLightOnlyAnnounce @@ -83,6 +90,7 @@ func (c Config) MarshalTOML() (interface{}, error) { enc.TrieDirtyCache = 
c.TrieDirtyCache enc.TrieTimeout = c.TrieTimeout enc.SnapshotCache = c.SnapshotCache + enc.Preimages = c.Preimages enc.Miner = c.Miner enc.Ethash = c.Ethash enc.TxPool = c.TxPool @@ -104,7 +112,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { Genesis *core.Genesis `toml:",omitempty"` NetworkId *uint64 SyncMode *downloader.SyncMode - DiscoveryURLs []string + EthDiscoveryURLs []string + SnapDiscoveryURLs []string NoPruning *bool NoPrefetch *bool TxLookupLimit *uint64 `toml:",omitempty"` @@ -114,6 +123,8 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { LightEgress *int `toml:",omitempty"` LightPeers *int `toml:",omitempty"` LightNoPrune *bool `toml:",omitempty"` + LightNoSyncServe *bool `toml:",omitempty"` + SyncFromCheckpoint *bool `toml:",omitempty"` UltraLightServers []string `toml:",omitempty"` UltraLightFraction *int `toml:",omitempty"` UltraLightOnlyAnnounce *bool `toml:",omitempty"` @@ -127,6 +138,7 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { TrieDirtyCache *int TrieTimeout *time.Duration SnapshotCache *int + Preimages *bool Miner *miner.Config Ethash *ethash.Config TxPool *core.TxPoolConfig @@ -153,8 +165,11 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.SyncMode != nil { c.SyncMode = *dec.SyncMode } - if dec.DiscoveryURLs != nil { - c.DiscoveryURLs = dec.DiscoveryURLs + if dec.EthDiscoveryURLs != nil { + c.EthDiscoveryURLs = dec.EthDiscoveryURLs + } + if dec.SnapDiscoveryURLs != nil { + c.SnapDiscoveryURLs = dec.SnapDiscoveryURLs } if dec.NoPruning != nil { c.NoPruning = *dec.NoPruning @@ -183,6 +198,12 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.LightNoPrune != nil { c.LightNoPrune = *dec.LightNoPrune } + if dec.LightNoSyncServe != nil { + c.LightNoSyncServe = *dec.LightNoSyncServe + } + if dec.SyncFromCheckpoint != nil { + c.SyncFromCheckpoint = *dec.SyncFromCheckpoint + } if dec.UltraLightServers != nil { c.UltraLightServers = dec.UltraLightServers } @@ -222,6 +243,9 @@ func (c *Config) UnmarshalTOML(unmarshal func(interface{}) error) error { if dec.SnapshotCache != nil { c.SnapshotCache = *dec.SnapshotCache } + if dec.Preimages != nil { + c.Preimages = *dec.Preimages + } if dec.Miner != nil { c.Miner = *dec.Miner } diff --git a/eth/fetcher/block_fetcher.go b/eth/fetcher/block_fetcher.go index 270aaf5918..5ea8a128d9 100644 --- a/eth/fetcher/block_fetcher.go +++ b/eth/fetcher/block_fetcher.go @@ -620,7 +620,7 @@ func (f *BlockFetcher) loop() { continue } if txnHash == (common.Hash{}) { - txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), new(trie.Trie)) + txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil)) } if txnHash != announce.header.TxHash { continue diff --git a/eth/fetcher/block_fetcher_test.go b/eth/fetcher/block_fetcher_test.go index 3220002a99..a6eef71da0 100644 --- a/eth/fetcher/block_fetcher_test.go +++ b/eth/fetcher/block_fetcher_test.go @@ -39,7 +39,7 @@ var ( testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") testAddress = crypto.PubkeyToAddress(testKey.PublicKey) genesis = core.GenesisBlockForTesting(testdb, testAddress, big.NewInt(1000000000)) - unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil, new(trie.Trie)) + unknownBlock = types.NewBlock(&types.Header{GasLimit: params.GenesisGasLimit}, nil, nil, nil, trie.NewStackTrie(nil)) ) // makeChain creates a 
chain of n blocks starting at and including parent. diff --git a/eth/filters/api.go b/eth/filters/api.go index 30d7b71c31..4b36a5379e 100644 --- a/eth/filters/api.go +++ b/eth/filters/api.go @@ -25,7 +25,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" @@ -34,10 +34,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -var ( - deadline = 5 * time.Minute // consider a filter inactive if it has not been polled for within deadline -) - // filter is a helper struct that holds meta information over the filter type // and associated subscription in the event system. type filter struct { @@ -59,25 +55,28 @@ type PublicFilterAPI struct { events *EventSystem filtersMu sync.Mutex filters map[rpc.ID]*filter + timeout time.Duration } // NewPublicFilterAPI returns a new PublicFilterAPI instance. -func NewPublicFilterAPI(backend Backend, lightMode bool) *PublicFilterAPI { +func NewPublicFilterAPI(backend Backend, lightMode bool, timeout time.Duration) *PublicFilterAPI { api := &PublicFilterAPI{ backend: backend, chainDb: backend.ChainDb(), events: NewEventSystem(backend, lightMode), filters: make(map[rpc.ID]*filter), + timeout: timeout, } - go api.timeoutLoop() + go api.timeoutLoop(timeout) return api } // timeoutLoop runs every 5 minutes and deletes filters that have not been recently used. // Tt is started when the api is created. -func (api *PublicFilterAPI) timeoutLoop() { - ticker := time.NewTicker(5 * time.Minute) +func (api *PublicFilterAPI) timeoutLoop(timeout time.Duration) { + var toUninstall []*Subscription + ticker := time.NewTicker(timeout) defer ticker.Stop() for { <-ticker.C @@ -85,13 +84,21 @@ func (api *PublicFilterAPI) timeoutLoop() { for id, f := range api.filters { select { case <-f.deadline.C: - f.s.Unsubscribe() + toUninstall = append(toUninstall, f.s) delete(api.filters, id) default: continue } } api.filtersMu.Unlock() + + // Unsubscribes are processed outside the lock to avoid the following scenario: + // event loop attempts broadcasting events to still active filters while + // Unsubscribe is waiting for it to process the uninstall request. + for _, s := range toUninstall { + s.Unsubscribe() + } + toUninstall = nil } } @@ -101,7 +108,7 @@ func (api *PublicFilterAPI) timeoutLoop() { // It is part of the filter package because this filter can be used through the // `eth_getFilterChanges` polling method that is also used for log filters. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter +// https://eth.wiki/json-rpc/API#eth_newpendingtransactionfilter func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID { var ( pendingTxs = make(chan []common.Hash) @@ -109,7 +116,7 @@ func (api *PublicFilterAPI) NewPendingTransactionFilter() rpc.ID { ) api.filtersMu.Lock() - api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: pendingTxSub} + api.filters[pendingTxSub.ID] = &filter{typ: PendingTransactionsSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: pendingTxSub} api.filtersMu.Unlock() go func() { @@ -171,7 +178,7 @@ func (api *PublicFilterAPI) NewPendingTransactions(ctx context.Context) (*rpc.Su // NewBlockFilter creates a filter that fetches blocks that are imported into the chain. 
// It is part of the filter package since polling goes with eth_getFilterChanges. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newblockfilter +// https://eth.wiki/json-rpc/API#eth_newblockfilter func (api *PublicFilterAPI) NewBlockFilter() rpc.ID { var ( headers = make(chan *types.Header) @@ -179,7 +186,7 @@ func (api *PublicFilterAPI) NewBlockFilter() rpc.ID { ) api.filtersMu.Lock() - api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(deadline), hashes: make([]common.Hash, 0), s: headerSub} + api.filters[headerSub.ID] = &filter{typ: BlocksSubscription, deadline: time.NewTimer(api.timeout), hashes: make([]common.Hash, 0), s: headerSub} api.filtersMu.Unlock() go func() { @@ -287,16 +294,16 @@ type FilterCriteria ethereum.FilterQuery // // In case "fromBlock" > "toBlock" an error is returned. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter +// https://eth.wiki/json-rpc/API#eth_newfilter func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { logs := make(chan []*types.Log) logsSub, err := api.events.SubscribeLogs(ethereum.FilterQuery(crit), logs) if err != nil { - return rpc.ID(""), err + return "", err } api.filtersMu.Lock() - api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(deadline), logs: make([]*types.Log, 0), s: logsSub} + api.filters[logsSub.ID] = &filter{typ: LogsSubscription, crit: crit, deadline: time.NewTimer(api.timeout), logs: make([]*types.Log, 0), s: logsSub} api.filtersMu.Unlock() go func() { @@ -322,7 +329,7 @@ func (api *PublicFilterAPI) NewFilter(crit FilterCriteria) (rpc.ID, error) { // GetLogs returns logs matching the given argument that are stored within the state. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs +// https://eth.wiki/json-rpc/API#eth_getlogs func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([]*types.Log, error) { var filter *Filter if crit.BlockHash != nil { @@ -351,7 +358,7 @@ func (api *PublicFilterAPI) GetLogs(ctx context.Context, crit FilterCriteria) ([ // UninstallFilter removes the filter with the given filter id. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_uninstallfilter +// https://eth.wiki/json-rpc/API#eth_uninstallfilter func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { api.filtersMu.Lock() f, found := api.filters[id] @@ -369,7 +376,7 @@ func (api *PublicFilterAPI) UninstallFilter(id rpc.ID) bool { // GetFilterLogs returns the logs for the filter with the given id. // If the filter could not be found an empty array of logs is returned. // -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs +// https://eth.wiki/json-rpc/API#eth_getfilterlogs func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*types.Log, error) { api.filtersMu.Lock() f, found := api.filters[id] @@ -410,7 +417,7 @@ func (api *PublicFilterAPI) GetFilterLogs(ctx context.Context, id rpc.ID) ([]*ty // For pending transaction and block filters the result is []common.Hash. // (pending)Log filters return []Log. 
// -// https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges +// https://eth.wiki/json-rpc/API#eth_getfilterchanges func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { api.filtersMu.Lock() defer api.filtersMu.Unlock() @@ -421,7 +428,7 @@ func (api *PublicFilterAPI) GetFilterChanges(id rpc.ID) (interface{}, error) { // receive timer value and reset timer <-f.deadline.C } - f.deadline.Reset(deadline) + f.deadline.Reset(api.timeout) switch f.typ { case PendingTransactionsSubscription, BlocksSubscription: diff --git a/eth/filters/filter_system.go b/eth/filters/filter_system.go index a105ec51c3..12f037d0f9 100644 --- a/eth/filters/filter_system.go +++ b/eth/filters/filter_system.go @@ -24,7 +24,7 @@ import ( "sync" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" diff --git a/eth/filters/filter_system_test.go b/eth/filters/filter_system_test.go index c8d1d43abb..52150366c1 100644 --- a/eth/filters/filter_system_test.go +++ b/eth/filters/filter_system_test.go @@ -22,10 +22,11 @@ import ( "math/big" "math/rand" "reflect" + "runtime" "testing" "time" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" @@ -38,6 +39,10 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) +var ( + deadline = 5 * time.Minute +) + type testBackend struct { mux *event.TypeMux db ethdb.Database @@ -163,7 +168,7 @@ func TestBlockSubscription(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) genesis = new(core.Genesis).MustCommit(db) chain, _ = core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 10, func(i int, gen *core.BlockGen) {}) chainEvents = []core.ChainEvent{} @@ -215,7 +220,7 @@ func TestPendingTxFilter(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) transactions = []*types.Transaction{ types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), @@ -270,7 +275,7 @@ func TestLogFilterCreation(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) testCases = []struct { crit FilterCriteria @@ -314,7 +319,7 @@ func TestInvalidLogFilterCreation(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) ) // different situations where log filter creation should fail. 
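The filter API now receives its inactivity timeout as a constructor argument instead of reading a package-level `deadline` variable, which is why every test above passes an explicit `deadline`. A minimal sketch of the new call shape, assuming a `filters.Backend` implementation named `backend` and the usual imports (`fmt`, `time`, `github.com/ethereum/go-ethereum/eth/filters`); the variable names are illustrative, not part of the diff:

	// Create the API with a 2-minute inactivity window; the old code hard-coded 5 minutes.
	api := filters.NewPublicFilterAPI(backend, false, 2*time.Minute)

	id := api.NewPendingTransactionFilter()

	// Each successful poll resets the filter's deadline; a filter that is not
	// polled again within the timeout is uninstalled by the API's timeout loop.
	if changes, err := api.GetFilterChanges(id); err == nil {
		fmt.Printf("pending tx hashes: %v\n", changes)
	}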
@@ -336,7 +341,7 @@ func TestInvalidGetLogsRequest(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") ) @@ -361,7 +366,7 @@ func TestLogFilter(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -475,7 +480,7 @@ func TestPendingLogsSubscription(t *testing.T) { var ( db = rawdb.NewMemoryDatabase() backend = &testBackend{db: db} - api = NewPublicFilterAPI(backend, false) + api = NewPublicFilterAPI(backend, false, deadline) firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") @@ -601,6 +606,73 @@ func TestPendingLogsSubscription(t *testing.T) { } } +// TestPendingTxFilterDeadlock tests if the event loop hangs when pending +// txes arrive at the same time that one of multiple filters is timing out. +// Please refer to #22131 for more details. +func TestPendingTxFilterDeadlock(t *testing.T) { + t.Parallel() + timeout := 100 * time.Millisecond + + var ( + db = rawdb.NewMemoryDatabase() + backend = &testBackend{db: db} + api = NewPublicFilterAPI(backend, false, timeout) + done = make(chan struct{}) + ) + + go func() { + // Bombard feed with txes until signal was received to stop + i := uint64(0) + for { + select { + case <-done: + return + default: + } + + tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) + backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) + i++ + } + }() + + // Create a bunch of filters that will + // timeout either in 100ms or 200ms + fids := make([]rpc.ID, 20) + for i := 0; i < len(fids); i++ { + fid := api.NewPendingTransactionFilter() + fids[i] = fid + // Wait for at least one tx to arrive in filter + for { + hashes, err := api.GetFilterChanges(fid) + if err != nil { + t.Fatalf("Filter should exist: %v\n", err) + } + if len(hashes.([]common.Hash)) > 0 { + break + } + runtime.Gosched() + } + } + + // Wait until filters have timed out + time.Sleep(3 * timeout) + + // If tx loop doesn't consume `done` after a second + // it's hanging. 
+ select { + case done <- struct{}{}: + // Check that all filters have been uninstalled + for _, fid := range fids { + if _, err := api.GetFilterChanges(fid); err == nil { + t.Errorf("Filter %s should have been uninstalled\n", fid) + } + } + case <-time.After(1 * time.Second): + t.Error("Tx sending loop hangs") + } +} + func flattenLogs(pl [][]*types.Log) []*types.Log { var logs []*types.Log for _, l := range pl { diff --git a/eth/gasprice/gasprice_test.go b/eth/gasprice/gasprice_test.go index 89caeeb45b..4fd2df10e2 100644 --- a/eth/gasprice/gasprice_test.go +++ b/eth/gasprice/gasprice_test.go @@ -63,7 +63,7 @@ func newTestBackend(t *testing.T) *testBackend { Config: params.TestChainConfig, Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(math.MaxInt64)}}, } - signer = types.NewEIP155Signer(gspec.Config.ChainID) + signer = types.LatestSigner(gspec.Config) ) engine := ethash.NewFaker() db := rawdb.NewMemoryDatabase() diff --git a/eth/handler.go b/eth/handler.go index 5b89986539..13fa701935 100644 --- a/eth/handler.go +++ b/eth/handler.go @@ -17,9 +17,7 @@ package eth import ( - "encoding/json" "errors" - "fmt" "math" "math/big" "sync" @@ -27,26 +25,22 @@ import ( "time" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/eth/fetcher" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/snap" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) const ( - softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data. - estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header - // txChanSize is the size of channel listening to NewTxsEvent. // The number is referenced from the size of tx pool. txChanSize = 4096 @@ -56,26 +50,61 @@ var ( syncChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the sync progress challenge ) -func errResp(code errCode, format string, v ...interface{}) error { - return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) +// txPool defines the methods needed from a transaction pool implementation to +// support all the operations needed by the Ethereum chain protocols. +type txPool interface { + // Has returns an indicator whether txpool has a transaction + // cached with the given hash. + Has(hash common.Hash) bool + + // Get retrieves the transaction from local txpool with given + // tx hash. + Get(hash common.Hash) *types.Transaction + + // AddRemotes should add the given transactions to the pool. + AddRemotes([]*types.Transaction) []error + + // Pending should return pending transactions. + // The slice should be modifiable by the caller. + Pending() (map[common.Address]types.Transactions, error) + + // SubscribeNewTxsEvent should return an event subscription of + // NewTxsEvent and send events to the given channel. + SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription +} + +// handlerConfig is the collection of initialization parameters to create a full +// node network handler. 
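Because the handler now depends only on the small txPool interface above rather than on a concrete *core.TxPool, tests or alternative pool implementations only need to satisfy those five methods. A rough, hypothetical in-memory stub along these lines would compile against the interface as defined in the diff (assuming the usual go-ethereum imports: sync, common, core, core/types, event):

	// testTxPool is a hypothetical stub satisfying the txPool interface.
	type testTxPool struct {
		mu     sync.Mutex
		pool   map[common.Hash]*types.Transaction
		txFeed event.Feed
	}

	func newTestTxPool() *testTxPool {
		return &testTxPool{pool: make(map[common.Hash]*types.Transaction)}
	}

	func (p *testTxPool) Has(hash common.Hash) bool {
		p.mu.Lock()
		defer p.mu.Unlock()
		return p.pool[hash] != nil
	}

	func (p *testTxPool) Get(hash common.Hash) *types.Transaction {
		p.mu.Lock()
		defer p.mu.Unlock()
		return p.pool[hash]
	}

	func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error {
		p.mu.Lock()
		for _, tx := range txs {
			p.pool[tx.Hash()] = tx
		}
		p.mu.Unlock()

		p.txFeed.Send(core.NewTxsEvent{Txs: txs})
		return make([]error, len(txs))
	}

	func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) {
		// A real pool groups transactions by sender; an empty map is enough for a stub.
		return map[common.Address]types.Transactions{}, nil
	}

	func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription {
		return p.txFeed.Subscribe(ch)
	}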
+type handlerConfig struct { + Database ethdb.Database // Database for direct sync insertions + Chain *core.BlockChain // Blockchain to serve data from + TxPool txPool // Transaction pool to propagate from + Network uint64 // Network identifier to adfvertise + Sync downloader.SyncMode // Whether to fast or full sync + BloomCache uint64 // Megabytes to alloc for fast sync bloom + EventMux *event.TypeMux // Legacy event mux, deprecate for `feed` + Checkpoint *params.TrustedCheckpoint // Hard coded checkpoint for sync challenges + Whitelist map[uint64]common.Hash // Hard coded whitelist for sync challenged } -type ProtocolManager struct { +type handler struct { networkID uint64 forkFilter forkid.Filter // Fork ID filter, constant across the lifetime of the node fastSync uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks) + snapSync uint32 // Flag whether fast sync should operate on top of the snap protocol acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing) checkpointNumber uint64 // Block number for the sync progress validator to cross reference checkpointHash common.Hash // Block hash for the sync progress validator to cross reference - txpool txPool - blockchain *core.BlockChain - chaindb ethdb.Database - maxPeers int + database ethdb.Database + txpool txPool + chain *core.BlockChain + maxPeers int downloader *downloader.Downloader + stateBloom *trie.SyncBloom blockFetcher *fetcher.BlockFetcher txFetcher *fetcher.TxFetcher peers *peerSet @@ -94,29 +123,27 @@ type ProtocolManager struct { chainSync *chainSyncer wg sync.WaitGroup peerWG sync.WaitGroup - - // Test fields or hooks - broadcastTxAnnouncesOnly bool // Testing field, disable transaction propagation } -// NewProtocolManager returns a new Ethereum sub protocol manager. The Ethereum sub protocol manages peers capable -// with the Ethereum network. -func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCheckpoint, mode downloader.SyncMode, networkID uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb ethdb.Database, cacheLimit int, whitelist map[uint64]common.Hash) (*ProtocolManager, error) { +// newHandler returns a handler for all Ethereum chain management protocol. +func newHandler(config *handlerConfig) (*handler, error) { // Create the protocol manager with the base fields - manager := &ProtocolManager{ - networkID: networkID, - forkFilter: forkid.NewFilter(blockchain), - eventMux: mux, - txpool: txpool, - blockchain: blockchain, - chaindb: chaindb, + if config.EventMux == nil { + config.EventMux = new(event.TypeMux) // Nicety initialization for tests + } + h := &handler{ + networkID: config.Network, + forkFilter: forkid.NewFilter(config.Chain), + eventMux: config.EventMux, + database: config.Database, + txpool: config.TxPool, + chain: config.Chain, peers: newPeerSet(), - whitelist: whitelist, + whitelist: config.Whitelist, txsyncCh: make(chan *txsync), quitSync: make(chan struct{}), } - - if mode == downloader.FullSync { + if config.Sync == downloader.FullSync { // The database seems empty as the current block is the genesis. Yet the fast // block is ahead, so fast sync was enabled for this node at a certain point. // The scenarios where this can happen is @@ -125,42 +152,42 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh // * the last fast sync is not finished while user specifies a full sync this // time. 
But we don't have any recent state for full sync. // In these cases however it's safe to reenable fast sync. - fullBlock, fastBlock := blockchain.CurrentBlock(), blockchain.CurrentFastBlock() + fullBlock, fastBlock := h.chain.CurrentBlock(), h.chain.CurrentFastBlock() if fullBlock.NumberU64() == 0 && fastBlock.NumberU64() > 0 { - manager.fastSync = uint32(1) + h.fastSync = uint32(1) log.Warn("Switch sync mode from full sync to fast sync") } } else { - if blockchain.CurrentBlock().NumberU64() > 0 { + if h.chain.CurrentBlock().NumberU64() > 0 { // Print warning log if database is not empty to run fast sync. log.Warn("Switch sync mode from fast sync to full sync") } else { // If fast sync was requested and our database is empty, grant it - manager.fastSync = uint32(1) + h.fastSync = uint32(1) + if config.Sync == downloader.SnapSync { + h.snapSync = uint32(1) + } } } - // If we have trusted checkpoints, enforce them on the chain - if checkpoint != nil { - manager.checkpointNumber = (checkpoint.SectionIndex+1)*params.CHTFrequency - 1 - manager.checkpointHash = checkpoint.SectionHead + if config.Checkpoint != nil { + h.checkpointNumber = (config.Checkpoint.SectionIndex+1)*params.CHTFrequency - 1 + h.checkpointHash = config.Checkpoint.SectionHead } - // Construct the downloader (long sync) and its backing state bloom if fast // sync is requested. The downloader is responsible for deallocating the state // bloom when it's done. - var stateBloom *trie.SyncBloom - if atomic.LoadUint32(&manager.fastSync) == 1 { - stateBloom = trie.NewSyncBloom(uint64(cacheLimit), chaindb) + if atomic.LoadUint32(&h.fastSync) == 1 { + h.stateBloom = trie.NewSyncBloom(config.BloomCache, config.Database) } - manager.downloader = downloader.New(manager.checkpointNumber, chaindb, stateBloom, manager.eventMux, blockchain, nil, manager.removePeer) + h.downloader = downloader.New(h.checkpointNumber, config.Database, h.stateBloom, h.eventMux, h.chain, nil, h.removePeer) // Construct the fetcher (short sync) validator := func(header *types.Header) error { - return engine.VerifyHeader(blockchain, header, true) + return h.chain.Engine().VerifyHeader(h.chain, header, true) } heighter := func() uint64 { - return blockchain.CurrentBlock().NumberU64() + return h.chain.CurrentBlock().NumberU64() } inserter := func(blocks types.Blocks) (int, error) { // If sync hasn't reached the checkpoint yet, deny importing weird blocks. @@ -169,7 +196,7 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh // the propagated block if the head is too old. Unfortunately there is a corner // case when starting new networks, where the genesis might be ancient (0 unix) // which would prevent full nodes from accepting it. - if manager.blockchain.CurrentBlock().NumberU64() < manager.checkpointNumber { + if h.chain.CurrentBlock().NumberU64() < h.checkpointNumber { log.Warn("Unsynced yet, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } @@ -178,180 +205,117 @@ func NewProtocolManager(config *params.ChainConfig, checkpoint *params.TrustedCh // accept each others' blocks until a restart. Unfortunately we haven't figured // out a way yet where nodes can decide unilaterally whether the network is new // or not. This should be fixed if we figure out a solution. 
- if atomic.LoadUint32(&manager.fastSync) == 1 { + if atomic.LoadUint32(&h.fastSync) == 1 { log.Warn("Fast syncing, discarded propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash()) return 0, nil } - n, err := manager.blockchain.InsertChain(blocks) + n, err := h.chain.InsertChain(blocks) if err == nil { - atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import + atomic.StoreUint32(&h.acceptTxs, 1) // Mark initial sync done on any fetcher import } return n, err } - manager.blockFetcher = fetcher.NewBlockFetcher(false, nil, blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, nil, inserter, manager.removePeer) + h.blockFetcher = fetcher.NewBlockFetcher(false, nil, h.chain.GetBlockByHash, validator, h.BroadcastBlock, heighter, nil, inserter, h.removePeer) fetchTx := func(peer string, hashes []common.Hash) error { - p := manager.peers.Peer(peer) + p := h.peers.peer(peer) if p == nil { return errors.New("unknown peer") } return p.RequestTxs(hashes) } - manager.txFetcher = fetcher.NewTxFetcher(txpool.Has, txpool.AddRemotes, fetchTx) - - manager.chainSync = newChainSyncer(manager) - - return manager, nil -} - -func (pm *ProtocolManager) makeProtocol(version uint) p2p.Protocol { - length, ok := protocolLengths[version] - if !ok { - panic("makeProtocol for unknown version") - } - - return p2p.Protocol{ - Name: protocolName, - Version: version, - Length: length, - Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { - return pm.runPeer(pm.newPeer(int(version), p, rw, pm.txpool.Get)) - }, - NodeInfo: func() interface{} { - return pm.NodeInfo() - }, - PeerInfo: func(id enode.ID) interface{} { - if p := pm.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil { - return p.Info() - } - return nil - }, - } + h.txFetcher = fetcher.NewTxFetcher(h.txpool.Has, h.txpool.AddRemotes, fetchTx) + h.chainSync = newChainSyncer(h) + return h, nil } -func (pm *ProtocolManager) removePeer(id string) { - // Short circuit if the peer was already removed - peer := pm.peers.Peer(id) - if peer == nil { - return - } - log.Debug("Removing Ethereum peer", "peer", id) - - // Unregister the peer from the downloader and Ethereum peer set - pm.downloader.UnregisterPeer(id) - pm.txFetcher.Drop(id) - - if err := pm.peers.Unregister(id); err != nil { - log.Error("Peer removal failed", "peer", id, "err", err) - } - // Hard disconnect at the networking layer - if peer != nil { - peer.Peer.Disconnect(p2p.DiscUselessPeer) +// runEthPeer registers an eth peer into the joint eth/snap peerset, adds it to +// various subsistems and starts handling messages. +func (h *handler) runEthPeer(peer *eth.Peer, handler eth.Handler) error { + // If the peer has a `snap` extension, wait for it to connect so we can have + // a uniform initialization/teardown mechanism + snap, err := h.peers.waitSnapExtension(peer) + if err != nil { + peer.Log().Error("Snapshot extension barrier failed", "err", err) + return err } -} - -func (pm *ProtocolManager) Start(maxPeers int) { - pm.maxPeers = maxPeers - - // broadcast transactions - pm.wg.Add(1) - pm.txsCh = make(chan core.NewTxsEvent, txChanSize) - pm.txsSub = pm.txpool.SubscribeNewTxsEvent(pm.txsCh) - go pm.txBroadcastLoop() - - // broadcast mined blocks - pm.wg.Add(1) - pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{}) - go pm.minedBroadcastLoop() - - // start sync handlers - pm.wg.Add(2) - go pm.chainSync.loop() - go pm.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64. 
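Pulling the constructor arguments into handlerConfig shortens the call site considerably compared with the old NewProtocolManager signature. A sketch of how the new constructor might be wired up from inside the eth package, assuming a database, blockchain, and transaction pool have already been built (the variable names here are illustrative):

	h, err := newHandler(&handlerConfig{
		Database:   db,
		Chain:      chain,
		TxPool:     pool,
		Network:    1,
		Sync:       downloader.FastSync,
		BloomCache: 256, // megabytes for the fast-sync state bloom
		Whitelist:  map[uint64]common.Hash{},
	})
	if err != nil {
		return err
	}
	h.Start(50) // maxPeers
	defer h.Stop()

Leaving EventMux and Checkpoint unset is fine for a sketch: the constructor initializes a fresh event.TypeMux when none is given, and a nil checkpoint simply disables the sync-progress challenge.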
-} - -func (pm *ProtocolManager) Stop() { - pm.txsSub.Unsubscribe() // quits txBroadcastLoop - pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop - - // Quit chainSync and txsync64. - // After this is done, no new peers will be accepted. - close(pm.quitSync) - pm.wg.Wait() - - // Disconnect existing sessions. - // This also closes the gate for any new registrations on the peer set. - // sessions which are already established but not added to pm.peers yet - // will exit when they try to register. - pm.peers.Close() - pm.peerWG.Wait() - - log.Info("Ethereum protocol stopped") -} - -func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer { - return newPeer(pv, p, rw, getPooledTx) -} - -func (pm *ProtocolManager) runPeer(p *peer) error { - if !pm.chainSync.handlePeerEvent(p) { + // TODO(karalabe): Not sure why this is needed + if !h.chainSync.handlePeerEvent(peer) { return p2p.DiscQuitting } - pm.peerWG.Add(1) - defer pm.peerWG.Done() - return pm.handle(p) -} - -// handle is the callback invoked to manage the life cycle of an eth peer. When -// this function terminates, the peer is disconnected. -func (pm *ProtocolManager) handle(p *peer) error { - // Ignore maxPeers if this is a trusted peer - if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted { - return p2p.DiscTooManyPeers - } - p.Log().Debug("Ethereum peer connected", "name", p.Name()) + h.peerWG.Add(1) + defer h.peerWG.Done() // Execute the Ethereum handshake var ( - genesis = pm.blockchain.Genesis() - head = pm.blockchain.CurrentHeader() + genesis = h.chain.Genesis() + head = h.chain.CurrentHeader() hash = head.Hash() number = head.Number.Uint64() - td = pm.blockchain.GetTd(hash, number) + td = h.chain.GetTd(hash, number) ) - forkID := forkid.NewID(pm.blockchain.Config(), pm.blockchain.Genesis().Hash(), pm.blockchain.CurrentHeader().Number.Uint64()) - if err := p.Handshake(pm.networkID, td, hash, genesis.Hash(), forkID, pm.forkFilter); err != nil { - p.Log().Debug("Ethereum handshake failed", "err", err) + forkID := forkid.NewID(h.chain.Config(), h.chain.Genesis().Hash(), h.chain.CurrentHeader().Number.Uint64()) + if err := peer.Handshake(h.networkID, td, hash, genesis.Hash(), forkID, h.forkFilter); err != nil { + peer.Log().Debug("Ethereum handshake failed", "err", err) return err } + reject := false // reserved peer slots + if atomic.LoadUint32(&h.snapSync) == 1 { + if snap == nil { + // If we are running snap-sync, we want to reserve roughly half the peer + // slots for peers supporting the snap protocol. + // The logic here is; we only allow up to 5 more non-snap peers than snap-peers. + if all, snp := h.peers.len(), h.peers.snapLen(); all-snp > snp+5 { + reject = true + } + } + } + // Ignore maxPeers if this is a trusted peer + if !peer.Peer.Info().Network.Trusted { + if reject || h.peers.len() >= h.maxPeers { + return p2p.DiscTooManyPeers + } + } + peer.Log().Debug("Ethereum peer connected", "name", peer.Name()) // Register the peer locally - if err := pm.peers.Register(p, pm.removePeer); err != nil { - p.Log().Error("Ethereum peer registration failed", "err", err) + if err := h.peers.registerPeer(peer, snap); err != nil { + peer.Log().Error("Ethereum peer registration failed", "err", err) return err } - defer pm.removePeer(p.id) + defer h.removePeer(peer.ID()) + p := h.peers.peer(peer.ID()) + if p == nil { + return errors.New("peer dropped during handling") + } // Register the peer in the downloader. 
If the downloader considers it banned, we disconnect - if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil { + if err := h.downloader.RegisterPeer(peer.ID(), peer.Version(), peer); err != nil { + peer.Log().Error("Failed to register peer in eth syncer", "err", err) return err } - pm.chainSync.handlePeerEvent(p) + if snap != nil { + if err := h.downloader.SnapSyncer.Register(snap); err != nil { + peer.Log().Error("Failed to register peer in snap syncer", "err", err) + return err + } + } + h.chainSync.handlePeerEvent(peer) // Propagate existing transactions. new transactions appearing // after this will be sent via broadcasts. - pm.syncTransactions(p) + h.syncTransactions(peer) // If we have a trusted CHT, reject all peers below that (avoid fast sync eclipse) - if pm.checkpointHash != (common.Hash{}) { + if h.checkpointHash != (common.Hash{}) { // Request the peer's checkpoint header for chain height/weight validation - if err := p.RequestHeadersByNumber(pm.checkpointNumber, 1, 0, false); err != nil { + if err := peer.RequestHeadersByNumber(h.checkpointNumber, 1, 0, false); err != nil { return err } // Start a timer to disconnect if the peer doesn't reply in time p.syncDrop = time.AfterFunc(syncChallengeTimeout, func() { - p.Log().Warn("Checkpoint challenge timed out, dropping", "addr", p.RemoteAddr(), "type", p.Name()) - pm.removePeer(p.id) + peer.Log().Warn("Checkpoint challenge timed out, dropping", "addr", peer.RemoteAddr(), "type", peer.Name()) + h.removePeer(peer.ID()) }) // Make sure it's cleaned up if the peer dies off defer func() { @@ -362,474 +326,115 @@ func (pm *ProtocolManager) handle(p *peer) error { }() } // If we have any explicit whitelist block hashes, request them - for number := range pm.whitelist { - if err := p.RequestHeadersByNumber(number, 1, 0, false); err != nil { + for number := range h.whitelist { + if err := peer.RequestHeadersByNumber(number, 1, 0, false); err != nil { return err } } // Handle incoming messages until the connection is torn down - for { - if err := pm.handleMsg(p); err != nil { - p.Log().Debug("Ethereum message handling failed", "err", err) - return err - } - } + return handler(peer) } -// handleMsg is invoked whenever an inbound message is received from a remote -// peer. The remote connection is torn down upon returning any error. -func (pm *ProtocolManager) handleMsg(p *peer) error { - // Read the next message from the remote peer, and ensure it's fully consumed - msg, err := p.rw.ReadMsg() - if err != nil { +// runSnapExtension registers a `snap` peer into the joint eth/snap peerset and +// starts handling inbound messages. As `snap` is only a satellite protocol to +// `eth`, all subsystem registrations and lifecycle management will be done by +// the main `eth` handler to prevent strange races. 
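The peer-slot reservation in runEthPeer above boils down to a single comparison: while snap sync is active, an eth-only peer is turned away once non-snap peers outnumber snap peers by more than five. Expressed as a standalone helper, purely illustrative and not part of the change:

	// rejectEthOnlyPeer reports whether a new peer without the snap extension
	// should be refused, mirroring the slot-reservation rule in runEthPeer.
	// E.g. with 12 peers of which 3 speak snap: 9 non-snap > 3+5, so the next
	// eth-only peer is rejected until more snap peers connect.
	func rejectEthOnlyPeer(totalPeers, snapPeers int) bool {
		nonSnap := totalPeers - snapPeers
		return nonSnap > snapPeers+5
	}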
+func (h *handler) runSnapExtension(peer *snap.Peer, handler snap.Handler) error { + h.peerWG.Add(1) + defer h.peerWG.Done() + + if err := h.peers.registerSnapExtension(peer); err != nil { + peer.Log().Error("Snapshot extension registration failed", "err", err) return err } - if msg.Size > protocolMaxMsgSize { - return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize) - } - defer msg.Discard() - - // Handle the message depending on its contents - switch { - case msg.Code == StatusMsg: - // Status messages should never arrive after the handshake - return errResp(ErrExtraStatusMsg, "uncontrolled status message") - - // Block header query, collect the requested headers and reply - case msg.Code == GetBlockHeadersMsg: - // Decode the complex header query - var query getBlockHeadersData - if err := msg.Decode(&query); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - hashMode := query.Origin.Hash != (common.Hash{}) - first := true - maxNonCanonical := uint64(100) - - // Gather headers until the fetch or network limits is reached - var ( - bytes common.StorageSize - headers []*types.Header - unknown bool - ) - for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch { - // Retrieve the next header satisfying the query - var origin *types.Header - if hashMode { - if first { - first = false - origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash) - if origin != nil { - query.Origin.Number = origin.Number.Uint64() - } - } else { - origin = pm.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number) - } - } else { - origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number) - } - if origin == nil { - break - } - headers = append(headers, origin) - bytes += estHeaderRlpSize - - // Advance to the next header of the query - switch { - case hashMode && query.Reverse: - // Hash based traversal towards the genesis block - ancestor := query.Skip + 1 - if ancestor == 0 { - unknown = true - } else { - query.Origin.Hash, query.Origin.Number = pm.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) - unknown = (query.Origin.Hash == common.Hash{}) - } - case hashMode && !query.Reverse: - // Hash based traversal towards the leaf block - var ( - current = origin.Number.Uint64() - next = current + query.Skip + 1 - ) - if next <= current { - infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") - p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) - unknown = true - } else { - if header := pm.blockchain.GetHeaderByNumber(next); header != nil { - nextHash := header.Hash() - expOldHash, _ := pm.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) - if expOldHash == query.Origin.Hash { - query.Origin.Hash, query.Origin.Number = nextHash, next - } else { - unknown = true - } - } else { - unknown = true - } - } - case query.Reverse: - // Number based traversal towards the genesis block - if query.Origin.Number >= query.Skip+1 { - query.Origin.Number -= query.Skip + 1 - } else { - unknown = true - } - - case !query.Reverse: - // Number based traversal towards the leaf block - query.Origin.Number += query.Skip + 1 - } - } - return p.SendBlockHeaders(headers) - - case msg.Code == BlockHeadersMsg: - // A batch of headers arrived to one of our previous requests - var headers []*types.Header - if err := msg.Decode(&headers); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } 
- // If no headers were received, but we're expencting a checkpoint header, consider it that - if len(headers) == 0 && p.syncDrop != nil { - // Stop the timer either way, decide later to drop or not - p.syncDrop.Stop() - p.syncDrop = nil - - // If we're doing a fast sync, we must enforce the checkpoint block to avoid - // eclipse attacks. Unsynced nodes are welcome to connect after we're done - // joining the network - if atomic.LoadUint32(&pm.fastSync) == 1 { - p.Log().Warn("Dropping unsynced node during fast sync", "addr", p.RemoteAddr(), "type", p.Name()) - return errors.New("unsynced node cannot serve fast sync") - } - } - // Filter out any explicitly requested headers, deliver the rest to the downloader - filter := len(headers) == 1 - if filter { - // If it's a potential sync progress check, validate the content and advertised chain weight - if p.syncDrop != nil && headers[0].Number.Uint64() == pm.checkpointNumber { - // Disable the sync drop timer - p.syncDrop.Stop() - p.syncDrop = nil - - // Validate the header and either drop the peer or continue - if headers[0].Hash() != pm.checkpointHash { - return errors.New("checkpoint hash mismatch") - } - return nil - } - // Otherwise if it's a whitelisted block, validate against the set - if want, ok := pm.whitelist[headers[0].Number.Uint64()]; ok { - if hash := headers[0].Hash(); want != hash { - p.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want) - return errors.New("whitelist block mismatch") - } - p.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want) - } - // Irrelevant of the fork checks, send the header to the fetcher just in case - headers = pm.blockFetcher.FilterHeaders(p.id, headers, time.Now()) - } - if len(headers) > 0 || !filter { - err := pm.downloader.DeliverHeaders(p.id, headers) - if err != nil { - log.Debug("Failed to deliver headers", "err", err) - } - } - - case msg.Code == GetBlockBodiesMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather blocks until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - bodies []rlp.RawValue - ) - for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch { - // Retrieve the hash of the next block - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested block body, stopping if enough was found - if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 { - bodies = append(bodies, data) - bytes += len(data) - } - } - return p.SendBlockBodiesRLP(bodies) - - case msg.Code == BlockBodiesMsg: - // A batch of block bodies arrived to one of our previous requests - var request blockBodiesData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver them all to the downloader for queuing - transactions := make([][]*types.Transaction, len(request)) - uncles := make([][]*types.Header, len(request)) + return handler(peer) +} - for i, body := range request { - transactions[i] = body.Transactions - uncles[i] = body.Uncles - } - // Filter out any explicitly requested bodies, deliver the rest to the downloader - filter := len(transactions) > 0 || len(uncles) > 0 - if filter { - transactions, uncles = pm.blockFetcher.FilterBodies(p.id, transactions, uncles, time.Now()) - } - 
if len(transactions) > 0 || len(uncles) > 0 || !filter { - err := pm.downloader.DeliverBodies(p.id, transactions, uncles) - if err != nil { - log.Debug("Failed to deliver bodies", "err", err) - } - } +// removePeer unregisters a peer from the downloader and fetchers, removes it from +// the set of tracked peers and closes the network connection to it. +func (h *handler) removePeer(id string) { + // Create a custom logger to avoid printing the entire id + var logger log.Logger + if len(id) < 16 { + // Tests use short IDs, don't choke on them + logger = log.New("peer", id) + } else { + logger = log.New("peer", id[:8]) + } + // Abort if the peer does not exist + peer := h.peers.peer(id) + if peer == nil { + logger.Error("Ethereum peer removal failed", "err", errPeerNotRegistered) + return + } + // Remove the `eth` peer if it exists + logger.Debug("Removing Ethereum peer", "snap", peer.snapExt != nil) - case p.version >= eth63 && msg.Code == GetNodeDataMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather state data until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - data [][]byte - ) - for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch { - // Retrieve the hash of the next state entry - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested state entry, stopping if enough was found - // todo now the code and trienode is mixed in the protocol level, - // separate these two types. - if !pm.downloader.SyncBloomContains(hash[:]) { - // Only lookup the trie node if there's chance that we actually have it - continue - } - entry, err := pm.blockchain.TrieNode(hash) - if len(entry) == 0 || err != nil { - // Read the contract code with prefix only to save unnecessary lookups. 
- entry, err = pm.blockchain.ContractCodeWithPrefix(hash) - } - if err == nil && len(entry) > 0 { - data = append(data, entry) - bytes += len(entry) - } - } - return p.SendNodeData(data) + // Remove the `snap` extension if it exists + if peer.snapExt != nil { + h.downloader.SnapSyncer.Unregister(id) + } + h.downloader.UnregisterPeer(id) + h.txFetcher.Drop(id) - case p.version >= eth63 && msg.Code == NodeDataMsg: - // A batch of node state data arrived to one of our previous requests - var data [][]byte - if err := msg.Decode(&data); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver all to the downloader - if err := pm.downloader.DeliverNodeData(p.id, data); err != nil { - log.Debug("Failed to deliver node state data", "err", err) - } + if err := h.peers.unregisterPeer(id); err != nil { + logger.Error("Ethereum peer removal failed", "err", err) + } + // Hard disconnect at the networking layer + peer.Peer.Disconnect(p2p.DiscUselessPeer) +} - case p.version >= eth63 && msg.Code == GetReceiptsMsg: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather state data until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - receipts []rlp.RawValue - ) - for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch { - // Retrieve the hash of the next block - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested block's receipts, skipping if unknown to us - results := pm.blockchain.GetReceiptsByHash(hash) - if results == nil { - if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { - continue - } - } - // If known, encode and queue for response packet - if encoded, err := rlp.EncodeToBytes(results); err != nil { - log.Error("Failed to encode receipt", "err", err) - } else { - receipts = append(receipts, encoded) - bytes += len(encoded) - } - } - return p.SendReceiptsRLP(receipts) +func (h *handler) Start(maxPeers int) { + h.maxPeers = maxPeers - case p.version >= eth63 && msg.Code == ReceiptsMsg: - // A batch of receipts arrived to one of our previous requests - var receipts [][]*types.Receipt - if err := msg.Decode(&receipts); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Deliver all to the downloader - if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil { - log.Debug("Failed to deliver receipts", "err", err) - } + // broadcast transactions + h.wg.Add(1) + h.txsCh = make(chan core.NewTxsEvent, txChanSize) + h.txsSub = h.txpool.SubscribeNewTxsEvent(h.txsCh) + go h.txBroadcastLoop() - case msg.Code == NewBlockHashesMsg: - var announces newBlockHashesData - if err := msg.Decode(&announces); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - // Mark the hashes as present at the remote node - for _, block := range announces { - p.MarkBlock(block.Hash) - } - // Schedule all the unknown hashes for retrieval - unknown := make(newBlockHashesData, 0, len(announces)) - for _, block := range announces { - if !pm.blockchain.HasBlock(block.Hash, block.Number) { - unknown = append(unknown, block) - } - } - for _, block := range unknown { - pm.blockFetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies) - } + // broadcast mined blocks + h.wg.Add(1) + 
h.minedBlockSub = h.eventMux.Subscribe(core.NewMinedBlockEvent{}) + go h.minedBroadcastLoop() - case msg.Code == NewBlockMsg: - // Retrieve and decode the propagated block - var request newBlockData - if err := msg.Decode(&request); err != nil { - return errResp(ErrDecode, "%v: %v", msg, err) - } - if hash := types.CalcUncleHash(request.Block.Uncles()); hash != request.Block.UncleHash() { - log.Warn("Propagated block has invalid uncles", "have", hash, "exp", request.Block.UncleHash()) - break // TODO(karalabe): return error eventually, but wait a few releases - } - if hash := types.DeriveSha(request.Block.Transactions(), trie.NewStackTrie(nil)); hash != request.Block.TxHash() { - log.Warn("Propagated block has invalid body", "have", hash, "exp", request.Block.TxHash()) - break // TODO(karalabe): return error eventually, but wait a few releases - } - if err := request.sanityCheck(); err != nil { - return err - } - request.Block.ReceivedAt = msg.ReceivedAt - request.Block.ReceivedFrom = p - - // Mark the peer as owning the block and schedule it for import - p.MarkBlock(request.Block.Hash()) - pm.blockFetcher.Enqueue(p.id, request.Block) - - // Assuming the block is importable by the peer, but possibly not yet done so, - // calculate the head hash and TD that the peer truly must have. - var ( - trueHead = request.Block.ParentHash() - trueTD = new(big.Int).Sub(request.TD, request.Block.Difficulty()) - ) - // Update the peer's total difficulty if better than the previous - if _, td := p.Head(); trueTD.Cmp(td) > 0 { - p.SetHead(trueHead, trueTD) - pm.chainSync.handlePeerEvent(p) - } + // start sync handlers + h.wg.Add(2) + go h.chainSync.loop() + go h.txsyncLoop64() // TODO(karalabe): Legacy initial tx echange, drop with eth/64. +} - case msg.Code == NewPooledTransactionHashesMsg && p.version >= eth65: - // New transaction announcement arrived, make sure we have - // a valid and fresh chain to handle them - if atomic.LoadUint32(&pm.acceptTxs) == 0 { - break - } - var hashes []common.Hash - if err := msg.Decode(&hashes); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Schedule all the unknown hashes for retrieval - for _, hash := range hashes { - p.MarkTransaction(hash) - } - pm.txFetcher.Notify(p.id, hashes) +func (h *handler) Stop() { + h.txsSub.Unsubscribe() // quits txBroadcastLoop + h.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop - case msg.Code == GetPooledTransactionsMsg && p.version >= eth65: - // Decode the retrieval message - msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size)) - if _, err := msgStream.List(); err != nil { - return err - } - // Gather transactions until the fetch or network limits is reached - var ( - hash common.Hash - bytes int - hashes []common.Hash - txs []rlp.RawValue - ) - for bytes < softResponseLimit { - // Retrieve the hash of the next block - if err := msgStream.Decode(&hash); err == rlp.EOL { - break - } else if err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Retrieve the requested transaction, skipping if unknown to us - tx := pm.txpool.Get(hash) - if tx == nil { - continue - } - // If known, encode and queue for response packet - if encoded, err := rlp.EncodeToBytes(tx); err != nil { - log.Error("Failed to encode transaction", "err", err) - } else { - hashes = append(hashes, hash) - txs = append(txs, encoded) - bytes += len(encoded) - } - } - return p.SendPooledTransactionsRLP(hashes, txs) + // Quit chainSync and txsync64. + // After this is done, no new peers will be accepted. 
+ close(h.quitSync) + h.wg.Wait() - case msg.Code == TransactionMsg || (msg.Code == PooledTransactionsMsg && p.version >= eth65): - // Transactions arrived, make sure we have a valid and fresh chain to handle them - if atomic.LoadUint32(&pm.acceptTxs) == 0 { - break - } - // Transactions can be processed, parse all of them and deliver to the pool - var txs []*types.Transaction - if err := msg.Decode(&txs); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - for i, tx := range txs { - // Validate and mark the remote transaction - if tx == nil { - return errResp(ErrDecode, "transaction %d is nil", i) - } - p.MarkTransaction(tx.Hash()) - } - pm.txFetcher.Enqueue(p.id, txs, msg.Code == PooledTransactionsMsg) + // Disconnect existing sessions. + // This also closes the gate for any new registrations on the peer set. + // sessions which are already established but not added to h.peers yet + // will exit when they try to register. + h.peers.close() + h.peerWG.Wait() - default: - return errResp(ErrInvalidMsgCode, "%v", msg.Code) - } - return nil + log.Info("Ethereum protocol stopped") } // BroadcastBlock will either propagate a block to a subset of its peers, or // will only announce its availability (depending what's requested). -func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { +func (h *handler) BroadcastBlock(block *types.Block, propagate bool) { hash := block.Hash() - peers := pm.peers.PeersWithoutBlock(hash) + peers := h.peers.peersWithoutBlock(hash) // If propagation is requested, send to a subset of the peer if propagate { // Calculate the TD of the block (it's not imported yet, so block.Td is not valid) var td *big.Int - if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { - td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1)) + if parent := h.chain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil { + td = new(big.Int).Add(block.Difficulty(), h.chain.GetTd(block.ParentHash(), block.NumberU64()-1)) } else { log.Error("Propagating dangling block", "number", block.Number(), "hash", hash) return @@ -843,7 +448,7 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { return } // Otherwise if the block is indeed in out own chain, announce it - if pm.blockchain.HasBlock(hash, block.NumberU64()) { + if h.chain.HasBlock(hash, block.NumberU64()) { for _, peer := range peers { peer.AsyncSendNewBlockHash(block) } @@ -851,97 +456,74 @@ func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) { } } -// BroadcastTransactions will propagate a batch of transactions to all peers which are not known to +// BroadcastTransactions will propagate a batch of transactions +// - To a square root of all peers +// - And, separately, as announcements to all peers which are not known to // already have the given transaction. 
-func (pm *ProtocolManager) BroadcastTransactions(txs types.Transactions, propagate bool) { +func (h *handler) BroadcastTransactions(txs types.Transactions) { var ( - txset = make(map[*peer][]common.Hash) - annos = make(map[*peer][]common.Hash) + annoCount int // Count of announcements made + annoPeers int + directCount int // Count of the txs sent directly to peers + directPeers int // Count of the peers that were sent transactions directly + + txset = make(map[*ethPeer][]common.Hash) // Set peer->hash to transfer directly + annos = make(map[*ethPeer][]common.Hash) // Set peer->hash to announce + ) // Broadcast transactions to a batch of peers not knowing about it - if propagate { - for _, tx := range txs { - peers := pm.peers.PeersWithoutTx(tx.Hash()) - - // Send the block to a subset of our peers - transfer := peers[:int(math.Sqrt(float64(len(peers))))] - for _, peer := range transfer { - txset[peer] = append(txset[peer], tx.Hash()) - } - log.Trace("Broadcast transaction", "hash", tx.Hash(), "recipients", len(peers)) - } - for peer, hashes := range txset { - peer.AsyncSendTransactions(hashes) - } - return - } - // Otherwise only broadcast the announcement to peers for _, tx := range txs { - peers := pm.peers.PeersWithoutTx(tx.Hash()) - for _, peer := range peers { + peers := h.peers.peersWithoutTransaction(tx.Hash()) + // Send the tx unconditionally to a subset of our peers + numDirect := int(math.Sqrt(float64(len(peers)))) + for _, peer := range peers[:numDirect] { + txset[peer] = append(txset[peer], tx.Hash()) + } + // For the remaining peers, send announcement only + for _, peer := range peers[numDirect:] { annos[peer] = append(annos[peer], tx.Hash()) } } + for peer, hashes := range txset { + directPeers++ + directCount += len(hashes) + peer.AsyncSendTransactions(hashes) + } for peer, hashes := range annos { - if peer.version >= eth65 { + annoPeers++ + annoCount += len(hashes) + if peer.Version() >= eth.ETH65 { peer.AsyncSendPooledTransactionHashes(hashes) } else { peer.AsyncSendTransactions(hashes) } } + log.Debug("Transaction broadcast", "txs", len(txs), + "announce packs", annoPeers, "announced hashes", annoCount, + "tx packs", directPeers, "broadcast txs", directCount) } // minedBroadcastLoop sends mined blocks to connected peers. -func (pm *ProtocolManager) minedBroadcastLoop() { - defer pm.wg.Done() +func (h *handler) minedBroadcastLoop() { + defer h.wg.Done() - for obj := range pm.minedBlockSub.Chan() { + for obj := range h.minedBlockSub.Chan() { if ev, ok := obj.Data.(core.NewMinedBlockEvent); ok { - pm.BroadcastBlock(ev.Block, true) // First propagate block to peers - pm.BroadcastBlock(ev.Block, false) // Only then announce to the rest + h.BroadcastBlock(ev.Block, true) // First propagate block to peers + h.BroadcastBlock(ev.Block, false) // Only then announce to the rest } } } // txBroadcastLoop announces new transactions to connected peers. 
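The reworked broadcast policy sends each transaction in full to roughly the square root of the eligible peers and only announces its hash to the rest, keeping bandwidth sub-linear in peer count while still guaranteeing direct delivery paths. A minimal sketch of the split, with peer IDs standing in for the real peer objects (assumes the standard math import):

	// splitBroadcast partitions peers the same way BroadcastTransactions does:
	// the first sqrt(n) receive the full transaction, the remainder only the hash.
	// With 100 peers this sends the body to 10 of them and announces to the other 90.
	func splitBroadcast(peers []string) (direct, announce []string) {
		numDirect := int(math.Sqrt(float64(len(peers))))
		return peers[:numDirect], peers[numDirect:]
	}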
-func (pm *ProtocolManager) txBroadcastLoop() { - defer pm.wg.Done() - +func (h *handler) txBroadcastLoop() { + defer h.wg.Done() for { select { - case event := <-pm.txsCh: - // For testing purpose only, disable propagation - if pm.broadcastTxAnnouncesOnly { - pm.BroadcastTransactions(event.Txs, false) - continue - } - pm.BroadcastTransactions(event.Txs, true) // First propagate transactions to peers - pm.BroadcastTransactions(event.Txs, false) // Only then announce to the rest - - case <-pm.txsSub.Err(): + case event := <-h.txsCh: + h.BroadcastTransactions(event.Txs) + case <-h.txsSub.Err(): return } } } - -// NodeInfo represents a short summary of the Ethereum sub-protocol metadata -// known about the host peer. -type NodeInfo struct { - Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) - Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain - Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block - Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules - Head common.Hash `json:"head"` // SHA3 hash of the host's best owned block -} - -// NodeInfo retrieves some protocol metadata about the running host node. -func (pm *ProtocolManager) NodeInfo() *NodeInfo { - currentBlock := pm.blockchain.CurrentBlock() - return &NodeInfo{ - Network: pm.networkID, - Difficulty: pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()), - Genesis: pm.blockchain.Genesis().Hash(), - Config: pm.blockchain.Config(), - Head: currentBlock.Hash(), - } -} diff --git a/eth/handler_eth.go b/eth/handler_eth.go new file mode 100644 index 0000000000..3ff9f2245b --- /dev/null +++ b/eth/handler_eth.go @@ -0,0 +1,218 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "fmt" + "math/big" + "sync/atomic" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/trie" +) + +// ethHandler implements the eth.Backend interface to handle the various network +// packets that are sent as replies or broadcasts. +type ethHandler handler + +func (h *ethHandler) Chain() *core.BlockChain { return h.chain } +func (h *ethHandler) StateBloom() *trie.SyncBloom { return h.stateBloom } +func (h *ethHandler) TxPool() eth.TxPool { return h.txpool } + +// RunPeer is invoked when a peer joins on the `eth` protocol. 
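ethHandler is declared as a distinct named type over the same struct (`type ethHandler handler`), so the backend-facing callbacks get their own method set while plain pointer conversions such as `(*handler)(h)` hop back to the full handler without copying any data. A toy, self-contained illustration of that pattern; the types below are hypothetical, not the ones from the diff:

	package main

	import "fmt"

	type handler struct{ peers int }

	func (h *handler) run() { fmt.Println("core handler, peers:", h.peers) }

	// Same underlying struct, different (smaller) method set exposed to callers.
	type ethHandler handler

	// RunPeer delegates back to the core type via a free pointer conversion.
	func (h *ethHandler) RunPeer() { (*handler)(h).run() }

	func main() {
		h := &handler{peers: 3}
		(*ethHandler)(h).RunPeer() // prints: core handler, peers: 3
	}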
+func (h *ethHandler) RunPeer(peer *eth.Peer, hand eth.Handler) error { + return (*handler)(h).runEthPeer(peer, hand) +} + +// PeerInfo retrieves all known `eth` information about a peer. +func (h *ethHandler) PeerInfo(id enode.ID) interface{} { + if p := h.peers.peer(id.String()); p != nil { + return p.info() + } + return nil +} + +// AcceptTxs retrieves whether transaction processing is enabled on the node +// or if inbound transactions should simply be dropped. +func (h *ethHandler) AcceptTxs() bool { + return atomic.LoadUint32(&h.acceptTxs) == 1 +} + +// Handle is invoked from a peer's message handler when it receives a new remote +// message that the handler couldn't consume and serve itself. +func (h *ethHandler) Handle(peer *eth.Peer, packet eth.Packet) error { + // Consume any broadcasts and announces, forwarding the rest to the downloader + switch packet := packet.(type) { + case *eth.BlockHeadersPacket: + return h.handleHeaders(peer, *packet) + + case *eth.BlockBodiesPacket: + txset, uncleset := packet.Unpack() + return h.handleBodies(peer, txset, uncleset) + + case *eth.NodeDataPacket: + if err := h.downloader.DeliverNodeData(peer.ID(), *packet); err != nil { + log.Debug("Failed to deliver node state data", "err", err) + } + return nil + + case *eth.ReceiptsPacket: + if err := h.downloader.DeliverReceipts(peer.ID(), *packet); err != nil { + log.Debug("Failed to deliver receipts", "err", err) + } + return nil + + case *eth.NewBlockHashesPacket: + hashes, numbers := packet.Unpack() + return h.handleBlockAnnounces(peer, hashes, numbers) + + case *eth.NewBlockPacket: + return h.handleBlockBroadcast(peer, packet.Block, packet.TD) + + case *eth.NewPooledTransactionHashesPacket: + return h.txFetcher.Notify(peer.ID(), *packet) + + case *eth.TransactionsPacket: + return h.txFetcher.Enqueue(peer.ID(), *packet, false) + + case *eth.PooledTransactionsPacket: + return h.txFetcher.Enqueue(peer.ID(), *packet, true) + + default: + return fmt.Errorf("unexpected eth packet type: %T", packet) + } +} + +// handleHeaders is invoked from a peer's message handler when it transmits a batch +// of headers for the local node to process. +func (h *ethHandler) handleHeaders(peer *eth.Peer, headers []*types.Header) error { + p := h.peers.peer(peer.ID()) + if p == nil { + return errors.New("unregistered during callback") + } + // If no headers were received, but we're expencting a checkpoint header, consider it that + if len(headers) == 0 && p.syncDrop != nil { + // Stop the timer either way, decide later to drop or not + p.syncDrop.Stop() + p.syncDrop = nil + + // If we're doing a fast (or snap) sync, we must enforce the checkpoint block to avoid + // eclipse attacks. 
Unsynced nodes are welcome to connect after we're done + // joining the network + if atomic.LoadUint32(&h.fastSync) == 1 { + peer.Log().Warn("Dropping unsynced node during sync", "addr", peer.RemoteAddr(), "type", peer.Name()) + return errors.New("unsynced node cannot serve sync") + } + } + // Filter out any explicitly requested headers, deliver the rest to the downloader + filter := len(headers) == 1 + if filter { + // If it's a potential sync progress check, validate the content and advertised chain weight + if p.syncDrop != nil && headers[0].Number.Uint64() == h.checkpointNumber { + // Disable the sync drop timer + p.syncDrop.Stop() + p.syncDrop = nil + + // Validate the header and either drop the peer or continue + if headers[0].Hash() != h.checkpointHash { + return errors.New("checkpoint hash mismatch") + } + return nil + } + // Otherwise if it's a whitelisted block, validate against the set + if want, ok := h.whitelist[headers[0].Number.Uint64()]; ok { + if hash := headers[0].Hash(); want != hash { + peer.Log().Info("Whitelist mismatch, dropping peer", "number", headers[0].Number.Uint64(), "hash", hash, "want", want) + return errors.New("whitelist block mismatch") + } + peer.Log().Debug("Whitelist block verified", "number", headers[0].Number.Uint64(), "hash", want) + } + // Irrelevant of the fork checks, send the header to the fetcher just in case + headers = h.blockFetcher.FilterHeaders(peer.ID(), headers, time.Now()) + } + if len(headers) > 0 || !filter { + err := h.downloader.DeliverHeaders(peer.ID(), headers) + if err != nil { + log.Debug("Failed to deliver headers", "err", err) + } + } + return nil +} + +// handleBodies is invoked from a peer's message handler when it transmits a batch +// of block bodies for the local node to process. +func (h *ethHandler) handleBodies(peer *eth.Peer, txs [][]*types.Transaction, uncles [][]*types.Header) error { + // Filter out any explicitly requested bodies, deliver the rest to the downloader + filter := len(txs) > 0 || len(uncles) > 0 + if filter { + txs, uncles = h.blockFetcher.FilterBodies(peer.ID(), txs, uncles, time.Now()) + } + if len(txs) > 0 || len(uncles) > 0 || !filter { + err := h.downloader.DeliverBodies(peer.ID(), txs, uncles) + if err != nil { + log.Debug("Failed to deliver bodies", "err", err) + } + } + return nil +} + +// handleBlockAnnounces is invoked from a peer's message handler when it transmits a +// batch of block announcements for the local node to process. +func (h *ethHandler) handleBlockAnnounces(peer *eth.Peer, hashes []common.Hash, numbers []uint64) error { + // Schedule all the unknown hashes for retrieval + var ( + unknownHashes = make([]common.Hash, 0, len(hashes)) + unknownNumbers = make([]uint64, 0, len(numbers)) + ) + for i := 0; i < len(hashes); i++ { + if !h.chain.HasBlock(hashes[i], numbers[i]) { + unknownHashes = append(unknownHashes, hashes[i]) + unknownNumbers = append(unknownNumbers, numbers[i]) + } + } + for i := 0; i < len(unknownHashes); i++ { + h.blockFetcher.Notify(peer.ID(), unknownHashes[i], unknownNumbers[i], time.Now(), peer.RequestOneHeader, peer.RequestBodies) + } + return nil +} + +// handleBlockBroadcast is invoked from a peer's message handler when it transmits a +// block broadcast for the local node to process. 
+func (h *ethHandler) handleBlockBroadcast(peer *eth.Peer, block *types.Block, td *big.Int) error { + // Schedule the block for import + h.blockFetcher.Enqueue(peer.ID(), block) + + // Assuming the block is importable by the peer, but possibly not yet done so, + // calculate the head hash and TD that the peer truly must have. + var ( + trueHead = block.ParentHash() + trueTD = new(big.Int).Sub(td, block.Difficulty()) + ) + // Update the peer's total difficulty if better than the previous + if _, td := peer.Head(); trueTD.Cmp(td) > 0 { + peer.SetHead(trueHead, trueTD) + h.chainSync.handlePeerEvent(peer) + } + return nil +} diff --git a/eth/handler_eth_test.go b/eth/handler_eth_test.go new file mode 100644 index 0000000000..5f5d4e9e82 --- /dev/null +++ b/eth/handler_eth_test.go @@ -0,0 +1,740 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "fmt" + "math/big" + "math/rand" + "sync/atomic" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" +) + +// testEthHandler is a mock event handler to listen for inbound network requests +// on the `eth` protocol and convert them into a more easily testable form. 
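handleBlockBroadcast above only credits the sending peer with what the announcement actually proves: the parent of the broadcast block as head, and the advertised total difficulty minus the block's own difficulty. A small sketch of that bookkeeping with made-up numbers:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Values are illustrative; a NewBlock message carries the TD *including*
	// the announced block, so the provable TD is at the parent.
	advertisedTD := big.NewInt(131136)
	blockDifficulty := big.NewInt(131072)

	trueTD := new(big.Int).Sub(advertisedTD, blockDifficulty)
	fmt.Println(trueTD) // 64: the total difficulty the peer provably had at the parent
}

Only when this provable TD beats the previously recorded one does the handler update the peer's head and nudge the chain syncer, as the code above shows.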
+type testEthHandler struct { + blockBroadcasts event.Feed + txAnnounces event.Feed + txBroadcasts event.Feed +} + +func (h *testEthHandler) Chain() *core.BlockChain { panic("no backing chain") } +func (h *testEthHandler) StateBloom() *trie.SyncBloom { panic("no backing state bloom") } +func (h *testEthHandler) TxPool() eth.TxPool { panic("no backing tx pool") } +func (h *testEthHandler) AcceptTxs() bool { return true } +func (h *testEthHandler) RunPeer(*eth.Peer, eth.Handler) error { panic("not used in tests") } +func (h *testEthHandler) PeerInfo(enode.ID) interface{} { panic("not used in tests") } + +func (h *testEthHandler) Handle(peer *eth.Peer, packet eth.Packet) error { + switch packet := packet.(type) { + case *eth.NewBlockPacket: + h.blockBroadcasts.Send(packet.Block) + return nil + + case *eth.NewPooledTransactionHashesPacket: + h.txAnnounces.Send(([]common.Hash)(*packet)) + return nil + + case *eth.TransactionsPacket: + h.txBroadcasts.Send(([]*types.Transaction)(*packet)) + return nil + + case *eth.PooledTransactionsPacket: + h.txBroadcasts.Send(([]*types.Transaction)(*packet)) + return nil + + default: + panic(fmt.Sprintf("unexpected eth packet type in tests: %T", packet)) + } +} + +// Tests that peers are correctly accepted (or rejected) based on the advertised +// fork IDs in the protocol handshake. +func TestForkIDSplit64(t *testing.T) { testForkIDSplit(t, 64) } +func TestForkIDSplit65(t *testing.T) { testForkIDSplit(t, 65) } + +func testForkIDSplit(t *testing.T, protocol uint) { + t.Parallel() + + var ( + engine = ethash.NewFaker() + + configNoFork = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1)} + configProFork = ¶ms.ChainConfig{ + HomesteadBlock: big.NewInt(1), + EIP150Block: big.NewInt(2), + EIP155Block: big.NewInt(2), + EIP158Block: big.NewInt(2), + ByzantiumBlock: big.NewInt(3), + } + dbNoFork = rawdb.NewMemoryDatabase() + dbProFork = rawdb.NewMemoryDatabase() + + gspecNoFork = &core.Genesis{Config: configNoFork} + gspecProFork = &core.Genesis{Config: configProFork} + + genesisNoFork = gspecNoFork.MustCommit(dbNoFork) + genesisProFork = gspecProFork.MustCommit(dbProFork) + + chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil) + chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil) + + blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil) + blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil) + + ethNoFork, _ = newHandler(&handlerConfig{ + Database: dbNoFork, + Chain: chainNoFork, + TxPool: newTestTxPool(), + Network: 1, + Sync: downloader.FullSync, + BloomCache: 1, + }) + ethProFork, _ = newHandler(&handlerConfig{ + Database: dbProFork, + Chain: chainProFork, + TxPool: newTestTxPool(), + Network: 1, + Sync: downloader.FullSync, + BloomCache: 1, + }) + ) + ethNoFork.Start(1000) + ethProFork.Start(1000) + + // Clean up everything after ourselves + defer chainNoFork.Stop() + defer chainProFork.Stop() + + defer ethNoFork.Stop() + defer ethProFork.Stop() + + // Both nodes should allow the other to connect (same genesis, next fork is the same) + p2pNoFork, p2pProFork := p2p.MsgPipe() + defer p2pNoFork.Close() + defer p2pProFork.Close() + + peerNoFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) + peerProFork := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) + defer peerNoFork.Close() + defer peerProFork.Close() + + errc := make(chan error, 2) + go 
func(errc chan error) { + errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) + }(errc) + go func(errc chan error) { + errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) + }(errc) + + for i := 0; i < 2; i++ { + select { + case err := <-errc: + if err != nil { + t.Fatalf("frontier nofork <-> profork failed: %v", err) + } + case <-time.After(250 * time.Millisecond): + t.Fatalf("frontier nofork <-> profork handler timeout") + } + } + // Progress into Homestead. Fork's match, so we don't care what the future holds + chainNoFork.InsertChain(blocksNoFork[:1]) + chainProFork.InsertChain(blocksProFork[:1]) + + p2pNoFork, p2pProFork = p2p.MsgPipe() + defer p2pNoFork.Close() + defer p2pProFork.Close() + + peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) + peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) + defer peerNoFork.Close() + defer peerProFork.Close() + + errc = make(chan error, 2) + go func(errc chan error) { + errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) + }(errc) + go func(errc chan error) { + errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) + }(errc) + + for i := 0; i < 2; i++ { + select { + case err := <-errc: + if err != nil { + t.Fatalf("homestead nofork <-> profork failed: %v", err) + } + case <-time.After(250 * time.Millisecond): + t.Fatalf("homestead nofork <-> profork handler timeout") + } + } + // Progress into Spurious. Forks mismatch, signalling differing chains, reject + chainNoFork.InsertChain(blocksNoFork[1:2]) + chainProFork.InsertChain(blocksProFork[1:2]) + + p2pNoFork, p2pProFork = p2p.MsgPipe() + defer p2pNoFork.Close() + defer p2pProFork.Close() + + peerNoFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) + peerProFork = eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) + defer peerNoFork.Close() + defer peerProFork.Close() + + errc = make(chan error, 2) + go func(errc chan error) { + errc <- ethNoFork.runEthPeer(peerProFork, func(peer *eth.Peer) error { return nil }) + }(errc) + go func(errc chan error) { + errc <- ethProFork.runEthPeer(peerNoFork, func(peer *eth.Peer) error { return nil }) + }(errc) + + var successes int + for i := 0; i < 2; i++ { + select { + case err := <-errc: + if err == nil { + successes++ + if successes == 2 { // Only one side disconnects + t.Fatalf("fork ID rejection didn't happen") + } + } + case <-time.After(250 * time.Millisecond): + t.Fatalf("split peers not rejected") + } + } +} + +// Tests that received transactions are added to the local pool. 
+func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) } +func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) } + +func testRecvTransactions(t *testing.T, protocol uint) { + t.Parallel() + + // Create a message handler, configure it to accept transactions and watch them + handler := newTestHandler() + defer handler.close() + + handler.handler.acceptTxs = 1 // mark synced to accept transactions + + txs := make(chan core.NewTxsEvent) + sub := handler.txpool.SubscribeNewTxsEvent(txs) + defer sub.Unsubscribe() + + // Create a source peer to send messages through and a sink handler to receive them + p2pSrc, p2pSink := p2p.MsgPipe() + defer p2pSrc.Close() + defer p2pSink.Close() + + src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, handler.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, handler.txpool) + defer src.Close() + defer sink.Close() + + go handler.handler.runEthPeer(sink, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(handler.handler), peer) + }) + // Run the handshake locally to avoid spinning up a source handler + var ( + genesis = handler.chain.Genesis() + head = handler.chain.CurrentBlock() + td = handler.chain.GetTd(head.Hash(), head.NumberU64()) + ) + if err := src.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { + t.Fatalf("failed to run protocol handshake") + } + // Send the transaction to the sink and verify that it's added to the tx pool + tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + + if err := src.SendTransactions([]*types.Transaction{tx}); err != nil { + t.Fatalf("failed to send transaction: %v", err) + } + select { + case event := <-txs: + if len(event.Txs) != 1 { + t.Errorf("wrong number of added transactions: got %d, want 1", len(event.Txs)) + } else if event.Txs[0].Hash() != tx.Hash() { + t.Errorf("added wrong tx hash: got %v, want %v", event.Txs[0].Hash(), tx.Hash()) + } + case <-time.After(2 * time.Second): + t.Errorf("no NewTxsEvent received within 2 seconds") + } +} + +// This test checks that pending transactions are sent. 
+func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) } +func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) } + +func testSendTransactions(t *testing.T, protocol uint) { + t.Parallel() + + // Create a message handler and fill the pool with big transactions + handler := newTestHandler() + defer handler.close() + + insert := make([]*types.Transaction, 100) + for nonce := range insert { + tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, txsyncPackSize/10)) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + + insert[nonce] = tx + } + go handler.txpool.AddRemotes(insert) // Need goroutine to not block on feed + time.Sleep(250 * time.Millisecond) // Wait until tx events get out of the system (can't use events, tx broadcaster races with peer join) + + // Create a source handler to send messages through and a sink peer to receive them + p2pSrc, p2pSink := p2p.MsgPipe() + defer p2pSrc.Close() + defer p2pSink.Close() + + src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, handler.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, handler.txpool) + defer src.Close() + defer sink.Close() + + go handler.handler.runEthPeer(src, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(handler.handler), peer) + }) + // Run the handshake locally to avoid spinning up a source handler + var ( + genesis = handler.chain.Genesis() + head = handler.chain.CurrentBlock() + td = handler.chain.GetTd(head.Hash(), head.NumberU64()) + ) + if err := sink.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { + t.Fatalf("failed to run protocol handshake") + } + // After the handshake completes, the source handler should stream the sink + // the transactions, subscribe to all inbound network events + backend := new(testEthHandler) + + anns := make(chan []common.Hash) + annSub := backend.txAnnounces.Subscribe(anns) + defer annSub.Unsubscribe() + + bcasts := make(chan []*types.Transaction) + bcastSub := backend.txBroadcasts.Subscribe(bcasts) + defer bcastSub.Unsubscribe() + + go eth.Handle(backend, sink) + + // Make sure we get all the transactions on the correct channels + seen := make(map[common.Hash]struct{}) + for len(seen) < len(insert) { + switch protocol { + case 63, 64: + select { + case <-anns: + t.Errorf("tx announce received on pre eth/65") + case txs := <-bcasts: + for _, tx := range txs { + if _, ok := seen[tx.Hash()]; ok { + t.Errorf("duplicate transaction announced: %x", tx.Hash()) + } + seen[tx.Hash()] = struct{}{} + } + } + case 65: + select { + case hashes := <-anns: + for _, hash := range hashes { + if _, ok := seen[hash]; ok { + t.Errorf("duplicate transaction announced: %x", hash) + } + seen[hash] = struct{}{} + } + case <-bcasts: + t.Errorf("initial tx broadcast received on post eth/65") + } + + default: + panic("unsupported protocol, please extend test") + } + } + for _, tx := range insert { + if _, ok := seen[tx.Hash()]; !ok { + t.Errorf("missing transaction: %x", tx.Hash()) + } + } +} + +// Tests that transactions get propagated to all attached peers, either via direct +// broadcasts or via announcements/retrievals. 
+func TestTransactionPropagation64(t *testing.T) { testTransactionPropagation(t, 64) } +func TestTransactionPropagation65(t *testing.T) { testTransactionPropagation(t, 65) } + +func testTransactionPropagation(t *testing.T, protocol uint) { + t.Parallel() + + // Create a source handler to send transactions from and a number of sinks + // to receive them. We need multiple sinks since a one-to-one peering would + // broadcast all transactions without announcement. + source := newTestHandler() + defer source.close() + + sinks := make([]*testHandler, 10) + for i := 0; i < len(sinks); i++ { + sinks[i] = newTestHandler() + defer sinks[i].close() + + sinks[i].handler.acceptTxs = 1 // mark synced to accept transactions + } + // Interconnect all the sink handlers with the source handler + for i, sink := range sinks { + sink := sink // Closure for gorotuine below + + sourcePipe, sinkPipe := p2p.MsgPipe() + defer sourcePipe.Close() + defer sinkPipe.Close() + + sourcePeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, source.txpool) + sinkPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, sink.txpool) + defer sourcePeer.Close() + defer sinkPeer.Close() + + go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + go sink.handler.runEthPeer(sinkPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(sink.handler), peer) + }) + } + // Subscribe to all the transaction pools + txChs := make([]chan core.NewTxsEvent, len(sinks)) + for i := 0; i < len(sinks); i++ { + txChs[i] = make(chan core.NewTxsEvent, 1024) + + sub := sinks[i].txpool.SubscribeNewTxsEvent(txChs[i]) + defer sub.Unsubscribe() + } + // Fill the source pool with transactions and wait for them at the sinks + txs := make([]*types.Transaction, 1024) + for nonce := range txs { + tx := types.NewTransaction(uint64(nonce), common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil) + tx, _ = types.SignTx(tx, types.HomesteadSigner{}, testKey) + + txs[nonce] = tx + } + source.txpool.AddRemotes(txs) + + // Iterate through all the sinks and ensure they all got the transactions + for i := range sinks { + for arrived := 0; arrived < len(txs); { + select { + case event := <-txChs[i]: + arrived += len(event.Txs) + case <-time.NewTimer(time.Second).C: + t.Errorf("sink %d: transaction propagation timed out: have %d, want %d", i, arrived, len(txs)) + } + } + } +} + +// Tests that post eth protocol handshake, clients perform a mutual checkpoint +// challenge to validate each other's chains. Hash mismatches, or missing ones +// during a fast sync should lead to the peer getting dropped. 
+func TestCheckpointChallenge(t *testing.T) { + tests := []struct { + syncmode downloader.SyncMode + checkpoint bool + timeout bool + empty bool + match bool + drop bool + }{ + // If checkpointing is not enabled locally, don't challenge and don't drop + {downloader.FullSync, false, false, false, false, false}, + {downloader.FastSync, false, false, false, false, false}, + + // If checkpointing is enabled locally and remote response is empty, only drop during fast sync + {downloader.FullSync, true, false, true, false, false}, + {downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer + + // If checkpointing is enabled locally and remote response mismatches, always drop + {downloader.FullSync, true, false, false, false, true}, + {downloader.FastSync, true, false, false, false, true}, + + // If checkpointing is enabled locally and remote response matches, never drop + {downloader.FullSync, true, false, false, true, false}, + {downloader.FastSync, true, false, false, true, false}, + + // If checkpointing is enabled locally and remote times out, always drop + {downloader.FullSync, true, true, false, true, true}, + {downloader.FastSync, true, true, false, true, true}, + } + for _, tt := range tests { + t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) { + testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop) + }) + } +} + +func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) { + // Reduce the checkpoint handshake challenge timeout + defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout) + syncChallengeTimeout = 250 * time.Millisecond + + // Create a test handler and inject a CHT into it. The injection is a bit + // ugly, but it beats creating everything manually just to avoid reaching + // into the internals a bit. 
+ handler := newTestHandler() + defer handler.close() + + if syncmode == downloader.FastSync { + atomic.StoreUint32(&handler.handler.fastSync, 1) + } else { + atomic.StoreUint32(&handler.handler.fastSync, 0) + } + var response *types.Header + if checkpoint { + number := (uint64(rand.Intn(500))+1)*params.CHTFrequency - 1 + response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")} + + handler.handler.checkpointNumber = number + handler.handler.checkpointHash = response.Hash() + } + // Create a challenger peer and a challenged one + p2pLocal, p2pRemote := p2p.MsgPipe() + defer p2pLocal.Close() + defer p2pRemote.Close() + + local := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{1}, "", nil), p2pLocal, handler.txpool) + remote := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{2}, "", nil), p2pRemote, handler.txpool) + defer local.Close() + defer remote.Close() + + go handler.handler.runEthPeer(local, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(handler.handler), peer) + }) + // Run the handshake locally to avoid spinning up a remote handler + var ( + genesis = handler.chain.Genesis() + head = handler.chain.CurrentBlock() + td = handler.chain.GetTd(head.Hash(), head.NumberU64()) + ) + if err := remote.Handshake(1, td, head.Hash(), genesis.Hash(), forkid.NewIDWithChain(handler.chain), forkid.NewFilter(handler.chain)); err != nil { + t.Fatalf("failed to run protocol handshake") + } + // Connect a new peer and check that we receive the checkpoint challenge + if checkpoint { + if err := remote.ExpectRequestHeadersByNumber(response.Number.Uint64(), 1, 0, false); err != nil { + t.Fatalf("challenge mismatch: %v", err) + } + // Create a block to reply to the challenge if no timeout is simulated + if !timeout { + if empty { + if err := remote.SendBlockHeaders([]*types.Header{}); err != nil { + t.Fatalf("failed to answer challenge: %v", err) + } + } else if match { + if err := remote.SendBlockHeaders([]*types.Header{response}); err != nil { + t.Fatalf("failed to answer challenge: %v", err) + } + } else { + if err := remote.SendBlockHeaders([]*types.Header{{Number: response.Number}}); err != nil { + t.Fatalf("failed to answer challenge: %v", err) + } + } + } + } + // Wait until the test timeout passes to ensure proper cleanup + time.Sleep(syncChallengeTimeout + 300*time.Millisecond) + + // Verify that the remote peer is maintained or dropped + if drop { + if peers := handler.handler.peers.len(); peers != 0 { + t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) + } + } else { + if peers := handler.handler.peers.len(); peers != 1 { + t.Fatalf("peer count mismatch: have %d, want %d", peers, 1) + } + } +} + +// Tests that blocks are broadcast to a sqrt number of peers only. 
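The injected checkpoint above is pinned to the last block of a randomly chosen CHT section, i.e. (index+1)*params.CHTFrequency - 1. A quick sketch of that mapping, assuming the 32768-block section size go-ethereum uses for CHTs:

package main

import "fmt"

// sectionHead returns the number of the last block in CHT section `index`:
// section i covers blocks [i*F, (i+1)*F-1].
func sectionHead(index, frequency uint64) uint64 {
	return (index+1)*frequency - 1
}

func main() {
	const chtFrequency = 32768
	fmt.Println(sectionHead(0, chtFrequency))   // 32767: head of the first section
	fmt.Println(sectionHead(499, chtFrequency)) // 16383999: head of section 499
}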
+func TestBroadcastBlock1Peer(t *testing.T) { testBroadcastBlock(t, 1, 1) } +func TestBroadcastBlock2Peers(t *testing.T) { testBroadcastBlock(t, 2, 1) } +func TestBroadcastBlock3Peers(t *testing.T) { testBroadcastBlock(t, 3, 1) } +func TestBroadcastBlock4Peers(t *testing.T) { testBroadcastBlock(t, 4, 2) } +func TestBroadcastBlock5Peers(t *testing.T) { testBroadcastBlock(t, 5, 2) } +func TestBroadcastBlock8Peers(t *testing.T) { testBroadcastBlock(t, 9, 3) } +func TestBroadcastBlock12Peers(t *testing.T) { testBroadcastBlock(t, 12, 3) } +func TestBroadcastBlock16Peers(t *testing.T) { testBroadcastBlock(t, 16, 4) } +func TestBroadcastBloc26Peers(t *testing.T) { testBroadcastBlock(t, 26, 5) } +func TestBroadcastBlock100Peers(t *testing.T) { testBroadcastBlock(t, 100, 10) } + +func testBroadcastBlock(t *testing.T, peers, bcasts int) { + t.Parallel() + + // Create a source handler to broadcast blocks from and a number of sinks + // to receive them. + source := newTestHandlerWithBlocks(1) + defer source.close() + + sinks := make([]*testEthHandler, peers) + for i := 0; i < len(sinks); i++ { + sinks[i] = new(testEthHandler) + } + // Interconnect all the sink handlers with the source handler + var ( + genesis = source.chain.Genesis() + td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) + ) + for i, sink := range sinks { + sink := sink // Closure for gorotuine below + + sourcePipe, sinkPipe := p2p.MsgPipe() + defer sourcePipe.Close() + defer sinkPipe.Close() + + sourcePeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{byte(i)}, "", nil), sourcePipe, nil) + sinkPeer := eth.NewPeer(eth.ETH64, p2p.NewPeer(enode.ID{0}, "", nil), sinkPipe, nil) + defer sourcePeer.Close() + defer sinkPeer.Close() + + go source.handler.runEthPeer(sourcePeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + if err := sinkPeer.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { + t.Fatalf("failed to run protocol handshake") + } + go eth.Handle(sink, sinkPeer) + } + // Subscribe to all the transaction pools + blockChs := make([]chan *types.Block, len(sinks)) + for i := 0; i < len(sinks); i++ { + blockChs[i] = make(chan *types.Block, 1) + defer close(blockChs[i]) + + sub := sinks[i].blockBroadcasts.Subscribe(blockChs[i]) + defer sub.Unsubscribe() + } + // Initiate a block propagation across the peers + time.Sleep(100 * time.Millisecond) + source.handler.BroadcastBlock(source.chain.CurrentBlock(), true) + + // Iterate through all the sinks and ensure the correct number got the block + done := make(chan struct{}, peers) + for _, ch := range blockChs { + ch := ch + go func() { + <-ch + done <- struct{}{} + }() + } + var received int + for { + select { + case <-done: + received++ + + case <-time.After(100 * time.Millisecond): + if received != bcasts { + t.Errorf("broadcast count mismatch: have %d, want %d", received, bcasts) + } + return + } + } +} + +// Tests that a propagated malformed block (uncles or transactions don't match +// with the hashes in the header) gets discarded and not broadcast forward. +func TestBroadcastMalformedBlock64(t *testing.T) { testBroadcastMalformedBlock(t, 64) } +func TestBroadcastMalformedBlock65(t *testing.T) { testBroadcastMalformedBlock(t, 65) } + +func testBroadcastMalformedBlock(t *testing.T, protocol uint) { + t.Parallel() + + // Create a source handler to broadcast blocks from and a number of sinks + // to receive them. 
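The expected broadcast counts wired into testBroadcastBlock above are simply the integer square root of the peer count, matching the block propagation policy. A sketch that reproduces the table:

package main

import (
	"fmt"
	"math"
)

func main() {
	for _, peers := range []int{1, 2, 3, 4, 5, 9, 12, 16, 26, 100} {
		fmt.Printf("%3d peers -> %2d direct broadcasts\n", peers, int(math.Sqrt(float64(peers))))
	}
	// Output: 1, 1, 1, 2, 2, 3, 3, 4, 5, 10, matching the expectations above.
}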
+ source := newTestHandlerWithBlocks(1) + defer source.close() + + // Create a source handler to send messages through and a sink peer to receive them + p2pSrc, p2pSink := p2p.MsgPipe() + defer p2pSrc.Close() + defer p2pSink.Close() + + src := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), p2pSrc, source.txpool) + sink := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), p2pSink, source.txpool) + defer src.Close() + defer sink.Close() + + go source.handler.runEthPeer(src, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(source.handler), peer) + }) + // Run the handshake locally to avoid spinning up a sink handler + var ( + genesis = source.chain.Genesis() + td = source.chain.GetTd(genesis.Hash(), genesis.NumberU64()) + ) + if err := sink.Handshake(1, td, genesis.Hash(), genesis.Hash(), forkid.NewIDWithChain(source.chain), forkid.NewFilter(source.chain)); err != nil { + t.Fatalf("failed to run protocol handshake") + } + // After the handshake completes, the source handler should stream the sink + // the blocks, subscribe to inbound network events + backend := new(testEthHandler) + + blocks := make(chan *types.Block, 1) + sub := backend.blockBroadcasts.Subscribe(blocks) + defer sub.Unsubscribe() + + go eth.Handle(backend, sink) + + // Create various combinations of malformed blocks + head := source.chain.CurrentBlock() + + malformedUncles := head.Header() + malformedUncles.UncleHash[0]++ + malformedTransactions := head.Header() + malformedTransactions.TxHash[0]++ + malformedEverything := head.Header() + malformedEverything.UncleHash[0]++ + malformedEverything.TxHash[0]++ + + // Try to broadcast all malformations and ensure they all get discarded + for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { + block := types.NewBlockWithHeader(header).WithBody(head.Transactions(), head.Uncles()) + if err := src.SendNewBlock(block, big.NewInt(131136)); err != nil { + t.Fatalf("failed to broadcast block: %v", err) + } + select { + case <-blocks: + t.Fatalf("malformed block forwarded") + case <-time.After(100 * time.Millisecond): + } + } +} diff --git a/eth/handler_snap.go b/eth/handler_snap.go new file mode 100644 index 0000000000..767416ffd6 --- /dev/null +++ b/eth/handler_snap.go @@ -0,0 +1,50 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/protocols/snap" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// snapHandler implements the snap.Backend interface to handle the various network +// packets that are sent as replies or broadcasts. +type snapHandler handler + +func (h *snapHandler) Chain() *core.BlockChain { return h.chain } + +// RunPeer is invoked when a peer joins on the `snap` protocol. 
+func (h *snapHandler) RunPeer(peer *snap.Peer, hand snap.Handler) error { + return (*handler)(h).runSnapExtension(peer, hand) +} + +// PeerInfo retrieves all known `snap` information about a peer. +func (h *snapHandler) PeerInfo(id enode.ID) interface{} { + if p := h.peers.peer(id.String()); p != nil { + if p.snapExt != nil { + return p.snapExt.info() + } + } + return nil +} + +// Handle is invoked from a peer's message handler when it receives a new remote +// message that the handler couldn't consume and serve itself. +func (h *snapHandler) Handle(peer *snap.Peer, packet snap.Packet) error { + return h.downloader.DeliverSnapPacket(peer, packet) +} diff --git a/eth/handler_test.go b/eth/handler_test.go index fc6c6f2745..a90ef5c348 100644 --- a/eth/handler_test.go +++ b/eth/handler_test.go @@ -17,678 +17,154 @@ package eth import ( - "fmt" - "math" "math/big" - "math/rand" - "testing" - "time" + "sort" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" ) -// Tests that block headers can be retrieved from a remote chain based on user queries. -func TestGetBlockHeaders63(t *testing.T) { testGetBlockHeaders(t, 63) } -func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) } +var ( + // testKey is a private key to use for funding a tester account. + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -func testGetBlockHeaders(t *testing.T, protocol int) { - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxHashFetch+15, nil, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() + // testAddr is the Ethereum address of the tester account. 
+ testAddr = crypto.PubkeyToAddress(testKey.PublicKey) +) - // Create a "random" unknown hash for testing - var unknown common.Hash - for i := range unknown { - unknown[i] = byte(i) - } - // Create a batch of tests for various scenarios - limit := uint64(downloader.MaxHeaderFetch) - tests := []struct { - query *getBlockHeadersData // The query to execute for header retrieval - expect []common.Hash // The hashes of the block whose headers are expected - }{ - // A single random block should be retrievable by hash and number too - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(limit / 2).Hash()}, - }, - // Multiple headers should be retrievable in both directions - { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 1).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 2).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 1).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 2).Hash(), - }, - }, - // Multiple headers with skip lists should be retrievable - { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 4).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 + 8).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(limit / 2).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 4).Hash(), - pm.blockchain.GetBlockByNumber(limit/2 - 8).Hash(), - }, - }, - // The chain endpoints should be retrievable - { - &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, - []common.Hash{pm.blockchain.GetBlockByNumber(0).Hash()}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64()}, Amount: 1}, - []common.Hash{pm.blockchain.CurrentBlock().Hash()}, - }, - // Ensure protocol limits are honored - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, - pm.blockchain.GetBlockHashesFromHash(pm.blockchain.CurrentBlock().Hash(), limit), - }, - // Check that requesting more than available is handled gracefully - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64()).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(4).Hash(), - pm.blockchain.GetBlockByNumber(0).Hash(), - }, - }, - // Check that requesting more than available is handled gracefully, even if mid skip - { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, - []common.Hash{ - 
pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 4).Hash(), - pm.blockchain.GetBlockByNumber(pm.blockchain.CurrentBlock().NumberU64() - 1).Hash(), - }, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(4).Hash(), - pm.blockchain.GetBlockByNumber(1).Hash(), - }, - }, - // Check a corner case where requesting more can iterate past the endpoints - { - &getBlockHeadersData{Origin: hashOrNumber{Number: 2}, Amount: 5, Reverse: true}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(2).Hash(), - pm.blockchain.GetBlockByNumber(1).Hash(), - pm.blockchain.GetBlockByNumber(0).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back into the chain start - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(3).Hash(), - }, - }, - // Check a corner case where skipping overflow loops back to the same header - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: pm.blockchain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, - []common.Hash{ - pm.blockchain.GetBlockByNumber(1).Hash(), - }, - }, - // Check that non existing headers aren't returned - { - &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, - []common.Hash{}, - }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: pm.blockchain.CurrentBlock().NumberU64() + 1}, Amount: 1}, - []common.Hash{}, - }, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the headers to expect in the response - headers := []*types.Header{} - for _, hash := range tt.expect { - headers = append(headers, pm.blockchain.GetBlockByHash(hash).Header()) - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x03, tt.query) - if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) - } - // If the test used number origins, repeat with hashes as the too - if tt.query.Origin.Hash == (common.Hash{}) { - if origin := pm.blockchain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { - tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 +// testTxPool is a mock transaction pool that blindly accepts all transactions. +// Its goal is to get around setting up a valid statedb for the balance and nonce +// checks. +type testTxPool struct { + pool map[common.Hash]*types.Transaction // Hash map of collected transactions - p2p.Send(peer.app, 0x03, tt.query) - if err := p2p.ExpectMsg(peer.app, 0x04, headers); err != nil { - t.Errorf("test %d: headers mismatch: %v", i, err) - } - } - } - } + txFeed event.Feed // Notification feed to allow waiting for inclusion + lock sync.RWMutex // Protects the transaction pool } -// Tests that block contents can be retrieved from a remote chain based on their hashes. 
-func TestGetBlockBodies63(t *testing.T) { testGetBlockBodies(t, 63) } -func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) } - -func testGetBlockBodies(t *testing.T, protocol int) { - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, downloader.MaxBlockFetch+15, nil, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Create a batch of tests for various scenarios - limit := downloader.MaxBlockFetch - tests := []struct { - random int // Number of blocks to fetch randomly from the chain - explicit []common.Hash // Explicitly requested blocks - available []bool // Availability of explicitly requested blocks - expected int // Total number of existing blocks to expect - }{ - {1, nil, nil, 1}, // A single random block should be retrievable - {10, nil, nil, 10}, // Multiple random blocks should be retrievable - {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable - {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned - {0, []common.Hash{pm.blockchain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable - {0, []common.Hash{pm.blockchain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable - {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned - - // Existing and non-existing blocks interleaved should not cause problems - {0, []common.Hash{ - {}, - pm.blockchain.GetBlockByNumber(1).Hash(), - {}, - pm.blockchain.GetBlockByNumber(10).Hash(), - {}, - pm.blockchain.GetBlockByNumber(100).Hash(), - {}, - }, []bool{false, true, false, true, false, true, false}, 3}, - } - // Run each of the tests and verify the results against the chain - for i, tt := range tests { - // Collect the hashes to request, and the response to expect - hashes, seen := []common.Hash{}, make(map[int64]bool) - bodies := []*blockBody{} - - for j := 0; j < tt.random; j++ { - for { - num := rand.Int63n(int64(pm.blockchain.CurrentBlock().NumberU64())) - if !seen[num] { - seen[num] = true - - block := pm.blockchain.GetBlockByNumber(uint64(num)) - hashes = append(hashes, block.Hash()) - if len(bodies) < tt.expected { - bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) - } - break - } - } - } - for j, hash := range tt.explicit { - hashes = append(hashes, hash) - if tt.available[j] && len(bodies) < tt.expected { - block := pm.blockchain.GetBlockByHash(hash) - bodies = append(bodies, &blockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) - } - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x05, hashes) - if err := p2p.ExpectMsg(peer.app, 0x06, bodies); err != nil { - t.Errorf("test %d: bodies mismatch: %v", i, err) - } +// newTestTxPool creates a mock transaction pool. +func newTestTxPool() *testTxPool { + return &testTxPool{ + pool: make(map[common.Hash]*types.Transaction), } } -// Tests that the node state database can be retrieved based on hashes. -func TestGetNodeData63(t *testing.T) { testGetNodeData(t, 63) } -func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) } +// Has returns an indicator whether txpool has a transaction +// cached with the given hash. 
+func (p *testTxPool) Has(hash common.Hash) bool { + p.lock.Lock() + defer p.lock.Unlock() -func testGetNodeData(t *testing.T, protocol int) { - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - pm, db := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Fetch for now the entire chain db - hashes := []common.Hash{} - - it := db.NewIterator(nil, nil) - for it.Next() { - if key := it.Key(); len(key) == common.HashLength { - hashes = append(hashes, common.BytesToHash(key)) - } - } - it.Release() - - p2p.Send(peer.app, 0x0d, hashes) - msg, err := peer.app.ReadMsg() - if err != nil { - t.Fatalf("failed to read node data response: %v", err) - } - if msg.Code != 0x0e { - t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, 0x0c) - } - var data [][]byte - if err := msg.Decode(&data); err != nil { - t.Fatalf("failed to decode response node data: %v", err) - } - // Verify that all hashes correspond to the requested data, and reconstruct a state tree - for i, want := range hashes { - if hash := crypto.Keccak256Hash(data[i]); hash != want { - t.Errorf("data hash mismatch: have %x, want %x", hash, want) - } - } - statedb := rawdb.NewMemoryDatabase() - for i := 0; i < len(data); i++ { - statedb.Put(hashes[i].Bytes(), data[i]) - } - accounts := []common.Address{testBank, acc1Addr, acc2Addr} - for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - trie, _ := state.New(pm.blockchain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb), nil) - - for j, acc := range accounts { - state, _ := pm.blockchain.State() - bw := state.GetBalance(acc) - bh := trie.GetBalance(acc) - - if (bw != nil && bh == nil) || (bw == nil && bh != nil) { - t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - if bw != nil && bh != nil && bw.Cmp(bw) != 0 
{ - t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) - } - } - } + return p.pool[hash] != nil } -// Tests that the transaction receipts can be retrieved based on hashes. -func TestGetReceipt63(t *testing.T) { testGetReceipt(t, 63) } -func TestGetReceipt64(t *testing.T) { testGetReceipt(t, 64) } +// Get retrieves the transaction from local txpool with given +// tx hash. +func (p *testTxPool) Get(hash common.Hash) *types.Transaction { + p.lock.Lock() + defer p.lock.Unlock() -func testGetReceipt(t *testing.T, protocol int) { - // Define three accounts to simulate transactions with - acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") - acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") - acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) - acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) - - signer := types.HomesteadSigner{} - // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) - generator := func(i int, block *core.BlockGen) { - switch i { - case 0: - // In block 1, the test bank sends account #1 some ether. - tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testBankKey) - block.AddTx(tx) - case 1: - // In block 2, the test bank sends some more ether to account #1. - // acc1Addr passes it on to account #2. - tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testBank), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testBankKey) - tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key) - block.AddTx(tx1) - block.AddTx(tx2) - case 2: - // Block 3 is empty but was mined by account #2. - block.SetCoinbase(acc2Addr) - block.SetExtra([]byte("yeehaw")) - case 3: - // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). - b2 := block.PrevBlock(1).Header() - b2.Extra = []byte("foo") - block.AddUncle(b2) - b3 := block.PrevBlock(2).Header() - b3.Extra = []byte("foo") - block.AddUncle(b3) - } - } - // Assemble the test environment - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 4, generator, nil) - peer, _ := newTestPeer("peer", protocol, pm, true) - defer peer.close() - - // Collect the hashes to request, and the response to expect - hashes, receipts := []common.Hash{}, []types.Receipts{} - for i := uint64(0); i <= pm.blockchain.CurrentBlock().NumberU64(); i++ { - block := pm.blockchain.GetBlockByNumber(i) - - hashes = append(hashes, block.Hash()) - receipts = append(receipts, pm.blockchain.GetReceiptsByHash(block.Hash())) - } - // Send the hash request and verify the response - p2p.Send(peer.app, 0x0f, hashes) - if err := p2p.ExpectMsg(peer.app, 0x10, receipts); err != nil { - t.Errorf("receipts mismatch: %v", err) - } + return p.pool[hash] } -// Tests that post eth protocol handshake, clients perform a mutual checkpoint -// challenge to validate each other's chains. Hash mismatches, or missing ones -// during a fast sync should lead to the peer getting dropped. 
-func TestCheckpointChallenge(t *testing.T) { - tests := []struct { - syncmode downloader.SyncMode - checkpoint bool - timeout bool - empty bool - match bool - drop bool - }{ - // If checkpointing is not enabled locally, don't challenge and don't drop - {downloader.FullSync, false, false, false, false, false}, - {downloader.FastSync, false, false, false, false, false}, - - // If checkpointing is enabled locally and remote response is empty, only drop during fast sync - {downloader.FullSync, true, false, true, false, false}, - {downloader.FastSync, true, false, true, false, true}, // Special case, fast sync, unsynced peer - - // If checkpointing is enabled locally and remote response mismatches, always drop - {downloader.FullSync, true, false, false, false, true}, - {downloader.FastSync, true, false, false, false, true}, - - // If checkpointing is enabled locally and remote response matches, never drop - {downloader.FullSync, true, false, false, true, false}, - {downloader.FastSync, true, false, false, true, false}, +// AddRemotes appends a batch of transactions to the pool, and notifies any +// listeners if the addition channel is non nil +func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { + p.lock.Lock() + defer p.lock.Unlock() - // If checkpointing is enabled locally and remote times out, always drop - {downloader.FullSync, true, true, false, true, true}, - {downloader.FastSync, true, true, false, true, true}, - } - for _, tt := range tests { - t.Run(fmt.Sprintf("sync %v checkpoint %v timeout %v empty %v match %v", tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match), func(t *testing.T) { - testCheckpointChallenge(t, tt.syncmode, tt.checkpoint, tt.timeout, tt.empty, tt.match, tt.drop) - }) + for _, tx := range txs { + p.pool[tx.Hash()] = tx } + p.txFeed.Send(core.NewTxsEvent{Txs: txs}) + return make([]error, len(txs)) } -func testCheckpointChallenge(t *testing.T, syncmode downloader.SyncMode, checkpoint bool, timeout bool, empty bool, match bool, drop bool) { - // Reduce the checkpoint handshake challenge timeout - defer func(old time.Duration) { syncChallengeTimeout = old }(syncChallengeTimeout) - syncChallengeTimeout = 250 * time.Millisecond - - // Initialize a chain and generate a fake CHT if checkpointing is enabled - var ( - db = rawdb.NewMemoryDatabase() - config = new(params.ChainConfig) - ) - (&core.Genesis{Config: config}).MustCommit(db) // Commit genesis block - // If checkpointing is enabled, create and inject a fake CHT and the corresponding - // chllenge response. 
- var response *types.Header - var cht *params.TrustedCheckpoint - if checkpoint { - index := uint64(rand.Intn(500)) - number := (index+1)*params.CHTFrequency - 1 - response = &types.Header{Number: big.NewInt(int64(number)), Extra: []byte("valid")} +// Pending returns all the transactions known to the pool +func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) { + p.lock.RLock() + defer p.lock.RUnlock() - cht = ¶ms.TrustedCheckpoint{ - SectionIndex: index, - SectionHead: response.Hash(), - } + batches := make(map[common.Address]types.Transactions) + for _, tx := range p.pool { + from, _ := types.Sender(types.HomesteadSigner{}, tx) + batches[from] = append(batches[from], tx) } - // Create a checkpoint aware protocol manager - blockchain, err := core.NewBlockChain(db, nil, config, ethash.NewFaker(), vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create new blockchain: %v", err) - } - pm, err := NewProtocolManager(config, cht, syncmode, DefaultConfig.NetworkId, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, ethash.NewFaker(), blockchain, db, 1, nil) - if err != nil { - t.Fatalf("failed to start test protocol manager: %v", err) - } - pm.Start(1000) - defer pm.Stop() - - // Connect a new peer and check that we receive the checkpoint challenge - peer, _ := newTestPeer("peer", eth63, pm, true) - defer peer.close() - - if checkpoint { - challenge := &getBlockHeadersData{ - Origin: hashOrNumber{Number: response.Number.Uint64()}, - Amount: 1, - Skip: 0, - Reverse: false, - } - if err := p2p.ExpectMsg(peer.app, GetBlockHeadersMsg, challenge); err != nil { - t.Fatalf("challenge mismatch: %v", err) - } - // Create a block to reply to the challenge if no timeout is simulated - if !timeout { - if empty { - if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{}); err != nil { - t.Fatalf("failed to answer challenge: %v", err) - } - } else if match { - if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{response}); err != nil { - t.Fatalf("failed to answer challenge: %v", err) - } - } else { - if err := p2p.Send(peer.app, BlockHeadersMsg, []*types.Header{{Number: response.Number}}); err != nil { - t.Fatalf("failed to answer challenge: %v", err) - } - } - } - } - // Wait until the test timeout passes to ensure proper cleanup - time.Sleep(syncChallengeTimeout + 300*time.Millisecond) - - // Verify that the remote peer is maintained or dropped - if drop { - if peers := pm.peers.Len(); peers != 0 { - t.Fatalf("peer count mismatch: have %d, want %d", peers, 0) - } - } else { - if peers := pm.peers.Len(); peers != 1 { - t.Fatalf("peer count mismatch: have %d, want %d", peers, 1) - } + for _, batch := range batches { + sort.Sort(types.TxByNonce(batch)) } + return batches, nil } -func TestBroadcastBlock(t *testing.T) { - var tests = []struct { - totalPeers int - broadcastExpected int - }{ - {1, 1}, - {2, 1}, - {3, 1}, - {4, 2}, - {5, 2}, - {9, 3}, - {12, 3}, - {16, 4}, - {26, 5}, - {100, 10}, - } - for _, test := range tests { - testBroadcastBlock(t, test.totalPeers, test.broadcastExpected) - } +// SubscribeNewTxsEvent should return an event subscription of NewTxsEvent and +// send events to the given channel. 
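Both the testEthHandler mock earlier in this change and testTxPool here hand asynchronous deliveries back to the test body via event.Feed. A minimal subscribe/send example of that pattern, assuming only the behaviour already relied on in the diff:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/event"
)

func main() {
	var feed event.Feed // zero value is ready to use

	ch := make(chan int, 1)
	sub := feed.Subscribe(ch) // the channel's element type fixes the feed's type
	defer sub.Unsubscribe()

	sent := feed.Send(42) // returns how many subscribers received the value
	fmt.Println(sent, <-ch)
}

A feed is locked to a single value type by its first Subscribe or Send, which is why the mocks keep one feed per payload kind (blocks, hashes, transactions).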
+func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { + return p.txFeed.Subscribe(ch) } -func testBroadcastBlock(t *testing.T, totalPeers, broadcastExpected int) { - var ( - evmux = new(event.TypeMux) - pow = ethash.NewFaker() - db = rawdb.NewMemoryDatabase() - config = ¶ms.ChainConfig{} - gspec = &core.Genesis{Config: config} - genesis = gspec.MustCommit(db) - ) - blockchain, err := core.NewBlockChain(db, nil, config, pow, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create new blockchain: %v", err) - } - pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, evmux, &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, pow, blockchain, db, 1, nil) - if err != nil { - t.Fatalf("failed to start test protocol manager: %v", err) - } - pm.Start(1000) - defer pm.Stop() - var peers []*testPeer - for i := 0; i < totalPeers; i++ { - peer, _ := newTestPeer(fmt.Sprintf("peer %d", i), eth63, pm, true) - defer peer.close() - - peers = append(peers, peer) - } - chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {}) - pm.BroadcastBlock(chain[0], true /*propagate*/) - - errCh := make(chan error, totalPeers) - doneCh := make(chan struct{}, totalPeers) - for _, peer := range peers { - go func(p *testPeer) { - if err := p2p.ExpectMsg(p.app, NewBlockMsg, &newBlockData{Block: chain[0], TD: big.NewInt(131136)}); err != nil { - errCh <- err - } else { - doneCh <- struct{}{} - } - }(peer) - } - var received int - for { - select { - case <-doneCh: - received++ - if received > broadcastExpected { - // We can bail early here - t.Errorf("broadcast count mismatch: have %d > want %d", received, broadcastExpected) - return - } - case <-time.After(2 * time.Second): - if received != broadcastExpected { - t.Errorf("broadcast count mismatch: have %d, want %d", received, broadcastExpected) - } - return - case err = <-errCh: - t.Fatalf("broadcast failed: %v", err) - } - } +// testHandler is a live implementation of the Ethereum protocol handler, just +// preinitialized with some sane testing defaults and the transaction pool mocked +// out. +type testHandler struct { + db ethdb.Database + chain *core.BlockChain + txpool *testTxPool + handler *handler +} +// newTestHandler creates a new handler for testing purposes with no blocks. +func newTestHandler() *testHandler { + return newTestHandlerWithBlocks(0) } -// Tests that a propagated malformed block (uncles or transactions don't match -// with the hashes in the header) gets discarded and not broadcast forward. -func TestBroadcastMalformedBlock(t *testing.T) { - // Create a live node to test propagation with - var ( - engine = ethash.NewFaker() - db = rawdb.NewMemoryDatabase() - config = ¶ms.ChainConfig{} - gspec = &core.Genesis{Config: config} - genesis = gspec.MustCommit(db) - ) - blockchain, err := core.NewBlockChain(db, nil, config, engine, vm.Config{}, nil, nil) - if err != nil { - t.Fatalf("failed to create new blockchain: %v", err) +// newTestHandlerWithBlocks creates a new handler for testing purposes, with a +// given number of initial blocks. 
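The mocked pool above keeps transactions in a plain hash map, fires a core.NewTxsEvent on its feed from AddRemotes, and hands them back from Pending grouped by sender and sorted by nonce. A minimal sketch of how a test could exercise that behaviour, assuming it lives next to the mock in the eth test package (so the usual core/types/common/time imports are already present); the test name is hypothetical and newTestTxPool is the constructor used elsewhere in this patch:

func TestTxPoolMockEvents(t *testing.T) {
	pool := newTestTxPool() // constructor used elsewhere in this patch

	// Subscribe to the mock's feed the same way the handler does
	events := make(chan core.NewTxsEvent, 1)
	sub := pool.SubscribeNewTxsEvent(events)
	defer sub.Unsubscribe()

	// Inject one remote transaction and expect it to show up on the feed
	tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), nil)
	pool.AddRemotes([]*types.Transaction{tx})

	select {
	case ev := <-events:
		if len(ev.Txs) != 1 || ev.Txs[0].Hash() != tx.Hash() {
			t.Fatalf("unexpected event contents: %v", ev.Txs)
		}
	case <-time.After(time.Second):
		t.Fatal("no NewTxsEvent delivered")
	}
}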
+func newTestHandlerWithBlocks(blocks int) *testHandler { + // Create a database pre-initialize with a genesis block + db := rawdb.NewMemoryDatabase() + (&core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + }).MustCommit(db) + + chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + + bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, nil) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + txpool := newTestTxPool() + + handler, _ := newHandler(&handlerConfig{ + Database: db, + Chain: chain, + TxPool: txpool, + Network: 1, + Sync: downloader.FastSync, + BloomCache: 1, + }) + handler.Start(1000) + + return &testHandler{ + db: db, + chain: chain, + txpool: txpool, + handler: handler, } - pm, err := NewProtocolManager(config, nil, downloader.FullSync, DefaultConfig.NetworkId, new(event.TypeMux), new(testTxPool), engine, blockchain, db, 1, nil) - if err != nil { - t.Fatalf("failed to start test protocol manager: %v", err) - } - pm.Start(2) - defer pm.Stop() - - // Create two peers, one to send the malformed block with and one to check - // propagation - source, _ := newTestPeer("source", eth63, pm, true) - defer source.close() - - sink, _ := newTestPeer("sink", eth63, pm, true) - defer sink.close() - - // Create various combinations of malformed blocks - chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, 1, func(i int, gen *core.BlockGen) {}) - - malformedUncles := chain[0].Header() - malformedUncles.UncleHash[0]++ - malformedTransactions := chain[0].Header() - malformedTransactions.TxHash[0]++ - malformedEverything := chain[0].Header() - malformedEverything.UncleHash[0]++ - malformedEverything.TxHash[0]++ +} - // Keep listening to broadcasts and notify if any arrives - notify := make(chan struct{}, 1) - go func() { - if _, err := sink.app.ReadMsg(); err == nil { - notify <- struct{}{} - } - }() - // Try to broadcast all malformations and ensure they all get discarded - for _, header := range []*types.Header{malformedUncles, malformedTransactions, malformedEverything} { - block := types.NewBlockWithHeader(header).WithBody(chain[0].Transactions(), chain[0].Uncles()) - if err := p2p.Send(source.app, NewBlockMsg, []interface{}{block, big.NewInt(131136)}); err != nil { - t.Fatalf("failed to broadcast block: %v", err) - } - select { - case <-notify: - t.Fatalf("malformed block forwarded") - case <-time.After(100 * time.Millisecond): - } - } +// close tears down the handler and all its internal constructs. +func (b *testHandler) close() { + b.handler.Stop() + b.chain.Stop() } diff --git a/eth/helper_test.go b/eth/helper_test.go deleted file mode 100644 index c0bda181ea..0000000000 --- a/eth/helper_test.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
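newTestHandlerWithBlocks above wires a fresh memory database, an ethash-faker chain and the mocked transaction pool into a running handler, and close tears the whole scaffold down again. A sketch of the intended usage from a test, assuming it sits alongside these helpers in the eth package and that CurrentBlock still returns a full block here; the test name is hypothetical:

func TestHandlerScaffolding(t *testing.T) {
	// Spin up a handler backed by an 8-block chain, tear it down when done
	h := newTestHandlerWithBlocks(8)
	defer h.close()

	if head := h.chain.CurrentBlock().NumberU64(); head != 8 {
		t.Fatalf("head block mismatch: have %d, want %d", head, 8)
	}
	if pending, _ := h.txpool.Pending(); len(pending) != 0 {
		t.Fatalf("unexpected pending transactions: %d", len(pending))
	}
}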
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// This file contains some shares testing functionality, common to multiple -// different files and modules being tested. - -package eth - -import ( - "crypto/ecdsa" - "crypto/rand" - "fmt" - "math/big" - "sort" - "sync" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" -) - -var ( - testBankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - testBank = crypto.PubkeyToAddress(testBankKey.PublicKey) -) - -// newTestProtocolManager creates a new protocol manager for testing purposes, -// with the given number of blocks already known, and potential notification -// channels for different events. -func newTestProtocolManager(mode downloader.SyncMode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, ethdb.Database, error) { - var ( - evmux = new(event.TypeMux) - engine = ethash.NewFaker() - db = rawdb.NewMemoryDatabase() - gspec = &core.Genesis{ - Config: params.TestChainConfig, - Alloc: core.GenesisAlloc{testBank: {Balance: big.NewInt(1000000)}}, - } - genesis = gspec.MustCommit(db) - blockchain, _ = core.NewBlockChain(db, nil, gspec.Config, engine, vm.Config{}, nil, nil) - ) - chain, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, blocks, generator) - if _, err := blockchain.InsertChain(chain); err != nil { - panic(err) - } - pm, err := NewProtocolManager(gspec.Config, nil, mode, DefaultConfig.NetworkId, evmux, &testTxPool{added: newtx, pool: make(map[common.Hash]*types.Transaction)}, engine, blockchain, db, 1, nil) - if err != nil { - return nil, nil, err - } - pm.Start(1000) - return pm, db, nil -} - -// newTestProtocolManagerMust creates a new protocol manager for testing purposes, -// with the given number of blocks already known, and potential notification -// channels for different events. In case of an error, the constructor force- -// fails the test. -func newTestProtocolManagerMust(t *testing.T, mode downloader.SyncMode, blocks int, generator func(int, *core.BlockGen), newtx chan<- []*types.Transaction) (*ProtocolManager, ethdb.Database) { - pm, db, err := newTestProtocolManager(mode, blocks, generator, newtx) - if err != nil { - t.Fatalf("Failed to create protocol manager: %v", err) - } - return pm, db -} - -// testTxPool is a fake, helper transaction pool for testing purposes -type testTxPool struct { - txFeed event.Feed - pool map[common.Hash]*types.Transaction // Hash map of collected transactions - added chan<- []*types.Transaction // Notification channel for new transactions - - lock sync.RWMutex // Protects the transaction pool -} - -// Has returns an indicator whether txpool has a transaction -// cached with the given hash. 
-func (p *testTxPool) Has(hash common.Hash) bool { - p.lock.Lock() - defer p.lock.Unlock() - - return p.pool[hash] != nil -} - -// Get retrieves the transaction from local txpool with given -// tx hash. -func (p *testTxPool) Get(hash common.Hash) *types.Transaction { - p.lock.Lock() - defer p.lock.Unlock() - - return p.pool[hash] -} - -// AddRemotes appends a batch of transactions to the pool, and notifies any -// listeners if the addition channel is non nil -func (p *testTxPool) AddRemotes(txs []*types.Transaction) []error { - p.lock.Lock() - defer p.lock.Unlock() - - for _, tx := range txs { - p.pool[tx.Hash()] = tx - } - if p.added != nil { - p.added <- txs - } - p.txFeed.Send(core.NewTxsEvent{Txs: txs}) - return make([]error, len(txs)) -} - -// Pending returns all the transactions known to the pool -func (p *testTxPool) Pending() (map[common.Address]types.Transactions, error) { - p.lock.RLock() - defer p.lock.RUnlock() - - batches := make(map[common.Address]types.Transactions) - for _, tx := range p.pool { - from, _ := types.Sender(types.HomesteadSigner{}, tx) - batches[from] = append(batches[from], tx) - } - for _, batch := range batches { - sort.Sort(types.TxByNonce(batch)) - } - return batches, nil -} - -func (p *testTxPool) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { - return p.txFeed.Subscribe(ch) -} - -// newTestTransaction create a new dummy transaction. -func newTestTransaction(from *ecdsa.PrivateKey, nonce uint64, datasize int) *types.Transaction { - tx := types.NewTransaction(nonce, common.Address{}, big.NewInt(0), 100000, big.NewInt(0), make([]byte, datasize)) - tx, _ = types.SignTx(tx, types.HomesteadSigner{}, from) - return tx -} - -// testPeer is a simulated peer to allow testing direct network calls. -type testPeer struct { - net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging - app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side - *peer -} - -// newTestPeer creates a new peer registered at the given protocol manager. -func newTestPeer(name string, version int, pm *ProtocolManager, shake bool) (*testPeer, <-chan error) { - // Create a message pipe to communicate through - app, net := p2p.MsgPipe() - - // Start the peer on a new thread - var id enode.ID - rand.Read(id[:]) - peer := pm.newPeer(version, p2p.NewPeer(id, name, nil), net, pm.txpool.Get) - errc := make(chan error, 1) - go func() { errc <- pm.runPeer(peer) }() - tp := &testPeer{app: app, net: net, peer: peer} - - // Execute any implicitly requested handshakes and return - if shake { - var ( - genesis = pm.blockchain.Genesis() - head = pm.blockchain.CurrentHeader() - td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) - ) - forkID := forkid.NewID(pm.blockchain.Config(), pm.blockchain.Genesis().Hash(), pm.blockchain.CurrentHeader().Number.Uint64()) - tp.handshake(nil, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(pm.blockchain)) - } - return tp, errc -} - -// handshake simulates a trivial handshake that expects the same state from the -// remote side as we are simulating locally. 
-func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) { - var msg interface{} - switch { - case p.version == eth63: - msg = &statusData63{ - ProtocolVersion: uint32(p.version), - NetworkId: DefaultConfig.NetworkId, - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, - } - case p.version >= eth64: - msg = &statusData{ - ProtocolVersion: uint32(p.version), - NetworkID: DefaultConfig.NetworkId, - TD: td, - Head: head, - Genesis: genesis, - ForkID: forkID, - } - default: - panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) - } - if err := p2p.ExpectMsg(p.app, StatusMsg, msg); err != nil { - t.Fatalf("status recv: %v", err) - } - if err := p2p.Send(p.app, StatusMsg, msg); err != nil { - t.Fatalf("status send: %v", err) - } -} - -// close terminates the local side of the peer, notifying the remote protocol -// manager of termination. -func (p *testPeer) close() { - p.app.Close() -} diff --git a/eth/peer.go b/eth/peer.go index 21b82a19c5..1cea9c640e 100644 --- a/eth/peer.go +++ b/eth/peer.go @@ -17,806 +17,57 @@ package eth import ( - "errors" - "fmt" "math/big" "sync" "time" - mapset "github.com/deckarep/golang-set" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/snap" ) -var ( - errClosed = errors.New("peer set is closed") - errAlreadyRegistered = errors.New("peer is already registered") - errNotRegistered = errors.New("peer is not registered") -) - -const ( - maxKnownTxs = 32768 // Maximum transactions hashes to keep in the known list (prevent DOS) - maxKnownBlocks = 1024 // Maximum block hashes to keep in the known list (prevent DOS) - - // maxQueuedTxs is the maximum number of transactions to queue up before dropping - // older broadcasts. - maxQueuedTxs = 4096 - - // maxQueuedTxAnns is the maximum number of transaction announcements to queue up - // before dropping older announcements. - maxQueuedTxAnns = 4096 - - // maxQueuedBlocks is the maximum number of block propagations to queue up before - // dropping broadcasts. There's not much point in queueing stale blocks, so a few - // that might cover uncles should be enough. - maxQueuedBlocks = 4 - - // maxQueuedBlockAnns is the maximum number of block announcements to queue up before - // dropping broadcasts. Similarly to block propagations, there's no point to queue - // above some healthy uncle limit, so use that. - maxQueuedBlockAnns = 4 - - handshakeTimeout = 5 * time.Second -) - -// max is a helper function which returns the larger of the two given integers. -func max(a, b int) int { - if a > b { - return a - } - return b -} - -// PeerInfo represents a short summary of the Ethereum sub-protocol metadata known +// ethPeerInfo represents a short summary of the `eth` sub-protocol metadata known // about a connected peer. -type PeerInfo struct { - Version int `json:"version"` // Ethereum protocol version negotiated +type ethPeerInfo struct { + Version uint `json:"version"` // Ethereum protocol version negotiated Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain - Head string `json:"head"` // SHA3 hash of the peer's best owned block -} - -// propEvent is a block propagation, waiting for its turn in the broadcast queue. 
-type propEvent struct { - block *types.Block - td *big.Int -} - -type peer struct { - id string - - *p2p.Peer - rw p2p.MsgReadWriter - - version int // Protocol version negotiated - syncDrop *time.Timer // Timed connection dropper if sync progress isn't validated in time - - head common.Hash - td *big.Int - lock sync.RWMutex - - knownBlocks mapset.Set // Set of block hashes known to be known by this peer - queuedBlocks chan *propEvent // Queue of blocks to broadcast to the peer - queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer - - knownTxs mapset.Set // Set of transaction hashes known to be known by this peer - txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests - txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests - getPooledTx func(common.Hash) *types.Transaction // Callback used to retrieve transaction from txpool - - term chan struct{} // Termination channel to stop the broadcaster + Head string `json:"head"` // Hex hash of the peer's best owned block } -func newPeer(version int, p *p2p.Peer, rw p2p.MsgReadWriter, getPooledTx func(hash common.Hash) *types.Transaction) *peer { - return &peer{ - Peer: p, - rw: rw, - version: version, - id: fmt.Sprintf("%x", p.ID().Bytes()[:8]), - knownTxs: mapset.NewSet(), - knownBlocks: mapset.NewSet(), - queuedBlocks: make(chan *propEvent, maxQueuedBlocks), - queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns), - txBroadcast: make(chan []common.Hash), - txAnnounce: make(chan []common.Hash), - getPooledTx: getPooledTx, - term: make(chan struct{}), - } -} - -// broadcastBlocks is a write loop that multiplexes blocks and block accouncements -// to the remote peer. The goal is to have an async writer that does not lock up -// node internals and at the same time rate limits queued data. -func (p *peer) broadcastBlocks(removePeer func(string)) { - for { - select { - case prop := <-p.queuedBlocks: - if err := p.SendNewBlock(prop.block, prop.td); err != nil { - removePeer(p.id) - return - } - p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td) - - case block := <-p.queuedBlockAnns: - if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil { - removePeer(p.id) - return - } - p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash()) - - case <-p.term: - return - } - } -} - -// broadcastTransactions is a write loop that schedules transaction broadcasts -// to the remote peer. The goal is to have an async writer that does not lock up -// node internals and at the same time rate limits queued data. 
-func (p *peer) broadcastTransactions(removePeer func(string)) { - var ( - queue []common.Hash // Queue of hashes to broadcast as full transactions - done chan struct{} // Non-nil if background broadcaster is running - fail = make(chan error, 1) // Channel used to receive network error - ) - for { - // If there's no in-flight broadcast running, check if a new one is needed - if done == nil && len(queue) > 0 { - // Pile transaction until we reach our allowed network limit - var ( - hashes []common.Hash - txs []*types.Transaction - size common.StorageSize - ) - for i := 0; i < len(queue) && size < txsyncPackSize; i++ { - if tx := p.getPooledTx(queue[i]); tx != nil { - txs = append(txs, tx) - size += tx.Size() - } - hashes = append(hashes, queue[i]) - } - queue = queue[:copy(queue, queue[len(hashes):])] - - // If there's anything available to transfer, fire up an async writer - if len(txs) > 0 { - done = make(chan struct{}) - go func() { - if err := p.sendTransactions(txs); err != nil { - fail <- err - return - } - close(done) - p.Log().Trace("Sent transactions", "count", len(txs)) - }() - } - } - // Transfer goroutine may or may not have been started, listen for events - select { - case hashes := <-p.txBroadcast: - // New batch of transactions to be broadcast, queue them (with cap) - queue = append(queue, hashes...) - if len(queue) > maxQueuedTxs { - // Fancy copy and resize to ensure buffer doesn't grow indefinitely - queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])] - } - - case <-done: - done = nil - - case <-fail: - removePeer(p.id) - return +// ethPeer is a wrapper around eth.Peer to maintain a few extra metadata. +type ethPeer struct { + *eth.Peer + snapExt *snapPeer // Satellite `snap` connection - case <-p.term: - return - } - } + syncDrop *time.Timer // Connection dropper if `eth` sync progress isn't validated in time + snapWait chan struct{} // Notification channel for snap connections + lock sync.RWMutex // Mutex protecting the internal fields } -// announceTransactions is a write loop that schedules transaction broadcasts -// to the remote peer. The goal is to have an async writer that does not lock up -// node internals and at the same time rate limits queued data. 
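The "fancy copy and resize" in the broadcast and announce loops above is easy to misread: when the queue overflows, only the newest maxQueuedTxs (or maxQueuedTxAnns) entries are kept, and the existing backing array is reused rather than reallocated. A standalone illustration of that idiom, not part of the patch, with plain ints standing in for transaction hashes:

package main

import "fmt"

func main() {
	const maxQueued = 4 // stands in for maxQueuedTxs / maxQueuedTxAnns

	queue := []int{1, 2, 3, 4, 5, 6, 7} // overflowed: 7 entries, cap is 4
	if len(queue) > maxQueued {
		// Copy the newest maxQueued entries to the front of the same backing
		// array, then reslice to that length; copy handles the overlap safely.
		queue = queue[:copy(queue, queue[len(queue)-maxQueued:])]
	}
	fmt.Println(queue) // [4 5 6 7]
}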
-func (p *peer) announceTransactions(removePeer func(string)) { - var ( - queue []common.Hash // Queue of hashes to announce as transaction stubs - done chan struct{} // Non-nil if background announcer is running - fail = make(chan error, 1) // Channel used to receive network error - ) - for { - // If there's no in-flight announce running, check if a new one is needed - if done == nil && len(queue) > 0 { - // Pile transaction hashes until we reach our allowed network limit - var ( - hashes []common.Hash - pending []common.Hash - size common.StorageSize - ) - for i := 0; i < len(queue) && size < txsyncPackSize; i++ { - if p.getPooledTx(queue[i]) != nil { - pending = append(pending, queue[i]) - size += common.HashLength - } - hashes = append(hashes, queue[i]) - } - queue = queue[:copy(queue, queue[len(hashes):])] - - // If there's anything available to transfer, fire up an async writer - if len(pending) > 0 { - done = make(chan struct{}) - go func() { - if err := p.sendPooledTransactionHashes(pending); err != nil { - fail <- err - return - } - close(done) - p.Log().Trace("Sent transaction announcements", "count", len(pending)) - }() - } - } - // Transfer goroutine may or may not have been started, listen for events - select { - case hashes := <-p.txAnnounce: - // New batch of transactions to be broadcast, queue them (with cap) - queue = append(queue, hashes...) - if len(queue) > maxQueuedTxAnns { - // Fancy copy and resize to ensure buffer doesn't grow indefinitely - queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] - } - - case <-done: - done = nil - - case <-fail: - removePeer(p.id) - return - - case <-p.term: - return - } - } -} - -// close signals the broadcast goroutine to terminate. -func (p *peer) close() { - close(p.term) -} - -// Info gathers and returns a collection of metadata known about a peer. -func (p *peer) Info() *PeerInfo { +// info gathers and returns some `eth` protocol metadata known about a peer. +func (p *ethPeer) info() *ethPeerInfo { hash, td := p.Head() - return &PeerInfo{ - Version: p.version, + return ðPeerInfo{ + Version: p.Version(), Difficulty: td, Head: hash.Hex(), } } -// Head retrieves a copy of the current head hash and total difficulty of the -// peer. -func (p *peer) Head() (hash common.Hash, td *big.Int) { - p.lock.RLock() - defer p.lock.RUnlock() - - copy(hash[:], p.head[:]) - return hash, new(big.Int).Set(p.td) -} - -// SetHead updates the head hash and total difficulty of the peer. -func (p *peer) SetHead(hash common.Hash, td *big.Int) { - p.lock.Lock() - defer p.lock.Unlock() - - copy(p.head[:], hash[:]) - p.td.Set(td) -} - -// MarkBlock marks a block as known for the peer, ensuring that the block will -// never be propagated to this particular peer. -func (p *peer) MarkBlock(hash common.Hash) { - // If we reached the memory allowance, drop a previously known block hash - for p.knownBlocks.Cardinality() >= maxKnownBlocks { - p.knownBlocks.Pop() - } - p.knownBlocks.Add(hash) -} - -// MarkTransaction marks a transaction as known for the peer, ensuring that it -// will never be propagated to this particular peer. -func (p *peer) MarkTransaction(hash common.Hash) { - // If we reached the memory allowance, drop a previously known transaction hash - for p.knownTxs.Cardinality() >= maxKnownTxs { - p.knownTxs.Pop() - } - p.knownTxs.Add(hash) -} - -// SendTransactions64 sends transactions to the peer and includes the hashes -// in its transaction hash set for future reference. 
-// -// This method is legacy support for initial transaction exchange in eth/64 and -// prior. For eth/65 and higher use SendPooledTransactionHashes. -func (p *peer) SendTransactions64(txs types.Transactions) error { - return p.sendTransactions(txs) -} - -// sendTransactions sends transactions to the peer and includes the hashes -// in its transaction hash set for future reference. -// -// This method is a helper used by the async transaction sender. Don't call it -// directly as the queueing (memory) and transmission (bandwidth) costs should -// not be managed directly. -func (p *peer) sendTransactions(txs types.Transactions) error { - // Mark all the transactions as known, but ensure we don't overflow our limits - for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) { - p.knownTxs.Pop() - } - for _, tx := range txs { - p.knownTxs.Add(tx.Hash()) - } - return p2p.Send(p.rw, TransactionMsg, txs) -} - -// AsyncSendTransactions queues a list of transactions (by hash) to eventually -// propagate to a remote peer. The number of pending sends are capped (new ones -// will force old sends to be dropped) -func (p *peer) AsyncSendTransactions(hashes []common.Hash) { - select { - case p.txBroadcast <- hashes: - // Mark all the transactions as known, but ensure we don't overflow our limits - for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { - p.knownTxs.Pop() - } - for _, hash := range hashes { - p.knownTxs.Add(hash) - } - case <-p.term: - p.Log().Debug("Dropping transaction propagation", "count", len(hashes)) - } -} - -// sendPooledTransactionHashes sends transaction hashes to the peer and includes -// them in its transaction hash set for future reference. -// -// This method is a helper used by the async transaction announcer. Don't call it -// directly as the queueing (memory) and transmission (bandwidth) costs should -// not be managed directly. -func (p *peer) sendPooledTransactionHashes(hashes []common.Hash) error { - // Mark all the transactions as known, but ensure we don't overflow our limits - for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { - p.knownTxs.Pop() - } - for _, hash := range hashes { - p.knownTxs.Add(hash) - } - return p2p.Send(p.rw, NewPooledTransactionHashesMsg, hashes) -} - -// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually -// announce to a remote peer. The number of pending sends are capped (new ones -// will force old sends to be dropped) -func (p *peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { - select { - case p.txAnnounce <- hashes: - // Mark all the transactions as known, but ensure we don't overflow our limits - for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { - p.knownTxs.Pop() - } - for _, hash := range hashes { - p.knownTxs.Add(hash) - } - case <-p.term: - p.Log().Debug("Dropping transaction announcement", "count", len(hashes)) - } -} - -// SendPooledTransactionsRLP sends requested transactions to the peer and adds the -// hashes in its transaction hash set for future reference. -// -// Note, the method assumes the hashes are correct and correspond to the list of -// transactions being sent. 
-func (p *peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error { - // Mark all the transactions as known, but ensure we don't overflow our limits - for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { - p.knownTxs.Pop() - } - for _, hash := range hashes { - p.knownTxs.Add(hash) - } - return p2p.Send(p.rw, PooledTransactionsMsg, txs) -} - -// SendNewBlockHashes announces the availability of a number of blocks through -// a hash notification. -func (p *peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error { - // Mark all the block hashes as known, but ensure we don't overflow our limits - for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) { - p.knownBlocks.Pop() - } - for _, hash := range hashes { - p.knownBlocks.Add(hash) - } - request := make(newBlockHashesData, len(hashes)) - for i := 0; i < len(hashes); i++ { - request[i].Hash = hashes[i] - request[i].Number = numbers[i] - } - return p2p.Send(p.rw, NewBlockHashesMsg, request) -} - -// AsyncSendNewBlockHash queues the availability of a block for propagation to a -// remote peer. If the peer's broadcast queue is full, the event is silently -// dropped. -func (p *peer) AsyncSendNewBlockHash(block *types.Block) { - select { - case p.queuedBlockAnns <- block: - // Mark all the block hash as known, but ensure we don't overflow our limits - for p.knownBlocks.Cardinality() >= maxKnownBlocks { - p.knownBlocks.Pop() - } - p.knownBlocks.Add(block.Hash()) - default: - p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash()) - } -} - -// SendNewBlock propagates an entire block to a remote peer. -func (p *peer) SendNewBlock(block *types.Block, td *big.Int) error { - // Mark all the block hash as known, but ensure we don't overflow our limits - for p.knownBlocks.Cardinality() >= maxKnownBlocks { - p.knownBlocks.Pop() - } - p.knownBlocks.Add(block.Hash()) - return p2p.Send(p.rw, NewBlockMsg, []interface{}{block, td}) -} - -// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If -// the peer's broadcast queue is full, the event is silently dropped. -func (p *peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { - select { - case p.queuedBlocks <- &propEvent{block: block, td: td}: - // Mark all the block hash as known, but ensure we don't overflow our limits - for p.knownBlocks.Cardinality() >= maxKnownBlocks { - p.knownBlocks.Pop() - } - p.knownBlocks.Add(block.Hash()) - default: - p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash()) - } -} - -// SendBlockHeaders sends a batch of block headers to the remote peer. -func (p *peer) SendBlockHeaders(headers []*types.Header) error { - return p2p.Send(p.rw, BlockHeadersMsg, headers) -} - -// SendBlockBodies sends a batch of block contents to the remote peer. -func (p *peer) SendBlockBodies(bodies []*blockBody) error { - return p2p.Send(p.rw, BlockBodiesMsg, blockBodiesData(bodies)) -} - -// SendBlockBodiesRLP sends a batch of block contents to the remote peer from -// an already RLP encoded format. -func (p *peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error { - return p2p.Send(p.rw, BlockBodiesMsg, bodies) -} - -// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the -// hashes requested. 
-func (p *peer) SendNodeData(data [][]byte) error { - return p2p.Send(p.rw, NodeDataMsg, data) -} - -// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the -// ones requested from an already RLP encoded format. -func (p *peer) SendReceiptsRLP(receipts []rlp.RawValue) error { - return p2p.Send(p.rw, ReceiptsMsg, receipts) -} - -// RequestOneHeader is a wrapper around the header query functions to fetch a -// single header. It is used solely by the fetcher. -func (p *peer) RequestOneHeader(hash common.Hash) error { - p.Log().Debug("Fetching single header", "hash", hash) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: uint64(1), Skip: uint64(0), Reverse: false}) -} - -// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the -// specified header query, based on the hash of an origin block. -func (p *peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { - p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) -} - -// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the -// specified header query, based on the number of an origin block. -func (p *peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { - p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse) - return p2p.Send(p.rw, GetBlockHeadersMsg, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}) -} - -// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes -// specified. -func (p *peer) RequestBodies(hashes []common.Hash) error { - p.Log().Debug("Fetching batch of block bodies", "count", len(hashes)) - return p2p.Send(p.rw, GetBlockBodiesMsg, hashes) -} - -// RequestNodeData fetches a batch of arbitrary data from a node's known state -// data, corresponding to the specified hashes. -func (p *peer) RequestNodeData(hashes []common.Hash) error { - p.Log().Debug("Fetching batch of state data", "count", len(hashes)) - return p2p.Send(p.rw, GetNodeDataMsg, hashes) -} - -// RequestReceipts fetches a batch of transaction receipts from a remote node. -func (p *peer) RequestReceipts(hashes []common.Hash) error { - p.Log().Debug("Fetching batch of receipts", "count", len(hashes)) - return p2p.Send(p.rw, GetReceiptsMsg, hashes) -} - -// RequestTxs fetches a batch of transactions from a remote node. -func (p *peer) RequestTxs(hashes []common.Hash) error { - p.Log().Debug("Fetching batch of transactions", "count", len(hashes)) - return p2p.Send(p.rw, GetPooledTransactionsMsg, hashes) -} - -// Handshake executes the eth protocol handshake, negotiating version number, -// network IDs, difficulties, head and genesis blocks. 
-func (p *peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error { - // Send out own handshake in a new thread - errc := make(chan error, 2) - - var ( - status63 statusData63 // safe to read after two values have been received from errc - status statusData // safe to read after two values have been received from errc - ) - go func() { - switch { - case p.version == eth63: - errc <- p2p.Send(p.rw, StatusMsg, &statusData63{ - ProtocolVersion: uint32(p.version), - NetworkId: network, - TD: td, - CurrentBlock: head, - GenesisBlock: genesis, - }) - case p.version >= eth64: - errc <- p2p.Send(p.rw, StatusMsg, &statusData{ - ProtocolVersion: uint32(p.version), - NetworkID: network, - TD: td, - Head: head, - Genesis: genesis, - ForkID: forkID, - }) - default: - panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) - } - }() - go func() { - switch { - case p.version == eth63: - errc <- p.readStatusLegacy(network, &status63, genesis) - case p.version >= eth64: - errc <- p.readStatus(network, &status, genesis, forkFilter) - default: - panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) - } - }() - timeout := time.NewTimer(handshakeTimeout) - defer timeout.Stop() - for i := 0; i < 2; i++ { - select { - case err := <-errc: - if err != nil { - return err - } - case <-timeout.C: - return p2p.DiscReadTimeout - } - } - switch { - case p.version == eth63: - p.td, p.head = status63.TD, status63.CurrentBlock - case p.version >= eth64: - p.td, p.head = status.TD, status.Head - default: - panic(fmt.Sprintf("unsupported eth protocol version: %d", p.version)) - } - return nil -} - -func (p *peer) readStatusLegacy(network uint64, status *statusData63, genesis common.Hash) error { - msg, err := p.rw.ReadMsg() - if err != nil { - return err - } - if msg.Code != StatusMsg { - return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) - } - if msg.Size > protocolMaxMsgSize { - return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize) - } - // Decode the handshake and make sure everything matches - if err := msg.Decode(&status); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - if status.GenesisBlock != genesis { - return errResp(ErrGenesisMismatch, "%x (!= %x)", status.GenesisBlock[:8], genesis[:8]) - } - if status.NetworkId != network { - return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkId, network) - } - if int(status.ProtocolVersion) != p.version { - return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) - } - return nil -} - -func (p *peer) readStatus(network uint64, status *statusData, genesis common.Hash, forkFilter forkid.Filter) error { - msg, err := p.rw.ReadMsg() - if err != nil { - return err - } - if msg.Code != StatusMsg { - return errResp(ErrNoStatusMsg, "first msg has code %x (!= %x)", msg.Code, StatusMsg) - } - if msg.Size > protocolMaxMsgSize { - return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, protocolMaxMsgSize) - } - // Decode the handshake and make sure everything matches - if err := msg.Decode(&status); err != nil { - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - if status.NetworkID != network { - return errResp(ErrNetworkIDMismatch, "%d (!= %d)", status.NetworkID, network) - } - if int(status.ProtocolVersion) != p.version { - return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", status.ProtocolVersion, p.version) - } - if status.Genesis != genesis { - 
return errResp(ErrGenesisMismatch, "%x (!= %x)", status.Genesis, genesis) - } - if err := forkFilter(status.ForkID); err != nil { - return errResp(ErrForkIDRejected, "%v", err) - } - return nil -} - -// String implements fmt.Stringer. -func (p *peer) String() string { - return fmt.Sprintf("Peer %s [%s]", p.id, - fmt.Sprintf("eth/%2d", p.version), - ) -} - -// peerSet represents the collection of active peers currently participating in -// the Ethereum sub-protocol. -type peerSet struct { - peers map[string]*peer - lock sync.RWMutex - closed bool -} - -// newPeerSet creates a new peer set to track the active participants. -func newPeerSet() *peerSet { - return &peerSet{ - peers: make(map[string]*peer), - } -} - -// Register injects a new peer into the working set, or returns an error if the -// peer is already known. If a new peer it registered, its broadcast loop is also -// started. -func (ps *peerSet) Register(p *peer, removePeer func(string)) error { - ps.lock.Lock() - defer ps.lock.Unlock() - - if ps.closed { - return errClosed - } - if _, ok := ps.peers[p.id]; ok { - return errAlreadyRegistered - } - ps.peers[p.id] = p - - go p.broadcastBlocks(removePeer) - go p.broadcastTransactions(removePeer) - if p.version >= eth65 { - go p.announceTransactions(removePeer) - } - return nil -} - -// Unregister removes a remote peer from the active set, disabling any further -// actions to/from that particular entity. -func (ps *peerSet) Unregister(id string) error { - ps.lock.Lock() - defer ps.lock.Unlock() - - p, ok := ps.peers[id] - if !ok { - return errNotRegistered - } - delete(ps.peers, id) - p.close() - - return nil -} - -// Peer retrieves the registered peer with the given id. -func (ps *peerSet) Peer(id string) *peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return ps.peers[id] -} - -// Len returns if the current number of peers in the set. -func (ps *peerSet) Len() int { - ps.lock.RLock() - defer ps.lock.RUnlock() - - return len(ps.peers) -} - -// PeersWithoutBlock retrieves a list of peers that do not have a given block in -// their set of known hashes. -func (ps *peerSet) PeersWithoutBlock(hash common.Hash) []*peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peer, 0, len(ps.peers)) - for _, p := range ps.peers { - if !p.knownBlocks.Contains(hash) { - list = append(list, p) - } - } - return list -} - -// PeersWithoutTx retrieves a list of peers that do not have a given transaction -// in their set of known hashes. -func (ps *peerSet) PeersWithoutTx(hash common.Hash) []*peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - list := make([]*peer, 0, len(ps.peers)) - for _, p := range ps.peers { - if !p.knownTxs.Contains(hash) { - list = append(list, p) - } - } - return list +// snapPeerInfo represents a short summary of the `snap` sub-protocol metadata known +// about a connected peer. +type snapPeerInfo struct { + Version uint `json:"version"` // Snapshot protocol version negotiated } -// BestPeer retrieves the known peer with the currently highest total difficulty. -func (ps *peerSet) BestPeer() *peer { - ps.lock.RLock() - defer ps.lock.RUnlock() - - var ( - bestPeer *peer - bestTd *big.Int - ) - for _, p := range ps.peers { - if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 { - bestPeer, bestTd = p, td - } - } - return bestPeer +// snapPeer is a wrapper around snap.Peer to maintain a few extra metadata. +type snapPeer struct { + *snap.Peer } -// Close disconnects all peers. -// No new peers can be registered after Close has returned. 
-func (ps *peerSet) Close() { - ps.lock.Lock() - defer ps.lock.Unlock() - - for _, p := range ps.peers { - p.Disconnect(p2p.DiscQuitting) +// info gathers and returns some `snap` protocol metadata known about a peer. +func (p *snapPeer) info() *snapPeerInfo { + return &snapPeerInfo{ + Version: p.Version(), } - ps.closed = true } diff --git a/eth/peerset.go b/eth/peerset.go new file mode 100644 index 0000000000..1e864a8e46 --- /dev/null +++ b/eth/peerset.go @@ -0,0 +1,259 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "math/big" + "sync" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/eth/protocols/eth" + "github.com/ethereum/go-ethereum/eth/protocols/snap" + "github.com/ethereum/go-ethereum/p2p" +) + +var ( + // errPeerSetClosed is returned if a peer is attempted to be added or removed + // from the peer set after it has been terminated. + errPeerSetClosed = errors.New("peerset closed") + + // errPeerAlreadyRegistered is returned if a peer is attempted to be added + // to the peer set, but one with the same id already exists. + errPeerAlreadyRegistered = errors.New("peer already registered") + + // errPeerNotRegistered is returned if a peer is attempted to be removed from + // a peer set, but no peer with the given id exists. + errPeerNotRegistered = errors.New("peer not registered") + + // errSnapWithoutEth is returned if a peer attempts to connect only on the + // snap protocol without advertizing the eth main protocol. + errSnapWithoutEth = errors.New("peer connected on snap without compatible eth support") +) + +// peerSet represents the collection of active peers currently participating in +// the `eth` protocol, with or without the `snap` extension. +type peerSet struct { + peers map[string]*ethPeer // Peers connected on the `eth` protocol + snapPeers int // Number of `snap` compatible peers for connection prioritization + + snapWait map[string]chan *snap.Peer // Peers connected on `eth` waiting for their snap extension + snapPend map[string]*snap.Peer // Peers connected on the `snap` protocol, but not yet on `eth` + + lock sync.RWMutex + closed bool +} + +// newPeerSet creates a new peer set to track the active participants. +func newPeerSet() *peerSet { + return &peerSet{ + peers: make(map[string]*ethPeer), + snapWait: make(map[string]chan *snap.Peer), + snapPend: make(map[string]*snap.Peer), + } +} + +// registerSnapExtension unblocks an already connected `eth` peer waiting for its +// `snap` extension, or if no such peer exists, tracks the extension for the time +// being until the `eth` main protocol starts looking for it. 
+func (ps *peerSet) registerSnapExtension(peer *snap.Peer) error { + // Reject the peer if it advertises `snap` without `eth` as `snap` is only a + // satellite protocol meaningful with the chain selection of `eth` + if !peer.RunningCap(eth.ProtocolName, eth.ProtocolVersions) { + return errSnapWithoutEth + } + // Ensure nobody can double connect + ps.lock.Lock() + defer ps.lock.Unlock() + + id := peer.ID() + if _, ok := ps.peers[id]; ok { + return errPeerAlreadyRegistered // avoid connections with the same id as existing ones + } + if _, ok := ps.snapPend[id]; ok { + return errPeerAlreadyRegistered // avoid connections with the same id as pending ones + } + // Inject the peer into an `eth` counterpart is available, otherwise save for later + if wait, ok := ps.snapWait[id]; ok { + delete(ps.snapWait, id) + wait <- peer + return nil + } + ps.snapPend[id] = peer + return nil +} + +// waitExtensions blocks until all satellite protocols are connected and tracked +// by the peerset. +func (ps *peerSet) waitSnapExtension(peer *eth.Peer) (*snap.Peer, error) { + // If the peer does not support a compatible `snap`, don't wait + if !peer.RunningCap(snap.ProtocolName, snap.ProtocolVersions) { + return nil, nil + } + // Ensure nobody can double connect + ps.lock.Lock() + + id := peer.ID() + if _, ok := ps.peers[id]; ok { + ps.lock.Unlock() + return nil, errPeerAlreadyRegistered // avoid connections with the same id as existing ones + } + if _, ok := ps.snapWait[id]; ok { + ps.lock.Unlock() + return nil, errPeerAlreadyRegistered // avoid connections with the same id as pending ones + } + // If `snap` already connected, retrieve the peer from the pending set + if snap, ok := ps.snapPend[id]; ok { + delete(ps.snapPend, id) + + ps.lock.Unlock() + return snap, nil + } + // Otherwise wait for `snap` to connect concurrently + wait := make(chan *snap.Peer) + ps.snapWait[id] = wait + ps.lock.Unlock() + + return <-wait, nil +} + +// registerPeer injects a new `eth` peer into the working set, or returns an error +// if the peer is already known. +func (ps *peerSet) registerPeer(peer *eth.Peer, ext *snap.Peer) error { + // Start tracking the new peer + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.closed { + return errPeerSetClosed + } + id := peer.ID() + if _, ok := ps.peers[id]; ok { + return errPeerAlreadyRegistered + } + eth := ðPeer{ + Peer: peer, + } + if ext != nil { + eth.snapExt = &snapPeer{ext} + ps.snapPeers++ + } + ps.peers[id] = eth + return nil +} + +// unregisterPeer removes a remote peer from the active set, disabling any further +// actions to/from that particular entity. +func (ps *peerSet) unregisterPeer(id string) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + peer, ok := ps.peers[id] + if !ok { + return errPeerNotRegistered + } + delete(ps.peers, id) + if peer.snapExt != nil { + ps.snapPeers-- + } + return nil +} + +// peer retrieves the registered peer with the given id. +func (ps *peerSet) peer(id string) *ethPeer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return ps.peers[id] +} + +// peersWithoutBlock retrieves a list of peers that do not have a given block in +// their set of known hashes so it might be propagated to them. 
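registerSnapExtension and waitSnapExtension above form a small rendezvous: whichever handshake finishes first either parks its peer in snapPend or leaves a channel in snapWait for the other side to complete. The standalone sketch below reproduces just that pairing logic with simplified types; it drops the duplicate-registration and snap-without-eth checks and is not part of the patch.

package main

import (
	"fmt"
	"sync"
)

// registry pairs a main connection with its late-arriving satellite protocol,
// mirroring the snapWait/snapPend bookkeeping of the peerset.
type registry struct {
	lock sync.Mutex
	wait map[string]chan string // main protocol arrived first, waiting for the satellite
	pend map[string]string      // satellite arrived first, waiting for the main protocol
}

func newRegistry() *registry {
	return &registry{
		wait: make(map[string]chan string),
		pend: make(map[string]string),
	}
}

// registerSatellite mirrors registerSnapExtension: unblock a waiting main
// connection if one exists, otherwise park the satellite for later pickup.
func (r *registry) registerSatellite(id, satellite string) {
	r.lock.Lock()
	defer r.lock.Unlock()

	if ch, ok := r.wait[id]; ok {
		delete(r.wait, id)
		ch <- satellite
		return
	}
	r.pend[id] = satellite
}

// waitSatellite mirrors waitSnapExtension: consume an already parked satellite,
// or block until registerSatellite delivers one.
func (r *registry) waitSatellite(id string) string {
	r.lock.Lock()
	if satellite, ok := r.pend[id]; ok {
		delete(r.pend, id)
		r.lock.Unlock()
		return satellite
	}
	ch := make(chan string)
	r.wait[id] = ch
	r.lock.Unlock()

	return <-ch
}

func main() {
	r := newRegistry()
	go r.registerSatellite("peer-1", "snap/1") // satellite handshake running concurrently
	fmt.Println(r.waitSatellite("peer-1"))     // prints "snap/1" whichever side finished first
}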
+func (ps *peerSet) peersWithoutBlock(hash common.Hash) []*ethPeer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + list := make([]*ethPeer, 0, len(ps.peers)) + for _, p := range ps.peers { + if !p.KnownBlock(hash) { + list = append(list, p) + } + } + return list +} + +// peersWithoutTransaction retrieves a list of peers that do not have a given +// transaction in their set of known hashes. +func (ps *peerSet) peersWithoutTransaction(hash common.Hash) []*ethPeer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + list := make([]*ethPeer, 0, len(ps.peers)) + for _, p := range ps.peers { + if !p.KnownTransaction(hash) { + list = append(list, p) + } + } + return list +} + +// len returns if the current number of `eth` peers in the set. Since the `snap` +// peers are tied to the existence of an `eth` connection, that will always be a +// subset of `eth`. +func (ps *peerSet) len() int { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return len(ps.peers) +} + +// snapLen returns if the current number of `snap` peers in the set. +func (ps *peerSet) snapLen() int { + ps.lock.RLock() + defer ps.lock.RUnlock() + + return ps.snapPeers +} + +// peerWithHighestTD retrieves the known peer with the currently highest total +// difficulty. +func (ps *peerSet) peerWithHighestTD() *eth.Peer { + ps.lock.RLock() + defer ps.lock.RUnlock() + + var ( + bestPeer *eth.Peer + bestTd *big.Int + ) + for _, p := range ps.peers { + if _, td := p.Head(); bestPeer == nil || td.Cmp(bestTd) > 0 { + bestPeer, bestTd = p.Peer, td + } + } + return bestPeer +} + +// close disconnects all peers. +func (ps *peerSet) close() { + ps.lock.Lock() + defer ps.lock.Unlock() + + for _, p := range ps.peers { + p.Disconnect(p2p.DiscQuitting) + } + ps.closed = true +} diff --git a/eth/protocol.go b/eth/protocol.go deleted file mode 100644 index dc75d6b31a..0000000000 --- a/eth/protocol.go +++ /dev/null @@ -1,221 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package eth - -import ( - "fmt" - "io" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/rlp" -) - -// Constants to match up protocol versions and messages -const ( - eth63 = 63 - eth64 = 64 - eth65 = 65 -) - -// protocolName is the official short name of the protocol used during capability negotiation. -const protocolName = "eth" - -// ProtocolVersions are the supported versions of the eth protocol (first is primary). -var ProtocolVersions = []uint{eth65, eth64, eth63} - -// protocolLengths are the number of implemented message corresponding to different protocol versions. 
-var protocolLengths = map[uint]uint64{eth65: 17, eth64: 17, eth63: 17} - -const protocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message - -// eth protocol message codes -const ( - StatusMsg = 0x00 - NewBlockHashesMsg = 0x01 - TransactionMsg = 0x02 - GetBlockHeadersMsg = 0x03 - BlockHeadersMsg = 0x04 - GetBlockBodiesMsg = 0x05 - BlockBodiesMsg = 0x06 - NewBlockMsg = 0x07 - GetNodeDataMsg = 0x0d - NodeDataMsg = 0x0e - GetReceiptsMsg = 0x0f - ReceiptsMsg = 0x10 - - // New protocol message codes introduced in eth65 - // - // Previously these message ids were used by some legacy and unsupported - // eth protocols, reown them here. - NewPooledTransactionHashesMsg = 0x08 - GetPooledTransactionsMsg = 0x09 - PooledTransactionsMsg = 0x0a -) - -type errCode int - -const ( - ErrMsgTooLarge = iota - ErrDecode - ErrInvalidMsgCode - ErrProtocolVersionMismatch - ErrNetworkIDMismatch - ErrGenesisMismatch - ErrForkIDRejected - ErrNoStatusMsg - ErrExtraStatusMsg -) - -func (e errCode) String() string { - return errorToString[int(e)] -} - -// XXX change once legacy code is out -var errorToString = map[int]string{ - ErrMsgTooLarge: "Message too long", - ErrDecode: "Invalid message", - ErrInvalidMsgCode: "Invalid message code", - ErrProtocolVersionMismatch: "Protocol version mismatch", - ErrNetworkIDMismatch: "Network ID mismatch", - ErrGenesisMismatch: "Genesis mismatch", - ErrForkIDRejected: "Fork ID rejected", - ErrNoStatusMsg: "No status message", - ErrExtraStatusMsg: "Extra status message", -} - -type txPool interface { - // Has returns an indicator whether txpool has a transaction - // cached with the given hash. - Has(hash common.Hash) bool - - // Get retrieves the transaction from local txpool with given - // tx hash. - Get(hash common.Hash) *types.Transaction - - // AddRemotes should add the given transactions to the pool. - AddRemotes([]*types.Transaction) []error - - // Pending should return pending transactions. - // The slice should be modifiable by the caller. - Pending() (map[common.Address]types.Transactions, error) - - // SubscribeNewTxsEvent should return an event subscription of - // NewTxsEvent and send events to the given channel. - SubscribeNewTxsEvent(chan<- core.NewTxsEvent) event.Subscription -} - -// statusData63 is the network packet for the status message for eth/63. -type statusData63 struct { - ProtocolVersion uint32 - NetworkId uint64 - TD *big.Int - CurrentBlock common.Hash - GenesisBlock common.Hash -} - -// statusData is the network packet for the status message for eth/64 and later. -type statusData struct { - ProtocolVersion uint32 - NetworkID uint64 - TD *big.Int - Head common.Hash - Genesis common.Hash - ForkID forkid.ID -} - -// newBlockHashesData is the network packet for the block announcements. -type newBlockHashesData []struct { - Hash common.Hash // Hash of one particular block being announced - Number uint64 // Number of one particular block being announced -} - -// getBlockHeadersData represents a block header query. -type getBlockHeadersData struct { - Origin hashOrNumber // Block from which to retrieve headers - Amount uint64 // Maximum number of headers to retrieve - Skip uint64 // Blocks to skip between consecutive headers - Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) -} - -// hashOrNumber is a combined field for specifying an origin block. 
-type hashOrNumber struct { - Hash common.Hash // Block hash from which to retrieve headers (excludes Number) - Number uint64 // Block hash from which to retrieve headers (excludes Hash) -} - -// EncodeRLP is a specialized encoder for hashOrNumber to encode only one of the -// two contained union fields. -func (hn *hashOrNumber) EncodeRLP(w io.Writer) error { - if hn.Hash == (common.Hash{}) { - return rlp.Encode(w, hn.Number) - } - if hn.Number != 0 { - return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number) - } - return rlp.Encode(w, hn.Hash) -} - -// DecodeRLP is a specialized decoder for hashOrNumber to decode the contents -// into either a block hash or a block number. -func (hn *hashOrNumber) DecodeRLP(s *rlp.Stream) error { - _, size, _ := s.Kind() - origin, err := s.Raw() - if err == nil { - switch { - case size == 32: - err = rlp.DecodeBytes(origin, &hn.Hash) - case size <= 8: - err = rlp.DecodeBytes(origin, &hn.Number) - default: - err = fmt.Errorf("invalid input size %d for origin", size) - } - } - return err -} - -// newBlockData is the network packet for the block propagation message. -type newBlockData struct { - Block *types.Block - TD *big.Int -} - -// sanityCheck verifies that the values are reasonable, as a DoS protection -func (request *newBlockData) sanityCheck() error { - if err := request.Block.SanityCheck(); err != nil { - return err - } - //TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times - // larger, it will still fit within 100 bits - if tdlen := request.TD.BitLen(); tdlen > 100 { - return fmt.Errorf("too large block TD: bitlen %d", tdlen) - } - return nil -} - -// blockBody represents the data content of a single block. -type blockBody struct { - Transactions []*types.Transaction // Transactions contained within a block - Uncles []*types.Header // Uncles contained within a block -} - -// blockBodiesData is the network packet for block content distribution. -type blockBodiesData []*blockBody diff --git a/eth/protocol_test.go b/eth/protocol_test.go deleted file mode 100644 index 331dd05ce1..0000000000 --- a/eth/protocol_test.go +++ /dev/null @@ -1,459 +0,0 @@ -// Copyright 2014 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
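The hashOrNumber codec being deleted above (the query origin presumably relocated along with the rest of the protocol definitions in this rework) relies on RLP carrying the payload size: a hash arrives as a 32-byte string, a block number as a string of at most 8 bytes, so DecodeRLP can tell them apart by size alone. A standalone sketch of the two encodings, not part of the patch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

func main() {
	// A hash origin encodes as a 32-byte RLP string: 1 prefix byte + 32 payload bytes
	byHash, _ := rlp.EncodeToBytes(common.HexToHash("0xdeadbeef"))
	fmt.Printf("hash origin:   %d RLP bytes\n", len(byHash)) // 33

	// A number origin encodes as a short string of at most 8 payload bytes
	byNumber, _ := rlp.EncodeToBytes(uint64(1000000))
	fmt.Printf("number origin: %d RLP bytes\n", len(byNumber)) // 4
}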
- -package eth - -import ( - "fmt" - "math/big" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/consensus/ethash" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/forkid" - "github.com/ethereum/go-ethereum/core/rawdb" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth/downloader" - "github.com/ethereum/go-ethereum/event" - "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rlp" -) - -func init() { - // log.Root().SetHandler(log.LvlFilterHandler(log.LvlTrace, log.StreamHandler(os.Stderr, log.TerminalFormat(false)))) -} - -var testAccount, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - -// Tests that handshake failures are detected and reported correctly. -func TestStatusMsgErrors63(t *testing.T) { - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) - var ( - genesis = pm.blockchain.Genesis() - head = pm.blockchain.CurrentHeader() - td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) - ) - defer pm.Stop() - - tests := []struct { - code uint64 - data interface{} - wantError error - }{ - { - code: TransactionMsg, data: []interface{}{}, - wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), - }, - { - code: StatusMsg, data: statusData63{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash()}, - wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 63), - }, - { - code: StatusMsg, data: statusData63{63, 999, td, head.Hash(), genesis.Hash()}, - wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId), - }, - { - code: StatusMsg, data: statusData63{63, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}}, - wantError: errResp(ErrGenesisMismatch, "0300000000000000 (!= %x)", genesis.Hash().Bytes()[:8]), - }, - } - for i, test := range tests { - p, errc := newTestPeer("peer", 63, pm, false) - // The send call might hang until reset because - // the protocol might not read the payload. 
- go p2p.Send(p.app, test.code, test.data) - - select { - case err := <-errc: - if err == nil { - t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError) - } else if err.Error() != test.wantError.Error() { - t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError) - } - case <-time.After(2 * time.Second): - t.Errorf("protocol did not shut down within 2 seconds") - } - p.close() - } -} - -func TestStatusMsgErrors64(t *testing.T) { - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) - var ( - genesis = pm.blockchain.Genesis() - head = pm.blockchain.CurrentHeader() - td = pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) - forkID = forkid.NewID(pm.blockchain.Config(), pm.blockchain.Genesis().Hash(), pm.blockchain.CurrentHeader().Number.Uint64()) - ) - defer pm.Stop() - - tests := []struct { - code uint64 - data interface{} - wantError error - }{ - { - code: TransactionMsg, data: []interface{}{}, - wantError: errResp(ErrNoStatusMsg, "first msg has code 2 (!= 0)"), - }, - { - code: StatusMsg, data: statusData{10, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkID}, - wantError: errResp(ErrProtocolVersionMismatch, "10 (!= %d)", 64), - }, - { - code: StatusMsg, data: statusData{64, 999, td, head.Hash(), genesis.Hash(), forkID}, - wantError: errResp(ErrNetworkIDMismatch, "999 (!= %d)", DefaultConfig.NetworkId), - }, - { - code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), common.Hash{3}, forkID}, - wantError: errResp(ErrGenesisMismatch, "0300000000000000000000000000000000000000000000000000000000000000 (!= %x)", genesis.Hash()), - }, - { - code: StatusMsg, data: statusData{64, DefaultConfig.NetworkId, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}}, - wantError: errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()), - }, - } - for i, test := range tests { - p, errc := newTestPeer("peer", 64, pm, false) - // The send call might hang until reset because - // the protocol might not read the payload. 
- go p2p.Send(p.app, test.code, test.data) - - select { - case err := <-errc: - if err == nil { - t.Errorf("test %d: protocol returned nil error, want %q", i, test.wantError) - } else if err.Error() != test.wantError.Error() { - t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.wantError) - } - case <-time.After(2 * time.Second): - t.Errorf("protocol did not shut down within 2 seconds") - } - p.close() - } -} - -func TestForkIDSplit(t *testing.T) { - var ( - engine = ethash.NewFaker() - - configNoFork = ¶ms.ChainConfig{HomesteadBlock: big.NewInt(1)} - configProFork = ¶ms.ChainConfig{ - HomesteadBlock: big.NewInt(1), - EIP150Block: big.NewInt(2), - EIP155Block: big.NewInt(2), - EIP158Block: big.NewInt(2), - ByzantiumBlock: big.NewInt(3), - } - dbNoFork = rawdb.NewMemoryDatabase() - dbProFork = rawdb.NewMemoryDatabase() - - gspecNoFork = &core.Genesis{Config: configNoFork} - gspecProFork = &core.Genesis{Config: configProFork} - - genesisNoFork = gspecNoFork.MustCommit(dbNoFork) - genesisProFork = gspecProFork.MustCommit(dbProFork) - - chainNoFork, _ = core.NewBlockChain(dbNoFork, nil, configNoFork, engine, vm.Config{}, nil, nil) - chainProFork, _ = core.NewBlockChain(dbProFork, nil, configProFork, engine, vm.Config{}, nil, nil) - - blocksNoFork, _ = core.GenerateChain(configNoFork, genesisNoFork, engine, dbNoFork, 2, nil) - blocksProFork, _ = core.GenerateChain(configProFork, genesisProFork, engine, dbProFork, 2, nil) - - ethNoFork, _ = NewProtocolManager(configNoFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainNoFork, dbNoFork, 1, nil) - ethProFork, _ = NewProtocolManager(configProFork, nil, downloader.FullSync, 1, new(event.TypeMux), &testTxPool{pool: make(map[common.Hash]*types.Transaction)}, engine, chainProFork, dbProFork, 1, nil) - ) - ethNoFork.Start(1000) - ethProFork.Start(1000) - - // Both nodes should allow the other to connect (same genesis, next fork is the same) - p2pNoFork, p2pProFork := p2p.MsgPipe() - peerNoFork := newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork := newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) - - errc := make(chan error, 2) - go func() { errc <- ethNoFork.handle(peerProFork) }() - go func() { errc <- ethProFork.handle(peerNoFork) }() - - select { - case err := <-errc: - t.Fatalf("frontier nofork <-> profork failed: %v", err) - case <-time.After(250 * time.Millisecond): - p2pNoFork.Close() - p2pProFork.Close() - } - // Progress into Homestead. Fork's match, so we don't care what the future holds - chainNoFork.InsertChain(blocksNoFork[:1]) - chainProFork.InsertChain(blocksProFork[:1]) - - p2pNoFork, p2pProFork = p2p.MsgPipe() - peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) - - errc = make(chan error, 2) - go func() { errc <- ethNoFork.handle(peerProFork) }() - go func() { errc <- ethProFork.handle(peerNoFork) }() - - select { - case err := <-errc: - t.Fatalf("homestead nofork <-> profork failed: %v", err) - case <-time.After(250 * time.Millisecond): - p2pNoFork.Close() - p2pProFork.Close() - } - // Progress into Spurious. 
Forks mismatch, signalling differing chains, reject - chainNoFork.InsertChain(blocksNoFork[1:2]) - chainProFork.InsertChain(blocksProFork[1:2]) - - p2pNoFork, p2pProFork = p2p.MsgPipe() - peerNoFork = newPeer(64, p2p.NewPeer(enode.ID{1}, "", nil), p2pNoFork, nil) - peerProFork = newPeer(64, p2p.NewPeer(enode.ID{2}, "", nil), p2pProFork, nil) - - errc = make(chan error, 2) - go func() { errc <- ethNoFork.handle(peerProFork) }() - go func() { errc <- ethProFork.handle(peerNoFork) }() - - select { - case err := <-errc: - if want := errResp(ErrForkIDRejected, forkid.ErrLocalIncompatibleOrStale.Error()); err.Error() != want.Error() { - t.Fatalf("fork ID rejection error mismatch: have %v, want %v", err, want) - } - case <-time.After(250 * time.Millisecond): - t.Fatalf("split peers not rejected") - } -} - -// This test checks that received transactions are added to the local pool. -func TestRecvTransactions63(t *testing.T) { testRecvTransactions(t, 63) } -func TestRecvTransactions64(t *testing.T) { testRecvTransactions(t, 64) } -func TestRecvTransactions65(t *testing.T) { testRecvTransactions(t, 65) } - -func testRecvTransactions(t *testing.T, protocol int) { - txAdded := make(chan []*types.Transaction) - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, txAdded) - pm.acceptTxs = 1 // mark synced to accept transactions - p, _ := newTestPeer("peer", protocol, pm, true) - defer pm.Stop() - defer p.close() - - tx := newTestTransaction(testAccount, 0, 0) - if err := p2p.Send(p.app, TransactionMsg, []interface{}{tx}); err != nil { - t.Fatalf("send error: %v", err) - } - select { - case added := <-txAdded: - if len(added) != 1 { - t.Errorf("wrong number of added transactions: got %d, want 1", len(added)) - } else if added[0].Hash() != tx.Hash() { - t.Errorf("added wrong tx hash: got %v, want %v", added[0].Hash(), tx.Hash()) - } - case <-time.After(2 * time.Second): - t.Errorf("no NewTxsEvent received within 2 seconds") - } -} - -// This test checks that pending transactions are sent. -func TestSendTransactions63(t *testing.T) { testSendTransactions(t, 63) } -func TestSendTransactions64(t *testing.T) { testSendTransactions(t, 64) } -func TestSendTransactions65(t *testing.T) { testSendTransactions(t, 65) } - -func testSendTransactions(t *testing.T, protocol int) { - pm, _ := newTestProtocolManagerMust(t, downloader.FullSync, 0, nil, nil) - defer pm.Stop() - - // Fill the pool with big transactions (use a subscription to wait until all - // the transactions are announced to avoid spurious events causing extra - // broadcasts). - const txsize = txsyncPackSize / 10 - alltxs := make([]*types.Transaction, 100) - for nonce := range alltxs { - alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), txsize) - } - pm.txpool.AddRemotes(alltxs) - time.Sleep(100 * time.Millisecond) // Wait until new tx even gets out of the system (lame) - - // Connect several peers. They should all receive the pending transactions. 
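TestForkIDSplit above drives its accept/reject decisions entirely through forkid.NewID, which folds the chain config, genesis hash and head number into a 4-byte checksum plus a next-fork hint. As a small sketch of how the identifier changes when configured forks are crossed, the heights below are the well-known mainnet Homestead and Spurious Dragon activation blocks:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    // The checksum only changes when a configured fork block is passed,
    // which is exactly what the nofork/profork split test above relies on.
    for _, head := range []uint64{0, 1_150_000, 2_675_000} {
        id := forkid.NewID(params.MainnetChainConfig, params.MainnetGenesisHash, head)
        fmt.Printf("head %9d -> fork hash %x, next fork at %d\n", head, id.Hash, id.Next)
    }
}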
- var wg sync.WaitGroup - checktxs := func(p *testPeer) { - defer wg.Done() - defer p.close() - seen := make(map[common.Hash]bool) - for _, tx := range alltxs { - seen[tx.Hash()] = false - } - for n := 0; n < len(alltxs) && !t.Failed(); { - var forAllHashes func(callback func(hash common.Hash)) - switch protocol { - case 63: - fallthrough - case 64: - msg, err := p.app.ReadMsg() - if err != nil { - t.Errorf("%v: read error: %v", p.Peer, err) - continue - } else if msg.Code != TransactionMsg { - t.Errorf("%v: got code %d, want TxMsg", p.Peer, msg.Code) - continue - } - var txs []*types.Transaction - if err := msg.Decode(&txs); err != nil { - t.Errorf("%v: %v", p.Peer, err) - continue - } - forAllHashes = func(callback func(hash common.Hash)) { - for _, tx := range txs { - callback(tx.Hash()) - } - } - case 65: - msg, err := p.app.ReadMsg() - if err != nil { - t.Errorf("%v: read error: %v", p.Peer, err) - continue - } else if msg.Code != NewPooledTransactionHashesMsg { - t.Errorf("%v: got code %d, want NewPooledTransactionHashesMsg", p.Peer, msg.Code) - continue - } - var hashes []common.Hash - if err := msg.Decode(&hashes); err != nil { - t.Errorf("%v: %v", p.Peer, err) - continue - } - forAllHashes = func(callback func(hash common.Hash)) { - for _, h := range hashes { - callback(h) - } - } - } - forAllHashes(func(hash common.Hash) { - seentx, want := seen[hash] - if seentx { - t.Errorf("%v: got tx more than once: %x", p.Peer, hash) - } - if !want { - t.Errorf("%v: got unexpected tx: %x", p.Peer, hash) - } - seen[hash] = true - n++ - }) - } - } - for i := 0; i < 3; i++ { - p, _ := newTestPeer(fmt.Sprintf("peer #%d", i), protocol, pm, true) - wg.Add(1) - go checktxs(p) - } - wg.Wait() -} - -func TestTransactionPropagation(t *testing.T) { testSyncTransaction(t, true) } -func TestTransactionAnnouncement(t *testing.T) { testSyncTransaction(t, false) } - -func testSyncTransaction(t *testing.T, propagtion bool) { - // Create a protocol manager for transaction fetcher and sender - pmFetcher, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil) - defer pmFetcher.Stop() - pmSender, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil) - pmSender.broadcastTxAnnouncesOnly = !propagtion - defer pmSender.Stop() - - // Sync up the two peers - io1, io2 := p2p.MsgPipe() - - go pmSender.handle(pmSender.newPeer(65, p2p.NewPeer(enode.ID{}, "sender", nil), io2, pmSender.txpool.Get)) - go pmFetcher.handle(pmFetcher.newPeer(65, p2p.NewPeer(enode.ID{}, "fetcher", nil), io1, pmFetcher.txpool.Get)) - - time.Sleep(250 * time.Millisecond) - pmFetcher.doSync(peerToSyncOp(downloader.FullSync, pmFetcher.peers.BestPeer())) - atomic.StoreUint32(&pmFetcher.acceptTxs, 1) - - newTxs := make(chan core.NewTxsEvent, 1024) - sub := pmFetcher.txpool.SubscribeNewTxsEvent(newTxs) - defer sub.Unsubscribe() - - // Fill the pool with new transactions - alltxs := make([]*types.Transaction, 1024) - for nonce := range alltxs { - alltxs[nonce] = newTestTransaction(testAccount, uint64(nonce), 0) - } - pmSender.txpool.AddRemotes(alltxs) - - var got int -loop: - for { - select { - case ev := <-newTxs: - got += len(ev.Txs) - if got == 1024 { - break loop - } - case <-time.NewTimer(time.Second).C: - t.Fatal("Failed to retrieve all transaction") - } - } -} - -// Tests that the custom union field encoder and decoder works correctly. 
-func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { - // Create a "random" hash for testing - var hash common.Hash - for i := range hash { - hash[i] = byte(i) - } - // Assemble some table driven tests - tests := []struct { - packet *getBlockHeadersData - fail bool - }{ - // Providing the origin as either a hash or a number should both work - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}}}, - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}}}, - - // Providing arbitrary query field should also work - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, - {fail: false, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, - - // Providing both the origin hash and origin number must fail - {fail: true, packet: &getBlockHeadersData{Origin: hashOrNumber{Hash: hash, Number: 314}}}, - } - // Iterate over each of the tests and try to encode and then decode - for i, tt := range tests { - bytes, err := rlp.EncodeToBytes(tt.packet) - if err != nil && !tt.fail { - t.Fatalf("test %d: failed to encode packet: %v", i, err) - } else if err == nil && tt.fail { - t.Fatalf("test %d: encode should have failed", i) - } - if !tt.fail { - packet := new(getBlockHeadersData) - if err := rlp.DecodeBytes(bytes, packet); err != nil { - t.Fatalf("test %d: failed to decode packet: %v", i, err) - } - if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount || - packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse { - t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet) - } - } - } -} diff --git a/eth/protocols/eth/broadcast.go b/eth/protocols/eth/broadcast.go new file mode 100644 index 0000000000..328396d510 --- /dev/null +++ b/eth/protocols/eth/broadcast.go @@ -0,0 +1,195 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" +) + +const ( + // This is the target size for the packs of transactions or announcements. A + // pack can get larger than this if a single transactions exceeds this size. + maxTxPacketSize = 100 * 1024 +) + +// blockPropagation is a block propagation event, waiting for its turn in the +// broadcast queue. +type blockPropagation struct { + block *types.Block + td *big.Int +} + +// broadcastBlocks is a write loop that multiplexes blocks and block accouncements +// to the remote peer. The goal is to have an async writer that does not lock up +// node internals and at the same time rate limits queued data. 
+func (p *Peer) broadcastBlocks() { + for { + select { + case prop := <-p.queuedBlocks: + if err := p.SendNewBlock(prop.block, prop.td); err != nil { + return + } + p.Log().Trace("Propagated block", "number", prop.block.Number(), "hash", prop.block.Hash(), "td", prop.td) + + case block := <-p.queuedBlockAnns: + if err := p.SendNewBlockHashes([]common.Hash{block.Hash()}, []uint64{block.NumberU64()}); err != nil { + return + } + p.Log().Trace("Announced block", "number", block.Number(), "hash", block.Hash()) + + case <-p.term: + return + } + } +} + +// broadcastTransactions is a write loop that schedules transaction broadcasts +// to the remote peer. The goal is to have an async writer that does not lock up +// node internals and at the same time rate limits queued data. +func (p *Peer) broadcastTransactions() { + var ( + queue []common.Hash // Queue of hashes to broadcast as full transactions + done chan struct{} // Non-nil if background broadcaster is running + fail = make(chan error, 1) // Channel used to receive network error + failed bool // Flag whether a send failed, discard everything onward + ) + for { + // If there's no in-flight broadcast running, check if a new one is needed + if done == nil && len(queue) > 0 { + // Pile transaction until we reach our allowed network limit + var ( + hashes []common.Hash + txs []*types.Transaction + size common.StorageSize + ) + for i := 0; i < len(queue) && size < maxTxPacketSize; i++ { + if tx := p.txpool.Get(queue[i]); tx != nil { + txs = append(txs, tx) + size += tx.Size() + } + hashes = append(hashes, queue[i]) + } + queue = queue[:copy(queue, queue[len(hashes):])] + + // If there's anything available to transfer, fire up an async writer + if len(txs) > 0 { + done = make(chan struct{}) + go func() { + if err := p.SendTransactions(txs); err != nil { + fail <- err + return + } + close(done) + p.Log().Trace("Sent transactions", "count", len(txs)) + }() + } + } + // Transfer goroutine may or may not have been started, listen for events + select { + case hashes := <-p.txBroadcast: + // If the connection failed, discard all transaction events + if failed { + continue + } + // New batch of transactions to be broadcast, queue them (with cap) + queue = append(queue, hashes...) + if len(queue) > maxQueuedTxs { + // Fancy copy and resize to ensure buffer doesn't grow indefinitely + queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxs:])] + } + + case <-done: + done = nil + + case <-fail: + failed = true + + case <-p.term: + return + } + } +} + +// announceTransactions is a write loop that schedules transaction broadcasts +// to the remote peer. The goal is to have an async writer that does not lock up +// node internals and at the same time rate limits queued data. 
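broadcastTransactions above and announceTransactions below both cap their backlog with the compact queue[:copy(...)] idiom, which is easy to misread. A tiny standalone illustration of what it does (keep only the newest max entries while reusing the backing array):

package main

import "fmt"

// trimToNewest keeps only the newest max entries of queue, reusing the
// existing backing array - the same trick the broadcast/announce loops above
// use to stop the queue growing indefinitely. The mirror-image form,
// queue[:copy(queue, queue[n:])], shifts n consumed entries off the front.
func trimToNewest(queue []int, max int) []int {
    if len(queue) <= max {
        return queue
    }
    return queue[:copy(queue, queue[len(queue)-max:])]
}

func main() {
    q := []int{1, 2, 3, 4, 5, 6, 7, 8}
    q = trimToNewest(q, 3)
    fmt.Println(q) // [6 7 8]
}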
+func (p *Peer) announceTransactions() { + var ( + queue []common.Hash // Queue of hashes to announce as transaction stubs + done chan struct{} // Non-nil if background announcer is running + fail = make(chan error, 1) // Channel used to receive network error + failed bool // Flag whether a send failed, discard everything onward + ) + for { + // If there's no in-flight announce running, check if a new one is needed + if done == nil && len(queue) > 0 { + // Pile transaction hashes until we reach our allowed network limit + var ( + count int + pending []common.Hash + size common.StorageSize + ) + for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { + if p.txpool.Get(queue[count]) != nil { + pending = append(pending, queue[count]) + size += common.HashLength + } + } + // Shift and trim queue + queue = queue[:copy(queue, queue[count:])] + + // If there's anything available to transfer, fire up an async writer + if len(pending) > 0 { + done = make(chan struct{}) + go func() { + if err := p.sendPooledTransactionHashes(pending); err != nil { + fail <- err + return + } + close(done) + p.Log().Trace("Sent transaction announcements", "count", len(pending)) + }() + } + } + // Transfer goroutine may or may not have been started, listen for events + select { + case hashes := <-p.txAnnounce: + // If the connection failed, discard all transaction events + if failed { + continue + } + // New batch of transactions to be broadcast, queue them (with cap) + queue = append(queue, hashes...) + if len(queue) > maxQueuedTxAnns { + // Fancy copy and resize to ensure buffer doesn't grow indefinitely + queue = queue[:copy(queue, queue[len(queue)-maxQueuedTxAnns:])] + } + + case <-done: + done = nil + + case <-fail: + failed = true + + case <-p.term: + return + } + } +} diff --git a/eth/protocols/eth/discovery.go b/eth/protocols/eth/discovery.go new file mode 100644 index 0000000000..025479b423 --- /dev/null +++ b/eth/protocols/eth/discovery.go @@ -0,0 +1,65 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/rlp" +) + +// enrEntry is the ENR entry which advertises `eth` protocol on the discovery. +type enrEntry struct { + ForkID forkid.ID // Fork identifier per EIP-2124 + + // Ignore additional fields (for forward compatibility). + Rest []rlp.RawValue `rlp:"tail"` +} + +// ENRKey implements enr.Entry. +func (e enrEntry) ENRKey() string { + return "eth" +} + +// StartENRUpdater starts the `eth` ENR updater loop, which listens for chain +// head events and updates the requested node record whenever a fork is passed. 
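The Rest []rlp.RawValue field tagged rlp:"tail" on enrEntry above is what keeps the entry forward compatible: an older node can still decode a record written by a future version that appends extra fields. A minimal round-trip demonstrating that behaviour with two hypothetical entry versions:

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/rlp"
)

// entryV2 stands in for a future version of the `eth` ENR entry with an
// extra field appended at the end.
type entryV2 struct {
    ForkHash [4]byte
    ForkNext uint64
    NewField string
}

// entryV1 stands in for the current enrEntry: unknown trailing fields are
// collected into Rest instead of failing the decode.
type entryV1 struct {
    ForkHash [4]byte
    ForkNext uint64
    Rest     []rlp.RawValue `rlp:"tail"`
}

func main() {
    enc, err := rlp.EncodeToBytes(&entryV2{ForkHash: [4]byte{0xfc, 0x64, 0xec, 0x04}, ForkNext: 1_150_000, NewField: "future"})
    if err != nil {
        panic(err)
    }
    var old entryV1
    if err := rlp.DecodeBytes(enc, &old); err != nil {
        panic(err)
    }
    fmt.Printf("fork hash %x, next %d, %d unknown trailing field(s)\n", old.ForkHash, old.ForkNext, len(old.Rest))
}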
+func StartENRUpdater(chain *core.BlockChain, ln *enode.LocalNode) { + var newHead = make(chan core.ChainHeadEvent, 10) + sub := chain.SubscribeChainHeadEvent(newHead) + + go func() { + defer sub.Unsubscribe() + for { + select { + case <-newHead: + ln.Set(currentENREntry(chain)) + case <-sub.Err(): + // Would be nice to sync with Stop, but there is no + // good way to do that. + return + } + } + }() +} + +// currentENREntry constructs an `eth` ENR entry based on the current state of the chain. +func currentENREntry(chain *core.BlockChain) *enrEntry { + return &enrEntry{ + ForkID: forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64()), + } +} diff --git a/eth/protocols/eth/handler.go b/eth/protocols/eth/handler.go new file mode 100644 index 0000000000..64648ed419 --- /dev/null +++ b/eth/protocols/eth/handler.go @@ -0,0 +1,249 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "fmt" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" +) + +const ( + // softResponseLimit is the target maximum size of replies to data retrievals. + softResponseLimit = 2 * 1024 * 1024 + + // estHeaderSize is the approximate size of an RLP encoded block header. + estHeaderSize = 500 + + // maxHeadersServe is the maximum number of block headers to serve. This number + // is there to limit the number of disk lookups. + maxHeadersServe = 1024 + + // maxBodiesServe is the maximum number of block bodies to serve. This number + // is mostly there to limit the number of disk lookups. With 24KB block sizes + // nowadays, the practical limit will always be softResponseLimit. + maxBodiesServe = 1024 + + // maxNodeDataServe is the maximum number of state trie nodes to serve. This + // number is there to limit the number of disk lookups. + maxNodeDataServe = 1024 + + // maxReceiptsServe is the maximum number of block receipts to serve. This + // number is mostly there to limit the number of disk lookups. With block + // containing 200+ transactions nowadays, the practical limit will always + // be softResponseLimit. + maxReceiptsServe = 1024 +) + +// Handler is a callback to invoke from an outside runner after the boilerplate +// exchanges have passed. +type Handler func(peer *Peer) error + +// Backend defines the data retrieval methods to serve remote requests and the +// callback methods to invoke on remote deliveries. +type Backend interface { + // Chain retrieves the blockchain object to serve data. 
+ Chain() *core.BlockChain + + // StateBloom retrieves the bloom filter - if any - for state trie nodes. + StateBloom() *trie.SyncBloom + + // TxPool retrieves the transaction pool object to serve data. + TxPool() TxPool + + // AcceptTxs retrieves whether transaction processing is enabled on the node + // or if inbound transactions should simply be dropped. + AcceptTxs() bool + + // RunPeer is invoked when a peer joins on the `eth` protocol. The handler + // should do any peer maintenance work, handshakes and validations. If all + // is passed, control should be given back to the `handler` to process the + // inbound messages going forward. + RunPeer(peer *Peer, handler Handler) error + + // PeerInfo retrieves all known `eth` information about a peer. + PeerInfo(id enode.ID) interface{} + + // Handle is a callback to be invoked when a data packet is received from + // the remote peer. Only packets not consumed by the protocol handler will + // be forwarded to the backend. + Handle(peer *Peer, packet Packet) error +} + +// TxPool defines the methods needed by the protocol handler to serve transactions. +type TxPool interface { + // Get retrieves the the transaction from the local txpool with the given hash. + Get(hash common.Hash) *types.Transaction +} + +// MakeProtocols constructs the P2P protocol definitions for `eth`. +func MakeProtocols(backend Backend, network uint64, dnsdisc enode.Iterator) []p2p.Protocol { + protocols := make([]p2p.Protocol, len(ProtocolVersions)) + for i, version := range ProtocolVersions { + version := version // Closure + + protocols[i] = p2p.Protocol{ + Name: ProtocolName, + Version: version, + Length: protocolLengths[version], + Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error { + peer := NewPeer(version, p, rw, backend.TxPool()) + defer peer.Close() + + return backend.RunPeer(peer, func(peer *Peer) error { + return Handle(backend, peer) + }) + }, + NodeInfo: func() interface{} { + return nodeInfo(backend.Chain(), network) + }, + PeerInfo: func(id enode.ID) interface{} { + return backend.PeerInfo(id) + }, + Attributes: []enr.Entry{currentENREntry(backend.Chain())}, + DialCandidates: dnsdisc, + } + } + return protocols +} + +// NodeInfo represents a short summary of the `eth` sub-protocol metadata +// known about the host peer. +type NodeInfo struct { + Network uint64 `json:"network"` // Ethereum network ID (1=Frontier, 2=Morden, Ropsten=3, Rinkeby=4) + Difficulty *big.Int `json:"difficulty"` // Total difficulty of the host's blockchain + Genesis common.Hash `json:"genesis"` // SHA3 hash of the host's genesis block + Config *params.ChainConfig `json:"config"` // Chain configuration for the fork rules + Head common.Hash `json:"head"` // Hex hash of the host's best owned block +} + +// nodeInfo retrieves some `eth` protocol metadata about the running host node. +func nodeInfo(chain *core.BlockChain, network uint64) *NodeInfo { + head := chain.CurrentBlock() + return &NodeInfo{ + Network: network, + Difficulty: chain.GetTd(head.Hash(), head.NumberU64()), + Genesis: chain.Genesis().Hash(), + Config: chain.Config(), + Head: head.Hash(), + } +} + +// Handle is invoked whenever an `eth` connection is made that successfully passes +// the protocol handshake. This method will keep processing messages until the +// connection is torn down. 
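The version := version line in MakeProtocols above is the classic per-iteration capture: each p2p.Protocol's Run closure must see its own protocol version, not whatever the shared loop variable holds when the closure eventually runs. A stdlib-only illustration of the difference it makes under the loop semantics of the Go versions this change targets:

package main

import "fmt"

func main() {
    versions := []uint{64, 65, 66}

    var runners []func() uint
    for _, version := range versions {
        version := version // per-iteration copy, same purpose as in MakeProtocols above
        runners = append(runners, func() uint { return version })
    }
    for _, run := range runners {
        fmt.Println(run()) // 64, 65, 66; without the copy, Go toolchains before 1.22 print 66 three times
    }
}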
+func Handle(backend Backend, peer *Peer) error { + for { + if err := handleMessage(backend, peer); err != nil { + peer.Log().Debug("Message handling failed in `eth`", "err", err) + return err + } + } +} + +type msgHandler func(backend Backend, msg Decoder, peer *Peer) error +type Decoder interface { + Decode(val interface{}) error + Time() time.Time +} + +var eth64 = map[uint64]msgHandler{ + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetNodeDataMsg: handleGetNodeData, + NodeDataMsg: handleNodeData, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + NewBlockHashesMsg: handleNewBlockhashes, + NewBlockMsg: handleNewBlock, + TransactionsMsg: handleTransactions, +} +var eth65 = map[uint64]msgHandler{ + // old 64 messages + GetBlockHeadersMsg: handleGetBlockHeaders, + BlockHeadersMsg: handleBlockHeaders, + GetBlockBodiesMsg: handleGetBlockBodies, + BlockBodiesMsg: handleBlockBodies, + GetNodeDataMsg: handleGetNodeData, + NodeDataMsg: handleNodeData, + GetReceiptsMsg: handleGetReceipts, + ReceiptsMsg: handleReceipts, + NewBlockHashesMsg: handleNewBlockhashes, + NewBlockMsg: handleNewBlock, + TransactionsMsg: handleTransactions, + // New eth65 messages + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes, + GetPooledTransactionsMsg: handleGetPooledTransactions, + PooledTransactionsMsg: handlePooledTransactions, +} + +var eth66 = map[uint64]msgHandler{ + // eth64 announcement messages (no id) + NewBlockHashesMsg: handleNewBlockhashes, + NewBlockMsg: handleNewBlock, + TransactionsMsg: handleTransactions, + // eth65 announcement messages (no id) + NewPooledTransactionHashesMsg: handleNewPooledTransactionHashes, + // eth66 messages with request-id + GetBlockHeadersMsg: handleGetBlockHeaders66, + BlockHeadersMsg: handleBlockHeaders66, + GetBlockBodiesMsg: handleGetBlockBodies66, + BlockBodiesMsg: handleBlockBodies66, + GetNodeDataMsg: handleGetNodeData66, + NodeDataMsg: handleNodeData66, + GetReceiptsMsg: handleGetReceipts66, + ReceiptsMsg: handleReceipts66, + GetPooledTransactionsMsg: handleGetPooledTransactions66, + PooledTransactionsMsg: handlePooledTransactions66, +} + +// handleMessage is invoked whenever an inbound message is received from a remote +// peer. The remote connection is torn down upon returning any error. +func handleMessage(backend Backend, peer *Peer) error { + // Read the next message from the remote peer, and ensure it's fully consumed + msg, err := peer.rw.ReadMsg() + if err != nil { + return err + } + if msg.Size > maxMessageSize { + return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) + } + defer msg.Discard() + + var handlers = eth64 + if peer.Version() == ETH65 { + handlers = eth65 + } else if peer.Version() >= ETH66 { + handlers = eth66 + } + + if handler := handlers[msg.Code]; handler != nil { + return handler(backend, msg, peer) + } + return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code) +} diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go new file mode 100644 index 0000000000..30beae931b --- /dev/null +++ b/eth/protocols/eth/handler_test.go @@ -0,0 +1,519 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "math" + "math/big" + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/trie" +) + +var ( + // testKey is a private key to use for funding a tester account. + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + + // testAddr is the Ethereum address of the tester account. + testAddr = crypto.PubkeyToAddress(testKey.PublicKey) +) + +// testBackend is a mock implementation of the live Ethereum message handler. Its +// purpose is to allow testing the request/reply workflows and wire serialization +// in the `eth` protocol without actually doing any data processing. +type testBackend struct { + db ethdb.Database + chain *core.BlockChain + txpool *core.TxPool +} + +// newTestBackend creates an empty chain and wraps it into a mock backend. +func newTestBackend(blocks int) *testBackend { + return newTestBackendWithGenerator(blocks, nil) +} + +// newTestBackend creates a chain with a number of explicitly defined blocks and +// wraps it into a mock backend. +func newTestBackendWithGenerator(blocks int, generator func(int, *core.BlockGen)) *testBackend { + // Create a database pre-initialize with a genesis block + db := rawdb.NewMemoryDatabase() + (&core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{testAddr: {Balance: big.NewInt(1000000)}}, + }).MustCommit(db) + + chain, _ := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil) + + bs, _ := core.GenerateChain(params.TestChainConfig, chain.Genesis(), ethash.NewFaker(), db, blocks, generator) + if _, err := chain.InsertChain(bs); err != nil { + panic(err) + } + txconfig := core.DefaultTxPoolConfig + txconfig.Journal = "" // Don't litter the disk with test journals + + return &testBackend{ + db: db, + chain: chain, + txpool: core.NewTxPool(txconfig, params.TestChainConfig, chain), + } +} + +// close tears down the transaction pool and chain behind the mock backend. 
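newTestBackendWithGenerator above doubles as a handy recipe outside this test file: an in-memory database, a committed genesis, a faked ethash engine and a generated chain are all that is needed to exercise the protocol handlers. A trimmed-down sketch of the same steps, with the funded account and chain length chosen arbitrarily:

package main

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/consensus/ethash"
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/rawdb"
    "github.com/ethereum/go-ethereum/core/vm"
    "github.com/ethereum/go-ethereum/crypto"
    "github.com/ethereum/go-ethereum/params"
)

func main() {
    // One funded key, purely for illustration.
    key, _ := crypto.GenerateKey()
    addr := crypto.PubkeyToAddress(key.PublicKey)

    // In-memory database pre-initialized with a genesis block.
    db := rawdb.NewMemoryDatabase()
    genesis := (&core.Genesis{
        Config: params.TestChainConfig,
        Alloc:  core.GenesisAlloc{addr: {Balance: big.NewInt(1_000_000)}},
    }).MustCommit(db)

    // Chain with a faked PoW engine, plus a short generated history.
    chain, err := core.NewBlockChain(db, nil, params.TestChainConfig, ethash.NewFaker(), vm.Config{}, nil, nil)
    if err != nil {
        panic(err)
    }
    defer chain.Stop()

    blocks, _ := core.GenerateChain(params.TestChainConfig, genesis, ethash.NewFaker(), db, 8, nil)
    if _, err := chain.InsertChain(blocks); err != nil {
        panic(err)
    }
    fmt.Println("head block:", chain.CurrentBlock().NumberU64()) // 8
}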
+func (b *testBackend) close() { + b.txpool.Stop() + b.chain.Stop() +} + +func (b *testBackend) Chain() *core.BlockChain { return b.chain } +func (b *testBackend) StateBloom() *trie.SyncBloom { return nil } +func (b *testBackend) TxPool() TxPool { return b.txpool } + +func (b *testBackend) RunPeer(peer *Peer, handler Handler) error { + // Normally the backend would do peer mainentance and handshakes. All that + // is omitted and we will just give control back to the handler. + return handler(peer) +} +func (b *testBackend) PeerInfo(enode.ID) interface{} { panic("not implemented") } + +func (b *testBackend) AcceptTxs() bool { + panic("data processing tests should be done in the handler package") +} +func (b *testBackend) Handle(*Peer, Packet) error { + panic("data processing tests should be done in the handler package") +} + +// Tests that block headers can be retrieved from a remote chain based on user queries. +func TestGetBlockHeaders64(t *testing.T) { testGetBlockHeaders(t, 64) } +func TestGetBlockHeaders65(t *testing.T) { testGetBlockHeaders(t, 65) } + +func testGetBlockHeaders(t *testing.T, protocol uint) { + t.Parallel() + + backend := newTestBackend(maxHeadersServe + 15) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Create a "random" unknown hash for testing + var unknown common.Hash + for i := range unknown { + unknown[i] = byte(i) + } + // Create a batch of tests for various scenarios + limit := uint64(maxHeadersServe) + tests := []struct { + query *GetBlockHeadersPacket // The query to execute for header retrieval + expect []common.Hash // The hashes of the block whose headers are expected + }{ + // A single random block should be retrievable by hash and number too + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 1}, + []common.Hash{backend.chain.GetBlockByNumber(limit / 2).Hash()}, + }, + // Multiple headers should be retrievable in both directions + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3}, + []common.Hash{ + backend.chain.GetBlockByNumber(limit / 2).Hash(), + backend.chain.GetBlockByNumber(limit/2 + 1).Hash(), + backend.chain.GetBlockByNumber(limit/2 + 2).Hash(), + }, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + []common.Hash{ + backend.chain.GetBlockByNumber(limit / 2).Hash(), + backend.chain.GetBlockByNumber(limit/2 - 1).Hash(), + backend.chain.GetBlockByNumber(limit/2 - 2).Hash(), + }, + }, + // Multiple headers with skip lists should be retrievable + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + []common.Hash{ + backend.chain.GetBlockByNumber(limit / 2).Hash(), + backend.chain.GetBlockByNumber(limit/2 + 4).Hash(), + backend.chain.GetBlockByNumber(limit/2 + 8).Hash(), + }, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + []common.Hash{ + backend.chain.GetBlockByNumber(limit / 2).Hash(), + backend.chain.GetBlockByNumber(limit/2 - 4).Hash(), + backend.chain.GetBlockByNumber(limit/2 - 8).Hash(), + }, + }, + // The chain endpoints should be retrievable + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 0}, Amount: 1}, + []common.Hash{backend.chain.GetBlockByNumber(0).Hash()}, + }, { + 
&GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64()}, Amount: 1}, + []common.Hash{backend.chain.CurrentBlock().Hash()}, + }, + // Ensure protocol limits are honored + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, + backend.chain.GetBlockHashesFromHash(backend.chain.CurrentBlock().Hash(), limit), + }, + // Check that requesting more than available is handled gracefully + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, + []common.Hash{ + backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(), + backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64()).Hash(), + }, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + []common.Hash{ + backend.chain.GetBlockByNumber(4).Hash(), + backend.chain.GetBlockByNumber(0).Hash(), + }, + }, + // Check that requesting more than available is handled gracefully, even if mid skip + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, + []common.Hash{ + backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 4).Hash(), + backend.chain.GetBlockByNumber(backend.chain.CurrentBlock().NumberU64() - 1).Hash(), + }, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + []common.Hash{ + backend.chain.GetBlockByNumber(4).Hash(), + backend.chain.GetBlockByNumber(1).Hash(), + }, + }, + // Check a corner case where requesting more can iterate past the endpoints + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 2}, Amount: 5, Reverse: true}, + []common.Hash{ + backend.chain.GetBlockByNumber(2).Hash(), + backend.chain.GetBlockByNumber(1).Hash(), + backend.chain.GetBlockByNumber(0).Hash(), + }, + }, + // Check a corner case where skipping overflow loops back into the chain start + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(3).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64 - 1}, + []common.Hash{ + backend.chain.GetBlockByNumber(3).Hash(), + }, + }, + // Check a corner case where skipping overflow loops back to the same header + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: backend.chain.GetBlockByNumber(1).Hash()}, Amount: 2, Reverse: false, Skip: math.MaxUint64}, + []common.Hash{ + backend.chain.GetBlockByNumber(1).Hash(), + }, + }, + // Check that non existing headers aren't returned + { + &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: unknown}, Amount: 1}, + []common.Hash{}, + }, { + &GetBlockHeadersPacket{Origin: HashOrNumber{Number: backend.chain.CurrentBlock().NumberU64() + 1}, Amount: 1}, + []common.Hash{}, + }, + } + // Run each of the tests and verify the results against the chain + for i, tt := range tests { + // Collect the headers to expect in the response + var headers []*types.Header + for _, hash := range tt.expect { + headers = append(headers, backend.chain.GetBlockByHash(hash).Header()) + } + // Send the hash request and verify the response + p2p.Send(peer.app, GetBlockHeadersMsg, tt.query) + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil { + t.Errorf("test %d: headers mismatch: %v", i, err) + } + // If the test used number origins, repeat with hashes as the too + if tt.query.Origin.Hash == (common.Hash{}) { + if origin := 
backend.chain.GetBlockByNumber(tt.query.Origin.Number); origin != nil { + tt.query.Origin.Hash, tt.query.Origin.Number = origin.Hash(), 0 + + p2p.Send(peer.app, GetBlockHeadersMsg, tt.query) + if err := p2p.ExpectMsg(peer.app, BlockHeadersMsg, headers); err != nil { + t.Errorf("test %d: headers mismatch: %v", i, err) + } + } + } + } +} + +// Tests that block contents can be retrieved from a remote chain based on their hashes. +func TestGetBlockBodies64(t *testing.T) { testGetBlockBodies(t, 64) } +func TestGetBlockBodies65(t *testing.T) { testGetBlockBodies(t, 65) } + +func testGetBlockBodies(t *testing.T, protocol uint) { + t.Parallel() + + backend := newTestBackend(maxBodiesServe + 15) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Create a batch of tests for various scenarios + limit := maxBodiesServe + tests := []struct { + random int // Number of blocks to fetch randomly from the chain + explicit []common.Hash // Explicitly requested blocks + available []bool // Availability of explicitly requested blocks + expected int // Total number of existing blocks to expect + }{ + {1, nil, nil, 1}, // A single random block should be retrievable + {10, nil, nil, 10}, // Multiple random blocks should be retrievable + {limit, nil, nil, limit}, // The maximum possible blocks should be retrievable + {limit + 1, nil, nil, limit}, // No more than the possible block count should be returned + {0, []common.Hash{backend.chain.Genesis().Hash()}, []bool{true}, 1}, // The genesis block should be retrievable + {0, []common.Hash{backend.chain.CurrentBlock().Hash()}, []bool{true}, 1}, // The chains head block should be retrievable + {0, []common.Hash{{}}, []bool{false}, 0}, // A non existent block should not be returned + + // Existing and non-existing blocks interleaved should not cause problems + {0, []common.Hash{ + {}, + backend.chain.GetBlockByNumber(1).Hash(), + {}, + backend.chain.GetBlockByNumber(10).Hash(), + {}, + backend.chain.GetBlockByNumber(100).Hash(), + {}, + }, []bool{false, true, false, true, false, true, false}, 3}, + } + // Run each of the tests and verify the results against the chain + for i, tt := range tests { + // Collect the hashes to request, and the response to expectva + var ( + hashes []common.Hash + bodies []*BlockBody + seen = make(map[int64]bool) + ) + for j := 0; j < tt.random; j++ { + for { + num := rand.Int63n(int64(backend.chain.CurrentBlock().NumberU64())) + if !seen[num] { + seen[num] = true + + block := backend.chain.GetBlockByNumber(uint64(num)) + hashes = append(hashes, block.Hash()) + if len(bodies) < tt.expected { + bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) + } + break + } + } + } + for j, hash := range tt.explicit { + hashes = append(hashes, hash) + if tt.available[j] && len(bodies) < tt.expected { + block := backend.chain.GetBlockByHash(hash) + bodies = append(bodies, &BlockBody{Transactions: block.Transactions(), Uncles: block.Uncles()}) + } + } + // Send the hash request and verify the response + p2p.Send(peer.app, GetBlockBodiesMsg, hashes) + if err := p2p.ExpectMsg(peer.app, BlockBodiesMsg, bodies); err != nil { + t.Errorf("test %d: bodies mismatch: %v", i, err) + } + } +} + +// Tests that the state trie nodes can be retrieved based on hashes. 
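The query table in testGetBlockHeaders above is easier to digest once the Origin/Amount/Skip/Reverse semantics are spelled out. The helper below is a rough, stdlib-only model of which block numbers a number-based query selects on the happy path; hash origins and the overflow corner cases are left to the real server code:

package main

import "fmt"

// headerNumbers lists the block numbers selected by a number-based
// GetBlockHeaders query: start at origin and step by skip+1 in the requested
// direction until amount headers are collected or the chain range [0, head]
// is exhausted.
func headerNumbers(origin, amount, skip, head uint64, reverse bool) []uint64 {
    var (
        out  []uint64
        step = skip + 1
    )
    for n := origin; uint64(len(out)) < amount && n <= head; {
        out = append(out, n)
        if reverse {
            if n < step {
                break // ran past the genesis block
            }
            n -= step
        } else {
            n += step // running past the head is caught by the loop condition
        }
    }
    return out
}

func main() {
    head := uint64(1024 + 15)
    fmt.Println(headerNumbers(512, 3, 0, head, false)) // [512 513 514]
    fmt.Println(headerNumbers(512, 3, 3, head, true))  // [512 508 504]
    fmt.Println(headerNumbers(2, 5, 0, head, true))    // [2 1 0]
}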
+func TestGetNodeData64(t *testing.T) { testGetNodeData(t, 64) } +func TestGetNodeData65(t *testing.T) { testGetNodeData(t, 65) } + +func testGetNodeData(t *testing.T, protocol uint) { + t.Parallel() + + // Define three accounts to simulate transactions with + acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) + + signer := types.HomesteadSigner{} + // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) + generator := func(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey) + tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key) + block.AddTx(tx1) + block.AddTx(tx2) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). + b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + } + } + // Assemble the test environment + backend := newTestBackendWithGenerator(4, generator) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Fetch for now the entire chain db + var hashes []common.Hash + + it := backend.db.NewIterator(nil, nil) + for it.Next() { + if key := it.Key(); len(key) == common.HashLength { + hashes = append(hashes, common.BytesToHash(key)) + } + } + it.Release() + + p2p.Send(peer.app, GetNodeDataMsg, hashes) + msg, err := peer.app.ReadMsg() + if err != nil { + t.Fatalf("failed to read node data response: %v", err) + } + if msg.Code != NodeDataMsg { + t.Fatalf("response packet code mismatch: have %x, want %x", msg.Code, NodeDataMsg) + } + var data [][]byte + if err := msg.Decode(&data); err != nil { + t.Fatalf("failed to decode response node data: %v", err) + } + // Verify that all hashes correspond to the requested data, and reconstruct a state tree + for i, want := range hashes { + if hash := crypto.Keccak256Hash(data[i]); hash != want { + t.Errorf("data hash mismatch: have %x, want %x", hash, want) + } + } + statedb := rawdb.NewMemoryDatabase() + for i := 0; i < len(data); i++ { + statedb.Put(hashes[i].Bytes(), data[i]) + } + accounts := []common.Address{testAddr, acc1Addr, acc2Addr} + for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ { + trie, _ := state.New(backend.chain.GetBlockByNumber(i).Root(), state.NewDatabase(statedb), nil) + + for j, acc := range accounts { + state, _ := backend.chain.State() + bw := state.GetBalance(acc) + bh := trie.GetBalance(acc) + + if (bw != nil && bh == nil) || (bw == nil && bh != nil) { + t.Errorf("test %d, account %d: balance mismatch: have 
%v, want %v", i, j, bh, bw) + } + if bw != nil && bh != nil && bw.Cmp(bw) != 0 { + t.Errorf("test %d, account %d: balance mismatch: have %v, want %v", i, j, bh, bw) + } + } + } +} + +// Tests that the transaction receipts can be retrieved based on hashes. +func TestGetBlockReceipts64(t *testing.T) { testGetBlockReceipts(t, 64) } +func TestGetBlockReceipts65(t *testing.T) { testGetBlockReceipts(t, 65) } + +func testGetBlockReceipts(t *testing.T, protocol uint) { + t.Parallel() + + // Define three accounts to simulate transactions with + acc1Key, _ := crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + acc2Key, _ := crypto.HexToECDSA("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee") + acc1Addr := crypto.PubkeyToAddress(acc1Key.PublicKey) + acc2Addr := crypto.PubkeyToAddress(acc2Key.PublicKey) + + signer := types.HomesteadSigner{} + // Create a chain generator with some simple transactions (blatantly stolen from @fjl/chain_markets_test) + generator := func(i int, block *core.BlockGen) { + switch i { + case 0: + // In block 1, the test bank sends account #1 some ether. + tx, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(10000), params.TxGas, nil, nil), signer, testKey) + block.AddTx(tx) + case 1: + // In block 2, the test bank sends some more ether to account #1. + // acc1Addr passes it on to account #2. + tx1, _ := types.SignTx(types.NewTransaction(block.TxNonce(testAddr), acc1Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, testKey) + tx2, _ := types.SignTx(types.NewTransaction(block.TxNonce(acc1Addr), acc2Addr, big.NewInt(1000), params.TxGas, nil, nil), signer, acc1Key) + block.AddTx(tx1) + block.AddTx(tx2) + case 2: + // Block 3 is empty but was mined by account #2. + block.SetCoinbase(acc2Addr) + block.SetExtra([]byte("yeehaw")) + case 3: + // Block 4 includes blocks 2 and 3 as uncle headers (with modified extra data). + b2 := block.PrevBlock(1).Header() + b2.Extra = []byte("foo") + block.AddUncle(b2) + b3 := block.PrevBlock(2).Header() + b3.Extra = []byte("foo") + block.AddUncle(b3) + } + } + // Assemble the test environment + backend := newTestBackendWithGenerator(4, generator) + defer backend.close() + + peer, _ := newTestPeer("peer", protocol, backend) + defer peer.close() + + // Collect the hashes to request, and the response to expect + var ( + hashes []common.Hash + receipts []types.Receipts + ) + for i := uint64(0); i <= backend.chain.CurrentBlock().NumberU64(); i++ { + block := backend.chain.GetBlockByNumber(i) + + hashes = append(hashes, block.Hash()) + receipts = append(receipts, backend.chain.GetReceiptsByHash(block.Hash())) + } + // Send the hash request and verify the response + p2p.Send(peer.app, GetReceiptsMsg, hashes) + if err := p2p.ExpectMsg(peer.app, ReceiptsMsg, receipts); err != nil { + t.Errorf("receipts mismatch: %v", err) + } +} diff --git a/eth/protocols/eth/handlers.go b/eth/protocols/eth/handlers.go new file mode 100644 index 0000000000..8433fa343a --- /dev/null +++ b/eth/protocols/eth/handlers.go @@ -0,0 +1,510 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "encoding/json" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +// handleGetBlockHeaders handles Block header query, collect the requested headers and reply +func handleGetBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { + // Decode the complex header query + var query GetBlockHeadersPacket + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetBlockHeadersQuery(backend, &query, peer) + return peer.SendBlockHeaders(response) +} + +// handleGetBlockHeaders66 is the eth/66 version of handleGetBlockHeaders +func handleGetBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the complex header query + var query GetBlockHeadersPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetBlockHeadersQuery(backend, query.GetBlockHeadersPacket, peer) + return peer.ReplyBlockHeaders(query.RequestId, response) +} + +func answerGetBlockHeadersQuery(backend Backend, query *GetBlockHeadersPacket, peer *Peer) []*types.Header { + hashMode := query.Origin.Hash != (common.Hash{}) + first := true + maxNonCanonical := uint64(100) + + // Gather headers until the fetch or network limits is reached + var ( + bytes common.StorageSize + headers []*types.Header + unknown bool + lookups int + ) + for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && + len(headers) < maxHeadersServe && lookups < 2*maxHeadersServe { + lookups++ + // Retrieve the next header satisfying the query + var origin *types.Header + if hashMode { + if first { + first = false + origin = backend.Chain().GetHeaderByHash(query.Origin.Hash) + if origin != nil { + query.Origin.Number = origin.Number.Uint64() + } + } else { + origin = backend.Chain().GetHeader(query.Origin.Hash, query.Origin.Number) + } + } else { + origin = backend.Chain().GetHeaderByNumber(query.Origin.Number) + } + if origin == nil { + break + } + headers = append(headers, origin) + bytes += estHeaderSize + + // Advance to the next header of the query + switch { + case hashMode && query.Reverse: + // Hash based traversal towards the genesis block + ancestor := query.Skip + 1 + if ancestor == 0 { + unknown = true + } else { + query.Origin.Hash, query.Origin.Number = backend.Chain().GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) + unknown = (query.Origin.Hash == common.Hash{}) + } + case hashMode && !query.Reverse: + // Hash based traversal towards the leaf block + var ( + current = origin.Number.Uint64() + next = current + query.Skip + 1 + ) + if next <= current { + infos, _ := json.MarshalIndent(peer.Peer.Info(), "", " ") + peer.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) + unknown = true + } else { + if header := 
backend.Chain().GetHeaderByNumber(next); header != nil { + nextHash := header.Hash() + expOldHash, _ := backend.Chain().GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) + if expOldHash == query.Origin.Hash { + query.Origin.Hash, query.Origin.Number = nextHash, next + } else { + unknown = true + } + } else { + unknown = true + } + } + case query.Reverse: + // Number based traversal towards the genesis block + if query.Origin.Number >= query.Skip+1 { + query.Origin.Number -= query.Skip + 1 + } else { + unknown = true + } + + case !query.Reverse: + // Number based traversal towards the leaf block + query.Origin.Number += query.Skip + 1 + } + } + return headers +} + +func handleGetBlockBodies(backend Backend, msg Decoder, peer *Peer) error { + // Decode the block body retrieval message + var query GetBlockBodiesPacket + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetBlockBodiesQuery(backend, query, peer) + return peer.SendBlockBodiesRLP(response) +} + +func handleGetBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the block body retrieval message + var query GetBlockBodiesPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetBlockBodiesQuery(backend, query.GetBlockBodiesPacket, peer) + return peer.ReplyBlockBodiesRLP(query.RequestId, response) +} + +func answerGetBlockBodiesQuery(backend Backend, query GetBlockBodiesPacket, peer *Peer) []rlp.RawValue { + // Gather blocks until the fetch or network limits is reached + var ( + bytes int + bodies []rlp.RawValue + ) + for lookups, hash := range query { + if bytes >= softResponseLimit || len(bodies) >= maxBodiesServe || + lookups >= 2*maxBodiesServe { + break + } + if data := backend.Chain().GetBodyRLP(hash); len(data) != 0 { + bodies = append(bodies, data) + bytes += len(data) + } + } + return bodies +} + +func handleGetNodeData(backend Backend, msg Decoder, peer *Peer) error { + // Decode the trie node data retrieval message + var query GetNodeDataPacket + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetNodeDataQuery(backend, query, peer) + return peer.SendNodeData(response) +} + +func handleGetNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the trie node data retrieval message + var query GetNodeDataPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetNodeDataQuery(backend, query.GetNodeDataPacket, peer) + return peer.ReplyNodeData(query.RequestId, response) +} + +func answerGetNodeDataQuery(backend Backend, query GetNodeDataPacket, peer *Peer) [][]byte { + // Gather state data until the fetch or network limits is reached + var ( + bytes int + nodes [][]byte + ) + for lookups, hash := range query { + if bytes >= softResponseLimit || len(nodes) >= maxNodeDataServe || + lookups >= 2*maxNodeDataServe { + break + } + // Retrieve the requested state entry + if bloom := backend.StateBloom(); bloom != nil && !bloom.Contains(hash[:]) { + // Only lookup the trie node if there's chance that we actually have it + continue + } + entry, err := backend.Chain().TrieNode(hash) + if len(entry) == 0 || err != nil { + // Read the contract code with prefix only to save unnecessary lookups. 
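+ // (a trie miss can also mean the requested hash refers to contract code rather than a
+ // trie node; the prefixed lookup probes just the dedicated code table)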
+ entry, err = backend.Chain().ContractCodeWithPrefix(hash) + } + if err == nil && len(entry) > 0 { + nodes = append(nodes, entry) + bytes += len(entry) + } + } + return nodes +} + +func handleGetReceipts(backend Backend, msg Decoder, peer *Peer) error { + // Decode the block receipts retrieval message + var query GetReceiptsPacket + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetReceiptsQuery(backend, query, peer) + return peer.SendReceiptsRLP(response) +} + +func handleGetReceipts66(backend Backend, msg Decoder, peer *Peer) error { + // Decode the block receipts retrieval message + var query GetReceiptsPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + response := answerGetReceiptsQuery(backend, query.GetReceiptsPacket, peer) + return peer.ReplyReceiptsRLP(query.RequestId, response) +} + +func answerGetReceiptsQuery(backend Backend, query GetReceiptsPacket, peer *Peer) []rlp.RawValue { + // Gather state data until the fetch or network limits is reached + var ( + bytes int + receipts []rlp.RawValue + ) + for lookups, hash := range query { + if bytes >= softResponseLimit || len(receipts) >= maxReceiptsServe || + lookups >= 2*maxReceiptsServe { + break + } + // Retrieve the requested block's receipts + results := backend.Chain().GetReceiptsByHash(hash) + if results == nil { + if header := backend.Chain().GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + continue + } + } + // If known, encode and queue for response packet + if encoded, err := rlp.EncodeToBytes(results); err != nil { + log.Error("Failed to encode receipt", "err", err) + } else { + receipts = append(receipts, encoded) + bytes += len(encoded) + } + } + return receipts +} + +func handleNewBlockhashes(backend Backend, msg Decoder, peer *Peer) error { + // A batch of new block announcements just arrived + ann := new(NewBlockHashesPacket) + if err := msg.Decode(ann); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Mark the hashes as present at the remote node + for _, block := range *ann { + peer.markBlock(block.Hash) + } + // Deliver them all to the backend for queuing + return backend.Handle(peer, ann) +} + +func handleNewBlock(backend Backend, msg Decoder, peer *Peer) error { + // Retrieve and decode the propagated block + ann := new(NewBlockPacket) + if err := msg.Decode(ann); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + if hash := types.CalcUncleHash(ann.Block.Uncles()); hash != ann.Block.UncleHash() { + log.Warn("Propagated block has invalid uncles", "have", hash, "exp", ann.Block.UncleHash()) + return nil // TODO(karalabe): return error eventually, but wait a few releases + } + if hash := types.DeriveSha(ann.Block.Transactions(), trie.NewStackTrie(nil)); hash != ann.Block.TxHash() { + log.Warn("Propagated block has invalid body", "have", hash, "exp", ann.Block.TxHash()) + return nil // TODO(karalabe): return error eventually, but wait a few releases + } + if err := ann.sanityCheck(); err != nil { + return err + } + ann.Block.ReceivedAt = msg.Time() + ann.Block.ReceivedFrom = peer + + // Mark the peer as owning the block + peer.markBlock(ann.Block.Hash()) + + return backend.Handle(peer, ann) +} + +func handleBlockHeaders(backend Backend, msg Decoder, peer *Peer) error { + // A batch of headers arrived to one of our previous requests + res := 
new(BlockHeadersPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) +} + +func handleBlockHeaders66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of headers arrived to one of our previous requests + res := new(BlockHeadersPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, &res.BlockHeadersPacket) +} + +func handleBlockBodies(backend Backend, msg Decoder, peer *Peer) error { + // A batch of block bodies arrived to one of our previous requests + res := new(BlockBodiesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) +} + +func handleBlockBodies66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of block bodies arrived to one of our previous requests + res := new(BlockBodiesPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, &res.BlockBodiesPacket) +} + +func handleNodeData(backend Backend, msg Decoder, peer *Peer) error { + // A batch of node state data arrived to one of our previous requests + res := new(NodeDataPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) +} + +func handleNodeData66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of node state data arrived to one of our previous requests + res := new(NodeDataPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, &res.NodeDataPacket) +} + +func handleReceipts(backend Backend, msg Decoder, peer *Peer) error { + // A batch of receipts arrived to one of our previous requests + res := new(ReceiptsPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) +} + +func handleReceipts66(backend Backend, msg Decoder, peer *Peer) error { + // A batch of receipts arrived to one of our previous requests + res := new(ReceiptsPacket66) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, &res.ReceiptsPacket) +} + +func handleNewPooledTransactionHashes(backend Backend, msg Decoder, peer *Peer) error { + // New transaction announcement arrived, make sure we have + // a valid and fresh chain to handle them + if !backend.AcceptTxs() { + return nil + } + ann := new(NewPooledTransactionHashesPacket) + if err := msg.Decode(ann); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Schedule all the unknown hashes for retrieval + for _, hash := range *ann { + peer.markTransaction(hash) + } + return backend.Handle(peer, ann) +} + +func handleGetPooledTransactions(backend Backend, msg Decoder, peer *Peer) error { + // Decode the pooled transactions retrieval message + var query GetPooledTransactionsPacket + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + hashes, txs := answerGetPooledTransactions(backend, query, peer) + return peer.SendPooledTransactionsRLP(hashes, txs) +} + +func handleGetPooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { + // 
Decode the pooled transactions retrieval message + var query GetPooledTransactionsPacket66 + if err := msg.Decode(&query); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + hashes, txs := answerGetPooledTransactions(backend, query.GetPooledTransactionsPacket, peer) + return peer.ReplyPooledTransactionsRLP(query.RequestId, hashes, txs) +} + +func answerGetPooledTransactions(backend Backend, query GetPooledTransactionsPacket, peer *Peer) ([]common.Hash, []rlp.RawValue) { + // Gather transactions until the fetch or network limits is reached + var ( + bytes int + hashes []common.Hash + txs []rlp.RawValue + ) + for _, hash := range query { + if bytes >= softResponseLimit { + break + } + // Retrieve the requested transaction, skipping if unknown to us + tx := backend.TxPool().Get(hash) + if tx == nil { + continue + } + // If known, encode and queue for response packet + if encoded, err := rlp.EncodeToBytes(tx); err != nil { + log.Error("Failed to encode transaction", "err", err) + } else { + hashes = append(hashes, hash) + txs = append(txs, encoded) + bytes += len(encoded) + } + } + return hashes, txs +} + +func handleTransactions(backend Backend, msg Decoder, peer *Peer) error { + // Transactions arrived, make sure we have a valid and fresh chain to handle them + if !backend.AcceptTxs() { + return nil + } + // Transactions can be processed, parse all of them and deliver to the pool + var txs TransactionsPacket + if err := msg.Decode(&txs); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + for i, tx := range txs { + // Validate and mark the remote transaction + if tx == nil { + return fmt.Errorf("%w: transaction %d is nil", errDecode, i) + } + peer.markTransaction(tx.Hash()) + } + return backend.Handle(peer, &txs) +} + +func handlePooledTransactions(backend Backend, msg Decoder, peer *Peer) error { + // Transactions arrived, make sure we have a valid and fresh chain to handle them + if !backend.AcceptTxs() { + return nil + } + // Transactions can be processed, parse all of them and deliver to the pool + var txs PooledTransactionsPacket + if err := msg.Decode(&txs); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + for i, tx := range txs { + // Validate and mark the remote transaction + if tx == nil { + return fmt.Errorf("%w: transaction %d is nil", errDecode, i) + } + peer.markTransaction(tx.Hash()) + } + return backend.Handle(peer, &txs) +} + +func handlePooledTransactions66(backend Backend, msg Decoder, peer *Peer) error { + // Transactions arrived, make sure we have a valid and fresh chain to handle them + if !backend.AcceptTxs() { + return nil + } + // Transactions can be processed, parse all of them and deliver to the pool + var txs PooledTransactionsPacket66 + if err := msg.Decode(&txs); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + for i, tx := range txs.PooledTransactionsPacket { + // Validate and mark the remote transaction + if tx == nil { + return fmt.Errorf("%w: transaction %d is nil", errDecode, i) + } + peer.markTransaction(tx.Hash()) + } + return backend.Handle(peer, &txs.PooledTransactionsPacket) +} diff --git a/eth/protocols/eth/handshake.go b/eth/protocols/eth/handshake.go new file mode 100644 index 0000000000..57a4e0bc34 --- /dev/null +++ b/eth/protocols/eth/handshake.go @@ -0,0 +1,107 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package eth
+
+import (
+ "fmt"
+ "math/big"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/forkid"
+ "github.com/ethereum/go-ethereum/p2p"
+)
+
+const (
+ // handshakeTimeout is the maximum allowed time for the `eth` handshake to
+ // complete before dropping the connection as malicious.
+ handshakeTimeout = 5 * time.Second
+)
+
+// Handshake executes the eth protocol handshake, negotiating version number,
+// network IDs, difficulties, head and genesis blocks.
+func (p *Peer) Handshake(network uint64, td *big.Int, head common.Hash, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter) error {
+ // Send our own handshake in a new thread
+ errc := make(chan error, 2)
+
+ var status StatusPacket // safe to read after two values have been received from errc
+
+ go func() {
+ errc <- p2p.Send(p.rw, StatusMsg, &StatusPacket{
+ ProtocolVersion: uint32(p.version),
+ NetworkID: network,
+ TD: td,
+ Head: head,
+ Genesis: genesis,
+ ForkID: forkID,
+ })
+ }()
+ go func() {
+ errc <- p.readStatus(network, &status, genesis, forkFilter)
+ }()
+ timeout := time.NewTimer(handshakeTimeout)
+ defer timeout.Stop()
+ for i := 0; i < 2; i++ {
+ select {
+ case err := <-errc:
+ if err != nil {
+ return err
+ }
+ case <-timeout.C:
+ return p2p.DiscReadTimeout
+ }
+ }
+ p.td, p.head = status.TD, status.Head
+
+ // TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
+ // larger, it will still fit within 100 bits
+ if tdlen := p.td.BitLen(); tdlen > 100 {
+ return fmt.Errorf("too large total difficulty: bitlen %d", tdlen)
+ }
+ return nil
+}
+
+// readStatus reads the remote handshake message.
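As a rough orientation aid, and not code contained in this patch, the sketch below shows one way the handshake above could be wired in by whoever registers the capability: run `Handshake` first, then hand the peer to the message loop. The `Backend`, `RunPeer`, `Handle`, `NewPeer` and `TxPool` names come from elsewhere in this change; the helper name `makeProtocol`, the `chain` parameter and the use of `*core.BlockChain` are illustrative assumptions.

```go
package eth

import (
    "github.com/ethereum/go-ethereum/core"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/p2p"
)

// makeProtocol is an illustrative sketch of exposing a single `eth`
// capability built around Handshake/Handle. The backend and chain are
// assumed to be supplied by the embedding node.
func makeProtocol(backend Backend, chain *core.BlockChain, network uint64) p2p.Protocol {
    return p2p.Protocol{
        Name:    ProtocolName,
        Version: ETH66,
        Length:  protocolLengths[ETH66],
        Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
            peer := NewPeer(ETH66, p, rw, backend.TxPool())
            defer peer.Close()

            // Negotiate status against the current chain head before serving.
            head := chain.CurrentBlock()
            forkID := forkid.NewID(chain.Config(), chain.Genesis().Hash(), head.NumberU64())
            if err := peer.Handshake(network, chain.GetTd(head.Hash(), head.NumberU64()),
                head.Hash(), chain.Genesis().Hash(), forkID, forkid.NewFilter(chain)); err != nil {
                return err
            }
            // Hand the authenticated peer over to the message dispatch loop.
            return backend.RunPeer(peer, func(peer *Peer) error {
                return Handle(backend, peer)
            })
        },
    }
}
```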
+func (p *Peer) readStatus(network uint64, status *StatusPacket, genesis common.Hash, forkFilter forkid.Filter) error { + msg, err := p.rw.ReadMsg() + if err != nil { + return err + } + if msg.Code != StatusMsg { + return fmt.Errorf("%w: first msg has code %x (!= %x)", errNoStatusMsg, msg.Code, StatusMsg) + } + if msg.Size > maxMessageSize { + return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize) + } + // Decode the handshake and make sure everything matches + if err := msg.Decode(&status); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + if status.NetworkID != network { + return fmt.Errorf("%w: %d (!= %d)", errNetworkIDMismatch, status.NetworkID, network) + } + if uint(status.ProtocolVersion) != p.version { + return fmt.Errorf("%w: %d (!= %d)", errProtocolVersionMismatch, status.ProtocolVersion, p.version) + } + if status.Genesis != genesis { + return fmt.Errorf("%w: %x (!= %x)", errGenesisMismatch, status.Genesis, genesis) + } + if err := forkFilter(status.ForkID); err != nil { + return fmt.Errorf("%w: %v", errForkIDRejected, err) + } + return nil +} diff --git a/eth/protocols/eth/handshake_test.go b/eth/protocols/eth/handshake_test.go new file mode 100644 index 0000000000..65f9a00064 --- /dev/null +++ b/eth/protocols/eth/handshake_test.go @@ -0,0 +1,91 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// Tests that handshake failures are detected and reported correctly. 
+func TestHandshake64(t *testing.T) { testHandshake(t, 64) } +func TestHandshake65(t *testing.T) { testHandshake(t, 65) } + +func testHandshake(t *testing.T, protocol uint) { + t.Parallel() + + // Create a test backend only to have some valid genesis chain + backend := newTestBackend(3) + defer backend.close() + + var ( + genesis = backend.chain.Genesis() + head = backend.chain.CurrentBlock() + td = backend.chain.GetTd(head.Hash(), head.NumberU64()) + forkID = forkid.NewID(backend.chain.Config(), backend.chain.Genesis().Hash(), backend.chain.CurrentHeader().Number.Uint64()) + ) + tests := []struct { + code uint64 + data interface{} + want error + }{ + { + code: TransactionsMsg, data: []interface{}{}, + want: errNoStatusMsg, + }, + { + code: StatusMsg, data: StatusPacket{10, 1, td, head.Hash(), genesis.Hash(), forkID}, + want: errProtocolVersionMismatch, + }, + { + code: StatusMsg, data: StatusPacket{uint32(protocol), 999, td, head.Hash(), genesis.Hash(), forkID}, + want: errNetworkIDMismatch, + }, + { + code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), common.Hash{3}, forkID}, + want: errGenesisMismatch, + }, + { + code: StatusMsg, data: StatusPacket{uint32(protocol), 1, td, head.Hash(), genesis.Hash(), forkid.ID{Hash: [4]byte{0x00, 0x01, 0x02, 0x03}}}, + want: errForkIDRejected, + }, + } + for i, test := range tests { + // Create the two peers to shake with each other + app, net := p2p.MsgPipe() + defer app.Close() + defer net.Close() + + peer := NewPeer(protocol, p2p.NewPeer(enode.ID{}, "peer", nil), net, nil) + defer peer.Close() + + // Send the junk test with one peer, check the handshake failure + go p2p.Send(app, test.code, test.data) + + err := peer.Handshake(1, td, head.Hash(), genesis.Hash(), forkID, forkid.NewFilter(backend.chain)) + if err == nil { + t.Errorf("test %d: protocol returned nil error, want %q", i, test.want) + } else if !errors.Is(err, test.want) { + t.Errorf("test %d: wrong error: got %q, want %q", i, err, test.want) + } + } +} diff --git a/eth/protocols/eth/peer.go b/eth/protocols/eth/peer.go new file mode 100644 index 0000000000..709fca8655 --- /dev/null +++ b/eth/protocols/eth/peer.go @@ -0,0 +1,522 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "math/big" + "math/rand" + "sync" + + mapset "github.com/deckarep/golang-set" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/rlp" +) + +const ( + // maxKnownTxs is the maximum transactions hashes to keep in the known list + // before starting to randomly evict them. + maxKnownTxs = 32768 + + // maxKnownBlocks is the maximum block hashes to keep in the known list + // before starting to randomly evict them. 
+ maxKnownBlocks = 1024
+
+ // maxQueuedTxs is the maximum number of transactions to queue up before dropping
+ // older broadcasts.
+ maxQueuedTxs = 4096
+
+ // maxQueuedTxAnns is the maximum number of transaction announcements to queue up
+ // before dropping older announcements.
+ maxQueuedTxAnns = 4096
+
+ // maxQueuedBlocks is the maximum number of block propagations to queue up before
+ // dropping broadcasts. There's not much point in queueing stale blocks, so a few
+ // that might cover uncles should be enough.
+ maxQueuedBlocks = 4
+
+ // maxQueuedBlockAnns is the maximum number of block announcements to queue up before
+ // dropping broadcasts. Similarly to block propagations, there's no point to queue
+ // above some healthy uncle limit, so use that.
+ maxQueuedBlockAnns = 4
+)
+
+// max is a helper function which returns the larger of the two given integers.
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// Peer is a collection of relevant information we have about an `eth` peer.
+type Peer struct {
+ id string // Unique ID for the peer, cached
+
+ *p2p.Peer // The embedded P2P package peer
+ rw p2p.MsgReadWriter // Input/output streams for eth
+ version uint // Protocol version negotiated
+
+ head common.Hash // Latest advertised head block hash
+ td *big.Int // Latest advertised head block total difficulty
+
+ knownBlocks mapset.Set // Set of block hashes known to be known by this peer
+ queuedBlocks chan *blockPropagation // Queue of blocks to broadcast to the peer
+ queuedBlockAnns chan *types.Block // Queue of blocks to announce to the peer
+
+ txpool TxPool // Transaction pool used by the broadcasters for liveness checks
+ knownTxs mapset.Set // Set of transaction hashes known to be known by this peer
+ txBroadcast chan []common.Hash // Channel used to queue transaction propagation requests
+ txAnnounce chan []common.Hash // Channel used to queue transaction announcement requests
+
+ term chan struct{} // Termination channel to stop the broadcasters
+ lock sync.RWMutex // Mutex protecting the internal fields
+}
+
+// NewPeer creates a wrapper for a network connection and negotiated protocol
+// version.
+func NewPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter, txpool TxPool) *Peer {
+ peer := &Peer{
+ id: p.ID().String(),
+ Peer: p,
+ rw: rw,
+ version: version,
+ knownTxs: mapset.NewSet(),
+ knownBlocks: mapset.NewSet(),
+ queuedBlocks: make(chan *blockPropagation, maxQueuedBlocks),
+ queuedBlockAnns: make(chan *types.Block, maxQueuedBlockAnns),
+ txBroadcast: make(chan []common.Hash),
+ txAnnounce: make(chan []common.Hash),
+ txpool: txpool,
+ term: make(chan struct{}),
+ }
+ // Start up all the broadcasters
+ go peer.broadcastBlocks()
+ go peer.broadcastTransactions()
+ if version >= ETH65 {
+ go peer.announceTransactions()
+ }
+ return peer
+}
+
+// Close signals the broadcast goroutine to terminate. Only ever call this if
+// you created the peer yourself via NewPeer. Otherwise let whoever created it
+// clean it up!
+func (p *Peer) Close() {
+ close(p.term)
+}
+
+// ID retrieves the peer's unique identifier.
+func (p *Peer) ID() string {
+ return p.id
+}
+
+// Version retrieves the peer's negotiated `eth` protocol version.
+func (p *Peer) Version() uint {
+ return p.version
+}
+
+// Head retrieves the current head hash and total difficulty of the peer.
+func (p *Peer) Head() (hash common.Hash, td *big.Int) {
+ p.lock.RLock()
+ defer p.lock.RUnlock()
+
+ copy(hash[:], p.head[:])
+ return hash, new(big.Int).Set(p.td)
+}
+
+// SetHead updates the head hash and total difficulty of the peer.
+func (p *Peer) SetHead(hash common.Hash, td *big.Int) {
+ p.lock.Lock()
+ defer p.lock.Unlock()
+
+ copy(p.head[:], hash[:])
+ p.td.Set(td)
+}
+
+// KnownBlock returns whether the peer is known to already have a block.
+func (p *Peer) KnownBlock(hash common.Hash) bool {
+ return p.knownBlocks.Contains(hash)
+}
+
+// KnownTransaction returns whether the peer is known to already have a transaction.
+func (p *Peer) KnownTransaction(hash common.Hash) bool {
+ return p.knownTxs.Contains(hash)
+}
+
+// markBlock marks a block as known for the peer, ensuring that the block will
+// never be propagated to this particular peer.
+func (p *Peer) markBlock(hash common.Hash) {
+ // If we reached the memory allowance, drop a previously known block hash
+ for p.knownBlocks.Cardinality() >= maxKnownBlocks {
+ p.knownBlocks.Pop()
+ }
+ p.knownBlocks.Add(hash)
+}
+
+// markTransaction marks a transaction as known for the peer, ensuring that it
+// will never be propagated to this particular peer.
+func (p *Peer) markTransaction(hash common.Hash) {
+ // If we reached the memory allowance, drop a previously known transaction hash
+ for p.knownTxs.Cardinality() >= maxKnownTxs {
+ p.knownTxs.Pop()
+ }
+ p.knownTxs.Add(hash)
+}
+
+// SendTransactions sends transactions to the peer and includes the hashes
+// in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction sender. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
+//
+// The reason this is public is to allow packages using this protocol to write
+// tests that directly send messages without having to do the async queueing.
+func (p *Peer) SendTransactions(txs types.Transactions) error {
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(txs)) {
+ p.knownTxs.Pop()
+ }
+ for _, tx := range txs {
+ p.knownTxs.Add(tx.Hash())
+ }
+ return p2p.Send(p.rw, TransactionsMsg, txs)
+}
+
+// AsyncSendTransactions queues a list of transactions (by hash) to eventually
+// propagate to a remote peer. The number of pending sends is capped (new ones
+// will force old sends to be dropped)
+func (p *Peer) AsyncSendTransactions(hashes []common.Hash) {
+ select {
+ case p.txBroadcast <- hashes:
+ // Mark all the transactions as known, but ensure we don't overflow our limits
+ for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) {
+ p.knownTxs.Pop()
+ }
+ for _, hash := range hashes {
+ p.knownTxs.Add(hash)
+ }
+ case <-p.term:
+ p.Log().Debug("Dropping transaction propagation", "count", len(hashes))
+ }
+}
+
+// sendPooledTransactionHashes sends transaction hashes to the peer and includes
+// them in its transaction hash set for future reference.
+//
+// This method is a helper used by the async transaction announcer. Don't call it
+// directly as the queueing (memory) and transmission (bandwidth) costs should
+// not be managed directly.
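To make the "known hashes" bookkeeping above concrete, here is a tiny standalone sketch, not part of the patch, of the same bounded-set pattern: before a batch is added, random old entries are popped so the set never exceeds its cap. The cap of 8 and the `add` helper are illustrative only.

```go
package main

import (
    "fmt"

    mapset "github.com/deckarep/golang-set"
)

func max(a, b int) int {
    if a > b {
        return a
    }
    return b
}

func main() {
    const maxKnown = 8

    known := mapset.NewSet()
    add := func(items ...string) {
        // Evict random previously-known entries until the new batch fits.
        for known.Cardinality() > max(0, maxKnown-len(items)) {
            known.Pop()
        }
        for _, it := range items {
            known.Add(it)
        }
    }
    for i := 0; i < 20; i++ {
        add(fmt.Sprintf("tx-%d", i))
    }
    fmt.Println("tracked:", known.Cardinality()) // never exceeds maxKnown
}
```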
+func (p *Peer) sendPooledTransactionHashes(hashes []common.Hash) error { + // Mark all the transactions as known, but ensure we don't overflow our limits + for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { + p.knownTxs.Pop() + } + for _, hash := range hashes { + p.knownTxs.Add(hash) + } + return p2p.Send(p.rw, NewPooledTransactionHashesMsg, NewPooledTransactionHashesPacket(hashes)) +} + +// AsyncSendPooledTransactionHashes queues a list of transactions hashes to eventually +// announce to a remote peer. The number of pending sends are capped (new ones +// will force old sends to be dropped) +func (p *Peer) AsyncSendPooledTransactionHashes(hashes []common.Hash) { + select { + case p.txAnnounce <- hashes: + // Mark all the transactions as known, but ensure we don't overflow our limits + for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { + p.knownTxs.Pop() + } + for _, hash := range hashes { + p.knownTxs.Add(hash) + } + case <-p.term: + p.Log().Debug("Dropping transaction announcement", "count", len(hashes)) + } +} + +// SendPooledTransactionsRLP sends requested transactions to the peer and adds the +// hashes in its transaction hash set for future reference. +// +// Note, the method assumes the hashes are correct and correspond to the list of +// transactions being sent. +func (p *Peer) SendPooledTransactionsRLP(hashes []common.Hash, txs []rlp.RawValue) error { + // Mark all the transactions as known, but ensure we don't overflow our limits + for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { + p.knownTxs.Pop() + } + for _, hash := range hashes { + p.knownTxs.Add(hash) + } + return p2p.Send(p.rw, PooledTransactionsMsg, txs) // Not packed into PooledTransactionsPacket to avoid RLP decoding +} + +// ReplyPooledTransactionsRLP is the eth/66 version of SendPooledTransactionsRLP. +func (p *Peer) ReplyPooledTransactionsRLP(id uint64, hashes []common.Hash, txs []rlp.RawValue) error { + // Mark all the transactions as known, but ensure we don't overflow our limits + for p.knownTxs.Cardinality() > max(0, maxKnownTxs-len(hashes)) { + p.knownTxs.Pop() + } + for _, hash := range hashes { + p.knownTxs.Add(hash) + } + // Not packed into PooledTransactionsPacket to avoid RLP decoding + return p2p.Send(p.rw, PooledTransactionsMsg, PooledTransactionsRLPPacket66{ + RequestId: id, + PooledTransactionsRLPPacket: txs, + }) +} + +// SendNewBlockHashes announces the availability of a number of blocks through +// a hash notification. +func (p *Peer) SendNewBlockHashes(hashes []common.Hash, numbers []uint64) error { + // Mark all the block hashes as known, but ensure we don't overflow our limits + for p.knownBlocks.Cardinality() > max(0, maxKnownBlocks-len(hashes)) { + p.knownBlocks.Pop() + } + for _, hash := range hashes { + p.knownBlocks.Add(hash) + } + request := make(NewBlockHashesPacket, len(hashes)) + for i := 0; i < len(hashes); i++ { + request[i].Hash = hashes[i] + request[i].Number = numbers[i] + } + return p2p.Send(p.rw, NewBlockHashesMsg, request) +} + +// AsyncSendNewBlockHash queues the availability of a block for propagation to a +// remote peer. If the peer's broadcast queue is full, the event is silently +// dropped. 
+func (p *Peer) AsyncSendNewBlockHash(block *types.Block) { + select { + case p.queuedBlockAnns <- block: + // Mark all the block hash as known, but ensure we don't overflow our limits + for p.knownBlocks.Cardinality() >= maxKnownBlocks { + p.knownBlocks.Pop() + } + p.knownBlocks.Add(block.Hash()) + default: + p.Log().Debug("Dropping block announcement", "number", block.NumberU64(), "hash", block.Hash()) + } +} + +// SendNewBlock propagates an entire block to a remote peer. +func (p *Peer) SendNewBlock(block *types.Block, td *big.Int) error { + // Mark all the block hash as known, but ensure we don't overflow our limits + for p.knownBlocks.Cardinality() >= maxKnownBlocks { + p.knownBlocks.Pop() + } + p.knownBlocks.Add(block.Hash()) + return p2p.Send(p.rw, NewBlockMsg, &NewBlockPacket{ + Block: block, + TD: td, + }) +} + +// AsyncSendNewBlock queues an entire block for propagation to a remote peer. If +// the peer's broadcast queue is full, the event is silently dropped. +func (p *Peer) AsyncSendNewBlock(block *types.Block, td *big.Int) { + select { + case p.queuedBlocks <- &blockPropagation{block: block, td: td}: + // Mark all the block hash as known, but ensure we don't overflow our limits + for p.knownBlocks.Cardinality() >= maxKnownBlocks { + p.knownBlocks.Pop() + } + p.knownBlocks.Add(block.Hash()) + default: + p.Log().Debug("Dropping block propagation", "number", block.NumberU64(), "hash", block.Hash()) + } +} + +// SendBlockHeaders sends a batch of block headers to the remote peer. +func (p *Peer) SendBlockHeaders(headers []*types.Header) error { + return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket(headers)) +} + +// ReplyBlockHeaders is the eth/66 version of SendBlockHeaders. +func (p *Peer) ReplyBlockHeaders(id uint64, headers []*types.Header) error { + return p2p.Send(p.rw, BlockHeadersMsg, BlockHeadersPacket66{ + RequestId: id, + BlockHeadersPacket: headers, + }) +} + +// SendBlockBodiesRLP sends a batch of block contents to the remote peer from +// an already RLP encoded format. +func (p *Peer) SendBlockBodiesRLP(bodies []rlp.RawValue) error { + return p2p.Send(p.rw, BlockBodiesMsg, bodies) // Not packed into BlockBodiesPacket to avoid RLP decoding +} + +// ReplyBlockBodiesRLP is the eth/66 version of SendBlockBodiesRLP. +func (p *Peer) ReplyBlockBodiesRLP(id uint64, bodies []rlp.RawValue) error { + // Not packed into BlockBodiesPacket to avoid RLP decoding + return p2p.Send(p.rw, BlockBodiesMsg, BlockBodiesRLPPacket66{ + RequestId: id, + BlockBodiesRLPPacket: bodies, + }) +} + +// SendNodeDataRLP sends a batch of arbitrary internal data, corresponding to the +// hashes requested. +func (p *Peer) SendNodeData(data [][]byte) error { + return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket(data)) +} + +// ReplyNodeData is the eth/66 response to GetNodeData. +func (p *Peer) ReplyNodeData(id uint64, data [][]byte) error { + return p2p.Send(p.rw, NodeDataMsg, NodeDataPacket66{ + RequestId: id, + NodeDataPacket: data, + }) +} + +// SendReceiptsRLP sends a batch of transaction receipts, corresponding to the +// ones requested from an already RLP encoded format. +func (p *Peer) SendReceiptsRLP(receipts []rlp.RawValue) error { + return p2p.Send(p.rw, ReceiptsMsg, receipts) // Not packed into ReceiptsPacket to avoid RLP decoding +} + +// ReplyReceiptsRLP is the eth/66 response to GetReceipts. 
+func (p *Peer) ReplyReceiptsRLP(id uint64, receipts []rlp.RawValue) error { + return p2p.Send(p.rw, ReceiptsMsg, ReceiptsRLPPacket66{ + RequestId: id, + ReceiptsRLPPacket: receipts, + }) +} + +// RequestOneHeader is a wrapper around the header query functions to fetch a +// single header. It is used solely by the fetcher. +func (p *Peer) RequestOneHeader(hash common.Hash) error { + p.Log().Debug("Fetching single header", "hash", hash) + query := GetBlockHeadersPacket{ + Origin: HashOrNumber{Hash: hash}, + Amount: uint64(1), + Skip: uint64(0), + Reverse: false, + } + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: rand.Uint64(), + GetBlockHeadersPacket: &query, + }) + } + return p2p.Send(p.rw, GetBlockHeadersMsg, &query) +} + +// RequestHeadersByHash fetches a batch of blocks' headers corresponding to the +// specified header query, based on the hash of an origin block. +func (p *Peer) RequestHeadersByHash(origin common.Hash, amount int, skip int, reverse bool) error { + p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse) + query := GetBlockHeadersPacket{ + Origin: HashOrNumber{Hash: origin}, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + } + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: rand.Uint64(), + GetBlockHeadersPacket: &query, + }) + } + return p2p.Send(p.rw, GetBlockHeadersMsg, &query) +} + +// RequestHeadersByNumber fetches a batch of blocks' headers corresponding to the +// specified header query, based on the number of an origin block. +func (p *Peer) RequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { + p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse) + query := GetBlockHeadersPacket{ + Origin: HashOrNumber{Number: origin}, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + } + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{ + RequestId: rand.Uint64(), + GetBlockHeadersPacket: &query, + }) + } + return p2p.Send(p.rw, GetBlockHeadersMsg, &query) +} + +// ExpectRequestHeadersByNumber is a testing method to mirror the recipient side +// of the RequestHeadersByNumber operation. +func (p *Peer) ExpectRequestHeadersByNumber(origin uint64, amount int, skip int, reverse bool) error { + req := &GetBlockHeadersPacket{ + Origin: HashOrNumber{Number: origin}, + Amount: uint64(amount), + Skip: uint64(skip), + Reverse: reverse, + } + return p2p.ExpectMsg(p.rw, GetBlockHeadersMsg, req) +} + +// RequestBodies fetches a batch of blocks' bodies corresponding to the hashes +// specified. +func (p *Peer) RequestBodies(hashes []common.Hash) error { + p.Log().Debug("Fetching batch of block bodies", "count", len(hashes)) + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetBlockBodiesMsg, &GetBlockBodiesPacket66{ + RequestId: rand.Uint64(), + GetBlockBodiesPacket: hashes, + }) + } + return p2p.Send(p.rw, GetBlockBodiesMsg, GetBlockBodiesPacket(hashes)) +} + +// RequestNodeData fetches a batch of arbitrary data from a node's known state +// data, corresponding to the specified hashes. 
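The Request* helpers above stamp each eth/66 query with a random RequestId but leave response matching to the caller. The following sketch of caller-side bookkeeping is an assumption of this edit, not something the patch provides, and it is written as if it lived inside this package so it can reach the unexported `rw` field.

```go
package eth

import (
    "math/rand"
    "sync"

    "github.com/ethereum/go-ethereum/core/types"
    "github.com/ethereum/go-ethereum/p2p"
)

// headerRequestTracker correlates eth/66 header replies with the requests
// that produced them, keyed by the RequestId sent on the wire.
type headerRequestTracker struct {
    mu      sync.Mutex
    pending map[uint64]chan []*types.Header // request id -> waiting caller
}

func newHeaderRequestTracker() *headerRequestTracker {
    return &headerRequestTracker{pending: make(map[uint64]chan []*types.Header)}
}

// request sends a GetBlockHeaders query wrapped in the eth/66 envelope and
// registers a channel the eventual reply will be delivered on.
func (t *headerRequestTracker) request(p *Peer, origin uint64, amount int) (<-chan []*types.Header, error) {
    id := rand.Uint64()
    res := make(chan []*types.Header, 1)

    t.mu.Lock()
    t.pending[id] = res
    t.mu.Unlock()

    err := p2p.Send(p.rw, GetBlockHeadersMsg, &GetBlockHeadersPacket66{
        RequestId: id,
        GetBlockHeadersPacket: &GetBlockHeadersPacket{
            Origin: HashOrNumber{Number: origin},
            Amount: uint64(amount),
        },
    })
    return res, err
}

// deliver hands a received *BlockHeadersPacket66 back to whichever request
// it answers; it would be invoked from the Backend.Handle path.
func (t *headerRequestTracker) deliver(res *BlockHeadersPacket66) {
    t.mu.Lock()
    ch, ok := t.pending[res.RequestId]
    delete(t.pending, res.RequestId)
    t.mu.Unlock()

    if ok {
        ch <- res.BlockHeadersPacket
    }
}
```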
+func (p *Peer) RequestNodeData(hashes []common.Hash) error { + p.Log().Debug("Fetching batch of state data", "count", len(hashes)) + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetNodeDataMsg, &GetNodeDataPacket66{ + RequestId: rand.Uint64(), + GetNodeDataPacket: hashes, + }) + } + return p2p.Send(p.rw, GetNodeDataMsg, GetNodeDataPacket(hashes)) +} + +// RequestReceipts fetches a batch of transaction receipts from a remote node. +func (p *Peer) RequestReceipts(hashes []common.Hash) error { + p.Log().Debug("Fetching batch of receipts", "count", len(hashes)) + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetReceiptsMsg, &GetReceiptsPacket66{ + RequestId: rand.Uint64(), + GetReceiptsPacket: hashes, + }) + } + return p2p.Send(p.rw, GetReceiptsMsg, GetReceiptsPacket(hashes)) +} + +// RequestTxs fetches a batch of transactions from a remote node. +func (p *Peer) RequestTxs(hashes []common.Hash) error { + p.Log().Debug("Fetching batch of transactions", "count", len(hashes)) + if p.Version() >= ETH66 { + return p2p.Send(p.rw, GetPooledTransactionsMsg, &GetPooledTransactionsPacket66{ + RequestId: rand.Uint64(), + GetPooledTransactionsPacket: hashes, + }) + } + return p2p.Send(p.rw, GetPooledTransactionsMsg, GetPooledTransactionsPacket(hashes)) +} diff --git a/eth/protocols/eth/peer_test.go b/eth/protocols/eth/peer_test.go new file mode 100644 index 0000000000..70e9959f82 --- /dev/null +++ b/eth/protocols/eth/peer_test.go @@ -0,0 +1,61 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// This file contains some shares testing functionality, common to multiple +// different files and modules being tested. + +package eth + +import ( + "crypto/rand" + + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" +) + +// testPeer is a simulated peer to allow testing direct network calls. +type testPeer struct { + *Peer + + net p2p.MsgReadWriter // Network layer reader/writer to simulate remote messaging + app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side +} + +// newTestPeer creates a new peer registered at the given data backend. +func newTestPeer(name string, version uint, backend Backend) (*testPeer, <-chan error) { + // Create a message pipe to communicate through + app, net := p2p.MsgPipe() + + // Start the peer on a new thread + var id enode.ID + rand.Read(id[:]) + + peer := NewPeer(version, p2p.NewPeer(id, name, nil), net, backend.TxPool()) + errc := make(chan error, 1) + go func() { + errc <- backend.RunPeer(peer, func(peer *Peer) error { + return Handle(backend, peer) + }) + }() + return &testPeer{app: app, net: net, Peer: peer}, errc +} + +// close terminates the local side of the peer, notifying the remote protocol +// manager of termination. 
+func (p *testPeer) close() { + p.Peer.Close() + p.app.Close() +} diff --git a/eth/protocols/eth/protocol.go b/eth/protocols/eth/protocol.go new file mode 100644 index 0000000000..7f1832754f --- /dev/null +++ b/eth/protocols/eth/protocol.go @@ -0,0 +1,368 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "fmt" + "io" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +// Constants to match up protocol versions and messages +const ( + ETH64 = 64 + ETH65 = 65 + ETH66 = 66 +) + +// ProtocolName is the official short name of the `eth` protocol used during +// devp2p capability negotiation. +const ProtocolName = "eth" + +// ProtocolVersions are the supported versions of the `eth` protocol (first +// is primary). +var ProtocolVersions = []uint{ETH66, ETH65, ETH64} + +// protocolLengths are the number of implemented message corresponding to +// different protocol versions. +var protocolLengths = map[uint]uint64{ETH66: 17, ETH65: 17, ETH64: 17} + +// maxMessageSize is the maximum cap on the size of a protocol message. +const maxMessageSize = 10 * 1024 * 1024 + +const ( + // Protocol messages in eth/64 + StatusMsg = 0x00 + NewBlockHashesMsg = 0x01 + TransactionsMsg = 0x02 + GetBlockHeadersMsg = 0x03 + BlockHeadersMsg = 0x04 + GetBlockBodiesMsg = 0x05 + BlockBodiesMsg = 0x06 + NewBlockMsg = 0x07 + GetNodeDataMsg = 0x0d + NodeDataMsg = 0x0e + GetReceiptsMsg = 0x0f + ReceiptsMsg = 0x10 + + // Protocol messages overloaded in eth/65 + NewPooledTransactionHashesMsg = 0x08 + GetPooledTransactionsMsg = 0x09 + PooledTransactionsMsg = 0x0a +) + +var ( + errNoStatusMsg = errors.New("no status message") + errMsgTooLarge = errors.New("message too long") + errDecode = errors.New("invalid message") + errInvalidMsgCode = errors.New("invalid message code") + errProtocolVersionMismatch = errors.New("protocol version mismatch") + errNetworkIDMismatch = errors.New("network ID mismatch") + errGenesisMismatch = errors.New("genesis mismatch") + errForkIDRejected = errors.New("fork ID rejected") +) + +// Packet represents a p2p message in the `eth` protocol. +type Packet interface { + Name() string // Name returns a string corresponding to the message type. + Kind() byte // Kind returns the message type. +} + +// StatusPacket is the network packet for the status message for eth/64 and later. +type StatusPacket struct { + ProtocolVersion uint32 + NetworkID uint64 + TD *big.Int + Head common.Hash + Genesis common.Hash + ForkID forkid.ID +} + +// NewBlockHashesPacket is the network packet for the block announcements. 
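For a feel of what actually crosses the wire when the constants and StatusPacket above are used, here is a small illustrative sketch, not part of the patch; every field value in it is made up.

```go
package eth

import (
    "fmt"
    "math/big"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/core/forkid"
    "github.com/ethereum/go-ethereum/rlp"
)

// statusWireExample RLP-encodes a StatusPacket the same way the handshake
// does, just to make the status payload tangible. Values are arbitrary.
func statusWireExample() {
    status := &StatusPacket{
        ProtocolVersion: ETH66,
        NetworkID:       1,
        TD:              big.NewInt(1_000_000),
        Head:            common.HexToHash("0xaa"),
        Genesis:         common.HexToHash("0xbb"),
        ForkID:          forkid.ID{Hash: [4]byte{0xde, 0xad, 0xbe, 0xef}, Next: 0},
    }
    enc, err := rlp.EncodeToBytes(status)
    fmt.Printf("status payload: %d bytes, err=%v\n", len(enc), err)
}
```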
+type NewBlockHashesPacket []struct {
+ Hash common.Hash // Hash of one particular block being announced
+ Number uint64 // Number of one particular block being announced
+}
+
+// Unpack retrieves the block hashes and numbers from the announcement packet
+// and returns them in a split flat format that's more consistent with the
+// internal data structures.
+func (p *NewBlockHashesPacket) Unpack() ([]common.Hash, []uint64) {
+ var (
+ hashes = make([]common.Hash, len(*p))
+ numbers = make([]uint64, len(*p))
+ )
+ for i, body := range *p {
+ hashes[i], numbers[i] = body.Hash, body.Number
+ }
+ return hashes, numbers
+}
+
+// TransactionsPacket is the network packet for broadcasting new transactions.
+type TransactionsPacket []*types.Transaction
+
+// GetBlockHeadersPacket represents a block header query.
+type GetBlockHeadersPacket struct {
+ Origin HashOrNumber // Block from which to retrieve headers
+ Amount uint64 // Maximum number of headers to retrieve
+ Skip uint64 // Blocks to skip between consecutive headers
+ Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis)
+}
+
+// GetBlockHeadersPacket66 represents a block header query over eth/66.
+type GetBlockHeadersPacket66 struct {
+ RequestId uint64
+ *GetBlockHeadersPacket
+}
+
+// HashOrNumber is a combined field for specifying an origin block.
+type HashOrNumber struct {
+ Hash common.Hash // Block hash from which to retrieve headers (excludes Number)
+ Number uint64 // Block number from which to retrieve headers (excludes Hash)
+}
+
+// EncodeRLP is a specialized encoder for HashOrNumber to encode only one of the
+// two contained union fields.
+func (hn *HashOrNumber) EncodeRLP(w io.Writer) error {
+ if hn.Hash == (common.Hash{}) {
+ return rlp.Encode(w, hn.Number)
+ }
+ if hn.Number != 0 {
+ return fmt.Errorf("both origin hash (%x) and number (%d) provided", hn.Hash, hn.Number)
+ }
+ return rlp.Encode(w, hn.Hash)
+}
+
+// DecodeRLP is a specialized decoder for HashOrNumber to decode the contents
+// into either a block hash or a block number.
+func (hn *HashOrNumber) DecodeRLP(s *rlp.Stream) error {
+ _, size, _ := s.Kind()
+ origin, err := s.Raw()
+ if err == nil {
+ switch {
+ case size == 32:
+ err = rlp.DecodeBytes(origin, &hn.Hash)
+ case size <= 8:
+ err = rlp.DecodeBytes(origin, &hn.Number)
+ default:
+ err = fmt.Errorf("invalid input size %d for origin", size)
+ }
+ }
+ return err
+}
+
+// BlockHeadersPacket represents a block header response.
+type BlockHeadersPacket []*types.Header
+
+// BlockHeadersPacket66 represents a block header response over eth/66.
+type BlockHeadersPacket66 struct {
+ RequestId uint64
+ BlockHeadersPacket
+}
+
+// NewBlockPacket is the network packet for the block propagation message.
+type NewBlockPacket struct {
+ Block *types.Block
+ TD *big.Int
+}
+
+// sanityCheck verifies that the values are reasonable, as a DoS protection
+func (request *NewBlockPacket) sanityCheck() error {
+ if err := request.Block.SanityCheck(); err != nil {
+ return err
+ }
+ // TD at mainnet block #7753254 is 76 bits. If it becomes 100 million times
+ // larger, it will still fit within 100 bits
+ if tdlen := request.TD.BitLen(); tdlen > 100 {
+ return fmt.Errorf("too large block TD: bitlen %d", tdlen)
+ }
+ return nil
+}
+
+// GetBlockBodiesPacket represents a block body query.
+type GetBlockBodiesPacket []common.Hash
+
+// GetBlockBodiesPacket66 represents a block body query over eth/66.
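Because HashOrNumber above defines a custom union encoding, a short sketch may help; it is an illustration, not part of the patch. Depending on which field is set, the value serializes as either an RLP integer or a 32-byte RLP string, which is exactly the size distinction DecodeRLP switches on.

```go
package eth

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/rlp"
)

// hashOrNumberWireExample encodes both forms of the origin union so the two
// on-wire shapes can be compared side by side.
func hashOrNumberWireExample() {
    byNumber, _ := rlp.EncodeToBytes(&HashOrNumber{Number: 314})
    byHash, _ := rlp.EncodeToBytes(&HashOrNumber{Hash: common.HexToHash("0xdeadc0de")})

    fmt.Printf("number origin: %x\n", byNumber) // a short integer encoding
    fmt.Printf("hash origin:   %x\n", byHash)   // a 33-byte string encoding (0xa0 prefix + 32 bytes)
}
```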
+type GetBlockBodiesPacket66 struct { + RequestId uint64 + GetBlockBodiesPacket +} + +// BlockBodiesPacket is the network packet for block content distribution. +type BlockBodiesPacket []*BlockBody + +// BlockBodiesPacket is the network packet for block content distribution over eth/66. +type BlockBodiesPacket66 struct { + RequestId uint64 + BlockBodiesPacket +} + +// BlockBodiesRLPPacket is used for replying to block body requests, in cases +// where we already have them RLP-encoded, and thus can avoid the decode-encode +// roundtrip. +type BlockBodiesRLPPacket []rlp.RawValue + +// BlockBodiesRLPPacket66 is the BlockBodiesRLPPacket over eth/66 +type BlockBodiesRLPPacket66 struct { + RequestId uint64 + BlockBodiesRLPPacket +} + +// BlockBody represents the data content of a single block. +type BlockBody struct { + Transactions []*types.Transaction // Transactions contained within a block + Uncles []*types.Header // Uncles contained within a block +} + +// Unpack retrieves the transactions and uncles from the range packet and returns +// them in a split flat format that's more consistent with the internal data structures. +func (p *BlockBodiesPacket) Unpack() ([][]*types.Transaction, [][]*types.Header) { + var ( + txset = make([][]*types.Transaction, len(*p)) + uncleset = make([][]*types.Header, len(*p)) + ) + for i, body := range *p { + txset[i], uncleset[i] = body.Transactions, body.Uncles + } + return txset, uncleset +} + +// GetNodeDataPacket represents a trie node data query. +type GetNodeDataPacket []common.Hash + +// GetNodeDataPacket represents a trie node data query over eth/66. +type GetNodeDataPacket66 struct { + RequestId uint64 + GetNodeDataPacket +} + +// NodeDataPacket is the network packet for trie node data distribution. +type NodeDataPacket [][]byte + +// NodeDataPacket is the network packet for trie node data distribution over eth/66. +type NodeDataPacket66 struct { + RequestId uint64 + NodeDataPacket +} + +// GetReceiptsPacket represents a block receipts query. +type GetReceiptsPacket []common.Hash + +// GetReceiptsPacket represents a block receipts query over eth/66. +type GetReceiptsPacket66 struct { + RequestId uint64 + GetReceiptsPacket +} + +// ReceiptsPacket is the network packet for block receipts distribution. +type ReceiptsPacket [][]*types.Receipt + +// ReceiptsPacket is the network packet for block receipts distribution over eth/66. +type ReceiptsPacket66 struct { + RequestId uint64 + ReceiptsPacket +} + +// ReceiptsRLPPacket is used for receipts, when we already have it encoded +type ReceiptsRLPPacket []rlp.RawValue + +// ReceiptsPacket66 is the eth-66 version of ReceiptsRLPPacket +type ReceiptsRLPPacket66 struct { + RequestId uint64 + ReceiptsRLPPacket +} + +// NewPooledTransactionHashesPacket represents a transaction announcement packet. +type NewPooledTransactionHashesPacket []common.Hash + +// GetPooledTransactionsPacket represents a transaction query. +type GetPooledTransactionsPacket []common.Hash + +type GetPooledTransactionsPacket66 struct { + RequestId uint64 + GetPooledTransactionsPacket +} + +// PooledTransactionsPacket is the network packet for transaction distribution. +type PooledTransactionsPacket []*types.Transaction + +// PooledTransactionsPacket is the network packet for transaction distribution over eth/66. 
+type PooledTransactionsPacket66 struct { + RequestId uint64 + PooledTransactionsPacket +} + +// PooledTransactionsPacket is the network packet for transaction distribution, used +// in the cases we already have them in rlp-encoded form +type PooledTransactionsRLPPacket []rlp.RawValue + +// PooledTransactionsRLPPacket66 is the eth/66 form of PooledTransactionsRLPPacket +type PooledTransactionsRLPPacket66 struct { + RequestId uint64 + PooledTransactionsRLPPacket +} + +func (*StatusPacket) Name() string { return "Status" } +func (*StatusPacket) Kind() byte { return StatusMsg } + +func (*NewBlockHashesPacket) Name() string { return "NewBlockHashes" } +func (*NewBlockHashesPacket) Kind() byte { return NewBlockHashesMsg } + +func (*TransactionsPacket) Name() string { return "Transactions" } +func (*TransactionsPacket) Kind() byte { return TransactionsMsg } + +func (*GetBlockHeadersPacket) Name() string { return "GetBlockHeaders" } +func (*GetBlockHeadersPacket) Kind() byte { return GetBlockHeadersMsg } + +func (*BlockHeadersPacket) Name() string { return "BlockHeaders" } +func (*BlockHeadersPacket) Kind() byte { return BlockHeadersMsg } + +func (*GetBlockBodiesPacket) Name() string { return "GetBlockBodies" } +func (*GetBlockBodiesPacket) Kind() byte { return GetBlockBodiesMsg } + +func (*BlockBodiesPacket) Name() string { return "BlockBodies" } +func (*BlockBodiesPacket) Kind() byte { return BlockBodiesMsg } + +func (*NewBlockPacket) Name() string { return "NewBlock" } +func (*NewBlockPacket) Kind() byte { return NewBlockMsg } + +func (*GetNodeDataPacket) Name() string { return "GetNodeData" } +func (*GetNodeDataPacket) Kind() byte { return GetNodeDataMsg } + +func (*NodeDataPacket) Name() string { return "NodeData" } +func (*NodeDataPacket) Kind() byte { return NodeDataMsg } + +func (*GetReceiptsPacket) Name() string { return "GetReceipts" } +func (*GetReceiptsPacket) Kind() byte { return GetReceiptsMsg } + +func (*ReceiptsPacket) Name() string { return "Receipts" } +func (*ReceiptsPacket) Kind() byte { return ReceiptsMsg } + +func (*NewPooledTransactionHashesPacket) Name() string { return "NewPooledTransactionHashes" } +func (*NewPooledTransactionHashesPacket) Kind() byte { return NewPooledTransactionHashesMsg } + +func (*GetPooledTransactionsPacket) Name() string { return "GetPooledTransactions" } +func (*GetPooledTransactionsPacket) Kind() byte { return GetPooledTransactionsMsg } + +func (*PooledTransactionsPacket) Name() string { return "PooledTransactions" } +func (*PooledTransactionsPacket) Kind() byte { return PooledTransactionsMsg } diff --git a/eth/protocols/eth/protocol_test.go b/eth/protocols/eth/protocol_test.go new file mode 100644 index 0000000000..d92f3ea837 --- /dev/null +++ b/eth/protocols/eth/protocol_test.go @@ -0,0 +1,268 @@ +// Copyright 2014 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. 
If not, see . + +package eth + +import ( + "bytes" + "math/big" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +// Tests that the custom union field encoder and decoder works correctly. +func TestGetBlockHeadersDataEncodeDecode(t *testing.T) { + // Create a "random" hash for testing + var hash common.Hash + for i := range hash { + hash[i] = byte(i) + } + // Assemble some table driven tests + tests := []struct { + packet *GetBlockHeadersPacket + fail bool + }{ + // Providing the origin as either a hash or a number should both work + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}}}, + + // Providing arbitrary query field should also work + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Number: 314}, Amount: 314, Skip: 1, Reverse: true}}, + {fail: false, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash}, Amount: 314, Skip: 1, Reverse: true}}, + + // Providing both the origin hash and origin number must fail + {fail: true, packet: &GetBlockHeadersPacket{Origin: HashOrNumber{Hash: hash, Number: 314}}}, + } + // Iterate over each of the tests and try to encode and then decode + for i, tt := range tests { + bytes, err := rlp.EncodeToBytes(tt.packet) + if err != nil && !tt.fail { + t.Fatalf("test %d: failed to encode packet: %v", i, err) + } else if err == nil && tt.fail { + t.Fatalf("test %d: encode should have failed", i) + } + if !tt.fail { + packet := new(GetBlockHeadersPacket) + if err := rlp.DecodeBytes(bytes, packet); err != nil { + t.Fatalf("test %d: failed to decode packet: %v", i, err) + } + if packet.Origin.Hash != tt.packet.Origin.Hash || packet.Origin.Number != tt.packet.Origin.Number || packet.Amount != tt.packet.Amount || + packet.Skip != tt.packet.Skip || packet.Reverse != tt.packet.Reverse { + t.Fatalf("test %d: encode decode mismatch: have %+v, want %+v", i, packet, tt.packet) + } + } + } +} + +// TestEth66EmptyMessages tests encoding of empty eth66 messages +func TestEth66EmptyMessages(t *testing.T) { + // All empty messages encodes to the same format + want := common.FromHex("c4820457c0") + + for i, msg := range []interface{}{ + // Headers + GetBlockHeadersPacket66{1111, nil}, + BlockHeadersPacket66{1111, nil}, + // Bodies + GetBlockBodiesPacket66{1111, nil}, + BlockBodiesPacket66{1111, nil}, + BlockBodiesRLPPacket66{1111, nil}, + // Node data + GetNodeDataPacket66{1111, nil}, + NodeDataPacket66{1111, nil}, + // Receipts + GetReceiptsPacket66{1111, nil}, + ReceiptsPacket66{1111, nil}, + // Transactions + GetPooledTransactionsPacket66{1111, nil}, + PooledTransactionsPacket66{1111, nil}, + PooledTransactionsRLPPacket66{1111, nil}, + + // Headers + BlockHeadersPacket66{1111, BlockHeadersPacket([]*types.Header{})}, + // Bodies + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket([]common.Hash{})}, + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{})}, + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{})}, + // Node data + GetNodeDataPacket66{1111, GetNodeDataPacket([]common.Hash{})}, + NodeDataPacket66{1111, NodeDataPacket([][]byte{})}, + // Receipts + GetReceiptsPacket66{1111, GetReceiptsPacket([]common.Hash{})}, + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{})}, + // Transactions + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket([]common.Hash{})}, + PooledTransactionsPacket66{1111, 
PooledTransactionsPacket([]*types.Transaction{})}, + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket([]rlp.RawValue{})}, + } { + if have, _ := rlp.EncodeToBytes(msg); !bytes.Equal(have, want) { + t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, msg, have, want) + } + } + +} + +// TestEth66Messages tests the encoding of all redefined eth66 messages +func TestEth66Messages(t *testing.T) { + + // Some basic structs used during testing + var ( + header *types.Header + blockBody *BlockBody + blockBodyRlp rlp.RawValue + txs []*types.Transaction + txRlps []rlp.RawValue + hashes []common.Hash + receipts []*types.Receipt + receiptsRlp rlp.RawValue + + err error + ) + header = &types.Header{ + Difficulty: big.NewInt(2222), + Number: big.NewInt(3333), + GasLimit: 4444, + GasUsed: 5555, + Time: 6666, + Extra: []byte{0x77, 0x88}, + } + // Init the transactions, taken from a different test + { + for _, hexrlp := range []string{ + "f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10", + "f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb", + } { + var tx *types.Transaction + rlpdata := common.FromHex(hexrlp) + if err := rlp.DecodeBytes(rlpdata, &tx); err != nil { + t.Fatal(err) + } + txs = append(txs, tx) + txRlps = append(txRlps, rlpdata) + } + } + // init the block body data, both object and rlp form + blockBody = &BlockBody{ + Transactions: txs, + Uncles: []*types.Header{header}, + } + blockBodyRlp, err = rlp.EncodeToBytes(blockBody) + if err != nil { + t.Fatal(err) + } + + hashes = []common.Hash{ + common.HexToHash("deadc0de"), + common.HexToHash("feedbeef"), + } + byteSlices := [][]byte{ + common.FromHex("deadc0de"), + common.FromHex("feedbeef"), + } + // init the receipts + { + receipts = []*types.Receipt{ + &types.Receipt{ + Status: types.ReceiptStatusFailed, + CumulativeGasUsed: 1, + Logs: []*types.Log{ + { + Address: common.BytesToAddress([]byte{0x11}), + Topics: []common.Hash{common.HexToHash("dead"), common.HexToHash("beef")}, + Data: []byte{0x01, 0x00, 0xff}, + }, + }, + TxHash: hashes[0], + ContractAddress: common.BytesToAddress([]byte{0x01, 0x11, 0x11}), + GasUsed: 111111, + }, + } + rlpData, err := rlp.EncodeToBytes(receipts) + if err != nil { + t.Fatal(err) + } + receiptsRlp = rlpData + } + + for i, tc := range []struct { + message interface{} + want []byte + }{ + { + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{hashes[0], 0}, 5, 5, false}}, + common.FromHex("e8820457e4a000000000000000000000000000000000000000000000000000000000deadc0de050580"), + }, + { + GetBlockHeadersPacket66{1111, &GetBlockHeadersPacket{HashOrNumber{common.Hash{}, 9999}, 5, 5, false}}, + common.FromHex("ca820457c682270f050580"), + }, + { + BlockHeadersPacket66{1111, BlockHeadersPacket{header}}, + 
common.FromHex("f90202820457f901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), + }, + { + GetBlockBodiesPacket66{1111, GetBlockBodiesPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + BlockBodiesPacket66{1111, BlockBodiesPacket([]*BlockBody{blockBody})}, + common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), + }, + { // Identical to non-rlp-shortcut version + BlockBodiesRLPPacket66{1111, BlockBodiesRLPPacket([]rlp.RawValue{blockBodyRlp})}, + 
common.FromHex("f902dc820457f902d6f902d3f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afbf901fcf901f9a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000940000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008208ae820d0582115c8215b3821a0a827788a00000000000000000000000000000000000000000000000000000000000000000880000000000000000"), + }, + { + GetNodeDataPacket66{1111, GetNodeDataPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + NodeDataPacket66{1111, NodeDataPacket(byteSlices)}, + common.FromHex("ce820457ca84deadc0de84feedbeef"), + }, + { + GetReceiptsPacket66{1111, GetReceiptsPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + ReceiptsPacket66{1111, ReceiptsPacket([][]*types.Receipt{receipts})}, + common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), + }, + { + ReceiptsRLPPacket66{1111, ReceiptsRLPPacket([]rlp.RawValue{receiptsRlp})}, + 
common.FromHex("f90172820457f9016cf90169f901668001b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000f85ff85d940000000000000000000000000000000000000011f842a0000000000000000000000000000000000000000000000000000000000000deada0000000000000000000000000000000000000000000000000000000000000beef830100ff"), + }, + { + GetPooledTransactionsPacket66{1111, GetPooledTransactionsPacket(hashes)}, + common.FromHex("f847820457f842a000000000000000000000000000000000000000000000000000000000deadc0dea000000000000000000000000000000000000000000000000000000000feedbeef"), + }, + { + PooledTransactionsPacket66{1111, PooledTransactionsPacket(txs)}, + common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), + }, + { + PooledTransactionsRLPPacket66{1111, PooledTransactionsRLPPacket(txRlps)}, + common.FromHex("f8d7820457f8d2f867088504a817c8088302e2489435353535353535353535353535353535353535358202008025a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c12a064b1702d9298fee62dfeccc57d322a463ad55ca201256d01f62b45b2e1c21c10f867098504a817c809830334509435353535353535353535353535353535353535358202d98025a052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afba052f8f61201b2b11a78d6e866abc9c3db2ae8631fa656bfe5cb53668255367afb"), + }, + } { + if have, _ := rlp.EncodeToBytes(tc.message); !bytes.Equal(have, tc.want) { + t.Errorf("test %d, type %T, have\n\t%x\nwant\n\t%x", i, tc.message, have, tc.want) + } + } +} diff --git a/p2p/discv5/metrics.go b/eth/protocols/snap/discovery.go similarity index 66% rename from p2p/discv5/metrics.go rename to eth/protocols/snap/discovery.go index e68d53c13c..684ec7e632 100644 --- a/p2p/discv5/metrics.go +++ b/eth/protocols/snap/discovery.go @@ -1,4 +1,4 @@ -// Copyright 2018 The go-ethereum Authors +// Copyright 2020 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,11 +14,19 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package discv5 +package snap -import "github.com/ethereum/go-ethereum/metrics" - -var ( - ingressTrafficMeter = metrics.NewRegisteredMeter("discv5/InboundTraffic", nil) - egressTrafficMeter = metrics.NewRegisteredMeter("discv5/OutboundTraffic", nil) +import ( + "github.com/ethereum/go-ethereum/rlp" ) + +// enrEntry is the ENR entry which advertises `snap` protocol on the discovery. +type enrEntry struct { + // Ignore additional fields (for forward compatibility). + Rest []rlp.RawValue `rlp:"tail"` +} + +// ENRKey implements enr.Entry. 
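+//
+// A remote node record can be checked for `snap` support by attempting to load
+// the entry from it (a sketch only; n is assumed to be an *enode.Node):
+//
+//	var entry enrEntry
+//	supportsSnap := n.Load(&entry) == nil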
+func (e enrEntry) ENRKey() string { + return "snap" +} diff --git a/eth/protocols/snap/handler.go b/eth/protocols/snap/handler.go new file mode 100644 index 0000000000..24c8599552 --- /dev/null +++ b/eth/protocols/snap/handler.go @@ -0,0 +1,490 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "bytes" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +const ( + // softResponseLimit is the target maximum size of replies to data retrievals. + softResponseLimit = 2 * 1024 * 1024 + + // maxCodeLookups is the maximum number of bytecodes to serve. This number is + // there to limit the number of disk lookups. + maxCodeLookups = 1024 + + // stateLookupSlack defines the ratio by how much a state response can exceed + // the requested limit in order to try and avoid breaking up contracts into + // multiple packages and proving them. + stateLookupSlack = 0.1 + + // maxTrieNodeLookups is the maximum number of state trie nodes to serve. This + // number is there to limit the number of disk lookups. + maxTrieNodeLookups = 1024 +) + +// Handler is a callback to invoke from an outside runner after the boilerplate +// exchanges have passed. +type Handler func(peer *Peer) error + +// Backend defines the data retrieval methods to serve remote requests and the +// callback methods to invoke on remote deliveries. +type Backend interface { + // Chain retrieves the blockchain object to serve data. + Chain() *core.BlockChain + + // RunPeer is invoked when a peer joins on the `eth` protocol. The handler + // should do any peer maintenance work, handshakes and validations. If all + // is passed, control should be given back to the `handler` to process the + // inbound messages going forward. + RunPeer(peer *Peer, handler Handler) error + + // PeerInfo retrieves all known `snap` information about a peer. + PeerInfo(id enode.ID) interface{} + + // Handle is a callback to be invoked when a data packet is received from + // the remote peer. Only packets not consumed by the protocol handler will + // be forwarded to the backend. + Handle(peer *Peer, packet Packet) error +} + +// MakeProtocols constructs the P2P protocol definitions for `snap`. 
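+//
+// A wiring sketch (assumptions: stack is a *node.Node, backend implements the
+// Backend interface above, and dnsdisc is an enode.Iterator from DNS discovery
+// or nil):
+//
+//	stack.RegisterProtocols(MakeProtocols(backend, dnsdisc))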
+func MakeProtocols(backend Backend, dnsdisc enode.Iterator) []p2p.Protocol {
+	protocols := make([]p2p.Protocol, len(ProtocolVersions))
+	for i, version := range ProtocolVersions {
+		version := version // Closure
+
+		protocols[i] = p2p.Protocol{
+			Name:    ProtocolName,
+			Version: version,
+			Length:  protocolLengths[version],
+			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
+				return backend.RunPeer(newPeer(version, p, rw), func(peer *Peer) error {
+					return handle(backend, peer)
+				})
+			},
+			NodeInfo: func() interface{} {
+				return nodeInfo(backend.Chain())
+			},
+			PeerInfo: func(id enode.ID) interface{} {
+				return backend.PeerInfo(id)
+			},
+			Attributes:     []enr.Entry{&enrEntry{}},
+			DialCandidates: dnsdisc,
+		}
+	}
+	return protocols
+}
+
+// handle is the callback invoked to manage the life cycle of a `snap` peer.
+// When this function terminates, the peer is disconnected.
+func handle(backend Backend, peer *Peer) error {
+	for {
+		if err := handleMessage(backend, peer); err != nil {
+			peer.Log().Debug("Message handling failed in `snap`", "err", err)
+			return err
+		}
+	}
+}
+
+// handleMessage is invoked whenever an inbound message is received from a
+// remote peer on the `snap` protocol. The remote connection is torn down upon
+// returning any error.
+func handleMessage(backend Backend, peer *Peer) error {
+	// Read the next message from the remote peer, and ensure it's fully consumed
+	msg, err := peer.rw.ReadMsg()
+	if err != nil {
+		return err
+	}
+	if msg.Size > maxMessageSize {
+		return fmt.Errorf("%w: %v > %v", errMsgTooLarge, msg.Size, maxMessageSize)
+	}
+	defer msg.Discard()
+
+	// Handle the message depending on its contents
+	switch {
+	case msg.Code == GetAccountRangeMsg:
+		// Decode the account retrieval request
+		var req GetAccountRangePacket
+		if err := msg.Decode(&req); err != nil {
+			return fmt.Errorf("%w: message %v: %v", errDecode, msg, err)
+		}
+		if req.Bytes > softResponseLimit {
+			req.Bytes = softResponseLimit
+		}
+		// Retrieve the requested state and bail out if non-existent
+		tr, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
+		if err != nil {
+			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
+		}
+		it, err := backend.Chain().Snapshots().AccountIterator(req.Root, req.Origin)
+		if err != nil {
+			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
+		}
+		// Iterate over the requested range and pile accounts up
+		var (
+			accounts []*AccountData
+			size     uint64
+			last     common.Hash
+		)
+		for it.Next() && size < req.Bytes {
+			hash, account := it.Hash(), common.CopyBytes(it.Account())
+
+			// Track the returned interval for the Merkle proofs
+			last = hash
+
+			// Assemble the reply item
+			size += uint64(common.HashLength + len(account))
+			accounts = append(accounts, &AccountData{
+				Hash: hash,
+				Body: account,
+			})
+			// If we've exceeded the request threshold, abort
+			if bytes.Compare(hash[:], req.Limit[:]) >= 0 {
+				break
+			}
+		}
+		it.Release()
+
+		// Generate the Merkle proofs for the first and last account
+		proof := light.NewNodeSet()
+		if err := tr.Prove(req.Origin[:], 0, proof); err != nil {
+			log.Warn("Failed to prove account range", "origin", req.Origin, "err", err)
+			return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
+		}
+		if last != (common.Hash{}) {
+			if err := tr.Prove(last[:], 0, proof); err != nil {
+				log.Warn("Failed to prove account range", "last", last, "err", err)
+				return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ID: req.ID})
+			}
+		}
+		var proofs [][]byte
+ for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + // Send back anything accumulated + return p2p.Send(peer.rw, AccountRangeMsg, &AccountRangePacket{ + ID: req.ID, + Accounts: accounts, + Proof: proofs, + }) + + case msg.Code == AccountRangeMsg: + // A range of accounts arrived to one of our previous requests + res := new(AccountRangePacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Ensure the range is monotonically increasing + for i := 1; i < len(res.Accounts); i++ { + if bytes.Compare(res.Accounts[i-1].Hash[:], res.Accounts[i].Hash[:]) >= 0 { + return fmt.Errorf("accounts not monotonically increasing: #%d [%x] vs #%d [%x]", i-1, res.Accounts[i-1].Hash[:], i, res.Accounts[i].Hash[:]) + } + } + return backend.Handle(peer, res) + + case msg.Code == GetStorageRangesMsg: + // Decode the storage retrieval request + var req GetStorageRangesPacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // TODO(karalabe): Do we want to enforce > 0 accounts and 1 account if origin is set? + // TODO(karalabe): - Logging locally is not ideal as remote faulst annoy the local user + // TODO(karalabe): - Dropping the remote peer is less flexible wrt client bugs (slow is better than non-functional) + + // Calculate the hard limit at which to abort, even if mid storage trie + hardLimit := uint64(float64(req.Bytes) * (1 + stateLookupSlack)) + + // Retrieve storage ranges until the packet limit is reached + var ( + slots [][]*StorageData + proofs [][]byte + size uint64 + ) + for _, account := range req.Accounts { + // If we've exceeded the requested data limit, abort without opening + // a new storage range (that we'd need to prove due to exceeded size) + if size >= req.Bytes { + break + } + // The first account might start from a different origin and end sooner + var origin common.Hash + if len(req.Origin) > 0 { + origin, req.Origin = common.BytesToHash(req.Origin), nil + } + var limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + if len(req.Limit) > 0 { + limit, req.Limit = common.BytesToHash(req.Limit), nil + } + // Retrieve the requested state and bail out if non existent + it, err := backend.Chain().Snapshots().StorageIterator(req.Root, account, origin) + if err != nil { + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + // Iterate over the requested range and pile slots up + var ( + storage []*StorageData + last common.Hash + ) + for it.Next() && size < hardLimit { + hash, slot := it.Hash(), common.CopyBytes(it.Slot()) + + // Track the returned interval for the Merkle proofs + last = hash + + // Assemble the reply item + size += uint64(common.HashLength + len(slot)) + storage = append(storage, &StorageData{ + Hash: hash, + Body: slot, + }) + // If we've exceeded the request threshold, abort + if bytes.Compare(hash[:], limit[:]) >= 0 { + break + } + } + slots = append(slots, storage) + it.Release() + + // Generate the Merkle proofs for the first and last storage slot, but + // only if the response was capped. If the entire storage trie included + // in the response, no need for any proofs. 
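+			// (Illustratively: with req.Bytes == 100_000 and stateLookupSlack == 0.1,
+			// hardLimit is 110_000 bytes, i.e. a response may overshoot the soft limit
+			// by up to 10% to finish a contract instead of splitting and proving it.)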
+ if origin != (common.Hash{}) || size >= hardLimit { + // Request started at a non-zero hash or was capped prematurely, add + // the endpoint Merkle proofs + accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB()) + if err != nil { + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + var acc state.Account + if err := rlp.DecodeBytes(accTrie.Get(account[:]), &acc); err != nil { + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + stTrie, err := trie.New(acc.Root, backend.Chain().StateCache().TrieDB()) + if err != nil { + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + proof := light.NewNodeSet() + if err := stTrie.Prove(origin[:], 0, proof); err != nil { + log.Warn("Failed to prove storage range", "origin", req.Origin, "err", err) + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + if last != (common.Hash{}) { + if err := stTrie.Prove(last[:], 0, proof); err != nil { + log.Warn("Failed to prove storage range", "last", last, "err", err) + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ID: req.ID}) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + // Proof terminates the reply as proofs are only added if a node + // refuses to serve more data (exception when a contract fetch is + // finishing, but that's that). + break + } + } + // Send back anything accumulated + return p2p.Send(peer.rw, StorageRangesMsg, &StorageRangesPacket{ + ID: req.ID, + Slots: slots, + Proof: proofs, + }) + + case msg.Code == StorageRangesMsg: + // A range of storage slots arrived to one of our previous requests + res := new(StorageRangesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + // Ensure the ranges ae monotonically increasing + for i, slots := range res.Slots { + for j := 1; j < len(slots); j++ { + if bytes.Compare(slots[j-1].Hash[:], slots[j].Hash[:]) >= 0 { + return fmt.Errorf("storage slots not monotonically increasing for account #%d: #%d [%x] vs #%d [%x]", i, j-1, slots[j-1].Hash[:], j, slots[j].Hash[:]) + } + } + } + return backend.Handle(peer, res) + + case msg.Code == GetByteCodesMsg: + // Decode bytecode retrieval request + var req GetByteCodesPacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + if len(req.Hashes) > maxCodeLookups { + req.Hashes = req.Hashes[:maxCodeLookups] + } + // Retrieve bytecodes until the packet size limit is reached + var ( + codes [][]byte + bytes uint64 + ) + for _, hash := range req.Hashes { + if hash == emptyCode { + // Peers should not request the empty code, but if they do, at + // least sent them back a correct response without db lookups + codes = append(codes, []byte{}) + } else if blob, err := backend.Chain().ContractCode(hash); err == nil { + codes = append(codes, blob) + bytes += uint64(len(blob)) + } + if bytes > req.Bytes { + break + } + } + // Send back anything accumulated + return p2p.Send(peer.rw, ByteCodesMsg, &ByteCodesPacket{ + ID: req.ID, + Codes: codes, + }) + + case msg.Code == ByteCodesMsg: + // A batch of byte codes arrived to one of our previous requests + res := new(ByteCodesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) + + case msg.Code 
== GetTrieNodesMsg: + // Decode trie node retrieval request + var req GetTrieNodesPacket + if err := msg.Decode(&req); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + if req.Bytes > softResponseLimit { + req.Bytes = softResponseLimit + } + // Make sure we have the state associated with the request + triedb := backend.Chain().StateCache().TrieDB() + + accTrie, err := trie.NewSecure(req.Root, triedb) + if err != nil { + // We don't have the requested state available, bail out + return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) + } + snap := backend.Chain().Snapshots().Snapshot(req.Root) + if snap == nil { + // We don't have the requested state snapshotted yet, bail out. + // In reality we could still serve using the account and storage + // tries only, but let's protect the node a bit while it's doing + // snapshot generation. + return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ID: req.ID}) + } + // Retrieve trie nodes until the packet size limit is reached + var ( + nodes [][]byte + bytes uint64 + loads int // Trie hash expansions to cound database reads + ) + for _, pathset := range req.Paths { + switch len(pathset) { + case 0: + // Ensure we penalize invalid requests + return fmt.Errorf("%w: zero-item pathset requested", errBadRequest) + + case 1: + // If we're only retrieving an account trie node, fetch it directly + blob, resolved, err := accTrie.TryGetNode(pathset[0]) + loads += resolved // always account database reads, even for failures + if err != nil { + break + } + nodes = append(nodes, blob) + bytes += uint64(len(blob)) + + default: + // Storage slots requested, open the storage trie and retrieve from there + account, err := snap.Account(common.BytesToHash(pathset[0])) + loads++ // always account database reads, even for failures + if err != nil { + break + } + stTrie, err := trie.NewSecure(common.BytesToHash(account.Root), triedb) + loads++ // always account database reads, even for failures + if err != nil { + break + } + for _, path := range pathset[1:] { + blob, resolved, err := stTrie.TryGetNode(path) + loads += resolved // always account database reads, even for failures + if err != nil { + break + } + nodes = append(nodes, blob) + bytes += uint64(len(blob)) + + // Sanity check limits to avoid DoS on the store trie loads + if bytes > req.Bytes || loads > maxTrieNodeLookups { + break + } + } + } + // Abort request processing if we've exceeded our limits + if bytes > req.Bytes || loads > maxTrieNodeLookups { + break + } + } + // Send back anything accumulated + return p2p.Send(peer.rw, TrieNodesMsg, &TrieNodesPacket{ + ID: req.ID, + Nodes: nodes, + }) + + case msg.Code == TrieNodesMsg: + // A batch of trie nodes arrived to one of our previous requests + res := new(TrieNodesPacket) + if err := msg.Decode(res); err != nil { + return fmt.Errorf("%w: message %v: %v", errDecode, msg, err) + } + return backend.Handle(peer, res) + + default: + return fmt.Errorf("%w: %v", errInvalidMsgCode, msg.Code) + } +} + +// NodeInfo represents a short summary of the `snap` sub-protocol metadata +// known about the host peer. +type NodeInfo struct{} + +// nodeInfo retrieves some `snap` protocol metadata about the running host node. 
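+// The returned struct is currently empty; it is exposed through the NodeInfo
+// hook wired up in MakeProtocols above.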
+func nodeInfo(chain *core.BlockChain) *NodeInfo {
+	return &NodeInfo{}
+}
diff --git a/eth/protocols/snap/peer.go b/eth/protocols/snap/peer.go
new file mode 100644
index 0000000000..4f3d550f1f
--- /dev/null
+++ b/eth/protocols/snap/peer.go
@@ -0,0 +1,116 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package snap
+
+import (
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/p2p"
+)
+
+// Peer is a collection of relevant information we have about a `snap` peer.
+type Peer struct {
+	id string // Unique ID for the peer, cached
+
+	*p2p.Peer                   // The embedded P2P package peer
+	rw        p2p.MsgReadWriter // Input/output streams for snap
+	version   uint              // Protocol version negotiated
+
+	logger log.Logger // Contextual logger with the peer id injected
+}
+
+// newPeer creates a wrapper for a network connection and negotiated protocol
+// version.
+func newPeer(version uint, p *p2p.Peer, rw p2p.MsgReadWriter) *Peer {
+	id := p.ID().String()
+	return &Peer{
+		id:      id,
+		Peer:    p,
+		rw:      rw,
+		version: version,
+		logger:  log.New("peer", id[:8]),
+	}
+}
+
+// ID retrieves the peer's unique identifier.
+func (p *Peer) ID() string {
+	return p.id
+}
+
+// Version retrieves the peer's negotiated `snap` protocol version.
+func (p *Peer) Version() uint {
+	return p.version
+}
+
+// Log overrides the P2P logger with the higher level one containing only the id.
+func (p *Peer) Log() log.Logger {
+	return p.logger
+}
+
+// RequestAccountRange fetches a batch of accounts rooted in a specific account
+// trie, starting with the origin.
+func (p *Peer) RequestAccountRange(id uint64, root common.Hash, origin, limit common.Hash, bytes uint64) error {
+	p.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes))
+	return p2p.Send(p.rw, GetAccountRangeMsg, &GetAccountRangePacket{
+		ID:     id,
+		Root:   root,
+		Origin: origin,
+		Limit:  limit,
+		Bytes:  bytes,
+	})
+}
+
+// RequestStorageRanges fetches a batch of storage slots belonging to one or more
+// accounts. If slots from only one account are requested, an origin marker may
+// also be used to retrieve from there.
+func (p *Peer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { + if len(accounts) == 1 && origin != nil { + p.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes)) + } else { + p.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes)) + } + return p2p.Send(p.rw, GetStorageRangesMsg, &GetStorageRangesPacket{ + ID: id, + Root: root, + Accounts: accounts, + Origin: origin, + Limit: limit, + Bytes: bytes, + }) +} + +// RequestByteCodes fetches a batch of bytecodes by hash. +func (p *Peer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { + p.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes)) + return p2p.Send(p.rw, GetByteCodesMsg, &GetByteCodesPacket{ + ID: id, + Hashes: hashes, + Bytes: bytes, + }) +} + +// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in +// a specificstate trie. +func (p *Peer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { + p.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) + return p2p.Send(p.rw, GetTrieNodesMsg, &GetTrieNodesPacket{ + ID: id, + Root: root, + Paths: paths, + Bytes: bytes, + }) +} diff --git a/eth/protocols/snap/protocol.go b/eth/protocols/snap/protocol.go new file mode 100644 index 0000000000..5528e9212e --- /dev/null +++ b/eth/protocols/snap/protocol.go @@ -0,0 +1,218 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/state/snapshot" + "github.com/ethereum/go-ethereum/rlp" +) + +// Constants to match up protocol versions and messages +const ( + snap1 = 1 +) + +// ProtocolName is the official short name of the `snap` protocol used during +// devp2p capability negotiation. +const ProtocolName = "snap" + +// ProtocolVersions are the supported versions of the `snap` protocol (first +// is primary). +var ProtocolVersions = []uint{snap1} + +// protocolLengths are the number of implemented message corresponding to +// different protocol versions. +var protocolLengths = map[uint]uint64{snap1: 8} + +// maxMessageSize is the maximum cap on the size of a protocol message. 
+const maxMessageSize = 10 * 1024 * 1024 + +const ( + GetAccountRangeMsg = 0x00 + AccountRangeMsg = 0x01 + GetStorageRangesMsg = 0x02 + StorageRangesMsg = 0x03 + GetByteCodesMsg = 0x04 + ByteCodesMsg = 0x05 + GetTrieNodesMsg = 0x06 + TrieNodesMsg = 0x07 +) + +var ( + errMsgTooLarge = errors.New("message too long") + errDecode = errors.New("invalid message") + errInvalidMsgCode = errors.New("invalid message code") + errBadRequest = errors.New("bad request") +) + +// Packet represents a p2p message in the `snap` protocol. +type Packet interface { + Name() string // Name returns a string corresponding to the message type. + Kind() byte // Kind returns the message type. +} + +// GetAccountRangePacket represents an account query. +type GetAccountRangePacket struct { + ID uint64 // Request ID to match up responses with + Root common.Hash // Root hash of the account trie to serve + Origin common.Hash // Hash of the first account to retrieve + Limit common.Hash // Hash of the last account to retrieve + Bytes uint64 // Soft limit at which to stop returning data +} + +// AccountRangePacket represents an account query response. +type AccountRangePacket struct { + ID uint64 // ID of the request this is a response for + Accounts []*AccountData // List of consecutive accounts from the trie + Proof [][]byte // List of trie nodes proving the account range +} + +// AccountData represents a single account in a query response. +type AccountData struct { + Hash common.Hash // Hash of the account + Body rlp.RawValue // Account body in slim format +} + +// Unpack retrieves the accounts from the range packet and converts from slim +// wire representation to consensus format. The returned data is RLP encoded +// since it's expected to be serialized to disk without further interpretation. +// +// Note, this method does a round of RLP decoding and reencoding, so only use it +// once and cache the results if need be. Ideally discard the packet afterwards +// to not double the memory use. +func (p *AccountRangePacket) Unpack() ([]common.Hash, [][]byte, error) { + var ( + hashes = make([]common.Hash, len(p.Accounts)) + accounts = make([][]byte, len(p.Accounts)) + ) + for i, acc := range p.Accounts { + val, err := snapshot.FullAccountRLP(acc.Body) + if err != nil { + return nil, nil, fmt.Errorf("invalid account %x: %v", acc.Body, err) + } + hashes[i], accounts[i] = acc.Hash, val + } + return hashes, accounts, nil +} + +// GetStorageRangesPacket represents an storage slot query. +type GetStorageRangesPacket struct { + ID uint64 // Request ID to match up responses with + Root common.Hash // Root hash of the account trie to serve + Accounts []common.Hash // Account hashes of the storage tries to serve + Origin []byte // Hash of the first storage slot to retrieve (large contract mode) + Limit []byte // Hash of the last storage slot to retrieve (large contract mode) + Bytes uint64 // Soft limit at which to stop returning data +} + +// StorageRangesPacket represents a storage slot query response. +type StorageRangesPacket struct { + ID uint64 // ID of the request this is a response for + Slots [][]*StorageData // Lists of consecutive storage slots for the requested accounts + Proof [][]byte // Merkle proofs for the *last* slot range, if it's incomplete +} + +// StorageData represents a single storage slot in a query response. 
+type StorageData struct { + Hash common.Hash // Hash of the storage slot + Body []byte // Data content of the slot +} + +// Unpack retrieves the storage slots from the range packet and returns them in +// a split flat format that's more consistent with the internal data structures. +func (p *StorageRangesPacket) Unpack() ([][]common.Hash, [][][]byte) { + var ( + hashset = make([][]common.Hash, len(p.Slots)) + slotset = make([][][]byte, len(p.Slots)) + ) + for i, slots := range p.Slots { + hashset[i] = make([]common.Hash, len(slots)) + slotset[i] = make([][]byte, len(slots)) + for j, slot := range slots { + hashset[i][j] = slot.Hash + slotset[i][j] = slot.Body + } + } + return hashset, slotset +} + +// GetByteCodesPacket represents a contract bytecode query. +type GetByteCodesPacket struct { + ID uint64 // Request ID to match up responses with + Hashes []common.Hash // Code hashes to retrieve the code for + Bytes uint64 // Soft limit at which to stop returning data +} + +// ByteCodesPacket represents a contract bytecode query response. +type ByteCodesPacket struct { + ID uint64 // ID of the request this is a response for + Codes [][]byte // Requested contract bytecodes +} + +// GetTrieNodesPacket represents a state trie node query. +type GetTrieNodesPacket struct { + ID uint64 // Request ID to match up responses with + Root common.Hash // Root hash of the account trie to serve + Paths []TrieNodePathSet // Trie node hashes to retrieve the nodes for + Bytes uint64 // Soft limit at which to stop returning data +} + +// TrieNodePathSet is a list of trie node paths to retrieve. A naive way to +// represent trie nodes would be a simple list of `account || storage` path +// segments concatenated, but that would be very wasteful on the network. +// +// Instead, this array special cases the first element as the path in the +// account trie and the remaining elements as paths in the storage trie. To +// address an account node, the slice should have a length of 1 consisting +// of only the account path. There's no need to be able to address both an +// account node and a storage node in the same request as it cannot happen +// that a slot is accessed before the account path is fully expanded. +type TrieNodePathSet [][]byte + +// TrieNodesPacket represents a state trie node query response. 
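+//
+// Nodes are returned flattened, in the order the requested path sets were
+// processed by the serving peer; paths that could not be resolved are simply
+// skipped, so a response may contain fewer entries than were requested.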
+type TrieNodesPacket struct {
+	ID    uint64   // ID of the request this is a response for
+	Nodes [][]byte // Requested state trie nodes
+}
+
+func (*GetAccountRangePacket) Name() string { return "GetAccountRange" }
+func (*GetAccountRangePacket) Kind() byte   { return GetAccountRangeMsg }
+
+func (*AccountRangePacket) Name() string { return "AccountRange" }
+func (*AccountRangePacket) Kind() byte   { return AccountRangeMsg }
+
+func (*GetStorageRangesPacket) Name() string { return "GetStorageRanges" }
+func (*GetStorageRangesPacket) Kind() byte   { return GetStorageRangesMsg }
+
+func (*StorageRangesPacket) Name() string { return "StorageRanges" }
+func (*StorageRangesPacket) Kind() byte   { return StorageRangesMsg }
+
+func (*GetByteCodesPacket) Name() string { return "GetByteCodes" }
+func (*GetByteCodesPacket) Kind() byte   { return GetByteCodesMsg }
+
+func (*ByteCodesPacket) Name() string { return "ByteCodes" }
+func (*ByteCodesPacket) Kind() byte   { return ByteCodesMsg }
+
+func (*GetTrieNodesPacket) Name() string { return "GetTrieNodes" }
+func (*GetTrieNodesPacket) Kind() byte   { return GetTrieNodesMsg }
+
+func (*TrieNodesPacket) Name() string { return "TrieNodes" }
+func (*TrieNodesPacket) Kind() byte   { return TrieNodesMsg }
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
new file mode 100644
index 0000000000..1cfdef15bd
--- /dev/null
+++ b/eth/protocols/snap/sync.go
@@ -0,0 +1,2615 @@
+// Copyright 2020 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+
+package snap
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math/big"
+	"math/rand"
+	"sync"
+	"time"
+
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/core/rawdb"
+	"github.com/ethereum/go-ethereum/core/state"
+	"github.com/ethereum/go-ethereum/crypto"
+	"github.com/ethereum/go-ethereum/ethdb"
+	"github.com/ethereum/go-ethereum/event"
+	"github.com/ethereum/go-ethereum/light"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/rlp"
+	"github.com/ethereum/go-ethereum/trie"
+	"golang.org/x/crypto/sha3"
+)
+
+var (
+	// emptyRoot is the known root hash of an empty trie.
+	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
+
+	// emptyCode is the known hash of the empty EVM bytecode.
+	emptyCode = crypto.Keccak256Hash(nil)
+)
+
+const (
+	// maxRequestSize is the maximum number of bytes to request from a remote peer.
+	maxRequestSize = 512 * 1024
+
+	// maxStorageSetRequestCount is the maximum number of contracts to request the
+	// storage of in a single query. If this number is too low, we're not filling
+	// responses fully and waste round trip times. If it's too high, we're capping
+	// responses and waste bandwidth.
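+	// With maxRequestSize at 512 * 1024 bytes this works out to 512 contracts
+	// per storage query.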
+	maxStorageSetRequestCount = maxRequestSize / 1024
+
+	// maxCodeRequestCount is the maximum number of bytecode blobs to request in a
+	// single query. If this number is too low, we're not filling responses fully
+	// and waste round trip times. If it's too high, we're capping responses and
+	// waste bandwidth.
+	//
+	// Deployed bytecodes are currently capped at 24KB, so the minimum request
+	// size should be maxRequestSize / 24K. Assuming that most contracts do not
+	// come close to that, requesting 4x should be a good approximation.
+	maxCodeRequestCount = maxRequestSize / (24 * 1024) * 4
+
+	// maxTrieRequestCount is the maximum number of trie node blobs to request in
+	// a single query. If this number is too low, we're not filling responses fully
+	// and waste round trip times. If it's too high, we're capping responses and
+	// waste bandwidth.
+	maxTrieRequestCount = 512
+
+	// accountConcurrency is the number of chunks to split the account trie into
+	// to allow concurrent retrievals.
+	accountConcurrency = 16
+
+	// storageConcurrency is the number of chunks to split a large contract
+	// storage trie into to allow concurrent retrievals.
+	storageConcurrency = 16
+)
+
+var (
+	// requestTimeout is the maximum time a peer is allowed to spend on serving
+	// a single network request.
+	requestTimeout = 10 * time.Second // TODO(karalabe): Make it dynamic ala fast-sync?
+)
+
+// ErrCancelled is returned from snap syncing if the operation was prematurely
+// terminated.
+var ErrCancelled = errors.New("sync cancelled")
+
+// accountRequest tracks a pending account range request to ensure responses are
+// to actual requests and to validate any security constraints.
+//
+// Concurrency note: account requests and responses are handled concurrently from
+// the main runloop to allow Merkle proof verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. task). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type accountRequest struct {
+	peer string // Peer to which this request is assigned
+	id   uint64 // Request ID of this request
+
+	cancel  chan struct{} // Channel to track sync cancellation
+	timeout *time.Timer   // Timer to track delivery timeout
+	stale   chan struct{} // Channel to signal the request was dropped
+
+	origin common.Hash // First account requested to allow continuation checks
+	limit  common.Hash // Last account requested to allow non-overlapping chunking
+
+	task *accountTask // Task which this request is filling (only access fields through the runloop!!)
+}
+
+// accountResponse is an already Merkle-verified remote response to an account
+// range request. It contains the subtrie for the requested account range and
+// the database that's going to be filled with the internal nodes on commit.
+type accountResponse struct {
+	task *accountTask // Task which this request is filling
+
+	hashes   []common.Hash    // Account hashes in the returned range
+	accounts []*state.Account // Expanded accounts in the returned range
+
+	nodes ethdb.KeyValueStore // Database containing the reconstructed trie nodes
+	trie  *trie.Trie          // Reconstructed trie to reject incomplete account paths
+
+	bounds   map[common.Hash]struct{} // Boundary nodes to avoid persisting incomplete accounts
+	overflow *light.NodeSet           // Overflow nodes to avoid persisting across chunk boundaries
+
+	cont bool // Whether the account range has a continuation
+}
+
+// bytecodeRequest tracks a pending bytecode request to ensure responses are to
+// actual requests and to validate any security constraints.
+//
+// Concurrency note: bytecode requests and responses are handled concurrently from
+// the main runloop to allow Keccak256 hash verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. task). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type bytecodeRequest struct {
+	peer string // Peer to which this request is assigned
+	id   uint64 // Request ID of this request
+
+	cancel  chan struct{} // Channel to track sync cancellation
+	timeout *time.Timer   // Timer to track delivery timeout
+	stale   chan struct{} // Channel to signal the request was dropped
+
+	hashes []common.Hash // Bytecode hashes to validate responses
+	task   *accountTask  // Task which this request is filling (only access fields through the runloop!!)
+}
+
+// bytecodeResponse is an already verified remote response to a bytecode request.
+type bytecodeResponse struct {
+	task *accountTask // Task which this request is filling
+
+	hashes []common.Hash // Hashes of the bytecode to avoid double hashing
+	codes  [][]byte      // Actual bytecodes to store into the database (nil = missing)
+}
+
+// storageRequest tracks a pending storage ranges request to ensure responses are
+// to actual requests and to validate any security constraints.
+//
+// Concurrency note: storage requests and responses are handled concurrently from
+// the main runloop to allow Merkle proof verifications on the peer's thread and
+// to drop on invalid response. The request struct must contain all the data to
+// construct the response without accessing runloop internals (i.e. tasks). That
+// is only included to allow the runloop to match a response to the task being
+// synced without having yet another set of maps.
+type storageRequest struct {
+	peer string // Peer to which this request is assigned
+	id   uint64 // Request ID of this request
+
+	cancel  chan struct{} // Channel to track sync cancellation
+	timeout *time.Timer   // Timer to track delivery timeout
+	stale   chan struct{} // Channel to signal the request was dropped
+
+	accounts []common.Hash // Account hashes to validate responses
+	roots    []common.Hash // Storage roots to validate responses
+
+	origin common.Hash // First storage slot requested to allow continuation checks
+	limit  common.Hash // Last storage slot requested to allow non-overlapping chunking
+
+	mainTask *accountTask // Task which this response belongs to (only access fields through the runloop!!)
+	subTask  *storageTask // Task which this response is filling (only access fields through the runloop!!)
+} + +// storageResponse is an already Merkle-verified remote response to a storage +// range request. It contains the subtries for the requested storage ranges and +// the databases that's going to be filled with the internal nodes on commit. +type storageResponse struct { + mainTask *accountTask // Task which this response belongs to + subTask *storageTask // Task which this response is filling + + accounts []common.Hash // Account hashes requested, may be only partially filled + roots []common.Hash // Storage roots requested, may be only partially filled + + hashes [][]common.Hash // Storage slot hashes in the returned range + slots [][][]byte // Storage slot values in the returned range + nodes []ethdb.KeyValueStore // Database containing the reconstructed trie nodes + tries []*trie.Trie // Reconstructed tries to reject overflown slots + + // Fields relevant for the last account only + bounds map[common.Hash]struct{} // Boundary nodes to avoid persisting (incomplete) + overflow *light.NodeSet // Overflow nodes to avoid persisting across chunk boundaries + cont bool // Whether the last storage range has a continuation +} + +// trienodeHealRequest tracks a pending state trie request to ensure responses +// are to actual requests and to validate any security constraints. +// +// Concurrency note: trie node requests and responses are handled concurrently from +// the main runloop to allow Keccak256 hash verifications on the peer's thread and +// to drop on invalid response. The request struct must contain all the data to +// construct the response without accessing runloop internals (i.e. task). That +// is only included to allow the runloop to match a response to the task being +// synced without having yet another set of maps. +type trienodeHealRequest struct { + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + + cancel chan struct{} // Channel to track sync cancellation + timeout *time.Timer // Timer to track delivery timeout + stale chan struct{} // Channel to signal the request was dropped + + hashes []common.Hash // Trie node hashes to validate responses + paths []trie.SyncPath // Trie node paths requested for rescheduling + + task *healTask // Task which this request is filling (only access fields through the runloop!!) +} + +// trienodeHealResponse is an already verified remote response to a trie node request. +type trienodeHealResponse struct { + task *healTask // Task which this request is filling + + hashes []common.Hash // Hashes of the trie nodes to avoid double hashing + paths []trie.SyncPath // Trie node paths requested for rescheduling missing ones + nodes [][]byte // Actual trie nodes to store into the database (nil = missing) +} + +// bytecodeHealRequest tracks a pending bytecode request to ensure responses are to +// actual requests and to validate any security constraints. +// +// Concurrency note: bytecode requests and responses are handled concurrently from +// the main runloop to allow Keccak256 hash verifications on the peer's thread and +// to drop on invalid response. The request struct must contain all the data to +// construct the response without accessing runloop internals (i.e. task). That +// is only included to allow the runloop to match a response to the task being +// synced without having yet another set of maps. 
+type bytecodeHealRequest struct { + peer string // Peer to which this request is assigned + id uint64 // Request ID of this request + + cancel chan struct{} // Channel to track sync cancellation + timeout *time.Timer // Timer to track delivery timeout + stale chan struct{} // Channel to signal the request was dropped + + hashes []common.Hash // Bytecode hashes to validate responses + task *healTask // Task which this request is filling (only access fields through the runloop!!) +} + +// bytecodeHealResponse is an already verified remote response to a bytecode request. +type bytecodeHealResponse struct { + task *healTask // Task which this request is filling + + hashes []common.Hash // Hashes of the bytecode to avoid double hashing + codes [][]byte // Actual bytecodes to store into the database (nil = missing) +} + +// accountTask represents the sync task for a chunk of the account snapshot. +type accountTask struct { + // These fields get serialized to leveldb on shutdown + Next common.Hash // Next account to sync in this interval + Last common.Hash // Last account to sync in this interval + SubTasks map[common.Hash][]*storageTask // Storage intervals needing fetching for large contracts + + // These fields are internals used during runtime + req *accountRequest // Pending request to fill this task + res *accountResponse // Validate response filling this task + pend int // Number of pending subtasks for this round + + needCode []bool // Flags whether the filling accounts need code retrieval + needState []bool // Flags whether the filling accounts need storage retrieval + needHeal []bool // Flags whether the filling accounts's state was chunked and need healing + + codeTasks map[common.Hash]struct{} // Code hashes that need retrieval + stateTasks map[common.Hash]common.Hash // Account hashes->roots that need full state retrieval + + done bool // Flag whether the task can be removed +} + +// storageTask represents the sync task for a chunk of the storage snapshot. +type storageTask struct { + Next common.Hash // Next account to sync in this interval + Last common.Hash // Last account to sync in this interval + + // These fields are internals used during runtime + root common.Hash // Storage root hash for this instance + req *storageRequest // Pending request to fill this task + done bool // Flag whether the task can be removed +} + +// healTask represents the sync task for healing the snap-synced chunk boundaries. +type healTask struct { + scheduler *trie.Sync // State trie sync scheduler defining the tasks + + trieTasks map[common.Hash]trie.SyncPath // Set of trie node tasks currently queued for retrieval + codeTasks map[common.Hash]struct{} // Set of byte code tasks currently queued for retrieval +} + +// syncProgress is a database entry to allow suspending and resuming a snapshot state +// sync. Opposed to full and fast sync, there is no way to restart a suspended +// snap sync without prior knowledge of the suspension point. 
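+//
+// A sketch of the intended round trip (the database key is left out here):
+//
+//	blob, _ := json.Marshal(progress) // on shutdown
+//	var prev syncProgress
+//	_ = json.Unmarshal(blob, &prev)   // on resume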
+type syncProgress struct {
+	Tasks []*accountTask // The suspended account tasks (contract tasks within)
+
+	// Status report during syncing phase
+	AccountSynced  uint64             // Number of accounts downloaded
+	AccountBytes   common.StorageSize // Number of account trie bytes persisted to disk
+	BytecodeSynced uint64             // Number of bytecodes downloaded
+	BytecodeBytes  common.StorageSize // Number of bytecode bytes downloaded
+	StorageSynced  uint64             // Number of storage slots downloaded
+	StorageBytes   common.StorageSize // Number of storage trie bytes persisted to disk
+
+	// Status report during healing phase
+	TrienodeHealSynced uint64             // Number of state trie nodes downloaded
+	TrienodeHealBytes  common.StorageSize // Number of state trie bytes persisted to disk
+	TrienodeHealDups   uint64             // Number of state trie nodes already processed
+	TrienodeHealNops   uint64             // Number of state trie nodes not requested
+	BytecodeHealSynced uint64             // Number of bytecodes downloaded
+	BytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
+	BytecodeHealDups   uint64             // Number of bytecodes already processed
+	BytecodeHealNops   uint64             // Number of bytecodes not requested
+}
+
+// SyncPeer abstracts out the methods required for a peer to be synced against
+// with the goal of allowing the construction of mock peers without the full-blown
+// networking.
+type SyncPeer interface {
+	// ID retrieves the peer's unique identifier.
+	ID() string
+
+	// RequestAccountRange fetches a batch of accounts rooted in a specific account
+	// trie, starting with the origin.
+	RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error
+
+	// RequestStorageRanges fetches a batch of storage slots belonging to one or
+	// more accounts. If slots for only one account are requested, an origin marker
+	// may also be used to retrieve from there.
+	RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error
+
+	// RequestByteCodes fetches a batch of bytecodes by hash.
+	RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error
+
+	// RequestTrieNodes fetches a batch of account or storage trie nodes rooted in
+	// a specific state trie.
+	RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error
+
+	// Log retrieves the peer's own contextual logger.
+	Log() log.Logger
+}
+
+// Syncer is an Ethereum account and storage trie syncer based on snapshots and
+// the snap protocol. Its purpose is to download all the accounts and storage
+// slots from remote peers and reassemble chunks of the state trie, on top of
+// which a state sync can be run to fix any gaps / overlaps.
+// +// Every network request has a variety of failure events: +// - The peer disconnects after task assignment, failing to send the request +// - The peer disconnects after sending the request, before delivering on it +// - The peer remains connected, but does not deliver a response in time +// - The peer delivers a stale response after a previous timeout +// - The peer delivers a refusal to serve the requested state +type Syncer struct { + db ethdb.KeyValueStore // Database to store the trie nodes into (and dedup) + bloom *trie.SyncBloom // Bloom filter to deduplicate nodes for state fixup + + root common.Hash // Current state trie root being synced + tasks []*accountTask // Current account task set being synced + snapped bool // Flag to signal that snap phase is done + healer *healTask // Current state healing task being executed + update chan struct{} // Notification channel for possible sync progression + + peers map[string]SyncPeer // Currently active peers to download from + peerJoin *event.Feed // Event feed to react to peers joining + peerDrop *event.Feed // Event feed to react to peers dropping + + // Request tracking during syncing phase + statelessPeers map[string]struct{} // Peers that failed to deliver state data + accountIdlers map[string]struct{} // Peers that aren't serving account requests + bytecodeIdlers map[string]struct{} // Peers that aren't serving bytecode requests + storageIdlers map[string]struct{} // Peers that aren't serving storage requests + + accountReqs map[uint64]*accountRequest // Account requests currently running + bytecodeReqs map[uint64]*bytecodeRequest // Bytecode requests currently running + storageReqs map[uint64]*storageRequest // Storage requests currently running + + accountReqFails chan *accountRequest // Failed account range requests to revert + bytecodeReqFails chan *bytecodeRequest // Failed bytecode requests to revert + storageReqFails chan *storageRequest // Failed storage requests to revert + + accountResps chan *accountResponse // Account sub-tries to integrate into the database + bytecodeResps chan *bytecodeResponse // Bytecodes to integrate into the database + storageResps chan *storageResponse // Storage sub-tries to integrate into the database + + accountSynced uint64 // Number of accounts downloaded + accountBytes common.StorageSize // Number of account trie bytes persisted to disk + bytecodeSynced uint64 // Number of bytecodes downloaded + bytecodeBytes common.StorageSize // Number of bytecode bytes downloaded + storageSynced uint64 // Number of storage slots downloaded + storageBytes common.StorageSize // Number of storage trie bytes persisted to disk + + // Request tracking during healing phase + trienodeHealIdlers map[string]struct{} // Peers that aren't serving trie node requests + bytecodeHealIdlers map[string]struct{} // Peers that aren't serving bytecode requests + + trienodeHealReqs map[uint64]*trienodeHealRequest // Trie node requests currently running + bytecodeHealReqs map[uint64]*bytecodeHealRequest // Bytecode requests currently running + + trienodeHealReqFails chan *trienodeHealRequest // Failed trienode requests to revert + bytecodeHealReqFails chan *bytecodeHealRequest // Failed bytecode requests to revert + + trienodeHealResps chan *trienodeHealResponse // Trie nodes to integrate into the database + bytecodeHealResps chan *bytecodeHealResponse // Bytecodes to integrate into the database + + trienodeHealSynced uint64 // Number of state trie nodes downloaded + trienodeHealBytes common.StorageSize // Number of state 
trie bytes persisted to disk
+	trienodeHealDups   uint64             // Number of state trie nodes already processed
+	trienodeHealNops   uint64             // Number of state trie nodes not requested
+	bytecodeHealSynced uint64             // Number of bytecodes downloaded
+	bytecodeHealBytes  common.StorageSize // Number of bytecodes persisted to disk
+	bytecodeHealDups   uint64             // Number of bytecodes already processed
+	bytecodeHealNops   uint64             // Number of bytecodes not requested
+
+	startTime time.Time   // Time instance when snapshot sync started
+	startAcc  common.Hash // Account hash where sync started from
+	logTime   time.Time   // Time instance when status was last reported
+
+	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
+	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
+}
+
+// NewSyncer creates a new snapshot syncer to download the Ethereum state over the
+// snap protocol.
+func NewSyncer(db ethdb.KeyValueStore, bloom *trie.SyncBloom) *Syncer {
+	return &Syncer{
+		db:    db,
+		bloom: bloom,
+
+		peers:    make(map[string]SyncPeer),
+		peerJoin: new(event.Feed),
+		peerDrop: new(event.Feed),
+		update:   make(chan struct{}, 1),
+
+		accountIdlers:  make(map[string]struct{}),
+		storageIdlers:  make(map[string]struct{}),
+		bytecodeIdlers: make(map[string]struct{}),
+
+		accountReqs:      make(map[uint64]*accountRequest),
+		storageReqs:      make(map[uint64]*storageRequest),
+		bytecodeReqs:     make(map[uint64]*bytecodeRequest),
+		accountReqFails:  make(chan *accountRequest),
+		storageReqFails:  make(chan *storageRequest),
+		bytecodeReqFails: make(chan *bytecodeRequest),
+		accountResps:     make(chan *accountResponse),
+		storageResps:     make(chan *storageResponse),
+		bytecodeResps:    make(chan *bytecodeResponse),
+
+		trienodeHealIdlers: make(map[string]struct{}),
+		bytecodeHealIdlers: make(map[string]struct{}),
+
+		trienodeHealReqs:     make(map[uint64]*trienodeHealRequest),
+		bytecodeHealReqs:     make(map[uint64]*bytecodeHealRequest),
+		trienodeHealReqFails: make(chan *trienodeHealRequest),
+		bytecodeHealReqFails: make(chan *bytecodeHealRequest),
+		trienodeHealResps:    make(chan *trienodeHealResponse),
+		bytecodeHealResps:    make(chan *bytecodeHealResponse),
+	}
+}
+
+// Register injects a new data source into the syncer's peerset.
+func (s *Syncer) Register(peer SyncPeer) error {
+	// Make sure the peer is not registered yet
+	id := peer.ID()
+
+	s.lock.Lock()
+	if _, ok := s.peers[id]; ok {
+		log.Error("Snap peer already registered", "id", id)
+
+		s.lock.Unlock()
+		return errors.New("already registered")
+	}
+	s.peers[id] = peer
+
+	// Mark the peer as idle, even if no sync is running
+	s.accountIdlers[id] = struct{}{}
+	s.storageIdlers[id] = struct{}{}
+	s.bytecodeIdlers[id] = struct{}{}
+	s.trienodeHealIdlers[id] = struct{}{}
+	s.bytecodeHealIdlers[id] = struct{}{}
+	s.lock.Unlock()
+
+	// Notify any active syncs that a new peer can be assigned data
+	s.peerJoin.Send(id)
+	return nil
+}
+
+// Unregister severs a data source from the syncer's peerset.
+func (s *Syncer) Unregister(id string) error { + // Remove all traces of the peer from the registry + s.lock.Lock() + if _, ok := s.peers[id]; !ok { + log.Error("Snap peer not registered", "id", id) + + s.lock.Unlock() + return errors.New("not registered") + } + delete(s.peers, id) + + // Remove status markers, even if no sync is running + delete(s.statelessPeers, id) + + delete(s.accountIdlers, id) + delete(s.storageIdlers, id) + delete(s.bytecodeIdlers, id) + delete(s.trienodeHealIdlers, id) + delete(s.bytecodeHealIdlers, id) + s.lock.Unlock() + + // Notify any active syncs that pending requests need to be reverted + s.peerDrop.Send(id) + return nil +} + +// Sync starts (or resumes a previous) sync cycle to iterate over an state trie +// with the given root and reconstruct the nodes based on the snapshot leaves. +// Previously downloaded segments will not be redownloaded of fixed, rather any +// errors will be healed after the leaves are fully accumulated. +func (s *Syncer) Sync(root common.Hash, cancel chan struct{}) error { + // Move the trie root from any previous value, revert stateless markers for + // any peers and initialize the syncer if it was not yet run + s.lock.Lock() + s.root = root + s.healer = &healTask{ + scheduler: state.NewStateSync(root, s.db, s.bloom), + trieTasks: make(map[common.Hash]trie.SyncPath), + codeTasks: make(map[common.Hash]struct{}), + } + s.statelessPeers = make(map[string]struct{}) + s.lock.Unlock() + + if s.startTime == (time.Time{}) { + s.startTime = time.Now() + } + // Retrieve the previous sync status from LevelDB and abort if already synced + s.loadSyncStatus() + if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 { + log.Debug("Snapshot sync already completed") + return nil + } + defer func() { // Persist any progress, independent of failure + for _, task := range s.tasks { + s.forwardAccountTask(task) + } + s.cleanAccountTasks() + s.saveSyncStatus() + }() + + log.Debug("Starting snapshot sync cycle", "root", root) + defer s.report(true) + + // Whether sync completed or not, disregard any future packets + defer func() { + log.Debug("Terminating snapshot sync cycle", "root", root) + s.lock.Lock() + s.accountReqs = make(map[uint64]*accountRequest) + s.storageReqs = make(map[uint64]*storageRequest) + s.bytecodeReqs = make(map[uint64]*bytecodeRequest) + s.trienodeHealReqs = make(map[uint64]*trienodeHealRequest) + s.bytecodeHealReqs = make(map[uint64]*bytecodeHealRequest) + s.lock.Unlock() + }() + // Keep scheduling sync tasks + peerJoin := make(chan string, 16) + peerJoinSub := s.peerJoin.Subscribe(peerJoin) + defer peerJoinSub.Unsubscribe() + + peerDrop := make(chan string, 16) + peerDropSub := s.peerDrop.Subscribe(peerDrop) + defer peerDropSub.Unsubscribe() + + for { + // Remove all completed tasks and terminate sync if everything's done + s.cleanStorageTasks() + s.cleanAccountTasks() + if len(s.tasks) == 0 && s.healer.scheduler.Pending() == 0 { + return nil + } + // Assign all the data retrieval tasks to any free peers + s.assignAccountTasks(cancel) + s.assignBytecodeTasks(cancel) + s.assignStorageTasks(cancel) + + if len(s.tasks) == 0 { + // Sync phase done, run heal phase + s.assignTrienodeHealTasks(cancel) + s.assignBytecodeHealTasks(cancel) + } + // Wait for something to happen + select { + case <-s.update: + // Something happened (new peer, delivery, timeout), recheck tasks + case <-peerJoin: + // A new peer joined, try to schedule it new tasks + case id := <-peerDrop: + s.revertRequests(id) + case <-cancel: + return ErrCancelled + + 
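+		// Request failures and deliveries are funneled through the channels below
+		// so that task state is only ever mutated on this runloop, never on the
+		// peer goroutines that detected the failure or received the response.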
case req := <-s.accountReqFails: + s.revertAccountRequest(req) + case req := <-s.bytecodeReqFails: + s.revertBytecodeRequest(req) + case req := <-s.storageReqFails: + s.revertStorageRequest(req) + case req := <-s.trienodeHealReqFails: + s.revertTrienodeHealRequest(req) + case req := <-s.bytecodeHealReqFails: + s.revertBytecodeHealRequest(req) + + case res := <-s.accountResps: + s.processAccountResponse(res) + case res := <-s.bytecodeResps: + s.processBytecodeResponse(res) + case res := <-s.storageResps: + s.processStorageResponse(res) + case res := <-s.trienodeHealResps: + s.processTrienodeHealResponse(res) + case res := <-s.bytecodeHealResps: + s.processBytecodeHealResponse(res) + } + // Report stats if something meaningful happened + s.report(false) + } +} + +// loadSyncStatus retrieves a previously aborted sync status from the database, +// or generates a fresh one if none is available. +func (s *Syncer) loadSyncStatus() { + var progress syncProgress + + if status := rawdb.ReadSnapshotSyncStatus(s.db); status != nil { + if err := json.Unmarshal(status, &progress); err != nil { + log.Error("Failed to decode snap sync status", "err", err) + } else { + for _, task := range progress.Tasks { + log.Debug("Scheduled account sync task", "from", task.Next, "last", task.Last) + } + s.tasks = progress.Tasks + s.snapped = len(s.tasks) == 0 + + s.accountSynced = progress.AccountSynced + s.accountBytes = progress.AccountBytes + s.bytecodeSynced = progress.BytecodeSynced + s.bytecodeBytes = progress.BytecodeBytes + s.storageSynced = progress.StorageSynced + s.storageBytes = progress.StorageBytes + + s.trienodeHealSynced = progress.TrienodeHealSynced + s.trienodeHealBytes = progress.TrienodeHealBytes + s.bytecodeHealSynced = progress.BytecodeHealSynced + s.bytecodeHealBytes = progress.BytecodeHealBytes + return + } + } + // Either we've failed to decode the previus state, or there was none. + // Start a fresh sync by chunking up the account range and scheduling + // them for retrieval. + s.tasks = nil + s.accountSynced, s.accountBytes = 0, 0 + s.bytecodeSynced, s.bytecodeBytes = 0, 0 + s.storageSynced, s.storageBytes = 0, 0 + s.trienodeHealSynced, s.trienodeHealBytes = 0, 0 + s.bytecodeHealSynced, s.bytecodeHealBytes = 0, 0 + + var next common.Hash + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(accountConcurrency), + ), common.Big1, + ) + for i := 0; i < accountConcurrency; i++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if i == accountConcurrency-1 { + // Make sure we don't overflow if the step is not a proper divisor + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + s.tasks = append(s.tasks, &accountTask{ + Next: next, + Last: last, + SubTasks: make(map[common.Hash][]*storageTask), + }) + log.Debug("Created account sync task", "from", next, "last", last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } +} + +// saveSyncStatus marshals the remaining sync tasks into leveldb. 
+func (s *Syncer) saveSyncStatus() { + progress := &syncProgress{ + Tasks: s.tasks, + AccountSynced: s.accountSynced, + AccountBytes: s.accountBytes, + BytecodeSynced: s.bytecodeSynced, + BytecodeBytes: s.bytecodeBytes, + StorageSynced: s.storageSynced, + StorageBytes: s.storageBytes, + TrienodeHealSynced: s.trienodeHealSynced, + TrienodeHealBytes: s.trienodeHealBytes, + BytecodeHealSynced: s.bytecodeHealSynced, + BytecodeHealBytes: s.bytecodeHealBytes, + } + status, err := json.Marshal(progress) + if err != nil { + panic(err) // This can only fail during implementation + } + rawdb.WriteSnapshotSyncStatus(s.db, status) +} + +// cleanAccountTasks removes account range retrieval tasks that have already been +// completed. +func (s *Syncer) cleanAccountTasks() { + for i := 0; i < len(s.tasks); i++ { + if s.tasks[i].done { + s.tasks = append(s.tasks[:i], s.tasks[i+1:]...) + i-- + } + } + if len(s.tasks) == 0 { + s.lock.Lock() + s.snapped = true + s.lock.Unlock() + } +} + +// cleanStorageTasks iterates over all the account tasks and storage sub-tasks +// within, cleaning any that have been completed. +func (s *Syncer) cleanStorageTasks() { + for _, task := range s.tasks { + for account, subtasks := range task.SubTasks { + // Remove storage range retrieval tasks that completed + for j := 0; j < len(subtasks); j++ { + if subtasks[j].done { + subtasks = append(subtasks[:j], subtasks[j+1:]...) + j-- + } + } + if len(subtasks) > 0 { + task.SubTasks[account] = subtasks + continue + } + // If all storage chunks are done, mark the account as done too + for j, hash := range task.res.hashes { + if hash == account { + task.needState[j] = false + } + } + delete(task.SubTasks, account) + task.pend-- + + // If this was the last pending task, forward the account task + if task.pend == 0 { + s.forwardAccountTask(task) + } + } + } +} + +// assignAccountTasks attempts to match idle peers to pending account range +// retrievals. +func (s *Syncer) assignAccountTasks(cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // If there are no idle peers, short circuit assignment + if len(s.accountIdlers) == 0 { + return + } + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks already filling + if task.req != nil || task.res != nil { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. 
+ var idle string + for id := range s.accountIdlers { + // If the peer rejected a query in this sync cycle, don't bother asking + // again for anything, it's either out of sync or already pruned + if _, ok := s.statelessPeers[id]; ok { + continue + } + idle = id + break + } + if idle == "" { + return + } + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.accountReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + req := &accountRequest{ + peer: idle, + id: reqid, + cancel: cancel, + stale: make(chan struct{}), + origin: task.Next, + limit: task.Last, + task: task, + } + req.timeout = time.AfterFunc(requestTimeout, func() { + log.Debug("Account range request timed out") + s.scheduleRevertAccountRequest(req) + }) + s.accountReqs[reqid] = req + delete(s.accountIdlers, idle) + + s.pend.Add(1) + go func(peer SyncPeer, root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestAccountRange(reqid, root, req.origin, req.limit, maxRequestSize); err != nil { + peer.Log().Debug("Failed to request account range", "err", err) + s.scheduleRevertAccountRequest(req) + } + }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + + // Inject the request into the task to block further assignments + task.req = req + } +} + +// assignBytecodeTasks attempts to match idle peers to pending code retrievals. +func (s *Syncer) assignBytecodeTasks(cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // If there are no idle peers, short circuit assignment + if len(s.bytecodeIdlers) == 0 { + return + } + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks not in the bytecode retrieval phase + if task.res == nil { + continue + } + // Skip tasks that are already retrieving (or done with) all codes + if len(task.codeTasks) == 0 { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. 
+ var idle string + for id := range s.bytecodeIdlers { + // If the peer rejected a query in this sync cycle, don't bother asking + // again for anything, it's either out of sync or already pruned + if _, ok := s.statelessPeers[id]; ok { + continue + } + idle = id + break + } + if idle == "" { + return + } + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.bytecodeReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + hashes := make([]common.Hash, 0, maxCodeRequestCount) + for hash := range task.codeTasks { + delete(task.codeTasks, hash) + hashes = append(hashes, hash) + if len(hashes) >= maxCodeRequestCount { + break + } + } + req := &bytecodeRequest{ + peer: idle, + id: reqid, + cancel: cancel, + stale: make(chan struct{}), + hashes: hashes, + task: task, + } + req.timeout = time.AfterFunc(requestTimeout, func() { + log.Debug("Bytecode request timed out") + s.scheduleRevertBytecodeRequest(req) + }) + s.bytecodeReqs[reqid] = req + delete(s.bytecodeIdlers, idle) + + s.pend.Add(1) + go func(peer SyncPeer) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil { + log.Debug("Failed to request bytecodes", "err", err) + s.scheduleRevertBytecodeRequest(req) + } + }(s.peers[idle]) // We're in the lock, peers[id] surely exists + } +} + +// assignStorageTasks attempts to match idle peers to pending storage range +// retrievals. +func (s *Syncer) assignStorageTasks(cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // If there are no idle peers, short circuit assignment + if len(s.storageIdlers) == 0 { + return + } + // Iterate over all the tasks and try to find a pending one + for _, task := range s.tasks { + // Skip any tasks not in the storage retrieval phase + if task.res == nil { + continue + } + // Skip tasks that are already retrieving (or done with) all small states + if len(task.SubTasks) == 0 && len(task.stateTasks) == 0 { + continue + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + var idle string + for id := range s.storageIdlers { + // If the peer rejected a query in this sync cycle, don't bother asking + // again for anything, it's either out of sync or already pruned + if _, ok := s.statelessPeers[id]; ok { + continue + } + idle = id + break + } + if idle == "" { + return + } + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.storageReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer. If there are + // large contract tasks pending, complete those before diving into + // even more new contracts. 
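+		// Large contracts are retrieved chunk by chunk via dedicated subtasks,
+		// whereas small contracts are batched together, up to
+		// maxStorageSetRequestCount accounts per request.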
+ var ( + accounts = make([]common.Hash, 0, maxStorageSetRequestCount) + roots = make([]common.Hash, 0, maxStorageSetRequestCount) + subtask *storageTask + ) + for account, subtasks := range task.SubTasks { + for _, st := range subtasks { + // Skip any subtasks already filling + if st.req != nil { + continue + } + // Found an incomplete storage chunk, schedule it + accounts = append(accounts, account) + roots = append(roots, st.root) + subtask = st + break // Large contract chunks are downloaded individually + } + if subtask != nil { + break // Large contract chunks are downloaded individually + } + } + if subtask == nil { + // No large contract required retrieval, but small ones available + for acccount, root := range task.stateTasks { + delete(task.stateTasks, acccount) + + accounts = append(accounts, acccount) + roots = append(roots, root) + + if len(accounts) >= maxStorageSetRequestCount { + break + } + } + } + // If nothing was found, it means this task is actually already fully + // retrieving, but large contracts are hard to detect. Skip to the next. + if len(accounts) == 0 { + continue + } + req := &storageRequest{ + peer: idle, + id: reqid, + cancel: cancel, + stale: make(chan struct{}), + accounts: accounts, + roots: roots, + mainTask: task, + subTask: subtask, + } + if subtask != nil { + req.origin = subtask.Next + req.limit = subtask.Last + } + req.timeout = time.AfterFunc(requestTimeout, func() { + log.Debug("Storage request timed out") + s.scheduleRevertStorageRequest(req) + }) + s.storageReqs[reqid] = req + delete(s.storageIdlers, idle) + + s.pend.Add(1) + go func(peer SyncPeer, root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + var origin, limit []byte + if subtask != nil { + origin, limit = req.origin[:], req.limit[:] + } + if err := peer.RequestStorageRanges(reqid, root, accounts, origin, limit, maxRequestSize); err != nil { + log.Debug("Failed to request storage", "err", err) + s.scheduleRevertStorageRequest(req) + } + }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + + // Inject the request into the subtask to block further assignments + if subtask != nil { + subtask.req = req + } + } +} + +// assignTrienodeHealTasks attempts to match idle peers to trie node requests to +// heal any trie errors caused by the snap sync's chunked retrieval model. +func (s *Syncer) assignTrienodeHealTasks(cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // If there are no idle peers, short circuit assignment + if len(s.trienodeHealIdlers) == 0 { + return + } + // Iterate over pending tasks and try to find a peer to retrieve with + for len(s.healer.trieTasks) > 0 || s.healer.scheduler.Pending() > 0 { + // If there are not enough trie tasks queued to fully assign, fill the + // queue from the state sync scheduler. The trie synced schedules these + // together with bytecodes, so we need to queue them combined. + var ( + have = len(s.healer.trieTasks) + len(s.healer.codeTasks) + want = maxTrieRequestCount + maxCodeRequestCount + ) + if have < want { + nodes, paths, codes := s.healer.scheduler.Missing(want - have) + for i, hash := range nodes { + s.healer.trieTasks[hash] = paths[i] + } + for _, hash := range codes { + s.healer.codeTasks[hash] = struct{}{} + } + } + // If all the heal tasks are bytecodes or already downloading, bail + if len(s.healer.trieTasks) == 0 { + return + } + // Task pending retrieval, try to find an idle peer. 
If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. + var idle string + for id := range s.trienodeHealIdlers { + // If the peer rejected a query in this sync cycle, don't bother asking + // again for anything, it's either out of sync or already pruned + if _, ok := s.statelessPeers[id]; ok { + continue + } + idle = id + break + } + if idle == "" { + return + } + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.trienodeHealReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + var ( + hashes = make([]common.Hash, 0, maxTrieRequestCount) + paths = make([]trie.SyncPath, 0, maxTrieRequestCount) + pathsets = make([]TrieNodePathSet, 0, maxTrieRequestCount) + ) + for hash, pathset := range s.healer.trieTasks { + delete(s.healer.trieTasks, hash) + + hashes = append(hashes, hash) + paths = append(paths, pathset) + pathsets = append(pathsets, [][]byte(pathset)) // TODO(karalabe): group requests by account hash + + if len(hashes) >= maxTrieRequestCount { + break + } + } + req := &trienodeHealRequest{ + peer: idle, + id: reqid, + cancel: cancel, + stale: make(chan struct{}), + hashes: hashes, + paths: paths, + task: s.healer, + } + req.timeout = time.AfterFunc(requestTimeout, func() { + log.Debug("Trienode heal request timed out") + s.scheduleRevertTrienodeHealRequest(req) + }) + s.trienodeHealReqs[reqid] = req + delete(s.trienodeHealIdlers, idle) + + s.pend.Add(1) + go func(peer SyncPeer, root common.Hash) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestTrieNodes(reqid, root, pathsets, maxRequestSize); err != nil { + log.Debug("Failed to request trienode healers", "err", err) + s.scheduleRevertTrienodeHealRequest(req) + } + }(s.peers[idle], s.root) // We're in the lock, peers[id] surely exists + } +} + +// assignBytecodeHealTasks attempts to match idle peers to bytecode requests to +// heal any trie errors caused by the snap sync's chunked retrieval model. +func (s *Syncer) assignBytecodeHealTasks(cancel chan struct{}) { + s.lock.Lock() + defer s.lock.Unlock() + + // If there are no idle peers, short circuit assignment + if len(s.bytecodeHealIdlers) == 0 { + return + } + // Iterate over pending tasks and try to find a peer to retrieve with + for len(s.healer.codeTasks) > 0 || s.healer.scheduler.Pending() > 0 { + // If there are not enough trie tasks queued to fully assign, fill the + // queue from the state sync scheduler. The trie synced schedules these + // together with trie nodes, so we need to queue them combined. + var ( + have = len(s.healer.trieTasks) + len(s.healer.codeTasks) + want = maxTrieRequestCount + maxCodeRequestCount + ) + if have < want { + nodes, paths, codes := s.healer.scheduler.Missing(want - have) + for i, hash := range nodes { + s.healer.trieTasks[hash] = paths[i] + } + for _, hash := range codes { + s.healer.codeTasks[hash] = struct{}{} + } + } + // If all the heal tasks are trienodes or already downloading, bail + if len(s.healer.codeTasks) == 0 { + return + } + // Task pending retrieval, try to find an idle peer. If no such peer + // exists, we probably assigned tasks for all (or they are stateless). + // Abort the entire assignment mechanism. 
+ var idle string + for id := range s.bytecodeHealIdlers { + // If the peer rejected a query in this sync cycle, don't bother asking + // again for anything, it's either out of sync or already pruned + if _, ok := s.statelessPeers[id]; ok { + continue + } + idle = id + break + } + if idle == "" { + return + } + // Matched a pending task to an idle peer, allocate a unique request id + var reqid uint64 + for { + reqid = uint64(rand.Int63()) + if reqid == 0 { + continue + } + if _, ok := s.bytecodeHealReqs[reqid]; ok { + continue + } + break + } + // Generate the network query and send it to the peer + hashes := make([]common.Hash, 0, maxCodeRequestCount) + for hash := range s.healer.codeTasks { + delete(s.healer.codeTasks, hash) + + hashes = append(hashes, hash) + if len(hashes) >= maxCodeRequestCount { + break + } + } + req := &bytecodeHealRequest{ + peer: idle, + id: reqid, + cancel: cancel, + stale: make(chan struct{}), + hashes: hashes, + task: s.healer, + } + req.timeout = time.AfterFunc(requestTimeout, func() { + log.Debug("Bytecode heal request timed out") + s.scheduleRevertBytecodeHealRequest(req) + }) + s.bytecodeHealReqs[reqid] = req + delete(s.bytecodeHealIdlers, idle) + + s.pend.Add(1) + go func(peer SyncPeer) { + defer s.pend.Done() + + // Attempt to send the remote request and revert if it fails + if err := peer.RequestByteCodes(reqid, hashes, maxRequestSize); err != nil { + log.Debug("Failed to request bytecode healers", "err", err) + s.scheduleRevertBytecodeHealRequest(req) + } + }(s.peers[idle]) // We're in the lock, peers[id] surely exists + } +} + +// revertRequests locates all the currently pending reuqests from a particular +// peer and reverts them, rescheduling for others to fulfill. +func (s *Syncer) revertRequests(peer string) { + // Gather the requests first, revertals need the lock too + s.lock.Lock() + var accountReqs []*accountRequest + for _, req := range s.accountReqs { + if req.peer == peer { + accountReqs = append(accountReqs, req) + } + } + var bytecodeReqs []*bytecodeRequest + for _, req := range s.bytecodeReqs { + if req.peer == peer { + bytecodeReqs = append(bytecodeReqs, req) + } + } + var storageReqs []*storageRequest + for _, req := range s.storageReqs { + if req.peer == peer { + storageReqs = append(storageReqs, req) + } + } + var trienodeHealReqs []*trienodeHealRequest + for _, req := range s.trienodeHealReqs { + if req.peer == peer { + trienodeHealReqs = append(trienodeHealReqs, req) + } + } + var bytecodeHealReqs []*bytecodeHealRequest + for _, req := range s.bytecodeHealReqs { + if req.peer == peer { + bytecodeHealReqs = append(bytecodeHealReqs, req) + } + } + s.lock.Unlock() + + // Revert all the requests matching the peer + for _, req := range accountReqs { + s.revertAccountRequest(req) + } + for _, req := range bytecodeReqs { + s.revertBytecodeRequest(req) + } + for _, req := range storageReqs { + s.revertStorageRequest(req) + } + for _, req := range trienodeHealReqs { + s.revertTrienodeHealRequest(req) + } + for _, req := range bytecodeHealReqs { + s.revertBytecodeHealRequest(req) + } +} + +// scheduleRevertAccountRequest asks the event loop to clean up an account range +// request and return all failed retrieval tasks to the scheduler for reassignment. 
+func (s *Syncer) scheduleRevertAccountRequest(req *accountRequest) { + select { + case s.accountReqFails <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertAccountRequest cleans up an account range request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertAccountRequest. +func (s *Syncer) revertAccountRequest(req *accountRequest) { + log.Debug("Reverting account request", "peer", req.peer, "reqid", req.id) + select { + case <-req.stale: + log.Trace("Account request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.accountReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the account + // task as not-pending, ready for resheduling + req.timeout.Stop() + if req.task.req == req { + req.task.req = nil + } +} + +// scheduleRevertBytecodeRequest asks the event loop to clean up a bytecode request +// and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertBytecodeRequest(req *bytecodeRequest) { + select { + case s.bytecodeReqFails <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertBytecodeRequest cleans up a bytecode request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertBytecodeRequest. +func (s *Syncer) revertBytecodeRequest(req *bytecodeRequest) { + log.Debug("Reverting bytecode request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Bytecode request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.bytecodeReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the code + // retrievals as not-pending, ready for resheduling + req.timeout.Stop() + for _, hash := range req.hashes { + req.task.codeTasks[hash] = struct{}{} + } +} + +// scheduleRevertStorageRequest asks the event loop to clean up a storage range +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertStorageRequest(req *storageRequest) { + select { + case s.storageReqFails <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertStorageRequest cleans up a storage range request and returns all failed +// retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertStorageRequest. 
+func (s *Syncer) revertStorageRequest(req *storageRequest) { + log.Debug("Reverting storage request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Storage request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.storageReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the storage + // task as not-pending, ready for resheduling + req.timeout.Stop() + if req.subTask != nil { + req.subTask.req = nil + } else { + for i, account := range req.accounts { + req.mainTask.stateTasks[account] = req.roots[i] + } + } +} + +// scheduleRevertTrienodeHealRequest asks the event loop to clean up a trienode heal +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertTrienodeHealRequest(req *trienodeHealRequest) { + select { + case s.trienodeHealReqFails <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertTrienodeHealRequest cleans up a trienode heal request and returns all +// failed retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertTrienodeHealRequest. +func (s *Syncer) revertTrienodeHealRequest(req *trienodeHealRequest) { + log.Debug("Reverting trienode heal request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Trienode heal request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.trienodeHealReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the trie node + // retrievals as not-pending, ready for resheduling + req.timeout.Stop() + for i, hash := range req.hashes { + req.task.trieTasks[hash] = req.paths[i] + } +} + +// scheduleRevertBytecodeHealRequest asks the event loop to clean up a bytecode heal +// request and return all failed retrieval tasks to the scheduler for reassignment. +func (s *Syncer) scheduleRevertBytecodeHealRequest(req *bytecodeHealRequest) { + select { + case s.bytecodeHealReqFails <- req: + // Sync event loop notified + case <-req.cancel: + // Sync cycle got cancelled + case <-req.stale: + // Request already reverted + } +} + +// revertBytecodeHealRequest cleans up a bytecode heal request and returns all +// failed retrieval tasks to the scheduler for reassignment. +// +// Note, this needs to run on the event runloop thread to reschedule to idle peers. +// On peer threads, use scheduleRevertBytecodeHealRequest. 
+func (s *Syncer) revertBytecodeHealRequest(req *bytecodeHealRequest) { + log.Debug("Reverting bytecode heal request", "peer", req.peer) + select { + case <-req.stale: + log.Trace("Bytecode heal request already reverted", "peer", req.peer, "reqid", req.id) + return + default: + } + close(req.stale) + + // Remove the request from the tracked set + s.lock.Lock() + delete(s.bytecodeHealReqs, req.id) + s.lock.Unlock() + + // If there's a timeout timer still running, abort it and mark the code + // retrievals as not-pending, ready for resheduling + req.timeout.Stop() + for _, hash := range req.hashes { + req.task.codeTasks[hash] = struct{}{} + } +} + +// processAccountResponse integrates an already validated account range response +// into the account tasks. +func (s *Syncer) processAccountResponse(res *accountResponse) { + // Switch the task from pending to filling + res.task.req = nil + res.task.res = res + + // Ensure that the response doesn't overflow into the subsequent task + last := res.task.Last.Big() + for i, hash := range res.hashes { + if hash.Big().Cmp(last) > 0 { + // Chunk overflown, cut off excess, but also update the boundary nodes + for j := i; j < len(res.hashes); j++ { + if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil { + panic(err) // Account range was already proven, what happened + } + } + res.hashes = res.hashes[:i] + res.accounts = res.accounts[:i] + res.cont = false // Mark range completed + break + } + } + // Iterate over all the accounts and assemble which ones need further sub- + // filling before the entire account range can be persisted. + res.task.needCode = make([]bool, len(res.accounts)) + res.task.needState = make([]bool, len(res.accounts)) + res.task.needHeal = make([]bool, len(res.accounts)) + + res.task.codeTasks = make(map[common.Hash]struct{}) + res.task.stateTasks = make(map[common.Hash]common.Hash) + + resumed := make(map[common.Hash]struct{}) + + res.task.pend = 0 + for i, account := range res.accounts { + // Check if the account is a contract with an unknown code + if !bytes.Equal(account.CodeHash, emptyCode[:]) { + if code := rawdb.ReadCodeWithPrefix(s.db, common.BytesToHash(account.CodeHash)); code == nil { + res.task.codeTasks[common.BytesToHash(account.CodeHash)] = struct{}{} + res.task.needCode[i] = true + res.task.pend++ + } + } + // Check if the account is a contract with an unknown storage trie + if account.Root != emptyRoot { + if node, err := s.db.Get(account.Root[:]); err != nil || node == nil { + // If there was a previous large state retrieval in progress, + // don't restart it from scratch. This happens if a sync cycle + // is interrupted and resumed later. However, *do* update the + // previous root hash. + if subtasks, ok := res.task.SubTasks[res.hashes[i]]; ok { + log.Debug("Resuming large storage retrieval", "account", res.hashes[i], "root", account.Root) + for _, subtask := range subtasks { + subtask.root = account.Root + } + res.task.needHeal[i] = true + resumed[res.hashes[i]] = struct{}{} + } else { + res.task.stateTasks[res.hashes[i]] = account.Root + } + res.task.needState[i] = true + res.task.pend++ + } + } + } + // Delete any subtasks that have been aborted but not resumed. This may undo + // some progress if a new peer gives us less accounts than an old one, but for + // now we have to live with that. 
+ for hash := range res.task.SubTasks { + if _, ok := resumed[hash]; !ok { + log.Debug("Aborting suspended storage retrieval", "account", hash) + delete(res.task.SubTasks, hash) + } + } + // If the account range contained no contracts, or all have been fully filled + // beforehand, short circuit storage filling and forward to the next task + if res.task.pend == 0 { + s.forwardAccountTask(res.task) + return + } + // Some accounts are incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. +} + +// processBytecodeResponse integrates an already validated bytecode response +// into the account tasks. +func (s *Syncer) processBytecodeResponse(res *bytecodeResponse) { + batch := s.db.NewBatch() + + var ( + codes uint64 + bytes common.StorageSize + ) + for i, hash := range res.hashes { + code := res.codes[i] + + // If the bytecode was not delivered, reschedule it + if code == nil { + res.task.codeTasks[hash] = struct{}{} + continue + } + // Code was delivered, mark it not needed any more + for j, account := range res.task.res.accounts { + if res.task.needCode[j] && hash == common.BytesToHash(account.CodeHash) { + res.task.needCode[j] = false + res.task.pend-- + } + } + // Push the bytecode into a database batch + s.bytecodeSynced++ + s.bytecodeBytes += common.StorageSize(len(code)) + + codes++ + bytes += common.StorageSize(len(code)) + + rawdb.WriteCode(batch, hash, code) + s.bloom.Add(hash[:]) + } + if err := batch.Write(); err != nil { + log.Crit("Failed to persist bytecodes", "err", err) + } + log.Debug("Persisted set of bytecodes", "count", codes, "bytes", bytes) + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if res.task.pend == 0 { + s.forwardAccountTask(res.task) + return + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. +} + +// processStorageResponse integrates an already validated storage response +// into the account tasks. +func (s *Syncer) processStorageResponse(res *storageResponse) { + // Switch the suntask from pending to idle + if res.subTask != nil { + res.subTask.req = nil + } + batch := s.db.NewBatch() + + var ( + slots int + nodes int + skipped int + bytes common.StorageSize + ) + // Iterate over all the accounts and reconstruct their storage tries from the + // delivered slots + for i, account := range res.accounts { + // If the account was not delivered, reschedule it + if i >= len(res.hashes) { + res.mainTask.stateTasks[account] = res.roots[i] + continue + } + // State was delivered, if complete mark as not needed any more, otherwise + // mark the account as needing healing + for j, hash := range res.mainTask.res.hashes { + if account != hash { + continue + } + acc := res.mainTask.res.accounts[j] + + // If the packet contains multiple contract storage slots, all + // but the last are surely complete. The last contract may be + // chunked, so check it's continuation flag. + if res.subTask == nil && res.mainTask.needState[j] && (i < len(res.hashes)-1 || !res.cont) { + res.mainTask.needState[j] = false + res.mainTask.pend-- + } + // If the last contract was chunked, mark it as needing healing + // to avoid writing it out to disk prematurely. 
+ if res.subTask == nil && !res.mainTask.needHeal[j] && i == len(res.hashes)-1 && res.cont { + res.mainTask.needHeal[j] = true + } + // If the last contract was chunked, we need to switch to large + // contract handling mode + if res.subTask == nil && i == len(res.hashes)-1 && res.cont { + // If we haven't yet started a large-contract retrieval, create + // the subtasks for it within the main account task + if tasks, ok := res.mainTask.SubTasks[account]; !ok { + var ( + next common.Hash + ) + step := new(big.Int).Sub( + new(big.Int).Div( + new(big.Int).Exp(common.Big2, common.Big256, nil), + big.NewInt(storageConcurrency), + ), common.Big1, + ) + for k := 0; k < storageConcurrency; k++ { + last := common.BigToHash(new(big.Int).Add(next.Big(), step)) + if k == storageConcurrency-1 { + // Make sure we don't overflow if the step is not a proper divisor + last = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + } + tasks = append(tasks, &storageTask{ + Next: next, + Last: last, + root: acc.Root, + }) + log.Debug("Created storage sync task", "account", account, "root", acc.Root, "from", next, "last", last) + next = common.BigToHash(new(big.Int).Add(last.Big(), common.Big1)) + } + res.mainTask.SubTasks[account] = tasks + + // Since we've just created the sub-tasks, this response + // is surely for the first one (zero origin) + res.subTask = tasks[0] + } + } + // If we're in large contract delivery mode, forward the subtask + if res.subTask != nil { + // Ensure the response doesn't overflow into the subsequent task + last := res.subTask.Last.Big() + for k, hash := range res.hashes[i] { + if hash.Big().Cmp(last) > 0 { + // Chunk overflown, cut off excess, but also update the boundary + for l := k; l < len(res.hashes[i]); l++ { + if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil { + panic(err) // Account range was already proven, what happened + } + } + res.hashes[i] = res.hashes[i][:k] + res.slots[i] = res.slots[i][:k] + res.cont = false // Mark range completed + break + } + } + // Forward the relevant storage chunk (even if created just now) + if res.cont { + res.subTask.Next = common.BigToHash(new(big.Int).Add(res.hashes[i][len(res.hashes[i])-1].Big(), big.NewInt(1))) + } else { + res.subTask.done = true + } + } + } + // Iterate over all the reconstructed trie nodes and push them to disk + slots += len(res.hashes[i]) + + it := res.nodes[i].NewIterator(nil, nil) + for it.Next() { + // Boundary nodes are not written for the last result, since they are incomplete + if i == len(res.hashes)-1 { + if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok { + skipped++ + continue + } + } + // Node is not a boundary, persist to disk + batch.Put(it.Key(), it.Value()) + s.bloom.Add(it.Key()) + + bytes += common.StorageSize(common.HashLength + len(it.Value())) + nodes++ + } + it.Release() + } + if err := batch.Write(); err != nil { + log.Crit("Failed to persist storage slots", "err", err) + } + s.storageSynced += uint64(slots) + s.storageBytes += bytes + + log.Debug("Persisted set of storage slots", "accounts", len(res.hashes), "slots", slots, "nodes", nodes, "skipped", skipped, "bytes", bytes) + + // If this delivery completed the last pending task, forward the account task + // to the next chunk + if res.mainTask.pend == 0 { + s.forwardAccountTask(res.mainTask) + return + } + // Some accounts are still incomplete, leave as is for the storage and contract + // task assigners to pick up and fill. 
+}
+
+// processTrienodeHealResponse integrates an already validated trienode response
+// into the healer tasks.
+func (s *Syncer) processTrienodeHealResponse(res *trienodeHealResponse) {
+	for i, hash := range res.hashes {
+		node := res.nodes[i]
+
+		// If the trie node was not delivered, reschedule it
+		if node == nil {
+			res.task.trieTasks[hash] = res.paths[i]
+			continue
+		}
+		// Push the trie node into the state syncer
+		s.trienodeHealSynced++
+		s.trienodeHealBytes += common.StorageSize(len(node))
+
+		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
+		switch err {
+		case nil:
+		case trie.ErrAlreadyProcessed:
+			s.trienodeHealDups++
+		case trie.ErrNotRequested:
+			s.trienodeHealNops++
+		default:
+			log.Error("Invalid trienode processed", "hash", hash, "err", err)
+		}
+	}
+	batch := s.db.NewBatch()
+	if err := s.healer.scheduler.Commit(batch); err != nil {
+		log.Error("Failed to commit healing data", "err", err)
+	}
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to persist healing data", "err", err)
+	}
+	log.Debug("Persisted set of healing data", "type", "trienodes", "bytes", common.StorageSize(batch.ValueSize()))
+}
+
+// processBytecodeHealResponse integrates an already validated bytecode response
+// into the healer tasks.
+func (s *Syncer) processBytecodeHealResponse(res *bytecodeHealResponse) {
+	for i, hash := range res.hashes {
+		node := res.codes[i]
+
+		// If the bytecode was not delivered, reschedule it
+		if node == nil {
+			res.task.codeTasks[hash] = struct{}{}
+			continue
+		}
+		// Push the bytecode into the state syncer
+		s.bytecodeHealSynced++
+		s.bytecodeHealBytes += common.StorageSize(len(node))
+
+		err := s.healer.scheduler.Process(trie.SyncResult{Hash: hash, Data: node})
+		switch err {
+		case nil:
+		case trie.ErrAlreadyProcessed:
+			s.bytecodeHealDups++
+		case trie.ErrNotRequested:
+			s.bytecodeHealNops++
+		default:
+			log.Error("Invalid bytecode processed", "hash", hash, "err", err)
+		}
+	}
+	batch := s.db.NewBatch()
+	if err := s.healer.scheduler.Commit(batch); err != nil {
+		log.Error("Failed to commit healing data", "err", err)
+	}
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to persist healing data", "err", err)
+	}
+	log.Debug("Persisted set of healing data", "type", "bytecode", "bytes", common.StorageSize(batch.ValueSize()))
+}
+
+// forwardAccountTask takes a filled account task and persists anything available
+// into the database, after which it forwards the next account marker so that the
+// task's next chunk may be filled.
+func (s *Syncer) forwardAccountTask(task *accountTask) {
+	// Remove any pending delivery
+	res := task.res
+	if res == nil {
+		return // nothing to forward
+	}
+	task.res = nil
+
+	// Iterate over all the accounts and gather all the incomplete trie nodes. A
+	// node is incomplete if we haven't yet filled it (sync was interrupted), or
+	// if we filled it in multiple chunks (storage trie), in which case the few
+	// nodes on the chunk boundaries are missing.
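+	// Nodes gathered into the 'incompletes' set are skipped when the batch is
+	// written below and are re-downloaded later by the trie node healing phase.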
+	incompletes := light.NewNodeSet()
+	for i := range res.accounts {
+		// If the filling was interrupted, mark everything after as incomplete
+		if task.needCode[i] || task.needState[i] {
+			for j := i; j < len(res.accounts); j++ {
+				if err := res.trie.Prove(res.hashes[j][:], 0, incompletes); err != nil {
+					panic(err) // Account range was already proven, what happened
+				}
+			}
+			break
+		}
+		// Filling not interrupted until this point, mark incomplete if needs healing
+		if task.needHeal[i] {
+			if err := res.trie.Prove(res.hashes[i][:], 0, incompletes); err != nil {
+				panic(err) // Account range was already proven, what happened
+			}
+		}
+	}
+	// Persist every finalized trie node that's not on the boundary
+	batch := s.db.NewBatch()
+
+	var (
+		nodes   int
+		skipped int
+		bytes   common.StorageSize
+	)
+	it := res.nodes.NewIterator(nil, nil)
+	for it.Next() {
+		// Boundary nodes are not written, since they are incomplete
+		if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {
+			skipped++
+			continue
+		}
+		// Overflow nodes are not written, since they mess with another task
+		if _, err := res.overflow.Get(it.Key()); err == nil {
+			skipped++
+			continue
+		}
+		// Accounts with split storage requests are incomplete
+		if _, err := incompletes.Get(it.Key()); err == nil {
+			skipped++
+			continue
+		}
+		// Node is neither a boundary nor an incomplete account, persist to disk
+		batch.Put(it.Key(), it.Value())
+		s.bloom.Add(it.Key())
+
+		bytes += common.StorageSize(common.HashLength + len(it.Value()))
+		nodes++
+	}
+	it.Release()
+
+	if err := batch.Write(); err != nil {
+		log.Crit("Failed to persist accounts", "err", err)
+	}
+	s.accountBytes += bytes
+	s.accountSynced += uint64(len(res.accounts))
+
+	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "nodes", nodes, "skipped", skipped, "bytes", bytes)
+
+	// Task filling persisted, push the chunk marker forward to the first
+	// account still missing data.
+	for i, hash := range res.hashes {
+		if task.needCode[i] || task.needState[i] {
+			return
+		}
+		task.Next = common.BigToHash(new(big.Int).Add(hash.Big(), big.NewInt(1)))
+	}
+	// All accounts marked as complete, track if the entire task is done
+	task.done = !res.cont
+}
+
+// OnAccounts is a callback method to invoke when a range of accounts is
+// received from a remote peer.
+func (s *Syncer) OnAccounts(peer SyncPeer, id uint64, hashes []common.Hash, accounts [][]byte, proof [][]byte) error {
+	size := common.StorageSize(len(hashes) * common.HashLength)
+	for _, account := range accounts {
+		size += common.StorageSize(len(account))
+	}
+	for _, node := range proof {
+		size += common.StorageSize(len(node))
+	}
+	logger := peer.Log().New("reqid", id)
+	logger.Trace("Delivering range of accounts", "hashes", len(hashes), "accounts", len(accounts), "proofs", len(proof), "bytes", size)
+
+	// Whether or not the response is valid, we can mark the peer as idle and
+	// notify the scheduler to assign a new task. If the response is invalid,
+	// we'll drop the peer in a bit.
+ s.lock.Lock() + if _, ok := s.peers[peer.ID()]; ok { + s.accountIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + // Ensure the response is for a valid request + req, ok := s.accountReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected account range packet") + s.lock.Unlock() + return nil + } + delete(s.accountReqs, id) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For account range queries that means the state being + // retrieved was either already pruned remotely, or the peer is not yet + // synced to our head. + if len(hashes) == 0 && len(accounts) == 0 && len(proof) == 0 { + logger.Debug("Peer rejected account range request", "root", s.root) + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertAccountRequest(req) + return nil + } + root := s.root + s.lock.Unlock() + + // Reconstruct a partial trie from the response and verify it + keys := make([][]byte, len(hashes)) + for i, key := range hashes { + keys[i] = common.CopyBytes(key[:]) + } + nodes := make(light.NodeList, len(proof)) + for i, node := range proof { + nodes[i] = node + } + proofdb := nodes.NodeSet() + + var end []byte + if len(keys) > 0 { + end = keys[len(keys)-1] + } + db, tr, notary, cont, err := trie.VerifyRangeProof(root, req.origin[:], end, keys, accounts, proofdb) + if err != nil { + logger.Warn("Account range failed proof", "err", err) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertAccountRequest(req) + return err + } + // Partial trie reconstructed, send it to the scheduler for storage filling + bounds := make(map[common.Hash]struct{}) + + it := notary.Accessed().NewIterator(nil, nil) + for it.Next() { + bounds[common.BytesToHash(it.Key())] = struct{}{} + } + it.Release() + + accs := make([]*state.Account, len(accounts)) + for i, account := range accounts { + acc := new(state.Account) + if err := rlp.DecodeBytes(account, acc); err != nil { + panic(err) // We created these blobs, we must be able to decode them + } + accs[i] = acc + } + response := &accountResponse{ + task: req.task, + hashes: hashes, + accounts: accs, + nodes: db, + trie: tr, + bounds: bounds, + overflow: light.NewNodeSet(), + cont: cont, + } + select { + case s.accountResps <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// OnByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer. +func (s *Syncer) OnByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + s.lock.RLock() + syncing := !s.snapped + s.lock.RUnlock() + + if syncing { + return s.onByteCodes(peer, id, bytecodes) + } + return s.onHealByteCodes(peer, id, bytecodes) +} + +// onByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer in the syncing phase. 
+func (s *Syncer) onByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + var size common.StorageSize + for _, code := range bytecodes { + size += common.StorageSize(len(code)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering set of bytecodes", "bytecodes", len(bytecodes), "bytes", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. + s.lock.Lock() + if _, ok := s.peers[peer.ID()]; ok { + s.bytecodeIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + // Ensure the response is for a valid request + req, ok := s.bytecodeReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected bytecode packet") + s.lock.Unlock() + return nil + } + delete(s.bytecodeReqs, id) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For bytecode range queries that means the peer is not + // yet synced. + if len(bytecodes) == 0 { + logger.Debug("Peer rejected bytecode request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeRequest(req) + return nil + } + s.lock.Unlock() + + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + + codes := make([][]byte, len(req.hashes)) + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) { + j++ + } + if j < len(req.hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + logger.Warn("Unexpected bytecodes", "count", len(bytecodes)-i) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeRequest(req) + return errors.New("unexpected bytecode") + } + // Response validated, send it to the scheduler for filling + response := &bytecodeResponse{ + task: req.task, + hashes: req.hashes, + codes: codes, + } + select { + case s.bytecodeResps <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// OnStorage is a callback method to invoke when ranges of storage slots +// are received from a remote peer. 
+func (s *Syncer) OnStorage(peer SyncPeer, id uint64, hashes [][]common.Hash, slots [][][]byte, proof [][]byte) error { + // Gather some trace stats to aid in debugging issues + var ( + hashCount int + slotCount int + size common.StorageSize + ) + for _, hashset := range hashes { + size += common.StorageSize(common.HashLength * len(hashset)) + hashCount += len(hashset) + } + for _, slotset := range slots { + for _, slot := range slotset { + size += common.StorageSize(len(slot)) + } + slotCount += len(slotset) + } + for _, node := range proof { + size += common.StorageSize(len(node)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering ranges of storage slots", "accounts", len(hashes), "hashes", hashCount, "slots", slotCount, "proofs", len(proof), "size", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. + s.lock.Lock() + if _, ok := s.peers[peer.ID()]; ok { + s.storageIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + // Ensure the response is for a valid request + req, ok := s.storageReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected storage ranges packet") + s.lock.Unlock() + return nil + } + delete(s.storageReqs, id) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Reject the response if the hash sets and slot sets don't match, or if the + // peer sent more data than requested. + if len(hashes) != len(slots) { + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + logger.Warn("Hash and slot set size mismatch", "hashset", len(hashes), "slotset", len(slots)) + return errors.New("hash and slot set size mismatch") + } + if len(hashes) > len(req.accounts) { + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + logger.Warn("Hash set larger than requested", "hashset", len(hashes), "requested", len(req.accounts)) + return errors.New("hash set larger than requested") + } + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For storage range queries that means the state being + // retrieved was either already pruned remotely, or the peer is not yet + // synced to our head. + if len(hashes) == 0 { + logger.Debug("Peer rejected storage request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + s.scheduleRevertStorageRequest(req) // reschedule request + return nil + } + s.lock.Unlock() + + // Reconstruct the partial tries from the response and verify them + var ( + dbs = make([]ethdb.KeyValueStore, len(hashes)) + tries = make([]*trie.Trie, len(hashes)) + notary *trie.KeyValueNotary + cont bool + ) + for i := 0; i < len(hashes); i++ { + // Convert the keys and proofs into an internal format + keys := make([][]byte, len(hashes[i])) + for j, key := range hashes[i] { + keys[j] = common.CopyBytes(key[:]) + } + nodes := make(light.NodeList, 0, len(proof)) + if i == len(hashes)-1 { + for _, node := range proof { + nodes = append(nodes, node) + } + } + var err error + if len(nodes) == 0 { + // No proof has been attached, the response must cover the entire key + // space and hash to the origin root. 
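+			// (With nil boundary proofs, verification only succeeds if the delivered
+			// slots rebuild req.roots[i] exactly; 'cont' is left untouched here since
+			// a proofless response cannot be continued.)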
+			dbs[i], tries[i], _, _, err = trie.VerifyRangeProof(req.roots[i], nil, nil, keys, slots[i], nil)
+			if err != nil {
+				s.scheduleRevertStorageRequest(req) // reschedule request
+				logger.Warn("Storage slots failed proof", "err", err)
+				return err
+			}
+		} else {
+			// A proof was attached, the response is only partial, check that the
+			// returned data is indeed part of the storage trie
+			proofdb := nodes.NodeSet()
+
+			var end []byte
+			if len(keys) > 0 {
+				end = keys[len(keys)-1]
+			}
+			dbs[i], tries[i], notary, cont, err = trie.VerifyRangeProof(req.roots[i], req.origin[:], end, keys, slots[i], proofdb)
+			if err != nil {
+				s.scheduleRevertStorageRequest(req) // reschedule request
+				logger.Warn("Storage range failed proof", "err", err)
+				return err
+			}
+		}
+	}
+	// Partial tries reconstructed, send them to the scheduler for storage filling
+	bounds := make(map[common.Hash]struct{})
+
+	if notary != nil { // if all contract storages are delivered in full, no notary will be created
+		it := notary.Accessed().NewIterator(nil, nil)
+		for it.Next() {
+			bounds[common.BytesToHash(it.Key())] = struct{}{}
+		}
+		it.Release()
+	}
+	response := &storageResponse{
+		mainTask: req.mainTask,
+		subTask:  req.subTask,
+		accounts: req.accounts,
+		roots:    req.roots,
+		hashes:   hashes,
+		slots:    slots,
+		nodes:    dbs,
+		tries:    tries,
+		bounds:   bounds,
+		overflow: light.NewNodeSet(),
+		cont:     cont,
+	}
+	select {
+	case s.storageResps <- response:
+	case <-req.cancel:
+	case <-req.stale:
+	}
+	return nil
+}
+
+// OnTrieNodes is a callback method to invoke when a batch of trie nodes
+// are received from a remote peer.
+func (s *Syncer) OnTrieNodes(peer SyncPeer, id uint64, trienodes [][]byte) error {
+	var size common.StorageSize
+	for _, node := range trienodes {
+		size += common.StorageSize(len(node))
+	}
+	logger := peer.Log().New("reqid", id)
+	logger.Trace("Delivering set of healing trienodes", "trienodes", len(trienodes), "bytes", size)
+
+	// Whether or not the response is valid, we can mark the peer as idle and
+	// notify the scheduler to assign a new task. If the response is invalid,
+	// we'll drop the peer in a bit.
+	s.lock.Lock()
+	if _, ok := s.peers[peer.ID()]; ok {
+		s.trienodeHealIdlers[peer.ID()] = struct{}{}
+	}
+	select {
+	case s.update <- struct{}{}:
+	default:
+	}
+	// Ensure the response is for a valid request
+	req, ok := s.trienodeHealReqs[id]
+	if !ok {
+		// Request stale, perhaps the peer timed out but came through in the end
+		logger.Warn("Unexpected trienode heal packet")
+		s.lock.Unlock()
+		return nil
+	}
+	delete(s.trienodeHealReqs, id)
+
+	// Clean up the request timeout timer, we'll see how to proceed further based
+	// on the actual delivered content
+	if !req.timeout.Stop() {
+		// The timeout is already triggered, and this request will be reverted+rescheduled
+		s.lock.Unlock()
+		return nil
+	}
+
+	// Response is valid, but check if peer is signalling that it does not have
+	// the requested data. For trienode heal queries that means the peer is not
+	// yet synced.
+ if len(trienodes) == 0 { + logger.Debug("Peer rejected trienode heal request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertTrienodeHealRequest(req) + return nil + } + s.lock.Unlock() + + // Cross reference the requested trienodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + + nodes := make([][]byte, len(req.hashes)) + for i, j := 0, 0; i < len(trienodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(trienodes[i]) + hasher.Read(hash) + + for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) { + j++ + } + if j < len(req.hashes) { + nodes[j] = trienodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + logger.Warn("Unexpected healing trienodes", "count", len(trienodes)-i) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertTrienodeHealRequest(req) + return errors.New("unexpected healing trienode") + } + // Response validated, send it to the scheduler for filling + response := &trienodeHealResponse{ + task: req.task, + hashes: req.hashes, + paths: req.paths, + nodes: nodes, + } + select { + case s.trienodeHealResps <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// onHealByteCodes is a callback method to invoke when a batch of contract +// bytes codes are received from a remote peer in the healing phase. +func (s *Syncer) onHealByteCodes(peer SyncPeer, id uint64, bytecodes [][]byte) error { + var size common.StorageSize + for _, code := range bytecodes { + size += common.StorageSize(len(code)) + } + logger := peer.Log().New("reqid", id) + logger.Trace("Delivering set of healing bytecodes", "bytecodes", len(bytecodes), "bytes", size) + + // Whether or not the response is valid, we can mark the peer as idle and + // notify the scheduler to assign a new task. If the response is invalid, + // we'll drop the peer in a bit. + s.lock.Lock() + if _, ok := s.peers[peer.ID()]; ok { + s.bytecodeHealIdlers[peer.ID()] = struct{}{} + } + select { + case s.update <- struct{}{}: + default: + } + // Ensure the response is for a valid request + req, ok := s.bytecodeHealReqs[id] + if !ok { + // Request stale, perhaps the peer timed out but came through in the end + logger.Warn("Unexpected bytecode heal packet") + s.lock.Unlock() + return nil + } + delete(s.bytecodeHealReqs, id) + + // Clean up the request timeout timer, we'll see how to proceed further based + // on the actual delivered content + if !req.timeout.Stop() { + // The timeout is already triggered, and this request will be reverted+rescheduled + s.lock.Unlock() + return nil + } + + // Response is valid, but check if peer is signalling that it does not have + // the requested data. For bytecode range queries that means the peer is not + // yet synced. 
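+	// (An empty reply is not treated as a protocol violation: the peer is only
+	// flagged as stateless and the request is reverted so it can be retried
+	// against a different peer.)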
+ if len(bytecodes) == 0 { + logger.Debug("Peer rejected bytecode heal request") + s.statelessPeers[peer.ID()] = struct{}{} + s.lock.Unlock() + + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeHealRequest(req) + return nil + } + s.lock.Unlock() + + // Cross reference the requested bytecodes with the response to find gaps + // that the serving node is missing + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + hash := make([]byte, 32) + + codes := make([][]byte, len(req.hashes)) + for i, j := 0, 0; i < len(bytecodes); i++ { + // Find the next hash that we've been served, leaving misses with nils + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + + for j < len(req.hashes) && !bytes.Equal(hash, req.hashes[j][:]) { + j++ + } + if j < len(req.hashes) { + codes[j] = bytecodes[i] + j++ + continue + } + // We've either ran out of hashes, or got unrequested data + logger.Warn("Unexpected healing bytecodes", "count", len(bytecodes)-i) + // Signal this request as failed, and ready for rescheduling + s.scheduleRevertBytecodeHealRequest(req) + return errors.New("unexpected healing bytecode") + } + // Response validated, send it to the scheduler for filling + response := &bytecodeHealResponse{ + task: req.task, + hashes: req.hashes, + codes: codes, + } + select { + case s.bytecodeHealResps <- response: + case <-req.cancel: + case <-req.stale: + } + return nil +} + +// hashSpace is the total size of the 256 bit hash space for accounts. +var hashSpace = new(big.Int).Exp(common.Big2, common.Big256, nil) + +// report calculates various status reports and provides it to the user. +func (s *Syncer) report(force bool) { + if len(s.tasks) > 0 { + s.reportSyncProgress(force) + return + } + s.reportHealProgress(force) +} + +// reportSyncProgress calculates various status reports and provides it to the user. +func (s *Syncer) reportSyncProgress(force bool) { + // Don't report all the events, just occasionally + if !force && time.Since(s.logTime) < 3*time.Second { + return + } + // Don't report anything until we have a meaningful progress + synced := s.accountBytes + s.bytecodeBytes + s.storageBytes + if synced == 0 { + return + } + accountGaps := new(big.Int) + for _, task := range s.tasks { + accountGaps.Add(accountGaps, new(big.Int).Sub(task.Last.Big(), task.Next.Big())) + } + accountFills := new(big.Int).Sub(hashSpace, accountGaps) + if accountFills.BitLen() == 0 { + return + } + s.logTime = time.Now() + estBytes := float64(new(big.Int).Div( + new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace), + accountFills, + ).Uint64()) + + elapsed := time.Since(s.startTime) + estTime := elapsed / time.Duration(synced) * time.Duration(estBytes) + + // Create a mega progress report + var ( + progress = fmt.Sprintf("%.2f%%", float64(synced)*100/estBytes) + accounts = fmt.Sprintf("%d@%v", s.accountSynced, s.accountBytes.TerminalString()) + storage = fmt.Sprintf("%d@%v", s.storageSynced, s.storageBytes.TerminalString()) + bytecode = fmt.Sprintf("%d@%v", s.bytecodeSynced, s.bytecodeBytes.TerminalString()) + ) + log.Info("State sync in progress", "synced", progress, "state", synced, + "accounts", accounts, "slots", storage, "codes", bytecode, "eta", common.PrettyDuration(estTime-elapsed)) +} + +// reportHealProgress calculates various status reports and provides it to the user. 
+func (s *Syncer) reportHealProgress(force bool) { + // Don't report all the events, just occasionally + if !force && time.Since(s.logTime) < 3*time.Second { + return + } + s.logTime = time.Now() + + // Create a mega progress report + var ( + trienode = fmt.Sprintf("%d@%v", s.trienodeHealSynced, s.trienodeHealBytes.TerminalString()) + bytecode = fmt.Sprintf("%d@%v", s.bytecodeHealSynced, s.bytecodeHealBytes.TerminalString()) + ) + log.Info("State heal in progress", "nodes", trienode, "codes", bytecode, + "pending", s.healer.scheduler.Pending()) +} diff --git a/eth/protocols/snap/sync_test.go b/eth/protocols/snap/sync_test.go new file mode 100644 index 0000000000..0b048786e8 --- /dev/null +++ b/eth/protocols/snap/sync_test.go @@ -0,0 +1,1118 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package snap + +import ( + "bytes" + "crypto/rand" + "encoding/binary" + "fmt" + "math/big" + "sort" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" + "golang.org/x/crypto/sha3" +) + +func TestHashing(t *testing.T) { + t.Parallel() + + var bytecodes = make([][]byte, 10) + for i := 0; i < len(bytecodes); i++ { + buf := make([]byte, 100) + rand.Read(buf) + bytecodes[i] = buf + } + var want, got string + var old = func() { + hasher := sha3.NewLegacyKeccak256() + for i := 0; i < len(bytecodes); i++ { + hasher.Reset() + hasher.Write(bytecodes[i]) + hash := hasher.Sum(nil) + got = fmt.Sprintf("%v\n%v", got, hash) + } + } + var new = func() { + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + var hash = make([]byte, 32) + for i := 0; i < len(bytecodes); i++ { + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + want = fmt.Sprintf("%v\n%v", want, hash) + } + } + old() + new() + if want != got { + t.Errorf("want\n%v\ngot\n%v\n", want, got) + } +} + +func BenchmarkHashing(b *testing.B) { + var bytecodes = make([][]byte, 10000) + for i := 0; i < len(bytecodes); i++ { + buf := make([]byte, 100) + rand.Read(buf) + bytecodes[i] = buf + } + var old = func() { + hasher := sha3.NewLegacyKeccak256() + for i := 0; i < len(bytecodes); i++ { + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Sum(nil) + } + } + var new = func() { + hasher := sha3.NewLegacyKeccak256().(crypto.KeccakState) + var hash = make([]byte, 32) + for i := 0; i < len(bytecodes); i++ { + hasher.Reset() + hasher.Write(bytecodes[i]) + hasher.Read(hash) + } + } + b.Run("old", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + old() + } + }) + b.Run("new", func(b *testing.B) { + 
b.ReportAllocs() + for i := 0; i < b.N; i++ { + new() + } + }) +} + +type storageHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error +type accountHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error +type trieHandlerFunc func(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error +type codeHandlerFunc func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error + +type testPeer struct { + id string + test *testing.T + remote *Syncer + logger log.Logger + accountTrie *trie.Trie + accountValues entrySlice + storageTries map[common.Hash]*trie.Trie + storageValues map[common.Hash]entrySlice + + accountRequestHandler accountHandlerFunc + storageRequestHandler storageHandlerFunc + trieRequestHandler trieHandlerFunc + codeRequestHandler codeHandlerFunc + cancelCh chan struct{} +} + +func newTestPeer(id string, t *testing.T, cancelCh chan struct{}) *testPeer { + peer := &testPeer{ + id: id, + test: t, + logger: log.New("id", id), + accountRequestHandler: defaultAccountRequestHandler, + trieRequestHandler: defaultTrieRequestHandler, + storageRequestHandler: defaultStorageRequestHandler, + codeRequestHandler: defaultCodeRequestHandler, + cancelCh: cancelCh, + } + //stderrHandler := log.StreamHandler(os.Stderr, log.TerminalFormat(true)) + //peer.logger.SetHandler(stderrHandler) + return peer + +} + +func (t *testPeer) ID() string { return t.id } +func (t *testPeer) Log() log.Logger { return t.logger } + +func (t *testPeer) RequestAccountRange(id uint64, root, origin, limit common.Hash, bytes uint64) error { + t.logger.Trace("Fetching range of accounts", "reqid", id, "root", root, "origin", origin, "limit", limit, "bytes", common.StorageSize(bytes)) + go t.accountRequestHandler(t, id, root, origin, bytes) + return nil +} + +func (t *testPeer) RequestTrieNodes(id uint64, root common.Hash, paths []TrieNodePathSet, bytes uint64) error { + t.logger.Trace("Fetching set of trie nodes", "reqid", id, "root", root, "pathsets", len(paths), "bytes", common.StorageSize(bytes)) + go t.trieRequestHandler(t, id, root, paths, bytes) + return nil +} + +func (t *testPeer) RequestStorageRanges(id uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, bytes uint64) error { + if len(accounts) == 1 && origin != nil { + t.logger.Trace("Fetching range of large storage slots", "reqid", id, "root", root, "account", accounts[0], "origin", common.BytesToHash(origin), "limit", common.BytesToHash(limit), "bytes", common.StorageSize(bytes)) + } else { + t.logger.Trace("Fetching ranges of small storage slots", "reqid", id, "root", root, "accounts", len(accounts), "first", accounts[0], "bytes", common.StorageSize(bytes)) + } + go t.storageRequestHandler(t, id, root, accounts, origin, limit, bytes) + return nil +} + +func (t *testPeer) RequestByteCodes(id uint64, hashes []common.Hash, bytes uint64) error { + t.logger.Trace("Fetching set of byte codes", "reqid", id, "hashes", len(hashes), "bytes", common.StorageSize(bytes)) + go t.codeRequestHandler(t, id, hashes, bytes) + return nil +} + +// defaultTrieRequestHandler is a well-behaving handler for trie healing requests +func defaultTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { + // Pass the response + var nodes [][]byte + for _, pathset := range paths { + switch len(pathset) { + case 1: + blob, _, err := 
t.accountTrie.TryGetNode(pathset[0]) + if err != nil { + t.logger.Info("Error handling req", "error", err) + break + } + nodes = append(nodes, blob) + default: + account := t.storageTries[(common.BytesToHash(pathset[0]))] + for _, path := range pathset[1:] { + blob, _, err := account.TryGetNode(path) + if err != nil { + t.logger.Info("Error handling req", "error", err) + break + } + nodes = append(nodes, blob) + } + } + } + t.remote.OnTrieNodes(t, requestId, nodes) + return nil +} + +// defaultAccountRequestHandler is a well-behaving handler for AccountRangeRequests +func defaultAccountRequestHandler(t *testPeer, id uint64, root common.Hash, origin common.Hash, cap uint64) error { + keys, vals, proofs := createAccountRequestResponse(t, root, origin, cap) + if err := t.remote.OnAccounts(t, id, keys, vals, proofs); err != nil { + t.logger.Error("remote error on delivery", "error", err) + t.test.Errorf("Remote side rejected our delivery: %v", err) + t.remote.Unregister(t.id) + close(t.cancelCh) + return err + } + return nil +} + +func createAccountRequestResponse(t *testPeer, root common.Hash, origin common.Hash, cap uint64) (keys []common.Hash, vals [][]byte, proofs [][]byte) { + var size uint64 + for _, entry := range t.accountValues { + if size > cap { + break + } + if bytes.Compare(origin[:], entry.k) <= 0 { + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + } + } + // Unless we send the entire trie, we need to supply proofs + // Actually, we need to supply proofs either way! This seems tob be an implementation + // quirk in go-ethereum + proof := light.NewNodeSet() + if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", origin, + "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := t.accountTrie.Prove(lastK, 0, proof); err != nil { + t.logger.Error("Could not prove last item", + "error", err) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + return keys, vals, proofs +} + +// defaultStorageRequestHandler is a well-behaving storage request handler +func defaultStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, bOrigin, bLimit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.logger.Error("remote error on delivery", "error", err) + t.test.Errorf("Remote side rejected our delivery: %v", err) + close(t.cancelCh) + } + return nil +} + +func defaultCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + var bytecodes [][]byte + for _, h := range hashes { + bytecodes = append(bytecodes, getCode(h)) + } + if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { + t.logger.Error("remote error on delivery", "error", err) + t.test.Errorf("Remote side rejected our delivery: %v", err) + close(t.cancelCh) + } + return nil +} + +func createStorageRequestResponse(t *testPeer, root common.Hash, accounts []common.Hash, bOrigin, bLimit []byte, max uint64) (hashes [][]common.Hash, slots [][][]byte, proofs [][]byte) { + var ( + size uint64 + limit = common.HexToHash("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff") + ) + if len(bLimit) > 0 { + limit = common.BytesToHash(bLimit) + } + var origin common.Hash + if len(bOrigin) > 0 { + 
origin = common.BytesToHash(bOrigin) + } + + var limitExceeded bool + var incomplete bool + for _, account := range accounts { + + var keys []common.Hash + var vals [][]byte + for _, entry := range t.storageValues[account] { + if limitExceeded { + incomplete = true + break + } + if bytes.Compare(entry.k, origin[:]) < 0 { + incomplete = true + continue + } + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + size += uint64(32 + len(entry.v)) + if bytes.Compare(entry.k, limit[:]) >= 0 { + limitExceeded = true + } + if size > max { + limitExceeded = true + } + } + hashes = append(hashes, keys) + slots = append(slots, vals) + + if incomplete { + // If we're aborting, we need to prove the first and last item + // This terminates the response (and thus the loop) + proof := light.NewNodeSet() + stTrie := t.storageTries[account] + + // Here's a potential gotcha: when constructing the proof, we cannot + // use the 'origin' slice directly, but must use the full 32-byte + // hash form. + if err := stTrie.Prove(origin[:], 0, proof); err != nil { + t.logger.Error("Could not prove inexistence of origin", "origin", origin, + "error", err) + } + if len(keys) > 0 { + lastK := (keys[len(keys)-1])[:] + if err := stTrie.Prove(lastK, 0, proof); err != nil { + t.logger.Error("Could not prove last item", "error", err) + } + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + break + } + } + return hashes, slots, proofs +} + +// emptyRequestAccountRangeFn is a rejects AccountRangeRequests +func emptyRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + var proofs [][]byte + var keys []common.Hash + var vals [][]byte + t.remote.OnAccounts(t, requestId, keys, vals, proofs) + return nil +} + +func nonResponsiveRequestAccountRangeFn(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + return nil +} + +func emptyTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { + var nodes [][]byte + t.remote.OnTrieNodes(t, requestId, nodes) + return nil +} + +func nonResponsiveTrieRequestHandler(t *testPeer, requestId uint64, root common.Hash, paths []TrieNodePathSet, cap uint64) error { + return nil +} + +func emptyStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + var hashes [][]common.Hash + var slots [][][]byte + var proofs [][]byte + t.remote.OnStorage(t, requestId, hashes, slots, proofs) + return nil +} + +func nonResponsiveStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + return nil +} + +//func emptyCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { +// var bytecodes [][]byte +// t.remote.OnByteCodes(t, id, bytecodes) +// return nil +//} + +func corruptCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + var bytecodes [][]byte + for _, h := range hashes { + // Send back the hashes + bytecodes = append(bytecodes, h[:]) + } + if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { + t.logger.Error("remote error on delivery", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +func cappedCodeRequestHandler(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + var bytecodes 
[][]byte + for _, h := range hashes[:1] { + bytecodes = append(bytecodes, getCode(h)) + } + if err := t.remote.OnByteCodes(t, id, bytecodes); err != nil { + t.logger.Error("remote error on delivery", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +// starvingStorageRequestHandler is somewhat well-behaving storage handler, but it caps the returned results to be very small +func starvingStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + return defaultStorageRequestHandler(t, requestId, root, accounts, origin, limit, 500) +} + +func starvingAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + return defaultAccountRequestHandler(t, requestId, root, origin, 500) +} + +//func misdeliveringAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { +// return defaultAccountRequestHandler(t, requestId-1, root, origin, 500) +//} + +func corruptAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + hashes, accounts, proofs := createAccountRequestResponse(t, root, origin, cap) + if len(proofs) > 0 { + proofs = proofs[1:] + } + if err := t.remote.OnAccounts(t, requestId, hashes, accounts, proofs); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +// corruptStorageRequestHandler doesn't provide good proofs +func corruptStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, proofs := createStorageRequestResponse(t, root, accounts, origin, limit, max) + if len(proofs) > 0 { + proofs = proofs[1:] + } + if err := t.remote.OnStorage(t, requestId, hashes, slots, proofs); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +func noProofStorageRequestHandler(t *testPeer, requestId uint64, root common.Hash, accounts []common.Hash, origin, limit []byte, max uint64) error { + hashes, slots, _ := createStorageRequestResponse(t, root, accounts, origin, limit, max) + if err := t.remote.OnStorage(t, requestId, hashes, slots, nil); err != nil { + t.logger.Info("remote error on delivery (as expected)", "error", err) + // Mimic the real-life handler, which drops a peer on errors + t.remote.Unregister(t.id) + } + return nil +} + +// TestSyncBloatedProof tests a scenario where we provide only _one_ value, but +// also ship the entire trie inside the proof. 
If the attack is successful, +// the remote side does not do any follow-up requests +func TestSyncBloatedProof(t *testing.T) { + t.Parallel() + + sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + cancel := make(chan struct{}) + source := newTestPeer("source", t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + + source.accountRequestHandler = func(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error { + var proofs [][]byte + var keys []common.Hash + var vals [][]byte + + // The values + for _, entry := range t.accountValues { + if bytes.Compare(origin[:], entry.k) <= 0 { + keys = append(keys, common.BytesToHash(entry.k)) + vals = append(vals, entry.v) + } + } + // The proofs + proof := light.NewNodeSet() + if err := t.accountTrie.Prove(origin[:], 0, proof); err != nil { + t.logger.Error("Could not prove origin", "origin", origin, "error", err) + } + // The bloat: add proof of every single element + for _, entry := range t.accountValues { + if err := t.accountTrie.Prove(entry.k, 0, proof); err != nil { + t.logger.Error("Could not prove item", "error", err) + } + } + // And remove one item from the elements + if len(keys) > 2 { + keys = append(keys[:1], keys[2:]...) + vals = append(vals[:1], vals[2:]...) + } + for _, blob := range proof.NodeList() { + proofs = append(proofs, blob) + } + if err := t.remote.OnAccounts(t, requestId, keys, vals, proofs); err != nil { + t.logger.Info("remote error on delivery", "error", err) + // This is actually correct, signal to exit the test successfully + close(t.cancelCh) + } + return nil + } + syncer := setupSyncer(source) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err == nil { + t.Fatal("No error returned from incomplete/cancelled sync") + } +} + +func setupSyncer(peers ...*testPeer) *Syncer { + stateDb := rawdb.NewMemoryDatabase() + syncer := NewSyncer(stateDb, trie.NewSyncBloom(1, stateDb)) + for _, peer := range peers { + syncer.Register(peer) + peer.remote = syncer + } + return syncer +} + +// TestSync tests a basic sync with one peer +func TestSync(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + sourceAccountTrie, elems := makeAccountTrieNoStorage(100) + + mkSource := func(name string) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + return source + } + + syncer := setupSyncer(mkSource("sourceA")) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } +} + +// TestSyncTinyTriePanic tests a basic sync with one peer, and a tiny trie. 
This caused a
+// panic within the prover
+func TestSyncTinyTriePanic(t *testing.T) {
+	t.Parallel()
+
+	cancel := make(chan struct{})
+
+	sourceAccountTrie, elems := makeAccountTrieNoStorage(1)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, cancel)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		return source
+	}
+
+	syncer := setupSyncer(
+		mkSource("nice-a"),
+	)
+	done := checkStall(t, cancel)
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+	close(done)
+}
+
+// TestMultiSync tests a basic sync with multiple peers
+func TestMultiSync(t *testing.T) {
+	t.Parallel()
+
+	cancel := make(chan struct{})
+	sourceAccountTrie, elems := makeAccountTrieNoStorage(100)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, cancel)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		return source
+	}
+
+	syncer := setupSyncer(mkSource("sourceA"), mkSource("sourceB"))
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+}
+
+// TestSyncWithStorage tests basic sync using accounts + storage + code
+func TestSyncWithStorage(t *testing.T) {
+	t.Parallel()
+
+	cancel := make(chan struct{})
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(3, 3000, true)
+
+	mkSource := func(name string) *testPeer {
+		source := newTestPeer(name, t, cancel)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		source.storageTries = storageTries
+		source.storageValues = storageElems
+		return source
+	}
+	syncer := setupSyncer(mkSource("sourceA"))
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+}
+
+// TestMultiSyncManyUseless contains one good peer, and many which don't return anything valuable at all
+func TestMultiSyncManyUseless(t *testing.T) {
+	t.Parallel()
+
+	cancel := make(chan struct{})
+
+	sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true)
+
+	mkSource := func(name string, a, b, c bool) *testPeer {
+		source := newTestPeer(name, t, cancel)
+		source.accountTrie = sourceAccountTrie
+		source.accountValues = elems
+		source.storageTries = storageTries
+		source.storageValues = storageElems
+
+		if !a {
+			source.accountRequestHandler = emptyRequestAccountRangeFn
+		}
+		if !b {
+			source.storageRequestHandler = emptyStorageRequestHandler
+		}
+		if !c {
+			source.trieRequestHandler = emptyTrieRequestHandler
+		}
+		return source
+	}
+
+	syncer := setupSyncer(
+		mkSource("full", true, true, true),
+		mkSource("noAccounts", false, true, true),
+		mkSource("noStorage", true, false, true),
+		mkSource("noTrie", true, true, false),
+	)
+	if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil {
+		t.Fatalf("sync failed: %v", err)
+	}
+}
+
+// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which don't return anything valuable at all
+func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
+	// We're setting the timeout to very low, to increase the chance of the timeout
+	// being triggered. This was previously a cause of panic, when a response
+	// arrived simultaneously as a timeout was triggered.
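+	// (The deferred closure below captures the original requestTimeout by value
+	// and restores it once the test returns, so other tests are unaffected.)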
+ defer func(old time.Duration) { requestTimeout = old }(requestTimeout) + requestTimeout = time.Millisecond + + cancel := make(chan struct{}) + + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + + mkSource := func(name string, a, b, c bool) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + + if !a { + source.accountRequestHandler = emptyRequestAccountRangeFn + } + if !b { + source.storageRequestHandler = emptyStorageRequestHandler + } + if !c { + source.trieRequestHandler = emptyTrieRequestHandler + } + return source + } + + syncer := setupSyncer( + mkSource("full", true, true, true), + mkSource("noAccounts", false, true, true), + mkSource("noStorage", true, false, true), + mkSource("noTrie", true, true, false), + ) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } +} + +// TestMultiSyncManyUnresponsive contains one good peer, and many which doesn't respond at all +func TestMultiSyncManyUnresponsive(t *testing.T) { + // We're setting the timeout to very low, to make the test run a bit faster + defer func(old time.Duration) { requestTimeout = old }(requestTimeout) + requestTimeout = time.Millisecond + + cancel := make(chan struct{}) + + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + + mkSource := func(name string, a, b, c bool) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + + if !a { + source.accountRequestHandler = nonResponsiveRequestAccountRangeFn + } + if !b { + source.storageRequestHandler = nonResponsiveStorageRequestHandler + } + if !c { + source.trieRequestHandler = nonResponsiveTrieRequestHandler + } + return source + } + + syncer := setupSyncer( + mkSource("full", true, true, true), + mkSource("noAccounts", false, true, true), + mkSource("noStorage", true, false, true), + mkSource("noTrie", true, true, false), + ) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } +} + +func checkStall(t *testing.T, cancel chan struct{}) chan struct{} { + testDone := make(chan struct{}) + go func() { + select { + case <-time.After(time.Minute): // TODO(karalabe): Make tests smaller, this is too much + t.Log("Sync stalled") + close(cancel) + case <-testDone: + return + } + }() + return testDone +} + +// TestSyncNoStorageAndOneCappedPeer tests sync using accounts and no storage, where one peer is +// consistently returning very small results +func TestSyncNoStorageAndOneCappedPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + + mkSource := func(name string, slow bool) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + + if slow { + source.accountRequestHandler = starvingAccountRequestHandler + } + return source + } + + syncer := setupSyncer( + mkSource("nice-a", false), + mkSource("nice-b", false), + mkSource("nice-c", false), + mkSource("capped", true), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} 
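+
+// flakyAccountRequestHandler is an illustrative sketch only (it is not
+// referenced by any test in this file) of how the harness above can be
+// extended: a custom accountHandlerFunc may wrap defaultAccountRequestHandler
+// to exercise the timeout and revert paths, here by ignoring every other
+// request and letting the syncer reschedule it.
+func flakyAccountRequestHandler(t *testPeer, requestId uint64, root common.Hash, origin common.Hash, cap uint64) error {
+	// Drop even-numbered requests on the floor; the syncer's timeout will
+	// revert and reassign them, while odd-numbered ones are served normally.
+	if requestId%2 == 0 {
+		return nil
+	}
+	return defaultAccountRequestHandler(t, requestId, root, origin, cap)
+}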
+ +// TestSyncNoStorageAndOneCodeCorruptPeer has one peer which doesn't deliver +// code requests properly. +func TestSyncNoStorageAndOneCodeCorruptPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + + mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.codeRequestHandler = codeFn + return source + } + // One is capped, one is corrupt. If we don't use a capped one, there's a 50% + // chance that the full set of codes requested are sent only to the + // non-corrupt peer, which delivers everything in one go, and makes the + // test moot + syncer := setupSyncer( + mkSource("capped", cappedCodeRequestHandler), + mkSource("corrupt", corruptCodeRequestHandler), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + +func TestSyncNoStorageAndOneAccountCorruptPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + + mkSource := func(name string, accFn accountHandlerFunc) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.accountRequestHandler = accFn + return source + } + // One is capped, one is corrupt. If we don't use a capped one, there's a 50% + // chance that the full set of codes requested are sent only to the + // non-corrupt peer, which delivers everything in one go, and makes the + // test moot + syncer := setupSyncer( + mkSource("capped", defaultAccountRequestHandler), + mkSource("corrupt", corruptAccountRequestHandler), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + +// TestSyncNoStorageAndOneCodeCappedPeer has one peer which delivers code hashes +// one by one +func TestSyncNoStorageAndOneCodeCappedPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems := makeAccountTrieNoStorage(3000) + + mkSource := func(name string, codeFn codeHandlerFunc) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.codeRequestHandler = codeFn + return source + } + // Count how many times it's invoked. Remember, there are only 8 unique hashes, + // so it shouldn't be more than that + var counter int + syncer := setupSyncer( + mkSource("capped", func(t *testPeer, id uint64, hashes []common.Hash, max uint64) error { + counter++ + return cappedCodeRequestHandler(t, id, hashes, max) + }), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) + // There are only 8 unique hashes, and 3K accounts. However, the code + // deduplication is per request batch. If it were a perfect global dedup, + // we would expect only 8 requests. If there were no dedup, there would be + // 3k requests. + // We expect somewhere below 100 requests for these 8 unique hashes. 
+ if threshold := 100; counter > threshold { + t.Fatalf("Error, expected < %d invocations, got %d", threshold, counter) + } +} + +// TestSyncWithStorageAndOneCappedPeer tests sync using accounts + storage, where one peer is +// consistently returning very small results +func TestSyncWithStorageAndOneCappedPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(300, 1000, false) + + mkSource := func(name string, slow bool) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + + if slow { + source.storageRequestHandler = starvingStorageRequestHandler + } + return source + } + + syncer := setupSyncer( + mkSource("nice-a", false), + mkSource("slow", true), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + +// TestSyncWithStorageAndCorruptPeer tests sync using accounts + storage, where one peer is +// sometimes sending bad proofs +func TestSyncWithStorageAndCorruptPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + + mkSource := func(name string, handler storageHandlerFunc) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + source.storageRequestHandler = handler + return source + } + + syncer := setupSyncer( + mkSource("nice-a", defaultStorageRequestHandler), + mkSource("nice-b", defaultStorageRequestHandler), + mkSource("nice-c", defaultStorageRequestHandler), + mkSource("corrupt", corruptStorageRequestHandler), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + +func TestSyncWithStorageAndNonProvingPeer(t *testing.T) { + t.Parallel() + + cancel := make(chan struct{}) + + sourceAccountTrie, elems, storageTries, storageElems := makeAccountTrieWithStorage(100, 3000, true) + + mkSource := func(name string, handler storageHandlerFunc) *testPeer { + source := newTestPeer(name, t, cancel) + source.accountTrie = sourceAccountTrie + source.accountValues = elems + source.storageTries = storageTries + source.storageValues = storageElems + source.storageRequestHandler = handler + return source + } + + syncer := setupSyncer( + mkSource("nice-a", defaultStorageRequestHandler), + mkSource("nice-b", defaultStorageRequestHandler), + mkSource("nice-c", defaultStorageRequestHandler), + mkSource("corrupt", noProofStorageRequestHandler), + ) + done := checkStall(t, cancel) + if err := syncer.Sync(sourceAccountTrie.Hash(), cancel); err != nil { + t.Fatalf("sync failed: %v", err) + } + close(done) +} + +type kv struct { + k, v []byte + t bool +} + +// Some helpers for sorting +type entrySlice []*kv + +func (p entrySlice) Len() int { return len(p) } +func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } +func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func key32(i uint64) []byte { + key := make([]byte, 32) + binary.LittleEndian.PutUint64(key, i) + return key +} + +var ( + codehashes = []common.Hash{ + 
crypto.Keccak256Hash([]byte{0}), + crypto.Keccak256Hash([]byte{1}), + crypto.Keccak256Hash([]byte{2}), + crypto.Keccak256Hash([]byte{3}), + crypto.Keccak256Hash([]byte{4}), + crypto.Keccak256Hash([]byte{5}), + crypto.Keccak256Hash([]byte{6}), + crypto.Keccak256Hash([]byte{7}), + } +) + +// getACodeHash returns a pseudo-random code hash +func getACodeHash(i uint64) []byte { + h := codehashes[int(i)%len(codehashes)] + return common.CopyBytes(h[:]) +} + +// convenience function to lookup the code from the code hash +func getCode(hash common.Hash) []byte { + if hash == emptyCode { + return nil + } + for i, h := range codehashes { + if h == hash { + return []byte{byte(i)} + } + } + return nil +} + +// makeAccountTrieNoStorage spits out a trie, along with the leafs +func makeAccountTrieNoStorage(n int) (*trie.Trie, entrySlice) { + db := trie.NewDatabase(rawdb.NewMemoryDatabase()) + accTrie, _ := trie.New(common.Hash{}, db) + var entries entrySlice + for i := uint64(1); i <= uint64(n); i++ { + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: emptyRoot, + CodeHash: getACodeHash(i), + }) + key := key32(i) + elem := &kv{key, value, false} + accTrie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + // Push to disk layer + accTrie.Commit(nil) + return accTrie, entries +} + +// makeAccountTrieWithStorage spits out a trie, along with the leafs +func makeAccountTrieWithStorage(accounts, slots int, code bool) (*trie.Trie, entrySlice, + map[common.Hash]*trie.Trie, map[common.Hash]entrySlice) { + + var ( + db = trie.NewDatabase(rawdb.NewMemoryDatabase()) + accTrie, _ = trie.New(common.Hash{}, db) + entries entrySlice + storageTries = make(map[common.Hash]*trie.Trie) + storageEntries = make(map[common.Hash]entrySlice) + ) + + // Make a storage trie which we reuse for the whole lot + stTrie, stEntries := makeStorageTrie(slots, db) + stRoot := stTrie.Hash() + // Create n accounts in the trie + for i := uint64(1); i <= uint64(accounts); i++ { + key := key32(i) + codehash := emptyCode[:] + if code { + codehash = getACodeHash(i) + } + value, _ := rlp.EncodeToBytes(state.Account{ + Nonce: i, + Balance: big.NewInt(int64(i)), + Root: stRoot, + CodeHash: codehash, + }) + elem := &kv{key, value, false} + accTrie.Update(elem.k, elem.v) + entries = append(entries, elem) + // we reuse the same one for all accounts + storageTries[common.BytesToHash(key)] = stTrie + storageEntries[common.BytesToHash(key)] = stEntries + } + sort.Sort(entries) + stTrie.Commit(nil) + accTrie.Commit(nil) + return accTrie, entries, storageTries, storageEntries +} + +// makeStorageTrie fills a storage trie with n items, returning the +// not-yet-committed trie and the sorted entries +func makeStorageTrie(n int, db *trie.Database) (*trie.Trie, entrySlice) { + trie, _ := trie.New(common.Hash{}, db) + var entries entrySlice + for i := uint64(1); i <= uint64(n); i++ { + // store 'i' at slot 'i' + slotValue := key32(i) + rlpSlotValue, _ := rlp.EncodeToBytes(common.TrimLeftZeroes(slotValue[:])) + + slotKey := key32(i) + key := crypto.Keccak256Hash(slotKey[:]) + + elem := &kv{key[:], rlpSlotValue, false} + trie.Update(elem.k, elem.v) + entries = append(entries, elem) + } + sort.Sort(entries) + return trie, entries +} diff --git a/eth/state_accessor.go b/eth/state_accessor.go new file mode 100644 index 0000000000..869b3d7636 --- /dev/null +++ b/eth/state_accessor.go @@ -0,0 +1,230 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum 
library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "errors" + "fmt" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/trie" +) + +// stateAtBlock retrieves the state database associated with a certain block. +// If no state is locally available for the given block, a number of blocks are +// attempted to be reexecuted to generate the desired state. +func (eth *Ethereum) stateAtBlock(block *types.Block, reexec uint64) (statedb *state.StateDB, release func(), err error) { + // If we have the state fully available, use that + statedb, err = eth.blockchain.StateAt(block.Root()) + if err == nil { + return statedb, func() {}, nil + } + // Otherwise try to reexec blocks until we find a state or reach our limit + origin := block.NumberU64() + database := state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) + + for i := uint64(0); i < reexec; i++ { + if block.NumberU64() == 0 { + return nil, nil, errors.New("genesis state is missing") + } + parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) + if parent == nil { + return nil, nil, fmt.Errorf("missing block %v %d", block.ParentHash(), block.NumberU64()-1) + } + block = parent + + statedb, err = state.New(block.Root(), database, nil) + if err == nil { + break + } + } + if err != nil { + switch err.(type) { + case *trie.MissingNodeError: + return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + default: + return nil, nil, err + } + } + // State was available at historical point, regenerate + var ( + start = time.Now() + logged time.Time + parent common.Hash + ) + defer func() { + if err != nil && parent != (common.Hash{}) { + database.TrieDB().Dereference(parent) + } + }() + for block.NumberU64() < origin { + // Print progress logs if long enough time elapsed + if time.Since(logged) > 8*time.Second { + log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start)) + logged = time.Now() + } + // Retrieve the next block to regenerate and process it + if block = eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil { + return nil, nil, fmt.Errorf("block #%d not found", block.NumberU64()+1) + } + _, _, _, err := eth.blockchain.Processor().Process(block, statedb, vm.Config{}) + if err != nil { + return nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) + } + // Finalize the state so any modifications are written to the trie + root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number())) + if err != nil { 
+ return nil, nil, err + } + statedb, err = state.New(root, database, nil) + if err != nil { + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) + } + database.TrieDB().Reference(root, common.Hash{}) + if parent != (common.Hash{}) { + database.TrieDB().Dereference(parent) + } + parent = root + } + nodes, imgs := database.TrieDB().Size() + log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) + return statedb, func() { database.TrieDB().Dereference(parent) }, nil +} + +// statesInRange retrieves a batch of state databases associated with the specific +// block ranges. If no state is locally available for the given range, a number of +// blocks are attempted to be reexecuted to generate the ancestor state. +func (eth *Ethereum) statesInRange(fromBlock, toBlock *types.Block, reexec uint64) (states []*state.StateDB, release func(), err error) { + statedb, err := eth.blockchain.StateAt(fromBlock.Root()) + if err != nil { + statedb, _, err = eth.stateAtBlock(fromBlock, reexec) + } + if err != nil { + return nil, nil, err + } + states = append(states, statedb.Copy()) + + var ( + logged time.Time + parent common.Hash + start = time.Now() + refs = []common.Hash{fromBlock.Root()} + database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16, Preimages: true}) + ) + // Release all resources(including the states referenced by `stateAtBlock`) + // if error is returned. + defer func() { + if err != nil { + for _, ref := range refs { + database.TrieDB().Dereference(ref) + } + } + }() + for i := fromBlock.NumberU64() + 1; i <= toBlock.NumberU64(); i++ { + // Print progress logs if long enough time elapsed + if time.Since(logged) > 8*time.Second { + logged = time.Now() + log.Info("Regenerating historical state", "block", i, "target", fromBlock.NumberU64(), "remaining", toBlock.NumberU64()-i, "elapsed", time.Since(start)) + } + // Retrieve the next block to regenerate and process it + block := eth.blockchain.GetBlockByNumber(i) + if block == nil { + return nil, nil, fmt.Errorf("block #%d not found", i) + } + _, _, _, err := eth.blockchain.Processor().Process(block, statedb, vm.Config{}) + if err != nil { + return nil, nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) + } + // Finalize the state so any modifications are written to the trie + root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(block.Number())) + if err != nil { + return nil, nil, err + } + statedb, err := eth.blockchain.StateAt(root) + if err != nil { + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) + } + states = append(states, statedb.Copy()) + + // Reference the trie twice, once for us, once for the tracer + database.TrieDB().Reference(root, common.Hash{}) + database.TrieDB().Reference(root, common.Hash{}) + refs = append(refs, root) + + // Dereference all past tries we ourselves are done working with + if parent != (common.Hash{}) { + database.TrieDB().Dereference(parent) + } + parent = root + } + // release is handler to release all states referenced, including + // the one referenced in `stateAtBlock`. + release = func() { + for _, ref := range refs { + database.TrieDB().Dereference(ref) + } + } + return states, release, nil +} + +// stateAtTransaction returns the execution environment of a certain transaction. 
+func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { + // Short circuit if it's genesis block. + if block.NumberU64() == 0 { + return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") + } + // Create the parent state database + parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) + if parent == nil { + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + } + statedb, release, err := eth.stateAtBlock(parent, reexec) + if err != nil { + return nil, vm.BlockContext{}, nil, nil, err + } + if txIndex == 0 && len(block.Transactions()) == 0 { + return nil, vm.BlockContext{}, statedb, release, nil + } + // Recompute transactions up to the target index. + signer := types.MakeSigner(eth.blockchain.Config(), block.Number()) + for idx, tx := range block.Transactions() { + // Assemble the transaction call message and return if the requested offset + msg, _ := tx.AsMessage(signer) + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) + if idx == txIndex { + return msg, context, statedb, release, nil + } + // Not yet the searched for transaction, execute on top of the current state + vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{}) + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { + release() + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + } + // Ensure any modifications are committed to the state + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) + } + release() + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) +} diff --git a/eth/sync.go b/eth/sync.go index 26badd1e21..dc72e88388 100644 --- a/eth/sync.go +++ b/eth/sync.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" ) @@ -40,12 +41,12 @@ const ( ) type txsync struct { - p *peer + p *eth.Peer txs []*types.Transaction } // syncTransactions starts sending all currently pending transactions to the given peer. -func (pm *ProtocolManager) syncTransactions(p *peer) { +func (h *handler) syncTransactions(p *eth.Peer) { // Assemble the set of transaction to broadcast or announce to the remote // peer. Fun fact, this is quite an expensive operation as it needs to sort // the transactions if the sorting is not cached yet. However, with a random @@ -53,7 +54,7 @@ func (pm *ProtocolManager) syncTransactions(p *peer) { // // TODO(karalabe): Figure out if we could get away with random order somehow var txs types.Transactions - pending, _ := pm.txpool.Pending() + pending, _ := h.txpool.Pending() for _, batch := range pending { txs = append(txs, batch...) } @@ -63,7 +64,7 @@ func (pm *ProtocolManager) syncTransactions(p *peer) { // The eth/65 protocol introduces proper transaction announcements, so instead // of dripping transactions across multiple peers, just send the entire list as // an announcement and let the remote side decide what they need (likely nothing). 
- if p.version >= eth65 { + if p.Version() >= eth.ETH65 { hashes := make([]common.Hash, len(txs)) for i, tx := range txs { hashes[i] = tx.Hash() @@ -73,8 +74,8 @@ func (pm *ProtocolManager) syncTransactions(p *peer) { } // Out of luck, peer is running legacy protocols, drop the txs over select { - case pm.txsyncCh <- &txsync{p: p, txs: txs}: - case <-pm.quitSync: + case h.txsyncCh <- &txsync{p: p, txs: txs}: + case <-h.quitSync: } } @@ -82,8 +83,8 @@ func (pm *ProtocolManager) syncTransactions(p *peer) { // connection. When a new peer appears, we relay all currently pending // transactions. In order to minimise egress bandwidth usage, we send // the transactions in small packs to one peer at a time. -func (pm *ProtocolManager) txsyncLoop64() { - defer pm.wg.Done() +func (h *handler) txsyncLoop64() { + defer h.wg.Done() var ( pending = make(map[enode.ID]*txsync) @@ -94,7 +95,7 @@ func (pm *ProtocolManager) txsyncLoop64() { // send starts a sending a pack of transactions from the sync. send := func(s *txsync) { - if s.p.version >= eth65 { + if s.p.Version() >= eth.ETH65 { panic("initial transaction syncer running on eth/65+") } // Fill pack with transactions up to the target size. @@ -108,14 +109,13 @@ func (pm *ProtocolManager) txsyncLoop64() { // Remove the transactions that will be sent. s.txs = s.txs[:copy(s.txs, s.txs[len(pack.txs):])] if len(s.txs) == 0 { - delete(pending, s.p.ID()) + delete(pending, s.p.Peer.ID()) } // Send the pack in the background. s.p.Log().Trace("Sending batch of transactions", "count", len(pack.txs), "bytes", size) sending = true - go func() { done <- pack.p.SendTransactions64(pack.txs) }() + go func() { done <- pack.p.SendTransactions(pack.txs) }() } - // pick chooses the next pending sync. pick := func() *txsync { if len(pending) == 0 { @@ -132,8 +132,8 @@ func (pm *ProtocolManager) txsyncLoop64() { for { select { - case s := <-pm.txsyncCh: - pending[s.p.ID()] = s + case s := <-h.txsyncCh: + pending[s.p.Peer.ID()] = s if !sending { send(s) } @@ -142,13 +142,13 @@ func (pm *ProtocolManager) txsyncLoop64() { // Stop tracking peers that cause send failures. if err != nil { pack.p.Log().Debug("Transaction send failed", "err", err) - delete(pending, pack.p.ID()) + delete(pending, pack.p.Peer.ID()) } // Schedule the next send. if s := pick(); s != nil { send(s) } - case <-pm.quitSync: + case <-h.quitSync: return } } @@ -156,7 +156,7 @@ func (pm *ProtocolManager) txsyncLoop64() { // chainSyncer coordinates blockchain sync components. type chainSyncer struct { - pm *ProtocolManager + handler *handler force *time.Timer forced bool // true when force timer fired peerEventCh chan struct{} @@ -166,15 +166,15 @@ type chainSyncer struct { // chainSyncOp is a scheduled sync operation. type chainSyncOp struct { mode downloader.SyncMode - peer *peer + peer *eth.Peer td *big.Int head common.Hash } // newChainSyncer creates a chainSyncer. -func newChainSyncer(pm *ProtocolManager) *chainSyncer { +func newChainSyncer(handler *handler) *chainSyncer { return &chainSyncer{ - pm: pm, + handler: handler, peerEventCh: make(chan struct{}), } } @@ -182,23 +182,24 @@ func newChainSyncer(pm *ProtocolManager) *chainSyncer { // handlePeerEvent notifies the syncer about a change in the peer set. // This is called for new peers and every time a peer announces a new // chain head. 
-func (cs *chainSyncer) handlePeerEvent(p *peer) bool { +func (cs *chainSyncer) handlePeerEvent(peer *eth.Peer) bool { select { case cs.peerEventCh <- struct{}{}: return true - case <-cs.pm.quitSync: + case <-cs.handler.quitSync: return false } } // loop runs in its own goroutine and launches the sync when necessary. func (cs *chainSyncer) loop() { - defer cs.pm.wg.Done() + defer cs.handler.wg.Done() - cs.pm.blockFetcher.Start() - cs.pm.txFetcher.Start() - defer cs.pm.blockFetcher.Stop() - defer cs.pm.txFetcher.Stop() + cs.handler.blockFetcher.Start() + cs.handler.txFetcher.Start() + defer cs.handler.blockFetcher.Stop() + defer cs.handler.txFetcher.Stop() + defer cs.handler.downloader.Terminate() // The force timer lowers the peer count threshold down to one when it fires. // This ensures we'll always start sync even if there aren't enough peers. @@ -209,7 +210,6 @@ func (cs *chainSyncer) loop() { if op := cs.nextSyncOp(); op != nil { cs.startSync(op) } - select { case <-cs.peerEventCh: // Peer information changed, recheck. @@ -220,14 +220,13 @@ func (cs *chainSyncer) loop() { case <-cs.force.C: cs.forced = true - case <-cs.pm.quitSync: + case <-cs.handler.quitSync: // Disable all insertion on the blockchain. This needs to happen before // terminating the downloader because the downloader waits for blockchain // inserts, and these can take a long time to finish. - cs.pm.blockchain.StopInsert() - cs.pm.downloader.Terminate() + cs.handler.chain.StopInsert() + cs.handler.downloader.Terminate() if cs.doneCh != nil { - // Wait for the current sync to end. <-cs.doneCh } return @@ -245,19 +244,22 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { minPeers := defaultMinSyncPeers if cs.forced { minPeers = 1 - } else if minPeers > cs.pm.maxPeers { - minPeers = cs.pm.maxPeers + } else if minPeers > cs.handler.maxPeers { + minPeers = cs.handler.maxPeers } - if cs.pm.peers.Len() < minPeers { + if cs.handler.peers.len() < minPeers { return nil } - - // We have enough peers, check TD. - peer := cs.pm.peers.BestPeer() + // We have enough peers, check TD + peer := cs.handler.peers.peerWithHighestTD() if peer == nil { return nil } mode, ourTD := cs.modeAndLocalHead() + if mode == downloader.FastSync && atomic.LoadUint32(&cs.handler.snapSync) == 1 { + // Fast sync via the snap protocol + mode = downloader.SnapSync + } op := peerToSyncOp(mode, peer) if op.td.Cmp(ourTD) <= 0 { return nil // We're in sync. 
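// Illustrative sketch (not the handler's actual code) of the decision made in
// nextSyncOp above: upgrade fast sync to snap sync when the snap flag is set,
// and only start a sync if the best peer's total difficulty is ahead of ours.
// All names below (chooseSync, peerTD, ourTD, snapSyncing) are placeholders
// introduced for this example.
package main

import (
	"fmt"
	"math/big"
)

type syncMode int

const (
	fullSync syncMode = iota
	fastSync
	snapSync
)

// chooseSync returns the sync mode to use and whether a sync should start at all.
func chooseSync(mode syncMode, snapSyncing bool, peerTD, ourTD *big.Int) (syncMode, bool) {
	if mode == fastSync && snapSyncing {
		mode = snapSync // fast sync served via the snap protocol
	}
	if peerTD.Cmp(ourTD) <= 0 {
		return mode, false // already in sync, nothing to do
	}
	return mode, true
}

func main() {
	mode, start := chooseSync(fastSync, true, big.NewInt(200), big.NewInt(100))
	fmt.Println(mode == snapSync, start) // true true
}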
@@ -265,42 +267,42 @@ func (cs *chainSyncer) nextSyncOp() *chainSyncOp { return op } -func peerToSyncOp(mode downloader.SyncMode, p *peer) *chainSyncOp { +func peerToSyncOp(mode downloader.SyncMode, p *eth.Peer) *chainSyncOp { peerHead, peerTD := p.Head() return &chainSyncOp{mode: mode, peer: p, td: peerTD, head: peerHead} } func (cs *chainSyncer) modeAndLocalHead() (downloader.SyncMode, *big.Int) { // If we're in fast sync mode, return that directly - if atomic.LoadUint32(&cs.pm.fastSync) == 1 { - block := cs.pm.blockchain.CurrentFastBlock() - td := cs.pm.blockchain.GetTdByHash(block.Hash()) + if atomic.LoadUint32(&cs.handler.fastSync) == 1 { + block := cs.handler.chain.CurrentFastBlock() + td := cs.handler.chain.GetTdByHash(block.Hash()) return downloader.FastSync, td } // We are probably in full sync, but we might have rewound to before the // fast sync pivot, check if we should reenable - if pivot := rawdb.ReadLastPivotNumber(cs.pm.chaindb); pivot != nil { - if head := cs.pm.blockchain.CurrentBlock(); head.NumberU64() < *pivot { - block := cs.pm.blockchain.CurrentFastBlock() - td := cs.pm.blockchain.GetTdByHash(block.Hash()) + if pivot := rawdb.ReadLastPivotNumber(cs.handler.database); pivot != nil { + if head := cs.handler.chain.CurrentBlock(); head.NumberU64() < *pivot { + block := cs.handler.chain.CurrentFastBlock() + td := cs.handler.chain.GetTdByHash(block.Hash()) return downloader.FastSync, td } } // Nope, we're really full syncing - head := cs.pm.blockchain.CurrentHeader() - td := cs.pm.blockchain.GetTd(head.Hash(), head.Number.Uint64()) + head := cs.handler.chain.CurrentHeader() + td := cs.handler.chain.GetTd(head.Hash(), head.Number.Uint64()) return downloader.FullSync, td } // startSync launches doSync in a new goroutine. func (cs *chainSyncer) startSync(op *chainSyncOp) { cs.doneCh = make(chan error, 1) - go func() { cs.doneCh <- cs.pm.doSync(op) }() + go func() { cs.doneCh <- cs.handler.doSync(op) }() } // doSync synchronizes the local blockchain with a remote peer. -func (pm *ProtocolManager) doSync(op *chainSyncOp) error { - if op.mode == downloader.FastSync { +func (h *handler) doSync(op *chainSyncOp) error { + if op.mode == downloader.FastSync || op.mode == downloader.SnapSync { // Before launch the fast sync, we have to ensure user uses the same // txlookup limit. // The main concern here is: during the fast sync Geth won't index the @@ -310,35 +312,37 @@ func (pm *ProtocolManager) doSync(op *chainSyncOp) error { // has been indexed. So here for the user-experience wise, it's non-optimal // that user can't change limit during the fast sync. If changed, Geth // will just blindly use the original one. 
- limit := pm.blockchain.TxLookupLimit() - if stored := rawdb.ReadFastTxLookupLimit(pm.chaindb); stored == nil { - rawdb.WriteFastTxLookupLimit(pm.chaindb, limit) + limit := h.chain.TxLookupLimit() + if stored := rawdb.ReadFastTxLookupLimit(h.database); stored == nil { + rawdb.WriteFastTxLookupLimit(h.database, limit) } else if *stored != limit { - pm.blockchain.SetTxLookupLimit(*stored) + h.chain.SetTxLookupLimit(*stored) log.Warn("Update txLookup limit", "provided", limit, "updated", *stored) } } // Run the sync cycle, and disable fast sync if we're past the pivot block - err := pm.downloader.Synchronise(op.peer.id, op.head, op.td, op.mode) + err := h.downloader.Synchronise(op.peer.ID(), op.head, op.td, op.mode) if err != nil { return err } - if atomic.LoadUint32(&pm.fastSync) == 1 { + if atomic.LoadUint32(&h.fastSync) == 1 { log.Info("Fast sync complete, auto disabling") - atomic.StoreUint32(&pm.fastSync, 0) + atomic.StoreUint32(&h.fastSync, 0) + } + if atomic.LoadUint32(&h.snapSync) == 1 { + log.Info("Snap sync complete, auto disabling") + atomic.StoreUint32(&h.snapSync, 0) } - // If we've successfully finished a sync cycle and passed any required checkpoint, // enable accepting transactions from the network. - head := pm.blockchain.CurrentBlock() - if head.NumberU64() >= pm.checkpointNumber { + head := h.chain.CurrentBlock() + if head.NumberU64() >= h.checkpointNumber { // Checkpoint passed, sanity check the timestamp to have a fallback mechanism // for non-checkpointed (number = 0) private networks. if head.Time() >= uint64(time.Now().AddDate(0, -1, 0).Unix()) { - atomic.StoreUint32(&pm.acceptTxs, 1) + atomic.StoreUint32(&h.acceptTxs, 1) } } - if head.NumberU64() > 0 { // We've completed a sync cycle, notify all peers of new state. This path is // essential in star-topology networks where a gateway node needs to notify @@ -346,8 +350,7 @@ func (pm *ProtocolManager) doSync(op *chainSyncOp) error { // scenario will most often crop up in private and hackathon networks with // degenerate connectivity, but it should be healthy for the mainnet too to // more reliably update peers or the local TD state. - pm.BroadcastBlock(head, false) + h.BroadcastBlock(head, false) } - return nil } diff --git a/eth/sync_test.go b/eth/sync_test.go index ac1e5fad1b..9cc806b18a 100644 --- a/eth/sync_test.go +++ b/eth/sync_test.go @@ -22,43 +22,59 @@ import ( "time" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" ) -func TestFastSyncDisabling63(t *testing.T) { testFastSyncDisabling(t, 63) } +// Tests that fast sync is disabled after a successful sync cycle. func TestFastSyncDisabling64(t *testing.T) { testFastSyncDisabling(t, 64) } func TestFastSyncDisabling65(t *testing.T) { testFastSyncDisabling(t, 65) } // Tests that fast sync gets disabled as soon as a real block is successfully // imported into the blockchain. 
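// Simplified, self-contained mirror of the bookkeeping at the end of doSync
// above: fast/snap sync are disabled atomically after the first successful
// cycle, and transaction acceptance is switched on once the head has passed
// the checkpoint and is less than a month old. The syncFlags struct and the
// finishSyncCycle name are placeholders for this sketch, not handler fields.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type syncFlags struct {
	fastSync  uint32 // 1 while fast sync is still allowed
	snapSync  uint32 // 1 while snap sync is still allowed
	acceptTxs uint32 // 1 once network transactions may be accepted
}

func (f *syncFlags) finishSyncCycle(headNumber, checkpoint uint64, headTime time.Time) {
	if atomic.LoadUint32(&f.fastSync) == 1 {
		atomic.StoreUint32(&f.fastSync, 0)
	}
	if atomic.LoadUint32(&f.snapSync) == 1 {
		atomic.StoreUint32(&f.snapSync, 0)
	}
	// Mirror the checkpoint + "younger than one month" fallback check.
	if headNumber >= checkpoint && headTime.After(time.Now().AddDate(0, -1, 0)) {
		atomic.StoreUint32(&f.acceptTxs, 1)
	}
}

func main() {
	f := &syncFlags{fastSync: 1}
	f.finishSyncCycle(100, 10, time.Now())
	fmt.Println(f.fastSync, f.acceptTxs) // 0 1
}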
-func testFastSyncDisabling(t *testing.T, protocol int) { +func testFastSyncDisabling(t *testing.T, protocol uint) { t.Parallel() - // Create a pristine protocol manager, check that fast sync is left enabled - pmEmpty, _ := newTestProtocolManagerMust(t, downloader.FastSync, 0, nil, nil) - if atomic.LoadUint32(&pmEmpty.fastSync) == 0 { + // Create an empty handler and ensure it's in fast sync mode + empty := newTestHandler() + if atomic.LoadUint32(&empty.handler.fastSync) == 0 { t.Fatalf("fast sync disabled on pristine blockchain") } - // Create a full protocol manager, check that fast sync gets disabled - pmFull, _ := newTestProtocolManagerMust(t, downloader.FastSync, 1024, nil, nil) - if atomic.LoadUint32(&pmFull.fastSync) == 1 { + defer empty.close() + + // Create a full handler and ensure fast sync ends up disabled + full := newTestHandlerWithBlocks(1024) + if atomic.LoadUint32(&full.handler.fastSync) == 1 { t.Fatalf("fast sync not disabled on non-empty blockchain") } + defer full.close() + + // Sync up the two handlers + emptyPipe, fullPipe := p2p.MsgPipe() + defer emptyPipe.Close() + defer fullPipe.Close() - // Sync up the two peers - io1, io2 := p2p.MsgPipe() - go pmFull.handle(pmFull.newPeer(protocol, p2p.NewPeer(enode.ID{}, "empty", nil), io2, pmFull.txpool.Get)) - go pmEmpty.handle(pmEmpty.newPeer(protocol, p2p.NewPeer(enode.ID{}, "full", nil), io1, pmEmpty.txpool.Get)) + emptyPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{1}, "", nil), emptyPipe, empty.txpool) + fullPeer := eth.NewPeer(protocol, p2p.NewPeer(enode.ID{2}, "", nil), fullPipe, full.txpool) + defer emptyPeer.Close() + defer fullPeer.Close() + go empty.handler.runEthPeer(emptyPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(empty.handler), peer) + }) + go full.handler.runEthPeer(fullPeer, func(peer *eth.Peer) error { + return eth.Handle((*ethHandler)(full.handler), peer) + }) + // Wait a bit for the above handlers to start time.Sleep(250 * time.Millisecond) - op := peerToSyncOp(downloader.FastSync, pmEmpty.peers.BestPeer()) - if err := pmEmpty.doSync(op); err != nil { - t.Fatal("sync failed:", err) - } // Check that fast sync was disabled - if atomic.LoadUint32(&pmEmpty.fastSync) == 1 { + op := peerToSyncOp(downloader.FastSync, empty.handler.peers.peerWithHighestTD()) + if err := empty.handler.doSync(op); err != nil { + t.Fatal("sync failed:", err) + } + if atomic.LoadUint32(&empty.handler.fastSync) == 1 { t.Fatalf("fast sync not disabled after successful synchronisation") } } diff --git a/eth/api_tracer.go b/eth/tracers/api.go similarity index 53% rename from eth/api_tracer.go rename to eth/tracers/api.go index 90d4a95c14..61fe055689 100644 --- a/eth/api_tracer.go +++ b/eth/tracers/api.go @@ -1,4 +1,4 @@ -// Copyright 2017 The go-ethereum Authors +// Copyright 2021 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package eth +package tracers import ( "bufio" @@ -30,18 +30,18 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" - "github.com/ethereum/go-ethereum/eth/tracers" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" - "github.com/ethereum/go-ethereum/trie" ) const ( @@ -55,6 +55,105 @@ const ( defaultTraceReexec = uint64(128) ) +// Backend interface provides the common API services (that are provided by +// both full and light clients) with access to necessary functions. +type Backend interface { + HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) + HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) + BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) + BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) + GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) + RPCGasCap() uint64 + ChainConfig() *params.ChainConfig + Engine() consensus.Engine + ChainDb() ethdb.Database + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) + StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) +} + +// API is the collection of tracing APIs exposed over the private debugging endpoint. +type API struct { + backend Backend +} + +// NewAPI creates a new API definition for the tracing methods of the Ethereum service. +func NewAPI(backend Backend) *API { + return &API{backend: backend} +} + +type chainContext struct { + api *API + ctx context.Context +} + +func (context *chainContext) Engine() consensus.Engine { + return context.api.backend.Engine() +} + +func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.Header { + header, err := context.api.backend.HeaderByNumber(context.ctx, rpc.BlockNumber(number)) + if err != nil { + return nil + } + if header.Hash() == hash { + return header + } + header, err = context.api.backend.HeaderByHash(context.ctx, hash) + if err != nil { + return nil + } + return header +} + +// chainContext construts the context reader which is used by the evm for reading +// the necessary chain context. +func (api *API) chainContext(ctx context.Context) core.ChainContext { + return &chainContext{api: api, ctx: ctx} +} + +// blockByNumber is the wrapper of the chain access function offered by the backend. +// It will return an error if the block is not found. +func (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + block, err := api.backend.BlockByNumber(ctx, number) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block #%d not found", number) + } + return block, nil +} + +// blockByHash is the wrapper of the chain access function offered by the backend. 
+// It will return an error if the block is not found. +func (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + block, err := api.backend.BlockByHash(ctx, hash) + if err != nil { + return nil, err + } + if block == nil { + return nil, fmt.Errorf("block %s not found", hash.Hex()) + } + return block, nil +} + +// blockByNumberAndHash is the wrapper of the chain access function offered by +// the backend. It will return an error if the block is not found. +// +// Note this function is friendly for the light client which can only retrieve the +// historical(before the CHT) header/block by number. +func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) { + block, err := api.blockByNumber(ctx, number) + if err != nil { + return nil, err + } + if block.Hash() == hash { + return block, nil + } + return api.blockByHash(ctx, hash) +} + // TraceConfig holds extra parameters to trace functions. type TraceConfig struct { *vm.LogConfig @@ -65,7 +164,7 @@ type TraceConfig struct { // StdTraceConfig holds extra parameters to standard-json trace functions. type StdTraceConfig struct { - *vm.LogConfig + vm.LogConfig Reexec *uint64 TxHash common.Hash } @@ -81,7 +180,6 @@ type txTraceResult struct { type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from - rootref common.Hash // Trie root reference held for this task results []*txTraceResult // Trace results procudes by the task } @@ -102,32 +200,14 @@ type txTraceTask struct { // TraceChain returns the structured logs created during the execution of EVM // between two blocks (excluding start) and returns them as a JSON object. -func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) { - // Fetch the block interval that we want to trace - var from, to *types.Block - - switch start { - case rpc.PendingBlockNumber: - from = api.eth.miner.PendingBlock() - case rpc.LatestBlockNumber: - from = api.eth.blockchain.CurrentBlock() - default: - from = api.eth.blockchain.GetBlockByNumber(uint64(start)) - } - switch end { - case rpc.PendingBlockNumber: - to = api.eth.miner.PendingBlock() - case rpc.LatestBlockNumber: - to = api.eth.blockchain.CurrentBlock() - default: - to = api.eth.blockchain.GetBlockByNumber(uint64(end)) - } - // Trace the chain if we've found all our blocks - if from == nil { - return nil, fmt.Errorf("starting block #%d not found", start) +func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, config *TraceConfig) (*rpc.Subscription, error) { // Fetch the block interval that we want to trace + from, err := api.blockByNumber(ctx, start) + if err != nil { + return nil, err } - if to == nil { - return nil, fmt.Errorf("end block #%d not found", end) + to, err := api.blockByNumber(ctx, end) + if err != nil { + return nil, err } if from.Number().Cmp(to.Number()) >= 0 { return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start) @@ -138,7 +218,7 @@ func (api *PrivateDebugAPI) TraceChain(ctx context.Context, start, end rpc.Block // traceChain configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The return value will be one item // per transaction, dependent on the requested tracer. 
-func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { +func (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { // Tracing a chain is a **long** operation, only do with subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { @@ -146,46 +226,25 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl } sub := notifier.CreateSubscription() - // Ensure we have a valid starting state before doing any work - origin := start.NumberU64() - database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16, "") // Chain tracing will probably start at genesis - - if number := start.NumberU64(); number > 0 { - start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1) - if start == nil { - return nil, fmt.Errorf("parent block #%d not found", number-1) - } + // Shift the border to a block ahead in order to get the states + // before these blocks. + endBlock, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(end.NumberU64()-1), end.ParentHash()) + if err != nil { + return nil, err + } + // Prepare all the states for tracing. Note this procedure can take very + // long time. Timeout mechanism is necessary. + reexec := defaultTraceReexec + if config != nil && config.Reexec != nil { + reexec = *config.Reexec } - statedb, err := state.New(start.Root(), database, nil) + states, release, err := api.backend.StatesInRange(ctx, start, endBlock, reexec) if err != nil { - // If the starting state is missing, allow some number of blocks to be reexecuted - reexec := defaultTraceReexec - if config != nil && config.Reexec != nil { - reexec = *config.Reexec - } - // Find the most recent block that has the state available - for i := uint64(0); i < reexec; i++ { - start = api.eth.blockchain.GetBlock(start.ParentHash(), start.NumberU64()-1) - if start == nil { - break - } - if statedb, err = state.New(start.Root(), database, nil); err == nil { - break - } - } - // If we still don't have the state available, bail out - if err != nil { - switch err.(type) { - case *trie.MissingNodeError: - return nil, errors.New("required historical state unavailable") - default: - return nil, err - } - } + return nil, err } - // Execute all the transaction contained within the chain concurrently for each block - blocks := int(end.NumberU64() - origin) + defer release() // Release all the resources in the last step. 
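// Illustrative sketch of the acquire/release contract used by StatesInRange
// and StateAtBlock above: the backend hands back states together with a
// release callback that drops the underlying trie references once tracing is
// done. The refDB type and counters below are placeholders, not state.StateDB.
package main

import "fmt"

type refDB struct{ refs int }

func (db *refDB) reference()   { db.refs++ }
func (db *refDB) dereference() { db.refs-- }

// statesInRange stands in for Backend.StatesInRange: every state it returns
// keeps a reference alive, and the returned release func drops them all.
func statesInRange(db *refDB, n int) ([]int, func(), error) {
	states := make([]int, n)
	for i := range states {
		db.reference()
		states[i] = i
	}
	release := func() {
		for range states {
			db.dereference()
		}
	}
	return states, release, nil
}

func main() {
	db := &refDB{}
	states, release, err := statesInRange(db, 3)
	if err != nil {
		panic(err)
	}
	defer release() // same pattern as the `defer release()` in traceChain

	fmt.Println(len(states), db.refs) // 3 3
}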
+ blocks := int(end.NumberU64() - start.NumberU64()) threads := runtime.NumCPU() if threads > blocks { threads = blocks @@ -202,21 +261,19 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl // Fetch and execute the next block trace tasks for task := range tasks { - signer := types.MakeSigner(api.eth.blockchain.Config(), task.block.Number()) - + signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number()) + blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { msg, _ := tx.AsMessage(signer) - vmctx := core.NewEVMContext(msg, task.block.Header(), api.eth.blockchain, nil) - - res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config) + res, err := api.traceTx(ctx, msg, blockCtx, task.statedb, config) if err != nil { task.results[i] = &txTraceResult{Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) break } // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - task.statedb.Finalise(api.eth.blockchain.Config().IsEIP158(task.block.Number())) + task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) task.results[i] = &txTraceResult{Result: res} } // Stream the result back to the user or abort on teardown @@ -237,7 +294,6 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl number uint64 traced uint64 failed error - proot common.Hash ) // Ensure everything is properly cleaned up on any exit path defer func() { @@ -264,59 +320,23 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl } // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second { - if number > origin { - nodes, imgs := database.TrieDB().Size() - log.Info("Tracing chain segment", "start", origin, "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin), "memory", nodes+imgs) - } else { - log.Info("Preparing state for chain trace", "block", number, "start", origin, "elapsed", time.Since(begin)) - } logged = time.Now() + log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } // Retrieve the next block to trace - block := api.eth.blockchain.GetBlockByNumber(number) - if block == nil { - failed = fmt.Errorf("block #%d not found", number) - break - } - // Send the block over to the concurrent tracers (if not in the fast-forward phase) - if number > origin { - txs := block.Transactions() - - select { - case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: block, rootref: proot, results: make([]*txTraceResult, len(txs))}: - case <-notifier.Closed(): - return - } - traced += uint64(len(txs)) - } - // Generate the next state snapshot fast without tracing - _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{}) + block, err := api.blockByNumber(ctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number())) - if err != nil { - failed = err - break - } - if err := statedb.Reset(root); err != nil { - failed = err - break - } - // Reference the trie twice, once for us, once for the tracer - database.TrieDB().Reference(root, common.Hash{}) - if 
number >= origin { - database.TrieDB().Reference(root, common.Hash{}) - } - // Dereference all past tries we ourselves are done working with - if proot != (common.Hash{}) { - database.TrieDB().Dereference(proot) + // Send the block over to the concurrent tracers (if not in the fast-forward phase) + txs := block.Transactions() + select { + case tasks <- &blockTraceTask{statedb: states[int(number-start.NumberU64()-1)], block: block, results: make([]*txTraceResult, len(txs))}: + case <-notifier.Closed(): + return } - proot = root - - // TODO(karalabe): Do we need the preimages? Won't they accumulate too much? + traced += uint64(len(txs)) } }() @@ -324,7 +344,7 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl go func() { var ( done = make(map[uint64]*blockTraceResult) - next = origin + 1 + next = start.NumberU64() + 1 ) for res := range results { // Queue up next received result @@ -335,9 +355,6 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl } done[uint64(result.Block)] = result - // Dereference any paret tries held in memory by this task - database.TrieDB().Dereference(res.rootref) - // Stream completed traces to the user, aborting on the first error for result, ok := done[next]; ok; result, ok = done[next] { if len(result.Traces) > 0 || next == end.NumberU64() { @@ -353,38 +370,27 @@ func (api *PrivateDebugAPI) traceChain(ctx context.Context, start, end *types.Bl // TraceBlockByNumber returns the structured logs created during the execution of // EVM and returns them as a JSON object. -func (api *PrivateDebugAPI) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) { - // Fetch the block that we want to trace - var block *types.Block - - switch number { - case rpc.PendingBlockNumber: - block = api.eth.miner.PendingBlock() - case rpc.LatestBlockNumber: - block = api.eth.blockchain.CurrentBlock() - default: - block = api.eth.blockchain.GetBlockByNumber(uint64(number)) - } - // Trace the block if it was found - if block == nil { - return nil, fmt.Errorf("block #%d not found", number) +func (api *API) TraceBlockByNumber(ctx context.Context, number rpc.BlockNumber, config *TraceConfig) ([]*txTraceResult, error) { + block, err := api.blockByNumber(ctx, number) + if err != nil { + return nil, err } return api.traceBlock(ctx, block, config) } // TraceBlockByHash returns the structured logs created during the execution of // EVM and returns them as a JSON object. -func (api *PrivateDebugAPI) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { - block := api.eth.blockchain.GetBlockByHash(hash) - if block == nil { - return nil, fmt.Errorf("block %#x not found", hash) +func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { + block, err := api.blockByHash(ctx, hash) + if err != nil { + return nil, err } return api.traceBlock(ctx, block, config) } // TraceBlock returns the structured logs created during the execution of EVM // and returns them as a JSON object. 
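// Small, self-contained sketch of the reordering trick used when streaming
// traceChain results above: block traces finish out of order, so completed
// results are parked in a map and flushed strictly in block-number order.
// The function and channel names are placeholders for this example.
package main

import "fmt"

func streamInOrder(results <-chan int, first int, emit func(int)) {
	done := make(map[int]bool)
	next := first
	for n := range results {
		done[n] = true
		// Flush every consecutive result that is now available.
		for done[next] {
			emit(next)
			delete(done, next)
			next++
		}
	}
}

func main() {
	results := make(chan int, 8)
	for _, n := range []int{3, 1, 2, 5, 4} {
		results <- n
	}
	close(results)
	streamInOrder(results, 1, func(n int) { fmt.Println("block", n) }) // 1 2 3 4 5
}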
-func (api *PrivateDebugAPI) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) { +func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) { block := new(types.Block) if err := rlp.Decode(bytes.NewReader(blob), block); err != nil { return nil, fmt.Errorf("could not decode block: %v", err) @@ -394,7 +400,7 @@ func (api *PrivateDebugAPI) TraceBlock(ctx context.Context, blob []byte, config // TraceBlockFromFile returns the structured logs created during the execution of // EVM and returns them as a JSON object. -func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) { +func (api *API) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) { blob, err := ioutil.ReadFile(file) if err != nil { return nil, fmt.Errorf("could not read file: %v", err) @@ -405,9 +411,8 @@ func (api *PrivateDebugAPI) TraceBlockFromFile(ctx context.Context, file string, // TraceBadBlock returns the structured logs created during the execution of // EVM against a block pulled from the pool of bad ones and returns them as a JSON // object. -func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { - blocks := api.eth.blockchain.BadBlocks() - for _, block := range blocks { +func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *TraceConfig) ([]*txTraceResult, error) { + for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) { if block.Hash() == hash { return api.traceBlock(ctx, block, config) } @@ -418,10 +423,10 @@ func (api *PrivateDebugAPI) TraceBadBlock(ctx context.Context, hash common.Hash, // StandardTraceBlockToFile dumps the structured logs created during the // execution of EVM to the local file system and returns a list of files // to the caller. -func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { - block := api.eth.blockchain.GetBlockByHash(hash) - if block == nil { - return nil, fmt.Errorf("block %#x not found", hash) +func (api *API) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { + block, err := api.blockByHash(ctx, hash) + if err != nil { + return nil, err } return api.standardTraceBlockToFile(ctx, block, config) } @@ -429,9 +434,8 @@ func (api *PrivateDebugAPI) StandardTraceBlockToFile(ctx context.Context, hash c // StandardTraceBadBlockToFile dumps the structured logs created during the // execution of EVM against a block pulled from the pool of bad ones to the // local file system and returns a list of files to the caller. -func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { - blocks := api.eth.blockchain.BadBlocks() - for _, block := range blocks { +func (api *API) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { + for _, block := range rawdb.ReadAllBadBlocks(api.backend.ChainDb()) { if block.Hash() == hash { return api.standardTraceBlockToFile(ctx, block, config) } @@ -442,27 +446,27 @@ func (api *PrivateDebugAPI) StandardTraceBadBlockToFile(ctx context.Context, has // traceBlock configures a new tracer according to the provided configuration, and // executes all the transactions contained within. 
The return value will be one item // per transaction, dependent on the requestd tracer. -func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { - // Create the parent state database - if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil { - return nil, err +func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { + if block.NumberU64() == 0 { + return nil, errors.New("genesis is not traceable") } - parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) + if err != nil { + return nil, err } reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.computeStateDB(parent, reexec) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec) if err != nil { return nil, err } + defer release() + // Execute all the transaction contained within the block concurrently var ( - signer = types.MakeSigner(api.eth.blockchain.Config(), block.Number()) - + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number()) txs = block.Transactions() results = make([]*txTraceResult, len(txs)) @@ -473,17 +477,15 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, if threads > len(txs) { threads = len(txs) } + blockCtx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - // Fetch and execute the next transaction trace tasks for task := range jobs { msg, _ := txs[task.index].AsMessage(signer) - vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil) - - res, err := api.traceTx(ctx, msg, vmctx, task.statedb, config) + res, err := api.traceTx(ctx, msg, blockCtx, task.statedb, config) if err != nil { results[task.index] = &txTraceResult{Error: err.Error()} continue @@ -500,9 +502,9 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, // Generate the next state snapshot fast without tracing msg, _ := tx.AsMessage(signer) - vmctx := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil) + txContext := core.NewEVMTxContext(msg) - vmenv := vm.NewEVM(vmctx, statedb, api.eth.blockchain.Config(), vm.Config{}) + vmenv := vm.NewEVM(blockCtx, txContext, statedb, api.backend.ChainConfig(), vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())); err != nil { failed = err break @@ -524,47 +526,47 @@ func (api *PrivateDebugAPI) traceBlock(ctx context.Context, block *types.Block, // standardTraceBlockToFile configures a new tracer which uses standard JSON output, // and traces either a full block or an individual transaction. The return value will // be one filename per transaction traced. 
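// Minimal sketch of the worker-pool layout traceBlock uses above: one job per
// transaction, at most runtime.NumCPU() workers, and results written back by
// index so the output order matches the block's transaction order. It omits
// the sequential fast-forward the real code performs on the canonical state;
// traceAll and traceOne are placeholder names for this example.
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func traceAll(txs []string, traceOne func(string) string) []string {
	type job struct{ index int }

	results := make([]string, len(txs))
	jobs := make(chan job, len(txs))

	threads := runtime.NumCPU()
	if threads > len(txs) {
		threads = len(txs)
	}
	var pend sync.WaitGroup
	for th := 0; th < threads; th++ {
		pend.Add(1)
		go func() {
			defer pend.Done()
			for task := range jobs {
				results[task.index] = traceOne(txs[task.index])
			}
		}()
	}
	for i := range txs {
		jobs <- job{index: i}
	}
	close(jobs)
	pend.Wait()
	return results
}

func main() {
	out := traceAll([]string{"tx0", "tx1", "tx2"}, func(tx string) string { return "traced " + tx })
	fmt.Println(out)
}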
-func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) { +func (api *API) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) { // If we're tracing a single transaction, make sure it's present if config != nil && config.TxHash != (common.Hash{}) { if !containsTx(block, config.TxHash) { return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash) } } - // Create the parent state database - if err := api.eth.engine.VerifyHeader(api.eth.blockchain, block.Header(), true); err != nil { - return nil, err + if block.NumberU64() == 0 { + return nil, errors.New("genesis is not traceable") } - parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) + if err != nil { + return nil, err } reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.computeStateDB(parent, reexec) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec) if err != nil { return nil, err } + defer release() + // Retrieve the tracing configurations, or use default values var ( logConfig vm.LogConfig txHash common.Hash ) if config != nil { - if config.LogConfig != nil { - logConfig = *config.LogConfig - } + logConfig = config.LogConfig txHash = config.TxHash } logConfig.Debug = true // Execute transaction, either tracing all or just the requested one var ( - signer = types.MakeSigner(api.eth.blockchain.Config(), block.Number()) dumps []string - chainConfig = api.eth.blockchain.Config() + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number()) + chainConfig = api.backend.ChainConfig() + vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) canon = true ) // Check if there are any overrides: the caller may wish to enable a future @@ -579,21 +581,20 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block chainConfigCopy := new(params.ChainConfig) *chainConfigCopy = *chainConfig chainConfig = chainConfigCopy - if yolov2 := config.Overrides.YoloV2Block; yolov2 != nil { - chainConfig.YoloV2Block = yolov2 + if berlin := config.LogConfig.Overrides.BerlinBlock; berlin != nil { + chainConfig.BerlinBlock = berlin canon = false } } for i, tx := range block.Transactions() { // Prepare the trasaction for un-traced execution var ( - msg, _ = tx.AsMessage(signer) - vmctx = core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil) - - vmConf vm.Config - dump *os.File - writer *bufio.Writer - err error + msg, _ = tx.AsMessage(signer) + txContext = core.NewEVMTxContext(msg) + vmConf vm.Config + dump *os.File + writer *bufio.Writer + err error ) // If the transaction needs tracing, swap out the configs if tx.Hash() == txHash || txHash == (common.Hash{}) { @@ -617,7 +618,7 @@ func (api *PrivateDebugAPI) standardTraceBlockToFile(ctx context.Context, block } } // Execute the transaction and flush any traces to disk - vmenv := vm.NewEVM(vmctx, statedb, chainConfig, vmConf) + vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) if writer != nil { writer.Flush() @@ -652,142 +653,78 @@ func containsTx(block *types.Block, hash common.Hash) bool { return false 
} -// computeStateDB retrieves the state database associated with a certain block. -// If no state is locally available for the given block, a number of blocks are -// attempted to be reexecuted to generate the desired state. -func (api *PrivateDebugAPI) computeStateDB(block *types.Block, reexec uint64) (*state.StateDB, error) { - // If we have the state fully available, use that - statedb, err := api.eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, nil - } - // Otherwise try to reexec blocks until we find a state or reach our limit - origin := block.NumberU64() - database := state.NewDatabaseWithCache(api.eth.ChainDb(), 16, "") - - for i := uint64(0); i < reexec; i++ { - block = api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if block == nil { - break - } - if statedb, err = state.New(block.Root(), database, nil); err == nil { - break - } - } - if err != nil { - switch err.(type) { - case *trie.MissingNodeError: - return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) - default: - return nil, err - } - } - // State was available at historical point, regenerate - var ( - start = time.Now() - logged time.Time - proot common.Hash - ) - for block.NumberU64() < origin { - // Print progress logs if long enough time elapsed - if time.Since(logged) > 8*time.Second { - log.Info("Regenerating historical state", "block", block.NumberU64()+1, "target", origin, "remaining", origin-block.NumberU64()-1, "elapsed", time.Since(start)) - logged = time.Now() - } - // Retrieve the next block to regenerate and process it - if block = api.eth.blockchain.GetBlockByNumber(block.NumberU64() + 1); block == nil { - return nil, fmt.Errorf("block #%d not found", block.NumberU64()+1) - } - _, _, _, err := api.eth.blockchain.Processor().Process(block, statedb, vm.Config{}) - if err != nil { - return nil, fmt.Errorf("processing block %d failed: %v", block.NumberU64(), err) - } - // Finalize the state so any modifications are written to the trie - root, err := statedb.Commit(api.eth.blockchain.Config().IsEIP158(block.Number())) - if err != nil { - return nil, err - } - if err := statedb.Reset(root); err != nil { - return nil, fmt.Errorf("state reset after block %d failed: %v", block.NumberU64(), err) - } - database.TrieDB().Reference(root, common.Hash{}) - if proot != (common.Hash{}) { - database.TrieDB().Dereference(proot) - } - proot = root - } - nodes, imgs := database.TrieDB().Size() - log.Info("Historical state regenerated", "block", block.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) - return statedb, nil -} - // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. -func (api *PrivateDebugAPI) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { - // Retrieve the transaction and assemble its EVM context - tx, blockHash, _, index := rawdb.ReadTransaction(api.eth.ChainDb(), hash) - if tx == nil { - return nil, fmt.Errorf("transaction %#x not found", hash) +func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { + _, blockHash, blockNumber, index, err := api.backend.GetTransaction(ctx, hash) + if err != nil { + return nil, err + } + // It shouldn't happen in practice. 
+ if blockNumber == 0 { + return nil, errors.New("genesis is not traceable") } reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec } - // Retrieve the block - block := api.eth.blockchain.GetBlockByHash(blockHash) - if block == nil { - return nil, fmt.Errorf("block %#x not found", blockHash) + block, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(blockNumber), blockHash) + if err != nil { + return nil, err } - msg, vmctx, statedb, err := api.computeTxEnv(block, int(index), reexec) + msg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } - // Trace the transaction and return + defer release() + return api.traceTx(ctx, msg, vmctx, statedb, config) } -// TraceCall lets you trace a given eth_call. It collects the structured logs created during the execution of EVM -// if the given transaction was added on top of the provided block and returns them as a JSON object. +// TraceCall lets you trace a given eth_call. It collects the structured logs +// created during the execution of EVM if the given transaction was added on +// top of the provided block and returns them as a JSON object. // You can provide -2 as a block number to trace on top of the pending block. -func (api *PrivateDebugAPI) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceConfig) (interface{}, error) { - // First try to retrieve the state - statedb, header, err := api.eth.APIBackend.StateAndHeaderByNumberOrHash(ctx, blockNrOrHash) +func (api *API) TraceCall(ctx context.Context, args ethapi.CallArgs, blockNrOrHash rpc.BlockNumberOrHash, config *TraceConfig) (interface{}, error) { + // Try to retrieve the specified block + var ( + err error + block *types.Block + ) + if hash, ok := blockNrOrHash.Hash(); ok { + block, err = api.blockByHash(ctx, hash) + } else if number, ok := blockNrOrHash.Number(); ok { + block, err = api.blockByNumber(ctx, number) + } if err != nil { - // Try to retrieve the specified block - var block *types.Block - if hash, ok := blockNrOrHash.Hash(); ok { - block = api.eth.blockchain.GetBlockByHash(hash) - } else if number, ok := blockNrOrHash.Number(); ok { - block = api.eth.blockchain.GetBlockByNumber(uint64(number)) - } - if block == nil { - return nil, fmt.Errorf("block %v not found: %v", blockNrOrHash, err) - } - // try to recompute the state - reexec := defaultTraceReexec - if config != nil && config.Reexec != nil { - reexec = *config.Reexec - } - _, _, statedb, err = api.computeTxEnv(block, 0, reexec) - if err != nil { - return nil, err - } + return nil, err + } + // try to recompute the state + reexec := defaultTraceReexec + if config != nil && config.Reexec != nil { + reexec = *config.Reexec + } + statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec) + if err != nil { + return nil, err } + defer release() // Execute the trace - msg := args.ToMessage(api.eth.APIBackend.RPCGasCap()) - vmctx := core.NewEVMContext(msg, header, api.eth.blockchain, nil) + msg := args.ToMessage(api.backend.RPCGasCap()) + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) return api.traceTx(ctx, msg, vmctx, statedb, config) } // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
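// Hedged usage example for the TraceTransaction handler above: it is served
// as debug_traceTransaction in the debug namespace, so a node that exposes
// that namespace can be queried with the standard rpc client. The endpoint
// URL and the transaction hash below are placeholders, not values from this
// change.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Assumes the node exposes the debug namespace over HTTP.
	client, err := rpc.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	var trace json.RawMessage
	txHash := "0x0000000000000000000000000000000000000000000000000000000000000000" // placeholder hash

	// A nil config selects the default struct logger, matching TraceConfig == nil.
	if err := client.CallContext(context.Background(), &trace, "debug_traceTransaction", txHash, nil); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(trace))
}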
-func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, vmctx vm.Context, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *API) traceTx(ctx context.Context, message core.Message, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { // Assemble the structured logger or the JavaScript tracer var ( - tracer vm.Tracer - err error + tracer vm.Tracer + err error + txContext = core.NewEVMTxContext(message) ) switch { case config != nil && config.Tracer != nil: @@ -799,14 +736,14 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v } } // Constuct the JavaScript tracer to execute with - if tracer, err = tracers.New(*config.Tracer); err != nil { + if tracer, err = New(*config.Tracer, txContext); err != nil { return nil, err } // Handle timeouts and RPC cancellations deadlineCtx, cancel := context.WithTimeout(ctx, timeout) go func() { <-deadlineCtx.Done() - tracer.(*tracers.Tracer).Stop(errors.New("execution timeout")) + tracer.(*Tracer).Stop(errors.New("execution timeout")) }() defer cancel() @@ -816,14 +753,15 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v default: tracer = vm.NewStructLogger(config.LogConfig) } - // Run the transaction with tracing enabled. - vmenv := vm.NewEVM(vmctx, statedb, api.eth.blockchain.Config(), vm.Config{Debug: true, Tracer: tracer}) + // Run the transaction with tracing enabled. + vmenv := vm.NewEVM(vmctx, txContext, statedb, api.backend.ChainConfig(), vm.Config{Debug: true, Tracer: tracer}) result, err := core.ApplyMessage(vmenv, message, new(core.GasPool).AddGas(message.Gas())) if err != nil { return nil, fmt.Errorf("tracing failed: %v", err) } - // Depending on the tracer type, format and return the output + + // Depending on the tracer type, format and return the output. switch tracer := tracer.(type) { case *vm.StructLogger: // If the result contains a revert reason, return it. @@ -838,7 +776,7 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v StructLogs: ethapi.FormatLogs(tracer.StructLogs()), }, nil - case *tracers.Tracer: + case *Tracer: return tracer.GetResult() default: @@ -846,40 +784,15 @@ func (api *PrivateDebugAPI) traceTx(ctx context.Context, message core.Message, v } } -// computeTxEnv returns the execution environment of a certain transaction. -func (api *PrivateDebugAPI) computeTxEnv(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.Context, *state.StateDB, error) { - // Create the parent state database - parent := api.eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) - if parent == nil { - return nil, vm.Context{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) - } - statedb, err := api.computeStateDB(parent, reexec) - if err != nil { - return nil, vm.Context{}, nil, err - } - - if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.Context{}, statedb, nil - } - - // Recompute transactions up to the target index. 
- signer := types.MakeSigner(api.eth.blockchain.Config(), block.Number()) - - for idx, tx := range block.Transactions() { - // Assemble the transaction call message and return if the requested offset - msg, _ := tx.AsMessage(signer) - context := core.NewEVMContext(msg, block.Header(), api.eth.blockchain, nil) - if idx == txIndex { - return msg, context, statedb, nil - } - // Not yet the searched for transaction, execute on top of the current state - vmenv := vm.NewEVM(context, statedb, api.eth.blockchain.Config(), vm.Config{}) - if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.Context{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) - } - // Ensure any modifications are committed to the state - // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect - statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) +// APIs return the collection of RPC services the tracer package offers. +func APIs(backend Backend) []rpc.API { + // Append all the local APIs and return + return []rpc.API{ + { + Namespace: "debug", + Version: "1.0", + Service: NewAPI(backend), + Public: false, + }, } - return nil, vm.Context{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/eth/tracers/api_test.go b/eth/tracers/api_test.go new file mode 100644 index 0000000000..688b983bab --- /dev/null +++ b/eth/tracers/api_test.go @@ -0,0 +1,487 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
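// Hedged sketch of wiring the APIs constructor above into a node: anything
// satisfying tracers.Backend can be handed to APIs and the result registered
// on the RPC stack. This assumes node.Node.RegisterAPIs is available in the
// target go-ethereum version; it is illustrative, not the exact wiring used
// by the eth service itself.
package tracing

import (
	"github.com/ethereum/go-ethereum/eth/tracers"
	"github.com/ethereum/go-ethereum/node"
)

// RegisterTracingAPIs exposes the debug tracing namespace on the given node.
func RegisterTracingAPIs(stack *node.Node, backend tracers.Backend) {
	stack.RegisterAPIs(tracers.APIs(backend))
}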
+ +package tracers + +import ( + "bytes" + "context" + "crypto/ecdsa" + "errors" + "fmt" + "math/big" + "reflect" + "sort" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/consensus" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/internal/ethapi" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" +) + +var ( + errStateNotFound = errors.New("state not found") + errBlockNotFound = errors.New("block not found") + errTransactionNotFound = errors.New("transaction not found") +) + +type testBackend struct { + chainConfig *params.ChainConfig + engine consensus.Engine + chaindb ethdb.Database + chain *core.BlockChain +} + +func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { + backend := &testBackend{ + chainConfig: params.TestChainConfig, + engine: ethash.NewFaker(), + chaindb: rawdb.NewMemoryDatabase(), + } + // Generate blocks for testing + gspec.Config = backend.chainConfig + var ( + gendb = rawdb.NewMemoryDatabase() + genesis = gspec.MustCommit(gendb) + ) + blocks, _ := core.GenerateChain(backend.chainConfig, genesis, backend.engine, gendb, n, generator) + + // Import the canonical chain + gspec.MustCommit(backend.chaindb) + cacheConfig := &core.CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieTimeLimit: 5 * time.Minute, + SnapshotLimit: 0, + TrieDirtyDisabled: true, // Archive mode + } + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, nil, nil) + if err != nil { + t.Fatalf("failed to create tester chain: %v", err) + } + if n, err := chain.InsertChain(blocks); err != nil { + t.Fatalf("block %d: failed to insert into chain: %v", n, err) + } + backend.chain = chain + return backend +} + +func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + return b.chain.GetHeaderByHash(hash), nil +} + +func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { + if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { + return b.chain.CurrentHeader(), nil + } + return b.chain.GetHeaderByNumber(uint64(number)), nil +} + +func (b *testBackend) BlockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { + return b.chain.GetBlockByHash(hash), nil +} + +func (b *testBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { + if number == rpc.PendingBlockNumber || number == rpc.LatestBlockNumber { + return b.chain.CurrentBlock(), nil + } + return b.chain.GetBlockByNumber(uint64(number)), nil +} + +func (b *testBackend) GetTransaction(ctx context.Context, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { + tx, hash, blockNumber, index := rawdb.ReadTransaction(b.chaindb, txHash) + if tx == nil { + return nil, common.Hash{}, 0, 0, errTransactionNotFound + } + return tx, hash, blockNumber, index, nil +} + +func (b *testBackend) RPCGasCap() uint64 { + return 25000000 +} + +func (b *testBackend) ChainConfig() 
*params.ChainConfig { + return b.chainConfig +} + +func (b *testBackend) Engine() consensus.Engine { + return b.engine +} + +func (b *testBackend) ChainDb() ethdb.Database { + return b.chaindb +} + +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { + statedb, err := b.chain.StateAt(block.Root()) + if err != nil { + return nil, nil, errStateNotFound + } + return statedb, func() {}, nil +} + +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { + parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) + if parent == nil { + return nil, vm.BlockContext{}, nil, nil, errBlockNotFound + } + statedb, err := b.chain.StateAt(parent.Root()) + if err != nil { + return nil, vm.BlockContext{}, nil, nil, errStateNotFound + } + if txIndex == 0 && len(block.Transactions()) == 0 { + return nil, vm.BlockContext{}, statedb, func() {}, nil + } + // Recompute transactions up to the target index. + signer := types.MakeSigner(b.chainConfig, block.Number()) + for idx, tx := range block.Transactions() { + msg, _ := tx.AsMessage(signer) + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(block.Header(), b.chain, nil) + if idx == txIndex { + return msg, context, statedb, func() {}, nil + } + vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + } + statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) + } + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) +} + +func (b *testBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { + var result []*state.StateDB + for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number += 1 { + block := b.chain.GetBlockByNumber(number) + if block == nil { + return nil, nil, errBlockNotFound + } + statedb, err := b.chain.StateAt(block.Root()) + if err != nil { + return nil, nil, errStateNotFound + } + result = append(result, statedb) + } + return result, func() {}, nil +} + +func TestTraceCall(t *testing.T) { + t.Parallel() + + // Initialize test accounts + accounts := newAccounts(3) + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }} + genBlocks := 10 + signer := types.HomesteadSigner{} + api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key) + b.AddTx(tx) + })) + + var testSuite = []struct { + blockNumber rpc.BlockNumber + call ethapi.CallArgs + config *TraceConfig + expectErr error + expect interface{} + }{ + // Standard JSON trace upon the genesis, plain transfer. 
+ { + blockNumber: rpc.BlockNumber(0), + call: ethapi.CallArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: nil, + expectErr: nil, + expect: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + // Standard JSON trace upon the head, plain transfer. + { + blockNumber: rpc.BlockNumber(genBlocks), + call: ethapi.CallArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: nil, + expectErr: nil, + expect: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + // Standard JSON trace upon the non-existent block, error expects + { + blockNumber: rpc.BlockNumber(genBlocks + 1), + call: ethapi.CallArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: nil, + expectErr: fmt.Errorf("block #%d not found", genBlocks+1), + expect: nil, + }, + // Standard JSON trace upon the latest block + { + blockNumber: rpc.LatestBlockNumber, + call: ethapi.CallArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: nil, + expectErr: nil, + expect: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + // Standard JSON trace upon the pending block + { + blockNumber: rpc.PendingBlockNumber, + call: ethapi.CallArgs{ + From: &accounts[0].addr, + To: &accounts[1].addr, + Value: (*hexutil.Big)(big.NewInt(1000)), + }, + config: nil, + expectErr: nil, + expect: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + } + for _, testspec := range testSuite { + result, err := api.TraceCall(context.Background(), testspec.call, rpc.BlockNumberOrHash{BlockNumber: &testspec.blockNumber}, testspec.config) + if testspec.expectErr != nil { + if err == nil { + t.Errorf("Expect error %v, get nothing", testspec.expectErr) + continue + } + if !reflect.DeepEqual(err, testspec.expectErr) { + t.Errorf("Error mismatch, want %v, get %v", testspec.expectErr, err) + } + } else { + if err != nil { + t.Errorf("Expect no error, get %v", err) + continue + } + if !reflect.DeepEqual(result, testspec.expect) { + t.Errorf("Result mismatch, want %v, get %v", testspec.expect, result) + } + } + } +} + +func TestTraceTransaction(t *testing.T) { + t.Parallel() + + // Initialize test accounts + accounts := newAccounts(2) + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + }} + target := common.Hash{} + signer := types.HomesteadSigner{} + api := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key) + b.AddTx(tx) + target = tx.Hash() + })) + result, err := api.TraceTransaction(context.Background(), target, nil) + if err != nil { + t.Errorf("Failed to trace transaction %v", err) + } + if !reflect.DeepEqual(result, ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }) { + t.Error("Transaction tracing 
result is different") + } +} + +func TestTraceBlock(t *testing.T) { + t.Parallel() + + // Initialize test accounts + accounts := newAccounts(3) + genesis := &core.Genesis{Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }} + genBlocks := 10 + signer := types.HomesteadSigner{} + api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + tx, _ := types.SignTx(types.NewTransaction(uint64(i), accounts[1].addr, big.NewInt(1000), params.TxGas, big.NewInt(0), nil), signer, accounts[0].key) + b.AddTx(tx) + })) + + var testSuite = []struct { + blockNumber rpc.BlockNumber + config *TraceConfig + expect interface{} + expectErr error + }{ + // Trace genesis block, expect error + { + blockNumber: rpc.BlockNumber(0), + config: nil, + expect: nil, + expectErr: errors.New("genesis is not traceable"), + }, + // Trace head block + { + blockNumber: rpc.BlockNumber(genBlocks), + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + }, + }, + // Trace non-existent block + { + blockNumber: rpc.BlockNumber(genBlocks + 1), + config: nil, + expectErr: fmt.Errorf("block #%d not found", genBlocks+1), + expect: nil, + }, + // Trace latest block + { + blockNumber: rpc.LatestBlockNumber, + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + }, + }, + // Trace pending block + { + blockNumber: rpc.PendingBlockNumber, + config: nil, + expectErr: nil, + expect: []*txTraceResult{ + { + Result: ðapi.ExecutionResult{ + Gas: params.TxGas, + Failed: false, + ReturnValue: "", + StructLogs: []ethapi.StructLogRes{}, + }, + }, + }, + }, + } + for _, testspec := range testSuite { + result, err := api.TraceBlockByNumber(context.Background(), testspec.blockNumber, testspec.config) + if testspec.expectErr != nil { + if err == nil { + t.Errorf("Expect error %v, get nothing", testspec.expectErr) + continue + } + if !reflect.DeepEqual(err, testspec.expectErr) { + t.Errorf("Error mismatch, want %v, get %v", testspec.expectErr, err) + } + } else { + if err != nil { + t.Errorf("Expect no error, get %v", err) + continue + } + if !reflect.DeepEqual(result, testspec.expect) { + t.Errorf("Result mismatch, want %v, get %v", testspec.expect, result) + } + } + } +} + +type Account struct { + key *ecdsa.PrivateKey + addr common.Address +} + +type Accounts []Account + +func (a Accounts) Len() int { return len(a) } +func (a Accounts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a Accounts) Less(i, j int) bool { return bytes.Compare(a[i].addr.Bytes(), a[j].addr.Bytes()) < 0 } + +func newAccounts(n int) (accounts Accounts) { + for i := 0; i < n; i++ { + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + accounts = append(accounts, Account{key: key, addr: addr}) + } + sort.Sort(accounts) + return accounts +} diff --git a/eth/tracers/internal/tracers/assets.go b/eth/tracers/internal/tracers/assets.go index 432398ebb5..7f45ab286e 100644 --- a/eth/tracers/internal/tracers/assets.go +++ b/eth/tracers/internal/tracers/assets.go @@ -6,9 +6,9 @@ // evmdis_tracer.js (4.195kB) // 
noop_tracer.js (1.271kB) // opcount_tracer.js (1.372kB) -// prestate_tracer.js (4.234kB) +// prestate_tracer.js (4.287kB) // trigram_tracer.js (1.788kB) -// unigram_tracer.js (1.51kB) +// unigram_tracer.js (1.469kB) package tracers @@ -28,7 +28,7 @@ import ( func bindataRead(data []byte, name string) ([]byte, error) { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %w", name, err) } var buf bytes.Buffer @@ -36,7 +36,7 @@ func bindataRead(data []byte, name string) ([]byte, error) { clErr := gz.Close() if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) + return nil, fmt.Errorf("read %q: %w", name, err) } if clErr != nil { return nil, err @@ -197,7 +197,7 @@ func opcount_tracerJs() (*asset, error) { return a, nil } -var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\x96\xfe\x8a\x41\x5f\x6c\xa3\xae\xdc\x64\x81\x3d\xc0\xb9\x1c\xa0\xba\x6e\x1b\x20\x9b\x04\xb6\x7b\xb9\xdc\x62\x1f\x28\x72\x24\x73\x4d\x93\x02\x49\xd9\xf1\x15\xf9\xdf\x0f\x43\x7d\xf8\xa3\x49\xd3\xdd\x37\x9b\x1c\xfe\xe6\xfb\x37\xa3\xd1\x08\x26\xa6\xdc\x59\x59\x2c\x3d\x9c\xbf\x3f\xfb\x07\x2c\x96\x08\x85\x79\x87\x7e\x89\x16\xab\x35\xa4\x95\x5f\x1a\xeb\xe2\xd1\x08\x16\x4b\xe9\x20\x97\x0a\x41\x3a\x28\x99\xf5\x60\x72\xf0\x27\xf2\x4a\x66\x96\xd9\x5d\x12\x8f\x46\xf5\x9b\x67\xaf\x09\x21\xb7\x88\xe0\x4c\xee\xb7\xcc\xe2\x18\x76\xa6\x02\xce\x34\x58\x14\xd2\x79\x2b\xb3\xca\x23\x48\x0f\x4c\x8b\x91\xb1\xb0\x36\x42\xe6\x3b\x82\x94\x1e\x2a\x2d\xd0\x06\xd5\x1e\xed\xda\xb5\x76\x7c\xbe\xf9\x0a\xd7\xe8\x1c\x5a\xf8\x8c\x1a\x2d\x53\x70\x57\x65\x4a\x72\xb8\x96\x1c\xb5\x43\x60\x0e\x4a\x3a\x71\x4b\x14\x90\x05\x38\x7a\xf8\x89\x4c\x99\x37\xa6\xc0\x27\x53\x69\xc1\xbc\x34\x7a\x08\x28\xc9\x72\xd8\xa0\x75\xd2\x68\xf8\xa5\x55\xd5\x00\x0e\xc1\x58\x02\xe9\x33\x4f\x0e\x58\x30\x25\xbd\x1b\x00\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\xaf\x16\x5f\x6e\xbf\x2e\x20\xbd\x79\x80\xfb\x74\x36\x4b\x6f\x16\x0f\x17\xb0\x95\x7e\x69\x2a\x0f\xb8\xc1\x1a\x4a\xae\x4b\x25\x51\xc0\x96\x59\xcb\xb4\xdf\x81\xc9\x09\xe1\xb7\xe9\x6c\xf2\x25\xbd\x59\xa4\x1f\xae\xae\xaf\x16\x0f\x60\x2c\x7c\xba\x5a\xdc\x4c\xe7\x73\xf8\x74\x3b\x83\x14\xee\xd2\xd9\xe2\x6a\xf2\xf5\x3a\x9d\xc1\xdd\xd7\xd9\xdd\xed\x7c\x9a\xc0\x1c\xc9\x2a\xa4\xf7\xaf\xc7\x3c\x0f\xd9\xb3\x08\x02\x3d\x93\xca\xb5\x91\x78\x30\x15\xb8\xa5\xa9\x94\x80\x25\xdb\x20\x58\xe4\x28\x37\x28\x80\x01\x37\xe5\xee\xa7\x93\x4a\x58\x4c\x19\x5d\x04\x9f\x5f\x2c\x48\xb8\xca\x41\x1b\x3f\x04\x87\x08\xff\x5c\x7a\x5f\x8e\x47\xa3\xed\x76\x9b\x14\xba\x4a\x8c\x2d\x46\xaa\x86\x73\xa3\x7f\x25\x31\x61\x96\x16\x9d\x67\x1e\x17\x96\x71\xb4\x60\x2a\x5f\x56\xde\x81\xab\xf2\x5c\x72\x89\xda\x83\xd4\xb9\xb1\xeb\x50\x29\xe0\x0d\x70\x8b\xcc\x23\x30\x50\x86\x33\x05\xf8\x88\xbc\x0a\x77\x75\xa4\x43\xb9\x5a\xa6\x1d\xe3\xe1\x34\xb7\x66\x4d\xbe\x56\xce\xd3\x0f\xe7\x70\x9d\x29\x14\x50\xa0\x46\x27\x1d\x64\xca\xf0\x55\x12\x7f\x8b\xa3\x03\x63\xa8\x4e\x82\x87\x8d\x50\xa8\x8d\x2d\xf6\x2c\x42\x56\x49\x25\xa4\x2e\x92\x38\x6a\xa5\xc7\xa0\x2b\xa5\x86\x71\x80\x50\xc6\xac\xaa\x32\xe5\xdc\x54\xc1\xf6\x3f\x91\xfb\x1a\xcc\x95\xc8\x65\x4e\xc5\xc1\xba\x5b\x6f\xc2\x55\xa7\xd7\x64\x24\x9f\xc4\xd1\x11\xcc\x18\xf2\x4a\x07\x77\xfa\x4c\x08\x3b\x04\x91\x0d\xbe\xc5\x51\xb4\x61\x96\xb0\xe0\x12\xbc\xf9\x82\x8f\xe1\x72\x70\x11\x47\x91\xcc\xa1\xef\x97\xd2\x25\x2d\xf0\xef\x8c\xf3\x3f\xe0\xf2\xf2\x32\x34\x75\x2e\x35\x8a\x01\x10\x44\xf4
\x9c\x58\x7d\x13\x65\x4c\x31\xcd\x71\x0c\xbd\xf7\x8f\x3d\x78\x0b\x22\x4b\x0a\xf4\x1f\xea\xd3\x5a\x59\xe2\xcd\xdc\x5b\xa9\x8b\xfe\xd9\xaf\x83\x61\x78\xa5\x4d\x78\x03\x8d\xf8\x8d\xe9\x84\xeb\x7b\x6e\x44\xb8\x6e\x6c\xae\xa5\x26\x46\x34\x42\x8d\x94\xf3\xc6\xb2\x02\xc7\xf0\xed\x89\xfe\x3f\x91\x57\x4f\x71\xf4\x74\x14\xe5\x79\x2d\xf4\x42\x94\x1b\x08\x40\xed\x6d\x57\xe7\x85\xa4\x4e\x3d\x4c\x40\xc0\xfb\x51\x12\xe6\xad\x29\x27\x49\x58\xe1\xee\xf5\x4c\xd0\x85\x14\x8f\xdd\xc5\x0a\x77\x83\x8b\xf8\xc5\x14\x25\x8d\xd1\xbf\x4b\xf1\xf8\xb3\xf9\x3a\x79\x73\x14\xd7\x39\x49\xed\xed\x1d\x0c\x4e\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x36\x4c\x55\xd8\xab\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\xc7\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x0f\x4c\xc1\x25\x64\xb2\xb8\xd2\xfe\x24\x79\x75\xd0\xdb\xa7\x83\x3f\x92\xa6\x79\x12\x47\x84\xd7\x3f\x1f\x0c\xe1\xec\xd7\xae\x22\xbc\x21\x28\x78\x1d\xcc\x9b\x97\xa1\xe2\xd3\x62\x78\xfe\x59\x50\x43\x1d\xfc\x36\x68\x4d\x5c\x95\x51\x3a\x6a\x3f\x43\x1c\x8f\xbb\xf8\xe2\x07\xb8\xc7\xbe\xb5\xb8\x4d\x68\x12\x26\xc4\xcb\xa0\x75\x8a\x3e\x22\xb7\xb8\x26\x56\xa7\x2c\x70\xa6\x14\xda\x9e\x83\xc0\x19\xc3\xa6\x9c\x42\xbe\x70\x5d\xfa\x5d\xcb\xf5\x9e\xd9\x02\xbd\x7b\xdd\xb0\x80\xf3\xee\x5d\x4b\x81\x21\x14\xbb\x12\xe1\xf2\x12\x7a\x93\xd9\x34\x5d\x4c\x7b\x4d\x1b\x8d\x46\x70\x8f\x61\x13\xca\x94\xcc\x84\xda\x81\x40\x85\x1e\x6b\xbb\x8c\x0e\x21\xea\x28\x61\x48\x2b\x0d\x2d\x1b\xf8\x28\x9d\x97\xba\x80\x9a\x29\xb6\x34\x57\x1b\xb8\xd0\x23\x9c\x55\x8e\xaa\xf5\x64\x08\x79\x43\x1b\x85\x45\xe2\x15\xe2\xff\xd0\x6e\x4c\xc9\x6e\x03\xc9\xa5\x75\x1e\x4a\xc5\x38\x26\x84\xd7\x19\xf3\x72\x7e\x9b\x4e\x26\xd5\xb3\xd0\x82\x01\x68\x3f\xe0\x98\xa2\x01\x49\xea\x1d\xf4\x5b\x8c\x41\x1c\x45\xb6\x95\x3e\xc0\xbe\xd8\x53\x82\xf3\x58\x1e\x12\x02\x2d\x16\xb8\x41\xa2\xd0\xc0\x06\xf5\x30\x24\x5d\xff\xfe\xad\x99\xbe\xe8\x92\x38\xa2\x77\x07\x7d\xad\x4c\x71\xdc\xd7\xa2\x0e\x0b\xaf\xac\xa5\xfc\x77\x14\x9c\x53\x8f\xff\x59\x39\x4f\x31\xb5\x14\x9e\x86\x2d\x9e\x23\xc9\x40\x89\x34\x6d\x07\xdf\x93\x21\xcd\xad\x30\x27\x48\x5d\x33\xa5\xea\x6d\xae\x34\x1e\xb5\x97\x4c\xa9\x1d\xe5\x61\x6b\x69\x8d\xa1\xc5\x65\x08\x4e\x92\x54\x60\x9c\x20\x2a\x35\x57\x95\xa8\xcb\x20\xd4\x71\x83\xe7\x82\xcd\xc7\xfb\xcf\x1a\x9d\x63\x05\x26\x54\x49\xb9\x7c\x6c\x36\x48\x0d\xbd\x9a\xe4\xfa\x83\x5e\xd2\x19\x79\x4c\x31\xca\x14\x49\x5b\x64\x44\xd3\xa9\x10\x16\x9d\xeb\x0f\x1a\xce\xe9\x32\x7b\xbf\x44\x4d\xc1\x07\x8d\x5b\xe8\x56\x13\xc6\x39\xad\x6a\x62\x08\x4c\x08\xa2\xb6\x93\x35\x22\x8e\x22\xb7\x95\x9e\x2f\x21\x68\x32\xe5\xbe\x17\x07\x4d\xfd\x73\xe6\x10\xde\x4c\xff\xb3\x98\xdc\x7e\x9c\x4e\x6e\xef\x1e\xde\x8c\xe1\xe8\x6c\x7e\xf5\xdf\x69\x77\xf6\x21\xbd\x4e\x6f\x26\xd3\x37\xe3\x30\x9b\x9f\x71\xc8\x9b\xd6\x05\x52\xe8\x3c\xe3\xab\xa4\x44\x5c\xf5\xdf\x1f\xf3\xc0\xde\xc1\x28\xca\x2c\xb2\xd5\xc5\xde\x98\xba\x41\x1b\x1d\x2d\xe5\xc2\x25\xbc\x18\xac\x8b\x97\xad\x99\x34\xf2\xfd\x96\xc8\xf7\xab\x48\xa0\x8a\xd7\xed\x38\xff\xcb\x86\x84\xde\x61\x7c\x35\x06\xc7\x14\x6d\xc0\xf2\x7f\xf4\xe5\x92\xe7\x0e\xfd\x10\x50\x0b\xb3\x25\xe6\xeb\x50\xeb\x9b\x06\xf7\x20\x64\x67\x83\x9a\x41\x6f\xf3\xfe\xa0\x13\x26\xb0\xef\x45\xcf\x9f\x13\x45\x2d\xe0\xb2\x45\x7f\x1b\x5e\xbe\x1e\xa8\xf3\x26\x52\x27\x0a\x7
e\x39\xd9\xf0\xc2\xfd\x1a\xd7\xc6\xee\x9a\x71\x74\xe0\xdf\x8f\xa3\x9a\x5e\x5f\x77\xf5\x44\x7f\xa8\xc8\xba\x83\x8f\xd3\xeb\xe9\xe7\x74\x31\x3d\x92\x9a\x2f\xd2\xc5\xd5\xa4\x3e\xfa\xcb\x85\x77\xf6\xd3\x85\xd7\x9b\xcf\x17\xb7\xb3\x69\x6f\xdc\xfc\xbb\xbe\x4d\x3f\xf6\xbe\x53\xd8\x6c\x81\x3f\x6a\x5d\x6f\xee\x8d\x15\x7f\xa7\x03\x0e\x36\xb2\x9c\x3d\xb7\x90\x05\x6a\xe7\xbe\x3a\xf9\xe0\x01\xa6\x5b\x56\xce\xeb\x8f\xbe\x28\xbc\x7f\x96\x87\x9f\xe2\xa7\xf8\xff\x01\x00\x00\xff\xff\xb1\x28\x85\x2a\x8a\x10\x00\x00") +var _prestate_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x9c\x57\xdd\x6f\xdb\x38\x12\x7f\xb6\xfe\x8a\x41\x5f\x6c\x5d\x5d\xb9\xcd\x02\x7b\x80\x73\x39\x40\x75\xdd\x36\x40\x36\x09\x6c\xe7\x72\xb9\xc5\x3e\x50\xe4\x48\xe6\x9a\x26\x05\x92\xb2\xe3\x2b\xf2\xbf\x1f\x86\xfa\xf0\x47\x93\xa6\x7b\x6f\x16\x39\xfc\xcd\xf7\x6f\xc6\xa3\x11\x4c\x4c\xb9\xb3\xb2\x58\x7a\x38\x7b\xff\xe1\xef\xb0\x58\x22\x14\xe6\x1d\xfa\x25\x5a\xac\xd6\x90\x56\x7e\x69\xac\x8b\x46\x23\x58\x2c\xa5\x83\x5c\x2a\x04\xe9\xa0\x64\xd6\x83\xc9\xc1\x9f\xc8\x2b\x99\x59\x66\x77\x49\x34\x1a\xd5\x6f\x9e\xbd\x26\x84\xdc\x22\x82\x33\xb9\xdf\x32\x8b\x63\xd8\x99\x0a\x38\xd3\x60\x51\x48\xe7\xad\xcc\x2a\x8f\x20\x3d\x30\x2d\x46\xc6\xc2\xda\x08\x99\xef\x08\x52\x7a\xa8\xb4\x40\x1b\x54\x7b\xb4\x6b\xd7\xda\xf1\xe5\xfa\x0e\xae\xd0\x39\xb4\xf0\x05\x35\x5a\xa6\xe0\xb6\xca\x94\xe4\x70\x25\x39\x6a\x87\xc0\x1c\x94\x74\xe2\x96\x28\x20\x0b\x70\xf4\xf0\x33\x99\x32\x6f\x4c\x81\xcf\xa6\xd2\x82\x79\x69\xf4\x10\x50\x92\xe5\xb0\x41\xeb\xa4\xd1\xf0\x4b\xab\xaa\x01\x1c\x82\xb1\x04\x32\x60\x9e\x1c\xb0\x60\x4a\x7a\x17\x03\xd3\x3b\x50\xcc\xef\x9f\xfe\x44\x40\xf6\x7e\x0b\x90\x3a\xa8\x59\x9a\x12\xc1\x2f\x99\x27\xaf\xb7\x52\x29\xc8\x10\x2a\x87\x79\xa5\x86\x84\x96\x55\x1e\xee\x2f\x17\x5f\x6f\xee\x16\x90\x5e\x3f\xc0\x7d\x3a\x9b\xa5\xd7\x8b\x87\x73\xd8\x4a\xbf\x34\x95\x07\xdc\x60\x0d\x25\xd7\xa5\x92\x28\x60\xcb\xac\x65\xda\xef\xc0\xe4\x84\xf0\xdb\x74\x36\xf9\x9a\x5e\x2f\xd2\x8f\x97\x57\x97\x8b\x07\x30\x16\x3e\x5f\x2e\xae\xa7\xf3\x39\x7c\xbe\x99\x41\x0a\xb7\xe9\x6c\x71\x39\xb9\xbb\x4a\x67\x70\x7b\x37\xbb\xbd\x99\x4f\x13\x98\x23\x59\x85\xf4\xfe\xf5\x98\xe7\x21\x7b\x16\x41\xa0\x67\x52\xb9\x36\x12\x0f\xa6\x02\xb7\x34\x95\x12\xb0\x64\x1b\x04\x8b\x1c\xe5\x06\x05\x30\xe0\xa6\xdc\xfd\x74\x52\x09\x8b\x29\xa3\x8b\xe0\xf3\x8b\x05\x09\x97\x39\x68\xe3\x87\xe0\x10\xe1\x1f\x4b\xef\xcb\xf1\x68\xb4\xdd\x6e\x93\x42\x57\x89\xb1\xc5\x48\xd5\x70\x6e\xf4\xcf\x24\x22\xcc\xd2\xa2\xf3\xcc\xe3\xc2\x32\x8e\x16\x4c\xe5\xcb\xca\x3b\x70\x55\x9e\x4b\x2e\x51\x7b\x90\x3a\x37\x76\x1d\x2a\x05\xbc\x01\x6e\x91\x79\x04\x06\xca\x70\xa6\x00\x1f\x91\x57\xe1\xae\x8e\x74\x28\x57\xcb\xb4\x63\x3c\x9c\xe6\xd6\xac\xc9\xd7\xca\x79\xfa\xe1\x1c\xae\x33\x85\x02\x0a\xd4\xe8\xa4\x83\x4c\x19\xbe\x4a\xa2\x6f\x51\xef\xc0\x18\xaa\x93\xe0\x61\x23\x14\x6a\x63\x8b\x7d\x8b\x90\x55\x52\x09\xa9\x8b\x24\xea\xb5\xd2\x63\xd0\x95\x52\xc3\x28\x40\x28\x63\x56\x55\x99\x72\x6e\xaa\x60\xfb\x9f\xc8\x7d\x0d\xe6\x4a\xe4\x32\xa7\xe2\x60\xdd\xad\x37\xe1\xaa\xd3\x6b\x32\x92\x4f\xa2\xde\x11\xcc\x18\xf2\x4a\x07\x77\x06\x4c\x08\x3b\x04\x91\xc5\xdf\xa2\x5e\x6f\xc3\x2c\x61\xc1\x05\x78\xf3\x15\x1f\xc3\x65\x7c\x1e\xf5\x7a\x32\x87\x81\x5f\x4a\x97\xb4\xc0\xbf\x33\xce\xff\x80\x8b\x8b\x8b\xd0\xd4\xb9\xd4\x28\x62\x20\x88\xde\x73\x62\xf5\x4d\x2f\x63\x8a\x69\x8e\x63\xe8\xbf\x7f\xec\xc3\x5b\x10\x59\x52\xa0\xff\x58\x9f\xd6\xca\x12\x6f\xe6\xde\x4a\x5d\x0c\x3e\xfc\x1a\x0f\xc3\x2b\x6d\xc2\x1b\x68\xc4\xaf\x4d\x27\x5c\xdf\x73\x23\xc2\x75\x63\x73\x2d\x35\x31\xa2\x11\x6a\xa4\x9c\x37\x96\x15\x38\x86\x6f\x4f\xf4\xfd\x44\x5e\x3d\x45\xbd\xa7\xa3\x28\xcf\x6b\xa1\x17\xa2\
xdc\x40\x00\x6a\x6f\xbb\x3a\x2f\x24\x75\xea\x61\x02\x02\xde\x8f\x92\x30\x6f\x4d\x39\x49\xc2\x0a\x77\xaf\x67\x82\x2e\xa4\x78\xec\x2e\x56\xb8\x8b\xcf\xa3\x17\x53\x94\x34\x46\xff\x2e\xc5\xe3\xcf\xe6\xeb\xe4\xcd\x51\x5c\xe7\x24\xb5\xb7\x37\x8e\x4f\xe2\x68\xd1\x55\xca\x53\xb9\x4b\xbd\x31\x2b\x22\xae\x25\xc5\x47\xa9\x10\x12\x53\x52\xb6\x5c\xcd\x1c\x19\xa2\x06\xe9\xd1\x32\xa2\x4e\xb3\x41\x4b\x53\x03\x2c\xfa\xca\x6a\xd7\x85\x31\x97\x9a\xa9\x16\xb8\x89\xba\xb7\x8c\xd7\x3d\x53\x9f\x1f\xc4\x92\xfb\xc7\x10\xc5\xe0\xdd\x68\x04\xa9\x07\x72\x11\x4a\x23\xb5\x1f\xc2\x16\x41\x23\x0a\x6a\x7c\x81\xa2\xe2\x3e\xe0\xf5\x37\x4c\x55\xd8\xaf\x9b\x9b\x28\x32\x3c\x35\x15\x4d\x82\x83\xe6\x1f\x06\x03\xd7\x66\x13\x46\x5c\xc6\xf8\x0a\x9a\x86\x33\x56\x16\x52\x47\x4d\x38\x8f\x9a\x8d\x2c\x4a\x08\x38\x98\x15\x72\x45\x49\xa4\x93\x8f\x4c\xc1\x05\x64\xb2\xb8\xd4\xfe\x24\x79\x75\xd0\xdb\xa7\xf1\x1f\x49\xd3\x3c\x89\x23\xc2\x1b\x9c\xc5\x43\xf8\xf0\x6b\x57\x11\xde\x10\x14\xbc\x0e\xe6\xcd\xcb\x50\xd1\x69\x31\x3c\xff\x2c\xa8\xa1\x0e\x7e\x1b\xb4\x26\xae\xca\x28\x1d\xb5\x9f\x21\x8e\xc7\x5d\x7c\xfe\x03\xdc\x63\xdf\x5a\xdc\x26\x34\x09\x13\xe2\x10\x94\x3e\xc3\x77\xc1\xdc\x9d\x43\x01\x6f\x81\xbe\xa4\x26\x55\x4e\xf2\x2f\xcc\xc5\xf0\x37\x68\x24\x6e\xad\xe4\xdf\x59\x52\xe7\xf5\x13\x72\x8b\x6b\x1a\x05\x94\x3a\xce\x94\x42\xdb\x77\x10\x88\x66\xd8\xd4\x60\x48\x32\xae\x4b\xbf\x6b\x07\x84\x67\xb6\x40\xef\x5e\xf7\x26\xe0\xbc\x7b\xd7\xf2\x66\x88\xdf\xae\x44\xb8\xb8\x80\xfe\x64\x36\x4d\x17\xd3\x7e\xd3\x7b\xa3\x11\xdc\x63\x58\x9f\x32\x25\x33\xa1\x76\x20\x50\xa1\xc7\xda\x2e\xa3\x43\x5c\x3b\x1e\x19\xd2\x1e\x44\x1b\x0a\x3e\x4a\xe7\xa5\x2e\xa0\xa6\x97\x2d\x0d\xe3\x06\x2e\x34\x16\x67\x15\x85\xe7\x74\x72\x79\x43\x6b\x88\x45\x22\x23\x1a\x1a\xa1\x47\x99\x92\xdd\xda\x92\x4b\xeb\x3c\x94\x8a\x71\x4c\x08\xaf\x33\xe6\xe5\xa2\x68\xda\x9f\x54\xcf\x42\xdf\x06\xa0\xfd\x54\x64\x8a\xa6\x2a\xa9\x77\x30\x68\x31\xe2\xa8\xd7\xb3\xad\xf4\x01\xf6\xf9\x9e\x47\x9c\xc7\xf2\x90\x45\x68\x1b\xc1\x0d\x12\xef\x06\x0a\xa9\x27\x28\xe9\xfa\xd7\x6f\xcd\xc8\x46\x97\x44\x3d\x7a\x77\x40\x06\xca\x14\xc7\x64\x20\xea\xb0\xf0\xca\x5a\xca\x7f\xc7\xdb\x39\x11\xc3\x9f\x95\xf3\x14\x53\x4b\xe1\x69\x28\xe6\x39\x66\x0d\x3c\x4a\x23\x3a\xfe\x9e\x41\x69\xd8\x85\xe1\x42\xea\x9a\xd1\x56\xaf\x80\xa5\xf1\xa8\xbd\x64\x4a\xed\x28\x0f\x5b\x4b\xbb\x0f\x6d\x3b\x43\x70\x92\xa4\x02\x4d\x05\x51\xa9\xb9\xaa\x44\x5d\x06\xa1\xf8\x1b\x3c\x17\x6c\x3e\x5e\x9a\xd6\xe8\x1c\x2b\x30\xa1\x4a\xca\xe5\x63\xb3\x76\x6a\xe8\xd7\xcc\x38\x88\xfb\x49\x67\xe4\x31\x2f\x29\x53\x24\x6d\x91\x11\xb7\xa7\x42\x58\x74\x6e\x10\x37\x44\xd5\x65\xf6\x7e\x89\x9a\x82\x0f\x1a\xb7\xd0\xed\x33\x8c\x73\xda\xef\xc4\x10\x98\x10\xc4\x87\x27\xbb\x47\xd4\xeb\xb9\xad\xf4\x7c\x09\x41\x93\x29\xf7\xbd\x18\x37\xf5\xcf\x99\x43\x78\x33\xfd\xf7\x62\x72\xf3\x69\x3a\xb9\xb9\x7d\x78\x33\x86\xa3\xb3\xf9\xe5\x7f\xa6\xdd\xd9\xc7\xf4\x2a\xbd\x9e\x4c\xdf\x8c\xc3\x40\x7f\xc6\x21\x6f\x5a\x17\x48\xa1\xf3\x8c\xaf\x92\x12\x71\x35\x78\x7f\xcc\x03\x7b\x07\x7b\xbd\xcc\x22\x5b\x9d\xef\x8d\xa9\x1b\xb4\xd1\xd1\xf2\x34\x5c\xc0\x8b\xc1\x3a\x7f\xd9\x9a\x49\x23\x3f\x68\xd9\x7f\xbf\xbf\x04\xaa\x78\xdd\x8e\xb3\xbf\x6c\x48\xe8\x1d\xc6\x57\x63\x70\x4c\xd1\xda\x2c\xff\x4b\x7f\x77\xf2\xdc\xa1\x1f\x02\x6a\x61\xb6\xc4\x7c\x1d\x6a\x7d\xd3\xe0\x1e\x84\xec\x43\x5c\xd3\xee\x4d\x3e\x88\x3b\x61\x02\xfb\x5e\xf4\xec\x39\x51\xd4\x02\x2e\x5a\xf4\xb7\xe1\xe5\xeb\x81\x3a\x6b\x22\x75\xa2\xe0\x97\x93\xb5\x30\xdc\xaf\x71\x6d\xec\xae\x99\x61\x07\xfe\xfd\x38\xaa\xe9\xd5\x55\x57\x4f\xf4\x41\x45\xd6\x1d\x7c\x9a\x5e\x4d\xbf\xa4\x8b\xe9\x91\xd4\x7c\x91\x2e\x2e\x27\xf5\xd1\x5f\x2e\xbc\x0f\x3f\x5d\x78\xfd\xf9\x7c\x71\x33
\x9b\xf6\xc7\xcd\xd7\xd5\x4d\xfa\xa9\xff\x9d\xc2\x66\x75\xfc\x51\xeb\x7a\x73\x6f\xac\xf8\x7f\x3a\xe0\x60\x8d\xcb\xd9\x73\x5b\x5c\xa0\x76\xee\xab\x93\x7f\x49\xc0\x74\xcb\xca\x79\xfd\x4f\xb1\x17\xde\x3f\xcb\xc3\x4f\xd1\x53\xf4\xbf\x00\x00\x00\xff\xff\x3a\xb7\x37\x41\xbf\x10\x00\x00") func prestate_tracerJsBytes() ([]byte, error) { return bindataRead( @@ -213,7 +213,7 @@ func prestate_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "prestate_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe9, 0x79, 0x70, 0x4f, 0xc5, 0x78, 0x57, 0x63, 0x6f, 0x5, 0x31, 0xce, 0x3e, 0x5d, 0xbd, 0x71, 0x4, 0x46, 0x78, 0xcd, 0x1d, 0xcd, 0xb9, 0xd8, 0x10, 0xff, 0xe6, 0xc5, 0x59, 0xb9, 0x25, 0x6e}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xd4, 0x9, 0xf9, 0x44, 0x13, 0x31, 0x89, 0xf7, 0x35, 0x9a, 0xc6, 0xf0, 0x86, 0x9d, 0xb2, 0xe3, 0x57, 0xe2, 0xc0, 0xde, 0xc9, 0x3a, 0x4c, 0x4a, 0x94, 0x90, 0xa5, 0x92, 0x2f, 0xbf, 0xc0, 0xb8}} return a, nil } @@ -237,7 +237,7 @@ func trigram_tracerJs() (*asset, error) { return a, nil } -var _unigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x54\x4d\x6f\xdb\x46\x10\xbd\xeb\x57\xbc\xa3\x8c\xa8\xa4\xd3\x5e\x0a\xa5\x09\xc0\x1a\x76\x22\xc0\x91\x0d\x89\x6e\x60\x14\x3d\x2c\xc9\x21\xb9\xe8\x6a\x87\xd8\x9d\x95\x42\x04\xfa\xef\xc5\x92\xa2\xe5\x1a\x6e\x13\x9e\x04\xcd\xbc\x8f\x79\x33\x64\x9a\xe2\x8a\xbb\xde\xe9\xa6\x15\xfc\x7c\xf9\xf6\x57\xe4\x2d\xa1\xe1\x9f\x48\x5a\x72\x14\x76\xc8\x82\xb4\xec\xfc\x2c\x4d\x91\xb7\xda\xa3\xd6\x86\xa0\x3d\x3a\xe5\x04\x5c\x43\x5e\xf4\x1b\x5d\x38\xe5\xfa\x64\x96\xa6\x23\xe6\xd5\x72\x64\xa8\x1d\x11\x3c\xd7\x72\x50\x8e\x96\xe8\x39\xa0\x54\x16\x8e\x2a\xed\xc5\xe9\x22\x08\x41\x0b\x94\xad\x52\x76\xd8\x71\xa5\xeb\x3e\x52\x6a\x41\xb0\x15\xb9\x41\x5a\xc8\xed\xfc\xe4\xe3\xe3\xfa\x01\xb7\xe4\x3d\x39\x7c\x24\x4b\x4e\x19\xdc\x87\xc2\xe8\x12\xb7\xba\x24\xeb\x09\xca\xa3\x8b\xff\xf8\x96\x2a\x14\x03\x5d\x04\xde\x44\x2b\xdb\x93\x15\xdc\x70\xb0\x95\x12\xcd\x76\x01\xd2\xd1\x39\xf6\xe4\xbc\x66\x8b\x5f\x26\xa9\x13\xe1\x02\xec\x22\xc9\x5c\x49\x1c\xc0\x81\xbb\x88\xbb\x80\xb2\x3d\x8c\x92\x33\xf4\x07\x02\x39\xcf\x5d\x41\xdb\x41\xa6\xe5\x8e\x20\xad\x92\x38\xf5\x41\x1b\x83\x82\x10\x3c\xd5\xc1\x2c\x22\x5b\x11\x04\x5f\x56\xf9\xa7\xbb\x87\x1c\xd9\xfa\x11\x5f\xb2\xcd\x26\x5b\xe7\x8f\xef\x70\xd0\xd2\x72\x10\xd0\x9e\x46\x2a\xbd\xeb\x8c\xa6\x0a\x07\xe5\x9c\xb2\xd2\x83\xeb\xc8\xf0\xf9\x7a\x73\xf5\x29\x5b\xe7\xd9\xef\xab\xdb\x55\xfe\x08\x76\xb8\x59\xe5\xeb\xeb\xed\x16\x37\x77\x1b\x64\xb8\xcf\x36\xf9\xea\xea\xe1\x36\xdb\xe0\xfe\x61\x73\x7f\xb7\xbd\x4e\xb0\xa5\xe8\x8a\x22\xfe\xfb\x99\xd7\xc3\xf6\x1c\xa1\x22\x51\xda\xf8\x29\x89\x47\x0e\xf0\x2d\x07\x53\xa1\x55\x7b\x82\xa3\x92\xf4\x9e\x2a\x28\x94\xdc\xf5\x3f\xbc\xd4\xc8\xa5\x0c\xdb\x66\x98\xf9\x3f\x0f\x12\xab\x1a\x96\x65\x01\x4f\x84\xdf\x5a\x91\x6e\x99\xa6\x87\xc3\x21\x69\x6c\x48\xd8\x35\xa9\x19\xe9\x7c\xfa\x21\x99\xcd\xbe\xcd\x00\x20\x4d\xd1\x6a\x2f\x71\x39\x91\x76\xa7\xba\xe8\x8a\xbb\x92\x2b\xf2\x10\x46\xc9\xc1\x0a\x39\x3f\x74\xc7\xd6\x25\xbe\x1d\x17\x13\xd6\x72\xe7\xc7\x16\x0f\x1b\x76\x05\xb9\x11\x3e\xb6\xc7\xea\x12\x97\x4f\xdd\x5e\xa8\x8b\x4a\xda\xee\xf9\x6f\xaa\x86\xdc\x68\x4f\xae\x3f\x09\x8e\x77\x10\x7d\xfc\xf1\x19\xf4\x95\xca\x20\xe4\x93\x01\x1d\xa1\x4b\xd4\xc1\x96\xf1\xfa\xe6\x86\x9b\x05\xaa\xe2\x02\xe3\x14\xf1\xd9\xab\x78\x9b\x78\x0f\xc3\x4d\xc2\x5d\x22\xbc\x15\xa7\x6d\x33\xbf\x78\xf7\xd4\xa3\x6b\xcc\xa5\xd5\x3e\x89\x83\xfc\xc9\xdd\x5f\x17\x67\x7c\x7c\xfe\x55\x7b\xf3\xe6\x0c\x3c\x3e\xfd\x22\xe3\x09\xff\x83\xc2\x7b\xbc
\x7d\x0d\x37\x34\xc5\x40\x26\xda\x73\x88\xb5\x0a\x46\x9e\xe7\x72\x68\x4f\x17\xad\x4a\x09\xca\x9c\xa2\x88\x6f\x27\xd7\x50\x76\x4a\xab\x1e\x6f\x2d\xb2\x0c\x14\xaf\xe6\x73\x5c\xcc\x26\x1d\x47\xfe\x35\x21\x65\xcc\x20\x36\x2d\x7d\x38\xd5\x82\xc8\x42\x0b\x39\x15\xdf\x55\xde\x93\x8b\x9f\x29\x38\x92\xe0\xac\x9f\x18\x23\xac\xd6\x56\x99\x89\xfb\x74\xd1\xe2\x54\xa9\x6d\x33\x7a\x1b\x4b\xcf\xcc\x95\xf2\xf5\xf9\xe2\x74\x3d\x7f\x0a\x07\x1f\x70\xf9\x62\x27\xa3\xe4\x39\xe4\x97\xe1\x1e\x17\xb3\xe3\xec\x9f\x00\x00\x00\xff\xff\x8d\xba\x8d\xa8\xe6\x05\x00\x00") +var _unigram_tracerJs = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x8c\x94\x41\x6f\xdb\xc6\x13\xc5\xef\xfa\x14\xef\x68\x23\xfa\x8b\xc9\xbf\x97\x42\x69\x0a\xb0\x86\x9d\x08\x70\x64\x43\xa2\x1b\x18\x45\x0f\x4b\x72\x48\x2e\xba\xda\x21\x76\x67\xa5\x08\x81\xbf\x7b\x31\xa4\x68\xb9\x85\xdb\x86\x27\x41\x3b\xef\x37\x6f\xde\x0e\x99\x65\xb8\xe2\xfe\x18\x6c\xdb\x09\xfe\xff\xf6\xdd\x8f\x28\x3a\x42\xcb\xff\x23\xe9\x28\x50\xda\x21\x4f\xd2\x71\x88\xb3\x2c\x43\xd1\xd9\x88\xc6\x3a\x82\x8d\xe8\x4d\x10\x70\x03\xf9\x5b\xbd\xb3\x65\x30\xe1\xb8\x98\x65\xd9\xa8\x79\xf5\x58\x09\x4d\x20\x42\xe4\x46\x0e\x26\xd0\x12\x47\x4e\xa8\x8c\x47\xa0\xda\x46\x09\xb6\x4c\x42\xb0\x02\xe3\xeb\x8c\x03\x76\x5c\xdb\xe6\xa8\x48\x2b\x48\xbe\xa6\x30\xb4\x16\x0a\xbb\x38\xf9\xf8\xb8\x7e\xc0\x2d\xc5\x48\x01\x1f\xc9\x53\x30\x0e\xf7\xa9\x74\xb6\xc2\xad\xad\xc8\x47\x82\x89\xe8\xf5\x9f\xd8\x51\x8d\x72\xc0\xa9\xf0\x46\xad\x6c\x4f\x56\x70\xc3\xc9\xd7\x46\x2c\xfb\x39\xc8\xaa\x73\xec\x29\x44\xcb\x1e\x3f\x4c\xad\x4e\xc0\x39\x38\x28\xe4\xc2\x88\x0e\x10\xc0\xbd\xea\x2e\x61\xfc\x11\xce\xc8\x59\xfa\x1d\x81\x9c\xe7\xae\x61\xfd\xd0\xa6\xe3\x9e\x20\x9d\x11\x9d\xfa\x60\x9d\x43\x49\x48\x91\x9a\xe4\xe6\x4a\x2b\x93\xe0\xcb\xaa\xf8\x74\xf7\x50\x20\x5f\x3f\xe2\x4b\xbe\xd9\xe4\xeb\xe2\xf1\x3d\x0e\x56\x3a\x4e\x02\xda\xd3\x88\xb2\xbb\xde\x59\xaa\x71\x30\x21\x18\x2f\x47\x70\xa3\x84\xcf\xd7\x9b\xab\x4f\xf9\xba\xc8\x7f\x59\xdd\xae\x8a\x47\x70\xc0\xcd\xaa\x58\x5f\x6f\xb7\xb8\xb9\xdb\x20\xc7\x7d\xbe\x29\x56\x57\x0f\xb7\xf9\x06\xf7\x0f\x9b\xfb\xbb\xed\xf5\x02\x5b\x52\x57\xa4\xfa\xff\xce\xbc\x19\x6e\x2f\x10\x6a\x12\x63\x5d\x9c\x92\x78\xe4\x84\xd8\x71\x72\x35\x3a\xb3\x27\x04\xaa\xc8\xee\xa9\x86\x41\xc5\xfd\xf1\xbb\x2f\x55\x59\xc6\xb1\x6f\x87\x99\xff\x71\x21\xb1\x6a\xe0\x59\xe6\x88\x44\xf8\xa9\x13\xe9\x97\x59\x76\x38\x1c\x16\xad\x4f\x0b\x0e\x6d\xe6\x46\x5c\xcc\x7e\x5e\xcc\x66\xdf\x66\x00\x90\x65\xe8\x6c\x14\xbd\x1c\xc5\xee\x4c\xaf\xae\xb8\xaf\xb8\xa6\x08\x61\x54\x9c\xbc\x50\x88\x43\xb5\x96\x2e\xf1\xed\x69\x3e\x69\x3d\xf7\x71\x2c\x89\xf0\x69\x57\x52\x18\xe5\x63\xb9\x9e\x2e\xf1\xf6\xb9\x3a\x0a\xf5\xda\xc9\xfa\x3d\xff\x41\xf5\x90\x1b\xed\x29\x1c\x4f\x0d\xc7\x3d\x50\x1f\xbf\x7e\x06\x7d\xa5\x2a\x09\xc5\xc5\xa0\x56\xe9\x12\x4d\xf2\x95\x6e\xdf\x85\xe3\x76\x8e\xba\xbc\xc4\x38\x85\x3e\x7b\xa3\xbb\x89\x0f\x70\xdc\x2e\xb8\x5f\x08\x6f\x25\x58\xdf\x5e\x5c\xbe\x7f\xae\xb1\x0d\x2e\xa4\xb3\x71\xa1\x83\xfc\xc6\xfd\xef\x97\x67\xbd\x3e\x7f\x39\x7b\xf3\xe6\x2c\x7c\x7a\xfe\x45\x2e\x12\xfe\x45\x85\x0f\x78\xf7\x9a\x6e\x28\xd2\x40\x26\xec\x39\xc4\xc6\x24\x27\x2f\x73\x39\x74\xa7\x8d\x36\x95\x24\xe3\x4e\x51\xe8\xdb\xc9\x0d\x8c\x9f\xd2\x6a\xc6\x5d\x53\xca\x80\x78\x35\x9f\xa7\xf9\x6c\xea\x13\x28\xbe\xd6\xc8\x38\x37\x34\x9b\x2e\x7d\x58\xd5\x92\xc8\xc3\x0a\x05\xa3\xef\x2a\xef\x29\xe8\x67\x0a\x81\x24\x05\x1f\x27\xa2\xca\x1a\xeb\x8d\x9b\xd8\xa7\x8d\x96\x60\x2a\xeb\xdb\xd1\xdb\x78\xf4\xc2\x5c\x25\x5f\x5f\x5e\xdc\xc8\x3c\xa7\xf8\x1c\xcf\xd3\xec\xcf\x00\x00\x00\xff\xff\xf1\x91\x30\xae\xbd\x05\x00\x00") func unigram_tracerJsBytes() ([]byte, error) { return 
bindataRead( @@ -253,7 +253,7 @@ func unigram_tracerJs() (*asset, error) { } info := bindataFileInfo{name: "unigram_tracer.js", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x2f, 0x36, 0x14, 0xc2, 0xf6, 0xc3, 0x80, 0x2b, 0x4a, 0x11, 0x7d, 0xd5, 0x3e, 0xef, 0x23, 0xb5, 0xd6, 0xe6, 0xe6, 0x5, 0x41, 0xf6, 0x14, 0x7a, 0x39, 0xf7, 0xf8, 0xac, 0x89, 0x8e, 0x43, 0xe6}} + a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xc, 0xe6, 0x5c, 0x88, 0x18, 0xa7, 0x85, 0x61, 0x18, 0xc6, 0xec, 0x17, 0xfc, 0xdf, 0x9d, 0xc0, 0x1b, 0x49, 0xf8, 0x8d, 0xf1, 0xeb, 0x35, 0xf3, 0xd, 0x3e, 0xf6, 0xa3, 0xac, 0x8c, 0xba, 0x74}} return a, nil } @@ -348,25 +348,20 @@ func AssetNames() []string { // _bindata is a table, holding each asset generator, mapped to its name. var _bindata = map[string]func() (*asset, error){ - "4byte_tracer.js": _4byte_tracerJs, - - "bigram_tracer.js": bigram_tracerJs, - - "call_tracer.js": call_tracerJs, - - "evmdis_tracer.js": evmdis_tracerJs, - - "noop_tracer.js": noop_tracerJs, - - "opcount_tracer.js": opcount_tracerJs, - + "4byte_tracer.js": _4byte_tracerJs, + "bigram_tracer.js": bigram_tracerJs, + "call_tracer.js": call_tracerJs, + "evmdis_tracer.js": evmdis_tracerJs, + "noop_tracer.js": noop_tracerJs, + "opcount_tracer.js": opcount_tracerJs, "prestate_tracer.js": prestate_tracerJs, - - "trigram_tracer.js": trigram_tracerJs, - - "unigram_tracer.js": unigram_tracerJs, + "trigram_tracer.js": trigram_tracerJs, + "unigram_tracer.js": unigram_tracerJs, } +// AssetDebug is true if the assets were built with the debug flag enabled. +const AssetDebug = false + // AssetDir returns the file names below a certain // directory embedded in the file by go-bindata. // For example if you run go-bindata on data/... and data contains the diff --git a/eth/tracers/internal/tracers/prestate_tracer.js b/eth/tracers/internal/tracers/prestate_tracer.js index e0a22bf157..084c04ec46 100644 --- a/eth/tracers/internal/tracers/prestate_tracer.js +++ b/eth/tracers/internal/tracers/prestate_tracer.js @@ -55,7 +55,7 @@ var toBal = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16); this.prestate[toHex(ctx.to)].balance = '0x'+toBal.subtract(ctx.value).toString(16); - this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).toString(16); + this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add((ctx.gasUsed + ctx.intrinsicGas) * ctx.gasPrice).toString(16); // Decrement the caller's nonce, and remove empty create targets this.prestate[toHex(ctx.from)].nonce--; diff --git a/eth/tracers/internal/tracers/unigram_tracer.js b/eth/tracers/internal/tracers/unigram_tracer.js index 000fb13b1e..51107d8f3d 100644 --- a/eth/tracers/internal/tracers/unigram_tracer.js +++ b/eth/tracers/internal/tracers/unigram_tracer.js @@ -36,8 +36,6 @@ // result is invoked when all the opcodes have been iterated over and returns // the final result of the tracing. 
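
The prestate_tracer.js change above means the sender's balance in the reconstructed pre-state now adds back the full gas charge, `(gasUsed + intrinsicGas) * gasPrice`, on top of the transferred value. A hedged numeric sketch of that arithmetic (all figures invented for illustration):

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Reconstruct the sender's pre-state balance from its post-state balance:
	// add back the transferred value plus the full gas charge, where the gas
	// charge is (execution gas + intrinsic gas) * gas price.
	var (
		postBalance  = big.NewInt(978_999_000) // sender balance after the tx (illustrative)
		value        = big.NewInt(1_000)       // wei transferred
		gasUsed      = big.NewInt(0)           // execution gas (a plain transfer uses none)
		intrinsicGas = big.NewInt(21_000)      // base cost of the transaction
		gasPrice     = big.NewInt(1_000)       // wei per unit of gas
	)
	gasCharge := new(big.Int).Mul(new(big.Int).Add(gasUsed, intrinsicGas), gasPrice)
	preBalance := new(big.Int).Add(postBalance, new(big.Int).Add(value, gasCharge))
	fmt.Println(preBalance) // 1000000000
}
```
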
result: function(ctx) { - if(this.nops > 0){ - return this.hist; - } + return this.hist; }, } diff --git a/eth/tracers/tracer.go b/eth/tracers/tracer.go index 050fb05159..80775caa8e 100644 --- a/eth/tracers/tracer.go +++ b/eth/tracers/tracer.go @@ -27,10 +27,11 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - duktape "gopkg.in/olebedev/go-duktape.v3" + "gopkg.in/olebedev/go-duktape.v3" ) // bigIntegerJS is the minified version of https://github.com/peterolson/BigInteger.js. @@ -316,7 +317,7 @@ type Tracer struct { // New instantiates a new tracer instance. code specifies a Javascript snippet, // which must evaluate to an expression returning an object with 'step', 'fault' // and 'result' functions. -func New(code string) (*Tracer, error) { +func New(code string, txCtx vm.TxContext) (*Tracer, error) { // Resolve any tracers by name and assemble the tracer object if tracer, ok := tracer(code); ok { code = tracer @@ -335,6 +336,8 @@ func New(code string) (*Tracer, error) { depthValue: new(uint), refundValue: new(uint), } + tracer.ctx["gasPrice"] = txCtx.GasPrice + // Set up builtins for this environment tracer.vm.PushGlobalGoFunction("toHex", func(ctx *duktape.Context) int { ctx.PushString(hexutil.Encode(popSlice(ctx))) @@ -545,7 +548,19 @@ func (jst *Tracer) CaptureState(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost if jst.err == nil { // Initialize the context if it wasn't done yet if !jst.inited { - jst.ctx["block"] = env.BlockNumber.Uint64() + jst.ctx["block"] = env.Context.BlockNumber.Uint64() + // Compute intrinsic gas + isHomestead := env.ChainConfig().IsHomestead(env.Context.BlockNumber) + isIstanbul := env.ChainConfig().IsIstanbul(env.Context.BlockNumber) + var input []byte + if data, ok := jst.ctx["input"].([]byte); ok { + input = data + } + intrinsicGas, err := core.IntrinsicGas(input, nil, jst.ctx["type"] == "CREATE", isHomestead, isIstanbul) + if err != nil { + return err + } + jst.ctx["intrinsicGas"] = intrinsicGas jst.inited = true } // If tracing was interrupted, set the error and stop @@ -597,8 +612,8 @@ func (jst *Tracer) CaptureFault(env *vm.EVM, pc uint64, op vm.OpCode, gas, cost // CaptureEnd is called after the call finishes to finalize the tracing. 
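
Taken together, the constructor and CaptureState changes above mean a JS tracer is now created with its transaction context, so `ctx.gasPrice` is available immediately and `ctx.intrinsicGas` is computed on the first traced step. A minimal, hedged usage sketch (the JS snippet and gas price are illustrative, not taken from the patch):

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/ethereum/go-ethereum/core/vm"
	"github.com/ethereum/go-ethereum/eth/tracers"
)

func main() {
	// The transaction context is passed at construction time, so the JS
	// environment can read ctx.gasPrice even before execution starts.
	txCtx := vm.TxContext{GasPrice: big.NewInt(1_000_000_000)}
	code := "{step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice; }}"
	tracer, err := tracers.New(code, txCtx)
	if err != nil {
		panic(err)
	}
	// The tracer would then be handed to the EVM via
	// vm.Config{Debug: true, Tracer: tracer} before running a message.
	fmt.Printf("created tracer %T\n", tracer)
}
```
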
func (jst *Tracer) CaptureEnd(output []byte, gasUsed uint64, t time.Duration, err error) error { jst.ctx["output"] = output - jst.ctx["gasUsed"] = gasUsed jst.ctx["time"] = t.String() + jst.ctx["gasUsed"] = gasUsed if err != nil { jst.ctx["error"] = err.Error() diff --git a/eth/tracers/tracer_test.go b/eth/tracers/tracer_test.go index b4de998651..f28e14864b 100644 --- a/eth/tracers/tracer_test.go +++ b/eth/tracers/tracer_test.go @@ -17,7 +17,6 @@ package tracers import ( - "bytes" "encoding/json" "errors" "math/big" @@ -50,94 +49,77 @@ type dummyStatedb struct { func (*dummyStatedb) GetRefund() uint64 { return 1337 } -func runTrace(tracer *Tracer) (json.RawMessage, error) { - env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) - - contract := vm.NewContract(account{}, account{}, big.NewInt(0), 10000) - contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} - - _, err := env.Interpreter().Run(contract, []byte{}, false) - if err != nil { - return nil, err - } - return tracer.GetResult() -} - -// TestRegressionPanicSlice tests that we don't panic on bad arguments to memory access -func TestRegressionPanicSlice(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}") - if err != nil { - t.Fatal(err) - } - if _, err = runTrace(tracer); err != nil { - t.Fatal(err) - } -} - -// TestRegressionPanicSlice tests that we don't panic on bad arguments to stack peeks -func TestRegressionPanicPeek(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}") - if err != nil { - t.Fatal(err) - } - if _, err = runTrace(tracer); err != nil { - t.Fatal(err) - } -} - -// TestRegressionPanicSlice tests that we don't panic on bad arguments to memory getUint -func TestRegressionPanicGetUint(t *testing.T) { - tracer, err := New("{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}") - if err != nil { - t.Fatal(err) - } - if _, err = runTrace(tracer); err != nil { - t.Fatal(err) - } +type vmContext struct { + blockCtx vm.BlockContext + txCtx vm.TxContext } -func TestTracing(t *testing.T) { - tracer, err := New("{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}") - if err != nil { - t.Fatal(err) - } - - ret, err := runTrace(tracer) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(ret, []byte("3")) { - t.Errorf("Expected return value to be 3, got %s", string(ret)) - } +func testCtx() *vmContext { + return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} } -func TestStack(t *testing.T) { - tracer, err := New("{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}") - if err != nil { - t.Fatal(err) - } +func runTrace(tracer *Tracer, vmctx *vmContext) (json.RawMessage, error) { + env := vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + var ( + startGas uint64 = 10000 + value = big.NewInt(0) + ) + contract := vm.NewContract(account{}, account{}, value, startGas) + contract.Code = 
[]byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} - ret, err := runTrace(tracer) + tracer.CaptureStart(contract.Caller(), contract.Address(), false, []byte{}, startGas, value) + ret, err := env.Interpreter().Run(contract, []byte{}, false) + tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err) if err != nil { - t.Fatal(err) - } - if !bytes.Equal(ret, []byte("[0,1,2]")) { - t.Errorf("Expected return value to be [0,1,2], got %s", string(ret)) + return nil, err } + return tracer.GetResult() } -func TestOpcodes(t *testing.T) { - tracer, err := New("{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}") - if err != nil { - t.Fatal(err) - } - - ret, err := runTrace(tracer) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(ret, []byte("[\"PUSH1\",\"PUSH1\",\"STOP\"]")) { - t.Errorf("Expected return value to be [\"PUSH1\",\"PUSH1\",\"STOP\"], got %s", string(ret)) +func TestTracer(t *testing.T) { + execTracer := func(code string) []byte { + t.Helper() + ctx := &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} + tracer, err := New(code, ctx.txCtx) + if err != nil { + t.Fatal(err) + } + ret, err := runTrace(tracer, ctx) + if err != nil { + t.Fatal(err) + } + return ret + } + for i, tt := range []struct { + code string + want string + }{ + { // tests that we don't panic on bad arguments to memory access + code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", + want: `[{},{},{}]`, + }, { // tests that we don't panic on bad arguments to stack peeks + code: "{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", + want: `["0","0","0"]`, + }, { // tests that we don't panic on bad arguments to memory getUint + code: "{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", + want: `["0","0","0"]`, + }, { // tests some general counting + code: "{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", + want: `3`, + }, { // tests that depth is reported correctly + code: "{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}", + want: `[0,1,2]`, + }, { // tests to-string of opcodes + code: "{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", + want: `["PUSH1","PUSH1","STOP"]`, + }, { // tests intrinsic gas + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", + want: `"100000.6.21000"`, + }, + } { + if have := execTracer(tt.code); tt.want != string(have) { + t.Errorf("testcase %d: expected return value to be %s got %s\n\tcode: %v", i, tt.want, string(have), tt.code) + } } } @@ -145,7 +127,8 @@ func TestHalt(t *testing.T) { t.Skip("duktape doesn't support abortion") timeout := errors.New("stahp") - tracer, err := New("{step: function() { while(1); }, result: function() { return null; }}") + vmctx := testCtx() + tracer, err := New("{step: function() { while(1); }, result: function() { return null; }}", vmctx.txCtx) if err != nil { 
t.Fatal(err) } @@ -155,18 +138,18 @@ func TestHalt(t *testing.T) { tracer.Stop(timeout) }() - if _, err = runTrace(tracer); err.Error() != "stahp in server-side tracer function 'step'" { + if _, err = runTrace(tracer, vmctx); err.Error() != "stahp in server-side tracer function 'step'" { t.Errorf("Expected timeout error, got %v", err) } } func TestHaltBetweenSteps(t *testing.T) { - tracer, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }}") + vmctx := testCtx() + tracer, err := New("{step: function() {}, fault: function() {}, result: function() { return null; }}", vmctx.txCtx) if err != nil { t.Fatal(err) } - - env := vm.NewEVM(vm.Context{BlockNumber: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) contract := vm.NewContract(&account{}, &account{}, big.NewInt(0), 0) tracer.CaptureState(env, 0, 0, 0, 0, nil, nil, nil, nil, contract, 0, nil) diff --git a/eth/tracers/tracers_test.go b/eth/tracers/tracers_test.go index 18f8eb12aa..9dc4c69631 100644 --- a/eth/tracers/tracers_test.go +++ b/eth/tracers/tracers_test.go @@ -143,16 +143,18 @@ func TestPrestateTracerCreate2(t *testing.T) { result: 0x60f3f640a8508fC6a86d45DF051962668E1e8AC7 */ origin, _ := signer.Sender(tx) - context := vm.Context{ + txContext := vm.TxContext{ + Origin: origin, + GasPrice: big.NewInt(1), + } + context := vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, - Origin: origin, Coinbase: common.Address{}, BlockNumber: new(big.Int).SetUint64(8000000), Time: new(big.Int).SetUint64(5), Difficulty: big.NewInt(0x30000), GasLimit: uint64(6000000), - GasPrice: big.NewInt(1), } alloc := core.GenesisAlloc{} @@ -171,11 +173,11 @@ func TestPrestateTracerCreate2(t *testing.T) { _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), alloc, false) // Create the tracer, the EVM environment and run it - tracer, err := New("prestateTracer") + tracer, err := New("prestateTracer", txContext) if err != nil { t.Fatalf("failed to create call tracer: %v", err) } - evm := vm.NewEVM(context, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) + evm := vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vm.Config{Debug: true, Tracer: tracer}) msg, err := tx.AsMessage(signer) if err != nil { @@ -230,26 +232,27 @@ func TestCallTracer(t *testing.T) { } signer := types.MakeSigner(test.Genesis.Config, new(big.Int).SetUint64(uint64(test.Context.Number))) origin, _ := signer.Sender(tx) - - context := vm.Context{ + txContext := vm.TxContext{ + Origin: origin, + GasPrice: tx.GasPrice(), + } + context := vm.BlockContext{ CanTransfer: core.CanTransfer, Transfer: core.Transfer, - Origin: origin, Coinbase: test.Context.Miner, BlockNumber: new(big.Int).SetUint64(uint64(test.Context.Number)), Time: new(big.Int).SetUint64(uint64(test.Context.Time)), Difficulty: (*big.Int)(test.Context.Difficulty), GasLimit: uint64(test.Context.GasLimit), - GasPrice: tx.GasPrice(), } _, statedb := tests.MakePreState(rawdb.NewMemoryDatabase(), test.Genesis.Alloc, false) // Create the tracer, the EVM environment and run it - tracer, err := New("callTracer") + tracer, err := New("callTracer", txContext) if err != nil { t.Fatalf("failed to create call tracer: %v", err) } - evm := vm.NewEVM(context, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) + evm 
:= vm.NewEVM(context, txContext, statedb, test.Genesis.Config, vm.Config{Debug: true, Tracer: tracer}) msg, err := tx.AsMessage(signer) if err != nil { diff --git a/ethclient/ethclient.go b/ethclient/ethclient.go index 8dc34a835e..a17696356c 100644 --- a/ethclient/ethclient.go +++ b/ethclient/ethclient.go @@ -28,7 +28,6 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" ) @@ -521,7 +520,7 @@ func (ec *Client) EstimateGas(ctx context.Context, msg ethereum.CallMsg) (uint64 // If the transaction was a contract creation use the TransactionReceipt method to get the // contract address after the transaction has been mined. func (ec *Client) SendTransaction(ctx context.Context, tx *types.Transaction) error { - data, err := rlp.EncodeToBytes(tx) + data, err := tx.MarshalBinary() if err != nil { return err } diff --git a/ethclient/ethclient_test.go b/ethclient/ethclient_test.go index 0ca72c6ee7..9a5a45e34f 100644 --- a/ethclient/ethclient_test.go +++ b/ethclient/ethclient_test.go @@ -17,6 +17,7 @@ package ethclient import ( + "bytes" "context" "errors" "fmt" @@ -33,8 +34,10 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rpc" ) // Verify that Client implements the ethereum interfaces. @@ -193,7 +196,7 @@ func newTestBackend(t *testing.T) (*node.Node, []*types.Block) { t.Fatalf("can't create new node: %v", err) } // Create Ethereum Service - config := ð.Config{Genesis: genesis} + config := ðconfig.Config{Genesis: genesis} config.Ethash.PowMode = ethash.ModeFake ethservice, err := eth.New(n, config) if err != nil { @@ -229,12 +232,48 @@ func generateTestChain() (*core.Genesis, []*types.Block) { return genesis, blocks } -func TestHeader(t *testing.T) { +func TestEthClient(t *testing.T) { backend, chain := newTestBackend(t) client, _ := backend.Attach() defer backend.Close() defer client.Close() + tests := map[string]struct { + test func(t *testing.T) + }{ + "TestHeader": { + func(t *testing.T) { testHeader(t, chain, client) }, + }, + "TestBalanceAt": { + func(t *testing.T) { testBalanceAt(t, client) }, + }, + "TestTxInBlockInterrupted": { + func(t *testing.T) { testTransactionInBlockInterrupted(t, client) }, + }, + "TestChainID": { + func(t *testing.T) { testChainID(t, client) }, + }, + "TestGetBlock": { + func(t *testing.T) { testGetBlock(t, client) }, + }, + "TestStatusFunctions": { + func(t *testing.T) { testStatusFunctions(t, client) }, + }, + "TestCallContract": { + func(t *testing.T) { testCallContract(t, client) }, + }, + "TestAtFunctions": { + func(t *testing.T) { testAtFunctions(t, client) }, + }, + } + + t.Parallel() + for name, tt := range tests { + t.Run(name, tt.test) + } +} + +func testHeader(t *testing.T, chain []*types.Block, client *rpc.Client) { tests := map[string]struct { block *big.Int want *types.Header @@ -273,12 +312,7 @@ func TestHeader(t *testing.T) { } } -func TestBalanceAt(t *testing.T) { - backend, _ := newTestBackend(t) - client, _ := backend.Attach() - defer backend.Close() - defer client.Close() - +func testBalanceAt(t *testing.T, client *rpc.Client) { tests := map[string]struct { account common.Address block *big.Int @@ -319,31 +353,32 @@ func 
TestBalanceAt(t *testing.T) { } } -func TestTransactionInBlockInterrupted(t *testing.T) { - backend, _ := newTestBackend(t) - client, _ := backend.Attach() - defer backend.Close() - defer client.Close() - +func testTransactionInBlockInterrupted(t *testing.T, client *rpc.Client) { ec := NewClient(client) + + // Get current block by number + block, err := ec.BlockByNumber(context.Background(), nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + // Test tx in block interupted ctx, cancel := context.WithCancel(context.Background()) cancel() - tx, err := ec.TransactionInBlock(ctx, common.Hash{1}, 1) + tx, err := ec.TransactionInBlock(ctx, block.Hash(), 1) if tx != nil { t.Fatal("transaction should be nil") } - if err == nil { - t.Fatal("error should not be nil") + if err == nil || err == ethereum.NotFound { + t.Fatal("error should not be nil/notfound") + } + // Test tx in block not found + if _, err := ec.TransactionInBlock(context.Background(), block.Hash(), 1); err != ethereum.NotFound { + t.Fatal("error should be ethereum.NotFound") } } -func TestChainID(t *testing.T) { - backend, _ := newTestBackend(t) - client, _ := backend.Attach() - defer backend.Close() - defer client.Close() +func testChainID(t *testing.T, client *rpc.Client) { ec := NewClient(client) - id, err := ec.ChainID(context.Background()) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -353,13 +388,9 @@ func TestChainID(t *testing.T) { } } -func TestBlockNumber(t *testing.T) { - backend, _ := newTestBackend(t) - client, _ := backend.Attach() - defer backend.Close() - defer client.Close() +func testGetBlock(t *testing.T, client *rpc.Client) { ec := NewClient(client) - + // Get current block number blockNumber, err := ec.BlockNumber(context.Background()) if err != nil { t.Fatalf("unexpected error: %v", err) @@ -367,4 +398,177 @@ func TestBlockNumber(t *testing.T) { if blockNumber != 1 { t.Fatalf("BlockNumber returned wrong number: %d", blockNumber) } + // Get current block by number + block, err := ec.BlockByNumber(context.Background(), new(big.Int).SetUint64(blockNumber)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if block.NumberU64() != blockNumber { + t.Fatalf("BlockByNumber returned wrong block: want %d got %d", blockNumber, block.NumberU64()) + } + // Get current block by hash + blockH, err := ec.BlockByHash(context.Background(), block.Hash()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if block.Hash() != blockH.Hash() { + t.Fatalf("BlockByHash returned wrong block: want %v got %v", block.Hash().Hex(), blockH.Hash().Hex()) + } + // Get header by number + header, err := ec.HeaderByNumber(context.Background(), new(big.Int).SetUint64(blockNumber)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if block.Header().Hash() != header.Hash() { + t.Fatalf("HeaderByNumber returned wrong header: want %v got %v", block.Header().Hash().Hex(), header.Hash().Hex()) + } + // Get header by hash + headerH, err := ec.HeaderByHash(context.Background(), block.Hash()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if block.Header().Hash() != headerH.Hash() { + t.Fatalf("HeaderByHash returned wrong header: want %v got %v", block.Header().Hash().Hex(), headerH.Hash().Hex()) + } +} + +func testStatusFunctions(t *testing.T, client *rpc.Client) { + ec := NewClient(client) + + // Sync progress + progress, err := ec.SyncProgress(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if progress != nil { + t.Fatalf("unexpected 
progress: %v", progress) + } + // NetworkID + networkID, err := ec.NetworkID(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if networkID.Cmp(big.NewInt(0)) != 0 { + t.Fatalf("unexpected networkID: %v", networkID) + } + // SuggestGasPrice (should suggest 1 Gwei) + gasPrice, err := ec.SuggestGasPrice(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gasPrice.Cmp(big.NewInt(1000000000)) != 0 { + t.Fatalf("unexpected gas price: %v", gasPrice) + } +} + +func testCallContract(t *testing.T, client *rpc.Client) { + ec := NewClient(client) + + // EstimateGas + msg := ethereum.CallMsg{ + From: testAddr, + To: &common.Address{}, + Gas: 21000, + GasPrice: big.NewInt(1), + Value: big.NewInt(1), + } + gas, err := ec.EstimateGas(context.Background(), msg) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gas != 21000 { + t.Fatalf("unexpected gas price: %v", gas) + } + // CallContract + if _, err := ec.CallContract(context.Background(), msg, big.NewInt(1)); err != nil { + t.Fatalf("unexpected error: %v", err) + } + // PendingCallCOntract + if _, err := ec.PendingCallContract(context.Background(), msg); err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func testAtFunctions(t *testing.T, client *rpc.Client) { + ec := NewClient(client) + // send a transaction for some interesting pending status + sendTransaction(ec) + time.Sleep(100 * time.Millisecond) + // Check pending transaction count + pending, err := ec.PendingTransactionCount(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if pending != 1 { + t.Fatalf("unexpected pending, wanted 1 got: %v", pending) + } + // Query balance + balance, err := ec.BalanceAt(context.Background(), testAddr, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + penBalance, err := ec.PendingBalanceAt(context.Background(), testAddr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if balance.Cmp(penBalance) == 0 { + t.Fatalf("unexpected balance: %v %v", balance, penBalance) + } + // NonceAt + nonce, err := ec.NonceAt(context.Background(), testAddr, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + penNonce, err := ec.PendingNonceAt(context.Background(), testAddr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if penNonce != nonce+1 { + t.Fatalf("unexpected nonce: %v %v", nonce, penNonce) + } + // StorageAt + storage, err := ec.StorageAt(context.Background(), testAddr, common.Hash{}, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + penStorage, err := ec.PendingStorageAt(context.Background(), testAddr, common.Hash{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !bytes.Equal(storage, penStorage) { + t.Fatalf("unexpected storage: %v %v", storage, penStorage) + } + // CodeAt + code, err := ec.CodeAt(context.Background(), testAddr, nil) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + penCode, err := ec.PendingCodeAt(context.Background(), testAddr) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !bytes.Equal(code, penCode) { + t.Fatalf("unexpected code: %v %v", code, penCode) + } +} + +func sendTransaction(ec *Client) error { + // Retrieve chainID + chainID, err := ec.ChainID(context.Background()) + if err != nil { + return err + } + // Create transaction + tx := types.NewTransaction(0, common.Address{1}, big.NewInt(1), 22000, big.NewInt(1), nil) + signer := types.LatestSignerForChainID(chainID) + 
signature, err := crypto.Sign(signer.Hash(tx).Bytes(), testKey) + if err != nil { + return err + } + signedTx, err := tx.WithSignature(signer, signature) + if err != nil { + return err + } + // Send transaction + return ec.SendTransaction(context.Background(), signedTx) } diff --git a/ethclient/signer.go b/ethclient/signer.go index 74a93f1e2f..9de020b352 100644 --- a/ethclient/signer.go +++ b/ethclient/signer.go @@ -51,6 +51,9 @@ func (s *senderFromServer) Sender(tx *types.Transaction) (common.Address, error) return s.addr, nil } +func (s *senderFromServer) ChainID() *big.Int { + panic("can't sign with senderFromServer") +} func (s *senderFromServer) Hash(tx *types.Transaction) common.Hash { panic("can't sign with senderFromServer") } diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 80380db325..70ac7a91ac 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -84,24 +84,36 @@ type Database struct { // New returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. func New(file string, cache int, handles int, namespace string) (*Database, error) { - // Ensure we have some minimal caching and file guarantees - if cache < minCache { - cache = minCache - } - if handles < minHandles { - handles = minHandles - } + return NewCustom(file, namespace, func(options *opt.Options) { + // Ensure we have some minimal caching and file guarantees + if cache < minCache { + cache = minCache + } + if handles < minHandles { + handles = minHandles + } + // Set default options + options.OpenFilesCacheCapacity = handles + options.BlockCacheCapacity = cache / 2 * opt.MiB + options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + }) +} + +// NewCustom returns a wrapped LevelDB object. The namespace is the prefix that the +// metrics reporting should use for surfacing internal stats. +// The customize function allows the caller to modify the leveldb options. +func NewCustom(file string, namespace string, customize func(options *opt.Options)) (*Database, error) { + options := configureOptions(customize) logger := log.New("database", file) - logger.Info("Allocated cache and file handles", "cache", common.StorageSize(cache*1024*1024), "handles", handles) + usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 + logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} + if options.ReadOnly { + logCtx = append(logCtx, "readonly", "true") + } + logger.Info("Allocated cache and file handles", logCtx...) // Open the db and recover any potential corruptions - db, err := leveldb.OpenFile(file, &opt.Options{ - OpenFilesCacheCapacity: handles, - BlockCacheCapacity: cache / 2 * opt.MiB, - WriteBuffer: cache / 4 * opt.MiB, // Two of these are used internally - Filter: filter.NewBloomFilter(10), - DisableSeeksCompaction: true, - }) + db, err := leveldb.OpenFile(file, options) if _, corrupted := err.(*errors.ErrCorrupted); corrupted { db, err = leveldb.RecoverFile(file, nil) } @@ -133,6 +145,20 @@ func New(file string, cache int, handles int, namespace string) (*Database, erro return ldb, nil } +// configureOptions sets some default options, then runs the provided setter. 
+func configureOptions(customizeFn func(*opt.Options)) *opt.Options { + // Set default options + options := &opt.Options{ + Filter: filter.NewBloomFilter(10), + DisableSeeksCompaction: true, + } + // Allow caller to make custom modifications to the options + if customizeFn != nil { + customizeFn(options) + } + return options +} + // Close stops the metrics collection, flushes any pending data to disk and closes // all io accesses to the underlying key-value store. func (db *Database) Close() error { diff --git a/ethstats/ethstats.go b/ethstats/ethstats.go index 1828ad70fb..e0f4f95ff3 100644 --- a/ethstats/ethstats.go +++ b/ethstats/ethstats.go @@ -36,8 +36,8 @@ import ( "github.com/ethereum/go-ethereum/consensus" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + ethproto "github.com/ethereum/go-ethereum/eth/protocols/eth" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/les" "github.com/ethereum/go-ethereum/log" @@ -444,13 +444,15 @@ func (s *Service) login(conn *connWrapper) error { // Construct and send the login authentication infos := s.server.NodeInfo() - var network, protocol string + var protocols []string + for _, proto := range s.server.Protocols { + protocols = append(protocols, fmt.Sprintf("%s/%d", proto.Name, proto.Version)) + } + var network string if info := infos.Protocols["eth"]; info != nil { - network = fmt.Sprintf("%d", info.(*eth.NodeInfo).Network) - protocol = fmt.Sprintf("eth/%d", eth.ProtocolVersions[0]) + network = fmt.Sprintf("%d", info.(*ethproto.NodeInfo).Network) } else { network = fmt.Sprintf("%d", infos.Protocols["les"].(*les.NodeInfo).Network) - protocol = fmt.Sprintf("les/%d", les.ClientProtocolVersions[0]) } auth := &authMsg{ ID: s.node, @@ -459,7 +461,7 @@ func (s *Service) login(conn *connWrapper) error { Node: infos.Name, Port: infos.Ports.Listener, Network: network, - Protocol: protocol, + Protocol: strings.Join(protocols, ", "), API: "No", Os: runtime.GOOS, OsVer: runtime.GOARCH, diff --git a/event/event_test.go b/event/event_test.go index cc9fa5d7c8..bdad11f13d 100644 --- a/event/event_test.go +++ b/event/event_test.go @@ -29,7 +29,7 @@ func TestSubCloseUnsub(t *testing.T) { // the point of this test is **not** to panic var mux TypeMux mux.Stop() - sub := mux.Subscribe(int(0)) + sub := mux.Subscribe(0) sub.Unsubscribe() } diff --git a/event/feed_test.go b/event/feed_test.go index 93badaaee6..cdf29fdd73 100644 --- a/event/feed_test.go +++ b/event/feed_test.go @@ -27,8 +27,8 @@ import ( func TestFeedPanics(t *testing.T) { { var f Feed - f.Send(int(2)) - want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(int(0))} + f.Send(2) + want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)} if err := checkPanic(want, func() { f.Send(uint64(2)) }); err != nil { t.Error(err) } @@ -37,14 +37,14 @@ func TestFeedPanics(t *testing.T) { var f Feed ch := make(chan int) f.Subscribe(ch) - want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(int(0))} + want := feedTypeError{op: "Send", got: reflect.TypeOf(uint64(0)), want: reflect.TypeOf(0)} if err := checkPanic(want, func() { f.Send(uint64(2)) }); err != nil { t.Error(err) } } { var f Feed - f.Send(int(2)) + f.Send(2) want := feedTypeError{op: "Subscribe", got: reflect.TypeOf(make(chan uint64)), want: reflect.TypeOf(make(chan<- int))} if err := checkPanic(want, func() { 
f.Subscribe(make(chan uint64)) }); err != nil { t.Error(err) @@ -58,7 +58,7 @@ func TestFeedPanics(t *testing.T) { } { var f Feed - if err := checkPanic(errBadChannel, func() { f.Subscribe(int(0)) }); err != nil { + if err := checkPanic(errBadChannel, func() { f.Subscribe(0) }); err != nil { t.Error(err) } } diff --git a/event/subscription.go b/event/subscription.go index c80d171f3a..6c62874719 100644 --- a/event/subscription.go +++ b/event/subscription.go @@ -95,6 +95,26 @@ func (s *funcSub) Err() <-chan error { // Resubscribe applies backoff between calls to fn. The time between calls is adapted // based on the error rate, but will never exceed backoffMax. func Resubscribe(backoffMax time.Duration, fn ResubscribeFunc) Subscription { + return ResubscribeErr(backoffMax, func(ctx context.Context, _ error) (Subscription, error) { + return fn(ctx) + }) +} + +// A ResubscribeFunc attempts to establish a subscription. +type ResubscribeFunc func(context.Context) (Subscription, error) + +// ResubscribeErr calls fn repeatedly to keep a subscription established. When the +// subscription is established, ResubscribeErr waits for it to fail and calls fn again. This +// process repeats until Unsubscribe is called or the active subscription ends +// successfully. +// +// The difference between Resubscribe and ResubscribeErr is that with ResubscribeErr, +// the error of the failing subscription is available to the callback for logging +// purposes. +// +// ResubscribeErr applies backoff between calls to fn. The time between calls is adapted +// based on the error rate, but will never exceed backoffMax. +func ResubscribeErr(backoffMax time.Duration, fn ResubscribeErrFunc) Subscription { s := &resubscribeSub{ waitTime: backoffMax / 10, backoffMax: backoffMax, @@ -106,15 +126,18 @@ func Resubscribe(backoffMax time.Duration, fn ResubscribeFunc) Subscription { return s } -// A ResubscribeFunc attempts to establish a subscription. -type ResubscribeFunc func(context.Context) (Subscription, error) +// A ResubscribeErrFunc attempts to establish a subscription. +// For every call but the first, the second argument to this function is +// the error that occurred with the previous subscription. 
+type ResubscribeErrFunc func(context.Context, error) (Subscription, error) type resubscribeSub struct { - fn ResubscribeFunc + fn ResubscribeErrFunc err chan error unsub chan struct{} unsubOnce sync.Once lastTry mclock.AbsTime + lastSubErr error waitTime, backoffMax time.Duration } @@ -149,7 +172,7 @@ func (s *resubscribeSub) subscribe() Subscription { s.lastTry = mclock.Now() ctx, cancel := context.WithCancel(context.Background()) go func() { - rsub, err := s.fn(ctx) + rsub, err := s.fn(ctx, s.lastSubErr) sub = rsub subscribed <- err }() @@ -178,6 +201,7 @@ func (s *resubscribeSub) waitForError(sub Subscription) bool { defer sub.Unsubscribe() select { case err := <-sub.Err(): + s.lastSubErr = err return err == nil case <-s.unsub: return true diff --git a/event/subscription_test.go b/event/subscription_test.go index c48be3aa30..ba081705c4 100644 --- a/event/subscription_test.go +++ b/event/subscription_test.go @@ -19,6 +19,8 @@ package event import ( "context" "errors" + "fmt" + "reflect" "testing" "time" ) @@ -118,3 +120,37 @@ func TestResubscribeAbort(t *testing.T) { t.Fatal(err) } } + +func TestResubscribeWithErrorHandler(t *testing.T) { + t.Parallel() + + var i int + nfails := 6 + subErrs := make([]string, 0) + sub := ResubscribeErr(100*time.Millisecond, func(ctx context.Context, lastErr error) (Subscription, error) { + i++ + var lastErrVal string + if lastErr != nil { + lastErrVal = lastErr.Error() + } + subErrs = append(subErrs, lastErrVal) + sub := NewSubscription(func(unsubscribed <-chan struct{}) error { + if i < nfails { + return fmt.Errorf("err-%v", i) + } else { + return nil + } + }) + return sub, nil + }) + + <-sub.Err() + if i != nfails { + t.Fatalf("resubscribe function called %d times, want %d times", i, nfails) + } + + expectedSubErrs := []string{"", "err-1", "err-2", "err-3", "err-4", "err-5"} + if !reflect.DeepEqual(subErrs, expectedSubErrs) { + t.Fatalf("unexpected subscription errors %v, want %v", subErrs, expectedSubErrs) + } +} diff --git a/go.mod b/go.mod old mode 100755 new mode 100644 index ae1cf64aaf..96a217ed06 --- a/go.mod +++ b/go.mod @@ -3,70 +3,55 @@ module github.com/ethereum/go-ethereum go 1.13 require ( - github.com/Azure/azure-pipeline-go v0.2.2 // indirect github.com/Azure/azure-storage-blob-go v0.7.0 - github.com/Azure/go-autorest/autorest/adal v0.8.0 // indirect - github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 // indirect github.com/VictoriaMetrics/fastcache v1.5.7 - github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 github.com/aws/aws-sdk-go v1.25.48 - github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 + github.com/btcsuite/btcd v0.20.1-beta github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 + github.com/consensys/gurvy v0.3.8 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea - github.com/dlclark/regexp2 v1.2.0 // indirect github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 - github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 // indirect - github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c - github.com/fatih/color v1.3.0 - github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc + github.com/edsrzf/mmap-go v1.0.0 + github.com/fatih/color v1.7.0 + github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff - github.com/go-ole/go-ole v1.2.1 // indirect 
- github.com/go-sourcemap/sourcemap v2.1.2+incompatible // indirect github.com/go-stack/stack v1.8.0 - github.com/golang/protobuf v1.4.2 - github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 + github.com/golang/protobuf v1.4.3 + github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa - github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 - github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 - github.com/hashicorp/golang-lru v0.5.4 + github.com/google/uuid v1.1.5 + github.com/gorilla/websocket v1.4.2 + github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d + github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.1.1 - github.com/huin/goupnp v1.0.0 - github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 + github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031 + github.com/influxdata/influxdb v1.8.3 github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 - github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 + github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e + github.com/julienschmidt/httprouter v1.2.0 github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 - github.com/kr/pretty v0.1.0 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-colorable v0.1.0 github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035 - github.com/naoina/go-stringutil v0.1.0 // indirect github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c - github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 - github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 + github.com/prometheus/tsdb v0.7.1 github.com/rjeczalik/notify v0.9.1 - github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 - github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 // indirect + github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v2.20.5+incompatible github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 - github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 - github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 // indirect - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef - github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/mobile v0.0.0-20200801112145-973feb4309de // indirect - golang.org/x/net v0.0.0-20200822124328-c89045814202 // indirect - golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c golang.org/x/text v0.3.3 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 gopkg.in/urfave/cli.v1 v1.20.0 - gotest.tools v2.2.0+incompatible ) diff --git a/go.sum b/go.sum old mode 100755 new mode 100644 index 10bec96411..af76759c51 --- a/go.sum +++ b/go.sum @@ -1,3 +1,24 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod 
h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.43.0/go.mod h1:BOSR3VbTLkk6FDC/TcffxP4NF/FFBGA5ku+jvKOP7pg= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot6ltoThhY= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= @@ -21,31 +42,69 @@ github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VY github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6 h1:fLjPD/aNc3UIOA6tDi6QXUemppXK3P9BI7mr2hd6gx8= github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VictoriaMetrics/fastcache v1.5.7 h1:4y6y0G8PRzszQUYIQHHssv/jgPHAb5qQuuDNdCbyAgw= github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6RoTu1dAWCbrk+6WsEM8= +github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 
h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847 h1:rtI0fD4oG/8eVokGVPYJEW1F88p1ZNgXiEIs9thEE4A= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= github.com/aristanetworks/goarista v0.0.0-20170210015632-ea17b1a17847/go.mod h1:D/tb0zPVXnP7fmsLZjtdUhSsumbK/ij54UXjjVgMGxQ= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6 h1:Eey/GGQ/E5Xp1P2Lyx1qj007hLZfbi0+CoVeJruGCtI= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= +github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/btcsuite/btcd v0.0.0-20171128150713-2e60448ffcc6/go.mod h1:Dmm/EzmjnCiweXmzRIAiUWCInVmPgjkzgv5k4tVyXiQ= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= +github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= +github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= +github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= +github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= +github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= 
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9 h1:J82+/8rub3qSy0HxEnoYD8cs+HDlHWYrqYXe2Vqxluk= github.com/cloudflare/cloudflare-go v0.10.2-0.20190916151808-a80f83b9add9/go.mod h1:1MxXX1Ux4x6mqPmjkUgTP1CdXIBXKX7T+Jk9Gxrmx+U= +github.com/consensys/bavard v0.1.8-0.20210105233146-c16790d2aa8b/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= +github.com/consensys/goff v0.3.10/go.mod h1:xTldOBEHmFiYS0gPXd3NsaEqZWlnmeWcRLWgD3ba3xc= +github.com/consensys/gurvy v0.3.8 h1:H2hvjvT2OFMgdMn5ZbhXqHt+F8DJ2clZW7Vmc0kFFxc= +github.com/consensys/gurvy v0.3.8/go.mod h1:sN75xnsiD593XnhbhvG2PkOy194pZBzqShWF/kwuW/g= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= +github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -53,6 +112,7 @@ github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea h1:j4317fAZh7X github.com/deckarep/golang-set v0.0.0-20180603214616-504e848d77ea/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk= github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= @@ -60,75 +120,175 @@ github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf h1:sh8rkQZavChcmak github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498 
h1:Y9vTBSsV4hSwPSj4bacAU/eSnV3dAxVpepaghAdhGoQ= github.com/dop251/goja v0.0.0-20200721192441-a695b0cdd498/go.mod h1:Mw6PkjjMXWbTj+nnj4s3QPXq1jaT0s5pC0iFD4+BOAA= -github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813 h1:NgO45/5mBLRVfiXerEFzH6ikcZ7DNRPS639xFg3ENzU= github.com/dvyukov/go-fuzz v0.0.0-20200318091601-be3528f3a813/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw= -github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c h1:JHHhtb9XWJrGNMcrVP6vyzO4dusgi/HnceHTgxSejUM= +github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v0.0.0-20160512033002-935e0e8a636c/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/fatih/color v1.3.0 h1:YehCCcyeQ6Km0D6+IapqPinWBK6y+0eB5umvZXK9WPs= +github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/ethereum/go-ethereum v1.9.25/go.mod h1:vMkFiYLHI4tgPw4k2j4MHKoovchFE8plZ0M9VMk4/oM= github.com/fatih/color v1.3.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc h1:jtW8jbpkO4YirRSyepBOH8E+2HEw6/hKkBvFPwhUN8c= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fjl/memsize v0.0.0-20180418122429-ca190fb6ffbc/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= +github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI= github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE= +github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= github.com/go-logfmt/logfmt v0.3.0/go.mod 
h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1 h1:2lOsA72HgjxAuMlKpFiCbHTvu44PIVkZ5hqm3RSdI/E= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy 
v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26 h1:lMm2hD9Fy0ynom5+85/pbdkiYcBqM1JWmhpAXLmy0fw= -github.com/golang/snappy v0.0.2-0.20200707131729-196ae77b8a26/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg= +github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64= github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989 h1:giknQ4mEuDFmmHSrGcbargOuLHQGtywqo4mheITex54= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.5 h1:kxhtnfFVi+rYdOALN0B3k9UT86zVJKfBimRaciULW4I= +github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.1-0.20190629185528-ae1634f6a989/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277 h1:E0whKxgp2ojts0FDgUA8dl62bmH0LxKanMoBr6MDTDM= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/graph-gophers/graphql-go v0.0.0-20191115155744-f33e81362277/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29 
h1:sezaKhEfPFg8W0Enm61B9Gs911H8iesGY5R8NDPtd1M= +github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= +github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.1.1 h1:4JywC80b+/hSfljFlEBLHrrh+CIONLDz9NuFl0af4Mw= github.com/holiman/uint256 v1.1.1/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= +github.com/huin/goupnp v1.0.1-0.20200620063722-49508fba0031 h1:HarGZ5h9HD9LgEg1yRVMXyfiw4wlXiLiYM2oMjeA/SE= +github.com/huin/goupnp 
v1.0.1-0.20200620063722-49508fba0031/go.mod h1:nNs7wvRfN1eKaMknBydLNQU6146XQim8t4h+q90biWo= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= -github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883 h1:FSeK4fZCo8u40n2JMnyAsd6x7+SbvoOMHvQOU/n10P4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/influxdata/flux v0.65.1/go.mod h1:J754/zds0vvpfwuq7Gc2wRdVwEodfpCFM7mYlOw2LqY= github.com/influxdata/influxdb v1.2.3-0.20180221223340-01288bdb0883/go.mod h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= +github.com/influxdata/influxdb v1.8.3 h1:WEypI1BQFTT4teLM+1qkEcvUi0dAvopAI/ir0vAiBg8= +github.com/influxdata/influxdb v1.8.3/go.mod h1:JugdFhsvvI8gadxOI6noqNeeBHvWNTbfYGtiAn+2jhI= +github.com/influxdata/influxql v1.1.1-0.20200828144457-65d3ef77d385/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= +github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= +github.com/influxdata/promql/v2 v2.12.0/go.mod h1:fxOPu+DY0bqCTCECchSRtWfc+0X19ybifQhZoQNF5D8= +github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bSgUQ7q5ZLSO+bKBGqJiCBGAl+9DxyW63zLTujjUlOE= +github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0= +github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458 h1:6OvNmYgJyexcZ3pYbTI9jWx5tHo1Dee/tWbLMfPe2TA= github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e h1:UvSe12bq+Uj2hWd8aOlwPmoZ+CITRFrdit+sDGfAg8U= +github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU= +github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21 h1:F/iKcka0K2LgnKy/fgSBf235AETtm1n1TvBzqu40LE0= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.1.1-0.20170430222011-975b5c4c7c21/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= 
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356 h1:I/yrLt2WilKxlQKCM52clh5rGzTKpVctGT1lH4Dc8Jw= github.com/karalabe/usb v0.0.0-20190919080040-51dc0efba356/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU= +github.com/kilic/bls12-381 v0.0.0-20201226121925-69dacb279461/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= +github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= @@ -138,17 +298,40 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leanovate/gopter v0.2.8/go.mod h1:gNcbPWNEWRe4lm+bycKqxUYoH5uoVje5SkOJ3uoLer8= +github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= +github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.0 h1:v2XXALHHh6zHfYTJ+cSkwtyffnaOyR1MXaA91mTrb8o= github.com/mattn/go-colorable v0.1.0/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d h1:oNAwILwmgWKFpuU+dXvI6dl9jG2mAWAZLX3r9s0PPiw= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty 
v0.0.5-0.20180830101745-3fb116b82035 h1:USWjF42jDCSEeikX/G1g40ZWnsPXN5WkZ4jMHZWyBK4= github.com/mattn/go-isatty v0.0.5-0.20180830101745-3fb116b82035/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/naoina/go-stringutil v0.1.0 h1:rCUeRUHjBjGTSHl0VC00jUPLz8/F9dDzYI70Hzifhks= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 h1:shk/vn9oCoOTmwcouEdwIeOtOGA/ELRUw/GwvxwfT+0= @@ -160,117 +343,297 @@ github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXW github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c h1:1RHs3tNxjXGHeul8z2t6H2N2TlAqpKe5yryJztRx4Jk= github.com/olekukonko/tablewriter v0.0.2-0.20190409134802-7e037d187b0c/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/opentracing/opentracing-go 
v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222 h1:goeTyGkArOZIVOMA0dQbyuPWGNQJZGPwPu/QS9GlpnA= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE= github.com/pborman/uuid v0.0.0-20170112150404-1b00554d8222/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM= github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0= +github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/term v0.0.0-20180730021639-bffc007b7fd5/go.mod h1:eCbImbZ95eXtAUIbLAuAVnBnwf83mjf6QIVH8SHYwqQ= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150 h1:ZeU+auZj1iNzN8iVhff6M38Mfu73FQiJve/GEXYJBjE= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/tsdb v0.6.2-0.20190402121629-4f204dcbc150/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE= github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho= -github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00 h1:8DPul/X0IT/1TNMIxoKLwdemEOBBHDC/K4EB16Cw5WE= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v0.0.0-20160617231935-a62a804a8a00/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521 h1:3hxavr+IHMsQBrYUPQM5v0CgENFktkkbg1sfpgM3h20= +github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xhandler v0.0.0-20160618193221-ed27b6fd6521/go.mod h1:RvLn4FgxWubrpZHtQLnOf6EwhN2hEMusxZOhcW9H3UQ= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v2.20.5+incompatible h1:tYH07UPoQt0OCQdgWWMgYHy3/a9bcxNpBIysykNIP7I= github.com/shirou/gopsutil v2.20.5+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.7.0/go.mod 
h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg= github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= -github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570 h1:gIlAHnH1vJb5vwEjIp5kBj/eu99p/bl0Ay2goiPe5xE= github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8OR4w3TdeIHIh1g6EMY5p0gVNOovcWC+1vpc7naMuAw= -github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= +github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4= github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208 h1:1cngl9mPEoITZG8s8cVcUy5CeIBYhEESkOB7m6Gmkrk= +github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/wsddn/go-ecdh v0.0.0-20161211032359-48726bab9208/go.mod h1:IotVbo4F+mw0EzQ08zFqg7pK3FebNXpaMsRy2RT+Ees= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap 
v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190909091759-094676da4a83/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint 
v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20200801112145-973feb4309de h1:OVJ6QQUBAesB8CZijKDSsXX7xYVtUhrkY0gwMfbi4p4= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20200801112145-973feb4309de/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd h1:ePuNC7PZ6O5BzgPn9bZayERXBdfZjUYoXEf5BTfDfh8= golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03iWnKLEWinaScsxF2Vm2o= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8 h1:AvbQYmiaaaza3cW3QXRyPo5kYgpFIzOAfeAAN7m3qQ4= golang.org/x/sys v0.0.0-20200824131525-c12d262b63d8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210105210732-16f7687f5001/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69 h1:yBHHx+XZqXJBm6Exke3N7V9gnlsyXxoCPEb1yVenjfk= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= +gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= 
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -282,20 +645,33 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 
h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/graphql/graphql.go b/graphql/graphql.go index 559da8aaaa..2374beb8e1 100644 --- a/graphql/graphql.go +++ b/graphql/graphql.go @@ -20,6 +20,8 @@ package graphql import ( "context" "errors" + "fmt" + "strconv" "time" "github.com/ethereum/go-ethereum" @@ -31,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/eth/filters" "github.com/ethereum/go-ethereum/internal/ethapi" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" ) @@ -39,6 +40,37 @@ var ( errBlockInvariant = errors.New("block objects must be instantiated with at least one of num or 
hash") ) +type Long int64 + +// ImplementsGraphQLType returns true if Long implements the provided GraphQL type. +func (b Long) ImplementsGraphQLType(name string) bool { return name == "Long" } + +// UnmarshalGraphQL unmarshals the provided GraphQL query data. +func (b *Long) UnmarshalGraphQL(input interface{}) error { + var err error + switch input := input.(type) { + case string: + // uncomment to support hex values + //if strings.HasPrefix(input, "0x") { + // // apply leniency and support hex representations of longs. + // value, err := hexutil.DecodeUint64(input) + // *b = Long(value) + // return err + //} else { + value, err := strconv.ParseInt(input, 10, 64) + *b = Long(value) + return err + //} + case int32: + *b = Long(input) + case int64: + *b = Long(input) + default: + err = fmt.Errorf("unexpected type %T for Long", input) + } + return err +} + // Account represents an Ethereum account at a particular block. type Account struct { backend ethapi.Backend @@ -77,7 +109,7 @@ func (a *Account) Code(ctx context.Context) (hexutil.Bytes, error) { if err != nil { return hexutil.Bytes{}, err } - return hexutil.Bytes(state.GetCode(a.address)), nil + return state.GetCode(a.address), nil } func (a *Account) Storage(ctx context.Context, args struct{ Slot common.Hash }) (common.Hash, error) { @@ -116,7 +148,7 @@ func (l *Log) Topics(ctx context.Context) []common.Hash { } func (l *Log) Data(ctx context.Context) hexutil.Bytes { - return hexutil.Bytes(l.log.Data) + return l.log.Data } // Transaction represents an Ethereum transaction. @@ -157,7 +189,7 @@ func (t *Transaction) InputData(ctx context.Context) (hexutil.Bytes, error) { if err != nil || tx == nil { return hexutil.Bytes{}, err } - return hexutil.Bytes(tx.Data()), nil + return tx.Data(), nil } func (t *Transaction) Gas(ctx context.Context) (hexutil.Uint64, error) { @@ -213,12 +245,8 @@ func (t *Transaction) From(ctx context.Context, args BlockNumberArgs) (*Account, if err != nil || tx == nil { return nil, err } - var signer types.Signer = types.HomesteadSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } + signer := types.LatestSigner(t.backend.ChainConfig()) from, _ := types.Sender(signer, tx) - return &Account{ backend: t.backend, address: from, @@ -259,30 +287,30 @@ func (t *Transaction) getReceipt(ctx context.Context) (*types.Receipt, error) { return receipts[t.index], nil } -func (t *Transaction) Status(ctx context.Context) (*hexutil.Uint64, error) { +func (t *Transaction) Status(ctx context.Context) (*Long, error) { receipt, err := t.getReceipt(ctx) if err != nil || receipt == nil { return nil, err } - ret := hexutil.Uint64(receipt.Status) + ret := Long(receipt.Status) return &ret, nil } -func (t *Transaction) GasUsed(ctx context.Context) (*hexutil.Uint64, error) { +func (t *Transaction) GasUsed(ctx context.Context) (*Long, error) { receipt, err := t.getReceipt(ctx) if err != nil || receipt == nil { return nil, err } - ret := hexutil.Uint64(receipt.GasUsed) + ret := Long(receipt.GasUsed) return &ret, nil } -func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*hexutil.Uint64, error) { +func (t *Transaction) CumulativeGasUsed(ctx context.Context) (*Long, error) { receipt, err := t.getReceipt(ctx) if err != nil || receipt == nil { return nil, err } - ret := hexutil.Uint64(receipt.CumulativeGasUsed) + ret := Long(receipt.CumulativeGasUsed) return &ret, nil } @@ -410,18 +438,18 @@ func (b *Block) resolveReceipts(ctx context.Context) ([]*types.Receipt, error) { if err != nil { return nil, err } - 
b.receipts = []*types.Receipt(receipts) + b.receipts = receipts } return b.receipts, nil } -func (b *Block) Number(ctx context.Context) (hexutil.Uint64, error) { +func (b *Block) Number(ctx context.Context) (Long, error) { header, err := b.resolveHeader(ctx) if err != nil { return 0, err } - return hexutil.Uint64(header.Number.Uint64()), nil + return Long(header.Number.Uint64()), nil } func (b *Block) Hash(ctx context.Context) (common.Hash, error) { @@ -435,20 +463,20 @@ func (b *Block) Hash(ctx context.Context) (common.Hash, error) { return b.hash, nil } -func (b *Block) GasLimit(ctx context.Context) (hexutil.Uint64, error) { +func (b *Block) GasLimit(ctx context.Context) (Long, error) { header, err := b.resolveHeader(ctx) if err != nil { return 0, err } - return hexutil.Uint64(header.GasLimit), nil + return Long(header.GasLimit), nil } -func (b *Block) GasUsed(ctx context.Context) (hexutil.Uint64, error) { +func (b *Block) GasUsed(ctx context.Context) (Long, error) { header, err := b.resolveHeader(ctx) if err != nil { return 0, err } - return hexutil.Uint64(header.GasUsed), nil + return Long(header.GasUsed), nil } func (b *Block) Parent(ctx context.Context) (*Block, error) { @@ -490,7 +518,7 @@ func (b *Block) Nonce(ctx context.Context) (hexutil.Bytes, error) { if err != nil { return hexutil.Bytes{}, err } - return hexutil.Bytes(header.Nonce[:]), nil + return header.Nonce[:], nil } func (b *Block) MixHash(ctx context.Context) (common.Hash, error) { @@ -564,7 +592,7 @@ func (b *Block) ExtraData(ctx context.Context) (hexutil.Bytes, error) { if err != nil { return hexutil.Bytes{}, err } - return hexutil.Bytes(header.Extra), nil + return header.Extra, nil } func (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) { @@ -572,7 +600,7 @@ func (b *Block) LogsBloom(ctx context.Context) (hexutil.Bytes, error) { if err != nil { return hexutil.Bytes{}, err } - return hexutil.Bytes(header.Bloom.Bytes()), nil + return header.Bloom.Bytes(), nil } func (b *Block) TotalDifficulty(ctx context.Context) (hexutil.Big, error) { @@ -777,20 +805,20 @@ type CallData struct { // CallResult encapsulates the result of an invocation of the `call` accessor. type CallResult struct { - data hexutil.Bytes // The return data from the call - gasUsed hexutil.Uint64 // The amount of gas used - status hexutil.Uint64 // The return status of the call - 0 for failure or 1 for success. + data hexutil.Bytes // The return data from the call + gasUsed Long // The amount of gas used + status Long // The return status of the call - 0 for failure or 1 for success. 
} func (c *CallResult) Data() hexutil.Bytes { return c.data } -func (c *CallResult) GasUsed() hexutil.Uint64 { +func (c *CallResult) GasUsed() Long { return c.gasUsed } -func (c *CallResult) Status() hexutil.Uint64 { +func (c *CallResult) Status() Long { return c.status } @@ -807,29 +835,29 @@ func (b *Block) Call(ctx context.Context, args struct { if err != nil { return nil, err } - status := hexutil.Uint64(1) + status := Long(1) if result.Failed() { status = 0 } return &CallResult{ data: result.ReturnData, - gasUsed: hexutil.Uint64(result.UsedGas), + gasUsed: Long(result.UsedGas), status: status, }, nil } func (b *Block) EstimateGas(ctx context.Context, args struct { Data ethapi.CallArgs -}) (hexutil.Uint64, error) { +}) (Long, error) { if b.numberOrHash == nil { _, err := b.resolveHeader(ctx) if err != nil { - return hexutil.Uint64(0), err + return 0, err } } gas, err := ethapi.DoEstimateGas(ctx, b.backend, args.Data, *b.numberOrHash, b.backend.RPCGasCap()) - return gas, err + return Long(gas), err } type Pending struct { @@ -877,23 +905,24 @@ func (p *Pending) Call(ctx context.Context, args struct { if err != nil { return nil, err } - status := hexutil.Uint64(1) + status := Long(1) if result.Failed() { status = 0 } return &CallResult{ data: result.ReturnData, - gasUsed: hexutil.Uint64(result.UsedGas), + gasUsed: Long(result.UsedGas), status: status, }, nil } func (p *Pending) EstimateGas(ctx context.Context, args struct { Data ethapi.CallArgs -}) (hexutil.Uint64, error) { +}) (Long, error) { pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) - return ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap()) + gas, err := ethapi.DoEstimateGas(ctx, p.backend, args.Data, pendingBlockNr, p.backend.RPCGasCap()) + return Long(gas), err } // Resolver is the top-level object in the GraphQL hierarchy. 
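The Long scalar introduced above replaces hexutil.Uint64 throughout these resolvers so that gas limits, gas usage and block numbers serialize as decimal JSON numbers rather than hex strings. As a rough illustration (not part of the diff), a hypothetical test in the same graphql package could exercise the new UnmarshalGraphQL like this:

// long_sketch_test.go (hypothetical, package graphql): exercises the Long scalar
// defined in the hunks above; decimal strings and native ints decode, hex does not.
package graphql

import "testing"

func TestLongUnmarshalSketch(t *testing.T) {
	var n Long
	// Decimal strings are parsed with strconv.ParseInt(input, 10, 64).
	if err := n.UnmarshalGraphQL("1337"); err != nil || n != 1337 {
		t.Fatalf("want 1337, got %d (err: %v)", n, err)
	}
	// Native GraphQL integers are accepted directly.
	if err := n.UnmarshalGraphQL(int32(7)); err != nil || n != 7 {
		t.Fatalf("want 7, got %d (err: %v)", n, err)
	}
	// Hex strings fail, because the hex branch in UnmarshalGraphQL is commented out.
	if err := n.UnmarshalGraphQL("0x10"); err == nil {
		t.Fatal("expected a parse error for hex input")
	}
}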
@@ -902,12 +931,15 @@ type Resolver struct { } func (r *Resolver) Block(ctx context.Context, args struct { - Number *hexutil.Uint64 + Number *Long Hash *common.Hash }) (*Block, error) { var block *Block if args.Number != nil { - number := rpc.BlockNumber(uint64(*args.Number)) + if *args.Number < 0 { + return nil, nil + } + number := rpc.BlockNumber(*args.Number) numberOrHash := rpc.BlockNumberOrHashWithNumber(number) block = &Block{ backend: r.backend, @@ -939,10 +971,10 @@ func (r *Resolver) Block(ctx context.Context, args struct { } func (r *Resolver) Blocks(ctx context.Context, args struct { - From hexutil.Uint64 - To *hexutil.Uint64 + From *Long + To *Long }) ([]*Block, error) { - from := rpc.BlockNumber(args.From) + from := rpc.BlockNumber(*args.From) var to rpc.BlockNumber if args.To != nil { @@ -985,7 +1017,7 @@ func (r *Resolver) Transaction(ctx context.Context, args struct{ Hash common.Has func (r *Resolver) SendRawTransaction(ctx context.Context, args struct{ Data hexutil.Bytes }) (common.Hash, error) { tx := new(types.Transaction) - if err := rlp.DecodeBytes(args.Data, tx); err != nil { + if err := tx.UnmarshalBinary(args.Data); err != nil { return common.Hash{}, err } hash, err := ethapi.SubmitTransaction(ctx, r.backend, tx) @@ -1040,10 +1072,6 @@ func (r *Resolver) GasPrice(ctx context.Context) (hexutil.Big, error) { return hexutil.Big(*price), err } -func (r *Resolver) ProtocolVersion(ctx context.Context) (int32, error) { - return int32(r.backend.ProtocolVersion()), nil -} - func (r *Resolver) ChainID(ctx context.Context) (hexutil.Big, error) { return hexutil.Big(*r.backend.ChainConfig().ChainID), nil } diff --git a/graphql/graphql_test.go b/graphql/graphql_test.go index 5ba9c95537..2f3b230329 100644 --- a/graphql/graphql_test.go +++ b/graphql/graphql_test.go @@ -19,17 +19,31 @@ package graphql import ( "fmt" "io/ioutil" + "math/big" "net/http" "strings" "testing" + "time" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/assert" ) func TestBuildSchema(t *testing.T) { - stack, err := node.New(&node.DefaultConfig) + ddir, err := ioutil.TempDir("", "graphql-buildschema") + if err != nil { + t.Fatalf("failed to create temporary datadir: %v", err) + } + // Copy config + conf := node.DefaultConfig + conf.DataDir = ddir + stack, err := node.New(&conf) if err != nil { t.Fatalf("could not create new node: %v", err) } @@ -40,28 +54,107 @@ func TestBuildSchema(t *testing.T) { } // Tests that a graphQL request is successfully handled when graphql is enabled on the specified endpoint -func TestGraphQLHTTPOnSamePort_GQLRequest_Successful(t *testing.T) { +func TestGraphQLBlockSerialization(t *testing.T) { stack := createNode(t, true) defer stack.Close() // start node if err := stack.Start(); err != nil { t.Fatalf("could not start node: %v", err) } - // create http request - body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}") - gqlReq, err := http.NewRequest(http.MethodGet, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body) - if err != nil { - t.Error("could not issue new http request ", err) - } - gqlReq.Header.Set("Content-Type", "application/json") - // read from response - resp := doHTTPRequest(t, gqlReq) - bodyBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("could not read from response 
body: %v", err) + + for i, tt := range []struct { + body string + want string + code int + }{ + { // Should return latest block + body: `{"query": "{block{number}}","variables": null}`, + want: `{"data":{"block":{"number":10}}}`, + code: 200, + }, + { // Should return info about latest block + body: `{"query": "{block{number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":{"number":10,"gasUsed":0,"gasLimit":11500000}}}`, + code: 200, + }, + { + body: `{"query": "{block(number:0){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":{"number":0,"gasUsed":0,"gasLimit":11500000}}}`, + code: 200, + }, + { + body: `{"query": "{block(number:-1){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":null}}`, + code: 200, + }, + { + body: `{"query": "{block(number:-500){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":null}}`, + code: 200, + }, + { + body: `{"query": "{block(number:\"0\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":{"number":0,"gasUsed":0,"gasLimit":11500000}}}`, + code: 200, + }, + { + body: `{"query": "{block(number:\"-33\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":null}}`, + code: 200, + }, + { + body: `{"query": "{block(number:\"1337\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"data":{"block":null}}`, + code: 200, + }, + { + body: `{"query": "{block(number:\"0xbad\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"errors":[{"message":"strconv.ParseInt: parsing \"0xbad\": invalid syntax"}],"data":{}}`, + code: 400, + }, + { // hex strings are currently not supported. If that's added to the spec, this test will need to change + body: `{"query": "{block(number:\"0x0\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"errors":[{"message":"strconv.ParseInt: parsing \"0x0\": invalid syntax"}],"data":{}}`, + code: 400, + }, + { + body: `{"query": "{block(number:\"a\"){number,gasUsed,gasLimit}}","variables": null}`, + want: `{"errors":[{"message":"strconv.ParseInt: parsing \"a\": invalid syntax"}],"data":{}}`, + code: 400, + }, + { + body: `{"query": "{bleh{number}}","variables": null}"`, + want: `{"errors":[{"message":"Cannot query field \"bleh\" on type \"Query\".","locations":[{"line":1,"column":2}]}]}`, + code: 400, + }, + // should return `estimateGas` as decimal + { + body: `{"query": "{block{ estimateGas(data:{}) }}"}`, + want: `{"data":{"block":{"estimateGas":53000}}}`, + code: 200, + }, + // should return `status` as decimal + { + body: `{"query": "{block {number call (data : {from : \"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b\", to: \"0x6295ee1b4f6dd65047762f924ecd367c17eabf8f\", data :\"0x12a7b914\"}){data status}}}"}`, + want: `{"data":{"block":{"number":10,"call":{"data":"0x","status":1}}}}`, + code: 200, + }, + } { + resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", strings.NewReader(tt.body)) + if err != nil { + t.Fatalf("could not post: %v", err) + } + bodyBytes, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("could not read from response body: %v", err) + } + if have := string(bodyBytes); have != tt.want { + t.Errorf("testcase %d %s,\nhave:\n%v\nwant:\n%v", i, tt.body, have, tt.want) + } + if tt.code != resp.StatusCode { + t.Errorf("testcase %d %s,\nwrong statuscode, have: %v, want: %v", i, tt.body, resp.StatusCode, tt.code) + } } - expected := "{\"data\":{\"block\":{\"number\":\"0x0\"}}}" - assert.Equal(t, expected, 
string(bodyBytes))
}
// Tests that a graphQL request is not handled successfully when graphql is not enabled on the specified endpoint
@@ -71,31 +164,21 @@ func TestGraphQLHTTPOnSamePort_GQLRequest_Unsuccessful(t *testing.T) {
if err := stack.Start(); err != nil {
t.Fatalf("could not start node: %v", err)
}
-
- // create http request
- body := strings.NewReader("{\"query\": \"{block{number}}\",\"variables\": null}")
- gqlReq, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/graphql", "127.0.0.1:9393"), body)
+ body := strings.NewReader(`{"query": "{block{number}}","variables": null}`)
+ resp, err := http.Post(fmt.Sprintf("%s/graphql", stack.HTTPEndpoint()), "application/json", body)
if err != nil {
- t.Error("could not issue new http request ", err)
- }
- gqlReq.Header.Set("Content-Type", "application/json")
- // read from response
- resp := doHTTPRequest(t, gqlReq)
- bodyBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatalf("could not read from response body: %v", err)
+ t.Fatalf("could not post: %v", err)
}
// make sure the request is not handled successfully
- assert.Equal(t, 404, resp.StatusCode)
- assert.Equal(t, "404 page not found\n", string(bodyBytes))
+ assert.Equal(t, http.StatusNotFound, resp.StatusCode)
}
func createNode(t *testing.T, gqlEnabled bool) *node.Node {
stack, err := node.New(&node.Config{
HTTPHost: "127.0.0.1",
- HTTPPort: 9393,
+ HTTPPort: 0,
WSHost: "127.0.0.1",
- WSPort: 9393,
+ WSPort: 0,
})
if err != nil {
t.Fatalf("could not create node: %v", err)
@@ -103,32 +186,43 @@ func createNode(t *testing.T, gqlEnabled bool) *node.Node {
if !gqlEnabled {
return stack
}
-
- createGQLService(t, stack, "127.0.0.1:9393")
-
+ createGQLService(t, stack)
return stack
}
-func createGQLService(t *testing.T, stack *node.Node, endpoint string) {
+func createGQLService(t *testing.T, stack *node.Node) {
// create backend
- ethBackend, err := eth.New(stack, &eth.DefaultConfig)
+ ethConf := &ethconfig.Config{
+ Genesis: &core.Genesis{
+ Config: params.AllEthashProtocolChanges,
+ GasLimit: 11500000,
+ Difficulty: big.NewInt(1048576),
+ },
+ Ethash: ethash.Config{
+ PowMode: ethash.ModeFake,
+ },
+ NetworkId: 1337,
+ TrieCleanCache: 5,
+ TrieCleanCacheJournal: "triecache",
+ TrieCleanCacheRejournal: 60 * time.Minute,
+ TrieDirtyCache: 5,
+ TrieTimeout: 60 * time.Minute,
+ SnapshotCache: 5,
+ }
+ ethBackend, err := eth.New(stack, ethConf)
if err != nil {
t.Fatalf("could not create eth backend: %v", err)
}
-
+ // Create some blocks and import them
+ chain, _ := core.GenerateChain(params.AllEthashProtocolChanges, ethBackend.BlockChain().Genesis(),
+ ethash.NewFaker(), ethBackend.ChainDb(), 10, func(i int, gen *core.BlockGen) {})
+ _, err = ethBackend.BlockChain().InsertChain(chain)
+ if err != nil {
+ t.Fatalf("could not import blocks: %v", err)
+ }
// create gql service
err = New(stack, ethBackend.APIBackend, []string{}, []string{})
if err != nil {
t.Fatalf("could not create graphql service: %v", err)
}
}
-
-func doHTTPRequest(t *testing.T, req *http.Request) *http.Response {
- client := &http.Client{}
- resp, err := client.Do(req)
- if err != nil {
- t.Fatal("could not issue a GET request to the given endpoint", err)
-
- }
- return resp
-}
diff --git a/graphql/schema.go b/graphql/schema.go
index d7b253f227..6ea63db636 100644
--- a/graphql/schema.go
+++ b/graphql/schema.go
@@ -300,7 +300,7 @@ const schema string = `
block(number: Long, hash: Bytes32): Block
# Blocks returns all the blocks between two numbers, inclusive. If
# to is not supplied, it defaults to the most recent known block.
- blocks(from: Long!, to: Long): [Block!]!
+ blocks(from: Long, to: Long): [Block!]!
# Pending returns the current pending state.
pending: Pending!
# Transaction returns a transaction specified by its hash.
@@ -310,8 +310,6 @@ const schema string = `
# GasPrice returns the node's estimate of a gas price sufficient to
# ensure a transaction is mined in a timely fashion.
gasPrice: BigInt!
- # ProtocolVersion returns the current wire protocol version number.
- protocolVersion: Int!
# Syncing returns information on the current synchronisation state.
syncing: SyncState
# ChainID returns the current chain ID for transaction replay protection.
diff --git a/graphql/service.go b/graphql/service.go
index ae962e5b36..bcb0a4990d 100644
--- a/graphql/service.go
+++ b/graphql/service.go
@@ -17,12 +17,44 @@ package graphql
import (
+ "encoding/json"
+ "net/http"
+
"github.com/ethereum/go-ethereum/internal/ethapi"
"github.com/ethereum/go-ethereum/node"
"github.com/graph-gophers/graphql-go"
- "github.com/graph-gophers/graphql-go/relay"
)
+type handler struct {
+ Schema *graphql.Schema
+}
+
+func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ var params struct {
+ Query string `json:"query"`
+ OperationName string `json:"operationName"`
+ Variables map[string]interface{} `json:"variables"`
+ }
+ if err := json.NewDecoder(r.Body).Decode(&params); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ response := h.Schema.Exec(r.Context(), params.Query, params.OperationName, params.Variables)
+ responseJSON, err := json.Marshal(response)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ if len(response.Errors) > 0 {
+ w.WriteHeader(http.StatusBadRequest)
+ }
+ w.Write(responseJSON)
+}
+
// New constructs a new GraphQL service instance.
func New(stack *node.Node, backend ethapi.Backend, cors, vhosts []string) error {
if backend == nil {
@@ -41,7 +73,7 @@ func newHandler(stack *node.Node, backend ethapi.Backend, cors, vhosts []string)
if err != nil {
return err
}
- h := &relay.Handler{Schema: s}
+ h := handler{Schema: s}
handler := node.NewHTTPHandlerStack(h, cors, vhosts)
stack.RegisterHandler("GraphQL UI", "/graphql/ui", GraphiQL{})
diff --git a/interfaces.go b/interfaces.go
index 1ff31f96b6..afcdc17e58 100644
--- a/interfaces.go
+++ b/interfaces.go
@@ -119,6 +119,8 @@ type CallMsg struct {
GasPrice *big.Int // wei <-> gas exchange ratio
Value *big.Int // amount of wei sent along with the call
Data []byte // input data, usually an ABI-encoded contract method invocation
+
+ AccessList types.AccessList // EIP-2930 access list.
}
// A ContractCaller provides contract calls, essentially transactions that are executed by
diff --git a/internal/cmdtest/test_cmd.go b/internal/cmdtest/test_cmd.go
index 0edfccec5a..82ad9c15b6 100644
--- a/internal/cmdtest/test_cmd.go
+++ b/internal/cmdtest/test_cmd.go
@@ -27,6 +27,7 @@ import (
"regexp"
"strings"
"sync"
+ "sync/atomic"
"syscall"
"testing"
"text/template"
@@ -55,10 +56,13 @@ type TestCmd struct {
Err error
}
+var id int32
+
// Run exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g.
"geth-test" in cmd/geth/run_test.go) func (tt *TestCmd) Run(name string, args ...string) { - tt.stderr = &testlogger{t: tt.T} + id := atomic.AddInt32(&id, 1) + tt.stderr = &testlogger{t: tt.T, name: fmt.Sprintf("%d", id)} tt.cmd = &exec.Cmd{ Path: reexec.Self(), Args: append([]string{name}, args...), @@ -238,16 +242,17 @@ func (tt *TestCmd) withKillTimeout(fn func()) { // testlogger logs all written lines via t.Log and also // collects them for later inspection. type testlogger struct { - t *testing.T - mu sync.Mutex - buf bytes.Buffer + t *testing.T + mu sync.Mutex + buf bytes.Buffer + name string } func (tl *testlogger) Write(b []byte) (n int, err error) { lines := bytes.Split(b, []byte("\n")) for _, line := range lines { if len(line) > 0 { - tl.t.Logf("(stderr) %s", line) + tl.t.Logf("(stderr:%v) %s", tl.name, line) } } tl.mu.Lock() diff --git a/internal/debug/flags.go b/internal/debug/flags.go index 3b077b6e08..2c92b19de6 100644 --- a/internal/debug/flags.go +++ b/internal/debug/flags.go @@ -28,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/metrics/exp" "github.com/fjl/memsize/memsizeui" - colorable "github.com/mattn/go-colorable" + "github.com/mattn/go-colorable" "github.com/mattn/go-isatty" "gopkg.in/urfave/cli.v1" ) @@ -41,6 +41,10 @@ var ( Usage: "Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail", Value: 3, } + logjsonFlag = cli.BoolFlag{ + Name: "log.json", + Usage: "Format logs with JSON", + } vmoduleFlag = cli.StringFlag{ Name: "vmodule", Usage: "Per-module verbosity: comma-separated list of = (e.g. eth/*=5,p2p=4)", @@ -86,62 +90,40 @@ var ( Name: "trace", Usage: "Write execution trace to the given file", } - // (Deprecated April 2020) - legacyPprofPortFlag = cli.IntFlag{ - Name: "pprofport", - Usage: "pprof HTTP server listening port (deprecated, use --pprof.port)", - Value: 6060, - } - legacyPprofAddrFlag = cli.StringFlag{ - Name: "pprofaddr", - Usage: "pprof HTTP server listening interface (deprecated, use --pprof.addr)", - Value: "127.0.0.1", - } - legacyMemprofilerateFlag = cli.IntFlag{ - Name: "memprofilerate", - Usage: "Turn on memory profiling with the given rate (deprecated, use --pprof.memprofilerate)", - Value: runtime.MemProfileRate, - } - legacyBlockprofilerateFlag = cli.IntFlag{ - Name: "blockprofilerate", - Usage: "Turn on block profiling with the given rate (deprecated, use --pprof.blockprofilerate)", - } - legacyCpuprofileFlag = cli.StringFlag{ - Name: "cpuprofile", - Usage: "Write CPU profile to the given file (deprecated, use --pprof.cpuprofile)", - } ) // Flags holds all command-line flags required for debugging. 
var Flags = []cli.Flag{ - verbosityFlag, vmoduleFlag, backtraceAtFlag, debugFlag, + verbosityFlag, logjsonFlag, vmoduleFlag, backtraceAtFlag, debugFlag, pprofFlag, pprofAddrFlag, pprofPortFlag, memprofilerateFlag, blockprofilerateFlag, cpuprofileFlag, traceFlag, } -var DeprecatedFlags = []cli.Flag{ - legacyPprofPortFlag, legacyPprofAddrFlag, legacyMemprofilerateFlag, - legacyBlockprofilerateFlag, legacyCpuprofileFlag, -} - var ( - ostream log.Handler glogger *log.GlogHandler ) func init() { - usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" - output := io.Writer(os.Stderr) - if usecolor { - output = colorable.NewColorableStderr() - } - ostream = log.StreamHandler(output, log.TerminalFormat(usecolor)) - glogger = log.NewGlogHandler(ostream) + glogger = log.NewGlogHandler(log.StreamHandler(os.Stderr, log.TerminalFormat(false))) + glogger.Verbosity(log.LvlInfo) + log.Root().SetHandler(glogger) } // Setup initializes profiling and logging based on the CLI flags. // It should be called as early as possible in the program. func Setup(ctx *cli.Context) error { + var ostream log.Handler + output := io.Writer(os.Stderr) + if ctx.GlobalBool(logjsonFlag.Name) { + ostream = log.StreamHandler(output, log.JSONFormat()) + } else { + usecolor := (isatty.IsTerminal(os.Stderr.Fd()) || isatty.IsCygwinTerminal(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb" + if usecolor { + output = colorable.NewColorableStderr() + } + ostream = log.StreamHandler(output, log.TerminalFormat(usecolor)) + } + glogger.SetHandler(ostream) // logging log.PrintOrigins(ctx.GlobalBool(debugFlag.Name)) glogger.Verbosity(log.Lvl(ctx.GlobalInt(verbosityFlag.Name))) @@ -150,16 +132,8 @@ func Setup(ctx *cli.Context) error { log.Root().SetHandler(glogger) // profiling, tracing - if ctx.GlobalIsSet(legacyMemprofilerateFlag.Name) { - runtime.MemProfileRate = ctx.GlobalInt(legacyMemprofilerateFlag.Name) - log.Warn("The flag --memprofilerate is deprecated and will be removed in the future, please use --pprof.memprofilerate") - } runtime.MemProfileRate = ctx.GlobalInt(memprofilerateFlag.Name) - if ctx.GlobalIsSet(legacyBlockprofilerateFlag.Name) { - Handler.SetBlockProfileRate(ctx.GlobalInt(legacyBlockprofilerateFlag.Name)) - log.Warn("The flag --blockprofilerate is deprecated and will be removed in the future, please use --pprof.blockprofilerate") - } Handler.SetBlockProfileRate(ctx.GlobalInt(blockprofilerateFlag.Name)) if traceFile := ctx.GlobalString(traceFlag.Name); traceFile != "" { @@ -173,26 +147,12 @@ func Setup(ctx *cli.Context) error { return err } } - if cpuFile := ctx.GlobalString(legacyCpuprofileFlag.Name); cpuFile != "" { - log.Warn("The flag --cpuprofile is deprecated and will be removed in the future, please use --pprof.cpuprofile") - if err := Handler.StartCPUProfile(cpuFile); err != nil { - return err - } - } // pprof server if ctx.GlobalBool(pprofFlag.Name) { listenHost := ctx.GlobalString(pprofAddrFlag.Name) - if ctx.GlobalIsSet(legacyPprofAddrFlag.Name) && !ctx.GlobalIsSet(pprofAddrFlag.Name) { - listenHost = ctx.GlobalString(legacyPprofAddrFlag.Name) - log.Warn("The flag --pprofaddr is deprecated and will be removed in the future, please use --pprof.addr") - } port := ctx.GlobalInt(pprofPortFlag.Name) - if ctx.GlobalIsSet(legacyPprofPortFlag.Name) && !ctx.GlobalIsSet(pprofPortFlag.Name) { - port = ctx.GlobalInt(legacyPprofPortFlag.Name) - log.Warn("The flag --pprofport is deprecated and will be removed in the future, please use --pprof.port") - } 
address := fmt.Sprintf("%s:%d", listenHost, port)
// This context value ("metrics.addr") represents the utils.MetricsHTTPFlag.Name.
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 0d6ace9b5b..622063cf64 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -64,11 +64,6 @@ func (s *PublicEthereumAPI) GasPrice(ctx context.Context) (*hexutil.Big, error)
return (*hexutil.Big)(price), err
}
-// ProtocolVersion returns the current Ethereum protocol version this node supports
-func (s *PublicEthereumAPI) ProtocolVersion() hexutil.Uint {
- return hexutil.Uint(s.b.ProtocolVersion())
-}
-
// Syncing returns false in case the node is currently not syncing with the network. It can be up to date or has not
// yet received the latest block headers from its peers. In case it is synchronizing:
// - startingBlock: block number this node started to synchronise from
@@ -373,7 +368,7 @@ func (s *PrivateAccountAPI) signTransaction(ctx context.Context, args *SendTxArg
}
// SendTransaction will create a transaction from the given arguments and
-// tries to sign it with the key associated with args.To. If the given passwd isn't
+// tries to sign it with the key associated with args.From. If the given passwd isn't
// able to decrypt the key it fails.
func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs, passwd string) (common.Hash, error) {
if args.Nonce == nil {
@@ -391,7 +386,7 @@ func (s *PrivateAccountAPI) SendTransaction(ctx context.Context, args SendTxArgs
}
// SignTransaction will create a transaction from the given arguments and
-// tries to sign it with the key associated with args.To. If the given passwd isn't
+// tries to sign it with the key associated with args.From. If the given passwd isn't
// able to decrypt the key it fails. The transaction is returned in RLP-form, not broadcast
// to other nodes
func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs, passwd string) (*SignTransactionResult, error) {
@@ -415,7 +410,7 @@ func (s *PrivateAccountAPI) SignTransaction(ctx context.Context, args SendTxArgs
log.Warn("Failed transaction sign attempt", "from", args.From, "to", args.To, "value", args.Value.ToInt(), "err", err)
return nil, err
}
- data, err := rlp.EncodeToBytes(signed)
+ data, err := signed.MarshalBinary()
if err != nil {
return nil, err
}
@@ -753,12 +748,13 @@ func (s *PublicBlockChainAPI) GetStorageAt(ctx context.Context, address common.A
// CallArgs represents the arguments for a call.
type CallArgs struct { - From *common.Address `json:"from"` - To *common.Address `json:"to"` - Gas *hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Value *hexutil.Big `json:"value"` - Data *hexutil.Bytes `json:"data"` + From *common.Address `json:"from"` + To *common.Address `json:"to"` + Gas *hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + Value *hexutil.Big `json:"value"` + Data *hexutil.Bytes `json:"data"` + AccessList *types.AccessList `json:"accessList"` } // ToMessage converts CallArgs to the Message type used by the core evm @@ -785,18 +781,20 @@ func (args *CallArgs) ToMessage(globalGasCap uint64) types.Message { if args.GasPrice != nil { gasPrice = args.GasPrice.ToInt() } - value := new(big.Int) if args.Value != nil { value = args.Value.ToInt() } - var data []byte if args.Data != nil { - data = []byte(*args.Data) + data = *args.Data + } + var accessList types.AccessList + if args.AccessList != nil { + accessList = *args.AccessList } - msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, false) + msg := types.NewMessage(addr, args.To, 0, value, gas, gasPrice, data, accessList, false) return msg } @@ -874,13 +872,13 @@ func DoCall(ctx context.Context, b Backend, args CallArgs, blockNrOrHash rpc.Blo evm.Cancel() }() - // Setup the gas pool (also for unmetered requests) - // and apply the message. + // Execute the message. gp := new(core.GasPool).AddGas(math.MaxUint64) result, err := core.ApplyMessage(evm, msg, gp) if err := vmError(); err != nil { return nil, err } + // If the timer caused an abort, return an appropriate error message if evm.Cancelled() { return nil, fmt.Errorf("execution aborted (timeout = %v)", timeout) @@ -1205,33 +1203,43 @@ func (s *PublicBlockChainAPI) rpcMarshalBlock(ctx context.Context, b *types.Bloc // RPCTransaction represents a transaction that will serialize to the RPC representation of a transaction type RPCTransaction struct { - BlockHash *common.Hash `json:"blockHash"` - BlockNumber *hexutil.Big `json:"blockNumber"` - From common.Address `json:"from"` - Gas hexutil.Uint64 `json:"gas"` - GasPrice *hexutil.Big `json:"gasPrice"` - Hash common.Hash `json:"hash"` - Input hexutil.Bytes `json:"input"` - Nonce hexutil.Uint64 `json:"nonce"` - To *common.Address `json:"to"` - TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` - Value *hexutil.Big `json:"value"` - V *hexutil.Big `json:"v"` - R *hexutil.Big `json:"r"` - S *hexutil.Big `json:"s"` + BlockHash *common.Hash `json:"blockHash"` + BlockNumber *hexutil.Big `json:"blockNumber"` + From common.Address `json:"from"` + Gas hexutil.Uint64 `json:"gas"` + GasPrice *hexutil.Big `json:"gasPrice"` + Hash common.Hash `json:"hash"` + Input hexutil.Bytes `json:"input"` + Nonce hexutil.Uint64 `json:"nonce"` + To *common.Address `json:"to"` + TransactionIndex *hexutil.Uint64 `json:"transactionIndex"` + Value *hexutil.Big `json:"value"` + Type hexutil.Uint64 `json:"type"` + Accesses *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` + V *hexutil.Big `json:"v"` + R *hexutil.Big `json:"r"` + S *hexutil.Big `json:"s"` } // newRPCTransaction returns a transaction that will serialize to the RPC // representation, with the given location metadata set (if available). func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber uint64, index uint64) *RPCTransaction { - var signer types.Signer = types.FrontierSigner{} + // Determine the signer. 
For replay-protected transactions, use the most permissive + // signer, because we assume that signers are backwards-compatible with old + // transactions. For non-protected transactions, the homestead signer signer is used + // because the return value of ChainId is zero for those transactions. + var signer types.Signer if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) + signer = types.LatestSignerForChainID(tx.ChainId()) + } else { + signer = types.HomesteadSigner{} } + from, _ := types.Sender(signer, tx) v, r, s := tx.RawSignatureValues() - result := &RPCTransaction{ + Type: hexutil.Uint64(tx.Type()), From: from, Gas: hexutil.Uint64(tx.Gas()), GasPrice: (*hexutil.Big)(tx.GasPrice()), @@ -1249,6 +1257,11 @@ func newRPCTransaction(tx *types.Transaction, blockHash common.Hash, blockNumber result.BlockNumber = (*hexutil.Big)(new(big.Int).SetUint64(blockNumber)) result.TransactionIndex = (*hexutil.Uint64)(&index) } + if tx.Type() == types.AccessListTxType { + al := tx.AccessList() + result.Accesses = &al + result.ChainID = (*hexutil.Big)(tx.ChainId()) + } return result } @@ -1272,7 +1285,7 @@ func newRPCRawTransactionFromBlockIndex(b *types.Block, index uint64) hexutil.By if index >= uint64(len(txs)) { return nil } - blob, _ := rlp.EncodeToBytes(txs[index]) + blob, _ := txs[index].MarshalBinary() return blob } @@ -1290,11 +1303,15 @@ func newRPCTransactionFromBlockHash(b *types.Block, hash common.Hash) *RPCTransa type PublicTransactionPoolAPI struct { b Backend nonceLock *AddrLocker + signer types.Signer } // NewPublicTransactionPoolAPI creates a new RPC service with methods specific for the transaction pool. func NewPublicTransactionPoolAPI(b Backend, nonceLock *AddrLocker) *PublicTransactionPoolAPI { - return &PublicTransactionPoolAPI{b, nonceLock} + // The signer used by the API should always be the 'latest' known one because we expect + // signers to be backwards-compatible with old transactions. + signer := types.LatestSigner(b.ChainConfig()) + return &PublicTransactionPoolAPI{b, nonceLock, signer} } // GetBlockTransactionCountByNumber returns the number of transactions in the block with the given block number. @@ -1399,7 +1416,7 @@ func (s *PublicTransactionPoolAPI) GetRawTransactionByHash(ctx context.Context, } } // Serialize to RLP and return - return rlp.EncodeToBytes(tx) + return tx.MarshalBinary() } // GetTransactionReceipt returns the transaction receipt for the given transaction hash. @@ -1417,10 +1434,9 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha } receipt := receipts[index] - var signer types.Signer = types.FrontierSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } + // Derive the sender. + bigblock := new(big.Int).SetUint64(blockNumber) + signer := types.MakeSigner(s.b.ChainConfig(), bigblock) from, _ := types.Sender(signer, tx) fields := map[string]interface{}{ @@ -1435,6 +1451,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionReceipt(ctx context.Context, ha "contractAddress": nil, "logs": receipt.Logs, "logsBloom": receipt.Bloom, + "type": hexutil.Uint(tx.Type()), } // Assign receipt status or post state. @@ -1478,9 +1495,13 @@ type SendTxArgs struct { // newer name and should be preferred by clients. 
Data *hexutil.Bytes `json:"data"` Input *hexutil.Bytes `json:"input"` + + // For non-legacy transactions + AccessList *types.AccessList `json:"accessList,omitempty"` + ChainID *hexutil.Big `json:"chainId,omitempty"` } -// setDefaults is a helper function that fills in default values for unspecified tx fields. +// setDefaults fills in default values for unspecified tx fields. func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { if args.GasPrice == nil { price, err := b.SuggestPrice(ctx) @@ -1514,6 +1535,7 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { return errors.New(`contract creation without any data provided`) } } + // Estimate the gas usage if necessary. if args.Gas == nil { // For backwards-compatibility reason, we try both input and data @@ -1523,11 +1545,12 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { input = args.Data } callArgs := CallArgs{ - From: &args.From, // From shouldn't be nil - To: args.To, - GasPrice: args.GasPrice, - Value: args.Value, - Data: input, + From: &args.From, // From shouldn't be nil + To: args.To, + GasPrice: args.GasPrice, + Value: args.Value, + Data: input, + AccessList: args.AccessList, } pendingBlockNr := rpc.BlockNumberOrHashWithNumber(rpc.PendingBlockNumber) estimated, err := DoEstimateGas(ctx, b, callArgs, pendingBlockNr, b.RPCGasCap()) @@ -1537,9 +1560,15 @@ func (args *SendTxArgs) setDefaults(ctx context.Context, b Backend) error { args.Gas = &estimated log.Trace("Estimate gas usage automatically", "gas", args.Gas) } + if args.ChainID == nil { + id := (*hexutil.Big)(b.ChainConfig().ChainID) + args.ChainID = id + } return nil } +// toTransaction converts the arguments to a transaction. +// This assumes that setDefaults has been called. func (args *SendTxArgs) toTransaction() *types.Transaction { var input []byte if args.Input != nil { @@ -1547,10 +1576,30 @@ func (args *SendTxArgs) toTransaction() *types.Transaction { } else if args.Data != nil { input = *args.Data } - if args.To == nil { - return types.NewContractCreation(uint64(*args.Nonce), (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), input) + + var data types.TxData + if args.AccessList == nil { + data = &types.LegacyTx{ + To: args.To, + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: input, + } + } else { + data = &types.AccessListTx{ + To: args.To, + ChainID: (*big.Int)(args.ChainID), + Nonce: uint64(*args.Nonce), + Gas: uint64(*args.Gas), + GasPrice: (*big.Int)(args.GasPrice), + Value: (*big.Int)(args.Value), + Data: input, + AccessList: *args.AccessList, + } } - return types.NewTransaction(uint64(*args.Nonce), *args.To, (*big.Int)(args.Value), uint64(*args.Gas), (*big.Int)(args.GasPrice), input) + return types.NewTx(data) } // SubmitTransaction is a helper function that submits tx to txPool and logs a message. @@ -1560,19 +1609,25 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil { return common.Hash{}, err } + if !b.UnprotectedAllowed() && !tx.Protected() { + // Ensure only eip155 signed transactions are submitted if EIP155Required is set. 
+ return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") + } if err := b.SendTx(ctx, tx); err != nil { return common.Hash{}, err } + // Print a log with full tx details for manual investigations and interventions + signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number()) + from, err := types.Sender(signer, tx) + if err != nil { + return common.Hash{}, err + } + if tx.To() == nil { - signer := types.MakeSigner(b.ChainConfig(), b.CurrentBlock().Number()) - from, err := types.Sender(signer, tx) - if err != nil { - return common.Hash{}, err - } addr := crypto.CreateAddress(from, tx.Nonce()) - log.Info("Submitted contract creation", "fullhash", tx.Hash().Hex(), "contract", addr.Hex()) + log.Info("Submitted contract creation", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "contract", addr.Hex(), "value", tx.Value()) } else { - log.Info("Submitted transaction", "fullhash", tx.Hash().Hex(), "recipient", tx.To()) + log.Info("Submitted transaction", "hash", tx.Hash().Hex(), "from", from, "nonce", tx.Nonce(), "recipient", tx.To(), "value", tx.Value()) } return tx.Hash(), nil } @@ -1618,7 +1673,7 @@ func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args Sen } // Assemble the transaction and obtain rlp tx := args.toTransaction() - data, err := rlp.EncodeToBytes(tx) + data, err := tx.MarshalBinary() if err != nil { return nil, err } @@ -1627,9 +1682,9 @@ func (s *PublicTransactionPoolAPI) FillTransaction(ctx context.Context, args Sen // SendRawTransaction will add the signed transaction to the transaction pool. // The sender is responsible for signing the transaction and using the correct nonce. -func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, encodedTx hexutil.Bytes) (common.Hash, error) { +func (s *PublicTransactionPoolAPI) SendRawTransaction(ctx context.Context, input hexutil.Bytes) (common.Hash, error) { tx := new(types.Transaction) - if err := rlp.DecodeBytes(encodedTx, tx); err != nil { + if err := tx.UnmarshalBinary(input); err != nil { return common.Hash{}, err } return SubmitTransaction(ctx, s.b, tx) @@ -1690,7 +1745,7 @@ func (s *PublicTransactionPoolAPI) SignTransaction(ctx context.Context, args Sen if err != nil { return nil, err } - data, err := rlp.EncodeToBytes(tx) + data, err := tx.MarshalBinary() if err != nil { return nil, err } @@ -1712,11 +1767,7 @@ func (s *PublicTransactionPoolAPI) PendingTransactions() ([]*RPCTransaction, err } transactions := make([]*RPCTransaction, 0, len(pending)) for _, tx := range pending { - var signer types.Signer = types.HomesteadSigner{} - if tx.Protected() { - signer = types.NewEIP155Signer(tx.ChainId()) - } - from, _ := types.Sender(signer, tx) + from, _ := types.Sender(s.signer, tx) if _, exists := accounts[from]; exists { transactions = append(transactions, newRPCPendingTransaction(tx)) } @@ -1753,13 +1804,9 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr return common.Hash{}, err } for _, p := range pending { - var signer types.Signer = types.HomesteadSigner{} - if p.Protected() { - signer = types.NewEIP155Signer(p.ChainId()) - } - wantSigHash := signer.Hash(matchTx) - - if pFrom, err := types.Sender(signer, p); err == nil && pFrom == sendArgs.From && signer.Hash(p) == wantSigHash { + wantSigHash := s.signer.Hash(matchTx) + pFrom, err := types.Sender(s.signer, p) + if err == nil && pFrom == sendArgs.From && s.signer.Hash(p) == wantSigHash { // Match. Re-sign and send the transaction. 
if gasPrice != nil && (*big.Int)(gasPrice).Sign() != 0 { sendArgs.GasPrice = gasPrice @@ -1777,7 +1824,6 @@ func (s *PublicTransactionPoolAPI) Resend(ctx context.Context, sendArgs SendTxAr return signedTx.Hash(), nil } } - return common.Hash{}, fmt.Errorf("transaction %#x not found", matchTx.Hash()) } diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 10e716bf20..ebb088fef5 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -41,13 +41,13 @@ import ( type Backend interface { // General Ethereum API Downloader() *downloader.Downloader - ProtocolVersion() int SuggestPrice(ctx context.Context) (*big.Int, error) ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection - RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs + RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs + UnprotectedAllowed() bool // allows only for EIP155 transactions. // Blockchain API SetHead(number uint64) diff --git a/internal/flags/helpers.go b/internal/flags/helpers.go index 900fec454c..43bbcf0201 100644 --- a/internal/flags/helpers.go +++ b/internal/flags/helpers.go @@ -21,11 +21,11 @@ import ( "path/filepath" "github.com/ethereum/go-ethereum/params" - cli "gopkg.in/urfave/cli.v1" + "gopkg.in/urfave/cli.v1" ) var ( - CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} [arguments...] + CommandHelpTemplate = `{{.cmd.Name}}{{if .cmd.Subcommands}} command{{end}}{{if .cmd.Flags}} [command options]{{end}} {{.cmd.ArgsUsage}} {{if .cmd.Description}}{{.cmd.Description}} {{end}}{{if .cmd.Subcommands}} SUBCOMMANDS: @@ -36,7 +36,7 @@ SUBCOMMANDS: {{end}} {{end}}{{end}}` - OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} [arguments...] 
+ OriginCommandHelpTemplate = `{{.Name}}{{if .Subcommands}} command{{end}}{{if .Flags}} [command options]{{end}} {{.ArgsUsage}} {{if .Description}}{{.Description}} {{end}}{{if .Subcommands}} SUBCOMMANDS: @@ -51,10 +51,10 @@ OPTIONS: AppHelpTemplate = `NAME: {{.App.Name}} - {{.App.Usage}} - Copyright 2013-2019 The go-ethereum Authors + Copyright 2013-2021 The go-ethereum Authors USAGE: - {{.App.HelpName}} [options]{{if .App.Commands}} command [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}} + {{.App.HelpName}} [options]{{if .App.Commands}} [command] [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}} {{if .App.Version}} VERSION: {{.App.Version}} @@ -77,7 +77,7 @@ COPYRIGHT: ClefAppHelpTemplate = `NAME: {{.App.Name}} - {{.App.Usage}} - Copyright 2013-2019 The go-ethereum Authors + Copyright 2013-2021 The go-ethereum Authors USAGE: {{.App.HelpName}} [options]{{if .App.Commands}} command [command options]{{end}} {{if .App.ArgsUsage}}{{.App.ArgsUsage}}{{else}}[arguments...]{{end}} diff --git a/internal/guide/guide_test.go b/internal/guide/guide_test.go index 9c7ad16d18..abc48e0e4b 100644 --- a/internal/guide/guide_test.go +++ b/internal/guide/guide_test.go @@ -31,6 +31,7 @@ import ( "time" "github.com/ethereum/go-ethereum/accounts/keystore" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -75,7 +76,8 @@ func TestAccountManagement(t *testing.T) { if err != nil { t.Fatalf("Failed to create signer account: %v", err) } - tx, chain := new(types.Transaction), big.NewInt(1) + tx := types.NewTransaction(0, common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil) + chain := big.NewInt(1) // Sign a transaction with a single authorization if _, err := ks.SignTxWithPassphrase(signer, "Signer password", tx, chain); err != nil { diff --git a/internal/web3ext/web3ext.go b/internal/web3ext/web3ext.go index 77954bbbf0..6fcf4b8380 100644 --- a/internal/web3ext/web3ext.go +++ b/internal/web3ext/web3ext.go @@ -33,7 +33,7 @@ var Modules = map[string]string{ "swarmfs": SwarmfsJs, "txpool": TxpoolJs, "les": LESJs, - "lespay": LESPayJs, + "vflux": VfluxJs, } const ChequebookJs = ` @@ -877,24 +877,24 @@ web3._extend({ }); ` -const LESPayJs = ` +const VfluxJs = ` web3._extend({ - property: 'lespay', + property: 'vflux', methods: [ new web3._extend.Method({ name: 'distribution', - call: 'lespay_distribution', + call: 'vflux_distribution', params: 2 }), new web3._extend.Method({ name: 'timeout', - call: 'lespay_timeout', + call: 'vflux_timeout', params: 2 }), new web3._extend.Method({ name: 'value', - call: 'lespay_value', + call: 'vflux_value', params: 2 }), ], @@ -902,7 +902,7 @@ web3._extend({ [ new web3._extend.Property({ name: 'requestStats', - getter: 'lespay_requestStats' + getter: 'vflux_requestStats' }), ] }); diff --git a/les/api.go b/les/api.go index 66d133b854..6491c4dcc4 100644 --- a/les/api.go +++ b/les/api.go @@ -23,7 +23,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/mclock" - lps "github.com/ethereum/go-ethereum/les/lespay/server" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/p2p/enode" ) @@ -37,7 +37,7 @@ var ( // PrivateLightServerAPI provides an API to access the LES light server. 
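The help-template tweaks in internal/flags render each command's ArgsUsage string in the usage line instead of the generic [arguments...] placeholder. A small sketch of where that string comes from, using the urfave/cli v1 package the repository already imports; the dump command and its argument text are made up for illustration:

```go
package main

import (
	"log"
	"os"

	"gopkg.in/urfave/cli.v1"
)

func main() {
	app := cli.NewApp()
	app.Commands = []cli.Command{
		{
			Name:      "dump",
			Usage:     "Dump a specific block from storage",
			ArgsUsage: "[<blockHash> | <blockNum>]", // surfaced by the updated help templates
			Action: func(ctx *cli.Context) error {
				return nil
			},
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
```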
type PrivateLightServerAPI struct { server *LesServer - defaultPosFactors, defaultNegFactors lps.PriceFactors + defaultPosFactors, defaultNegFactors vfs.PriceFactors } // NewPrivateLightServerAPI creates a new LES light server API. @@ -107,7 +107,7 @@ func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface // setParams either sets the given parameters for a single connected client (if specified) // or the default parameters applicable to clients connected in the future -func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *lps.PriceFactors) (updateFactors bool, err error) { +func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { defParams := client == nil for name, value := range params { errValue := func() error { diff --git a/les/api_backend.go b/les/api_backend.go index 75bea56da6..f5d2354b60 100644 --- a/les/api_backend.go +++ b/les/api_backend.go @@ -40,9 +40,10 @@ import ( ) type LesApiBackend struct { - extRPCEnabled bool - eth *LightEthereum - gpo *gasprice.Oracle + extRPCEnabled bool + allowUnprotectedTxs bool + eth *LightEthereum + gpo *gasprice.Oracle } func (b *LesApiBackend) ChainConfig() *params.ChainConfig { @@ -171,8 +172,9 @@ func (b *LesApiBackend) GetTd(ctx context.Context, hash common.Hash) *big.Int { } func (b *LesApiBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header) (*vm.EVM, func() error, error) { - context := core.NewEVMContext(msg, header, b.eth.blockchain, nil) - return vm.NewEVM(context, state, b.eth.chainConfig, vm.Config{}), state.Error, nil + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(header, b.eth.blockchain, nil) + return vm.NewEVM(context, txContext, state, b.eth.chainConfig, vm.Config{}), state.Error, nil } func (b *LesApiBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { @@ -262,6 +264,10 @@ func (b *LesApiBackend) ExtRPCEnabled() bool { return b.extRPCEnabled } +func (b *LesApiBackend) UnprotectedAllowed() bool { + return b.allowUnprotectedTxs +} + func (b *LesApiBackend) RPCGasCap() uint64 { return b.eth.config.RPCGasCap } @@ -291,3 +297,15 @@ func (b *LesApiBackend) Engine() consensus.Engine { func (b *LesApiBackend) CurrentHeader() *types.Header { return b.eth.blockchain.CurrentHeader() } + +func (b *LesApiBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { + return b.eth.stateAtBlock(ctx, block, reexec) +} + +func (b *LesApiBackend) StatesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { + return b.eth.statesInRange(ctx, fromBlock, toBlock, reexec) +} + +func (b *LesApiBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { + return b.eth.stateAtTransaction(ctx, block, txIndex, reexec) +} diff --git a/les/api_test.go b/les/api_test.go index 2895264f67..f7017c5d98 100644 --- a/les/api_test.go +++ b/les/api_test.go @@ -33,6 +33,7 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/les/flowcontrol" 
"github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" @@ -492,14 +493,14 @@ func testSim(t *testing.T, serverCount, clientCount int, serverDir, clientDir [] } func newLesClientService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - config := eth.DefaultConfig + config := ethconfig.Defaults config.SyncMode = downloader.LightSync config.Ethash.PowMode = ethash.ModeFake return New(stack, &config) } func newLesServerService(ctx *adapters.ServiceContext, stack *node.Node) (node.Lifecycle, error) { - config := eth.DefaultConfig + config := ethconfig.Defaults config.SyncMode = downloader.FullSync config.LightServ = testServerCapacity config.LightPeers = testMaxClients diff --git a/les/benchmark.go b/les/benchmark.go index a146de2fed..757822a6b3 100644 --- a/les/benchmark.go +++ b/les/benchmark.go @@ -75,9 +75,8 @@ func (b *benchmarkBlockHeaders) init(h *serverHandler, count int) error { func (b *benchmarkBlockHeaders) request(peer *serverPeer, index int) error { if b.byHash { return peer.requestHeadersByHash(0, b.hashes[index], b.amount, b.skip, b.reverse) - } else { - return peer.requestHeadersByNumber(0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse) } + return peer.requestHeadersByNumber(0, uint64(b.offset+rand.Int63n(b.randMax)), b.amount, b.skip, b.reverse) } // benchmarkBodiesOrReceipts implements requestBenchmark @@ -98,9 +97,8 @@ func (b *benchmarkBodiesOrReceipts) init(h *serverHandler, count int) error { func (b *benchmarkBodiesOrReceipts) request(peer *serverPeer, index int) error { if b.receipts { return peer.requestReceipts(0, []common.Hash{b.hashes[index]}) - } else { - return peer.requestBodies(0, []common.Hash{b.hashes[index]}) } + return peer.requestBodies(0, []common.Hash{b.hashes[index]}) } // benchmarkProofsOrCode implements requestBenchmark @@ -119,9 +117,8 @@ func (b *benchmarkProofsOrCode) request(peer *serverPeer, index int) error { rand.Read(key) if b.code { return peer.requestCode(0, []CodeReq{{BHash: b.headHash, AccKey: key}}) - } else { - return peer.requestProofs(0, []ProofReq{{BHash: b.headHash, Key: key}}) } + return peer.requestProofs(0, []ProofReq{{BHash: b.headHash, Key: key}}) } // benchmarkHelperTrie implements requestBenchmark @@ -159,7 +156,7 @@ func (b *benchmarkHelperTrie) request(peer *serverPeer, index int) error { for i := range reqs { key := make([]byte, 8) binary.BigEndian.PutUint64(key[:], uint64(rand.Int63n(int64(b.headNum)))) - reqs[i] = HelperTrieReq{Type: htCanonical, TrieIdx: b.sectionCount - 1, Key: key, AuxReq: auxHeader} + reqs[i] = HelperTrieReq{Type: htCanonical, TrieIdx: b.sectionCount - 1, Key: key, AuxReq: htAuxHeader} } } @@ -174,7 +171,7 @@ type benchmarkTxSend struct { func (b *benchmarkTxSend) init(h *serverHandler, count int) error { key, _ := crypto.GenerateKey() addr := crypto.PubkeyToAddress(key.PublicKey) - signer := types.NewEIP155Signer(big.NewInt(18)) + signer := types.LatestSigner(h.server.chainConfig) b.txs = make(types.Transactions, count) for i := range b.txs { diff --git a/les/client.go b/les/client.go index a2f7c56dfd..605c4d03ca 100644 --- a/les/client.go +++ b/les/client.go @@ -30,37 +30,39 @@ import ( "github.com/ethereum/go-ethereum/core/bloombits" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/eth/filters" 
"github.com/ethereum/go-ethereum/eth/gasprice" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/internal/ethapi" - lpc "github.com/ethereum/go-ethereum/les/lespay/client" + "github.com/ethereum/go-ethereum/les/vflux" + vfc "github.com/ethereum/go-ethereum/les/vflux/client" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/rpc" ) type LightEthereum struct { lesCommons - peers *serverPeerSet - reqDist *requestDistributor - retriever *retrieveManager - odr *LesOdr - relay *lesTxRelay - handler *clientHandler - txPool *light.TxPool - blockchain *light.LightChain - serverPool *serverPool - valueTracker *lpc.ValueTracker - dialCandidates enode.Iterator - pruner *pruner + peers *serverPeerSet + reqDist *requestDistributor + retriever *retrieveManager + odr *LesOdr + relay *lesTxRelay + handler *clientHandler + txPool *light.TxPool + blockchain *light.LightChain + serverPool *vfc.ServerPool + serverPoolIterator enode.Iterator + pruner *pruner bloomRequests chan chan *bloombits.Retrieval // Channel receiving bloom data retrieval requests bloomIndexer *core.ChainIndexer // Bloom indexer operating during block imports @@ -72,19 +74,20 @@ type LightEthereum struct { netRPCService *ethapi.PublicNetAPI p2pServer *p2p.Server + p2pConfig *p2p.Config } // New creates an instance of the light client. -func New(stack *node.Node, config *eth.Config) (*LightEthereum, error) { +func New(stack *node.Node, config *ethconfig.Config) (*LightEthereum, error) { chainDb, err := stack.OpenDatabase("lightchaindata", config.DatabaseCache, config.DatabaseHandles, "eth/db/chaindata/") if err != nil { return nil, err } - lespayDb, err := stack.OpenDatabase("lespay", 0, 0, "eth/db/lespay") + lesDb, err := stack.OpenDatabase("les.client", 0, 0, "eth/db/les.client") if err != nil { return nil, err } - chainConfig, genesisHash, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis) + chainConfig, genesisHash, genesisErr := core.SetupGenesisBlockWithOverride(chainDb, config.Genesis, config.OverrideBerlin) if _, isCompat := genesisErr.(*params.ConfigCompatError); genesisErr != nil && !isCompat { return nil, genesisErr } @@ -98,32 +101,31 @@ func New(stack *node.Node, config *eth.Config) (*LightEthereum, error) { chainConfig: chainConfig, iConfig: light.DefaultClientIndexerConfig, chainDb: chainDb, + lesDb: lesDb, closeCh: make(chan struct{}), }, peers: peers, eventMux: stack.EventMux(), reqDist: newRequestDistributor(peers, &mclock.System{}), accountManager: stack.AccountManager(), - engine: eth.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb), + engine: ethconfig.CreateConsensusEngine(stack, chainConfig, &config.Ethash, nil, false, chainDb), bloomRequests: make(chan chan *bloombits.Retrieval), - bloomIndexer: eth.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations), - valueTracker: lpc.NewValueTracker(lespayDb, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)), + bloomIndexer: core.NewBloomIndexer(chainDb, params.BloomBitsBlocksClient, params.HelperTrieConfirmations), p2pServer: stack.Server(), + p2pConfig: &stack.Config().P2P, } - 
peers.subscribe((*vtSubscription)(leth.valueTracker)) - dnsdisc, err := leth.setupDiscovery(&stack.Config().P2P) - if err != nil { - return nil, err + var prenegQuery vfc.QueryFunc + if leth.p2pServer.DiscV5 != nil { + prenegQuery = leth.prenegQuery } - leth.serverPool = newServerPool(lespayDb, []byte("serverpool:"), leth.valueTracker, dnsdisc, time.Second, nil, &mclock.System{}, config.UltraLightServers) - peers.subscribe(leth.serverPool) - leth.dialCandidates = leth.serverPool.dialIterator + leth.serverPool, leth.serverPoolIterator = vfc.NewServerPool(lesDb, []byte("serverpool:"), time.Second, prenegQuery, &mclock.System{}, config.UltraLightServers, requestList) + leth.serverPool.AddMetrics(suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge, sessionValueMeter, serverDialedMeter) - leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.getTimeout) + leth.retriever = newRetrieveManager(peers, leth.reqDist, leth.serverPool.GetTimeout) leth.relay = newLesTxRelay(peers, leth.retriever) - leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.retriever) + leth.odr = NewLesOdr(chainDb, light.DefaultClientIndexerConfig, leth.peers, leth.retriever) leth.chtIndexer = light.NewChtIndexer(chainDb, leth.odr, params.CHTFrequency, params.HelperTrieConfirmations, config.LightNoPrune) leth.bloomTrieIndexer = light.NewBloomTrieIndexer(chainDb, leth.odr, params.BloomBitsBlocksClient, params.BloomTrieFrequency, config.LightNoPrune) leth.odr.SetIndexers(leth.chtIndexer, leth.bloomTrieIndexer, leth.bloomIndexer) @@ -158,7 +160,7 @@ func New(stack *node.Node, config *eth.Config) (*LightEthereum, error) { rawdb.WriteChainConfig(chainDb, genesisHash, chainConfig) } - leth.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), leth, nil} + leth.ApiBackend = &LesApiBackend{stack.Config().ExtRPCEnabled(), stack.Config().AllowUnprotectedTxs, leth, nil} gpoParams := config.GPO if gpoParams.Default == nil { gpoParams.Default = config.Miner.GasPrice @@ -178,24 +180,82 @@ func New(stack *node.Node, config *eth.Config) (*LightEthereum, error) { stack.RegisterProtocols(leth.Protocols()) stack.RegisterLifecycle(leth) + // Check for unclean shutdown + if uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(chainDb); err != nil { + log.Error("Could not update unclean-shutdown-marker list", "error", err) + } else { + if discards > 0 { + log.Warn("Old unclean shutdowns found", "count", discards) + } + for _, tstamp := range uncleanShutdowns { + t := time.Unix(int64(tstamp), 0) + log.Warn("Unclean shutdown detected", "booted", t, + "age", common.PrettyAge(t)) + } + } return leth, nil } -// vtSubscription implements serverPeerSubscriber -type vtSubscription lpc.ValueTracker +// VfluxRequest sends a batch of requests to the given node through discv5 UDP TalkRequest and returns the responses +func (s *LightEthereum) VfluxRequest(n *enode.Node, reqs vflux.Requests) vflux.Replies { + if s.p2pServer.DiscV5 == nil { + return nil + } + reqsEnc, _ := rlp.EncodeToBytes(&reqs) + repliesEnc, _ := s.p2pServer.DiscV5.TalkRequest(s.serverPool.DialNode(n), "vfx", reqsEnc) + var replies vflux.Replies + if len(repliesEnc) == 0 || rlp.DecodeBytes(repliesEnc, &replies) != nil { + return nil + } + return replies +} + +// vfxVersion returns the version number of the "les" service subdomain of the vflux UDP +// service, as advertised in the ENR record +func (s *LightEthereum) vfxVersion(n *enode.Node) uint { + if n.Seq() == 0 { + var err error + if s.p2pServer.DiscV5 
== nil { + return 0 + } + if n, err = s.p2pServer.DiscV5.RequestENR(n); n != nil && err == nil && n.Seq() != 0 { + s.serverPool.Persist(n) + } else { + return 0 + } + } -// registerPeer implements serverPeerSubscriber -func (v *vtSubscription) registerPeer(p *serverPeer) { - vt := (*lpc.ValueTracker)(v) - p.setValueTracker(vt, vt.Register(p.ID())) - p.updateVtParams() + var les []rlp.RawValue + if err := n.Load(enr.WithEntry("les", &les)); err != nil || len(les) < 1 { + return 0 + } + var version uint + rlp.DecodeBytes(les[0], &version) // Ignore additional fields (for forward compatibility). + return version } -// unregisterPeer implements serverPeerSubscriber -func (v *vtSubscription) unregisterPeer(p *serverPeer) { - vt := (*lpc.ValueTracker)(v) - vt.Unregister(p.ID()) - p.setValueTracker(nil, nil) +// prenegQuery sends a capacity query to the given server node to determine whether +// a connection slot is immediately available +func (s *LightEthereum) prenegQuery(n *enode.Node) int { + if s.vfxVersion(n) < 1 { + // UDP query not supported, always try TCP connection + return 1 + } + + var requests vflux.Requests + requests.Add("les", vflux.CapacityQueryName, vflux.CapacityQueryReq{ + Bias: 180, + AddTokens: []vflux.IntOrInf{{}}, + }) + replies := s.VfluxRequest(n, requests) + var cqr vflux.CapacityQueryReply + if replies.Get(0, &cqr) != nil || len(cqr) != 1 { // Note: Get returns an error if replies is nil + return -1 + } + if cqr[0] > 0 { + return 1 + } + return 0 } type LightDummyAPI struct{} @@ -239,7 +299,7 @@ func (s *LightEthereum) APIs() []rpc.API { }, { Namespace: "eth", Version: "1.0", - Service: filters.NewPublicFilterAPI(s.ApiBackend, true), + Service: filters.NewPublicFilterAPI(s.ApiBackend, true, 5*time.Minute), Public: true, }, { Namespace: "net", @@ -252,9 +312,9 @@ func (s *LightEthereum) APIs() []rpc.API { Service: NewPrivateLightAPI(&s.lesCommons), Public: false, }, { - Namespace: "lespay", + Namespace: "vflux", Version: "1.0", - Service: lpc.NewPrivateClientAPI(s.valueTracker), + Service: s.serverPool.API(), Public: false, }, }...) @@ -278,7 +338,7 @@ func (s *LightEthereum) Protocols() []p2p.Protocol { return p.Info() } return nil - }, s.dialCandidates) + }, s.serverPoolIterator) } // Start implements node.Lifecycle, starting all internal goroutines needed by the @@ -286,7 +346,12 @@ func (s *LightEthereum) Protocols() []p2p.Protocol { func (s *LightEthereum) Start() error { log.Warn("Light client mode is an experimental feature") - s.serverPool.start() + discovery, err := s.setupDiscovery(s.p2pConfig) + if err != nil { + return err + } + s.serverPool.AddSource(discovery) + s.serverPool.Start() // Start bloom request workers. s.wg.Add(bloomServiceThreads) s.startBloomHandlers(params.BloomBitsBlocksClient) @@ -299,8 +364,7 @@ func (s *LightEthereum) Start() error { // Ethereum protocol. 
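The light client now mirrors the full node's unclean-shutdown bookkeeping: a marker is pushed into the chain database when the node boots and popped again on an orderly Stop, so markers left behind by a crash can be reported on the next start. A minimal sketch of that lifecycle against an in-memory database:

```go
package main

import (
	"log"
	"time"

	"github.com/ethereum/go-ethereum/core/rawdb"
)

func main() {
	db := rawdb.NewMemoryDatabase()

	// On startup: record this boot and report any markers left behind by
	// previous runs that did not shut down cleanly.
	uncleanShutdowns, discards, err := rawdb.PushUncleanShutdownMarker(db)
	if err != nil {
		log.Fatal(err)
	}
	if discards > 0 {
		log.Printf("dropped %d old unclean-shutdown markers", discards)
	}
	for _, tstamp := range uncleanShutdowns {
		log.Printf("unclean shutdown detected, booted at %v", time.Unix(int64(tstamp), 0))
	}

	// On a clean shutdown: remove the marker again.
	rawdb.PopUncleanShutdownMarker(db)
}
```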
func (s *LightEthereum) Stop() error { close(s.closeCh) - s.serverPool.stop() - s.valueTracker.Stop() + s.serverPool.Stop() s.peers.close() s.reqDist.close() s.odr.Stop() @@ -313,7 +377,9 @@ func (s *LightEthereum) Stop() error { s.engine.Close() s.pruner.close() s.eventMux.Stop() + rawdb.PopUncleanShutdownMarker(s.chainDb) s.chainDb.Close() + s.lesDb.Close() s.wg.Wait() log.Info("Light ethereum stopped") return nil diff --git a/les/client_handler.go b/les/client_handler.go index 77a0ea5c6f..f8e9edc9fe 100644 --- a/les/client_handler.go +++ b/les/client_handler.go @@ -17,6 +17,7 @@ package les import ( + "context" "math/big" "sync" "sync/atomic" @@ -24,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/light" @@ -36,18 +38,23 @@ import ( // responses. type clientHandler struct { ulc *ulc + forkFilter forkid.Filter checkpoint *params.TrustedCheckpoint fetcher *lightFetcher downloader *downloader.Downloader backend *LightEthereum - closeCh chan struct{} - wg sync.WaitGroup // WaitGroup used to track all connected peers. - syncDone func() // Test hooks when syncing is done. + closeCh chan struct{} + wg sync.WaitGroup // WaitGroup used to track all connected peers. + + // Hooks used in the testing + syncStart func(header *types.Header) // Hook called when the syncing is started + syncEnd func(header *types.Header) // Hook called when the syncing is done } func newClientHandler(ulcServers []string, ulcFraction int, checkpoint *params.TrustedCheckpoint, backend *LightEthereum) *clientHandler { handler := &clientHandler{ + forkFilter: forkid.NewFilter(backend.blockchain), checkpoint: checkpoint, backend: backend, closeCh: make(chan struct{}), @@ -102,15 +109,30 @@ func (h *clientHandler) handle(p *serverPeer) error { p.Log().Debug("Light Ethereum peer connected", "name", p.Name()) // Execute the LES handshake - if err := p.Handshake(h.backend.blockchain.Genesis().Hash()); err != nil { + forkid := forkid.NewID(h.backend.blockchain.Config(), h.backend.genesis, h.backend.blockchain.CurrentHeader().Number.Uint64()) + if err := p.Handshake(h.backend.blockchain.Genesis().Hash(), forkid, h.forkFilter); err != nil { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err } + // Register peer with the server pool + if h.backend.serverPool != nil { + if nvt, err := h.backend.serverPool.RegisterNode(p.Node()); err == nil { + p.setValueTracker(nvt) + p.updateVtParams() + defer func() { + p.setValueTracker(nil) + h.backend.serverPool.UnregisterNode(p.Node()) + }() + } else { + return err + } + } // Register the peer locally if err := h.backend.peers.register(p); err != nil { p.Log().Error("Light Ethereum peer registration failed", "err", err) return err } + serverConnectionGauge.Update(int64(h.backend.peers.len())) connectedAt := mclock.Now() @@ -153,8 +175,8 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { var deliverMsg *Msg // Handle the message depending on its contents - switch msg.Code { - case AnnounceMsg: + switch { + case msg.Code == AnnounceMsg: p.Log().Trace("Received announce message") var req announceData if err := msg.Decode(&req); err != nil { @@ -187,7 +209,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { p.updateHead(req.Hash, req.Number, req.Td) h.fetcher.announce(p, &req) } - case BlockHeadersMsg: + case msg.Code == 
BlockHeadersMsg: p.Log().Trace("Received block header response message") var resp struct { ReqID, BV uint64 @@ -200,17 +222,26 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { p.fcServer.ReceivedReply(resp.ReqID, resp.BV) p.answeredRequest(resp.ReqID) - // Filter out any explicitly requested headers, deliver the rest to the downloader - filter := len(headers) == 1 - if filter { - headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) - } - if len(headers) != 0 || !filter { - if err := h.downloader.DeliverHeaders(p.id, headers); err != nil { - log.Debug("Failed to deliver headers", "err", err) + // Filter out the explicitly requested header by the retriever + if h.backend.retriever.requested(resp.ReqID) { + deliverMsg = &Msg{ + MsgType: MsgBlockHeaders, + ReqID: resp.ReqID, + Obj: resp.Headers, + } + } else { + // Filter out any explicitly requested headers, deliver the rest to the downloader + filter := len(headers) == 1 + if filter { + headers = h.fetcher.deliverHeaders(p, resp.ReqID, resp.Headers) + } + if len(headers) != 0 || !filter { + if err := h.downloader.DeliverHeaders(p.id, headers); err != nil { + log.Debug("Failed to deliver headers", "err", err) + } } } - case BlockBodiesMsg: + case msg.Code == BlockBodiesMsg: p.Log().Trace("Received block bodies response") var resp struct { ReqID, BV uint64 @@ -226,7 +257,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Data, } - case CodeMsg: + case msg.Code == CodeMsg: p.Log().Trace("Received code response") var resp struct { ReqID, BV uint64 @@ -242,7 +273,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Data, } - case ReceiptsMsg: + case msg.Code == ReceiptsMsg: p.Log().Trace("Received receipts response") var resp struct { ReqID, BV uint64 @@ -258,7 +289,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Receipts, } - case ProofsV2Msg: + case msg.Code == ProofsV2Msg: p.Log().Trace("Received les/2 proofs response") var resp struct { ReqID, BV uint64 @@ -274,7 +305,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Data, } - case HelperTrieProofsMsg: + case msg.Code == HelperTrieProofsMsg: p.Log().Trace("Received helper trie proof response") var resp struct { ReqID, BV uint64 @@ -290,7 +321,7 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Data, } - case TxStatusMsg: + case msg.Code == TxStatusMsg: p.Log().Trace("Received tx status response") var resp struct { ReqID, BV uint64 @@ -306,11 +337,11 @@ func (h *clientHandler) handleMsg(p *serverPeer) error { ReqID: resp.ReqID, Obj: resp.Status, } - case StopMsg: + case msg.Code == StopMsg && p.version >= lpv3: p.freeze() h.backend.retriever.frozen(p) p.Log().Debug("Service stopped") - case ResumeMsg: + case msg.Code == ResumeMsg && p.version >= lpv3: var bv uint64 if err := msg.Decode(&bv); err != nil { return errResp(ErrDecode, "msg %v: %v", msg, err) @@ -394,6 +425,42 @@ func (pc *peerConnection) RequestHeadersByNumber(origin uint64, amount int, skip return nil } +// RetrieveSingleHeaderByNumber requests a single header by the specified block +// number. This function will wait the response until it's timeout or delivered. 
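handleMsg moves from `switch msg.Code` to a tagless switch so a case can combine the message code with a protocol-version guard, as the StopMsg and ResumeMsg cases now do for lpv3. A standalone sketch of the pattern; the constants and the dispatch helper are stand-ins, not the real protocol codes:

```go
package main

import "fmt"

// Stand-in message codes and protocol version, for illustration only.
const (
	announceMsg = iota
	stopMsg
	lpv3 = 3
)

// dispatch shows the tagless-switch form: every case is a boolean
// expression, so extra conditions can sit next to the code comparison.
func dispatch(code uint64, version int) string {
	switch {
	case code == announceMsg:
		return "announce"
	case code == stopMsg && version >= lpv3:
		return "stop (les/3+ only)"
	default:
		return "unknown or unsupported"
	}
}

func main() {
	fmt.Println(dispatch(stopMsg, 2)) // no case matches on les/2, hits default
	fmt.Println(dispatch(stopMsg, 3))
}
```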
+func (pc *peerConnection) RetrieveSingleHeaderByNumber(context context.Context, number uint64) (*types.Header, error) { + reqID := genReqID() + rq := &distReq{ + getCost: func(dp distPeer) uint64 { + peer := dp.(*serverPeer) + return peer.getRequestCost(GetBlockHeadersMsg, 1) + }, + canSend: func(dp distPeer) bool { + return dp.(*serverPeer) == pc.peer + }, + request: func(dp distPeer) func() { + peer := dp.(*serverPeer) + cost := peer.getRequestCost(GetBlockHeadersMsg, 1) + peer.fcServer.QueuedRequest(reqID, cost) + return func() { peer.requestHeadersByNumber(reqID, number, 1, 0, false) } + }, + } + var header *types.Header + if err := pc.handler.backend.retriever.retrieve(context, reqID, rq, func(peer distPeer, msg *Msg) error { + if msg.MsgType != MsgBlockHeaders { + return errInvalidMessageType + } + headers := msg.Obj.([]*types.Header) + if len(headers) != 1 { + return errInvalidEntryCount + } + header = headers[0] + return nil + }, nil); err != nil { + return nil, err + } + return header, nil +} + // downloaderPeerNotify implements peerSetNotify type downloaderPeerNotify clientHandler diff --git a/les/clientpool.go b/les/clientpool.go index da0db6e622..1aa63a281e 100644 --- a/les/clientpool.go +++ b/les/clientpool.go @@ -23,12 +23,14 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/ethdb" - lps "github.com/ethereum/go-ethereum/les/lespay/server" "github.com/ethereum/go-ethereum/les/utils" + "github.com/ethereum/go-ethereum/les/vflux" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" + "github.com/ethereum/go-ethereum/rlp" ) const ( @@ -64,17 +66,17 @@ const ( // and negative banalce. Boeth positive balance and negative balance will decrease // exponentially. If the balance is low enough, then the record will be dropped. 
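RetrieveSingleHeaderByNumber blocks until the header is delivered or the caller's context expires, so callers are expected to bound it with a deadline. A hypothetical stand-in (fetchHeader is not part of the patch) showing that calling pattern with context.WithTimeout:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// fetchHeader stands in for a retrieval like RetrieveSingleHeaderByNumber:
// it blocks until the request is answered or the caller's context expires.
func fetchHeader(ctx context.Context, number uint64) (string, error) {
	select {
	case <-time.After(50 * time.Millisecond): // pretend network round trip
		return fmt.Sprintf("header #%d", number), nil
	case <-ctx.Done():
		return "", ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	h, err := fetchHeader(ctx, 1000000)
	if err != nil {
		fmt.Println("retrieval failed:", err)
		return
	}
	fmt.Println("got", h)
}
```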
type clientPool struct { - lps.BalanceTrackerSetup - lps.PriorityPoolSetup + vfs.BalanceTrackerSetup + vfs.PriorityPoolSetup lock sync.Mutex clock mclock.Clock closed bool removePeer func(enode.ID) ns *nodestate.NodeStateMachine - pp *lps.PriorityPool - bt *lps.BalanceTracker + pp *vfs.PriorityPool + bt *vfs.BalanceTracker - defaultPosFactors, defaultNegFactors lps.PriceFactors + defaultPosFactors, defaultNegFactors vfs.PriceFactors posExpTC, negExpTC uint64 minCap uint64 // The minimal capacity value allowed for any client connectedBias time.Duration @@ -101,11 +103,11 @@ type clientInfo struct { peer clientPoolPeer connected, priority bool connectedAt mclock.AbsTime - balance *lps.NodeBalance + balance *vfs.NodeBalance } // newClientPool creates a new client pool -func newClientPool(ns *nodestate.NodeStateMachine, lespayDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { +func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID)) *clientPool { pool := &clientPool{ ns: ns, BalanceTrackerSetup: balanceTrackerSetup, @@ -115,8 +117,8 @@ func newClientPool(ns *nodestate.NodeStateMachine, lespayDb ethdb.Database, minC connectedBias: connectedBias, removePeer: removePeer, } - pool.bt = lps.NewBalanceTracker(ns, balanceTrackerSetup, lespayDb, clock, &utils.Expirer{}, &utils.Expirer{}) - pool.pp = lps.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4) + pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{}) + pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4) // set default expiration constants used by tests // Note: server overwrites this if token sale is active @@ -221,7 +223,7 @@ func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) { } f.ns.SetField(node, clientInfoField, c) f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil { + if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { f.disconnect(peer) return 0, nil } @@ -266,7 +268,7 @@ func (f *clientPool) disconnectNode(node *enode.Node) { } // setDefaultFactors sets the default price factors applied to subsequently connected clients -func (f *clientPool) setDefaultFactors(posFactors, negFactors lps.PriceFactors) { +func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) { f.lock.Lock() defer f.lock.Unlock() @@ -305,7 +307,7 @@ func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint6 c = &clientInfo{node: node} f.ns.SetField(node, clientInfoField, c) f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance == nil { + if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { log.Error("BalanceField is missing", "node", node.ID()) return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID()) } @@ -371,7 +373,7 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) { c = &clientInfo{node: node} f.ns.SetField(node, clientInfoField, c) f.ns.SetField(node, connAddressField, "") - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*lps.NodeBalance); c.balance != nil { + if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); 
c.balance != nil { cb(c) } else { log.Error("BalanceField is missing") @@ -382,3 +384,56 @@ func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) { } } } + +// serveCapQuery serves a vflux capacity query. It receives multiple token amount values +// and a bias time value. For each given token amount it calculates the maximum achievable +// capacity in case the amount is added to the balance. +func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { + var req vflux.CapacityQueryReq + if rlp.DecodeBytes(data, &req) != nil { + return nil + } + if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { + return nil + } + node := f.ns.GetNode(id) + if node == nil { + node = enode.SignNull(&enr.Record{}, id) + } + c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) + if c == nil { + c = &clientInfo{node: node} + f.ns.SetField(node, clientInfoField, c) + f.ns.SetField(node, connAddressField, freeID) + defer func() { + f.ns.SetField(node, connAddressField, nil) + f.ns.SetField(node, clientInfoField, nil) + }() + if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { + log.Error("BalanceField is missing", "node", node.ID()) + return nil + } + } + // use vfs.CapacityCurve to answer request for multiple newly bought token amounts + curve := f.pp.GetCapacityCurve().Exclude(id) + result := make(vflux.CapacityQueryReply, len(req.AddTokens)) + bias := time.Second * time.Duration(req.Bias) + if f.connectedBias > bias { + bias = f.connectedBias + } + pb, _ := c.balance.GetBalance() + for i, addTokens := range req.AddTokens { + add := addTokens.Int64() + result[i] = curve.MaxCapacity(func(capacity uint64) int64 { + return c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) + }) + if add <= 0 && uint64(-add) >= pb && result[i] > f.minCap { + result[i] = f.minCap + } + if result[i] < f.minCap { + result[i] = 0 + } + } + reply, _ := rlp.EncodeToBytes(&result) + return reply +} diff --git a/les/clientpool_test.go b/les/clientpool_test.go index b1c38d374c..345b373b0f 100644 --- a/les/clientpool_test.go +++ b/les/clientpool_test.go @@ -24,7 +24,7 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/rawdb" - lps "github.com/ethereum/go-ethereum/les/lespay/server" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" @@ -100,7 +100,7 @@ func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) { if temp { pool.ns.SetField(p.node, connAddressField, p.freeClientId()) } - n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*lps.NodeBalance) + n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance) pos, neg = n.GetBalance() if temp { pool.ns.SetField(p.node, connAddressField, nil) @@ -138,7 +138,7 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando pool.ns.Start() pool.setLimits(activeLimit, uint64(activeLimit)) - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // pool should accept new peers up to its connected limit for i := 0; i < activeLimit; i++ { @@ -243,7 +243,7 @@ func 
TestConnectPaidClient(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -259,7 +259,7 @@ func TestConnectPaidClientToSmallPool(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -278,7 +278,7 @@ func TestConnectPaidClientToFullPool(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20)) @@ -309,7 +309,7 @@ func TestPaidClientKickedOut(t *testing.T) { pool.bt.SetExpirationTCs(0, 0) defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance @@ -339,7 +339,7 @@ func TestConnectFreeClient(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 { t.Fatalf("Failed to connect free client") } @@ -356,7 +356,7 @@ func TestConnectFreeClientToFullPool(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { pool.connect(newPoolTestPeer(i, 
nil)) @@ -386,7 +386,7 @@ func TestFreeClientKickedOut(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { pool.connect(newPoolTestPeer(i, kicked)) @@ -428,7 +428,7 @@ func TestPositiveBalanceCalculation(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3)) testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true) @@ -452,7 +452,7 @@ func TestDowngradePriorityClient(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) p := newPoolTestPeer(0, kicked) addBalance(pool, p.node.ID(), int64(time.Minute)) @@ -487,7 +487,7 @@ func TestNegativeBalanceCalculation(t *testing.T) { pool.ns.Start() defer pool.stop() pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { pool.connect(newPoolTestPeer(i, nil)) @@ -508,8 +508,10 @@ func TestNegativeBalanceCalculation(t *testing.T) { for i := 0; i < 10; i++ { pool.disconnect(newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) - if checkDiff(nb, uint64(time.Minute)/1000) { - t.Fatalf("Negative balance mismatch, want %v, got %v", uint64(time.Minute)/1000, nb) + exp := uint64(time.Minute) / 1000 + exp -= exp / 120 // correct for negative balance expiration + if checkDiff(nb, exp) { + t.Fatalf("Negative balance mismatch, want %v, got %v", exp, nb) } } } @@ -564,7 +566,7 @@ func TestInactiveClient(t *testing.T) { if p2.cap != 0 { t.Fatalf("Failed to deactivate peer #2") } - pool.setDefaultFactors(lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, lps.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) + pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) p4 := newPoolTestPeer(4, nil) addBalance(pool, p4.node.ID(), 1500*int64(time.Second)) // p1: 1000 p2: 500 p3: 2000 p4: 1500 diff --git a/les/commons.go b/les/commons.go index 003e196d2b..d090fc21fc 100644 --- a/les/commons.go +++ 
b/les/commons.go @@ -25,7 +25,7 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/checkpointoracle" @@ -33,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" ) @@ -42,17 +41,6 @@ func errResp(code errCode, format string, v ...interface{}) error { return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...)) } -func lesTopic(genesisHash common.Hash, protocolVersion uint) discv5.Topic { - var name string - switch protocolVersion { - case lpv2: - name = "LES2" - default: - panic(nil) - } - return discv5.Topic(name + "@" + common.Bytes2Hex(genesisHash.Bytes()[0:8])) -} - type chainReader interface { CurrentHeader() *types.Header } @@ -60,10 +48,10 @@ type chainReader interface { // lesCommons contains fields needed by both server and client. type lesCommons struct { genesis common.Hash - config *eth.Config + config *ethconfig.Config chainConfig *params.ChainConfig iConfig *light.IndexerConfig - chainDb ethdb.Database + chainDb, lesDb ethdb.Database chainReader chainReader chtIndexer, bloomTrieIndexer *core.ChainIndexer oracle *checkpointoracle.CheckpointOracle @@ -150,24 +138,24 @@ func (c *lesCommons) localCheckpoint(index uint64) params.TrustedCheckpoint { } // setupOracle sets up the checkpoint oracle contract client. -func (c *lesCommons) setupOracle(node *node.Node, genesis common.Hash, ethconfig *eth.Config) *checkpointoracle.CheckpointOracle { +func (c *lesCommons) setupOracle(node *node.Node, genesis common.Hash, ethconfig *ethconfig.Config) *checkpointoracle.CheckpointOracle { config := ethconfig.CheckpointOracle if config == nil { // Try loading default config. config = params.CheckpointOracles[genesis] } if config == nil { - log.Info("Checkpoint registrar is not enabled") + log.Info("Checkpoint oracle is not enabled") return nil } if config.Address == (common.Address{}) || uint64(len(config.Signers)) < config.Threshold { - log.Warn("Invalid checkpoint registrar config") + log.Warn("Invalid checkpoint oracle config") return nil } oracle := checkpointoracle.New(config, c.localCheckpoint) rpcClient, _ := node.Attach() client := ethclient.NewClient(rpcClient) oracle.Start(client) - log.Info("Configured checkpoint registrar", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold) + log.Info("Configured checkpoint oracle", "address", config.Address, "signers", len(config.Signers), "threshold", config.Threshold) return oracle } diff --git a/les/costtracker.go b/les/costtracker.go index 0558779bc5..43e32a5b2d 100644 --- a/les/costtracker.go +++ b/les/costtracker.go @@ -24,7 +24,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/flowcontrol" "github.com/ethereum/go-ethereum/log" @@ -137,7 +137,7 @@ type costTracker struct { // newCostTracker creates a cost tracker and loads the cost factor statistics from the database. 
// It also returns the minimum capacity that can be assigned to any peer. -func newCostTracker(db ethdb.Database, config *eth.Config) (*costTracker, uint64) { +func newCostTracker(db ethdb.Database, config *ethconfig.Config) (*costTracker, uint64) { utilTarget := float64(config.LightServ) * flowcontrol.FixedPointMultiplier / 100 ct := &costTracker{ db: db, diff --git a/les/enr_entry.go b/les/enr_entry.go index 11e6273be5..8be4a7a00e 100644 --- a/les/enr_entry.go +++ b/les/enr_entry.go @@ -17,6 +17,7 @@ package les import ( + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/dnsdisc" "github.com/ethereum/go-ethereum/p2p/enode" @@ -26,19 +27,47 @@ import ( // lesEntry is the "les" ENR entry. This is set for LES servers only. type lesEntry struct { // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` + VfxVersion uint + Rest []rlp.RawValue `rlp:"tail"` } -// ENRKey implements enr.Entry. -func (e lesEntry) ENRKey() string { - return "les" +func (lesEntry) ENRKey() string { return "les" } + +// ethEntry is the "eth" ENR entry. This is redeclared here to avoid depending on package eth. +type ethEntry struct { + ForkID forkid.ID + _ []rlp.RawValue `rlp:"tail"` } +func (ethEntry) ENRKey() string { return "eth" } + // setupDiscovery creates the node discovery source for the eth protocol. func (eth *LightEthereum) setupDiscovery(cfg *p2p.Config) (enode.Iterator, error) { - if cfg.NoDiscovery || len(eth.config.DiscoveryURLs) == 0 { - return nil, nil + it := enode.NewFairMix(0) + + // Enable DNS discovery. + if len(eth.config.EthDiscoveryURLs) != 0 { + client := dnsdisc.NewClient(dnsdisc.Config{}) + dns, err := client.NewIterator(eth.config.EthDiscoveryURLs...) + if err != nil { + return nil, err + } + it.AddSource(dns) + } + + // Enable DHT. + if cfg.DiscoveryV5 && eth.p2pServer.DiscV5 != nil { + it.AddSource(eth.p2pServer.DiscV5.RandomNodes()) } - client := dnsdisc.NewClient(dnsdisc.Config{}) - return client.NewIterator(eth.config.DiscoveryURLs...) + + forkFilter := forkid.NewFilter(eth.blockchain) + iterator := enode.Filter(it, func(n *enode.Node) bool { return nodeIsServer(forkFilter, n) }) + return iterator, nil +} + +// nodeIsServer checks whether n is an LES server node. +func nodeIsServer(forkFilter forkid.Filter, n *enode.Node) bool { + var les lesEntry + var eth ethEntry + return n.Load(&les) == nil && n.Load(ð) == nil && forkFilter(eth.ForkID) == nil } diff --git a/les/fetcher_test.go b/les/fetcher_test.go index a9e6e6835e..d3a74d25c2 100644 --- a/les/fetcher_test.go +++ b/les/fetcher_test.go @@ -66,7 +66,12 @@ func TestSequentialAnnouncementsLes2(t *testing.T) { testSequentialAnnouncements func TestSequentialAnnouncementsLes3(t *testing.T) { testSequentialAnnouncements(t, 3) } func testSequentialAnnouncements(t *testing.T, protocol int) { - s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, true) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + s, c, teardown := newClientServerEnv(t, netconfig) defer teardown() // Create connected peer pair. 
@@ -101,7 +106,12 @@ func TestGappedAnnouncementsLes2(t *testing.T) { testGappedAnnouncements(t, 2) } func TestGappedAnnouncementsLes3(t *testing.T) { testGappedAnnouncements(t, 3) } func testGappedAnnouncements(t *testing.T, protocol int) { - s, c, teardown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, false, true) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + s, c, teardown := newClientServerEnv(t, netconfig) defer teardown() // Create connected peer pair. @@ -183,7 +193,13 @@ func testTrustedAnnouncement(t *testing.T, protocol int) { ids = append(ids, n.String()) } } - _, c, teardown := newClientServerEnv(t, 0, protocol, nil, ids, 60, false, false, true) + netconfig := testnetConfig{ + protocol: protocol, + nopruning: true, + ulcServers: ids, + ulcFraction: 60, + } + _, c, teardown := newClientServerEnv(t, netconfig) defer teardown() defer func() { for i := 0; i < len(teardowns); i++ { @@ -233,8 +249,17 @@ func testTrustedAnnouncement(t *testing.T, protocol int) { check([]uint64{10}, 10, func() { <-newHead }) // Sync the whole chain. } -func TestInvalidAnnounces(t *testing.T) { - s, c, teardown := newClientServerEnv(t, 4, lpv3, nil, nil, 0, false, false, true) +func TestInvalidAnnouncesLES2(t *testing.T) { testInvalidAnnounces(t, lpv2) } +func TestInvalidAnnouncesLES3(t *testing.T) { testInvalidAnnounces(t, lpv3) } +func TestInvalidAnnouncesLES4(t *testing.T) { testInvalidAnnounces(t, lpv4) } + +func testInvalidAnnounces(t *testing.T, protocol int) { + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + s, c, teardown := newClientServerEnv(t, netconfig) defer teardown() // Create connected peer pair. diff --git a/les/flowcontrol/control.go b/les/flowcontrol/control.go index 490013677c..4f0de82318 100644 --- a/les/flowcontrol/control.go +++ b/les/flowcontrol/control.go @@ -19,6 +19,7 @@ package flowcontrol import ( "fmt" + "math" "sync" "time" @@ -316,6 +317,9 @@ func (node *ServerNode) CanSend(maxCost uint64) (time.Duration, float64) { node.lock.RLock() defer node.lock.RUnlock() + if node.params.BufLimit == 0 { + return time.Duration(math.MaxInt64), 0 + } now := node.clock.Now() node.recalcBLE(now) maxCost += uint64(safetyMargin) * node.params.MinRecharge / uint64(fcTimeConst) diff --git a/les/handler_test.go b/les/handler_test.go index 1612caf427..d1dbee6bdf 100644 --- a/les/handler_test.go +++ b/les/handler_test.go @@ -49,11 +49,19 @@ func expectResponse(r p2p.MsgReader, msgcode, reqID, bv uint64, data interface{} // Tests that block headers can be retrieved from a remote chain based on user queries. 
func TestGetBlockHeadersLes2(t *testing.T) { testGetBlockHeaders(t, 2) } func TestGetBlockHeadersLes3(t *testing.T) { testGetBlockHeaders(t, 3) } +func TestGetBlockHeadersLes4(t *testing.T) { testGetBlockHeaders(t, 4) } func testGetBlockHeaders(t *testing.T, protocol int) { - server, tearDown := newServerEnv(t, downloader.MaxHashFetch+15, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: downloader.MaxHeaderFetch + 15, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() bc := server.handler.blockchain // Create a "random" unknown hash for testing @@ -64,27 +72,27 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Create a batch of tests for various scenarios limit := uint64(MaxHeaderFetch) tests := []struct { - query *getBlockHeadersData // The query to execute for header retrieval + query *GetBlockHeadersData // The query to execute for header retrieval expect []common.Hash // The hashes of the block whose headers are expected }{ // A single random block should be retrievable by hash and number too { - &getBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Hash: bc.GetBlockByNumber(limit / 2).Hash()}, Amount: 1}, []common.Hash{bc.GetBlockByNumber(limit / 2).Hash()}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 1}, []common.Hash{bc.GetBlockByNumber(limit / 2).Hash()}, }, // Multiple headers should be retrievable in both directions { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3}, []common.Hash{ bc.GetBlockByNumber(limit / 2).Hash(), bc.GetBlockByNumber(limit/2 + 1).Hash(), bc.GetBlockByNumber(limit/2 + 2).Hash(), }, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Amount: 3, Reverse: true}, []common.Hash{ bc.GetBlockByNumber(limit / 2).Hash(), bc.GetBlockByNumber(limit/2 - 1).Hash(), @@ -93,14 +101,14 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }, // Multiple headers with skip lists should be retrievable { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3}, []common.Hash{ bc.GetBlockByNumber(limit / 2).Hash(), bc.GetBlockByNumber(limit/2 + 4).Hash(), bc.GetBlockByNumber(limit/2 + 8).Hash(), }, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: limit / 2}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ bc.GetBlockByNumber(limit / 2).Hash(), bc.GetBlockByNumber(limit/2 - 4).Hash(), @@ -109,26 +117,26 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }, // The chain endpoints should be retrievable { - &getBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: 0}, Amount: 1}, []common.Hash{bc.GetBlockByNumber(0).Hash()}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64()}, 
Amount: 1}, []common.Hash{bc.CurrentBlock().Hash()}, }, // Ensure protocol limits are honored //{ - // &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, + // &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 1}, Amount: limit + 10, Reverse: true}, // []common.Hash{}, //}, // Check that requesting more than available is handled gracefully { - &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 3, Amount: 3}, []common.Hash{ bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(), bc.GetBlockByNumber(bc.CurrentBlock().NumberU64()).Hash(), }, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 3, Amount: 3, Reverse: true}, []common.Hash{ bc.GetBlockByNumber(4).Hash(), bc.GetBlockByNumber(0).Hash(), @@ -136,13 +144,13 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }, // Check that requesting more than available is handled gracefully, even if mid skip { - &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() - 4}, Skip: 2, Amount: 3}, []common.Hash{ bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 4).Hash(), bc.GetBlockByNumber(bc.CurrentBlock().NumberU64() - 1).Hash(), }, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: 4}, Skip: 2, Amount: 3, Reverse: true}, []common.Hash{ bc.GetBlockByNumber(4).Hash(), bc.GetBlockByNumber(1).Hash(), @@ -150,10 +158,10 @@ func testGetBlockHeaders(t *testing.T, protocol int) { }, // Check that non existing headers aren't returned { - &getBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Hash: unknown}, Amount: 1}, []common.Hash{}, }, { - &getBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1}, + &GetBlockHeadersData{Origin: hashOrNumber{Number: bc.CurrentBlock().NumberU64() + 1}, Amount: 1}, []common.Hash{}, }, } @@ -168,8 +176,8 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Send the hash request and verify the response reqID++ - sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, tt.query) - if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil { + sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, tt.query) + if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, testBufLimit, headers); err != nil { t.Errorf("test %d: headers mismatch: %v", i, err) } } @@ -178,11 +186,20 @@ func testGetBlockHeaders(t *testing.T, protocol int) { // Tests that block contents can be retrieved from a remote chain based on their hashes. 
func TestGetBlockBodiesLes2(t *testing.T) { testGetBlockBodies(t, 2) } func TestGetBlockBodiesLes3(t *testing.T) { testGetBlockBodies(t, 3) } +func TestGetBlockBodiesLes4(t *testing.T) { testGetBlockBodies(t, 4) } func testGetBlockBodies(t *testing.T, protocol int) { - server, tearDown := newServerEnv(t, downloader.MaxBlockFetch+15, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: downloader.MaxHeaderFetch + 15, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain // Create a batch of tests for various scenarios @@ -245,8 +262,8 @@ func testGetBlockBodies(t *testing.T, protocol int) { reqID++ // Send the hash request and verify the response - sendRequest(server.peer.app, GetBlockBodiesMsg, reqID, hashes) - if err := expectResponse(server.peer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil { + sendRequest(rawPeer.app, GetBlockBodiesMsg, reqID, hashes) + if err := expectResponse(rawPeer.app, BlockBodiesMsg, reqID, testBufLimit, bodies); err != nil { t.Errorf("test %d: bodies mismatch: %v", i, err) } } @@ -255,11 +272,21 @@ func testGetBlockBodies(t *testing.T, protocol int) { // Tests that the contract codes can be retrieved based on account addresses. func TestGetCodeLes2(t *testing.T) { testGetCode(t, 2) } func TestGetCodeLes3(t *testing.T) { testGetCode(t, 3) } +func TestGetCodeLes4(t *testing.T) { testGetCode(t, 4) } func testGetCode(t *testing.T, protocol int) { // Assemble the test environment - server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain var codereqs []*CodeReq @@ -276,8 +303,8 @@ func testGetCode(t *testing.T, protocol int) { } } - sendRequest(server.peer.app, GetCodeMsg, 42, codereqs) - if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, codes); err != nil { + sendRequest(rawPeer.app, GetCodeMsg, 42, codereqs) + if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, codes); err != nil { t.Errorf("codes mismatch: %v", err) } } @@ -285,10 +312,20 @@ func testGetCode(t *testing.T, protocol int) { // Tests that the stale contract codes can't be retrieved based on account addresses. 
func TestGetStaleCodeLes2(t *testing.T) { testGetStaleCode(t, 2) } func TestGetStaleCodeLes3(t *testing.T) { testGetStaleCode(t, 3) } +func TestGetStaleCodeLes4(t *testing.T) { testGetStaleCode(t, 4) } func testGetStaleCode(t *testing.T, protocol int) { - server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: core.TriesInMemory + 4, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain check := func(number uint64, expected [][]byte) { @@ -296,8 +333,8 @@ func testGetStaleCode(t *testing.T, protocol int) { BHash: bc.GetHeaderByNumber(number).Hash(), AccKey: crypto.Keccak256(testContractAddr[:]), } - sendRequest(server.peer.app, GetCodeMsg, 42, []*CodeReq{req}) - if err := expectResponse(server.peer.app, CodeMsg, 42, testBufLimit, expected); err != nil { + sendRequest(rawPeer.app, GetCodeMsg, 42, []*CodeReq{req}) + if err := expectResponse(rawPeer.app, CodeMsg, 42, testBufLimit, expected); err != nil { t.Errorf("codes mismatch: %v", err) } } @@ -309,12 +346,21 @@ func testGetStaleCode(t *testing.T, protocol int) { // Tests that the transaction receipts can be retrieved based on hashes. func TestGetReceiptLes2(t *testing.T) { testGetReceipt(t, 2) } func TestGetReceiptLes3(t *testing.T) { testGetReceipt(t, 3) } +func TestGetReceiptLes4(t *testing.T) { testGetReceipt(t, 4) } func testGetReceipt(t *testing.T, protocol int) { // Assemble the test environment - server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain // Collect the hashes to request, and the response to expect @@ -327,8 +373,8 @@ func testGetReceipt(t *testing.T, protocol int) { receipts = append(receipts, rawdb.ReadRawReceipts(server.db, block.Hash(), block.NumberU64())) } // Send the hash request and verify the response - sendRequest(server.peer.app, GetReceiptsMsg, 42, hashes) - if err := expectResponse(server.peer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil { + sendRequest(rawPeer.app, GetReceiptsMsg, 42, hashes) + if err := expectResponse(rawPeer.app, ReceiptsMsg, 42, testBufLimit, receipts); err != nil { t.Errorf("receipts mismatch: %v", err) } } @@ -336,12 +382,21 @@ func testGetReceipt(t *testing.T, protocol int) { // Tests that trie merkle proofs can be retrieved func TestGetProofsLes2(t *testing.T) { testGetProofs(t, 2) } func TestGetProofsLes3(t *testing.T) { testGetProofs(t, 3) } +func TestGetProofsLes4(t *testing.T) { testGetProofs(t, 4) } func testGetProofs(t *testing.T, protocol int) { // Assemble the test environment - server, tearDown := newServerEnv(t, 4, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain var proofreqs []ProofReq @@ -362,8 +417,8 @@ func testGetProofs(t *testing.T, protocol int) { } } // Send the proof request and verify the response - sendRequest(server.peer.app, 
GetProofsV2Msg, 42, proofreqs) - if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil { + sendRequest(rawPeer.app, GetProofsV2Msg, 42, proofreqs) + if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, proofsV2.NodeList()); err != nil { t.Errorf("proofs mismatch: %v", err) } } @@ -371,10 +426,20 @@ func testGetProofs(t *testing.T, protocol int) { // Tests that the stale contract codes can't be retrieved based on account addresses. func TestGetStaleProofLes2(t *testing.T) { testGetStaleProof(t, 2) } func TestGetStaleProofLes3(t *testing.T) { testGetStaleProof(t, 3) } +func TestGetStaleProofLes4(t *testing.T) { testGetStaleProof(t, 4) } func testGetStaleProof(t *testing.T, protocol int) { - server, tearDown := newServerEnv(t, core.TriesInMemory+4, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + blocks: core.TriesInMemory + 4, + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain check := func(number uint64, wantOK bool) { @@ -386,7 +451,7 @@ func testGetStaleProof(t *testing.T, protocol int) { BHash: header.Hash(), Key: account, } - sendRequest(server.peer.app, GetProofsV2Msg, 42, []*ProofReq{req}) + sendRequest(rawPeer.app, GetProofsV2Msg, 42, []*ProofReq{req}) var expected []rlp.RawValue if wantOK { @@ -395,7 +460,7 @@ func testGetStaleProof(t *testing.T, protocol int) { t.Prove(account, 0, proofsV2) expected = proofsV2.NodeList() } - if err := expectResponse(server.peer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil { + if err := expectResponse(rawPeer.app, ProofsV2Msg, 42, testBufLimit, expected); err != nil { t.Errorf("codes mismatch: %v", err) } } @@ -407,22 +472,33 @@ func testGetStaleProof(t *testing.T, protocol int) { // Tests that CHT proofs can be correctly retrieved. 
func TestGetCHTProofsLes2(t *testing.T) { testGetCHTProofs(t, 2) } func TestGetCHTProofsLes3(t *testing.T) { testGetCHTProofs(t, 3) } +func TestGetCHTProofsLes4(t *testing.T) { testGetCHTProofs(t, 4) } func testGetCHTProofs(t *testing.T, protocol int) { - config := light.TestServerIndexerConfig - - waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { - for { - cs, _, _ := cIndexer.Sections() - if cs >= 1 { - break + var ( + config = light.TestServerIndexerConfig + waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { + for { + cs, _, _ := cIndexer.Sections() + if cs >= 1 { + break + } + time.Sleep(10 * time.Millisecond) } - time.Sleep(10 * time.Millisecond) } - } - server, tearDown := newServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, false, true, 0) + netconfig = testnetConfig{ + blocks: int(config.ChtSize + config.ChtConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + ) + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain // Assemble the proofs from the different protocols @@ -443,34 +519,45 @@ func testGetCHTProofs(t *testing.T, protocol int) { Type: htCanonical, TrieIdx: 0, Key: key, - AuxReq: auxHeader, + AuxReq: htAuxHeader, }} // Send the proof request and verify the response - sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requestsV2) - if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil { + sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requestsV2) + if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofsV2); err != nil { t.Errorf("proofs mismatch: %v", err) } } func TestGetBloombitsProofsLes2(t *testing.T) { testGetBloombitsProofs(t, 2) } func TestGetBloombitsProofsLes3(t *testing.T) { testGetBloombitsProofs(t, 3) } +func TestGetBloombitsProofsLes4(t *testing.T) { testGetBloombitsProofs(t, 4) } // Tests that bloombits proofs can be correctly retrieved. 
func testGetBloombitsProofs(t *testing.T, protocol int) { - config := light.TestServerIndexerConfig - - waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { - for { - bts, _, _ := btIndexer.Sections() - if bts >= 1 { - break + var ( + config = light.TestServerIndexerConfig + waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { + for { + bts, _, _ := btIndexer.Sections() + if bts >= 1 { + break + } + time.Sleep(10 * time.Millisecond) } - time.Sleep(10 * time.Millisecond) } - } - server, tearDown := newServerEnv(t, int(config.BloomTrieSize+config.BloomTrieConfirms), protocol, waitIndexers, false, true, 0) + netconfig = testnetConfig{ + blocks: int(config.BloomTrieSize + config.BloomTrieConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + ) + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + bc := server.handler.blockchain // Request and verify each bit of the bloom bits proofs @@ -494,19 +581,28 @@ func testGetBloombitsProofs(t *testing.T, protocol int) { trie.Prove(key, 0, &proofs.Proofs) // Send the proof request and verify the response - sendRequest(server.peer.app, GetHelperTrieProofsMsg, 42, requests) - if err := expectResponse(server.peer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil { + sendRequest(rawPeer.app, GetHelperTrieProofsMsg, 42, requests) + if err := expectResponse(rawPeer.app, HelperTrieProofsMsg, 42, testBufLimit, proofs); err != nil { t.Errorf("bit %d: proofs mismatch: %v", bit, err) } } } -func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, 2) } -func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, 3) } +func TestTransactionStatusLes2(t *testing.T) { testTransactionStatus(t, lpv2) } +func TestTransactionStatusLes3(t *testing.T) { testTransactionStatus(t, lpv3) } +func TestTransactionStatusLes4(t *testing.T) { testTransactionStatus(t, lpv4) } func testTransactionStatus(t *testing.T, protocol int) { - server, tearDown := newServerEnv(t, 0, protocol, nil, false, true, 0) + netconfig := testnetConfig{ + protocol: protocol, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() + + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() + server.handler.addTxsSync = true chain := server.handler.blockchain @@ -516,11 +612,11 @@ func testTransactionStatus(t *testing.T, protocol int) { test := func(tx *types.Transaction, send bool, expStatus light.TxStatus) { reqID++ if send { - sendRequest(server.peer.app, SendTxV2Msg, reqID, types.Transactions{tx}) + sendRequest(rawPeer.app, SendTxV2Msg, reqID, types.Transactions{tx}) } else { - sendRequest(server.peer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()}) + sendRequest(rawPeer.app, GetTxStatusMsg, reqID, []common.Hash{tx.Hash()}) } - if err := expectResponse(server.peer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { + if err := expectResponse(rawPeer.app, TxStatusMsg, reqID, testBufLimit, []light.TxStatus{expStatus}); err != nil { t.Errorf("transaction status mismatch") } } @@ -562,7 +658,7 @@ func testTransactionStatus(t *testing.T, protocol int) { t.Fatalf("pending count mismatch: have %d, want 1", pending) } // Discard new block announcement - msg, _ := server.peer.app.ReadMsg() + msg, _ := rawPeer.app.ReadMsg() msg.Discard() // check if their status is included now @@ -587,7 +683,7 @@ 
func testTransactionStatus(t *testing.T, protocol int) { t.Fatalf("pending count mismatch: have %d, want 3", pending) } // Discard new block announcement - msg, _ = server.peer.app.ReadMsg() + msg, _ = rawPeer.app.ReadMsg() msg.Discard() // check if their status is pending again @@ -595,11 +691,23 @@ func testTransactionStatus(t *testing.T, protocol int) { test(tx2, false, light.TxStatus{Status: core.TxStatusPending}) } -func TestStopResumeLes3(t *testing.T) { - server, tearDown := newServerEnv(t, 0, 3, nil, true, true, testBufLimit/10) +func TestStopResumeLES3(t *testing.T) { testStopResume(t, lpv3) } +func TestStopResumeLES4(t *testing.T) { testStopResume(t, lpv4) } + +func testStopResume(t *testing.T, protocol int) { + netconfig := testnetConfig{ + protocol: protocol, + simClock: true, + nopruning: true, + } + server, _, tearDown := newClientServerEnv(t, netconfig) defer tearDown() server.handler.server.costTracker.testing = true + server.handler.server.costTracker.testCostList = testCostList(testBufLimit / 10) + + rawPeer, closePeer, _ := server.newRawPeer(t, "peer", protocol) + defer closePeer() var ( reqID uint64 @@ -609,14 +717,14 @@ func TestStopResumeLes3(t *testing.T) { header := server.handler.blockchain.CurrentHeader() req := func() { reqID++ - sendRequest(server.peer.app, GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1}) + sendRequest(rawPeer.app, GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: header.Hash()}, Amount: 1}) } for i := 1; i <= 5; i++ { // send requests while we still have enough buffer and expect a response for expBuf >= testCost { req() expBuf -= testCost - if err := expectResponse(server.peer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil { + if err := expectResponse(rawPeer.app, BlockHeadersMsg, reqID, expBuf, []*types.Header{header}); err != nil { t.Errorf("expected response and failed: %v", err) } } @@ -626,7 +734,7 @@ func TestStopResumeLes3(t *testing.T) { req() c-- } - if err := p2p.ExpectMsg(server.peer.app, StopMsg, nil); err != nil { + if err := p2p.ExpectMsg(rawPeer.app, StopMsg, nil); err != nil { t.Errorf("expected StopMsg and failed: %v", err) } // wait until the buffer is recharged by half of the limit @@ -635,7 +743,7 @@ func TestStopResumeLes3(t *testing.T) { // expect a ResumeMsg with the partially recharged buffer value expBuf += testBufRecharge * wait - if err := p2p.ExpectMsg(server.peer.app, ResumeMsg, expBuf); err != nil { + if err := p2p.ExpectMsg(rawPeer.app, ResumeMsg, expBuf); err != nil { t.Errorf("expected ResumeMsg and failed: %v", err) } } diff --git a/les/lespay/server/prioritypool_test.go b/les/lespay/server/prioritypool_test.go deleted file mode 100644 index cbb3f5b372..0000000000 --- a/les/lespay/server/prioritypool_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2020 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. 
-// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package server - -import ( - "math/rand" - "reflect" - "testing" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" -) - -var ( - testSetup = &nodestate.Setup{} - ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag") - ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag") - ppTestSetup = NewPriorityPoolSetup(testSetup) -) - -func init() { - ppTestSetup.Connect(ppTestClientField, ppUpdateFlag) -} - -const ( - testCapacityStepDiv = 100 - testCapacityToleranceDiv = 10 -) - -type ppTestClient struct { - node *enode.Node - balance, cap uint64 -} - -func (c *ppTestClient) Priority(now mclock.AbsTime, cap uint64) int64 { - return int64(c.balance / cap) -} - -func (c *ppTestClient) EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 { - return int64(c.balance / cap) -} - -func TestPriorityPool(t *testing.T) { - clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - - ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if n := ns.GetField(node, ppTestSetup.priorityField); n != nil { - c := n.(*ppTestClient) - c.cap = newValue.(uint64) - } - }) - pp := NewPriorityPool(ns, ppTestSetup, clock, 100, 0, testCapacityStepDiv) - ns.Start() - pp.SetLimits(100, 1000000) - clients := make([]*ppTestClient, 100) - raise := func(c *ppTestClient) { - for { - var ok bool - ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) - }) - if !ok { - return - } - } - } - var sumBalance uint64 - check := func(c *ppTestClient) { - expCap := 1000000 * c.balance / sumBalance - capTol := expCap / testCapacityToleranceDiv - if c.cap < expCap-capTol || c.cap > expCap+capTol { - t.Errorf("Wrong node capacity (expected %d, got %d)", expCap, c.cap) - } - } - - for i := range clients { - c := &ppTestClient{ - node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}), - balance: 1000000000, - cap: 1000, - } - sumBalance += c.balance - clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) - raise(c) - check(c) - } - - for count := 0; count < 100; count++ { - c := clients[rand.Intn(len(clients))] - oldBalance := c.balance - c.balance = uint64(rand.Int63n(1000000000) + 1000000000) - sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) - if c.balance > oldBalance { - raise(c) - } else { - for _, c := range clients { - raise(c) - } - } - for _, c := range clients { - check(c) - } - } - - ns.Stop() -} diff --git a/les/odr.go b/les/odr.go index f8469cc103..d45c6a1a5d 100644 --- a/les/odr.go +++ b/les/odr.go @@ -18,13 +18,13 @@ package les import ( "context" + "sort" "time" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/light" - "github.com/ethereum/go-ethereum/log" ) // LesOdr implements light.OdrBackend @@ -32,14 +32,16 @@ type LesOdr struct { db 
ethdb.Database indexerConfig *light.IndexerConfig chtIndexer, bloomTrieIndexer, bloomIndexer *core.ChainIndexer + peers *serverPeerSet retriever *retrieveManager stop chan struct{} } -func NewLesOdr(db ethdb.Database, config *light.IndexerConfig, retriever *retrieveManager) *LesOdr { +func NewLesOdr(db ethdb.Database, config *light.IndexerConfig, peers *serverPeerSet, retriever *retrieveManager) *LesOdr { return &LesOdr{ db: db, indexerConfig: config, + peers: peers, retriever: retriever, stop: make(chan struct{}), } @@ -83,7 +85,8 @@ func (odr *LesOdr) IndexerConfig() *light.IndexerConfig { } const ( - MsgBlockBodies = iota + MsgBlockHeaders = iota + MsgBlockBodies MsgCode MsgReceipts MsgProofsV2 @@ -98,7 +101,101 @@ type Msg struct { Obj interface{} } -// Retrieve tries to fetch an object from the LES network. +// peerByTxHistory is a sort.Interface implementation which can sort +// the peerset by transaction history. +type peerByTxHistory []*serverPeer + +func (h peerByTxHistory) Len() int { return len(h) } +func (h peerByTxHistory) Less(i, j int) bool { + if h[i].txHistory == txIndexUnlimited { + return false + } + if h[j].txHistory == txIndexUnlimited { + return true + } + return h[i].txHistory < h[j].txHistory } +func (h peerByTxHistory) Swap(i, j int) { h[i], h[j] = h[j], h[i] } + +const ( + maxTxStatusRetry = 3 // The maximum number of retries that will be made for a tx status request. + maxTxStatusCandidates = 5 // The maximum number of les servers the tx status requests will be sent to. +) + +// RetrieveTxStatus retrieves the transaction status from the LES network. +// There is no guarantee in the LES protocol that the mined transaction will +// be retrieved back for sure, for various reasons (the transaction may be +// unindexed, a malicious server may deliberately withhold the reply, etc). +// Therefore, unretrieved transactions (UNKNOWN) will receive a certain number +// of retries, thus giving a weak guarantee. +func (odr *LesOdr) RetrieveTxStatus(ctx context.Context, req *light.TxStatusRequest) error { + // Sort according to the transaction history supported by the peer and + // select the peers with the longest history. + var ( + retries int + peers []*serverPeer + missing = len(req.Hashes) + result = make([]light.TxStatus, len(req.Hashes)) + canSend = make(map[string]bool) + ) + for _, peer := range odr.peers.allPeers() { + if peer.txHistory == txIndexDisabled { + continue + } + peers = append(peers, peer) + } + sort.Sort(sort.Reverse(peerByTxHistory(peers))) + for i := 0; i < maxTxStatusCandidates && i < len(peers); i++ { + canSend[peers[i].id] = true + } + // Send out the request and assemble the result. + for { + if retries >= maxTxStatusRetry || len(canSend) == 0 { + break + } + var ( + // Deep copy the request, so that the partial result won't be mixed. + req = &TxStatusRequest{Hashes: req.Hashes} + id = genReqID() + distreq = &distReq{ + getCost: func(dp distPeer) uint64 { return req.GetCost(dp.(*serverPeer)) }, + canSend: func(dp distPeer) bool { return canSend[dp.(*serverPeer).id] }, + request: func(dp distPeer) func() { + p := dp.(*serverPeer) + p.fcServer.QueuedRequest(id, req.GetCost(p)) + delete(canSend, p.id) + return func() { req.Request(id, p) } + }, + } + ) + if err := odr.retriever.retrieve(ctx, id, distreq, func(p distPeer, msg *Msg) error { return req.Validate(odr.db, msg) }, odr.stop); err != nil { + return err + } + // Collect the responses and assemble them into the final result. + // The responses are not verifiable, so always pick the first + // one we get.
+ for index, status := range req.Status { + if result[index].Status != core.TxStatusUnknown { + continue + } + if status.Status == core.TxStatusUnknown { + continue + } + result[index], missing = status, missing-1 + } + // Abort the procedure if all the statuses have been retrieved + if missing == 0 { + break + } + retries += 1 + } + req.Status = result + return nil +} + +// Retrieve tries to fetch an object from the LES network. It's a common API + // for most of the LES requests except for the TxStatusRequest, which needs + // the additional retry mechanism. // If the network retrieval was successful, it stores the object in local db. func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err error) { lreq := LesRequest(req) @@ -122,13 +219,17 @@ func (odr *LesOdr) Retrieve(ctx context.Context, req light.OdrRequest) (err erro return func() { lreq.Request(reqID, p) } }, } - sent := mclock.Now() - if err = odr.retriever.retrieve(ctx, reqID, rq, func(p distPeer, msg *Msg) error { return lreq.Validate(odr.db, msg) }, odr.stop); err == nil { - // retrieved from network, store in db - req.StoreResult(odr.db) + + defer func(sent mclock.AbsTime) { + if err != nil { + return + } requestRTT.Update(time.Duration(mclock.Now() - sent)) - } else { - log.Debug("Failed to retrieve data from network", "err", err) + }(mclock.Now()) + + if err := odr.retriever.retrieve(ctx, reqID, rq, func(p distPeer, msg *Msg) error { return lreq.Validate(odr.db, msg) }, odr.stop); err != nil { + return err } - return + req.StoreResult(odr.db) + return nil } diff --git a/les/odr_requests.go b/les/odr_requests.go index 3cc55c98d8..d548fb1ee0 100644 --- a/les/odr_requests.go +++ b/les/odr_requests.go @@ -116,7 +116,7 @@ func (r *BlockRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), new(trie.Trie)) { + if r.Header.TxHash != types.DeriveSha(types.Transactions(body.Transactions), trie.NewStackTrie(nil)) { return errTxHashMismatch } if r.Header.UncleHash != types.CalcUncleHash(body.Uncles) { @@ -174,7 +174,7 @@ func (r *ReceiptsRequest) Validate(db ethdb.Database, msg *Msg) error { if r.Header == nil { return errHeaderUnavailable } - if r.Header.ReceiptHash != types.DeriveSha(receipt, new(trie.Trie)) { + if r.Header.ReceiptHash != types.DeriveSha(receipt, trie.NewStackTrie(nil)) { return errReceiptHashMismatch } // Validations passed, store and return @@ -295,10 +295,9 @@ const ( htCanonical = iota // Canonical hash trie htBloomBits // BloomBits trie - // applicable for all helper trie requests - auxRoot = 1 - // applicable for htCanonical - auxHeader = 2 + // helper trie auxiliary types + // htAuxNone = 1 ; deprecated number, used in les2/3 previously.
+ htAuxHeader = 2 // applicable for htCanonical, requests for relevant headers ) type HelperTrieReq struct { @@ -327,11 +326,7 @@ func (r *ChtRequest) CanSend(peer *serverPeer) bool { peer.lock.RLock() defer peer.lock.RUnlock() - if r.Untrusted { - return peer.headInfo.Number >= r.BlockNum && peer.id == r.PeerId - } else { - return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize - } + return peer.headInfo.Number >= r.Config.ChtConfirms && r.ChtNum <= (peer.headInfo.Number-r.Config.ChtConfirms)/r.Config.ChtSize } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) @@ -343,7 +338,7 @@ func (r *ChtRequest) Request(reqID uint64, peer *serverPeer) error { Type: htCanonical, TrieIdx: r.ChtNum, Key: encNum[:], - AuxReq: auxHeader, + AuxReq: htAuxHeader, } return peer.requestHelperTrieProofs(reqID, []HelperTrieReq{req}) } @@ -370,39 +365,34 @@ func (r *ChtRequest) Validate(db ethdb.Database, msg *Msg) error { if err := rlp.DecodeBytes(headerEnc, header); err != nil { return errHeaderUnavailable } - // Verify the CHT - // Note: For untrusted CHT request, there is no proof response but - // header data. - var node light.ChtNode - if !r.Untrusted { - var encNumber [8]byte - binary.BigEndian.PutUint64(encNumber[:], r.BlockNum) - - reads := &readTraceDB{db: nodeSet} - value, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads) - if err != nil { - return fmt.Errorf("merkle proof verification failed: %v", err) - } - if len(reads.reads) != nodeSet.KeyCount() { - return errUselessNodes - } + var ( + node light.ChtNode + encNumber [8]byte + ) + binary.BigEndian.PutUint64(encNumber[:], r.BlockNum) - if err := rlp.DecodeBytes(value, &node); err != nil { - return err - } - if node.Hash != header.Hash() { - return errCHTHashMismatch - } - if r.BlockNum != header.Number.Uint64() { - return errCHTNumberMismatch - } + reads := &readTraceDB{db: nodeSet} + value, err := trie.VerifyProof(r.ChtRoot, encNumber[:], reads) + if err != nil { + return fmt.Errorf("merkle proof verification failed: %v", err) + } + if len(reads.reads) != nodeSet.KeyCount() { + return errUselessNodes + } + if err := rlp.DecodeBytes(value, &node); err != nil { + return err + } + if node.Hash != header.Hash() { + return errCHTHashMismatch + } + if r.BlockNum != header.Number.Uint64() { + return errCHTNumberMismatch } // Verifications passed, store and return r.Header = header r.Proof = nodeSet - r.Td = node.Td // For untrusted request, td here is nil, todo improve the les/2 protocol - + r.Td = node.Td return nil } @@ -497,7 +487,7 @@ func (r *TxStatusRequest) GetCost(peer *serverPeer) uint64 { // CanSend tells if a certain peer is suitable for serving the given request func (r *TxStatusRequest) CanSend(peer *serverPeer) bool { - return peer.version >= lpv2 + return peer.txHistory != txIndexDisabled } // Request sends an ODR request to the LES network (implementation of LesOdrRequest) @@ -506,13 +496,12 @@ func (r *TxStatusRequest) Request(reqID uint64, peer *serverPeer) error { return peer.requestTxStatus(reqID, r.Hashes) } -// Valid processes an ODR request reply message from the LES network +// Validate processes an ODR request reply message from the LES network // returns true and stores results in memory if the message was a valid reply // to the request (implementation of LesOdrRequest) func (r *TxStatusRequest) Validate(db ethdb.Database, msg *Msg) error { log.Debug("Validating transaction status", "count", len(r.Hashes)) - // 
Ensure we have a correct message with a single block body if msg.MsgType != MsgTxStatus { return errInvalidMessageType } diff --git a/les/odr_test.go b/les/odr_test.go index ccd220d692..0c75014d49 100644 --- a/les/odr_test.go +++ b/les/odr_test.go @@ -19,7 +19,10 @@ package les import ( "bytes" "context" + "crypto/rand" + "fmt" "math/big" + "reflect" "testing" "time" @@ -40,6 +43,7 @@ type odrTestFn func(ctx context.Context, db ethdb.Database, config *params.Chain func TestOdrGetBlockLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetBlock) } func TestOdrGetBlockLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetBlock) } +func TestOdrGetBlockLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetBlock) } func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { var block *types.Block @@ -57,6 +61,7 @@ func odrGetBlock(ctx context.Context, db ethdb.Database, config *params.ChainCon func TestOdrGetReceiptsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrGetReceipts) } func TestOdrGetReceiptsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrGetReceipts) } +func TestOdrGetReceiptsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrGetReceipts) } func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { var receipts types.Receipts @@ -78,6 +83,7 @@ func odrGetReceipts(ctx context.Context, db ethdb.Database, config *params.Chain func TestOdrAccountsLes2(t *testing.T) { testOdr(t, 2, 1, true, odrAccounts) } func TestOdrAccountsLes3(t *testing.T) { testOdr(t, 3, 1, true, odrAccounts) } +func TestOdrAccountsLes4(t *testing.T) { testOdr(t, 4, 1, true, odrAccounts) } func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { dummyAddr := common.HexToAddress("1234567812345678123456781234567812345678") @@ -107,6 +113,7 @@ func odrAccounts(ctx context.Context, db ethdb.Database, config *params.ChainCon func TestOdrContractCallLes2(t *testing.T) { testOdr(t, 2, 2, true, odrContractCall) } func TestOdrContractCallLes3(t *testing.T) { testOdr(t, 3, 2, true, odrContractCall) } +func TestOdrContractCallLes4(t *testing.T) { testOdr(t, 4, 2, true, odrContractCall) } type callmsg struct { types.Message @@ -128,10 +135,11 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai from := statedb.GetOrNewStateObject(bankAddr) from.SetBalance(math.MaxBig256) - msg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, false)} + msg := callmsg{types.NewMessage(from.Address(), &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)} - context := core.NewEVMContext(msg, header, bc, nil) - vmenv := vm.NewEVM(context, statedb, config, vm.Config{}) + context := core.NewEVMBlockContext(header, bc, nil) + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(context, txContext, statedb, config, vm.Config{}) //vmenv := core.NewEnv(statedb, config, bc, msg, header, vm.Config{}) gp := new(core.GasPool).AddGas(math.MaxUint64) @@ -142,9 +150,10 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai header := lc.GetHeaderByHash(bhash) state := light.NewState(ctx, header, lc.Odr()) state.SetBalance(bankAddr, math.MaxBig256) - msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), 
data, false)} - context := core.NewEVMContext(msg, header, lc, nil) - vmenv := vm.NewEVM(context, state, config, vm.Config{}) + msg := callmsg{types.NewMessage(bankAddr, &testContractAddr, 0, new(big.Int), 100000, new(big.Int), data, nil, false)} + context := core.NewEVMBlockContext(header, lc, nil) + txContext := core.NewEVMTxContext(msg) + vmenv := vm.NewEVM(context, txContext, state, config, vm.Config{}) gp := new(core.GasPool).AddGas(math.MaxUint64) result, _ := core.ApplyMessage(vmenv, msg, gp) if state.Error() == nil { @@ -157,6 +166,7 @@ func odrContractCall(ctx context.Context, db ethdb.Database, config *params.Chai func TestOdrTxStatusLes2(t *testing.T) { testOdr(t, 2, 1, false, odrTxStatus) } func TestOdrTxStatusLes3(t *testing.T) { testOdr(t, 3, 1, false, odrTxStatus) } +func TestOdrTxStatusLes4(t *testing.T) { testOdr(t, 4, 1, false, odrTxStatus) } func odrTxStatus(ctx context.Context, db ethdb.Database, config *params.ChainConfig, bc *core.BlockChain, lc *light.LightChain, bhash common.Hash) []byte { var txs types.Transactions @@ -183,7 +193,13 @@ func odrTxStatus(ctx context.Context, db ethdb.Database, config *params.ChainCon // testOdr tests odr requests whose validation guaranteed by block headers. func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn odrTestFn) { // Assemble the test environment - server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true, true) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + connect: true, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) defer tearDown() // Ensure the client has synced all necessary data. @@ -239,3 +255,184 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od test(5) } } + +func TestGetTxStatusFromUnindexedPeersLES4(t *testing.T) { testGetTxStatusFromUnindexedPeers(t, lpv4) } + +func testGetTxStatusFromUnindexedPeers(t *testing.T, protocol int) { + var ( + blocks = 8 + netconfig = testnetConfig{ + blocks: blocks, + protocol: protocol, + nopruning: true, + } + ) + server, client, tearDown := newClientServerEnv(t, netconfig) + defer tearDown() + + // Iterate the chain, create the tx indexes locally + var ( + testHash common.Hash + testStatus light.TxStatus + + txs = make(map[common.Hash]*types.Transaction) // Transaction objects set + blockNumbers = make(map[common.Hash]uint64) // Transaction hash to block number mappings + blockHashes = make(map[common.Hash]common.Hash) // Transaction hash to block hash mappings + intraIndex = make(map[common.Hash]uint64) // Transaction intra-index in block + ) + for number := uint64(1); number < server.backend.Blockchain().CurrentBlock().NumberU64(); number++ { + block := server.backend.Blockchain().GetBlockByNumber(number) + if block == nil { + t.Fatalf("Failed to retrieve block %d", number) + } + for index, tx := range block.Transactions() { + txs[tx.Hash()] = tx + blockNumbers[tx.Hash()] = number + blockHashes[tx.Hash()] = block.Hash() + intraIndex[tx.Hash()] = uint64(index) + + if testHash == (common.Hash{}) { + testHash = tx.Hash() + testStatus = light.TxStatus{ + Status: core.TxStatusIncluded, + Lookup: &rawdb.LegacyTxLookupEntry{ + BlockHash: block.Hash(), + BlockIndex: block.NumberU64(), + Index: uint64(index), + }, + } + } + } + } + // serveMsg processes incoming GetTxStatusMsg and sends the response back. 
+ serveMsg := func(peer *testPeer, txLookup uint64) error { + msg, err := peer.app.ReadMsg() + if err != nil { + return err + } + if msg.Code != GetTxStatusMsg { + return fmt.Errorf("message code mismatch: got %d, expected %d", msg.Code, GetTxStatusMsg) + } + var r GetTxStatusPacket + if err := msg.Decode(&r); err != nil { + return err + } + stats := make([]light.TxStatus, len(r.Hashes)) + for i, hash := range r.Hashes { + number, exist := blockNumbers[hash] + if !exist { + continue // Filter out unknown transactions + } + min := uint64(blocks) - txLookup + if txLookup != txIndexUnlimited && (txLookup == txIndexDisabled || number < min) { + continue // Filter out unindexed transactions + } + stats[i].Status = core.TxStatusIncluded + stats[i].Lookup = &rawdb.LegacyTxLookupEntry{ + BlockHash: blockHashes[hash], + BlockIndex: number, + Index: intraIndex[hash], + } + } + data, _ := rlp.EncodeToBytes(stats) + reply := &reply{peer.app, TxStatusMsg, r.ReqID, data} + reply.send(testBufLimit) + return nil + } + + var testspecs = []struct { + peers int + txLookups []uint64 + txs []common.Hash + results []light.TxStatus + }{ + // Retrieve mined transaction from the empty peerset + { + peers: 0, + txLookups: []uint64{}, + txs: []common.Hash{testHash}, + results: []light.TxStatus{{}}, + }, + // Retrieve unknown transaction from the full peers + { + peers: 3, + txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited}, + txs: []common.Hash{randomHash()}, + results: []light.TxStatus{{}}, + }, + // Retrieve mined transaction from the full peers + { + peers: 3, + txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited}, + txs: []common.Hash{testHash}, + results: []light.TxStatus{testStatus}, + }, + // Retrieve mixed transactions from the full peers + { + peers: 3, + txLookups: []uint64{txIndexUnlimited, txIndexUnlimited, txIndexUnlimited}, + txs: []common.Hash{randomHash(), testHash}, + results: []light.TxStatus{{}, testStatus}, + }, + // Retrieve mixed transactions from unindexed peer(but the target is still available) + { + peers: 3, + txLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2}, + txs: []common.Hash{randomHash(), testHash}, + results: []light.TxStatus{{}, testStatus}, + }, + // Retrieve mixed transactions from unindexed peer(but the target is not available) + { + peers: 3, + txLookups: []uint64{uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 1, uint64(blocks) - testStatus.Lookup.BlockIndex - 2}, + txs: []common.Hash{randomHash(), testHash}, + results: []light.TxStatus{{}, {}}, + }, + } + for _, testspec := range testspecs { + // Create a bunch of server peers with different tx history + var ( + serverPeers []*testPeer + closeFns []func() + ) + for i := 0; i < testspec.peers; i++ { + peer, closePeer, _ := client.newRawPeer(t, fmt.Sprintf("server-%d", i), protocol, testspec.txLookups[i]) + serverPeers = append(serverPeers, peer) + closeFns = append(closeFns, closePeer) + + // Create a one-time routine for serving message + go func(i int, peer *testPeer) { + serveMsg(peer, testspec.txLookups[i]) + }(i, peer) + } + + // Send out the GetTxStatus requests, compare the result with + // expected value. 
+ r := &light.TxStatusRequest{Hashes: testspec.txs} + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + err := client.handler.backend.odr.RetrieveTxStatus(ctx, r) + if err != nil { + t.Errorf("Failed to retrieve tx status %v", err) + } else { + if !reflect.DeepEqual(testspec.results, r.Status) { + t.Errorf("Result mismatch, diff") + } + } + + // Close all connected peers and start the next round + for _, closeFn := range closeFns { + closeFn() + } + } +} + +// randomHash generates a random blob of data and returns it as a hash. +func randomHash() common.Hash { + var hash common.Hash + if n, err := rand.Read(hash[:]); n != common.HashLength || err != nil { + panic(err) + } + return hash +} diff --git a/les/peer.go b/les/peer.go index 2b0117bedc..78019b1d87 100644 --- a/les/peer.go +++ b/les/peer.go @@ -29,12 +29,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/les/flowcontrol" - lpc "github.com/ethereum/go-ethereum/les/lespay/client" - lps "github.com/ethereum/go-ethereum/les/lespay/server" "github.com/ethereum/go-ethereum/les/utils" + vfc "github.com/ethereum/go-ethereum/les/vflux/client" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/params" @@ -161,9 +161,17 @@ func (p *peerCommons) String() string { return fmt.Sprintf("Peer %s [%s]", p.id, fmt.Sprintf("les/%d", p.version)) } +// PeerInfo represents a short summary of the `eth` sub-protocol metadata known +// about a connected peer. +type PeerInfo struct { + Version int `json:"version"` // Ethereum protocol version negotiated + Difficulty *big.Int `json:"difficulty"` // Total difficulty of the peer's blockchain + Head string `json:"head"` // SHA3 hash of the peer's best owned block +} + // Info gathers and returns a collection of metadata known about a peer. -func (p *peerCommons) Info() *eth.PeerInfo { - return ð.PeerInfo{ +func (p *peerCommons) Info() *PeerInfo { + return &PeerInfo{ Version: p.version, Difficulty: p.Td(), Head: fmt.Sprintf("%x", p.Head()), @@ -246,7 +254,7 @@ func (p *peerCommons) sendReceiveHandshake(sendList keyValueList) (keyValueList, // network IDs, difficulties, head and genesis blocks. Besides the basic handshake // fields, server and client can exchange and resolve some specified fields through // two callback functions. -func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, sendCallback func(*keyValueList), recvCallback func(keyValueMap) error) error { +func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, sendCallback func(*keyValueList), recvCallback func(keyValueMap) error) error { p.lock.Lock() defer p.lock.Unlock() @@ -262,6 +270,12 @@ func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, g send = send.add("headNum", headNum) send = send.add("genesisHash", genesis) + // If the protocol version is beyond les4, then pass the forkID + // as well. Check http://eips.ethereum.org/EIPS/eip-2124 for more + // spec detail. 
+ if p.version >= lpv4 { + send = send.add("forkID", forkID) + } // Add client-specified or server-specified fields if sendCallback != nil { sendCallback(&send) @@ -295,6 +309,16 @@ func (p *peerCommons) handshake(td *big.Int, head common.Hash, headNum uint64, g if int(rVersion) != p.version { return errResp(ErrProtocolVersionMismatch, "%d (!= %d)", rVersion, p.version) } + // Check forkID if the protocol version is beyond the les4 + if p.version >= lpv4 { + var forkID forkid.ID + if err := recv.get("forkID", &forkID); err != nil { + return err + } + if err := forkFilter(forkID); err != nil { + return errResp(ErrForkIDRejected, "%v", err) + } + } if recvCallback != nil { return recvCallback(recv) } @@ -317,6 +341,7 @@ type serverPeer struct { onlyAnnounce bool // The flag whether the server sends announcement only. chainSince, chainRecent uint64 // The range of chain server peer can serve. stateSince, stateRecent uint64 // The range of state server peer can serve. + txHistory uint64 // The length of available tx history, 0 means all, 1 means disabled // Advertised checkpoint fields checkpointNumber uint64 // The block height which the checkpoint is registered. @@ -324,8 +349,7 @@ type serverPeer struct { fcServer *flowcontrol.ServerNode // Client side mirror token bucket. vtLock sync.Mutex - valueTracker *lpc.ValueTracker - nodeValueTracker *lpc.NodeValueTracker + nodeValueTracker *vfc.NodeValueTracker sentReqs map[uint64]sentReqEntry // Statistics @@ -407,14 +431,14 @@ func (p *serverPeer) sendRequest(msgcode, reqID uint64, data interface{}, amount // specified header query, based on the hash of an origin block. func (p *serverPeer) requestHeadersByHash(reqID uint64, origin common.Hash, amount int, skip int, reverse bool) error { p.Log().Debug("Fetching batch of headers", "count", amount, "fromhash", origin, "skip", skip, "reverse", reverse) - return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount) + return p.sendRequest(GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Hash: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount) } // requestHeadersByNumber fetches a batch of blocks' headers corresponding to the // specified header query, based on the number of an origin block. func (p *serverPeer) requestHeadersByNumber(reqID, origin uint64, amount int, skip int, reverse bool) error { p.Log().Debug("Fetching batch of headers", "count", amount, "fromnum", origin, "skip", skip, "reverse", reverse) - return p.sendRequest(GetBlockHeadersMsg, reqID, &getBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount) + return p.sendRequest(GetBlockHeadersMsg, reqID, &GetBlockHeadersData{Origin: hashOrNumber{Number: origin}, Amount: uint64(amount), Skip: uint64(skip), Reverse: reverse}, amount) } // requestBodies fetches a batch of blocks' bodies corresponding to the hashes @@ -561,10 +585,10 @@ func (p *serverPeer) updateHead(hash common.Hash, number uint64, td *big.Int) { // Handshake executes the les protocol handshake, negotiating version number, // network IDs and genesis blocks. 
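The lpv4 handshake above sends the local EIP-2124 fork identifier and validates the remote one with a forkid.Filter before accepting the peer. A minimal sketch of how those two values are produced, mirroring the calls this patch makes in les/peer_test.go; the helper name buildForkHandshake is illustrative and not part of the patch:

// buildForkHandshake derives the fork ID advertised under the "forkID" key and
// the filter applied to the remote peer's ID. The chain argument must expose
// Config, Genesis and CurrentHeader, as the fakeChain helper in peer_test.go does.
func buildForkHandshake(chain forkid.Blockchain) (forkid.ID, forkid.Filter) {
	id := forkid.NewID(chain.Config(), chain.Genesis().Hash(), chain.CurrentHeader().Number.Uint64())
	return id, forkid.NewFilter(chain)
}

On the receiving side the filter is applied exactly as in the handshake code above: a non-nil error becomes errResp(ErrForkIDRejected, "%v", err) and the connection is dropped.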
-func (p *serverPeer) Handshake(genesis common.Hash) error { +func (p *serverPeer) Handshake(genesis common.Hash, forkid forkid.ID, forkFilter forkid.Filter) error { // Note: there is no need to share local head with a server but older servers still // require these fields so we announce zero values. - return p.handshake(common.Big0, common.Hash{}, 0, genesis, func(lists *keyValueList) { + return p.handshake(common.Big0, common.Hash{}, 0, genesis, forkid, forkFilter, func(lists *keyValueList) { // Add some client-specific handshake fields // // Enable signed announcement randomly even the server is not trusted. @@ -604,6 +628,18 @@ func (p *serverPeer) Handshake(genesis common.Hash) error { if recv.get("txRelay", nil) != nil { p.onlyAnnounce = true } + if p.version >= lpv4 { + var recentTx uint + if err := recv.get("recentTxLookup", &recentTx); err != nil { + return err + } + p.txHistory = uint64(recentTx) + } else { + // The weak assumption is held here that legacy les server(les2,3) + // has unlimited transaction history. The les serving in these legacy + // versions is disabled if the transaction is unindexed. + p.txHistory = txIndexUnlimited + } if p.onlyAnnounce && !p.trusted { return errResp(ErrUselessPeer, "peer cannot serve requests") } @@ -639,9 +675,8 @@ func (p *serverPeer) Handshake(genesis common.Hash) error { // setValueTracker sets the value tracker references for connected servers. Note that the // references should be removed upon disconnection by setValueTracker(nil, nil). -func (p *serverPeer) setValueTracker(vt *lpc.ValueTracker, nvt *lpc.NodeValueTracker) { +func (p *serverPeer) setValueTracker(nvt *vfc.NodeValueTracker) { p.vtLock.Lock() - p.valueTracker = vt p.nodeValueTracker = nvt if nvt != nil { p.sentReqs = make(map[uint64]sentReqEntry) @@ -668,7 +703,7 @@ func (p *serverPeer) updateVtParams() { } } } - p.valueTracker.UpdateCosts(p.nodeValueTracker, reqCosts) + p.nodeValueTracker.UpdateCosts(reqCosts) } // sentReqEntry remembers sent requests and their sending times @@ -695,27 +730,26 @@ func (p *serverPeer) answeredRequest(id uint64) { } e, ok := p.sentReqs[id] delete(p.sentReqs, id) - vt := p.valueTracker nvt := p.nodeValueTracker p.vtLock.Unlock() if !ok { return } var ( - vtReqs [2]lpc.ServedRequest + vtReqs [2]vfc.ServedRequest reqCount int ) m := requestMapping[e.reqType] if m.rest == -1 || e.amount <= 1 { reqCount = 1 - vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount} + vtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: e.amount} } else { reqCount = 2 - vtReqs[0] = lpc.ServedRequest{ReqType: uint32(m.first), Amount: 1} - vtReqs[1] = lpc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1} + vtReqs[0] = vfc.ServedRequest{ReqType: uint32(m.first), Amount: 1} + vtReqs[1] = vfc.ServedRequest{ReqType: uint32(m.rest), Amount: e.amount - 1} } dt := time.Duration(mclock.Now() - e.at) - vt.Served(nvt, vtReqs[:reqCount], dt) + nvt.Served(vtReqs[:reqCount], dt) } // clientPeer represents each node to which the les server is connected. @@ -728,7 +762,7 @@ type clientPeer struct { responseLock sync.Mutex responseCount uint64 // Counter to generate an unique id for request processing. - balance *lps.NodeBalance + balance *vfs.NodeBalance // invalidLock is used for protecting invalidCount. invalidLock sync.RWMutex @@ -944,11 +978,25 @@ func (p *clientPeer) freezeClient() { // Handshake executes the les protocol handshake, negotiating version number, // network IDs, difficulties, head and genesis blocks. 
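The clientPeer handshake below converts the server's configured transaction index depth into the "recentTxLookup" value it advertises. A short sketch of that arithmetic under the constants this patch adds to les/protocol.go (blockSafetyMargin = 4, txIndexRecentOffset = 1); the helper name advertisedTxLookup is illustrative only:

// advertisedTxLookup mirrors the recentTx computation in clientPeer.Handshake.
// Example: a server indexing the last 2,350,000 blocks advertises
// 2,350,000 - (4 - 1) = 2,349,997; since the field encodes N + txIndexRecentOffset,
// that promises the tx index of the most recent 2,349,996 blocks, backing off
// by the block safety margin from the configured limit.
func advertisedTxLookup(txLookupLimit uint64) uint64 {
	if txLookupLimit == txIndexUnlimited { // 0: the whole chain is indexed
		return txIndexUnlimited
	}
	if txLookupLimit < blockSafetyMargin { // too shallow to serve reliably
		return txIndexDisabled
	}
	return txLookupLimit - (blockSafetyMargin - txIndexRecentOffset)
}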
-func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, server *LesServer) error { +func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, forkFilter forkid.Filter, server *LesServer) error { + recentTx := server.handler.blockchain.TxLookupLimit() + if recentTx != txIndexUnlimited { + if recentTx < blockSafetyMargin { + recentTx = txIndexDisabled + } else { + recentTx -= blockSafetyMargin - txIndexRecentOffset + } + } + if server.config.UltraLightOnlyAnnounce { + recentTx = txIndexDisabled + } + if recentTx != txIndexUnlimited && p.version < lpv4 { + return errors.New("Cannot serve old clients without a complete tx index") + } // Note: clientPeer.headInfo should contain the last head announced to the client by us. // The values announced in the handshake are dummy values for compatibility reasons and should be ignored. p.headInfo = blockInfo{Hash: head, Number: headNum, Td: td} - return p.handshake(td, head, headNum, genesis, func(lists *keyValueList) { + return p.handshake(td, head, headNum, genesis, forkID, forkFilter, func(lists *keyValueList) { // Add some information which services server can offer. if !server.config.UltraLightOnlyAnnounce { *lists = (*lists).add("serveHeaders", nil) @@ -957,13 +1005,16 @@ func (p *clientPeer) Handshake(td *big.Int, head common.Hash, headNum uint64, ge // If local ethereum node is running in archive mode, advertise ourselves we have // all version state data. Otherwise only recent state is available. - stateRecent := uint64(core.TriesInMemory - 4) + stateRecent := uint64(core.TriesInMemory - blockSafetyMargin) if server.archiveMode { stateRecent = 0 } *lists = (*lists).add("serveRecentState", stateRecent) *lists = (*lists).add("txRelay", nil) } + if p.version >= lpv4 { + *lists = (*lists).add("recentTxLookup", recentTx) + } *lists = (*lists).add("flowControl/BL", server.defParams.BufLimit) *lists = (*lists).add("flowControl/MRR", server.defParams.MinRecharge) diff --git a/les/peer_test.go b/les/peer_test.go index 6d3c7f9755..d6551ce6b6 100644 --- a/les/peer_test.go +++ b/les/peer_test.go @@ -26,8 +26,13 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/params" ) type testServerPeerSub struct { @@ -91,6 +96,14 @@ func TestPeerSubscription(t *testing.T) { checkPeers(sub.unregCh) } +type fakeChain struct{} + +func (f *fakeChain) Config() *params.ChainConfig { return params.MainnetChainConfig } +func (f *fakeChain) Genesis() *types.Block { + return core.DefaultGenesisBlock().ToBlock(rawdb.NewMemoryDatabase()) +} +func (f *fakeChain) CurrentHeader() *types.Header { return &types.Header{Number: big.NewInt(10000000)} } + func TestHandshake(t *testing.T) { // Create a message pipe to communicate through app, net := p2p.MsgPipe() @@ -110,15 +123,21 @@ func TestHandshake(t *testing.T) { head = common.HexToHash("deadbeef") headNum = uint64(10) genesis = common.HexToHash("cafebabe") + + chain1, chain2 = &fakeChain{}, &fakeChain{} + forkID1 = forkid.NewID(chain1.Config(), chain1.Genesis().Hash(), chain1.CurrentHeader().Number.Uint64()) + forkID2 = forkid.NewID(chain2.Config(), chain2.Genesis().Hash(), chain2.CurrentHeader().Number.Uint64()) + filter1, 
filter2 = forkid.NewFilter(chain1), forkid.NewFilter(chain2) ) + go func() { - errCh1 <- peer1.handshake(td, head, headNum, genesis, func(list *keyValueList) { + errCh1 <- peer1.handshake(td, head, headNum, genesis, forkID1, filter1, func(list *keyValueList) { var announceType uint64 = announceTypeSigned *list = (*list).add("announceType", announceType) }, nil) }() go func() { - errCh2 <- peer2.handshake(td, head, headNum, genesis, nil, func(recv keyValueMap) error { + errCh2 <- peer2.handshake(td, head, headNum, genesis, forkID2, filter2, nil, func(recv keyValueMap) error { var reqType uint64 err := recv.get("announceType", &reqType) if err != nil { diff --git a/les/protocol.go b/les/protocol.go index 19a9561ce9..07a4452f40 100644 --- a/les/protocol.go +++ b/les/protocol.go @@ -24,8 +24,9 @@ import ( "math/big" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - lpc "github.com/ethereum/go-ethereum/les/lespay/client" + vfc "github.com/ethereum/go-ethereum/les/vflux/client" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/rlp" ) @@ -34,21 +35,27 @@ import ( const ( lpv2 = 2 lpv3 = 3 + lpv4 = 4 ) // Supported versions of the les protocol (first is primary) var ( - ClientProtocolVersions = []uint{lpv2, lpv3} - ServerProtocolVersions = []uint{lpv2, lpv3} + ClientProtocolVersions = []uint{lpv2, lpv3, lpv4} + ServerProtocolVersions = []uint{lpv2, lpv3, lpv4} AdvertiseProtocolVersions = []uint{lpv2} // clients are searching for the first advertised protocol in the list ) // Number of implemented message corresponding to different protocol versions. -var ProtocolLengths = map[uint]uint64{lpv2: 22, lpv3: 24} +var ProtocolLengths = map[uint]uint64{lpv2: 22, lpv3: 24, lpv4: 24} const ( NetworkId = 1 ProtocolMaxMsgSize = 10 * 1024 * 1024 // Maximum cap on the size of a protocol message + blockSafetyMargin = 4 // safety margin applied to block ranges specified relative to head block + + txIndexUnlimited = 0 // this value in the "recentTxLookup" handshake field means the entire tx index history is served + txIndexDisabled = 1 // this value means tx index is not served at all + txIndexRecentOffset = 1 // txIndexRecentOffset + N in the handshake field means then tx index of the last N blocks is supported ) // les protocol message codes @@ -77,13 +84,69 @@ const ( ResumeMsg = 0x17 ) +// GetBlockHeadersData represents a block header query (the request ID is not included) +type GetBlockHeadersData struct { + Origin hashOrNumber // Block from which to retrieve headers + Amount uint64 // Maximum number of headers to retrieve + Skip uint64 // Blocks to skip between consecutive headers + Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) +} + +// GetBlockHeadersPacket represents a block header request +type GetBlockHeadersPacket struct { + ReqID uint64 + Query GetBlockHeadersData +} + +// GetBlockBodiesPacket represents a block body request +type GetBlockBodiesPacket struct { + ReqID uint64 + Hashes []common.Hash +} + +// GetCodePacket represents a contract code request +type GetCodePacket struct { + ReqID uint64 + Reqs []CodeReq +} + +// GetReceiptsPacket represents a block receipts request +type GetReceiptsPacket struct { + ReqID uint64 + Hashes []common.Hash +} + +// GetProofsPacket represents a proof request +type GetProofsPacket struct { + ReqID uint64 + Reqs []ProofReq +} + +// GetHelperTrieProofsPacket represents a helper trie proof request +type 
GetHelperTrieProofsPacket struct { + ReqID uint64 + Reqs []HelperTrieReq +} + +// SendTxPacket represents a transaction propagation request +type SendTxPacket struct { + ReqID uint64 + Txs []*types.Transaction +} + +// GetTxStatusPacket represents a transaction status query +type GetTxStatusPacket struct { + ReqID uint64 + Hashes []common.Hash +} + type requestInfo struct { name string maxCount uint64 refBasketFirst, refBasketRest float64 } -// reqMapping maps an LES request to one or two lespay service vector entries. +// reqMapping maps an LES request to one or two vflux service vector entries. // If rest != -1 and the request type is used with amounts larger than one then the // first one of the multi-request is mapped to first while the rest is mapped to rest. type reqMapping struct { @@ -92,7 +155,7 @@ type reqMapping struct { var ( // requests describes the available LES request types and their initializing amounts - // in the lespay/client.ValueTracker reference basket. Initial values are estimates + // in the vfc.ValueTracker reference basket. Initial values are estimates // based on the same values as the server's default cost estimates (reqAvgTimeCost). requests = map[uint64]requestInfo{ GetBlockHeadersMsg: {"GetBlockHeaders", MaxHeaderFetch, 10, 1000}, @@ -104,25 +167,25 @@ var ( SendTxV2Msg: {"SendTxV2", MaxTxSend, 1, 0}, GetTxStatusMsg: {"GetTxStatus", MaxTxStatus, 10, 0}, } - requestList []lpc.RequestInfo + requestList []vfc.RequestInfo requestMapping map[uint32]reqMapping ) -// init creates a request list and mapping between protocol message codes and lespay +// init creates a request list and mapping between protocol message codes and vflux // service vector indices. func init() { requestMapping = make(map[uint32]reqMapping) for code, req := range requests { cost := reqAvgTimeCost[code] rm := reqMapping{len(requestList), -1} - requestList = append(requestList, lpc.RequestInfo{ + requestList = append(requestList, vfc.RequestInfo{ Name: req.name + ".first", InitAmount: req.refBasketFirst, InitValue: float64(cost.baseCost + cost.reqCost), }) if req.refBasketRest != 0 { rm.rest = len(requestList) - requestList = append(requestList, lpc.RequestInfo{ + requestList = append(requestList, vfc.RequestInfo{ Name: req.name + ".rest", InitAmount: req.refBasketRest, InitValue: float64(cost.reqCost), @@ -150,6 +213,7 @@ const ( ErrInvalidResponse ErrTooManyTimeouts ErrMissingKey + ErrForkIDRejected ) func (e errCode) String() string { @@ -172,6 +236,7 @@ var errorToString = map[int]string{ ErrInvalidResponse: "Invalid response", ErrTooManyTimeouts: "Too many request timeouts", ErrMissingKey: "Key missing from list", + ErrForkIDRejected: "ForkID rejected", } // announceData is the network packet for the block announcements. @@ -221,14 +286,6 @@ type blockInfo struct { Td *big.Int // Total difficulty of one particular block being announced } -// getBlockHeadersData represents a block header query. -type getBlockHeadersData struct { - Origin hashOrNumber // Block from which to retrieve headers - Amount uint64 // Maximum number of headers to retrieve - Skip uint64 // Blocks to skip between consecutive headers - Reverse bool // Query direction (false = rising towards latest, true = falling towards genesis) -} - // hashOrNumber is a combined field for specifying an origin block. 
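Read from the consumer side, the three txIndex constants above define a compact encoding for the "recentTxLookup" field. A hedged sketch of how a receiver could decode it; the helper name txLookupDepth is not part of the patch:

// txLookupDepth decodes a "recentTxLookup" handshake value: all reports whether
// the peer serves the entire transaction index, otherwise blocks is the number
// of most recent blocks whose index is served (0 means none at all).
func txLookupDepth(v uint64) (all bool, blocks uint64) {
	switch v {
	case txIndexUnlimited:
		return true, 0
	case txIndexDisabled:
		return false, 0
	default:
		return false, v - txIndexRecentOffset
	}
}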
type hashOrNumber struct { Hash common.Hash // Block hash from which to retrieve headers (excludes Number) diff --git a/les/pruner_test.go b/les/pruner_test.go index 62b4e9a950..c6f198c088 100644 --- a/les/pruner_test.go +++ b/les/pruner_test.go @@ -28,19 +28,26 @@ import ( ) func TestLightPruner(t *testing.T) { - config := light.TestClientIndexerConfig - - waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { - for { - cs, _, _ := cIndexer.Sections() - bts, _, _ := btIndexer.Sections() - if cs >= 3 && bts >= 3 { - break + var ( + waitIndexers = func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { + for { + cs, _, _ := cIndexer.Sections() + bts, _, _ := btIndexer.Sections() + if cs >= 3 && bts >= 3 { + break + } + time.Sleep(10 * time.Millisecond) } - time.Sleep(10 * time.Millisecond) } - } - server, client, tearDown := newClientServerEnv(t, int(3*config.ChtSize+config.ChtConfirms), 2, waitIndexers, nil, 0, false, true, false) + config = light.TestClientIndexerConfig + netconfig = testnetConfig{ + blocks: int(3*config.ChtSize + config.ChtConfirms), + protocol: 3, + indexFn: waitIndexers, + connect: true, + } + ) + server, client, tearDown := newClientServerEnv(t, netconfig) defer tearDown() // checkDB iterates the chain with given prefix, resolves the block number diff --git a/les/request_test.go b/les/request_test.go index 4851274382..c65405e375 100644 --- a/les/request_test.go +++ b/les/request_test.go @@ -38,6 +38,7 @@ type accessTestFn func(db ethdb.Database, bhash common.Hash, number uint64) ligh func TestBlockAccessLes2(t *testing.T) { testAccess(t, 2, tfBlockAccess) } func TestBlockAccessLes3(t *testing.T) { testAccess(t, 3, tfBlockAccess) } +func TestBlockAccessLes4(t *testing.T) { testAccess(t, 4, tfBlockAccess) } func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { return &light.BlockRequest{Hash: bhash, Number: number} @@ -45,6 +46,7 @@ func tfBlockAccess(db ethdb.Database, bhash common.Hash, number uint64) light.Od func TestReceiptsAccessLes2(t *testing.T) { testAccess(t, 2, tfReceiptsAccess) } func TestReceiptsAccessLes3(t *testing.T) { testAccess(t, 3, tfReceiptsAccess) } +func TestReceiptsAccessLes4(t *testing.T) { testAccess(t, 4, tfReceiptsAccess) } func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { return &light.ReceiptsRequest{Hash: bhash, Number: number} @@ -52,6 +54,7 @@ func tfReceiptsAccess(db ethdb.Database, bhash common.Hash, number uint64) light func TestTrieEntryAccessLes2(t *testing.T) { testAccess(t, 2, tfTrieEntryAccess) } func TestTrieEntryAccessLes3(t *testing.T) { testAccess(t, 3, tfTrieEntryAccess) } +func TestTrieEntryAccessLes4(t *testing.T) { testAccess(t, 4, tfTrieEntryAccess) } func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) light.OdrRequest { if number := rawdb.ReadHeaderNumber(db, bhash); number != nil { @@ -62,6 +65,7 @@ func tfTrieEntryAccess(db ethdb.Database, bhash common.Hash, number uint64) ligh func TestCodeAccessLes2(t *testing.T) { testAccess(t, 2, tfCodeAccess) } func TestCodeAccessLes3(t *testing.T) { testAccess(t, 3, tfCodeAccess) } +func TestCodeAccessLes4(t *testing.T) { testAccess(t, 4, tfCodeAccess) } func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrRequest { number := rawdb.ReadHeaderNumber(db, bhash) @@ -79,7 +83,14 @@ func tfCodeAccess(db ethdb.Database, bhash common.Hash, num uint64) light.OdrReq func testAccess(t *testing.T, protocol int, fn accessTestFn) { // Assemble 
the test environment - server, client, tearDown := newClientServerEnv(t, 4, protocol, nil, nil, 0, false, true, true) + netconfig := testnetConfig{ + blocks: 4, + protocol: protocol, + indexFn: nil, + connect: true, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) defer tearDown() // Ensure the client has synced all necessary data. diff --git a/les/retrieve.go b/les/retrieve.go index 4f77004f20..3174d49878 100644 --- a/les/retrieve.go +++ b/les/retrieve.go @@ -155,6 +155,15 @@ func (rm *retrieveManager) sendReq(reqID uint64, req *distReq, val validatorFunc return r } +// requested reports whether the request with given reqid is sent by the retriever. +func (rm *retrieveManager) requested(reqId uint64) bool { + rm.lock.RLock() + defer rm.lock.RUnlock() + + _, ok := rm.sentReqs[reqId] + return ok +} + // deliver is called by the LES protocol manager to deliver reply messages to waiting requests func (rm *retrieveManager) deliver(peer distPeer, msg *Msg) error { rm.lock.RLock() @@ -328,7 +337,6 @@ func (r *sentReq) tryRequest() { } defer func() { - // send feedback to server pool and remove peer if hard timeout happened pp, ok := p.(*serverPeer) if hrto && ok { pp.Log().Debug("Request timed out hard") @@ -336,10 +344,6 @@ func (r *sentReq) tryRequest() { r.rm.peers.unregister(pp.id) } } - - r.lock.Lock() - delete(r.sentTo, p) - r.lock.Unlock() }() select { diff --git a/les/server.go b/les/server.go index cbedce136c..63feaf892c 100644 --- a/les/server.go +++ b/les/server.go @@ -22,14 +22,16 @@ import ( "time" "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth/ethconfig" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/flowcontrol" - lps "github.com/ethereum/go-ethereum/les/lespay/server" + "github.com/ethereum/go-ethereum/les/vflux" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" @@ -42,8 +44,8 @@ var ( clientPeerField = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{})) clientInfoField = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{})) connAddressField = serverSetup.NewField("connAddr", reflect.TypeOf("")) - balanceTrackerSetup = lps.NewBalanceTrackerSetup(serverSetup) - priorityPoolSetup = lps.NewPriorityPoolSetup(serverSetup) + balanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup) + priorityPoolSetup = vfs.NewPriorityPoolSetup(serverSetup) ) func init() { @@ -51,6 +53,15 @@ func init() { priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority } +type ethBackend interface { + ArchiveMode() bool + BlockChain() *core.BlockChain + BloomIndexer() *core.ChainIndexer + ChainDb() ethdb.Database + Synced() bool + TxPool() *core.TxPool +} + type LesServer struct { lesCommons @@ -58,7 +69,7 @@ type LesServer struct { archiveMode bool // Flag whether the ethereum node runs in archive mode. 
handler *serverHandler broadcaster *broadcaster - lesTopics []discv5.Topic + vfluxServer *vfs.Server privateKey *ecdsa.PrivateKey // Flow control and capacity management @@ -75,13 +86,12 @@ type LesServer struct { p2pSrv *p2p.Server } -func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesServer, error) { - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) - // Collect les protocol version information supported by local node. - lesTopics := make([]discv5.Topic, len(AdvertiseProtocolVersions)) - for i, pv := range AdvertiseProtocolVersions { - lesTopics[i] = lesTopic(e.BlockChain().Genesis().Hash(), pv) +func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*LesServer, error) { + lesDb, err := node.OpenDatabase("les.server", 0, 0, "eth/db/les.server") + if err != nil { + return nil, err } + ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) // Calculate the number of threads used to service the light client // requests based on the user-specified value. threads := config.LightServ * 4 / 100 @@ -95,6 +105,7 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer chainConfig: e.BlockChain().Config(), iConfig: light.DefaultServerIndexerConfig, chainDb: e.ChainDb(), + lesDb: lesDb, chainReader: e.BlockChain(), chtIndexer: light.NewChtIndexer(e.ChainDb(), nil, params.CHTFrequency, params.HelperTrieProcessConfirmations, true), bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true), @@ -103,14 +114,19 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer ns: ns, archiveMode: e.ArchiveMode(), broadcaster: newBroadcaster(ns), - lesTopics: lesTopics, + vfluxServer: vfs.NewServer(time.Millisecond * 10), fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}), servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100), threadsBusy: config.LightServ/100 + 1, threadsIdle: threads, p2pSrv: node.Server(), } - srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), e.Synced) + srv.vfluxServer.Register(srv) + issync := e.Synced + if config.LightNoSyncServe { + issync = func() bool { return true } + } + srv.handler = newServerHandler(srv, e.BlockChain(), e.ChainDb(), e.TxPool(), issync) srv.costTracker, srv.minCapacity = newCostTracker(e.ChainDb(), config) srv.oracle = srv.setupOracle(node, e.BlockChain().Genesis().Hash(), config) @@ -133,8 +149,8 @@ func NewLesServer(node *node.Node, e *eth.Ethereum, config *eth.Config) (*LesSer srv.maxCapacity = totalRecharge } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) - srv.clientPool = newClientPool(ns, srv.chainDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient) - srv.clientPool.setDefaultFactors(lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, lps.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}) + srv.clientPool = newClientPool(ns, lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient) + srv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}) checkpoint := srv.latestLocalCheckpoint() if !checkpoint.Empty() { @@ -189,7 +205,9 @@ func (s *LesServer) Protocols() []p2p.Protocol { }, nil) // Add "les" ENR entries. 
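With the discv5 topic registration gone, the server now exposes itself through vflux: NewLesServer above creates vfs.NewServer and calls Register(srv), Start (further down in this file's diff) attaches it to discovery with RegisterTalkHandler("vfx", ...), and the lesEntry written into the ENR below gains a VfxVersion field. A toy service following the same pattern; the pingService type is purely illustrative:

// pingService has the same shape as the vfs.Service the LES server implements
// (ServiceInfo plus Handle); it answers every vflux request with a static payload.
type pingService struct{}

func (s *pingService) ServiceInfo() (string, string) {
	return "ping", "Example vflux service"
}

func (s *pingService) Handle(id enode.ID, address string, name string, data []byte) []byte {
	return []byte("pong")
}

// Wiring, using the same calls as NewLesServer and Start:
//
//	vfxServer := vfs.NewServer(10 * time.Millisecond)
//	vfxServer.Register(&pingService{})
//	p2pServer.DiscV5.RegisterTalkHandler("vfx", vfxServer.ServeEncoded)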
for i := range ps { - ps[i].Attributes = []enr.Entry{&lesEntry{}} + ps[i].Attributes = []enr.Entry{&lesEntry{ + VfxVersion: 1, + }} } return ps } @@ -199,23 +217,11 @@ func (s *LesServer) Start() error { s.privateKey = s.p2pSrv.PrivateKey s.broadcaster.setSignerKey(s.privateKey) s.handler.start() - s.wg.Add(1) go s.capacityManagement() - if s.p2pSrv.DiscV5 != nil { - for _, topic := range s.lesTopics { - topic := topic - go func() { - logger := log.New("topic", topic) - logger.Info("Starting topic registration") - defer logger.Info("Terminated topic registration") - - s.p2pSrv.DiscV5.RegisterTopic(topic, s.closeCh) - }() - } + s.p2pSrv.DiscV5.RegisterTalkHandler("vfx", s.vfluxServer.ServeEncoded) } - return nil } @@ -229,9 +235,11 @@ func (s *LesServer) Stop() error { s.costTracker.stop() s.handler.stop() s.servingQueue.stop() + s.vfluxServer.Stop() // Note, bloom trie indexer is closed by parent bloombits indexer. s.chtIndexer.Close() + s.lesDb.Close() s.wg.Wait() log.Info("Les server stopped") @@ -311,3 +319,18 @@ func (s *LesServer) dropClient(id enode.ID) { p.Peer.Disconnect(p2p.DiscRequested) } } + +// ServiceInfo implements vfs.Service +func (s *LesServer) ServiceInfo() (string, string) { + return "les", "Ethereum light client service" +} + +// Handle implements vfs.Service +func (s *LesServer) Handle(id enode.ID, address string, name string, data []byte) []byte { + switch name { + case vflux.CapacityQueryName: + return s.clientPool.serveCapQuery(id, address, data) + default: + return nil + } +} diff --git a/les/server_handler.go b/les/server_handler.go index d3e2c956b3..7651d03cab 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -18,8 +18,6 @@ package les import ( "crypto/ecdsa" - "encoding/binary" - "encoding/json" "errors" "sync" "sync/atomic" @@ -28,11 +26,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - lps "github.com/ethereum/go-ethereum/les/lespay/server" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" @@ -46,7 +45,7 @@ import ( const ( softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data. estHeaderRlpSize = 500 // Approximate size of an RLP encoded block header - ethVersion = 63 // equivalent eth version for the downloader + ethVersion = 64 // equivalent eth version for the downloader MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request MaxBodyFetch = 32 // Amount of block bodies to be fetched per retrieval request @@ -66,6 +65,7 @@ var ( // serverHandler is responsible for serving light client and process // all incoming light requests. 
type serverHandler struct { + forkFilter forkid.Filter blockchain *core.BlockChain chainDb ethdb.Database txpool *core.TxPool @@ -81,6 +81,7 @@ type serverHandler struct { func newServerHandler(server *LesServer, blockchain *core.BlockChain, chainDb ethdb.Database, txpool *core.TxPool, synced func() bool) *serverHandler { handler := &serverHandler{ + forkFilter: forkid.NewFilter(blockchain), server: server, blockchain: blockchain, chainDb: chainDb, @@ -121,8 +122,9 @@ func (h *serverHandler) handle(p *clientPeer) error { hash = head.Hash() number = head.Number.Uint64() td = h.blockchain.GetTd(hash, number) + forkID = forkid.NewID(h.blockchain.Config(), h.blockchain.Genesis().Hash(), h.blockchain.CurrentBlock().NumberU64()) ) - if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), h.server); err != nil { + if err := p.Handshake(td, hash, number, h.blockchain.Genesis().Hash(), forkID, h.forkFilter, h.server); err != nil { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err } @@ -165,7 +167,7 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool) return errFullClientPool } - p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*lps.NodeBalance) + p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance) if p.balance == nil { return p2p.DiscRequested } @@ -202,6 +204,90 @@ func (h *serverHandler) handle(p *clientPeer) error { } } +// beforeHandle will do a series of prechecks before handling message. +func (h *serverHandler) beforeHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, reqCnt uint64, maxCount uint64) (*servingTask, uint64) { + // Ensure that the request sent by client peer is valid + inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0) + if reqCnt == 0 || reqCnt > maxCount { + p.fcClient.OneTimeCost(inSizeCost) + return nil, 0 + } + // Ensure that the client peer complies with the flow control + // rules agreed by both sides. + if p.isFrozen() { + p.fcClient.OneTimeCost(inSizeCost) + return nil, 0 + } + maxCost := p.fcCosts.getMaxCost(msg.Code, reqCnt) + accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost) + if !accepted { + p.freeze() + p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge))) + p.fcClient.OneTimeCost(inSizeCost) + return nil, 0 + } + // Create a multi-stage task, estimate the time it takes for the task to + // execute, and cache it in the request service queue. + factor := h.server.costTracker.globalFactor() + if factor < 0.001 { + factor = 1 + p.Log().Error("Invalid global cost factor", "factor", factor) + } + maxTime := uint64(float64(maxCost) / factor) + task := h.server.servingQueue.newTask(p, maxTime, priority) + if !task.start() { + p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost) + return nil, 0 + } + return task, maxCost +} + +// Afterhandle will perform a series of operations after message handling, +// such as updating flow control data, sending reply, etc. +func (h *serverHandler) afterHandle(p *clientPeer, reqID, responseCount uint64, msg p2p.Msg, maxCost uint64, reqCnt uint64, task *servingTask, reply *reply) { + if reply != nil { + task.done() + } + p.responseLock.Lock() + defer p.responseLock.Unlock() + + // Short circuit if the client is already frozen. 
+ if p.isFrozen() { + realCost := h.server.costTracker.realCost(task.servingTime, msg.Size, 0) + p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) + return + } + // Positive correction buffer value with real cost. + var replySize uint32 + if reply != nil { + replySize = reply.size() + } + var realCost uint64 + if h.server.costTracker.testing { + realCost = maxCost // Assign a fake cost for testing purpose + } else { + realCost = h.server.costTracker.realCost(task.servingTime, msg.Size, replySize) + if realCost > maxCost { + realCost = maxCost + } + } + bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) + if reply != nil { + // Feed cost tracker request serving statistic. + h.server.costTracker.updateStats(msg.Code, reqCnt, task.servingTime, realCost) + // Reduce priority "balance" for the specific peer. + p.balance.RequestServed(realCost) + p.queueSend(func() { + if err := reply.send(bv); err != nil { + select { + case p.errCh <- err: + default: + } + } + }) + } +} + // handleMsg is invoked whenever an inbound message is received from a remote // peer. The remote connection is torn down upon returning any error. func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { @@ -219,648 +305,52 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { } defer msg.Discard() - var ( - maxCost uint64 - task *servingTask - ) - p.responseCount++ - responseCount := p.responseCount - // accept returns an indicator whether the request can be served. - // If so, deduct the max cost from the flow control buffer. - accept := func(reqID, reqCnt, maxCnt uint64) bool { - // Short circuit if the peer is already frozen or the request is invalid. - inSizeCost := h.server.costTracker.realCost(0, msg.Size, 0) - if p.isFrozen() || reqCnt == 0 || reqCnt > maxCnt { - p.fcClient.OneTimeCost(inSizeCost) - return false - } - // Prepaid max cost units before request been serving. - maxCost = p.fcCosts.getMaxCost(msg.Code, reqCnt) - accepted, bufShort, priority := p.fcClient.AcceptRequest(reqID, responseCount, maxCost) - if !accepted { - p.freeze() - p.Log().Error("Request came too early", "remaining", common.PrettyDuration(time.Duration(bufShort*1000000/p.fcParams.MinRecharge))) - p.fcClient.OneTimeCost(inSizeCost) - return false - } - // Create a multi-stage task, estimate the time it takes for the task to - // execute, and cache it in the request service queue. - factor := h.server.costTracker.globalFactor() - if factor < 0.001 { - factor = 1 - p.Log().Error("Invalid global cost factor", "factor", factor) - } - maxTime := uint64(float64(maxCost) / factor) - task = h.server.servingQueue.newTask(p, maxTime, priority) - if task.start() { - return true - } - p.fcClient.RequestProcessed(reqID, responseCount, maxCost, inSizeCost) - return false + // Lookup the request handler table, ensure it's supported + // message type by the protocol. + req, ok := Les3[msg.Code] + if !ok { + p.Log().Trace("Received invalid message", "code", msg.Code) + clientErrorMeter.Mark(1) + return errResp(ErrInvalidMsgCode, "%v", msg.Code) } - // sendResponse sends back the response and updates the flow control statistic. - sendResponse := func(reqID, amount uint64, reply *reply, servingTime uint64) { - p.responseLock.Lock() - defer p.responseLock.Unlock() + p.Log().Trace("Received " + req.Name) - // Short circuit if the client is already frozen. 
- if p.isFrozen() { - realCost := h.server.costTracker.realCost(servingTime, msg.Size, 0) - p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) - return - } - // Positive correction buffer value with real cost. - var replySize uint32 - if reply != nil { - replySize = reply.size() - } - var realCost uint64 - if h.server.costTracker.testing { - realCost = maxCost // Assign a fake cost for testing purpose - } else { - realCost = h.server.costTracker.realCost(servingTime, msg.Size, replySize) - if realCost > maxCost { - realCost = maxCost - } - } - bv := p.fcClient.RequestProcessed(reqID, responseCount, maxCost, realCost) - if amount != 0 { - // Feed cost tracker request serving statistic. - h.server.costTracker.updateStats(msg.Code, amount, servingTime, realCost) - // Reduce priority "balance" for the specific peer. - p.balance.RequestServed(realCost) - } - if reply != nil { - p.queueSend(func() { - if err := reply.send(bv); err != nil { - select { - case p.errCh <- err: - default: - } - } - }) - } + // Decode the p2p message, resolve the concrete handler for it. + serve, reqID, reqCnt, err := req.Handle(msg) + if err != nil { + clientErrorMeter.Mark(1) + return errResp(ErrDecode, "%v: %v", msg, err) } - switch msg.Code { - case GetBlockHeadersMsg: - p.Log().Trace("Received block header request") - if metrics.EnabledExpensive { - miscInHeaderPacketsMeter.Mark(1) - miscInHeaderTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Query getBlockHeadersData - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "%v: %v", msg, err) - } - query := req.Query - if accept(req.ReqID, query.Amount, MaxHeaderFetch) { - wg.Add(1) - go func() { - defer wg.Done() - hashMode := query.Origin.Hash != (common.Hash{}) - first := true - maxNonCanonical := uint64(100) - - // Gather headers until the fetch or network limits is reached - var ( - bytes common.StorageSize - headers []*types.Header - unknown bool - ) - for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit { - if !first && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - // Retrieve the next header satisfying the query - var origin *types.Header - if hashMode { - if first { - origin = h.blockchain.GetHeaderByHash(query.Origin.Hash) - if origin != nil { - query.Origin.Number = origin.Number.Uint64() - } - } else { - origin = h.blockchain.GetHeader(query.Origin.Hash, query.Origin.Number) - } - } else { - origin = h.blockchain.GetHeaderByNumber(query.Origin.Number) - } - if origin == nil { - break - } - headers = append(headers, origin) - bytes += estHeaderRlpSize - - // Advance to the next header of the query - switch { - case hashMode && query.Reverse: - // Hash based traversal towards the genesis block - ancestor := query.Skip + 1 - if ancestor == 0 { - unknown = true - } else { - query.Origin.Hash, query.Origin.Number = h.blockchain.GetAncestor(query.Origin.Hash, query.Origin.Number, ancestor, &maxNonCanonical) - unknown = query.Origin.Hash == common.Hash{} - } - case hashMode && !query.Reverse: - // Hash based traversal towards the leaf block - var ( - current = origin.Number.Uint64() - next = current + query.Skip + 1 - ) - if next <= current { - infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") - p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos) - unknown = true - } else { - if header := h.blockchain.GetHeaderByNumber(next); 
header != nil { - nextHash := header.Hash() - expOldHash, _ := h.blockchain.GetAncestor(nextHash, next, query.Skip+1, &maxNonCanonical) - if expOldHash == query.Origin.Hash { - query.Origin.Hash, query.Origin.Number = nextHash, next - } else { - unknown = true - } - } else { - unknown = true - } - } - case query.Reverse: - // Number based traversal towards the genesis block - if query.Origin.Number >= query.Skip+1 { - query.Origin.Number -= query.Skip + 1 - } else { - unknown = true - } - - case !query.Reverse: - // Number based traversal towards the leaf block - query.Origin.Number += query.Skip + 1 - } - first = false - } - reply := p.replyBlockHeaders(req.ReqID, headers) - sendResponse(req.ReqID, query.Amount, reply, task.done()) - if metrics.EnabledExpensive { - miscOutHeaderPacketsMeter.Mark(1) - miscOutHeaderTrafficMeter.Mark(int64(reply.size())) - miscServingTimeHeaderTimer.Update(time.Duration(task.servingTime)) - } - }() - } - - case GetBlockBodiesMsg: - p.Log().Trace("Received block bodies request") - if metrics.EnabledExpensive { - miscInBodyPacketsMeter.Mark(1) - miscInBodyTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Hashes []common.Hash - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - var ( - bytes int - bodies []rlp.RawValue - ) - reqCnt := len(req.Hashes) - if accept(req.ReqID, uint64(reqCnt), MaxBodyFetch) { - wg.Add(1) - go func() { - defer wg.Done() - for i, hash := range req.Hashes { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - if bytes >= softResponseLimit { - break - } - body := h.blockchain.GetBodyRLP(hash) - if body == nil { - p.bumpInvalid() - continue - } - bodies = append(bodies, body) - bytes += len(body) - } - reply := p.replyBlockBodiesRLP(req.ReqID, bodies) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutBodyPacketsMeter.Mark(1) - miscOutBodyTrafficMeter.Mark(int64(reply.size())) - miscServingTimeBodyTimer.Update(time.Duration(task.servingTime)) - } - }() - } - - case GetCodeMsg: - p.Log().Trace("Received code request") - if metrics.EnabledExpensive { - miscInCodePacketsMeter.Mark(1) - miscInCodeTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Reqs []CodeReq - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - var ( - bytes int - data [][]byte - ) - reqCnt := len(req.Reqs) - if accept(req.ReqID, uint64(reqCnt), MaxCodeFetch) { - wg.Add(1) - go func() { - defer wg.Done() - for i, request := range req.Reqs { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - // Look up the root hash belonging to the request - header := h.blockchain.GetHeaderByHash(request.BHash) - if header == nil { - p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash) - p.bumpInvalid() - continue - } - // Refuse to search stale state data in the database since looking for - // a non-exist key is kind of expensive. 
- local := h.blockchain.CurrentHeader().Number.Uint64() - if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { - p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local) - p.bumpInvalid() - continue - } - triedb := h.blockchain.StateCache().TrieDB() - - account, err := h.getAccount(triedb, header.Root, common.BytesToHash(request.AccKey)) - if err != nil { - p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) - p.bumpInvalid() - continue - } - code, err := h.blockchain.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash)) - if err != nil { - p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) - continue - } - // Accumulate the code and abort if enough data was retrieved - data = append(data, code) - if bytes += len(code); bytes >= softResponseLimit { - break - } - } - reply := p.replyCode(req.ReqID, data) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutCodePacketsMeter.Mark(1) - miscOutCodeTrafficMeter.Mark(int64(reply.size())) - miscServingTimeCodeTimer.Update(time.Duration(task.servingTime)) - } - }() - } - - case GetReceiptsMsg: - p.Log().Trace("Received receipts request") - if metrics.EnabledExpensive { - miscInReceiptPacketsMeter.Mark(1) - miscInReceiptTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Hashes []common.Hash - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - var ( - bytes int - receipts []rlp.RawValue - ) - reqCnt := len(req.Hashes) - if accept(req.ReqID, uint64(reqCnt), MaxReceiptFetch) { - wg.Add(1) - go func() { - defer wg.Done() - for i, hash := range req.Hashes { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - if bytes >= softResponseLimit { - break - } - // Retrieve the requested block's receipts, skipping if unknown to us - results := h.blockchain.GetReceiptsByHash(hash) - if results == nil { - if header := h.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { - p.bumpInvalid() - continue - } - } - // If known, encode and queue for response packet - if encoded, err := rlp.EncodeToBytes(results); err != nil { - log.Error("Failed to encode receipt", "err", err) - } else { - receipts = append(receipts, encoded) - bytes += len(encoded) - } - } - reply := p.replyReceiptsRLP(req.ReqID, receipts) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutReceiptPacketsMeter.Mark(1) - miscOutReceiptTrafficMeter.Mark(int64(reply.size())) - miscServingTimeReceiptTimer.Update(time.Duration(task.servingTime)) - } - }() - } - - case GetProofsV2Msg: - p.Log().Trace("Received les/2 proofs request") - if metrics.EnabledExpensive { - miscInTrieProofPacketsMeter.Mark(1) - miscInTrieProofTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Reqs []ProofReq - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Gather state data until the fetch or network limits is reached - var ( - lastBHash common.Hash - root common.Hash - ) - 
reqCnt := len(req.Reqs) - if accept(req.ReqID, uint64(reqCnt), MaxProofsFetch) { - wg.Add(1) - go func() { - defer wg.Done() - nodes := light.NewNodeSet() - - for i, request := range req.Reqs { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - // Look up the root hash belonging to the request - var ( - header *types.Header - trie state.Trie - ) - if request.BHash != lastBHash { - root, lastBHash = common.Hash{}, request.BHash - - if header = h.blockchain.GetHeaderByHash(request.BHash); header == nil { - p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash) - p.bumpInvalid() - continue - } - // Refuse to search stale state data in the database since looking for - // a non-exist key is kind of expensive. - local := h.blockchain.CurrentHeader().Number.Uint64() - if !h.server.archiveMode && header.Number.Uint64()+core.TriesInMemory <= local { - p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local) - p.bumpInvalid() - continue - } - root = header.Root - } - // If a header lookup failed (non existent), ignore subsequent requests for the same header - if root == (common.Hash{}) { - p.bumpInvalid() - continue - } - // Open the account or storage trie for the request - statedb := h.blockchain.StateCache() - - switch len(request.AccKey) { - case 0: - // No account key specified, open an account trie - trie, err = statedb.OpenTrie(root) - if trie == nil || err != nil { - p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err) - continue - } - default: - // Account key specified, open a storage trie - account, err := h.getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey)) - if err != nil { - p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) - p.bumpInvalid() - continue - } - trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root) - if trie == nil || err != nil { - p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err) - continue - } - } - // Prove the user's request from the account or stroage trie - if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil { - p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err) - continue - } - if nodes.DataSize() >= softResponseLimit { - break - } - } - reply := p.replyProofsV2(req.ReqID, nodes.NodeList()) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutTrieProofPacketsMeter.Mark(1) - miscOutTrieProofTrafficMeter.Mark(int64(reply.size())) - miscServingTimeTrieProofTimer.Update(time.Duration(task.servingTime)) - } - }() - } + if metrics.EnabledExpensive { + req.InPacketsMeter.Mark(1) + req.InTrafficMeter.Mark(int64(msg.Size)) + } + p.responseCount++ + responseCount := p.responseCount - case GetHelperTrieProofsMsg: - p.Log().Trace("Received helper trie proof request") - if metrics.EnabledExpensive { - miscInHelperTriePacketsMeter.Mark(1) - miscInHelperTrieTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Reqs []HelperTrieReq - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - // Gather 
state data until the fetch or network limits is reached - var ( - auxBytes int - auxData [][]byte - ) - reqCnt := len(req.Reqs) - if accept(req.ReqID, uint64(reqCnt), MaxHelperTrieProofsFetch) { - wg.Add(1) - go func() { - defer wg.Done() - var ( - lastIdx uint64 - lastType uint - root common.Hash - auxTrie *trie.Trie - ) - nodes := light.NewNodeSet() - for i, request := range req.Reqs { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx { - auxTrie, lastType, lastIdx = nil, request.Type, request.TrieIdx + // First check this client message complies all rules before + // handling it and return a processor if all checks are passed. + task, maxCost := h.beforeHandle(p, reqID, responseCount, msg, reqCnt, req.MaxCount) + if task == nil { + return nil + } + wg.Add(1) + go func() { + defer wg.Done() - var prefix string - if root, prefix = h.getHelperTrie(request.Type, request.TrieIdx); root != (common.Hash{}) { - auxTrie, _ = trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix))) - } - } - if request.AuxReq == auxRoot { - var data []byte - if root != (common.Hash{}) { - data = root[:] - } - auxData = append(auxData, data) - auxBytes += len(data) - } else { - if auxTrie != nil { - auxTrie.Prove(request.Key, request.FromLevel, nodes) - } - if request.AuxReq != 0 { - data := h.getAuxiliaryHeaders(request) - auxData = append(auxData, data) - auxBytes += len(data) - } - } - if nodes.DataSize()+auxBytes >= softResponseLimit { - break - } - } - reply := p.replyHelperTrieProofs(req.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutHelperTriePacketsMeter.Mark(1) - miscOutHelperTrieTrafficMeter.Mark(int64(reply.size())) - miscServingTimeHelperTrieTimer.Update(time.Duration(task.servingTime)) - } - }() - } + reply := serve(h, p, task.waitOrStop) + h.afterHandle(p, reqID, responseCount, msg, maxCost, reqCnt, task, reply) - case SendTxV2Msg: - p.Log().Trace("Received new transactions") if metrics.EnabledExpensive { - miscInTxsPacketsMeter.Mark(1) - miscInTxsTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Txs []*types.Transaction - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - reqCnt := len(req.Txs) - if accept(req.ReqID, uint64(reqCnt), MaxTxSend) { - wg.Add(1) - go func() { - defer wg.Done() - stats := make([]light.TxStatus, len(req.Txs)) - for i, tx := range req.Txs { - if i != 0 && !task.waitOrStop() { - return - } - hash := tx.Hash() - stats[i] = h.txStatus(hash) - if stats[i].Status == core.TxStatusUnknown { - addFn := h.txpool.AddRemotes - // Add txs synchronously for testing purpose - if h.addTxsSync { - addFn = h.txpool.AddRemotesSync - } - if errs := addFn([]*types.Transaction{tx}); errs[0] != nil { - stats[i].Error = errs[0].Error() - continue - } - stats[i] = h.txStatus(hash) - } - } - reply := p.replyTxStatus(req.ReqID, stats) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutTxsPacketsMeter.Mark(1) - miscOutTxsTrafficMeter.Mark(int64(reply.size())) - miscServingTimeTxTimer.Update(time.Duration(task.servingTime)) - } - }() - } - - case GetTxStatusMsg: - p.Log().Trace("Received transaction status query request") - if metrics.EnabledExpensive { - miscInTxStatusPacketsMeter.Mark(1) 
- miscInTxStatusTrafficMeter.Mark(int64(msg.Size)) - } - var req struct { - ReqID uint64 - Hashes []common.Hash - } - if err := msg.Decode(&req); err != nil { - clientErrorMeter.Mark(1) - return errResp(ErrDecode, "msg %v: %v", msg, err) - } - reqCnt := len(req.Hashes) - if accept(req.ReqID, uint64(reqCnt), MaxTxStatus) { - wg.Add(1) - go func() { - defer wg.Done() - stats := make([]light.TxStatus, len(req.Hashes)) - for i, hash := range req.Hashes { - if i != 0 && !task.waitOrStop() { - sendResponse(req.ReqID, 0, nil, task.servingTime) - return - } - stats[i] = h.txStatus(hash) - } - reply := p.replyTxStatus(req.ReqID, stats) - sendResponse(req.ReqID, uint64(reqCnt), reply, task.done()) - if metrics.EnabledExpensive { - miscOutTxStatusPacketsMeter.Mark(1) - miscOutTxStatusTrafficMeter.Mark(int64(reply.size())) - miscServingTimeTxStatusTimer.Update(time.Duration(task.servingTime)) - } - }() + size := uint32(0) + if reply != nil { + size = reply.size() + } + req.OutPacketsMeter.Mark(1) + req.OutTrafficMeter.Mark(int64(size)) + req.ServingTimeMeter.Update(time.Duration(task.servingTime)) } - - default: - p.Log().Trace("Received invalid message", "code", msg.Code) - clientErrorMeter.Mark(1) - return errResp(ErrInvalidMsgCode, "%v", msg.Code) - } + }() // If the client has made too much invalid request(e.g. request a non-existent data), // reject them to prevent SPAM attack. if p.getInvalid() > maxRequestErrors { @@ -870,8 +360,28 @@ func (h *serverHandler) handleMsg(p *clientPeer, wg *sync.WaitGroup) error { return nil } +// BlockChain implements serverBackend +func (h *serverHandler) BlockChain() *core.BlockChain { + return h.blockchain +} + +// TxPool implements serverBackend +func (h *serverHandler) TxPool() *core.TxPool { + return h.txpool +} + +// ArchiveMode implements serverBackend +func (h *serverHandler) ArchiveMode() bool { + return h.server.archiveMode +} + +// AddTxsSync implements serverBackend +func (h *serverHandler) AddTxsSync() bool { + return h.addTxsSync +} + // getAccount retrieves an account from the state based on root. -func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) { +func getAccount(triedb *trie.Database, root, hash common.Hash) (state.Account, error) { trie, err := trie.New(root, triedb) if err != nil { return state.Account{}, err @@ -888,43 +398,24 @@ func (h *serverHandler) getAccount(triedb *trie.Database, root, hash common.Hash } // getHelperTrie returns the post-processed trie root for the given trie ID and section index -func (h *serverHandler) getHelperTrie(typ uint, index uint64) (common.Hash, string) { +func (h *serverHandler) GetHelperTrie(typ uint, index uint64) *trie.Trie { + var ( + root common.Hash + prefix string + ) switch typ { case htCanonical: sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.ChtSize-1) - return light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix + root, prefix = light.GetChtRoot(h.chainDb, index, sectionHead), light.ChtTablePrefix case htBloomBits: sectionHead := rawdb.ReadCanonicalHash(h.chainDb, (index+1)*h.server.iConfig.BloomTrieSize-1) - return light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix + root, prefix = light.GetBloomTrieRoot(h.chainDb, index, sectionHead), light.BloomTrieTablePrefix } - return common.Hash{}, "" -} - -// getAuxiliaryHeaders returns requested auxiliary headers for the CHT request. 
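The per-message cases removed above are collapsed into a table-driven flow: Les3 (defined in the new les/server_requests.go further down) maps each message code to a RequestType whose Handle function decodes the packet and returns a serveRequestFn. A rough sketch of the glue, with the surrounding bookkeeping (responseCount, wait group, expensive-metrics updates) assumed to be as in the handler above:

	req, ok := Les3[msg.Code]
	if !ok {
		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
	}
	serve, reqID, reqCnt, err := req.Handle(msg)
	if err != nil {
		return errResp(ErrDecode, "msg %v: %v", msg, err)
	}
	// Flow-control admission check; a nil task means the request was rejected.
	task, maxCost := h.beforeHandle(p, reqID, responseCount, msg, reqCnt, req.MaxCount)
	if task != nil {
		go func() {
			// serve runs the decoded request against the server backend and
			// builds (but does not send) the reply.
			reply := serve(h, p, task.waitOrStop)
			h.afterHandle(p, reqID, responseCount, msg, maxCost, reqCnt, task, reply)
		}()
	}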
-func (h *serverHandler) getAuxiliaryHeaders(req HelperTrieReq) []byte { - if req.Type == htCanonical && req.AuxReq == auxHeader && len(req.Key) == 8 { - blockNum := binary.BigEndian.Uint64(req.Key) - hash := rawdb.ReadCanonicalHash(h.chainDb, blockNum) - return rawdb.ReadHeaderRLP(h.chainDb, hash, blockNum) - } - return nil -} - -// txStatus returns the status of a specified transaction. -func (h *serverHandler) txStatus(hash common.Hash) light.TxStatus { - var stat light.TxStatus - // Looking the transaction in txpool first. - stat.Status = h.txpool.Status([]common.Hash{hash})[0] - - // If the transaction is unknown to the pool, try looking it up locally. - if stat.Status == core.TxStatusUnknown { - lookup := h.blockchain.GetTransactionLookup(hash) - if lookup != nil { - stat.Status = core.TxStatusIncluded - stat.Lookup = lookup - } + if root == (common.Hash{}) { + return nil } - return stat + trie, _ := trie.New(root, trie.NewDatabase(rawdb.NewTable(h.chainDb, prefix))) + return trie } // broadcastLoop broadcasts new block information to all connected light diff --git a/les/server_requests.go b/les/server_requests.go new file mode 100644 index 0000000000..07f30b1b73 --- /dev/null +++ b/les/server_requests.go @@ -0,0 +1,569 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package les + +import ( + "encoding/binary" + "encoding/json" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/light" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +// serverBackend defines the backend functions needed for serving LES requests +type serverBackend interface { + ArchiveMode() bool + AddTxsSync() bool + BlockChain() *core.BlockChain + TxPool() *core.TxPool + GetHelperTrie(typ uint, index uint64) *trie.Trie +} + +// Decoder is implemented by the messages passed to the handler functions +type Decoder interface { + Decode(val interface{}) error +} + +// RequestType is a static struct that describes an LES request type and references +// its handler function. +type RequestType struct { + Name string + MaxCount uint64 + InPacketsMeter, InTrafficMeter, OutPacketsMeter, OutTrafficMeter metrics.Meter + ServingTimeMeter metrics.Timer + Handle func(msg Decoder) (serve serveRequestFn, reqID, amount uint64, err error) +} + +// serveRequestFn is returned by the request handler functions after decoding the request. +// This function does the actual request serving using the supplied backend. 
waitOrStop is +// called between serving individual request items and may block if the serving process +// needs to be throttled. If it returns false then the process is terminated. +// The reply is not sent by this function yet. The flow control feedback value is supplied +// by the protocol handler when calling the send function of the returned reply struct. +type serveRequestFn func(backend serverBackend, peer *clientPeer, waitOrStop func() bool) *reply + +// Les3 contains the request types supported by les/2 and les/3 +var Les3 = map[uint64]RequestType{ + GetBlockHeadersMsg: { + Name: "block header request", + MaxCount: MaxHeaderFetch, + InPacketsMeter: miscInHeaderPacketsMeter, + InTrafficMeter: miscInHeaderTrafficMeter, + OutPacketsMeter: miscOutHeaderPacketsMeter, + OutTrafficMeter: miscOutHeaderTrafficMeter, + ServingTimeMeter: miscServingTimeHeaderTimer, + Handle: handleGetBlockHeaders, + }, + GetBlockBodiesMsg: { + Name: "block bodies request", + MaxCount: MaxBodyFetch, + InPacketsMeter: miscInBodyPacketsMeter, + InTrafficMeter: miscInBodyTrafficMeter, + OutPacketsMeter: miscOutBodyPacketsMeter, + OutTrafficMeter: miscOutBodyTrafficMeter, + ServingTimeMeter: miscServingTimeBodyTimer, + Handle: handleGetBlockBodies, + }, + GetCodeMsg: { + Name: "code request", + MaxCount: MaxCodeFetch, + InPacketsMeter: miscInCodePacketsMeter, + InTrafficMeter: miscInCodeTrafficMeter, + OutPacketsMeter: miscOutCodePacketsMeter, + OutTrafficMeter: miscOutCodeTrafficMeter, + ServingTimeMeter: miscServingTimeCodeTimer, + Handle: handleGetCode, + }, + GetReceiptsMsg: { + Name: "receipts request", + MaxCount: MaxReceiptFetch, + InPacketsMeter: miscInReceiptPacketsMeter, + InTrafficMeter: miscInReceiptTrafficMeter, + OutPacketsMeter: miscOutReceiptPacketsMeter, + OutTrafficMeter: miscOutReceiptTrafficMeter, + ServingTimeMeter: miscServingTimeReceiptTimer, + Handle: handleGetReceipts, + }, + GetProofsV2Msg: { + Name: "les/2 proofs request", + MaxCount: MaxProofsFetch, + InPacketsMeter: miscInTrieProofPacketsMeter, + InTrafficMeter: miscInTrieProofTrafficMeter, + OutPacketsMeter: miscOutTrieProofPacketsMeter, + OutTrafficMeter: miscOutTrieProofTrafficMeter, + ServingTimeMeter: miscServingTimeTrieProofTimer, + Handle: handleGetProofs, + }, + GetHelperTrieProofsMsg: { + Name: "helper trie proof request", + MaxCount: MaxHelperTrieProofsFetch, + InPacketsMeter: miscInHelperTriePacketsMeter, + InTrafficMeter: miscInHelperTrieTrafficMeter, + OutPacketsMeter: miscOutHelperTriePacketsMeter, + OutTrafficMeter: miscOutHelperTrieTrafficMeter, + ServingTimeMeter: miscServingTimeHelperTrieTimer, + Handle: handleGetHelperTrieProofs, + }, + SendTxV2Msg: { + Name: "new transactions", + MaxCount: MaxTxSend, + InPacketsMeter: miscInTxsPacketsMeter, + InTrafficMeter: miscInTxsTrafficMeter, + OutPacketsMeter: miscOutTxsPacketsMeter, + OutTrafficMeter: miscOutTxsTrafficMeter, + ServingTimeMeter: miscServingTimeTxTimer, + Handle: handleSendTx, + }, + GetTxStatusMsg: { + Name: "transaction status query request", + MaxCount: MaxTxStatus, + InPacketsMeter: miscInTxStatusPacketsMeter, + InTrafficMeter: miscInTxStatusTrafficMeter, + OutPacketsMeter: miscOutTxStatusPacketsMeter, + OutTrafficMeter: miscOutTxStatusTrafficMeter, + ServingTimeMeter: miscServingTimeTxStatusTimer, + Handle: handleGetTxStatus, + }, +} + +// handleGetBlockHeaders handles a block header request +func handleGetBlockHeaders(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetBlockHeadersPacket + if err := msg.Decode(&r); err != nil { + return 
nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + // Gather headers until the fetch or network limits is reached + var ( + bc = backend.BlockChain() + hashMode = r.Query.Origin.Hash != (common.Hash{}) + first = true + maxNonCanonical = uint64(100) + bytes common.StorageSize + headers []*types.Header + unknown bool + ) + for !unknown && len(headers) < int(r.Query.Amount) && bytes < softResponseLimit { + if !first && !waitOrStop() { + return nil + } + // Retrieve the next header satisfying the r + var origin *types.Header + if hashMode { + if first { + origin = bc.GetHeaderByHash(r.Query.Origin.Hash) + if origin != nil { + r.Query.Origin.Number = origin.Number.Uint64() + } + } else { + origin = bc.GetHeader(r.Query.Origin.Hash, r.Query.Origin.Number) + } + } else { + origin = bc.GetHeaderByNumber(r.Query.Origin.Number) + } + if origin == nil { + break + } + headers = append(headers, origin) + bytes += estHeaderRlpSize + + // Advance to the next header of the r + switch { + case hashMode && r.Query.Reverse: + // Hash based traversal towards the genesis block + ancestor := r.Query.Skip + 1 + if ancestor == 0 { + unknown = true + } else { + r.Query.Origin.Hash, r.Query.Origin.Number = bc.GetAncestor(r.Query.Origin.Hash, r.Query.Origin.Number, ancestor, &maxNonCanonical) + unknown = r.Query.Origin.Hash == common.Hash{} + } + case hashMode && !r.Query.Reverse: + // Hash based traversal towards the leaf block + var ( + current = origin.Number.Uint64() + next = current + r.Query.Skip + 1 + ) + if next <= current { + infos, _ := json.MarshalIndent(p.Peer.Info(), "", " ") + p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", r.Query.Skip, "next", next, "attacker", infos) + unknown = true + } else { + if header := bc.GetHeaderByNumber(next); header != nil { + nextHash := header.Hash() + expOldHash, _ := bc.GetAncestor(nextHash, next, r.Query.Skip+1, &maxNonCanonical) + if expOldHash == r.Query.Origin.Hash { + r.Query.Origin.Hash, r.Query.Origin.Number = nextHash, next + } else { + unknown = true + } + } else { + unknown = true + } + } + case r.Query.Reverse: + // Number based traversal towards the genesis block + if r.Query.Origin.Number >= r.Query.Skip+1 { + r.Query.Origin.Number -= r.Query.Skip + 1 + } else { + unknown = true + } + + case !r.Query.Reverse: + // Number based traversal towards the leaf block + r.Query.Origin.Number += r.Query.Skip + 1 + } + first = false + } + return p.replyBlockHeaders(r.ReqID, headers) + }, r.ReqID, r.Query.Amount, nil +} + +// handleGetBlockBodies handles a block body request +func handleGetBlockBodies(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetBlockBodiesPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + var ( + bytes int + bodies []rlp.RawValue + ) + bc := backend.BlockChain() + for i, hash := range r.Hashes { + if i != 0 && !waitOrStop() { + return nil + } + if bytes >= softResponseLimit { + break + } + body := bc.GetBodyRLP(hash) + if body == nil { + p.bumpInvalid() + continue + } + bodies = append(bodies, body) + bytes += len(body) + } + return p.replyBlockBodiesRLP(r.ReqID, bodies) + }, r.ReqID, uint64(len(r.Hashes)), nil +} + +// handleGetCode handles a contract code request +func handleGetCode(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetCodePacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + 
return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + var ( + bytes int + data [][]byte + ) + bc := backend.BlockChain() + for i, request := range r.Reqs { + if i != 0 && !waitOrStop() { + return nil + } + // Look up the root hash belonging to the request + header := bc.GetHeaderByHash(request.BHash) + if header == nil { + p.Log().Warn("Failed to retrieve associate header for code", "hash", request.BHash) + p.bumpInvalid() + continue + } + // Refuse to search stale state data in the database since looking for + // a non-exist key is kind of expensive. + local := bc.CurrentHeader().Number.Uint64() + if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local { + p.Log().Debug("Reject stale code request", "number", header.Number.Uint64(), "head", local) + p.bumpInvalid() + continue + } + triedb := bc.StateCache().TrieDB() + + account, err := getAccount(triedb, header.Root, common.BytesToHash(request.AccKey)) + if err != nil { + p.Log().Warn("Failed to retrieve account for code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) + p.bumpInvalid() + continue + } + code, err := bc.StateCache().ContractCode(common.BytesToHash(request.AccKey), common.BytesToHash(account.CodeHash)) + if err != nil { + p.Log().Warn("Failed to retrieve account code", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "codehash", common.BytesToHash(account.CodeHash), "err", err) + continue + } + // Accumulate the code and abort if enough data was retrieved + data = append(data, code) + if bytes += len(code); bytes >= softResponseLimit { + break + } + } + return p.replyCode(r.ReqID, data) + }, r.ReqID, uint64(len(r.Reqs)), nil +} + +// handleGetReceipts handles a block receipts request +func handleGetReceipts(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetReceiptsPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + var ( + bytes int + receipts []rlp.RawValue + ) + bc := backend.BlockChain() + for i, hash := range r.Hashes { + if i != 0 && !waitOrStop() { + return nil + } + if bytes >= softResponseLimit { + break + } + // Retrieve the requested block's receipts, skipping if unknown to us + results := bc.GetReceiptsByHash(hash) + if results == nil { + if header := bc.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash { + p.bumpInvalid() + continue + } + } + // If known, encode and queue for response packet + if encoded, err := rlp.EncodeToBytes(results); err != nil { + log.Error("Failed to encode receipt", "err", err) + } else { + receipts = append(receipts, encoded) + bytes += len(encoded) + } + } + return p.replyReceiptsRLP(r.ReqID, receipts) + }, r.ReqID, uint64(len(r.Hashes)), nil +} + +// handleGetProofs handles a proof request +func handleGetProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetProofsPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + var ( + lastBHash common.Hash + root common.Hash + header *types.Header + err error + ) + bc := backend.BlockChain() + nodes := light.NewNodeSet() + + for i, request := range r.Reqs { + if i != 0 && !waitOrStop() { + return nil + } + // Look up the root hash belonging to the request + if request.BHash != lastBHash { + root, lastBHash = 
common.Hash{}, request.BHash + + if header = bc.GetHeaderByHash(request.BHash); header == nil { + p.Log().Warn("Failed to retrieve header for proof", "hash", request.BHash) + p.bumpInvalid() + continue + } + // Refuse to search stale state data in the database since looking for + // a non-exist key is kind of expensive. + local := bc.CurrentHeader().Number.Uint64() + if !backend.ArchiveMode() && header.Number.Uint64()+core.TriesInMemory <= local { + p.Log().Debug("Reject stale trie request", "number", header.Number.Uint64(), "head", local) + p.bumpInvalid() + continue + } + root = header.Root + } + // If a header lookup failed (non existent), ignore subsequent requests for the same header + if root == (common.Hash{}) { + p.bumpInvalid() + continue + } + // Open the account or storage trie for the request + statedb := bc.StateCache() + + var trie state.Trie + switch len(request.AccKey) { + case 0: + // No account key specified, open an account trie + trie, err = statedb.OpenTrie(root) + if trie == nil || err != nil { + p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "root", root, "err", err) + continue + } + default: + // Account key specified, open a storage trie + account, err := getAccount(statedb.TrieDB(), root, common.BytesToHash(request.AccKey)) + if err != nil { + p.Log().Warn("Failed to retrieve account for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "err", err) + p.bumpInvalid() + continue + } + trie, err = statedb.OpenStorageTrie(common.BytesToHash(request.AccKey), account.Root) + if trie == nil || err != nil { + p.Log().Warn("Failed to open storage trie for proof", "block", header.Number, "hash", header.Hash(), "account", common.BytesToHash(request.AccKey), "root", account.Root, "err", err) + continue + } + } + // Prove the user's request from the account or stroage trie + if err := trie.Prove(request.Key, request.FromLevel, nodes); err != nil { + p.Log().Warn("Failed to prove state request", "block", header.Number, "hash", header.Hash(), "err", err) + continue + } + if nodes.DataSize() >= softResponseLimit { + break + } + } + return p.replyProofsV2(r.ReqID, nodes.NodeList()) + }, r.ReqID, uint64(len(r.Reqs)), nil +} + +// handleGetHelperTrieProofs handles a helper trie proof request +func handleGetHelperTrieProofs(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetHelperTrieProofsPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + var ( + lastIdx uint64 + lastType uint + auxTrie *trie.Trie + auxBytes int + auxData [][]byte + ) + bc := backend.BlockChain() + nodes := light.NewNodeSet() + for i, request := range r.Reqs { + if i != 0 && !waitOrStop() { + return nil + } + if auxTrie == nil || request.Type != lastType || request.TrieIdx != lastIdx { + lastType, lastIdx = request.Type, request.TrieIdx + auxTrie = backend.GetHelperTrie(request.Type, request.TrieIdx) + } + if auxTrie == nil { + return nil + } + // TODO(rjl493456442) short circuit if the proving is failed. + // The original client side code has a dirty hack to retrieve + // the headers with no valid proof. Keep the compatibility for + // legacy les protocol and drop this hack when the les2/3 are + // not supported. 
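+			// For canonical CHT requests with AuxReq == htAuxHeader the 8-byte key is the
+			// big-endian block number; the corresponding header is RLP-encoded below and
+			// attached as auxiliary data next to the proof nodes.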
+ err := auxTrie.Prove(request.Key, request.FromLevel, nodes) + if p.version >= lpv4 && err != nil { + return nil + } + if request.Type == htCanonical && request.AuxReq == htAuxHeader && len(request.Key) == 8 { + header := bc.GetHeaderByNumber(binary.BigEndian.Uint64(request.Key)) + data, err := rlp.EncodeToBytes(header) + if err != nil { + log.Error("Failed to encode header", "err", err) + return nil + } + auxData = append(auxData, data) + auxBytes += len(data) + } + if nodes.DataSize()+auxBytes >= softResponseLimit { + break + } + } + return p.replyHelperTrieProofs(r.ReqID, HelperTrieResps{Proofs: nodes.NodeList(), AuxData: auxData}) + }, r.ReqID, uint64(len(r.Reqs)), nil +} + +// handleSendTx handles a transaction propagation request +func handleSendTx(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r SendTxPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + amount := uint64(len(r.Txs)) + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + stats := make([]light.TxStatus, len(r.Txs)) + for i, tx := range r.Txs { + if i != 0 && !waitOrStop() { + return nil + } + hash := tx.Hash() + stats[i] = txStatus(backend, hash) + if stats[i].Status == core.TxStatusUnknown { + addFn := backend.TxPool().AddRemotes + // Add txs synchronously for testing purpose + if backend.AddTxsSync() { + addFn = backend.TxPool().AddRemotesSync + } + if errs := addFn([]*types.Transaction{tx}); errs[0] != nil { + stats[i].Error = errs[0].Error() + continue + } + stats[i] = txStatus(backend, hash) + } + } + return p.replyTxStatus(r.ReqID, stats) + }, r.ReqID, amount, nil +} + +// handleGetTxStatus handles a transaction status query +func handleGetTxStatus(msg Decoder) (serveRequestFn, uint64, uint64, error) { + var r GetTxStatusPacket + if err := msg.Decode(&r); err != nil { + return nil, 0, 0, err + } + return func(backend serverBackend, p *clientPeer, waitOrStop func() bool) *reply { + stats := make([]light.TxStatus, len(r.Hashes)) + for i, hash := range r.Hashes { + if i != 0 && !waitOrStop() { + return nil + } + stats[i] = txStatus(backend, hash) + } + return p.replyTxStatus(r.ReqID, stats) + }, r.ReqID, uint64(len(r.Hashes)), nil +} + +// txStatus returns the status of a specified transaction. +func txStatus(b serverBackend, hash common.Hash) light.TxStatus { + var stat light.TxStatus + // Looking the transaction in txpool first. + stat.Status = b.TxPool().Status([]common.Hash{hash})[0] + + // If the transaction is unknown to the pool, try looking it up locally. + if stat.Status == core.TxStatusUnknown { + lookup := b.BlockChain().GetTransactionLookup(hash) + if lookup != nil { + stat.Status = core.TxStatusIncluded + stat.Lookup = lookup + } + } + return stat +} diff --git a/les/state_accessor.go b/les/state_accessor.go new file mode 100644 index 0000000000..3c9143c875 --- /dev/null +++ b/les/state_accessor.go @@ -0,0 +1,88 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package les + +import ( + "context" + "errors" + "fmt" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/light" +) + +// stateAtBlock retrieves the state database associated with a certain block. +func (leth *LightEthereum) stateAtBlock(ctx context.Context, block *types.Block, reexec uint64) (*state.StateDB, func(), error) { + return light.NewState(ctx, block.Header(), leth.odr), func() {}, nil +} + +// statesInRange retrieves a batch of state databases associated with the specific +// block ranges. +func (leth *LightEthereum) statesInRange(ctx context.Context, fromBlock *types.Block, toBlock *types.Block, reexec uint64) ([]*state.StateDB, func(), error) { + var states []*state.StateDB + for number := fromBlock.NumberU64(); number <= toBlock.NumberU64(); number++ { + header, err := leth.blockchain.GetHeaderByNumberOdr(ctx, number) + if err != nil { + return nil, nil, err + } + states = append(states, light.NewState(ctx, header, leth.odr)) + } + return states, nil, nil +} + +// stateAtTransaction returns the execution environment of a certain transaction. +func (leth *LightEthereum) stateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, func(), error) { + // Short circuit if it's genesis block. + if block.NumberU64() == 0 { + return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") + } + // Create the parent state database + parent, err := leth.blockchain.GetBlock(ctx, block.ParentHash(), block.NumberU64()-1) + if err != nil { + return nil, vm.BlockContext{}, nil, nil, err + } + statedb, _, err := leth.stateAtBlock(ctx, parent, reexec) + if err != nil { + return nil, vm.BlockContext{}, nil, nil, err + } + if txIndex == 0 && len(block.Transactions()) == 0 { + return nil, vm.BlockContext{}, statedb, func() {}, nil + } + // Recompute transactions up to the target index. 
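+	// Each transaction preceding txIndex is executed on top of the parent state,
+	// so the StateDB handed back reflects the world state right before the
+	// requested transaction runs.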
+ signer := types.MakeSigner(leth.blockchain.Config(), block.Number()) + for idx, tx := range block.Transactions() { + // Assemble the transaction call message and return if the requested offset + msg, _ := tx.AsMessage(signer) + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(block.Header(), leth.blockchain, nil) + if idx == txIndex { + return msg, context, statedb, func() {}, nil + } + // Not yet the searched for transaction, execute on top of the current state + vmenv := vm.NewEVM(context, txContext, statedb, leth.blockchain.Config(), vm.Config{}) + if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + } + // Ensure any modifications are committed to the state + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) + } + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) +} diff --git a/les/sync.go b/les/sync.go index d2568d45bc..fa5ef4ff82 100644 --- a/les/sync.go +++ b/les/sync.go @@ -26,6 +26,7 @@ import ( "github.com/ethereum/go-ethereum/eth/downloader" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" ) var errInvalidCheckpoint = errors.New("invalid advertised checkpoint") @@ -56,8 +57,8 @@ func (h *clientHandler) validateCheckpoint(peer *serverPeer) error { defer cancel() // Fetch the block header corresponding to the checkpoint registration. - cp := peer.checkpoint - header, err := light.GetUntrustedHeaderByNumber(ctx, h.backend.odr, peer.checkpointNumber, peer.id) + wrapPeer := &peerConnection{handler: h, peer: peer} + header, err := wrapPeer.RetrieveSingleHeaderByNumber(ctx, peer.checkpointNumber) if err != nil { return err } @@ -66,7 +67,7 @@ func (h *clientHandler) validateCheckpoint(peer *serverPeer) error { if err != nil { return err } - events := h.backend.oracle.Contract().LookupCheckpointEvents(logs, cp.SectionIndex, cp.Hash()) + events := h.backend.oracle.Contract().LookupCheckpointEvents(logs, peer.checkpoint.SectionIndex, peer.checkpoint.Hash()) if len(events) == 0 { return errInvalidCheckpoint } @@ -98,22 +99,33 @@ func (h *clientHandler) synchronise(peer *serverPeer) { if currentTd != nil && peer.Td().Cmp(currentTd) < 0 { return } - // Recap the checkpoint. - // - // The light client may be connected to several different versions of the server. - // (1) Old version server which can not provide stable checkpoint in the handshake packet. - // => Use hardcoded checkpoint or empty checkpoint - // (2) New version server but simple checkpoint syncing is not enabled(e.g. mainnet, new testnet or private network) - // => Use hardcoded checkpoint or empty checkpoint - // (3) New version server but the provided stable checkpoint is even lower than the hardcoded one. - // => Use hardcoded checkpoint + // Recap the checkpoint. The light client may be connected to several different + // versions of the server. + // (1) Old version server which can not provide stable checkpoint in the + // handshake packet. + // => Use local checkpoint or empty checkpoint + // (2) New version server but simple checkpoint syncing is not enabled + // (e.g. 
mainnet, new testnet or private network) + // => Use local checkpoint or empty checkpoint + // (3) New version server but the provided stable checkpoint is even lower + // than the local one. + // => Use local checkpoint // (4) New version server with valid and higher stable checkpoint // => Use provided checkpoint - var checkpoint = &peer.checkpoint - var hardcoded bool + var ( + local bool + checkpoint = &peer.checkpoint + ) if h.checkpoint != nil && h.checkpoint.SectionIndex >= peer.checkpoint.SectionIndex { - checkpoint = h.checkpoint // Use the hardcoded one. - hardcoded = true + local, checkpoint = true, h.checkpoint + } + // Replace the checkpoint with locally configured one If it's required by + // users. Nil checkpoint means synchronization from the scratch. + if h.backend.config.SyncFromCheckpoint { + local, checkpoint = true, h.backend.config.Checkpoint + if h.backend.config.Checkpoint == nil { + checkpoint = ¶ms.TrustedCheckpoint{} + } } // Determine whether we should run checkpoint syncing or normal light syncing. // @@ -121,7 +133,7 @@ func (h *clientHandler) synchronise(peer *serverPeer) { // // 1. The checkpoint is empty // 2. The latest head block of the local chain is above the checkpoint. - // 3. The checkpoint is hardcoded(recap with local hardcoded checkpoint) + // 3. The checkpoint is local(replaced with local checkpoint) // 4. For some networks the checkpoint syncing is not activated. mode := checkpointSync switch { @@ -131,7 +143,7 @@ func (h *clientHandler) synchronise(peer *serverPeer) { case latest.Number.Uint64() >= (checkpoint.SectionIndex+1)*h.backend.iConfig.ChtSize-1: mode = lightSync log.Debug("Disable checkpoint syncing", "reason", "local chain beyond the checkpoint") - case hardcoded: + case local: mode = legacyCheckpointSync log.Debug("Disable checkpoint syncing", "reason", "checkpoint is hardcoded") case h.backend.oracle == nil || !h.backend.oracle.IsRunning(): @@ -143,12 +155,14 @@ func (h *clientHandler) synchronise(peer *serverPeer) { } log.Debug("Disable checkpoint syncing", "reason", "checkpoint syncing is not activated") } + // Notify testing framework if syncing has completed(for testing purpose). defer func() { - if h.syncDone != nil { - h.syncDone() + if h.syncEnd != nil { + h.syncEnd(h.backend.blockchain.CurrentHeader()) } }() + start := time.Now() if mode == checkpointSync || mode == legacyCheckpointSync { // Validate the advertised checkpoint @@ -177,6 +191,10 @@ func (h *clientHandler) synchronise(peer *serverPeer) { return } } + + if h.syncStart != nil { + h.syncStart(h.backend.blockchain.CurrentHeader()) + } // Fetch the remaining block headers based on the current chain header. if err := h.downloader.Synchronise(peer.id, peer.Head(), peer.Td(), downloader.LightSync); err != nil { log.Debug("Synchronise failed", "reason", err) diff --git a/les/sync_test.go b/les/sync_test.go index ffce4d8df2..d3bb90df02 100644 --- a/les/sync_test.go +++ b/les/sync_test.go @@ -24,21 +24,22 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/params" ) // Test light syncing which will download all headers from genesis. 
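+// The syncMode argument of testCheckpointSyncing selects the path under test:
+// 0 = plain light sync, 1 = legacy (hardcoded) checkpoint sync, 2 = checkpoint
+// sync driven by the oracle-registered checkpoint.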
-func TestLightSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 0) } +func TestLightSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 0) } // Test legacy checkpoint syncing which will download tail headers // based on a hardcoded checkpoint. -func TestLegacyCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 1) } +func TestLegacyCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 1) } // Test checkpoint syncing which will download tail headers based // on a verified checkpoint. -func TestCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, 3, 2) } +func TestCheckpointSyncingLes3(t *testing.T) { testCheckpointSyncing(t, lpv3, 2) } func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { config := light.TestServerIndexerConfig @@ -53,8 +54,14 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { time.Sleep(10 * time.Millisecond) } } - // Generate 512+4 blocks (totally 1 CHT sections) - server, client, tearDown := newClientServerEnv(t, int(config.ChtSize+config.ChtConfirms), protocol, waitIndexers, nil, 0, false, false, true) + // Generate 128+1 blocks (totally 1 CHT section) + netconfig := testnetConfig{ + blocks: int(config.ChtSize + config.ChtConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) defer tearDown() expected := config.ChtSize + config.ChtConfirms @@ -77,10 +84,11 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { // Register the assembled checkpoint into oracle. header := server.backend.Blockchain().CurrentHeader() - data := append([]byte{0x19, 0x00}, append(registrarAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...) + data := append([]byte{0x19, 0x00}, append(oracleAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...) 
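+	// The signed payload follows the EIP-191 "data with intended validator" layout:
+	// 0x19 0x00 ++ oracle address ++ 8 zero bytes (the index of the first section)
+	// ++ checkpoint hash, which is what the oracle contract recovers the signer from.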
sig, _ := crypto.Sign(crypto.Keccak256(data), signerKey) sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper - if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil { + auth, _ := bind.NewKeyedTransactorWithChainID(signerKey, big.NewInt(1337)) + if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(auth, cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil { t.Error("register checkpoint failed", err) } server.backend.Commit() @@ -99,8 +107,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { } done := make(chan error) - client.handler.syncDone = func() { - header := client.handler.backend.blockchain.CurrentHeader() + client.handler.syncEnd = func(header *types.Header) { if header.Number.Uint64() == expected { done <- nil } else { @@ -127,10 +134,10 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) { } } -func TestMissOracleBackend(t *testing.T) { testMissOracleBackend(t, true) } -func TestMissOracleBackendNoCheckpoint(t *testing.T) { testMissOracleBackend(t, false) } +func TestMissOracleBackendLES3(t *testing.T) { testMissOracleBackend(t, true, lpv3) } +func TestMissOracleBackendNoCheckpointLES3(t *testing.T) { testMissOracleBackend(t, false, lpv3) } -func testMissOracleBackend(t *testing.T, hasCheckpoint bool) { +func testMissOracleBackend(t *testing.T, hasCheckpoint bool, protocol int) { config := light.TestServerIndexerConfig waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { @@ -143,8 +150,14 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool) { time.Sleep(10 * time.Millisecond) } } - // Generate 512+4 blocks (totally 1 CHT sections) - server, client, tearDown := newClientServerEnv(t, int(config.ChtSize+config.ChtConfirms), 3, waitIndexers, nil, 0, false, false, true) + // Generate 128+1 blocks (totally 1 CHT section) + netconfig := testnetConfig{ + blocks: int(config.ChtSize + config.ChtConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) defer tearDown() expected := config.ChtSize + config.ChtConfirms @@ -159,10 +172,11 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool) { // Register the assembled checkpoint into oracle. header := server.backend.Blockchain().CurrentHeader() - data := append([]byte{0x19, 0x00}, append(registrarAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...) + data := append([]byte{0x19, 0x00}, append(oracleAddr.Bytes(), append([]byte{0, 0, 0, 0, 0, 0, 0, 0}, cp.Hash().Bytes()...)...)...) 
sig, _ := crypto.Sign(crypto.Keccak256(data), signerKey) sig[64] += 27 // Transform V from 0/1 to 27/28 according to the yellow paper - if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(bind.NewKeyedTransactor(signerKey), cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil { + auth, _ := bind.NewKeyedTransactorWithChainID(signerKey, big.NewInt(1337)) + if _, err := server.handler.server.oracle.Contract().RegisterCheckpoint(auth, cp.SectionIndex, cp.Hash().Bytes(), new(big.Int).Sub(header.Number, big.NewInt(1)), header.ParentHash, [][]byte{sig}); err != nil { t.Error("register checkpoint failed", err) } server.backend.Commit() @@ -196,8 +210,7 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool) { } done := make(chan error) - client.handler.syncDone = func() { - header := client.handler.backend.blockchain.CurrentHeader() + client.handler.syncEnd = func(header *types.Header) { if header.Number.Uint64() == expected { done <- nil } else { @@ -218,3 +231,160 @@ func testMissOracleBackend(t *testing.T, hasCheckpoint bool) { t.Error("checkpoint syncing timeout") } } + +func TestSyncFromConfiguredCheckpointLES3(t *testing.T) { testSyncFromConfiguredCheckpoint(t, lpv3) } + +func testSyncFromConfiguredCheckpoint(t *testing.T, protocol int) { + config := light.TestServerIndexerConfig + + waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { + for { + cs, _, _ := cIndexer.Sections() + bts, _, _ := btIndexer.Sections() + if cs >= 2 && bts >= 2 { + break + } + time.Sleep(10 * time.Millisecond) + } + } + // Generate 256+1 blocks (totally 2 CHT sections) + netconfig := testnetConfig{ + blocks: int(2*config.ChtSize + config.ChtConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) + defer tearDown() + + // Configure the local checkpoint(the first section) + head := server.handler.blockchain.GetHeaderByNumber(config.ChtSize - 1).Hash() + cp := ¶ms.TrustedCheckpoint{ + SectionIndex: 0, + SectionHead: head, + CHTRoot: light.GetChtRoot(server.db, 0, head), + BloomRoot: light.GetBloomTrieRoot(server.db, 0, head), + } + client.handler.backend.config.SyncFromCheckpoint = true + client.handler.backend.config.Checkpoint = cp + client.handler.checkpoint = cp + client.handler.backend.blockchain.AddTrustedCheckpoint(cp) + + var ( + start = make(chan error, 1) + end = make(chan error, 1) + expectStart = config.ChtSize - 1 + expectEnd = 2*config.ChtSize + config.ChtConfirms + ) + client.handler.syncStart = func(header *types.Header) { + if header.Number.Uint64() == expectStart { + start <- nil + } else { + start <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expectStart, header.Number) + } + } + client.handler.syncEnd = func(header *types.Header) { + if header.Number.Uint64() == expectEnd { + end <- nil + } else { + end <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expectEnd, header.Number) + } + } + // Create connected peer pair. 
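+	// Connecting the peers kicks off synchronisation; with SyncFromCheckpoint set,
+	// syncing is expected to start at the configured checkpoint head (block
+	// ChtSize-1) and to end at the server's chain head.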
+ if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler); err != nil { + t.Fatalf("Failed to connect testing peers %v", err) + } + + select { + case err := <-start: + if err != nil { + t.Error("sync failed", err) + } + return + case <-time.NewTimer(10 * time.Second).C: + t.Error("checkpoint syncing timeout") + } + + select { + case err := <-end: + if err != nil { + t.Error("sync failed", err) + } + return + case <-time.NewTimer(10 * time.Second).C: + t.Error("checkpoint syncing timeout") + } +} + +func TestSyncAll(t *testing.T) { testSyncAll(t, lpv3) } + +func testSyncAll(t *testing.T, protocol int) { + config := light.TestServerIndexerConfig + + waitIndexers := func(cIndexer, bIndexer, btIndexer *core.ChainIndexer) { + for { + cs, _, _ := cIndexer.Sections() + bts, _, _ := btIndexer.Sections() + if cs >= 2 && bts >= 2 { + break + } + time.Sleep(10 * time.Millisecond) + } + } + // Generate 256+1 blocks (totally 2 CHT sections) + netconfig := testnetConfig{ + blocks: int(2*config.ChtSize + config.ChtConfirms), + protocol: protocol, + indexFn: waitIndexers, + nopruning: true, + } + server, client, tearDown := newClientServerEnv(t, netconfig) + defer tearDown() + + client.handler.backend.config.SyncFromCheckpoint = true + + var ( + start = make(chan error, 1) + end = make(chan error, 1) + expectStart = uint64(0) + expectEnd = 2*config.ChtSize + config.ChtConfirms + ) + client.handler.syncStart = func(header *types.Header) { + if header.Number.Uint64() == expectStart { + start <- nil + } else { + start <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expectStart, header.Number) + } + } + client.handler.syncEnd = func(header *types.Header) { + if header.Number.Uint64() == expectEnd { + end <- nil + } else { + end <- fmt.Errorf("blockchain length mismatch, want %d, got %d", expectEnd, header.Number) + } + } + // Create connected peer pair. + if _, _, err := newTestPeerPair("peer", 2, server.handler, client.handler); err != nil { + t.Fatalf("Failed to connect testing peers %v", err) + } + + select { + case err := <-start: + if err != nil { + t.Error("sync failed", err) + } + return + case <-time.NewTimer(10 * time.Second).C: + t.Error("checkpoint syncing timeout") + } + + select { + case err := <-end: + if err != nil { + t.Error("sync failed", err) + } + return + case <-time.NewTimer(10 * time.Second).C: + t.Error("checkpoint syncing timeout") + } +} diff --git a/les/test_helper.go b/les/test_helper.go index 5a8d64f767..39313b1e3b 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -14,8 +14,9 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// This file contains some shares testing functionality, common to multiple -// different files and modules being tested. +// This file contains some shares testing functionality, common to multiple +// different files and modules being tested. Client based network and Server +// based network can be created easily with available APIs. 
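The test environments used by the sync tests above are now assembled from a single testnetConfig value instead of a long positional argument list. A minimal sketch of the pattern (field names come from this patch; the block count and flags are illustrative):

	netconfig := testnetConfig{
		blocks:    4,    // pre-generate a few blocks on the server side
		protocol:  lpv3, // LES protocol version under test
		connect:   true, // wire up the client and server peers immediately
		nopruning: true, // keep the CHT/bloom trie index data around
	}
	server, client, tearDown := newClientServerEnv(t, netconfig)
	defer tearDown()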
package les @@ -35,10 +36,11 @@ import ( "github.com/ethereum/go-ethereum/consensus/ethash" "github.com/ethereum/go-ethereum/contracts/checkpointoracle/contract" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/forkid" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/les/checkpointoracle" @@ -67,10 +69,10 @@ var ( testEventEmitterCode = common.Hex2Bytes("60606040523415600e57600080fd5b7f57050ab73f6b9ebdd9f76b8d4997793f48cf956e965ee070551b9ca0bb71584e60405160405180910390a160358060476000396000f3006060604052600080fd00a165627a7a723058203f727efcad8b5811f8cb1fc2620ce5e8c63570d697aef968172de296ea3994140029") - // Checkpoint registrar relative - registrarAddr common.Address - signerKey, _ = crypto.GenerateKey() - signerAddr = crypto.PubkeyToAddress(signerKey.PublicKey) + // Checkpoint oracle relative fields + oracleAddr common.Address + signerKey, _ = crypto.GenerateKey() + signerAddr = crypto.PubkeyToAddress(signerKey.PublicKey) ) var ( @@ -111,13 +113,23 @@ func prepare(n int, backend *backends.SimulatedBackend) { for i := 0; i < n; i++ { switch i { case 0: + // Builtin-block + // number: 1 + // txs: 2 + // deploy checkpoint contract - registrarAddr, _, _, _ = contract.DeployCheckpointOracle(bind.NewKeyedTransactor(bankKey), backend, []common.Address{signerAddr}, sectionSize, processConfirms, big.NewInt(1)) + auth, _ := bind.NewKeyedTransactorWithChainID(bankKey, big.NewInt(1337)) + oracleAddr, _, _, _ = contract.DeployCheckpointOracle(auth, backend, []common.Address{signerAddr}, sectionSize, processConfirms, big.NewInt(1)) + // bankUser transfers some ether to user1 nonce, _ := backend.PendingNonceAt(ctx, bankAddr) tx, _ := types.SignTx(types.NewTransaction(nonce, userAddr1, big.NewInt(10000), params.TxGas, nil, nil), signer, bankKey) backend.SendTransaction(ctx, tx) case 1: + // Builtin-block + // number: 2 + // txs: 4 + bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr) userNonce1, _ := backend.PendingNonceAt(ctx, userAddr1) @@ -138,6 +150,10 @@ func prepare(n int, backend *backends.SimulatedBackend) { tx4, _ := types.SignTx(types.NewContractCreation(userNonce1+2, big.NewInt(0), 200000, big.NewInt(0), testEventEmitterCode), signer, userKey1) backend.SendTransaction(ctx, tx4) case 2: + // Builtin-block + // number: 3 + // txs: 2 + // bankUser transfer some ether to signer bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr) tx1, _ := types.SignTx(types.NewTransaction(bankNonce, signerAddr, big.NewInt(1000000000), params.TxGas, nil, nil), signer, bankKey) @@ -148,6 +164,10 @@ func prepare(n int, backend *backends.SimulatedBackend) { tx2, _ := types.SignTx(types.NewTransaction(bankNonce+1, testContractAddr, big.NewInt(0), 100000, nil, data), signer, bankKey) backend.SendTransaction(ctx, tx2) case 3: + // Builtin-block + // number: 4 + // txs: 1 + // invoke test contract bankNonce, _ := backend.PendingNonceAt(ctx, bankAddr) data := common.Hex2Bytes("C16431B900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000002") @@ -162,7 +182,7 @@ func prepare(n int, backend *backends.SimulatedBackend) { func testIndexers(db ethdb.Database, odr light.OdrBackend, config *light.IndexerConfig, disablePruning bool) 
[]*core.ChainIndexer { var indexers [3]*core.ChainIndexer indexers[0] = light.NewChtIndexer(db, odr, config.ChtSize, config.ChtConfirms, disablePruning) - indexers[1] = eth.NewBloomIndexer(db, config.BloomSize, config.BloomConfirms) + indexers[1] = core.NewBloomIndexer(db, config.BloomSize, config.BloomConfirms) indexers[2] = light.NewBloomTrieIndexer(db, odr, config.BloomSize, config.BloomTrieSize, disablePruning) // make bloomTrieIndexer as a child indexer of bloom indexer. indexers[1].AddChildIndexer(indexers[2]) @@ -203,7 +223,7 @@ func newTestClientHandler(backend *backends.SimulatedBackend, odr *LesOdr, index client := &LightEthereum{ lesCommons: lesCommons{ genesis: genesis.Hash(), - config: ð.Config{LightPeers: 100, NetworkId: NetworkId}, + config: ðconfig.Config{LightPeers: 100, NetworkId: NetworkId}, chainConfig: params.AllEthashProtocolChanges, iConfig: light.TestClientIndexerConfig, chainDb: db, @@ -268,7 +288,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da server := &LesServer{ lesCommons: lesCommons{ genesis: genesis.Hash(), - config: ð.Config{LightPeers: 100, NetworkId: NetworkId}, + config: ðconfig.Config{LightPeers: 100, NetworkId: NetworkId}, chainConfig: params.AllEthashProtocolChanges, iConfig: light.TestServerIndexerConfig, chainDb: db, @@ -308,44 +328,61 @@ type testPeer struct { app *p2p.MsgPipeRW // Application layer reader/writer to simulate the local side } -// newTestPeer creates a new peer registered at the given protocol manager. -func newTestPeer(t *testing.T, name string, version int, handler *serverHandler, shake bool, testCost uint64) (*testPeer, <-chan error) { - // Create a message pipe to communicate through - app, net := p2p.MsgPipe() - - // Generate a random id and create the peer - var id enode.ID - rand.Read(id[:]) - peer := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net) +// handshakeWithServer executes the handshake with the remote server peer. +func (p *testPeer) handshakeWithServer(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID) { + // It only works for the simulated client peer + if p.cpeer == nil { + t.Fatal("handshake for client peer only") + } + var sendList keyValueList + sendList = sendList.add("protocolVersion", uint64(p.cpeer.version)) + sendList = sendList.add("networkId", uint64(NetworkId)) + sendList = sendList.add("headTd", td) + sendList = sendList.add("headHash", head) + sendList = sendList.add("headNum", headNum) + sendList = sendList.add("genesisHash", genesis) + if p.cpeer.version >= lpv4 { + sendList = sendList.add("forkID", &forkID) + } + if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil { + t.Fatalf("status recv: %v", err) + } + if err := p2p.Send(p.app, StatusMsg, sendList); err != nil { + t.Fatalf("status send: %v", err) + } +} - // Start the peer on a new thread - errCh := make(chan error, 1) - go func() { - select { - case <-handler.closeCh: - errCh <- p2p.DiscQuitting - case errCh <- handler.handle(peer): - } - }() - tp := &testPeer{ - app: app, - net: net, - cpeer: peer, +// handshakeWithClient executes the handshake with the remote client peer. 
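+// Although it operates on a testPeer wrapping a serverPeer (p.speer), the helper
+// plays the server's role in the exchange: it sends the server-side status fields
+// (serveHeaders, flow control parameters, cost list) to the client handler under test.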
+func (p *testPeer) handshakeWithClient(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, forkID forkid.ID, costList RequestCostList, recentTxLookup uint64) { + // It only works for the simulated client peer + if p.speer == nil { + t.Fatal("handshake for server peer only") } - // Execute any implicitly requested handshakes and return - if shake { - // Customize the cost table if required. - if testCost != 0 { - handler.server.costTracker.testCostList = testCostList(testCost) - } - var ( - genesis = handler.blockchain.Genesis() - head = handler.blockchain.CurrentHeader() - td = handler.blockchain.GetTd(head.Hash(), head.Number.Uint64()) - ) - tp.handshake(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), testCostList(testCost)) + var sendList keyValueList + sendList = sendList.add("protocolVersion", uint64(p.speer.version)) + sendList = sendList.add("networkId", uint64(NetworkId)) + sendList = sendList.add("headTd", td) + sendList = sendList.add("headHash", head) + sendList = sendList.add("headNum", headNum) + sendList = sendList.add("genesisHash", genesis) + sendList = sendList.add("serveHeaders", nil) + sendList = sendList.add("serveChainSince", uint64(0)) + sendList = sendList.add("serveStateSince", uint64(0)) + sendList = sendList.add("serveRecentState", uint64(core.TriesInMemory-4)) + sendList = sendList.add("txRelay", nil) + sendList = sendList.add("flowControl/BL", testBufLimit) + sendList = sendList.add("flowControl/MRR", testBufRecharge) + sendList = sendList.add("flowControl/MRC", costList) + if p.speer.version >= lpv4 { + sendList = sendList.add("forkID", &forkID) + sendList = sendList.add("recentTxLookup", recentTxLookup) + } + if err := p2p.ExpectMsg(p.app, StatusMsg, nil); err != nil { + t.Fatalf("status recv: %v", err) + } + if err := p2p.Send(p.app, StatusMsg, sendList); err != nil { + t.Fatalf("status send: %v", err) } - return tp, errCh } // close terminates the local side of the peer, notifying the remote protocol @@ -399,42 +436,9 @@ func newTestPeerPair(name string, version int, server *serverHandler, client *cl return &testPeer{cpeer: peer1, net: net, app: app}, &testPeer{speer: peer2, net: app, app: net}, nil } -// handshake simulates a trivial handshake that expects the same state from the -// remote side as we are simulating locally. 
-func (p *testPeer) handshake(t *testing.T, td *big.Int, head common.Hash, headNum uint64, genesis common.Hash, costList RequestCostList) { - var expList keyValueList - expList = expList.add("protocolVersion", uint64(p.cpeer.version)) - expList = expList.add("networkId", uint64(NetworkId)) - expList = expList.add("headTd", td) - expList = expList.add("headHash", head) - expList = expList.add("headNum", headNum) - expList = expList.add("genesisHash", genesis) - sendList := make(keyValueList, len(expList)) - copy(sendList, expList) - expList = expList.add("serveHeaders", nil) - expList = expList.add("serveChainSince", uint64(0)) - expList = expList.add("serveStateSince", uint64(0)) - expList = expList.add("serveRecentState", uint64(core.TriesInMemory-4)) - expList = expList.add("txRelay", nil) - expList = expList.add("flowControl/BL", testBufLimit) - expList = expList.add("flowControl/MRR", testBufRecharge) - expList = expList.add("flowControl/MRC", costList) - - if err := p2p.ExpectMsg(p.app, StatusMsg, expList); err != nil { - t.Fatalf("status recv: %v", err) - } - if err := p2p.Send(p.app, StatusMsg, sendList); err != nil { - t.Fatalf("status send: %v", err) - } - p.cpeer.fcParams = flowcontrol.ServerParams{ - BufLimit: testBufLimit, - MinRecharge: testBufRecharge, - } -} - type indexerCallback func(*core.ChainIndexer, *core.ChainIndexer, *core.ChainIndexer) -// testClient represents a client for testing with necessary auxiliary fields. +// testClient represents a client object for testing with necessary auxiliary fields. type testClient struct { clock mclock.Clock db ethdb.Database @@ -446,7 +450,58 @@ type testClient struct { bloomTrieIndexer *core.ChainIndexer } -// testServer represents a server for testing with necessary auxiliary fields. +// newRawPeer creates a new server peer connects to the server and do the handshake. +func (client *testClient) newRawPeer(t *testing.T, name string, version int, recentTxLookup uint64) (*testPeer, func(), <-chan error) { + // Create a message pipe to communicate through + app, net := p2p.MsgPipe() + + // Generate a random id and create the peer + var id enode.ID + rand.Read(id[:]) + peer := newServerPeer(version, NetworkId, false, p2p.NewPeer(id, name, nil), net) + + // Start the peer on a new thread + errCh := make(chan error, 1) + go func() { + select { + case <-client.handler.closeCh: + errCh <- p2p.DiscQuitting + case errCh <- client.handler.handle(peer): + } + }() + tp := &testPeer{ + app: app, + net: net, + speer: peer, + } + var ( + genesis = client.handler.backend.blockchain.Genesis() + head = client.handler.backend.blockchain.CurrentHeader() + td = client.handler.backend.blockchain.GetTd(head.Hash(), head.Number.Uint64()) + ) + forkID := forkid.NewID(client.handler.backend.blockchain.Config(), genesis.Hash(), head.Number.Uint64()) + tp.handshakeWithClient(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID, testCostList(0), recentTxLookup) // disable flow control by default + + // Ensure the connection is established or exits when any error occurs + for { + select { + case <-errCh: + return nil, nil, nil + default: + } + if atomic.LoadUint32(&peer.serving) == 1 { + break + } + time.Sleep(50 * time.Millisecond) + } + closePeer := func() { + tp.speer.close() + tp.close() + } + return tp, closePeer, errCh +} + +// testServer represents a server object for testing with necessary auxiliary fields. 
type testServer struct { clock mclock.Clock backend *backends.SimulatedBackend @@ -459,89 +514,109 @@ type testServer struct { bloomTrieIndexer *core.ChainIndexer } -func newServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallback, simClock bool, newPeer bool, testCost uint64) (*testServer, func()) { - db := rawdb.NewMemoryDatabase() - indexers := testIndexers(db, nil, light.TestServerIndexerConfig, true) +// newRawPeer creates a new client peer connects to the server and do the handshake. +func (server *testServer) newRawPeer(t *testing.T, name string, version int) (*testPeer, func(), <-chan error) { + // Create a message pipe to communicate through + app, net := p2p.MsgPipe() - var clock mclock.Clock = &mclock.System{} - if simClock { - clock = &mclock.Simulated{} - } - handler, b := newTestServerHandler(blocks, indexers, db, clock) + // Generate a random id and create the peer + var id enode.ID + rand.Read(id[:]) + peer := newClientPeer(version, NetworkId, p2p.NewPeer(id, name, nil), net) - var peer *testPeer - if newPeer { - peer, _ = newTestPeer(t, "peer", protocol, handler, true, testCost) + // Start the peer on a new thread + errCh := make(chan error, 1) + go func() { + select { + case <-server.handler.closeCh: + errCh <- p2p.DiscQuitting + case errCh <- server.handler.handle(peer): + } + }() + tp := &testPeer{ + app: app, + net: net, + cpeer: peer, } + var ( + genesis = server.handler.blockchain.Genesis() + head = server.handler.blockchain.CurrentHeader() + td = server.handler.blockchain.GetTd(head.Hash(), head.Number.Uint64()) + ) + forkID := forkid.NewID(server.handler.blockchain.Config(), genesis.Hash(), head.Number.Uint64()) + tp.handshakeWithServer(t, td, head.Hash(), head.Number.Uint64(), genesis.Hash(), forkID) - cIndexer, bIndexer, btIndexer := indexers[0], indexers[1], indexers[2] - cIndexer.Start(handler.blockchain) - bIndexer.Start(handler.blockchain) - - // Wait until indexers generate enough index data. - if callback != nil { - callback(cIndexer, bIndexer, btIndexer) - } - server := &testServer{ - clock: clock, - backend: b, - db: db, - peer: peer, - handler: handler, - chtIndexer: cIndexer, - bloomIndexer: bIndexer, - bloomTrieIndexer: btIndexer, - } - teardown := func() { - if newPeer { - peer.close() - peer.cpeer.close() - b.Close() + // Ensure the connection is established or exits when any error occurs + for { + select { + case <-errCh: + return nil, nil, nil + default: + } + if atomic.LoadUint32(&peer.serving) == 1 { + break } - cIndexer.Close() - bIndexer.Close() + time.Sleep(50 * time.Millisecond) + } + closePeer := func() { + tp.cpeer.close() + tp.close() } - return server, teardown + return tp, closePeer, errCh } -func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexerCallback, ulcServers []string, ulcFraction int, simClock bool, connect bool, disablePruning bool) (*testServer, *testClient, func()) { - sdb, cdb := rawdb.NewMemoryDatabase(), rawdb.NewMemoryDatabase() - speers := newServerPeerSet() +// testnetConfig wraps all the configurations for testing network. 
+type testnetConfig struct { + blocks int + protocol int + indexFn indexerCallback + ulcServers []string + ulcFraction int + simClock bool + connect bool + nopruning bool +} +func newClientServerEnv(t *testing.T, config testnetConfig) (*testServer, *testClient, func()) { + var ( + sdb = rawdb.NewMemoryDatabase() + cdb = rawdb.NewMemoryDatabase() + speers = newServerPeerSet() + ) var clock mclock.Clock = &mclock.System{} - if simClock { + if config.simClock { clock = &mclock.Simulated{} } dist := newRequestDistributor(speers, clock) rm := newRetrieveManager(speers, dist, func() time.Duration { return time.Millisecond * 500 }) - odr := NewLesOdr(cdb, light.TestClientIndexerConfig, rm) + odr := NewLesOdr(cdb, light.TestClientIndexerConfig, speers, rm) sindexers := testIndexers(sdb, nil, light.TestServerIndexerConfig, true) - cIndexers := testIndexers(cdb, odr, light.TestClientIndexerConfig, disablePruning) + cIndexers := testIndexers(cdb, odr, light.TestClientIndexerConfig, config.nopruning) scIndexer, sbIndexer, sbtIndexer := sindexers[0], sindexers[1], sindexers[2] ccIndexer, cbIndexer, cbtIndexer := cIndexers[0], cIndexers[1], cIndexers[2] odr.SetIndexers(ccIndexer, cbIndexer, cbtIndexer) - server, b := newTestServerHandler(blocks, sindexers, sdb, clock) - client := newTestClientHandler(b, odr, cIndexers, cdb, speers, ulcServers, ulcFraction) + server, b := newTestServerHandler(config.blocks, sindexers, sdb, clock) + client := newTestClientHandler(b, odr, cIndexers, cdb, speers, config.ulcServers, config.ulcFraction) scIndexer.Start(server.blockchain) sbIndexer.Start(server.blockchain) ccIndexer.Start(client.backend.blockchain) cbIndexer.Start(client.backend.blockchain) - if callback != nil { - callback(scIndexer, sbIndexer, sbtIndexer) + if config.indexFn != nil { + config.indexFn(scIndexer, sbIndexer, sbtIndexer) } var ( err error speer, cpeer *testPeer ) - if connect { + if config.connect { done := make(chan struct{}) - client.syncDone = func() { close(done) } - cpeer, speer, err = newTestPeerPair("peer", protocol, server, client) + client.syncEnd = func(_ *types.Header) { close(done) } + cpeer, speer, err = newTestPeerPair("peer", config.protocol, server, client) if err != nil { t.Fatalf("Failed to connect testing peers %v", err) } @@ -571,7 +646,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer bloomTrieIndexer: cbtIndexer, } teardown := func() { - if connect { + if config.connect { speer.close() cpeer.close() cpeer.cpeer.close() @@ -585,3 +660,7 @@ func newClientServerEnv(t *testing.T, blocks int, protocol int, callback indexer } return s, c, teardown } + +func NewFuzzerPeer(version int) *clientPeer { + return newClientPeer(version, 0, p2p.NewPeer(enode.ID{}, "", nil), nil) +} diff --git a/les/txrelay.go b/les/txrelay.go index 57f2412eba..9d29b2f234 100644 --- a/les/txrelay.go +++ b/les/txrelay.go @@ -25,13 +25,8 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -type ltrInfo struct { - tx *types.Transaction - sentTo map[*serverPeer]struct{} -} - type lesTxRelay struct { - txSent map[common.Hash]*ltrInfo + txSent map[common.Hash]*types.Transaction txPending map[common.Hash]struct{} peerList []*serverPeer peerStartPos int @@ -43,7 +38,7 @@ type lesTxRelay struct { func newLesTxRelay(ps *serverPeerSet, retriever *retrieveManager) *lesTxRelay { r := &lesTxRelay{ - txSent: make(map[common.Hash]*ltrInfo), + txSent: make(map[common.Hash]*types.Transaction), txPending: make(map[common.Hash]struct{}), retriever: retriever, stop: make(chan struct{}), @@ 
-80,8 +75,7 @@ func (ltrx *lesTxRelay) unregisterPeer(p *serverPeer) { } } -// send sends a list of transactions to at most a given number of peers at -// once, never resending any particular transaction to the same peer twice +// send sends a list of transactions to at most a given number of peers. func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { sendTo := make(map[*serverPeer]types.Transactions) @@ -92,26 +86,18 @@ func (ltrx *lesTxRelay) send(txs types.Transactions, count int) { for _, tx := range txs { hash := tx.Hash() - ltr, ok := ltrx.txSent[hash] + _, ok := ltrx.txSent[hash] if !ok { - ltr = <rInfo{ - tx: tx, - sentTo: make(map[*serverPeer]struct{}), - } - ltrx.txSent[hash] = ltr + ltrx.txSent[hash] = tx ltrx.txPending[hash] = struct{}{} } - if len(ltrx.peerList) > 0 { cnt := count pos := ltrx.peerStartPos for { peer := ltrx.peerList[pos] - if _, ok := ltr.sentTo[peer]; !ok { - sendTo[peer] = append(sendTo[peer], tx) - ltr.sentTo[peer] = struct{}{} - cnt-- - } + sendTo[peer] = append(sendTo[peer], tx) + cnt-- if cnt == 0 { break // sent it to the desired number of peers } @@ -174,7 +160,7 @@ func (ltrx *lesTxRelay) NewHead(head common.Hash, mined []common.Hash, rollback txs := make(types.Transactions, len(ltrx.txPending)) i := 0 for hash := range ltrx.txPending { - txs[i] = ltrx.txSent[hash].tx + txs[i] = ltrx.txSent[hash] i++ } ltrx.send(txs, 1) diff --git a/les/ulc_test.go b/les/ulc_test.go index 657b13db2c..d7308fa593 100644 --- a/les/ulc_test.go +++ b/les/ulc_test.go @@ -126,7 +126,12 @@ func connect(server *serverHandler, serverId enode.ID, client *clientHandler, pr // newTestServerPeer creates server peer. func newTestServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *enode.Node, func()) { - s, teardown := newServerEnv(t, blocks, protocol, nil, false, false, 0) + netconfig := testnetConfig{ + blocks: blocks, + protocol: protocol, + nopruning: true, + } + s, _, teardown := newClientServerEnv(t, netconfig) key, err := crypto.GenerateKey() if err != nil { t.Fatal("generate key err:", err) @@ -138,6 +143,12 @@ func newTestServerPeer(t *testing.T, blocks int, protocol int) (*testServer, *en // newTestLightPeer creates node with light sync mode func newTestLightPeer(t *testing.T, protocol int, ulcServers []string, ulcFraction int) (*testClient, func()) { - _, c, teardown := newClientServerEnv(t, 0, protocol, nil, ulcServers, ulcFraction, false, false, true) + netconfig := testnetConfig{ + protocol: protocol, + ulcServers: ulcServers, + ulcFraction: ulcFraction, + nopruning: true, + } + _, c, teardown := newClientServerEnv(t, netconfig) return c, teardown } diff --git a/les/utils/expiredvalue.go b/les/utils/expiredvalue.go index 55e82cee48..3fd52616fa 100644 --- a/les/utils/expiredvalue.go +++ b/les/utils/expiredvalue.go @@ -86,10 +86,15 @@ func (e *ExpiredValue) Add(amount int64, logOffset Fixed64) int64 { e.Exp = integer } if base >= 0 || uint64(-base) <= e.Base { - // This is a temporary fix to circumvent a golang - // uint conversion issue on arm64, which needs to - // be investigated further. FIXME - e.Base = uint64(int64(e.Base) + int64(base)) + // The conversion from negative float64 to + // uint64 is undefined in golang, and doesn't + // work with ARMv8. 
More details at: + // https://github.com/golang/go/issues/43047 + if base >= 0 { + e.Base += uint64(base) + } else { + e.Base -= uint64(-base) + } return amount } net := int64(-float64(e.Base) / factor) diff --git a/les/utils/limiter.go b/les/utils/limiter.go new file mode 100644 index 0000000000..0cc2d7b262 --- /dev/null +++ b/les/utils/limiter.go @@ -0,0 +1,405 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package utils + +import ( + "sort" + "sync" + + "github.com/ethereum/go-ethereum/p2p/enode" +) + +const maxSelectionWeight = 1000000000 // maximum selection weight of each individual node/address group + +// Limiter protects a network request serving mechanism from denial-of-service attacks. +// It limits the total amount of resources used for serving requests while ensuring that +// the most valuable connections always have a reasonable chance of being served. +type Limiter struct { + lock sync.Mutex + cond *sync.Cond + quit bool + + nodes map[enode.ID]*nodeQueue + addresses map[string]*addressGroup + addressSelect, valueSelect *WeightedRandomSelect + maxValue float64 + maxCost, sumCost, sumCostLimit uint + selectAddressNext bool +} + +// nodeQueue represents queued requests coming from a single node ID +type nodeQueue struct { + queue []request // always nil if penaltyCost != 0 + id enode.ID + address string + value float64 + flatWeight, valueWeight uint64 // current selection weights in the address/value selectors + sumCost uint // summed cost of requests queued by the node + penaltyCost uint // cumulative cost of dropped requests since last processed request + groupIndex int +} + +// addressGroup is a group of node IDs that have sent their last requests from the same +// network address +type addressGroup struct { + nodes []*nodeQueue + nodeSelect *WeightedRandomSelect + sumFlatWeight, groupWeight uint64 +} + +// request represents an incoming request scheduled for processing +type request struct { + process chan chan struct{} + cost uint +} + +// flatWeight distributes weights equally between each active network address +func flatWeight(item interface{}) uint64 { return item.(*nodeQueue).flatWeight } + +// add adds the node queue to the address group. It is the caller's responsibility to +// add the address group to the address map and the address selector if it wasn't +// there before. +func (ag *addressGroup) add(nq *nodeQueue) { + if nq.groupIndex != -1 { + panic("added node queue is already in an address group") + } + l := len(ag.nodes) + nq.groupIndex = l + ag.nodes = append(ag.nodes, nq) + ag.sumFlatWeight += nq.flatWeight + ag.groupWeight = ag.sumFlatWeight / uint64(l+1) + ag.nodeSelect.Update(ag.nodes[l]) +} + +// update updates the selection weight of the node queue inside the address group. 
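In isolation, the sign-branching pattern adopted in the expiredvalue.go hunk above looks like this (a sketch, not part of this change; addSignedToUint is an illustrative name):

	// addSignedToUint applies a signed float64 delta to an unsigned accumulator.
	// Converting a negative float64 directly to uint64 is not portable across
	// architectures (see golang/go#43047), so the magnitude is converted instead.
	func addSignedToUint(base uint64, delta float64) uint64 {
		if delta >= 0 {
			return base + uint64(delta)
		}
		return base - uint64(-delta)
	}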
+// It is the caller's responsibility to update the group's selection weight in the +// address selector. +func (ag *addressGroup) update(nq *nodeQueue, weight uint64) { + if nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq { + panic("updated node queue is not in this address group") + } + ag.sumFlatWeight += weight - nq.flatWeight + nq.flatWeight = weight + ag.groupWeight = ag.sumFlatWeight / uint64(len(ag.nodes)) + ag.nodeSelect.Update(nq) +} + +// remove removes the node queue from the address group. It is the caller's responsibility +// to remove the address group from the address map if it is empty. +func (ag *addressGroup) remove(nq *nodeQueue) { + if nq.groupIndex == -1 || nq.groupIndex >= len(ag.nodes) || ag.nodes[nq.groupIndex] != nq { + panic("removed node queue is not in this address group") + } + + l := len(ag.nodes) - 1 + if nq.groupIndex != l { + ag.nodes[nq.groupIndex] = ag.nodes[l] + ag.nodes[nq.groupIndex].groupIndex = nq.groupIndex + } + nq.groupIndex = -1 + ag.nodes = ag.nodes[:l] + ag.sumFlatWeight -= nq.flatWeight + if l >= 1 { + ag.groupWeight = ag.sumFlatWeight / uint64(l) + } else { + ag.groupWeight = 0 + } + ag.nodeSelect.Remove(nq) +} + +// choose selects one of the node queues belonging to the address group +func (ag *addressGroup) choose() *nodeQueue { + return ag.nodeSelect.Choose().(*nodeQueue) +} + +// NewLimiter creates a new Limiter +func NewLimiter(sumCostLimit uint) *Limiter { + l := &Limiter{ + addressSelect: NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*addressGroup).groupWeight }), + valueSelect: NewWeightedRandomSelect(func(item interface{}) uint64 { return item.(*nodeQueue).valueWeight }), + nodes: make(map[enode.ID]*nodeQueue), + addresses: make(map[string]*addressGroup), + sumCostLimit: sumCostLimit, + } + l.cond = sync.NewCond(&l.lock) + go l.processLoop() + return l +} + +// selectionWeights calculates the selection weights of a node for both the address and +// the value selector. The selection weight depends on the next request cost or the +// summed cost of recently dropped requests. +func (l *Limiter) selectionWeights(reqCost uint, value float64) (flatWeight, valueWeight uint64) { + if value > l.maxValue { + l.maxValue = value + } + if value > 0 { + // normalize value to <= 1 + value /= l.maxValue + } + if reqCost > l.maxCost { + l.maxCost = reqCost + } + relCost := float64(reqCost) / float64(l.maxCost) + var f float64 + if relCost <= 0.001 { + f = 1 + } else { + f = 0.001 / relCost + } + f *= maxSelectionWeight + flatWeight, valueWeight = uint64(f), uint64(f*value) + if flatWeight == 0 { + flatWeight = 1 + } + return +} + +// Add adds a new request to the node queue belonging to the given id. Value belongs +// to the requesting node. A higher value gives the request a higher chance of being +// served quickly in case of heavy load or a DDoS attack. Cost is a rough estimate +// of the serving cost of the request. A lower cost also gives the request a +// better chance. 
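As a worked example of the selectionWeights formula above (all inputs assumed): with maxSelectionWeight = 1,000,000,000, maxCost = 1000 and maxValue = 10, a request of cost 100 from a node of value 5 gives relCost = 0.1, so f = 0.001/0.1 * 1,000,000,000 = 10,000,000; the node gets flatWeight = 10,000,000 and, with its value normalized to 0.5, valueWeight = 5,000,000. Any request with relCost <= 0.001 receives the full maxSelectionWeight, so cheaper requests are strongly favoured by the selectors.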
+func (l *Limiter) Add(id enode.ID, address string, value float64, reqCost uint) chan chan struct{} { + l.lock.Lock() + defer l.lock.Unlock() + + process := make(chan chan struct{}, 1) + if l.quit { + close(process) + return process + } + if reqCost == 0 { + reqCost = 1 + } + if nq, ok := l.nodes[id]; ok { + if nq.queue != nil { + nq.queue = append(nq.queue, request{process, reqCost}) + nq.sumCost += reqCost + nq.value = value + if address != nq.address { + // known id sending request from a new address, move to different address group + l.removeFromGroup(nq) + l.addToGroup(nq, address) + } + } else { + // already waiting on a penalty, just add to the penalty cost and drop the request + nq.penaltyCost += reqCost + l.update(nq) + close(process) + return process + } + } else { + nq := &nodeQueue{ + queue: []request{{process, reqCost}}, + id: id, + value: value, + sumCost: reqCost, + groupIndex: -1, + } + nq.flatWeight, nq.valueWeight = l.selectionWeights(reqCost, value) + if len(l.nodes) == 0 { + l.cond.Signal() + } + l.nodes[id] = nq + if nq.valueWeight != 0 { + l.valueSelect.Update(nq) + } + l.addToGroup(nq, address) + } + l.sumCost += reqCost + if l.sumCost > l.sumCostLimit { + l.dropRequests() + } + return process +} + +// update updates the selection weights of the node queue +func (l *Limiter) update(nq *nodeQueue) { + var cost uint + if nq.queue != nil { + cost = nq.queue[0].cost + } else { + cost = nq.penaltyCost + } + flatWeight, valueWeight := l.selectionWeights(cost, nq.value) + ag := l.addresses[nq.address] + ag.update(nq, flatWeight) + l.addressSelect.Update(ag) + nq.valueWeight = valueWeight + l.valueSelect.Update(nq) +} + +// addToGroup adds the node queue to the given address group. The group is created if +// it does not exist yet. +func (l *Limiter) addToGroup(nq *nodeQueue, address string) { + nq.address = address + ag := l.addresses[address] + if ag == nil { + ag = &addressGroup{nodeSelect: NewWeightedRandomSelect(flatWeight)} + l.addresses[address] = ag + } + ag.add(nq) + l.addressSelect.Update(ag) +} + +// removeFromGroup removes the node queue from its address group +func (l *Limiter) removeFromGroup(nq *nodeQueue) { + ag := l.addresses[nq.address] + ag.remove(nq) + if len(ag.nodes) == 0 { + delete(l.addresses, nq.address) + } + l.addressSelect.Update(ag) +} + +// remove removes the node queue from its address group, the nodes map and the value +// selector +func (l *Limiter) remove(nq *nodeQueue) { + l.removeFromGroup(nq) + if nq.valueWeight != 0 { + l.valueSelect.Remove(nq) + } + delete(l.nodes, nq.id) +} + +// choose selects the next node queue to process. 
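The Add/process channel contract above can be consumed roughly as follows (a sketch written as if inside the utils package; serveRequest is a placeholder for the caller's handler and the value/cost inputs are assumed):

	// Sketch: gating request handling with the Limiter. The caller blocks on the
	// returned channel until the limiter schedules the request or drops it.
	func serveOne(l *Limiter, id enode.ID, addr string, value float64, cost uint) bool {
		ch := <-l.Add(id, addr, value, cost)
		if ch == nil {
			return false // dropped: queued cost exceeded the limit or the limiter was stopped
		}
		serveRequest(id) // placeholder: perform the actual request handling here
		close(ch)        // signal the limiter that processing has finished
		return true
	}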
+func (l *Limiter) choose() *nodeQueue { + if l.valueSelect.IsEmpty() || l.selectAddressNext { + if ag, ok := l.addressSelect.Choose().(*addressGroup); ok { + l.selectAddressNext = false + return ag.choose() + } + } + nq, _ := l.valueSelect.Choose().(*nodeQueue) + l.selectAddressNext = true + return nq +} + +// processLoop processes requests sequentially +func (l *Limiter) processLoop() { + l.lock.Lock() + defer l.lock.Unlock() + + for { + if l.quit { + for _, nq := range l.nodes { + for _, request := range nq.queue { + close(request.process) + } + } + return + } + nq := l.choose() + if nq == nil { + l.cond.Wait() + continue + } + if nq.queue != nil { + request := nq.queue[0] + nq.queue = nq.queue[1:] + nq.sumCost -= request.cost + l.sumCost -= request.cost + l.lock.Unlock() + ch := make(chan struct{}) + request.process <- ch + <-ch + l.lock.Lock() + if len(nq.queue) > 0 { + l.update(nq) + } else { + l.remove(nq) + } + } else { + // penalized queue removed, next request will be added to a clean queue + l.remove(nq) + } + } +} + +// Stop stops the processing loop. All queued and future requests are rejected. +func (l *Limiter) Stop() { + l.lock.Lock() + defer l.lock.Unlock() + + l.quit = true + l.cond.Signal() +} + +type ( + dropList []dropListItem + dropListItem struct { + nq *nodeQueue + priority float64 + } +) + +func (l dropList) Len() int { + return len(l) +} + +func (l dropList) Less(i, j int) bool { + return l[i].priority < l[j].priority +} + +func (l dropList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// dropRequests selects the nodes with the highest queued request cost to selection +// weight ratio and drops their queued request. The empty node queues stay in the +// selectors with a low selection weight in order to penalize these nodes. +func (l *Limiter) dropRequests() { + var ( + sumValue float64 + list dropList + ) + for _, nq := range l.nodes { + sumValue += nq.value + } + for _, nq := range l.nodes { + if nq.sumCost == 0 { + continue + } + w := 1 / float64(len(l.addresses)*len(l.addresses[nq.address].nodes)) + if sumValue > 0 { + w += nq.value / sumValue + } + list = append(list, dropListItem{ + nq: nq, + priority: w / float64(nq.sumCost), + }) + } + sort.Sort(list) + for _, item := range list { + for _, request := range item.nq.queue { + close(request.process) + } + // make the queue penalized; no more requests are accepted until the node is + // selected based on the penalty cost which is the cumulative cost of all dropped + // requests. This ensures that sending excess requests is always penalized + // and incentivizes the sender to stop for a while if no replies are received. + item.nq.queue = nil + item.nq.penaltyCost = item.nq.sumCost + l.sumCost -= item.nq.sumCost // penalty costs are not counted in sumCost + item.nq.sumCost = 0 + l.update(item.nq) + if l.sumCost <= l.sumCostLimit/2 { + return + } + } +} diff --git a/les/utils/limiter_test.go b/les/utils/limiter_test.go new file mode 100644 index 0000000000..43af3309ab --- /dev/null +++ b/les/utils/limiter_test.go @@ -0,0 +1,206 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package utils + +import ( + "math/rand" + "testing" + + "github.com/ethereum/go-ethereum/p2p/enode" +) + +const ( + ltTolerance = 0.03 + ltRounds = 7 +) + +type ( + ltNode struct { + addr, id int + value, exp float64 + cost uint + reqRate float64 + reqMax, runCount int + lastTotalCost uint + + served, dropped int + } + + ltResult struct { + node *ltNode + ch chan struct{} + } + + limTest struct { + limiter *Limiter + results chan ltResult + runCount int + expCost, totalCost uint + } +) + +func (lt *limTest) request(n *ltNode) { + var ( + address string + id enode.ID + ) + if n.addr >= 0 { + address = string([]byte{byte(n.addr)}) + } else { + var b [32]byte + rand.Read(b[:]) + address = string(b[:]) + } + if n.id >= 0 { + id = enode.ID{byte(n.id)} + } else { + rand.Read(id[:]) + } + lt.runCount++ + n.runCount++ + cch := lt.limiter.Add(id, address, n.value, n.cost) + go func() { + lt.results <- ltResult{n, <-cch} + }() +} + +func (lt *limTest) moreRequests(n *ltNode) { + maxStart := int(float64(lt.totalCost-n.lastTotalCost) * n.reqRate) + if maxStart != 0 { + n.lastTotalCost = lt.totalCost + } + for n.reqMax > n.runCount && maxStart > 0 { + lt.request(n) + maxStart-- + } +} + +func (lt *limTest) process() { + res := <-lt.results + lt.runCount-- + res.node.runCount-- + if res.ch != nil { + res.node.served++ + if res.node.exp != 0 { + lt.expCost += res.node.cost + } + lt.totalCost += res.node.cost + close(res.ch) + } else { + res.node.dropped++ + } +} + +func TestLimiter(t *testing.T) { + limTests := [][]*ltNode{ + { // one id from an individual address and two ids from a shared address + {addr: 0, id: 0, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.5}, + {addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25}, + {addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25}, + }, + { // varying request costs + {addr: 0, id: 0, value: 0, cost: 10, reqRate: 0.2, reqMax: 1, exp: 0.5}, + {addr: 1, id: 1, value: 0, cost: 3, reqRate: 0.5, reqMax: 1, exp: 0.25}, + {addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25}, + }, + { // different request rate + {addr: 0, id: 0, value: 0, cost: 1, reqRate: 2, reqMax: 2, exp: 0.5}, + {addr: 1, id: 1, value: 0, cost: 1, reqRate: 10, reqMax: 10, exp: 0.25}, + {addr: 1, id: 2, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25}, + }, + { // adding value + {addr: 0, id: 0, value: 3, cost: 1, reqRate: 1, reqMax: 1, exp: (0.5 + 0.3) / 2}, + {addr: 1, id: 1, value: 0, cost: 1, reqRate: 1, reqMax: 1, exp: 0.25 / 2}, + {addr: 1, id: 2, value: 7, cost: 1, reqRate: 1, reqMax: 1, exp: (0.25 + 0.7) / 2}, + }, + { // DoS attack from a single address with a single id + {addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 3, id: 3, value: 0, cost: 1, reqRate: 10, reqMax: 1000000000, exp: 0}, + }, + { // DoS attack from a single address with different ids + {addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 1, id: 1, value: 1, 
cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 3, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0}, + }, + { // DDoS attack from different addresses with a single id + {addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: -1, id: 3, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0}, + }, + { // DDoS attack from different addresses with different ids + {addr: 0, id: 0, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 1, id: 1, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: 2, id: 2, value: 1, cost: 1, reqRate: 1, reqMax: 1, exp: 0.3333}, + {addr: -1, id: -1, value: 0, cost: 1, reqRate: 1, reqMax: 1000000000, exp: 0}, + }, + } + + lt := &limTest{ + limiter: NewLimiter(100), + results: make(chan ltResult), + } + for _, test := range limTests { + lt.expCost, lt.totalCost = 0, 0 + iterCount := 10000 + for j := 0; j < ltRounds; j++ { + // try to reach expected target range in multiple rounds with increasing iteration counts + last := j == ltRounds-1 + for _, n := range test { + lt.request(n) + } + for i := 0; i < iterCount; i++ { + lt.process() + for _, n := range test { + lt.moreRequests(n) + } + } + for lt.runCount > 0 { + lt.process() + } + if spamRatio := 1 - float64(lt.expCost)/float64(lt.totalCost); spamRatio > 0.5*(1+ltTolerance) { + t.Errorf("Spam ratio too high (%f)", spamRatio) + } + fail, success := false, true + for _, n := range test { + if n.exp != 0 { + if n.dropped > 0 { + t.Errorf("Dropped %d requests of non-spam node", n.dropped) + fail = true + } + r := float64(n.served) * float64(n.cost) / float64(lt.expCost) + if r < n.exp*(1-ltTolerance) || r > n.exp*(1+ltTolerance) { + if last { + // print error only if the target is still not reached in the last round + t.Errorf("Request ratio (%f) does not match expected value (%f)", r, n.exp) + } + success = false + } + } + } + if fail || success { + break + } + // neither failed nor succeeded; try more iterations to reach probability targets + iterCount *= 2 + } + } + lt.limiter.Stop() +} diff --git a/les/utils/weighted_select.go b/les/utils/weighted_select.go index d6db3c0e65..486b00820a 100644 --- a/les/utils/weighted_select.go +++ b/les/utils/weighted_select.go @@ -17,7 +17,10 @@ package utils import ( + "math" "math/rand" + + "github.com/ethereum/go-ethereum/log" ) type ( @@ -49,11 +52,19 @@ func (w *WeightedRandomSelect) Remove(item WrsItem) { // IsEmpty returns true if the set is empty func (w *WeightedRandomSelect) IsEmpty() bool { - return w.root.sumWeight == 0 + return w.root.sumCost == 0 } // setWeight sets an item's weight to a specific value (removes it if zero) func (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) { + if weight > math.MaxInt64-w.root.sumCost { + // old weight is still included in sumCost, remove and check again + w.setWeight(item, 0) + if weight > math.MaxInt64-w.root.sumCost { + log.Error("WeightedRandomSelect overflow", "sumCost", w.root.sumCost, "new weight", weight) + weight = math.MaxInt64 - w.root.sumCost + } + } idx, ok := w.idx[item] if ok { w.root.setWeight(idx, weight) @@ -64,9 +75,9 @@ func (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) { if weight != 0 { if w.root.itemCnt == w.root.maxItems { // add a new level - newRoot := &wrsNode{sumWeight: 
w.root.sumWeight, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches} + newRoot := &wrsNode{sumCost: w.root.sumCost, itemCnt: w.root.itemCnt, level: w.root.level + 1, maxItems: w.root.maxItems * wrsBranches} newRoot.items[0] = w.root - newRoot.weights[0] = w.root.sumWeight + newRoot.weights[0] = w.root.sumCost w.root = newRoot } w.idx[item] = w.root.insert(item, weight) @@ -80,10 +91,10 @@ func (w *WeightedRandomSelect) setWeight(item WrsItem, weight uint64) { // updates its weight and selects another one func (w *WeightedRandomSelect) Choose() WrsItem { for { - if w.root.sumWeight == 0 { + if w.root.sumCost == 0 { return nil } - val := uint64(rand.Int63n(int64(w.root.sumWeight))) + val := uint64(rand.Int63n(int64(w.root.sumCost))) choice, lastWeight := w.root.choose(val) weight := w.wfn(choice) if weight != lastWeight { @@ -101,7 +112,7 @@ const wrsBranches = 8 // max number of branches in the wrsNode tree type wrsNode struct { items [wrsBranches]interface{} weights [wrsBranches]uint64 - sumWeight uint64 + sumCost uint64 level, itemCnt, maxItems int } @@ -115,7 +126,7 @@ func (n *wrsNode) insert(item WrsItem, weight uint64) int { } } n.itemCnt++ - n.sumWeight += weight + n.sumCost += weight n.weights[branch] += weight if n.level == 0 { n.items[branch] = item @@ -139,7 +150,7 @@ func (n *wrsNode) setWeight(idx int, weight uint64) uint64 { oldWeight := n.weights[idx] n.weights[idx] = weight diff := weight - oldWeight - n.sumWeight += diff + n.sumCost += diff if weight == 0 { n.items[idx] = nil n.itemCnt-- @@ -150,7 +161,7 @@ func (n *wrsNode) setWeight(idx int, weight uint64) uint64 { branch := idx / branchItems diff := n.items[branch].(*wrsNode).setWeight(idx-branch*branchItems, weight) n.weights[branch] += diff - n.sumWeight += diff + n.sumCost += diff if weight == 0 { n.itemCnt-- } diff --git a/les/lespay/client/api.go b/les/vflux/client/api.go similarity index 98% rename from les/lespay/client/api.go rename to les/vflux/client/api.go index 5ad6ffd77e..135273ef96 100644 --- a/les/lespay/client/api.go +++ b/les/vflux/client/api.go @@ -24,7 +24,7 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" ) -// PrivateClientAPI implements the lespay client side API +// PrivateClientAPI implements the vflux client side API type PrivateClientAPI struct { vt *ValueTracker } diff --git a/les/lespay/client/fillset.go b/les/vflux/client/fillset.go similarity index 100% rename from les/lespay/client/fillset.go rename to les/vflux/client/fillset.go diff --git a/les/lespay/client/fillset_test.go b/les/vflux/client/fillset_test.go similarity index 100% rename from les/lespay/client/fillset_test.go rename to les/vflux/client/fillset_test.go diff --git a/les/lespay/client/queueiterator.go b/les/vflux/client/queueiterator.go similarity index 100% rename from les/lespay/client/queueiterator.go rename to les/vflux/client/queueiterator.go diff --git a/les/lespay/client/queueiterator_test.go b/les/vflux/client/queueiterator_test.go similarity index 93% rename from les/lespay/client/queueiterator_test.go rename to les/vflux/client/queueiterator_test.go index a74301c7d3..400d978e19 100644 --- a/les/lespay/client/queueiterator_test.go +++ b/les/vflux/client/queueiterator_test.go @@ -26,17 +26,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -func testNodeID(i int) enode.ID { - return enode.ID{42, byte(i % 256), byte(i / 256)} -} - -func testNodeIndex(id enode.ID) int { - if id[0] != 42 { - return -1 - } - return int(id[1]) + int(id[2])*256 -} - func testNode(i 
int) *enode.Node { return enode.SignNull(new(enr.Record), testNodeID(i)) } diff --git a/les/lespay/client/requestbasket.go b/les/vflux/client/requestbasket.go similarity index 100% rename from les/lespay/client/requestbasket.go rename to les/vflux/client/requestbasket.go diff --git a/les/lespay/client/requestbasket_test.go b/les/vflux/client/requestbasket_test.go similarity index 100% rename from les/lespay/client/requestbasket_test.go rename to les/vflux/client/requestbasket_test.go diff --git a/les/serverpool.go b/les/vflux/client/serverpool.go similarity index 66% rename from les/serverpool.go rename to les/vflux/client/serverpool.go index 9bfa0bd725..e73b277ced 100644 --- a/les/serverpool.go +++ b/les/vflux/client/serverpool.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -package les +package client import ( "errors" @@ -26,9 +26,9 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/ethdb" - lpc "github.com/ethereum/go-ethereum/les/lespay/client" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" @@ -50,31 +50,34 @@ const ( maxQueryFails = 100 // number of consecutive UDP query failures before we print a warning ) -// serverPool provides a node iterator for dial candidates. The output is a mix of newly discovered +// ServerPool provides a node iterator for dial candidates. The output is a mix of newly discovered // nodes, a weighted random selection of known (previously valuable) nodes and trusted/paid nodes. -type serverPool struct { +type ServerPool struct { clock mclock.Clock unixTime func() int64 db ethdb.KeyValueStore - ns *nodestate.NodeStateMachine - vt *lpc.ValueTracker - mixer *enode.FairMix - mixSources []enode.Iterator - dialIterator enode.Iterator - validSchemes enr.IdentityScheme - trustedURLs []string - fillSet *lpc.FillSet - queryFails uint32 + ns *nodestate.NodeStateMachine + vt *ValueTracker + mixer *enode.FairMix + mixSources []enode.Iterator + dialIterator enode.Iterator + validSchemes enr.IdentityScheme + trustedURLs []string + fillSet *FillSet + started, queryFails uint32 timeoutLock sync.RWMutex timeout time.Duration - timeWeights lpc.ResponseTimeWeights + timeWeights ResponseTimeWeights timeoutRefreshed mclock.AbsTime + + suggestedTimeoutGauge, totalValueGauge metrics.Gauge + sessionValueMeter metrics.Meter } // nodeHistory keeps track of dial costs which determine node weight together with the -// service value calculated by lpc.ValueTracker. +// service value calculated by ValueTracker. type nodeHistory struct { dialCost utils.ExpiredValue redialWaitStart, redialWaitEnd int64 // unix time (seconds) @@ -88,21 +91,21 @@ type nodeHistoryEnc struct { // queryFunc sends a pre-negotiation query and blocks until a response arrives or timeout occurs. // It returns 1 if the remote node has confirmed that connection is possible, 0 if not // possible and -1 if no response arrived (timeout). 
-type queryFunc func(*enode.Node) int +type QueryFunc func(*enode.Node) int var ( - serverPoolSetup = &nodestate.Setup{Version: 1} - sfHasValue = serverPoolSetup.NewPersistentFlag("hasValue") - sfQueried = serverPoolSetup.NewFlag("queried") - sfCanDial = serverPoolSetup.NewFlag("canDial") - sfDialing = serverPoolSetup.NewFlag("dialed") - sfWaitDialTimeout = serverPoolSetup.NewFlag("dialTimeout") - sfConnected = serverPoolSetup.NewFlag("connected") - sfRedialWait = serverPoolSetup.NewFlag("redialWait") - sfAlwaysConnect = serverPoolSetup.NewFlag("alwaysConnect") + clientSetup = &nodestate.Setup{Version: 2} + sfHasValue = clientSetup.NewPersistentFlag("hasValue") + sfQueried = clientSetup.NewFlag("queried") + sfCanDial = clientSetup.NewFlag("canDial") + sfDialing = clientSetup.NewFlag("dialed") + sfWaitDialTimeout = clientSetup.NewFlag("dialTimeout") + sfConnected = clientSetup.NewFlag("connected") + sfRedialWait = clientSetup.NewFlag("redialWait") + sfAlwaysConnect = clientSetup.NewFlag("alwaysConnect") sfDisableSelection = nodestate.MergeFlags(sfQueried, sfCanDial, sfDialing, sfConnected, sfRedialWait) - sfiNodeHistory = serverPoolSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}), + sfiNodeHistory = clientSetup.NewPersistentField("nodeHistory", reflect.TypeOf(nodeHistory{}), func(field interface{}) ([]byte, error) { if n, ok := field.(nodeHistory); ok { ne := nodeHistoryEnc{ @@ -112,9 +115,8 @@ var ( } enc, err := rlp.EncodeToBytes(&ne) return enc, err - } else { - return nil, errors.New("invalid field type") } + return nil, errors.New("invalid field type") }, func(enc []byte) (interface{}, error) { var ne nodeHistoryEnc @@ -127,40 +129,48 @@ var ( return n, err }, ) - sfiNodeWeight = serverPoolSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0))) - sfiConnectedStats = serverPoolSetup.NewField("connectedStats", reflect.TypeOf(lpc.ResponseTimeStats{})) + sfiNodeWeight = clientSetup.NewField("nodeWeight", reflect.TypeOf(uint64(0))) + sfiConnectedStats = clientSetup.NewField("connectedStats", reflect.TypeOf(ResponseTimeStats{})) + sfiLocalAddress = clientSetup.NewPersistentField("localAddress", reflect.TypeOf(&enr.Record{}), + func(field interface{}) ([]byte, error) { + if enr, ok := field.(*enr.Record); ok { + enc, err := rlp.EncodeToBytes(enr) + return enc, err + } + return nil, errors.New("invalid field type") + }, + func(enc []byte) (interface{}, error) { + var enr enr.Record + if err := rlp.DecodeBytes(enc, &enr); err != nil { + return nil, err + } + return &enr, nil + }, + ) ) -// newServerPool creates a new server pool -func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, discovery enode.Iterator, mixTimeout time.Duration, query queryFunc, clock mclock.Clock, trustedURLs []string) *serverPool { - s := &serverPool{ +// NewServerPool creates a new server pool +func NewServerPool(db ethdb.KeyValueStore, dbKey []byte, mixTimeout time.Duration, query QueryFunc, clock mclock.Clock, trustedURLs []string, requestList []RequestInfo) (*ServerPool, enode.Iterator) { + s := &ServerPool{ db: db, clock: clock, unixTime: func() int64 { return time.Now().Unix() }, validSchemes: enode.ValidSchemes, trustedURLs: trustedURLs, - vt: vt, - ns: nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, serverPoolSetup), + vt: NewValueTracker(db, &mclock.System{}, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)), + ns: nodestate.NewNodeStateMachine(db, []byte(string(dbKey)+"ns:"), clock, 
clientSetup), } s.recalTimeout() s.mixer = enode.NewFairMix(mixTimeout) - knownSelector := lpc.NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight) - alwaysConnect := lpc.NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil) + knownSelector := NewWrsIterator(s.ns, sfHasValue, sfDisableSelection, sfiNodeWeight) + alwaysConnect := NewQueueIterator(s.ns, sfAlwaysConnect, sfDisableSelection, true, nil) s.mixSources = append(s.mixSources, knownSelector) s.mixSources = append(s.mixSources, alwaysConnect) - if discovery != nil { - s.mixSources = append(s.mixSources, discovery) - } - iter := enode.Iterator(s.mixer) + s.dialIterator = s.mixer if query != nil { - iter = s.addPreNegFilter(iter, query) + s.dialIterator = s.addPreNegFilter(s.dialIterator, query) } - s.dialIterator = enode.Filter(iter, func(node *enode.Node) bool { - s.ns.SetState(node, sfDialing, sfCanDial, 0) - s.ns.SetState(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10) - return true - }) s.ns.SubscribeState(nodestate.MergeFlags(sfWaitDialTimeout, sfConnected), func(n *enode.Node, oldState, newState nodestate.Flags) { if oldState.Equals(sfWaitDialTimeout) && newState.IsEmpty() { @@ -170,17 +180,74 @@ func newServerPool(db ethdb.KeyValueStore, dbKey []byte, vt *lpc.ValueTracker, d } }) - s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge) - s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil) - s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge) - return s + return s, &serverPoolIterator{ + dialIterator: s.dialIterator, + nextFn: func(node *enode.Node) { + s.ns.Operation(func() { + s.ns.SetStateSub(node, sfDialing, sfCanDial, 0) + s.ns.SetStateSub(node, sfWaitDialTimeout, nodestate.Flags{}, time.Second*10) + }) + }, + nodeFn: s.DialNode, + } +} + +type serverPoolIterator struct { + dialIterator enode.Iterator + nextFn func(*enode.Node) + nodeFn func(*enode.Node) *enode.Node +} + +// Next implements enode.Iterator +func (s *serverPoolIterator) Next() bool { + if s.dialIterator.Next() { + s.nextFn(s.dialIterator.Node()) + return true + } + return false +} + +// Node implements enode.Iterator +func (s *serverPoolIterator) Node() *enode.Node { + return s.nodeFn(s.dialIterator.Node()) +} + +// Close implements enode.Iterator +func (s *serverPoolIterator) Close() { + s.dialIterator.Close() +} + +// AddMetrics adds metrics to the server pool. Should be called before Start(). 
+func (s *ServerPool) AddMetrics( + suggestedTimeoutGauge, totalValueGauge, serverSelectableGauge, serverConnectedGauge metrics.Gauge, + sessionValueMeter, serverDialedMeter metrics.Meter) { + + s.suggestedTimeoutGauge = suggestedTimeoutGauge + s.totalValueGauge = totalValueGauge + s.sessionValueMeter = sessionValueMeter + if serverSelectableGauge != nil { + s.ns.AddLogMetrics(sfHasValue, sfDisableSelection, "selectable", nil, nil, serverSelectableGauge) + } + if serverDialedMeter != nil { + s.ns.AddLogMetrics(sfDialing, nodestate.Flags{}, "dialed", serverDialedMeter, nil, nil) + } + if serverConnectedGauge != nil { + s.ns.AddLogMetrics(sfConnected, nodestate.Flags{}, "connected", nil, nil, serverConnectedGauge) + } +} + +// AddSource adds a node discovery source to the server pool (should be called before start) +func (s *ServerPool) AddSource(source enode.Iterator) { + if source != nil { + s.mixSources = append(s.mixSources, source) + } } // addPreNegFilter installs a node filter mechanism that performs a pre-negotiation query. // Nodes that are filtered out and does not appear on the output iterator are put back // into redialWait state. -func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enode.Iterator { - s.fillSet = lpc.NewFillSet(s.ns, input, sfQueried) +func (s *ServerPool) addPreNegFilter(input enode.Iterator, query QueryFunc) enode.Iterator { + s.fillSet = NewFillSet(s.ns, input, sfQueried) s.ns.SubscribeState(sfQueried, func(n *enode.Node, oldState, newState nodestate.Flags) { if newState.Equals(sfQueried) { fails := atomic.LoadUint32(&s.queryFails) @@ -218,7 +285,7 @@ func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enod }() } }) - return lpc.NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) { + return NewQueueIterator(s.ns, sfCanDial, nodestate.Flags{}, false, func(waiting bool) { if waiting { s.fillSet.SetTarget(preNegLimit) } else { @@ -228,7 +295,7 @@ func (s *serverPool) addPreNegFilter(input enode.Iterator, query queryFunc) enod } // start starts the server pool. Note that NodeStateMachine should be started first. 
-func (s *serverPool) start() { +func (s *ServerPool) Start() { s.ns.Start() for _, iter := range s.mixSources { // add sources to mixer at startup because the mixer instantly tries to read them @@ -258,11 +325,11 @@ func (s *serverPool) start() { } }) }) + atomic.StoreUint32(&s.started, 1) } // stop stops the server pool -func (s *serverPool) stop() { - s.dialIterator.Close() +func (s *ServerPool) Stop() { if s.fillSet != nil { s.fillSet.Close() } @@ -273,32 +340,39 @@ func (s *serverPool) stop() { }) }) s.ns.Stop() + s.vt.Stop() } -// registerPeer implements serverPeerSubscriber -func (s *serverPool) registerPeer(p *serverPeer) { - s.ns.SetState(p.Node(), sfConnected, sfDialing.Or(sfWaitDialTimeout), 0) - nvt := s.vt.Register(p.ID()) - s.ns.SetField(p.Node(), sfiConnectedStats, nvt.RtStats()) - p.setValueTracker(s.vt, nvt) - p.updateVtParams() +// RegisterNode implements serverPeerSubscriber +func (s *ServerPool) RegisterNode(node *enode.Node) (*NodeValueTracker, error) { + if atomic.LoadUint32(&s.started) == 0 { + return nil, errors.New("server pool not started yet") + } + nvt := s.vt.Register(node.ID()) + s.ns.Operation(func() { + s.ns.SetStateSub(node, sfConnected, sfDialing.Or(sfWaitDialTimeout), 0) + s.ns.SetFieldSub(node, sfiConnectedStats, nvt.RtStats()) + if node.IP().IsLoopback() { + s.ns.SetFieldSub(node, sfiLocalAddress, node.Record()) + } + }) + return nvt, nil } -// unregisterPeer implements serverPeerSubscriber -func (s *serverPool) unregisterPeer(p *serverPeer) { +// UnregisterNode implements serverPeerSubscriber +func (s *ServerPool) UnregisterNode(node *enode.Node) { s.ns.Operation(func() { - s.setRedialWait(p.Node(), dialCost, dialWaitStep) - s.ns.SetStateSub(p.Node(), nodestate.Flags{}, sfConnected, 0) - s.ns.SetFieldSub(p.Node(), sfiConnectedStats, nil) + s.setRedialWait(node, dialCost, dialWaitStep) + s.ns.SetStateSub(node, nodestate.Flags{}, sfConnected, 0) + s.ns.SetFieldSub(node, sfiConnectedStats, nil) }) - s.vt.Unregister(p.ID()) - p.setValueTracker(nil, nil) + s.vt.Unregister(node.ID()) } // recalTimeout calculates the current recommended timeout. This value is used by // the client as a "soft timeout" value. It also affects the service value calculation // of individual nodes. -func (s *serverPool) recalTimeout() { +func (s *ServerPool) recalTimeout() { // Use cached result if possible, avoid recalculating too frequently. s.timeoutLock.RLock() refreshed := s.timeoutRefreshed @@ -327,17 +401,21 @@ func (s *serverPool) recalTimeout() { s.timeoutLock.Lock() if s.timeout != timeout { s.timeout = timeout - s.timeWeights = lpc.TimeoutWeights(s.timeout) + s.timeWeights = TimeoutWeights(s.timeout) - suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond)) - totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor()))) + if s.suggestedTimeoutGauge != nil { + s.suggestedTimeoutGauge.Update(int64(s.timeout / time.Millisecond)) + } + if s.totalValueGauge != nil { + s.totalValueGauge.Update(int64(rts.Value(s.timeWeights, s.vt.StatsExpFactor()))) + } } s.timeoutRefreshed = now s.timeoutLock.Unlock() } -// getTimeout returns the recommended request timeout. -func (s *serverPool) getTimeout() time.Duration { +// GetTimeout returns the recommended request timeout. 
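Taken together, the exported ServerPool surface is wired up by a client roughly as follows (a sketch mirroring serverpool_test.go, written as if inside the vflux client package; the database, discovery iterator, trusted URLs, request list and node are assumed inputs):

	func startPoolSketch(db ethdb.KeyValueStore, disc enode.Iterator, trusted []string, reqs []RequestInfo, node *enode.Node) {
		pool, dialIter := NewServerPool(db, []byte("serverpool:"), time.Second, nil, &mclock.System{}, trusted, reqs)
		pool.AddSource(disc) // optional discovery source; must be added before Start
		pool.Start()

		// For each node dialed from dialIter and successfully connected:
		if nvt, err := pool.RegisterNode(node); err == nil {
			_ = nvt // nvt receives Served/UpdateCosts calls while the node is connected
			// ...use pool.GetTimeout() as the soft request timeout while serving...
			pool.UnregisterNode(node)
		}

		// Shutdown, mirroring serverpool_test.go: stop the pool, then close the iterator.
		pool.Stop()
		dialIter.Close()
	}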
+func (s *ServerPool) GetTimeout() time.Duration { s.recalTimeout() s.timeoutLock.RLock() defer s.timeoutLock.RUnlock() @@ -346,7 +424,7 @@ func (s *serverPool) getTimeout() time.Duration { // getTimeoutAndWeight returns the recommended request timeout as well as the // response time weight which is necessary to calculate service value. -func (s *serverPool) getTimeoutAndWeight() (time.Duration, lpc.ResponseTimeWeights) { +func (s *ServerPool) getTimeoutAndWeight() (time.Duration, ResponseTimeWeights) { s.recalTimeout() s.timeoutLock.RLock() defer s.timeoutLock.RUnlock() @@ -355,7 +433,7 @@ func (s *serverPool) getTimeoutAndWeight() (time.Duration, lpc.ResponseTimeWeigh // addDialCost adds the given amount of dial cost to the node history and returns the current // amount of total dial cost -func (s *serverPool) addDialCost(n *nodeHistory, amount int64) uint64 { +func (s *ServerPool) addDialCost(n *nodeHistory, amount int64) uint64 { logOffset := s.vt.StatsExpirer().LogOffset(s.clock.Now()) if amount > 0 { n.dialCost.Add(amount, logOffset) @@ -368,7 +446,7 @@ func (s *serverPool) addDialCost(n *nodeHistory, amount int64) uint64 { } // serviceValue returns the service value accumulated in this session and in total -func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) { +func (s *ServerPool) serviceValue(node *enode.Node) (sessionValue, totalValue float64) { nvt := s.vt.GetNode(node.ID()) if nvt == nil { return 0, 0 @@ -378,11 +456,13 @@ func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue fl expFactor := s.vt.StatsExpFactor() totalValue = currentStats.Value(timeWeights, expFactor) - if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(lpc.ResponseTimeStats); ok { + if connStats, ok := s.ns.GetField(node, sfiConnectedStats).(ResponseTimeStats); ok { diff := currentStats diff.SubStats(&connStats) sessionValue = diff.Value(timeWeights, expFactor) - sessionValueMeter.Mark(int64(sessionValue)) + if s.sessionValueMeter != nil { + s.sessionValueMeter.Mark(int64(sessionValue)) + } } return } @@ -390,7 +470,7 @@ func (s *serverPool) serviceValue(node *enode.Node) (sessionValue, totalValue fl // updateWeight calculates the node weight and updates the nodeWeight field and the // hasValue flag. It also saves the node state if necessary. // Note: this function should run inside a NodeStateMachine operation -func (s *serverPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) { +func (s *ServerPool) updateWeight(node *enode.Node, totalValue float64, totalDialCost uint64) { weight := uint64(totalValue * nodeWeightMul / float64(totalDialCost)) if weight >= nodeWeightThreshold { s.ns.SetStateSub(node, sfHasValue, nodestate.Flags{}, 0) @@ -399,6 +479,7 @@ func (s *serverPool) updateWeight(node *enode.Node, totalValue float64, totalDia s.ns.SetStateSub(node, nodestate.Flags{}, sfHasValue, 0) s.ns.SetFieldSub(node, sfiNodeWeight, nil) s.ns.SetFieldSub(node, sfiNodeHistory, nil) + s.ns.SetFieldSub(node, sfiLocalAddress, nil) } s.ns.Persist(node) // saved if node history or hasValue changed } @@ -412,7 +493,7 @@ func (s *serverPool) updateWeight(node *enode.Node, totalValue float64, totalDia // to the minimum. // Note: node weight is also recalculated and updated by this function. 
// Note 2: this function should run inside a NodeStateMachine operation -func (s *serverPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) { +func (s *ServerPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep float64) { n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory) sessionValue, totalValue := s.serviceValue(node) totalDialCost := s.addDialCost(&n, addDialCost) @@ -478,9 +559,39 @@ func (s *serverPool) setRedialWait(node *enode.Node, addDialCost int64, waitStep // This function should be called during startup and shutdown only, otherwise setRedialWait // will keep the weights updated as the underlying statistics are adjusted. // Note: this function should run inside a NodeStateMachine operation -func (s *serverPool) calculateWeight(node *enode.Node) { +func (s *ServerPool) calculateWeight(node *enode.Node) { n, _ := s.ns.GetField(node, sfiNodeHistory).(nodeHistory) _, totalValue := s.serviceValue(node) totalDialCost := s.addDialCost(&n, 0) s.updateWeight(node, totalValue, totalDialCost) } + +// API returns the vflux client API +func (s *ServerPool) API() *PrivateClientAPI { + return NewPrivateClientAPI(s.vt) +} + +type dummyIdentity enode.ID + +func (id dummyIdentity) Verify(r *enr.Record, sig []byte) error { return nil } +func (id dummyIdentity) NodeAddr(r *enr.Record) []byte { return id[:] } + +// DialNode replaces the given enode with a locally generated one containing the ENR +// stored in the sfiLocalAddress field if present. This workaround ensures that nodes +// on the local network can be dialed at the local address if a connection has been +// successfully established previously. +// Note that NodeStateMachine always remembers the enode with the latest version of +// the remote signed ENR. ENR filtering should be performed on that version while +// dialNode should be used for dialing the node over TCP or UDP. +func (s *ServerPool) DialNode(n *enode.Node) *enode.Node { + if enr, ok := s.ns.GetField(n, sfiLocalAddress).(*enr.Record); ok { + n, _ := enode.New(dummyIdentity(n.ID()), enr) + return n + } + return n +} + +// Persist immediately stores the state of a node in the node database +func (s *ServerPool) Persist(n *enode.Node) { + s.ns.Persist(n) +} diff --git a/les/serverpool_test.go b/les/vflux/client/serverpool_test.go similarity index 80% rename from les/serverpool_test.go rename to les/vflux/client/serverpool_test.go index 3d0487d102..c777d6c16d 100644 --- a/les/serverpool_test.go +++ b/les/vflux/client/serverpool_test.go @@ -14,10 +14,11 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
-package les +package client import ( "math/rand" + "strconv" "sync/atomic" "testing" "time" @@ -25,8 +26,6 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" - lpc "github.com/ethereum/go-ethereum/les/lespay/client" - "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" ) @@ -50,13 +49,14 @@ func testNodeIndex(id enode.ID) int { return int(id[1]) + int(id[2])*256 } -type serverPoolTest struct { +type ServerPoolTest struct { db ethdb.KeyValueStore clock *mclock.Simulated quit chan struct{} preNeg, preNegFail bool - vt *lpc.ValueTracker - sp *serverPool + vt *ValueTracker + sp *ServerPool + spi enode.Iterator input enode.Iterator testNodes []spTestNode trusted []string @@ -71,15 +71,15 @@ type spTestNode struct { connectCycles, waitCycles int nextConnCycle, totalConn int connected, service bool - peer *serverPeer + node *enode.Node } -func newServerPoolTest(preNeg, preNegFail bool) *serverPoolTest { +func newServerPoolTest(preNeg, preNegFail bool) *ServerPoolTest { nodes := make([]*enode.Node, spTestNodes) for i := range nodes { nodes[i] = enode.SignNull(&enr.Record{}, testNodeID(i)) } - return &serverPoolTest{ + return &ServerPoolTest{ clock: &mclock.Simulated{}, db: memorydb.New(), input: enode.CycleNodes(nodes), @@ -89,7 +89,7 @@ func newServerPoolTest(preNeg, preNegFail bool) *serverPoolTest { } } -func (s *serverPoolTest) beginWait() { +func (s *ServerPoolTest) beginWait() { // ensure that dialIterator and the maximal number of pre-neg queries are not all stuck in a waiting state for atomic.AddInt32(&s.waitCount, 1) > preNegLimit { atomic.AddInt32(&s.waitCount, -1) @@ -97,17 +97,17 @@ func (s *serverPoolTest) beginWait() { } } -func (s *serverPoolTest) endWait() { +func (s *ServerPoolTest) endWait() { atomic.AddInt32(&s.waitCount, -1) atomic.AddInt32(&s.waitEnded, 1) } -func (s *serverPoolTest) addTrusted(i int) { +func (s *ServerPoolTest) addTrusted(i int) { s.trusted = append(s.trusted, enode.SignNull(&enr.Record{}, testNodeID(i)).String()) } -func (s *serverPoolTest) start() { - var testQuery queryFunc +func (s *ServerPoolTest) start() { + var testQuery QueryFunc if s.preNeg { testQuery = func(node *enode.Node) int { idx := testNodeIndex(node.ID()) @@ -119,40 +119,42 @@ func (s *serverPoolTest) start() { s.clock.Sleep(time.Second * 5) s.endWait() return -1 - } else { - switch idx % 3 { - case 0: - // pre-neg returns true only if connection is possible - if canConnect { - return 1 - } else { - return 0 - } - case 1: - // pre-neg returns true but connection might still fail + } + switch idx % 3 { + case 0: + // pre-neg returns true only if connection is possible + if canConnect { + return 1 + } + return 0 + case 1: + // pre-neg returns true but connection might still fail + return 1 + case 2: + // pre-neg returns true if connection is possible, otherwise timeout (node unresponsive) + if canConnect { return 1 - case 2: - // pre-neg returns true if connection is possible, otherwise timeout (node unresponsive) - if canConnect { - return 1 - } else { - s.beginWait() - s.clock.Sleep(time.Second * 5) - s.endWait() - return -1 - } } + s.beginWait() + s.clock.Sleep(time.Second * 5) + s.endWait() return -1 } + return -1 } } - s.vt = lpc.NewValueTracker(s.db, s.clock, requestList, time.Minute, 1/float64(time.Hour), 1/float64(time.Hour*100), 1/float64(time.Hour*1000)) - s.sp = newServerPool(s.db, []byte("serverpool:"), s.vt, 
s.input, 0, testQuery, s.clock, s.trusted) + requestList := make([]RequestInfo, testReqTypes) + for i := range requestList { + requestList[i] = RequestInfo{Name: "testreq" + strconv.Itoa(i), InitAmount: 1, InitValue: 1} + } + + s.sp, s.spi = NewServerPool(s.db, []byte("sp:"), 0, testQuery, s.clock, s.trusted, requestList) + s.sp.AddSource(s.input) s.sp.validSchemes = enode.ValidSchemesForTesting s.sp.unixTime = func() int64 { return int64(s.clock.Now()) / int64(time.Second) } s.disconnect = make(map[int][]int) - s.sp.start() + s.sp.Start() s.quit = make(chan struct{}) go func() { last := int32(-1) @@ -172,31 +174,31 @@ func (s *serverPoolTest) start() { }() } -func (s *serverPoolTest) stop() { +func (s *ServerPoolTest) stop() { close(s.quit) - s.sp.stop() - s.vt.Stop() + s.sp.Stop() + s.spi.Close() for i := range s.testNodes { n := &s.testNodes[i] if n.connected { n.totalConn += s.cycle } n.connected = false - n.peer = nil + n.node = nil n.nextConnCycle = 0 } s.conn, s.servedConn = 0, 0 } -func (s *serverPoolTest) run() { +func (s *ServerPoolTest) run() { for count := spTestLength; count > 0; count-- { if dcList := s.disconnect[s.cycle]; dcList != nil { for _, idx := range dcList { n := &s.testNodes[idx] - s.sp.unregisterPeer(n.peer) + s.sp.UnregisterNode(n.node) n.totalConn += s.cycle n.connected = false - n.peer = nil + n.node = nil s.conn-- if n.service { s.servedConn-- @@ -208,9 +210,9 @@ func (s *serverPoolTest) run() { if s.conn < spTestTarget { s.dialCount++ s.beginWait() - s.sp.dialIterator.Next() + s.spi.Next() s.endWait() - dial := s.sp.dialIterator.Node() + dial := s.spi.Node() id := dial.ID() idx := testNodeIndex(id) n := &s.testNodes[idx] @@ -223,10 +225,10 @@ func (s *serverPoolTest) run() { n.connected = true dc := s.cycle + n.connectCycles s.disconnect[dc] = append(s.disconnect[dc], idx) - n.peer = &serverPeer{peerCommons: peerCommons{Peer: p2p.NewPeer(id, "", nil)}} - s.sp.registerPeer(n.peer) + n.node = dial + nv, _ := s.sp.RegisterNode(n.node) if n.service { - s.vt.Served(s.vt.GetNode(id), []lpc.ServedRequest{{ReqType: 0, Amount: 100}}, 0) + nv.Served([]ServedRequest{{ReqType: 0, Amount: 100}}, 0) } } } @@ -236,7 +238,7 @@ func (s *serverPoolTest) run() { } } -func (s *serverPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) { +func (s *ServerPoolTest) setNodes(count, conn, wait int, service, trusted bool) (res []int) { for ; count > 0; count-- { idx := rand.Intn(spTestNodes) for s.testNodes[idx].connectCycles != 0 || s.testNodes[idx].connected { @@ -255,11 +257,11 @@ func (s *serverPoolTest) setNodes(count, conn, wait int, service, trusted bool) return } -func (s *serverPoolTest) resetNodes() { +func (s *ServerPoolTest) resetNodes() { for i, n := range s.testNodes { if n.connected { n.totalConn += s.cycle - s.sp.unregisterPeer(n.peer) + s.sp.UnregisterNode(n.node) } s.testNodes[i] = spTestNode{totalConn: n.totalConn} } @@ -268,7 +270,7 @@ func (s *serverPoolTest) resetNodes() { s.trusted = nil } -func (s *serverPoolTest) checkNodes(t *testing.T, nodes []int) { +func (s *ServerPoolTest) checkNodes(t *testing.T, nodes []int) { var sum int for _, idx := range nodes { n := &s.testNodes[idx] diff --git a/les/lespay/client/timestats.go b/les/vflux/client/timestats.go similarity index 100% rename from les/lespay/client/timestats.go rename to les/vflux/client/timestats.go diff --git a/les/lespay/client/timestats_test.go b/les/vflux/client/timestats_test.go similarity index 100% rename from les/lespay/client/timestats_test.go rename to 
les/vflux/client/timestats_test.go diff --git a/les/lespay/client/valuetracker.go b/les/vflux/client/valuetracker.go similarity index 94% rename from les/lespay/client/valuetracker.go rename to les/vflux/client/valuetracker.go index 4e67b31d96..f5390d0920 100644 --- a/les/lespay/client/valuetracker.go +++ b/les/vflux/client/valuetracker.go @@ -45,6 +45,7 @@ var ( type NodeValueTracker struct { lock sync.Mutex + vt *ValueTracker rtStats, lastRtStats ResponseTimeStats lastTransfer mclock.AbsTime basket serverBasket @@ -52,15 +53,12 @@ type NodeValueTracker struct { reqValues *[]float64 } -// init initializes a NodeValueTracker. -// Note that the contents of the referenced reqValues slice will not change; a new -// reference is passed if the values are updated by ValueTracker. -func (nv *NodeValueTracker) init(now mclock.AbsTime, reqValues *[]float64) { - reqTypeCount := len(*reqValues) - nv.reqCosts = make([]uint64, reqTypeCount) - nv.lastTransfer = now - nv.reqValues = reqValues - nv.basket.init(reqTypeCount) +// UpdateCosts updates the node value tracker's request cost table +func (nv *NodeValueTracker) UpdateCosts(reqCosts []uint64) { + nv.vt.lock.Lock() + defer nv.vt.lock.Unlock() + + nv.updateCosts(reqCosts, &nv.vt.refBasket.reqValues, nv.vt.refBasket.reqValueFactor(reqCosts)) } // updateCosts updates the request cost table of the server. The request value factor @@ -97,6 +95,28 @@ func (nv *NodeValueTracker) transferStats(now mclock.AbsTime, transferRate float return nv.basket.transfer(-math.Expm1(-transferRate * float64(dt))), recentRtStats } +type ServedRequest struct { + ReqType, Amount uint32 +} + +// Served adds a served request to the node's statistics. An actual request may be composed +// of one or more request types (service vector indices). 
+func (nv *NodeValueTracker) Served(reqs []ServedRequest, respTime time.Duration) { + nv.vt.statsExpLock.RLock() + expFactor := nv.vt.statsExpFactor + nv.vt.statsExpLock.RUnlock() + + nv.lock.Lock() + defer nv.lock.Unlock() + + var value float64 + for _, r := range reqs { + nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor) + value += (*nv.reqValues)[r.ReqType] * float64(r.Amount) + } + nv.rtStats.Add(respTime, value, expFactor) +} + // RtStats returns the node's own response time distribution statistics func (nv *NodeValueTracker) RtStats() ResponseTimeStats { nv.lock.Lock() @@ -333,7 +353,12 @@ func (vt *ValueTracker) Register(id enode.ID) *NodeValueTracker { return nil } nv := vt.loadOrNewNode(id) - nv.init(vt.clock.Now(), &vt.refBasket.reqValues) + reqTypeCount := len(vt.refBasket.reqValues) + nv.reqCosts = make([]uint64, reqTypeCount) + nv.lastTransfer = vt.clock.Now() + nv.reqValues = &vt.refBasket.reqValues + nv.basket.init(reqTypeCount) + vt.connected[id] = nv return nv } @@ -364,7 +389,7 @@ func (vt *ValueTracker) loadOrNewNode(id enode.ID) *NodeValueTracker { if nv, ok := vt.connected[id]; ok { return nv } - nv := &NodeValueTracker{lastTransfer: vt.clock.Now()} + nv := &NodeValueTracker{vt: vt, lastTransfer: vt.clock.Now()} enc, err := vt.db.Get(append(vtNodeKey, id[:]...)) if err != nil { return nv @@ -425,14 +450,6 @@ func (vt *ValueTracker) saveNode(id enode.ID, nv *NodeValueTracker) { } } -// UpdateCosts updates the node value tracker's request cost table -func (vt *ValueTracker) UpdateCosts(nv *NodeValueTracker, reqCosts []uint64) { - vt.lock.Lock() - defer vt.lock.Unlock() - - nv.updateCosts(reqCosts, &vt.refBasket.reqValues, vt.refBasket.reqValueFactor(reqCosts)) -} - // RtStats returns the global response time distribution statistics func (vt *ValueTracker) RtStats() ResponseTimeStats { vt.lock.Lock() @@ -464,28 +481,6 @@ func (vt *ValueTracker) periodicUpdate() { vt.saveToDb() } -type ServedRequest struct { - ReqType, Amount uint32 -} - -// Served adds a served request to the node's statistics. An actual request may be composed -// of one or more request types (service vector indices). 
-func (vt *ValueTracker) Served(nv *NodeValueTracker, reqs []ServedRequest, respTime time.Duration) { - vt.statsExpLock.RLock() - expFactor := vt.statsExpFactor - vt.statsExpLock.RUnlock() - - nv.lock.Lock() - defer nv.lock.Unlock() - - var value float64 - for _, r := range reqs { - nv.basket.add(r.ReqType, r.Amount, nv.reqCosts[r.ReqType]*uint64(r.Amount), expFactor) - value += (*nv.reqValues)[r.ReqType] * float64(r.Amount) - } - nv.rtStats.Add(respTime, value, vt.statsExpFactor) -} - type RequestStatsItem struct { Name string ReqAmount, ReqValue float64 diff --git a/les/lespay/client/valuetracker_test.go b/les/vflux/client/valuetracker_test.go similarity index 97% rename from les/lespay/client/valuetracker_test.go rename to les/vflux/client/valuetracker_test.go index ad398749e9..87a337be8d 100644 --- a/les/lespay/client/valuetracker_test.go +++ b/les/vflux/client/valuetracker_test.go @@ -64,7 +64,7 @@ func TestValueTracker(t *testing.T) { for j := range costList { costList[j] = uint64(baseCost * relPrices[j]) } - vt.UpdateCosts(nodes[i], costList) + nodes[i].UpdateCosts(costList) } for i := range nodes { nodes[i] = vt.Register(enode.ID{byte(i)}) @@ -77,7 +77,7 @@ func TestValueTracker(t *testing.T) { node := rand.Intn(testNodeCount) respTime := time.Duration((rand.Float64() + 1) * float64(time.Second) * float64(node+1) / testNodeCount) totalAmount[reqType] += uint64(reqAmount) - vt.Served(nodes[node], []ServedRequest{{uint32(reqType), uint32(reqAmount)}}, respTime) + nodes[node].Served([]ServedRequest{{uint32(reqType), uint32(reqAmount)}}, respTime) clock.Run(time.Second) } } else { diff --git a/les/lespay/client/wrsiterator.go b/les/vflux/client/wrsiterator.go similarity index 100% rename from les/lespay/client/wrsiterator.go rename to les/vflux/client/wrsiterator.go diff --git a/les/lespay/client/wrsiterator_test.go b/les/vflux/client/wrsiterator_test.go similarity index 100% rename from les/lespay/client/wrsiterator_test.go rename to les/vflux/client/wrsiterator_test.go diff --git a/les/vflux/requests.go b/les/vflux/requests.go new file mode 100644 index 0000000000..11255607e8 --- /dev/null +++ b/les/vflux/requests.go @@ -0,0 +1,180 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package vflux + +import ( + "errors" + "math" + "math/big" + + "github.com/ethereum/go-ethereum/rlp" +) + +var ErrNoReply = errors.New("no reply for given request") + +const ( + MaxRequestLength = 16 // max number of individual requests in a batch + CapacityQueryName = "cq" + CapacityQueryMaxLen = 16 +) + +type ( + // Request describes a single vflux request inside a batch. Service and request + // type are identified by strings, parameters are RLP encoded. 
+ Request struct { + Service, Name string + Params []byte + } + // Requests are a batch of vflux requests + Requests []Request + + // Replies are the replies to a batch of requests + Replies [][]byte + + // CapacityQueryReq is the encoding format of the capacity query + CapacityQueryReq struct { + Bias uint64 // seconds + AddTokens []IntOrInf + } + // CapacityQueryReq is the encoding format of the response to the capacity query + CapacityQueryReply []uint64 +) + +// Add encodes and adds a new request to the batch +func (r *Requests) Add(service, name string, val interface{}) (int, error) { + enc, err := rlp.EncodeToBytes(val) + if err != nil { + return -1, err + } + *r = append(*r, Request{ + Service: service, + Name: name, + Params: enc, + }) + return len(*r) - 1, nil +} + +// Get decodes the reply to the i-th request in the batch +func (r Replies) Get(i int, val interface{}) error { + if i < 0 || i >= len(r) { + return ErrNoReply + } + return rlp.DecodeBytes(r[i], val) +} + +const ( + IntNonNegative = iota + IntNegative + IntPlusInf + IntMinusInf +) + +// IntOrInf is the encoding format for arbitrary length signed integers that can also +// hold the values of +Inf or -Inf +type IntOrInf struct { + Type uint8 + Value big.Int +} + +// BigInt returns the value as a big.Int or panics if the value is infinity +func (i *IntOrInf) BigInt() *big.Int { + switch i.Type { + case IntNonNegative: + return new(big.Int).Set(&i.Value) + case IntNegative: + return new(big.Int).Neg(&i.Value) + case IntPlusInf: + panic(nil) // caller should check Inf() before trying to convert to big.Int + case IntMinusInf: + panic(nil) + } + return &big.Int{} // invalid type decodes to 0 value +} + +// Inf returns 1 if the value is +Inf, -1 if it is -Inf, 0 otherwise +func (i *IntOrInf) Inf() int { + switch i.Type { + case IntPlusInf: + return 1 + case IntMinusInf: + return -1 + } + return 0 // invalid type decodes to 0 value +} + +// Int64 limits the value between MinInt64 and MaxInt64 (even if it is +-Inf) and returns an int64 type +func (i *IntOrInf) Int64() int64 { + switch i.Type { + case IntNonNegative: + if i.Value.IsInt64() { + return i.Value.Int64() + } else { + return math.MaxInt64 + } + case IntNegative: + if i.Value.IsInt64() { + return -i.Value.Int64() + } else { + return math.MinInt64 + } + case IntPlusInf: + return math.MaxInt64 + case IntMinusInf: + return math.MinInt64 + } + return 0 // invalid type decodes to 0 value +} + +// SetBigInt sets the value to the given big.Int +func (i *IntOrInf) SetBigInt(v *big.Int) { + if v.Sign() >= 0 { + i.Type = IntNonNegative + i.Value.Set(v) + } else { + i.Type = IntNegative + i.Value.Neg(v) + } +} + +// SetInt64 sets the value to the given int64. Note that MaxInt64 translates to +Inf +// while MinInt64 translates to -Inf. 
+func (i *IntOrInf) SetInt64(v int64) { + if v >= 0 { + if v == math.MaxInt64 { + i.Type = IntPlusInf + } else { + i.Type = IntNonNegative + i.Value.SetInt64(v) + } + } else { + if v == math.MinInt64 { + i.Type = IntMinusInf + } else { + i.Type = IntNegative + i.Value.SetInt64(-v) + } + } +} + +// SetInf sets the value to +Inf or -Inf +func (i *IntOrInf) SetInf(sign int) { + if sign == 1 { + i.Type = IntPlusInf + } else { + i.Type = IntMinusInf + } +} diff --git a/les/lespay/server/balance.go b/les/vflux/server/balance.go similarity index 92% rename from les/lespay/server/balance.go rename to les/vflux/server/balance.go index f820a4ad05..db12a5c573 100644 --- a/les/lespay/server/balance.go +++ b/les/vflux/server/balance.go @@ -243,11 +243,11 @@ func (n *NodeBalance) RequestServed(cost uint64) uint64 { } // Priority returns the actual priority based on the current balance -func (n *NodeBalance) Priority(now mclock.AbsTime, capacity uint64) int64 { +func (n *NodeBalance) Priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() - n.updateBalance(now) + n.updateBalance(n.bt.clock.Now()) return n.balanceToPriority(n.balance, capacity) } @@ -256,16 +256,35 @@ func (n *NodeBalance) Priority(now mclock.AbsTime, capacity uint64) int64 { // in the current session. // If update is true then a priority callback is added that turns UpdateFlag on and off // in case the priority goes below the estimated minimum. -func (n *NodeBalance) EstMinPriority(at mclock.AbsTime, capacity uint64, update bool) int64 { +func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() defer n.lock.Unlock() - var avgReqCost float64 - dt := time.Duration(n.lastUpdate - n.initTime) - if dt > time.Second { - avgReqCost = float64(n.sumReqCost) * 2 / float64(dt) + now := n.bt.clock.Now() + n.updateBalance(now) + b := n.balance + if addBalance != 0 { + offset := n.bt.posExp.LogOffset(now) + old := n.balance.pos.Value(offset) + if addBalance > 0 && (addBalance > maxBalance || old > maxBalance-uint64(addBalance)) { + b.pos = utils.ExpiredValue{} + b.pos.Add(maxBalance, offset) + } else { + b.pos.Add(addBalance, offset) + } + } + if future > 0 { + var avgReqCost float64 + dt := time.Duration(n.lastUpdate - n.initTime) + if dt > time.Second { + avgReqCost = float64(n.sumReqCost) * 2 / float64(dt) + } + b = n.reducedBalance(b, now, future, capacity, avgReqCost) } - pri := n.balanceToPriority(n.reducedBalance(at, capacity, avgReqCost), capacity) + if bias > 0 { + b = n.reducedBalance(b, now+mclock.AbsTime(future), bias, capacity, 0) + } + pri := n.balanceToPriority(b, capacity) if update { n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) } @@ -366,7 +385,7 @@ func (n *NodeBalance) deactivate() { // updateBalance updates balance based on the time factor func (n *NodeBalance) updateBalance(now mclock.AbsTime) { if n.active && now > n.lastUpdate { - n.balance = n.reducedBalance(now, n.capacity, 0) + n.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0) n.lastUpdate = now } } @@ -546,23 +565,25 @@ func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 { } // reducedBalance estimates the reduced balance at a given time in the fututre based -// on the current balance, the time factor and an estimated average request cost per time ratio -func (n *NodeBalance) reducedBalance(at mclock.AbsTime, capacity uint64, avgReqCost float64) balance { - dt := float64(at - n.lastUpdate) 
- b := n.balance +// on the given balance, the time factor and an estimated average request cost per time ratio +func (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance { + // since the costs are applied continuously during the dt time period we calculate + // the expiration offset at the middle of the period + at := start + mclock.AbsTime(dt/2) + dtf := float64(dt) if !b.pos.IsZero() { factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost - diff := -int64(dt * factor) + diff := -int64(dtf * factor) dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at)) if dd == diff { - dt = 0 + dtf = 0 } else { - dt += float64(dd) / factor + dtf += float64(dd) / factor } } if dt > 0 { factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost - b.neg.Add(int64(dt*factor), n.bt.negExp.LogOffset(at)) + b.neg.Add(int64(dtf*factor), n.bt.negExp.LogOffset(at)) } return b } diff --git a/les/lespay/server/balance_test.go b/les/vflux/server/balance_test.go similarity index 84% rename from les/lespay/server/balance_test.go rename to les/vflux/server/balance_test.go index 67e1944373..e22074db2d 100644 --- a/les/lespay/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -17,6 +17,7 @@ package server import ( + "math" "math/rand" "reflect" "testing" @@ -69,7 +70,9 @@ func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) b.ns.SetField(node, btTestSetup.connAddressField, "") - b.ns.SetField(node, ppTestSetup.CapacityField, capacity) + if capacity != 0 { + b.ns.SetField(node, ppTestSetup.CapacityField, capacity) + } n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance) return n } @@ -228,7 +231,7 @@ func TestBalanceToPriority(t *testing.T) { } for _, i := range inputs { node.SetBalance(i.pos, i.neg) - priority := node.Priority(b.clock.Now(), 1000) + priority := node.Priority(1000) if priority != i.priority { t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) } @@ -269,7 +272,7 @@ func TestEstimatedPriority(t *testing.T) { for _, i := range inputs { b.clock.Run(i.runTime) node.RequestServed(i.reqCost) - priority := node.EstMinPriority(b.clock.Now()+mclock.AbsTime(i.futureTime), 1000000000, false) + priority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false) if priority != i.priority { t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority) } @@ -398,3 +401,71 @@ func TestCallback(t *testing.T) { case <-time.NewTimer(time.Millisecond * 100).C: } } + +func TestBalancePersistence(t *testing.T) { + clock := &mclock.Simulated{} + ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + db := memorydb.New() + posExp := &utils.Expirer{} + negExp := &utils.Expirer{} + posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours + negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour + bt := NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) + ns.Start() + bts := &balanceTestSetup{ + clock: clock, + ns: ns, + bt: bt, + } + var nb *NodeBalance + exp := func(expPos, expNeg uint64) { + pos, neg := nb.GetBalance() + if pos != expPos { + t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos) + } + if neg != expNeg { + t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos) + } + } + expTotal := func(expTotal uint64) { + 
total := bt.TotalTokenAmount() + if total != expTotal { + t.Fatalf("Total token amount incorrect, want %v, got %v", expTotal, total) + } + } + + expTotal(0) + nb = bts.newNode(0) + expTotal(0) + nb.SetBalance(16000000000, 16000000000) + exp(16000000000, 16000000000) + expTotal(16000000000) + clock.Run(time.Hour * 2) + exp(8000000000, 4000000000) + expTotal(8000000000) + bt.Stop() + ns.Stop() + + clock = &mclock.Simulated{} + ns = nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + posExp = &utils.Expirer{} + negExp = &utils.Expirer{} + posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours + negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour + bt = NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) + ns.Start() + bts = &balanceTestSetup{ + clock: clock, + ns: ns, + bt: bt, + } + expTotal(8000000000) + nb = bts.newNode(0) + exp(8000000000, 4000000000) + expTotal(8000000000) + clock.Run(time.Hour * 2) + exp(4000000000, 1000000000) + expTotal(4000000000) + bt.Stop() + ns.Stop() +} diff --git a/les/lespay/server/balance_tracker.go b/les/vflux/server/balance_tracker.go similarity index 97% rename from les/lespay/server/balance_tracker.go rename to les/vflux/server/balance_tracker.go index c1ea3c6496..1708019de4 100644 --- a/les/lespay/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -99,6 +99,10 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), quit: make(chan struct{}), } + posOffset, negOffset := bt.ndb.getExpiration() + posExp.SetLogOffset(clock.Now(), posOffset) + negExp.SetLogOffset(clock.Now(), negOffset) + bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool { bt.inactive.AddExp(balance) return true @@ -177,7 +181,7 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 { bt.balanceTimer.Update(func(_ time.Duration) bool { bt.active = utils.ExpiredValue{} bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok { + if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok && n.active { pos, _ := n.GetRawBalance() bt.active.AddExp(pos) } @@ -261,9 +265,8 @@ func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredV func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool { if neg { return b.Value(bt.negExp.LogOffset(now)) <= negThreshold - } else { - return b.Value(bt.posExp.LogOffset(now)) <= posThreshold } + return b.Value(bt.posExp.LogOffset(now)) <= posThreshold } // updateTotalBalance adjusts the total balance after executing given callback. 
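The expectations in TestBalancePersistence above follow directly from the configured expiration rates: SetRate receives math.Log(2) divided by the half-life, so a stored balance decays as value * 2^(-elapsed/halfLife), which is why 16000000000/16000000000 becomes 8000000000/4000000000 after two simulated hours. A minimal standalone sketch of that arithmetic (plain Go, independent of the les/utils package; the helper name is illustrative):

package main

import (
	"fmt"
	"math"
	"time"
)

// remaining returns the token balance left after the elapsed time, given the
// half-life implied by an expiration rate of math.Log(2)/halfLife.
func remaining(balance float64, halfLife, elapsed time.Duration) uint64 {
	decay := math.Pow(0.5, float64(elapsed)/float64(halfLife))
	return uint64(math.Round(balance * decay))
}

func main() {
	// positive balance halves every two hours, negative balance every hour,
	// matching the posExp/negExp rates used in the test above
	fmt.Println(remaining(16000000000, 2*time.Hour, 2*time.Hour)) // 8000000000
	fmt.Println(remaining(16000000000, time.Hour, 2*time.Hour))   // 4000000000
}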
diff --git a/les/lespay/server/clientdb.go b/les/vflux/server/clientdb.go similarity index 100% rename from les/lespay/server/clientdb.go rename to les/vflux/server/clientdb.go diff --git a/les/lespay/server/clientdb_test.go b/les/vflux/server/clientdb_test.go similarity index 100% rename from les/lespay/server/clientdb_test.go rename to les/vflux/server/clientdb_test.go diff --git a/les/lespay/server/prioritypool.go b/les/vflux/server/prioritypool.go similarity index 72% rename from les/lespay/server/prioritypool.go rename to les/vflux/server/prioritypool.go index c0c33840ca..e940ac7c65 100644 --- a/les/lespay/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -101,17 +101,21 @@ type PriorityPool struct { minCap uint64 activeBias time.Duration capacityStepDiv uint64 + + cachedCurve *CapacityCurve + ccUpdatedAt mclock.AbsTime + ccUpdateForced bool } // nodePriority interface provides current and estimated future priorities on demand type nodePriority interface { // Priority should return the current priority of the node (higher is better) - Priority(now mclock.AbsTime, cap uint64) int64 + Priority(cap uint64) int64 // EstMinPriority should return a lower estimate for the minimum of the node priority // value starting from the current moment until the given time. If the priority goes // under the returned estimate before the specified moment then it is the caller's // responsibility to signal with updateFlag. - EstMinPriority(until mclock.AbsTime, cap uint64, update bool) int64 + EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 } // ppNodeInfo is the internal node descriptor of PriorityPool @@ -131,12 +135,12 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl ns: ns, PriorityPoolSetup: setup, clock: clock, - activeQueue: prque.NewLazyQueue(activeSetIndex, activePriority, activeMaxPriority, clock, lazyQueueRefresh), inactiveQueue: prque.New(inactiveSetIndex), minCap: minCap, activeBias: activeBias, capacityStepDiv: capacityStepDiv, } + pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh) ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { @@ -197,6 +201,9 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias if targetCap < pp.minCap { targetCap = pp.minCap } + if bias < pp.activeBias { + bias = pp.activeBias + } c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) if c == nil { log.Error("RequestCapacity called for unknown node", "id", node.ID()) @@ -204,9 +211,9 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias } var priority int64 if targetCap > c.capacity { - priority = c.nodePriority.EstMinPriority(pp.clock.Now()+mclock.AbsTime(bias), targetCap, false) + priority = c.nodePriority.EstimatePriority(targetCap, 0, 0, bias, false) } else { - priority = c.nodePriority.Priority(pp.clock.Now(), targetCap) + priority = c.nodePriority.Priority(targetCap) } pp.markForChange(c) pp.setCapacity(c, targetCap) @@ -214,7 +221,7 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) pp.activeQueue.Push(c) - minPriority = pp.enforceLimits() + _, minPriority = pp.enforceLimits() // if capacity update is possible now then minPriority == math.MinInt64 // if it is not possible at all then 
minPriority == math.MaxInt64 allowed = priority > minPriority @@ -281,30 +288,34 @@ func invertPriority(p int64) int64 { } // activePriority callback returns actual priority of ppNodeInfo item in activeQueue -func activePriority(a interface{}, now mclock.AbsTime) int64 { +func activePriority(a interface{}) int64 { c := a.(*ppNodeInfo) if c.forced { return math.MinInt64 } if c.bias == 0 { - return invertPriority(c.nodePriority.Priority(now, c.capacity)) + return invertPriority(c.nodePriority.Priority(c.capacity)) } else { - return invertPriority(c.nodePriority.EstMinPriority(now+mclock.AbsTime(c.bias), c.capacity, true)) + return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, 0, c.bias, true)) } } // activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue -func activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { +func (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { c := a.(*ppNodeInfo) if c.forced { return math.MinInt64 } - return invertPriority(c.nodePriority.EstMinPriority(until+mclock.AbsTime(c.bias), c.capacity, false)) + future := time.Duration(until - pp.clock.Now()) + if future < 0 { + future = 0 + } + return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, future, c.bias, false)) } // inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 { - return p.nodePriority.Priority(pp.clock.Now(), pp.minCap) + return p.nodePriority.Priority(pp.minCap) } // connectedNode is called when a new node has been added to the pool (InactiveFlag set) @@ -380,16 +391,19 @@ func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) { // enforceLimits enforces active node count and total capacity limits. It returns the // lowest active node priority. Note that this function is performed on the temporary // internal state. -func (pp *PriorityPool) enforceLimits() int64 { +func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount { - return math.MinInt64 + return nil, math.MinInt64 } - var maxActivePriority int64 + var ( + c *ppNodeInfo + maxActivePriority int64 + ) pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool { - c := data.(*ppNodeInfo) + c = data.(*ppNodeInfo) pp.markForChange(c) maxActivePriority = priority - if c.capacity == pp.minCap { + if c.capacity == pp.minCap || pp.activeCount > pp.maxCount { pp.setCapacity(c, 0) } else { sub := c.capacity / pp.capacityStepDiv @@ -401,7 +415,7 @@ func (pp *PriorityPool) enforceLimits() int64 { } return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount }) - return invertPriority(maxActivePriority) + return c, invertPriority(maxActivePriority) } // finalizeChanges either commits or reverts temporary changes. The necessary capacity @@ -431,6 +445,9 @@ func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) { c.origCap = 0 } pp.changed = nil + if commit { + pp.ccUpdateForced = true + } return } @@ -473,6 +490,7 @@ func (pp *PriorityPool) tryActivate() []capUpdate { break } } + pp.ccUpdateForced = true return pp.finalizeChanges(commit) } @@ -501,3 +519,150 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { } updates = pp.tryActivate() } + +// CapacityCurve is a snapshot of the priority pool contents in a format that can efficiently +// estimate how much capacity could be granted to a given node at a given priority level. 
+type CapacityCurve struct { + points []curvePoint // curve points sorted in descending order of priority + index map[enode.ID][]int // curve point indexes belonging to each node + exclude []int // curve point indexes of excluded node + excludeFirst bool // true if activeCount == maxCount +} + +type curvePoint struct { + freeCap uint64 // available capacity and node count at the current priority level + nextPri int64 // next priority level where more capacity will be available +} + +// GetCapacityCurve returns a new or recently cached CapacityCurve based on the contents of the pool +func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { + pp.lock.Lock() + defer pp.lock.Unlock() + + now := pp.clock.Now() + dt := time.Duration(now - pp.ccUpdatedAt) + if !pp.ccUpdateForced && pp.cachedCurve != nil && dt < time.Second*10 { + return pp.cachedCurve + } + + pp.ccUpdateForced = false + pp.ccUpdatedAt = now + curve := &CapacityCurve{ + index: make(map[enode.ID][]int), + } + pp.cachedCurve = curve + + var excludeID enode.ID + excludeFirst := pp.maxCount == pp.activeCount + // reduce node capacities or remove nodes until nothing is left in the queue; + // record the available capacity and the necessary priority after each step + for pp.activeCap > 0 { + cp := curvePoint{} + if pp.activeCap > pp.maxCap { + log.Error("Active capacity is greater than allowed maximum", "active", pp.activeCap, "maximum", pp.maxCap) + } else { + cp.freeCap = pp.maxCap - pp.activeCap + } + // temporarily increase activeCap to enforce reducing or removing a node capacity + tempCap := cp.freeCap + 1 + pp.activeCap += tempCap + var next *ppNodeInfo + // enforceLimits removes the lowest priority node if it has minimal capacity, + // otherwise reduces its capacity + next, cp.nextPri = pp.enforceLimits() + pp.activeCap -= tempCap + if next == nil { + log.Error("GetCapacityCurve: cannot remove next element from the priority queue") + break + } + id := next.node.ID() + if excludeFirst { + // if the node count limit is already reached then mark the node with the + // lowest priority for exclusion + curve.excludeFirst = true + excludeID = id + excludeFirst = false + } + // multiple curve points and therefore multiple indexes may belong to a node + // if it was removed in multiple steps (if its capacity was more than the minimum) + curve.index[id] = append(curve.index[id], len(curve.points)) + curve.points = append(curve.points, cp) + } + // restore original state of the queue + pp.finalizeChanges(false) + curve.points = append(curve.points, curvePoint{ + freeCap: pp.maxCap, + nextPri: math.MaxInt64, + }) + if curve.excludeFirst { + curve.exclude = curve.index[excludeID] + } + return curve +} + +// Exclude returns a CapacityCurve with the given node excluded from the original curve +func (cc *CapacityCurve) Exclude(id enode.ID) *CapacityCurve { + if exclude, ok := cc.index[id]; ok { + // return a new version of the curve (only one excluded node can be selected) + // Note: if the first node was excluded by default (excludeFirst == true) then + // we can forget about that and exclude the node with the given id instead. 
+ return &CapacityCurve{ + points: cc.points, + index: cc.index, + exclude: exclude, + } + } + return cc +} + +func (cc *CapacityCurve) getPoint(i int) curvePoint { + cp := cc.points[i] + if i == 0 && cc.excludeFirst { + cp.freeCap = 0 + return cp + } + for ii := len(cc.exclude) - 1; ii >= 0; ii-- { + ei := cc.exclude[ii] + if ei < i { + break + } + e1, e2 := cc.points[ei], cc.points[ei+1] + cp.freeCap += e2.freeCap - e1.freeCap + } + return cp +} + +// MaxCapacity calculates the maximum capacity available for a node with a given +// (monotonically decreasing) priority vs. capacity function. Note that if the requesting +// node is already in the pool then it should be excluded from the curve in order to get +// the correct result. +func (cc *CapacityCurve) MaxCapacity(priority func(cap uint64) int64) uint64 { + min, max := 0, len(cc.points)-1 // the curve always has at least one point + for min < max { + mid := (min + max) / 2 + cp := cc.getPoint(mid) + if cp.freeCap == 0 || priority(cp.freeCap) > cp.nextPri { + min = mid + 1 + } else { + max = mid + } + } + cp2 := cc.getPoint(min) + if cp2.freeCap == 0 || min == 0 { + return cp2.freeCap + } + cp1 := cc.getPoint(min - 1) + if priority(cp2.freeCap) > cp1.nextPri { + return cp2.freeCap + } + minc, maxc := cp1.freeCap, cp2.freeCap-1 + for minc < maxc { + midc := (minc + maxc + 1) / 2 + if midc == 0 || priority(midc) > cp1.nextPri { + minc = midc + } else { + maxc = midc - 1 + } + } + return maxc +} diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go new file mode 100644 index 0000000000..d83ddc1767 --- /dev/null +++ b/les/vflux/server/prioritypool_test.go @@ -0,0 +1,245 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
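The capacity curve above is meant to be queried with a node's own monotonically decreasing priority-vs-capacity function, after excluding that node if it is already in the pool; the prioritypool test that follows exercises exactly this pattern. A rough sketch of such a caller inside the vflux/server package (the helper name and the balance/capacity priority function are illustrative, mirroring the tests):

// maxGrantable is an illustrative helper, not part of this change: it estimates
// how much capacity a node with the given token balance could currently be granted.
func maxGrantable(pp *PriorityPool, id enode.ID, balance uint64) uint64 {
	// exclude the node itself so its existing allocation does not distort the result
	curve := pp.GetCapacityCurve().Exclude(id)
	return curve.MaxCapacity(func(cap uint64) int64 {
		// MaxCapacity only calls this with cap > 0; priority decreases as capacity grows
		return int64(balance / cap)
	})
}

An actual allocation is still requested via RequestCapacity, as the test below does after computing the expected value from the curve.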
+ +package server + +import ( + "math/rand" + "reflect" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/p2p/nodestate" +) + +var ( + testSetup = &nodestate.Setup{} + ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag") + ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag") + ppTestSetup = NewPriorityPoolSetup(testSetup) +) + +func init() { + ppTestSetup.Connect(ppTestClientField, ppUpdateFlag) +} + +const ( + testCapacityStepDiv = 100 + testCapacityToleranceDiv = 10 + testMinCap = 100 +) + +type ppTestClient struct { + node *enode.Node + balance, cap uint64 +} + +func (c *ppTestClient) Priority(cap uint64) int64 { + return int64(c.balance / cap) +} + +func (c *ppTestClient) EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { + return int64(c.balance / cap) +} + +func TestPriorityPool(t *testing.T) { + clock := &mclock.Simulated{} + ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + + ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + if n := ns.GetField(node, ppTestSetup.priorityField); n != nil { + c := n.(*ppTestClient) + c.cap = newValue.(uint64) + } + }) + pp := NewPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) + ns.Start() + pp.SetLimits(100, 1000000) + clients := make([]*ppTestClient, 100) + raise := func(c *ppTestClient) { + for { + var ok bool + ns.Operation(func() { + _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) + }) + if !ok { + return + } + } + } + var sumBalance uint64 + check := func(c *ppTestClient) { + expCap := 1000000 * c.balance / sumBalance + capTol := expCap / testCapacityToleranceDiv + if c.cap < expCap-capTol || c.cap > expCap+capTol { + t.Errorf("Wrong node capacity (expected %d, got %d)", expCap, c.cap) + } + } + + for i := range clients { + c := &ppTestClient{ + node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}), + balance: 100000000000, + cap: 1000, + } + sumBalance += c.balance + clients[i] = c + ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, ppTestSetup.priorityField, c) + ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + raise(c) + check(c) + } + + for count := 0; count < 100; count++ { + c := clients[rand.Intn(len(clients))] + oldBalance := c.balance + c.balance = uint64(rand.Int63n(100000000000) + 100000000000) + sumBalance += c.balance - oldBalance + pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + if c.balance > oldBalance { + raise(c) + } else { + for _, c := range clients { + raise(c) + } + } + // check whether capacities are proportional to balances + for _, c := range clients { + check(c) + } + if count%10 == 0 { + // test available capacity calculation with capacity curve + c = clients[rand.Intn(len(clients))] + curve := pp.GetCapacityCurve().Exclude(c.node.ID()) + + add := uint64(rand.Int63n(10000000000000)) + c.balance += add + sumBalance += add + expCap := curve.MaxCapacity(func(cap uint64) int64 { + return int64(c.balance / cap) + }) + //fmt.Println(expCap, c.balance, sumBalance) + /*for i, cp := range curve.points { + fmt.Println("cp", i, cp, "ex", curve.getPoint(i)) + }*/ + var ok bool + 
expFail := expCap + 1 + if expFail < testMinCap { + expFail = testMinCap + } + ns.Operation(func() { + _, ok = pp.RequestCapacity(c.node, expFail, 0, true) + }) + if ok { + t.Errorf("Request for more than expected available capacity succeeded") + } + if expCap >= testMinCap { + ns.Operation(func() { + _, ok = pp.RequestCapacity(c.node, expCap, 0, true) + }) + if !ok { + t.Errorf("Request for expected available capacity failed") + } + } + c.balance -= add + sumBalance -= add + pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + for _, c := range clients { + raise(c) + } + } + } + + ns.Stop() +} + +func TestCapacityCurve(t *testing.T) { + clock := &mclock.Simulated{} + ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + pp := NewPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) + ns.Start() + pp.SetLimits(10, 10000000) + clients := make([]*ppTestClient, 10) + + for i := range clients { + c := &ppTestClient{ + node: enode.SignNull(&enr.Record{}, enode.ID{byte(i)}), + balance: 100000000000 * uint64(i+1), + cap: 1000000, + } + clients[i] = c + ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, ppTestSetup.priorityField, c) + ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.Operation(func() { + pp.RequestCapacity(c.node, c.cap, 0, true) + }) + } + + curve := pp.GetCapacityCurve() + check := func(balance, expCap uint64) { + cap := curve.MaxCapacity(func(cap uint64) int64 { + return int64(balance / cap) + }) + var fail bool + if cap == 0 || expCap == 0 { + fail = cap != expCap + } else { + pri := balance / cap + expPri := balance / expCap + fail = pri != expPri && pri != expPri+1 + } + if fail { + t.Errorf("Incorrect capacity for %d balance (got %d, expected %d)", balance, cap, expCap) + } + } + + check(0, 0) + check(10000000000, 100000) + check(50000000000, 500000) + check(100000000000, 1000000) + check(200000000000, 1000000) + check(300000000000, 1500000) + check(450000000000, 1500000) + check(600000000000, 2000000) + check(800000000000, 2000000) + check(1000000000000, 2500000) + + pp.SetLimits(11, 10000000) + curve = pp.GetCapacityCurve() + + check(0, 0) + check(10000000000, 100000) + check(50000000000, 500000) + check(150000000000, 750000) + check(200000000000, 1000000) + check(220000000000, 1100000) + check(275000000000, 1100000) + check(375000000000, 1500000) + check(450000000000, 1500000) + check(600000000000, 2000000) + check(800000000000, 2000000) + check(1000000000000, 2500000) + + ns.Stop() +} diff --git a/les/vflux/server/service.go b/les/vflux/server/service.go new file mode 100644 index 0000000000..ab759ae441 --- /dev/null +++ b/les/vflux/server/service.go @@ -0,0 +1,122 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
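The service.go file that follows defines how these vflux components are exposed to clients: services register under a short string id and handle RLP-encoded requests sequentially. A hypothetical minimal implementation of the Service interface introduced below (the echo service and its reply encoding are made up for illustration):

// echoService is a made-up example service that RLP-encodes the request name
// back to the caller; it is not part of this change.
type echoService struct{}

func (echoService) ServiceInfo() (id, desc string) {
	return "echo", "replies with the request name" // the id must not contain ':'
}

func (echoService) Handle(id enode.ID, address string, name string, data []byte) []byte {
	reply, _ := rlp.EncodeToBytes(name) // encoding a plain string does not fail
	return reply
}

Registering it on a server instance would then be srv := NewServer(10 * time.Millisecond) followed by srv.Register(echoService{}), with srv.Serve dispatching each incoming request batch to Handle.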
+ +package server + +import ( + "net" + "strings" + "sync" + "time" + + "github.com/ethereum/go-ethereum/les/utils" + "github.com/ethereum/go-ethereum/les/vflux" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/rlp" +) + +type ( + // Server serves vflux requests + Server struct { + limiter *utils.Limiter + lock sync.Mutex + services map[string]*serviceEntry + delayPerRequest time.Duration + } + + // Service is a service registered at the Server and identified by a string id + Service interface { + ServiceInfo() (id, desc string) // only called during registration + Handle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently + } + + serviceEntry struct { + id, desc string + backend Service + } +) + +// NewServer creates a new Server +func NewServer(delayPerRequest time.Duration) *Server { + return &Server{ + limiter: utils.NewLimiter(1000), + delayPerRequest: delayPerRequest, + services: make(map[string]*serviceEntry), + } +} + +// Register registers a Service +func (s *Server) Register(b Service) { + srv := &serviceEntry{backend: b} + srv.id, srv.desc = b.ServiceInfo() + if strings.Contains(srv.id, ":") { + // srv.id + ":" will be used as a service database prefix + log.Error("Service ID contains ':'", "id", srv.id) + return + } + s.lock.Lock() + s.services[srv.id] = srv + s.lock.Unlock() +} + +// Serve serves a vflux request batch +// Note: requests are served by the Handle functions of the registered services. Serve +// may be called concurrently but the Handle functions are called sequentially and +// therefore thread safety is guaranteed. +func (s *Server) Serve(id enode.ID, address string, requests vflux.Requests) vflux.Replies { + reqLen := uint(len(requests)) + if reqLen == 0 || reqLen > vflux.MaxRequestLength { + return nil + } + // Note: the value parameter will be supplied by the token sale module (total amount paid) + ch := <-s.limiter.Add(id, address, 0, reqLen) + if ch == nil { + return nil + } + // Note: the limiter ensures that the following section is not running concurrently, + // the lock only protects against contention caused by new service registration + s.lock.Lock() + results := make(vflux.Replies, len(requests)) + for i, req := range requests { + if service := s.services[req.Service]; service != nil { + results[i] = service.backend.Handle(id, address, req.Name, req.Params) + } + } + s.lock.Unlock() + time.Sleep(s.delayPerRequest * time.Duration(reqLen)) + close(ch) + return results +} + +// ServeEncoded serves an encoded vflux request batch and returns the encoded replies +func (s *Server) ServeEncoded(id enode.ID, addr *net.UDPAddr, req []byte) []byte { + var requests vflux.Requests + if err := rlp.DecodeBytes(req, &requests); err != nil { + return nil + } + results := s.Serve(id, addr.String(), requests) + if results == nil { + return nil + } + res, _ := rlp.EncodeToBytes(&results) + return res +} + +// Stop shuts down the server +func (s *Server) Stop() { + s.limiter.Stop() +} diff --git a/light/lightchain.go b/light/lightchain.go index 6fc321ae0b..ca6fbfac49 100644 --- a/light/lightchain.go +++ b/light/lightchain.go @@ -396,24 +396,26 @@ func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (i lc.wg.Add(1) defer lc.wg.Done() - var events []interface{} - whFunc := func(header *types.Header) error { - status, err := lc.hc.WriteHeader(header) - - switch status { - case core.CanonStatTy: - log.Debug("Inserted new header", "number", 
header.Number, "hash", header.Hash()) - events = append(events, core.ChainEvent{Block: types.NewBlockWithHeader(header), Hash: header.Hash()}) - - case core.SideStatTy: - log.Debug("Inserted forked header", "number", header.Number, "hash", header.Hash()) - events = append(events, core.ChainSideEvent{Block: types.NewBlockWithHeader(header)}) - } - return err + status, err := lc.hc.InsertHeaderChain(chain, start) + if err != nil || len(chain) == 0 { + return 0, err + } + + // Create chain event for the new head block of this insertion. + var ( + events = make([]interface{}, 0, 1) + lastHeader = chain[len(chain)-1] + block = types.NewBlockWithHeader(lastHeader) + ) + switch status { + case core.CanonStatTy: + events = append(events, core.ChainEvent{Block: block, Hash: block.Hash()}) + case core.SideStatTy: + events = append(events, core.ChainSideEvent{Block: block}) } - i, err := lc.hc.InsertHeaderChain(chain, whFunc, start) lc.postChainEvents(events) - return i, err + + return 0, err } // CurrentHeader retrieves the current head header of the canonical chain. The diff --git a/light/lightchain_test.go b/light/lightchain_test.go index 70d2e70c18..2aed08d74e 100644 --- a/light/lightchain_test.go +++ b/light/lightchain_test.go @@ -18,6 +18,7 @@ package light import ( "context" + "errors" "math/big" "testing" @@ -321,7 +322,7 @@ func TestBadHeaderHashes(t *testing.T) { var err error headers := makeHeaderChainWithDiff(bc.genesisBlock, []int{1, 2, 4}, 10) core.BadHashes[headers[2].Hash()] = true - if _, err = bc.InsertHeaderChain(headers, 1); err != core.ErrBlacklistedHash { + if _, err = bc.InsertHeaderChain(headers, 1); !errors.Is(err, core.ErrBlacklistedHash) { t.Errorf("error mismatch: have: %v, want %v", err, core.ErrBlacklistedHash) } } diff --git a/light/odr.go b/light/odr.go index 7016ef8ef2..9521dd53e8 100644 --- a/light/odr.go +++ b/light/odr.go @@ -42,6 +42,7 @@ type OdrBackend interface { BloomTrieIndexer() *core.ChainIndexer BloomIndexer() *core.ChainIndexer Retrieve(ctx context.Context, req OdrRequest) error + RetrieveTxStatus(ctx context.Context, req *TxStatusRequest) error IndexerConfig() *IndexerConfig } @@ -135,8 +136,6 @@ func (req *ReceiptsRequest) StoreResult(db ethdb.Database) { // ChtRequest is the ODR request type for retrieving header by Canonical Hash Trie type ChtRequest struct { - Untrusted bool // Indicator whether the result retrieved is trusted or not - PeerId string // The specified peer id from which to retrieve data. Config *IndexerConfig ChtNum, BlockNum uint64 ChtRoot common.Hash @@ -148,12 +147,9 @@ type ChtRequest struct { // StoreResult stores the retrieved data in local database func (req *ChtRequest) StoreResult(db ethdb.Database) { hash, num := req.Header.Hash(), req.Header.Number.Uint64() - - if !req.Untrusted { - rawdb.WriteHeader(db, req.Header) - rawdb.WriteTd(db, hash, num, req.Td) - rawdb.WriteCanonicalHash(db, hash, num) - } + rawdb.WriteHeader(db, req.Header) + rawdb.WriteTd(db, hash, num, req.Td) + rawdb.WriteCanonicalHash(db, hash, num) } // BloomRequest is the ODR request type for retrieving bloom filters from a CHT structure diff --git a/light/odr_test.go b/light/odr_test.go index 5f7f4d96cb..0fc45b8734 100644 --- a/light/odr_test.go +++ b/light/odr_test.go @@ -194,9 +194,10 @@ func odrContractCall(ctx context.Context, db ethdb.Database, bc *core.BlockChain // Perform read-only call. 
st.SetBalance(testBankAddress, math.MaxBig256) - msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 1000000, new(big.Int), data, false)} - context := core.NewEVMContext(msg, header, chain, nil) - vmenv := vm.NewEVM(context, st, config, vm.Config{}) + msg := callmsg{types.NewMessage(testBankAddress, &testContractAddr, 0, new(big.Int), 1000000, new(big.Int), data, nil, false)} + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(header, chain, nil) + vmenv := vm.NewEVM(context, txContext, st, config, vm.Config{}) gp := new(core.GasPool).AddGas(math.MaxUint64) result, _ := core.ApplyMessage(vmenv, msg, gp) res = append(res, result.Return()...) diff --git a/light/odr_util.go b/light/odr_util.go index aec0c7b69f..bbbcdbce21 100644 --- a/light/odr_util.go +++ b/light/odr_util.go @@ -19,20 +19,23 @@ package light import ( "bytes" "context" + "errors" "math/big" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" ) -var sha3Nil = crypto.Keccak256Hash(nil) +// errNonCanonicalHash is returned if the requested chain data doesn't belong +// to the canonical chain. ODR can only retrieve the canonical chain data covered +// by the CHT or Bloom trie for verification. +var errNonCanonicalHash = errors.New("hash is not currently canonical") // GetHeaderByNumber retrieves the canonical block header corresponding to the -// given number. +// given number. The returned header is proven by local CHT. func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*types.Header, error) { // Try to find it in the local database first. db := odr.Database() @@ -63,25 +66,6 @@ func GetHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64) (*typ return r.Header, nil } -// GetUntrustedHeaderByNumber retrieves specified block header without -// correctness checking. Note this function should only be used in light -// client checkpoint syncing. -func GetUntrustedHeaderByNumber(ctx context.Context, odr OdrBackend, number uint64, peerId string) (*types.Header, error) { - // todo(rjl493456442) it's a hack to retrieve headers which is not covered - // by CHT. Fix it in LES4 - r := &ChtRequest{ - BlockNum: number, - ChtNum: number / odr.IndexerConfig().ChtSize, - Untrusted: true, - PeerId: peerId, - Config: odr.IndexerConfig(), - } - if err := odr.Retrieve(ctx, r); err != nil { - return nil, err - } - return r.Header, nil -} - // GetCanonicalHash retrieves the canonical block hash corresponding to the number. func GetCanonicalHash(ctx context.Context, odr OdrBackend, number uint64) (common.Hash, error) { hash := rawdb.ReadCanonicalHash(odr.Database(), number) @@ -102,10 +86,13 @@ func GetTd(ctx context.Context, odr OdrBackend, hash common.Hash, number uint64) if td != nil { return td, nil } - _, err := GetHeaderByNumber(ctx, odr, number) + header, err := GetHeaderByNumber(ctx, odr, number) if err != nil { return nil, err } + if header.Hash() != hash { + return nil, errNonCanonicalHash + } // -> td mapping already be stored in db, get it. 
return rawdb.ReadTd(odr.Database(), hash, number), nil } @@ -120,6 +107,9 @@ func GetBodyRLP(ctx context.Context, odr OdrBackend, hash common.Hash, number ui if err != nil { return nil, errNoHeader } + if header.Hash() != hash { + return nil, errNonCanonicalHash + } r := &BlockRequest{Hash: hash, Number: number, Header: header} if err := odr.Retrieve(ctx, r); err != nil { return nil, err @@ -167,6 +157,9 @@ func GetBlockReceipts(ctx context.Context, odr OdrBackend, hash common.Hash, num if err != nil { return nil, errNoHeader } + if header.Hash() != hash { + return nil, errNonCanonicalHash + } r := &ReceiptsRequest{Hash: hash, Number: number, Header: header} if err := odr.Retrieve(ctx, r); err != nil { return nil, err @@ -276,10 +269,15 @@ func GetBloomBits(ctx context.Context, odr OdrBackend, bit uint, sections []uint return result, nil } -// GetTransaction retrieves a canonical transaction by hash and also returns its position in the chain +// GetTransaction retrieves a canonical transaction by hash and also returns +// its position in the chain. There is no guarantee in the LES protocol that +// the mined transaction will be retrieved back for sure because of different +// reasons(the transaction is unindexed, the malicous server doesn't reply it +// deliberately, etc). Therefore, unretrieved transactions will receive a certain +// number of retrys, thus giving a weak guarantee. func GetTransaction(ctx context.Context, odr OdrBackend, txHash common.Hash) (*types.Transaction, common.Hash, uint64, uint64, error) { r := &TxStatusRequest{Hashes: []common.Hash{txHash}} - if err := odr.Retrieve(ctx, r); err != nil || r.Status[0].Status != core.TxStatusIncluded { + if err := odr.RetrieveTxStatus(ctx, r); err != nil || r.Status[0].Status != core.TxStatusIncluded { return nil, common.Hash{}, 0, 0, err } pos := r.Status[0].Lookup diff --git a/light/postprocess.go b/light/postprocess.go index de207ad4a3..891c8a5869 100644 --- a/light/postprocess.go +++ b/light/postprocess.go @@ -147,7 +147,7 @@ func NewChtIndexer(db ethdb.Database, odr OdrBackend, size, confirms uint64, dis diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithCache(trieTable, 1, ""), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down trieset: mapset.NewSet(), sectionSize: size, disablePruning: disablePruning, @@ -340,7 +340,7 @@ func NewBloomTrieIndexer(db ethdb.Database, odr OdrBackend, parentSize, size uin diskdb: db, odr: odr, trieTable: trieTable, - triedb: trie.NewDatabaseWithCache(trieTable, 1, ""), // Use a tiny cache only to keep memory down + triedb: trie.NewDatabaseWithConfig(trieTable, &trie.Config{Cache: 1}), // Use a tiny cache only to keep memory down trieset: mapset.NewSet(), parentSize: parentSize, size: size, diff --git a/light/trie.go b/light/trie.go index 3eb05f4a3f..0516b94486 100644 --- a/light/trie.go +++ b/light/trie.go @@ -30,6 +30,10 @@ import ( "github.com/ethereum/go-ethereum/trie" ) +var ( + sha3Nil = crypto.Keccak256Hash(nil) +) + func NewState(ctx context.Context, head *types.Header, odr OdrBackend) *state.StateDB { state, _ := state.New(head.Root, NewStateDatabase(ctx, head, odr), nil) return state diff --git a/light/txpool.go b/light/txpool.go index 2831de5a65..1296389e3b 100644 --- a/light/txpool.go +++ b/light/txpool.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" - 
"github.com/ethereum/go-ethereum/rlp" ) const ( @@ -69,6 +68,7 @@ type TxPool struct { clearIdx uint64 // earliest block nr that can contain mined tx info istanbul bool // Fork indicator whether we are in the istanbul stage. + eip2718 bool // Fork indicator whether we are in the eip2718 stage. } // TxRelayBackend provides an interface to the mechanism that forwards transacions @@ -90,7 +90,7 @@ type TxRelayBackend interface { func NewTxPool(config *params.ChainConfig, chain *LightChain, relay TxRelayBackend) *TxPool { pool := &TxPool{ config: config, - signer: types.NewEIP155Signer(config.ChainID), + signer: types.LatestSigner(config), nonce: make(map[common.Address]uint64), pending: make(map[common.Hash]*types.Transaction), mined: make(map[common.Hash][]*types.Transaction), @@ -314,6 +314,7 @@ func (pool *TxPool) setNewHead(head *types.Header) { // Update fork indicator by next pending block number next := new(big.Int).Add(head.Number, big.NewInt(1)) pool.istanbul = pool.config.IsIstanbul(next) + pool.eip2718 = pool.config.IsBerlin(next) } // Stop stops the light transaction pool @@ -381,7 +382,7 @@ func (pool *TxPool) validateTx(ctx context.Context, tx *types.Transaction) error } // Should supply enough intrinsic gas - gas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, true, pool.istanbul) + gas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) if err != nil { return err } @@ -430,8 +431,7 @@ func (pool *TxPool) add(ctx context.Context, tx *types.Transaction) error { func (pool *TxPool) Add(ctx context.Context, tx *types.Transaction) error { pool.mu.Lock() defer pool.mu.Unlock() - - data, err := rlp.EncodeToBytes(tx) + data, err := tx.MarshalBinary() if err != nil { return err } diff --git a/metrics/config.go b/metrics/config.go new file mode 100644 index 0000000000..d05d664265 --- /dev/null +++ b/metrics/config.go @@ -0,0 +1,45 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of go-ethereum. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package metrics + +// Config contains the configuration for the metric collection. +type Config struct { + Enabled bool `toml:",omitempty"` + EnabledExpensive bool `toml:",omitempty"` + HTTP string `toml:",omitempty"` + Port int `toml:",omitempty"` + EnableInfluxDB bool `toml:",omitempty"` + InfluxDBEndpoint string `toml:",omitempty"` + InfluxDBDatabase string `toml:",omitempty"` + InfluxDBUsername string `toml:",omitempty"` + InfluxDBPassword string `toml:",omitempty"` + InfluxDBTags string `toml:",omitempty"` +} + +// DefaultConfig is the default config for metrics used in go-ethereum. 
+var DefaultConfig = Config{ + Enabled: false, + EnabledExpensive: false, + HTTP: "127.0.0.1", + Port: 6060, + EnableInfluxDB: false, + InfluxDBEndpoint: "http://localhost:8086", + InfluxDBDatabase: "geth", + InfluxDBUsername: "test", + InfluxDBPassword: "test", + InfluxDBTags: "host=localhost", +} diff --git a/metrics/cpu_enabled.go b/metrics/cpu_enabled.go index 52a3c2e966..02192928b7 100644 --- a/metrics/cpu_enabled.go +++ b/metrics/cpu_enabled.go @@ -31,6 +31,10 @@ func ReadCPUStats(stats *CPUStats) { log.Error("Could not read cpu stats", "err", err) return } + if len(timeStats) == 0 { + log.Error("Empty cpu stats") + return + } // requesting all cpu times will always return an array with only one time stats entry timeStat := timeStats[0] stats.GlobalTime = int64((timeStat.User + timeStat.Nice + timeStat.System) * cpu.ClocksPerSec) diff --git a/metrics/exp/exp.go b/metrics/exp/exp.go index f510b8381e..3ebe8cc68a 100644 --- a/metrics/exp/exp.go +++ b/metrics/exp/exp.go @@ -128,7 +128,7 @@ func (exp *exp) publishMeter(name string, metric metrics.Meter) { exp.getInt(name + ".count").Set(m.Count()) exp.getFloat(name + ".one-minute").Set(m.Rate1()) exp.getFloat(name + ".five-minute").Set(m.Rate5()) - exp.getFloat(name + ".fifteen-minute").Set((m.Rate15())) + exp.getFloat(name + ".fifteen-minute").Set(m.Rate15()) exp.getFloat(name + ".mean").Set(m.RateMean()) } diff --git a/metrics/gauge_float64_test.go b/metrics/gauge_float64_test.go index 3ee568e7ba..02b75580c4 100644 --- a/metrics/gauge_float64_test.go +++ b/metrics/gauge_float64_test.go @@ -12,27 +12,27 @@ func BenchmarkGuageFloat64(b *testing.B) { func TestGaugeFloat64(t *testing.T) { g := NewGaugeFloat64() - g.Update(float64(47.0)) - if v := g.Value(); float64(47.0) != v { + g.Update(47.0) + if v := g.Value(); 47.0 != v { t.Errorf("g.Value(): 47.0 != %v\n", v) } } func TestGaugeFloat64Snapshot(t *testing.T) { g := NewGaugeFloat64() - g.Update(float64(47.0)) + g.Update(47.0) snapshot := g.Snapshot() g.Update(float64(0)) - if v := snapshot.Value(); float64(47.0) != v { + if v := snapshot.Value(); 47.0 != v { t.Errorf("g.Value(): 47.0 != %v\n", v) } } func TestGetOrRegisterGaugeFloat64(t *testing.T) { r := NewRegistry() - NewRegisteredGaugeFloat64("foo", r).Update(float64(47.0)) + NewRegisteredGaugeFloat64("foo", r).Update(47.0) t.Logf("registry: %v", r) - if g := GetOrRegisterGaugeFloat64("foo", r); float64(47.0) != g.Value() { + if g := GetOrRegisterGaugeFloat64("foo", r); 47.0 != g.Value() { t.Fatal(g) } } diff --git a/miner/miner_test.go b/miner/miner_test.go index 127b4c7687..da1e472dbd 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -63,7 +63,7 @@ type testBlockChain struct { func (bc *testBlockChain) CurrentBlock() *types.Block { return types.NewBlock(&types.Header{ GasLimit: bc.gasLimit, - }, nil, nil, nil, new(trie.Trie)) + }, nil, nil, nil, trie.NewStackTrie(nil)) } func (bc *testBlockChain) GetBlock(hash common.Hash, number uint64) *types.Block { diff --git a/miner/stress_clique.go b/miner/stress_clique.go index 21538aaaed..c585e0b1f6 100644 --- a/miner/stress_clique.go +++ b/miner/stress_clique.go @@ -178,7 +178,6 @@ func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { NoDiscovery: true, MaxPeers: 25, }, - NoUSB: true, } // Start the node and configure a full Ethereum node on it stack, err := node.New(config) @@ -186,7 +185,7 @@ func makeSealer(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { return nil, nil, err } // Create and register the backend - ethBackend, err := 
eth.New(stack, ð.Config{ + ethBackend, err := eth.New(stack, ðconfig.Config{ Genesis: genesis, NetworkId: genesis.Config.ChainID.Uint64(), SyncMode: downloader.FullSync, diff --git a/miner/stress_ethash.go b/miner/stress_ethash.go index 5a7e7685a6..0b838d48b9 100644 --- a/miner/stress_ethash.go +++ b/miner/stress_ethash.go @@ -155,7 +155,6 @@ func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { NoDiscovery: true, MaxPeers: 25, }, - NoUSB: true, UseLightweightKDF: true, } // Create the node and configure a full Ethereum node on it @@ -163,7 +162,7 @@ func makeMiner(genesis *core.Genesis) (*node.Node, *eth.Ethereum, error) { if err != nil { return nil, nil, err } - ethBackend, err := eth.New(stack, ð.Config{ + ethBackend, err := eth.New(stack, ðconfig.Config{ Genesis: genesis, NetworkId: genesis.Config.ChainID.Uint64(), SyncMode: downloader.FullSync, diff --git a/miner/unconfirmed_test.go b/miner/unconfirmed_test.go index 42e77f3e64..dc83cb9265 100644 --- a/miner/unconfirmed_test.go +++ b/miner/unconfirmed_test.go @@ -19,7 +19,6 @@ package miner import ( "testing" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" ) @@ -43,7 +42,7 @@ func TestUnconfirmedInsertBounds(t *testing.T) { for depth := uint64(0); depth < 2*uint64(limit); depth++ { // Insert multiple blocks for the same level just to stress it for i := 0; i < int(depth); i++ { - pool.Insert(depth, common.Hash([32]byte{byte(depth), byte(i)})) + pool.Insert(depth, [32]byte{byte(depth), byte(i)}) } // Validate that no blocks below the depth allowance are left in pool.blocks.Do(func(block interface{}) { @@ -63,7 +62,7 @@ func TestUnconfirmedShifts(t *testing.T) { pool := newUnconfirmedBlocks(new(noopChainRetriever), limit) for depth := start; depth < start+uint64(limit); depth++ { - pool.Insert(depth, common.Hash([32]byte{byte(depth)})) + pool.Insert(depth, [32]byte{byte(depth)}) } // Try to shift below the limit and ensure no blocks are dropped pool.Shift(start + uint64(limit) - 1) diff --git a/miner/worker.go b/miner/worker.go index 16f4c1c313..2cee6af0c3 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -303,6 +303,9 @@ func (w *worker) isRunning() bool { // close terminates all background threads maintained by the worker. // Note the worker does not support being closed multiple times. func (w *worker) close() { + if w.current != nil && w.current.state != nil { + w.current.state.StopPrefetcher() + } atomic.StoreInt32(&w.running, 0) close(w.exitCh) } @@ -347,7 +350,11 @@ func (w *worker) newWorkLoop(recommit time.Duration) { atomic.StoreInt32(interrupt, s) } interrupt = new(int32) - w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp} + select { + case w.newWorkCh <- &newWorkReq{interrupt: interrupt, noempty: noempty, timestamp: timestamp}: + case <-w.exitCh: + return + } timer.Reset(recommit) atomic.StoreInt32(&w.newTxs, 0) } @@ -638,19 +645,22 @@ func (w *worker) resultLoop() { // makeCurrent creates a new environment for the current cycle. 
func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { + // Retrieve the parent state to execute on top and start a prefetcher for + // the miner to speed up block sealing a bit state, err := w.chain.StateAt(parent.Root()) if err != nil { return err } + state.StartPrefetcher("miner") + env := &environment{ - signer: types.NewEIP155Signer(w.chainConfig.ChainID), + signer: types.MakeSigner(w.chainConfig, header.Number), state: state, ancestors: mapset.NewSet(), family: mapset.NewSet(), uncles: mapset.NewSet(), header: header, } - // when 08 is processed ancestors contain 07 (quick block) for _, ancestor := range w.chain.GetBlocksFromHash(parent.Hash(), 7) { for _, uncle := range ancestor.Uncles() { @@ -659,9 +669,14 @@ func (w *worker) makeCurrent(parent *types.Block, header *types.Header) error { env.family.Add(ancestor.Hash()) env.ancestors.Add(ancestor.Hash()) } - // Keep track of transactions which return errors so they can be removed env.tcount = 0 + + // Swap out the old work with the new one, terminating any leftover prefetcher + // processes in the meantime and starting a new one. + if w.current != nil && w.current.state != nil { + w.current.state.StopPrefetcher() + } w.current = env return nil } @@ -713,9 +728,8 @@ func (w *worker) updateSnapshot() { w.current.txs, uncles, w.current.receipts, - new(trie.Trie), + trie.NewStackTrie(nil), ) - w.snapshotState = w.current.state.Copy() } @@ -793,28 +807,33 @@ func (w *worker) commitTransactions(txs *types.TransactionsByPriceAndNonce, coin w.current.state.Prepare(tx.Hash(), common.Hash{}, w.current.tcount) logs, err := w.commitTransaction(tx, coinbase) - switch err { - case core.ErrGasLimitReached: + switch { + case errors.Is(err, core.ErrGasLimitReached): // Pop the current out-of-gas transaction without shifting in the next from the account log.Trace("Gas limit exceeded for current block", "sender", from) txs.Pop() - case core.ErrNonceTooLow: + case errors.Is(err, core.ErrNonceTooLow): // New head notification data race between the transaction pool and miner, shift log.Trace("Skipping transaction with low nonce", "sender", from, "nonce", tx.Nonce()) txs.Shift() - case core.ErrNonceTooHigh: + case errors.Is(err, core.ErrNonceTooHigh): // Reorg notification data race between the transaction pool and miner, skip account log.Trace("Skipping account with high nonce", "sender", from, "nonce", tx.Nonce()) txs.Pop() - case nil: + case errors.Is(err, nil): // Everything ok, collect the logs and shift in the next transaction from the same account coalescedLogs = append(coalescedLogs, logs...) w.current.tcount++ txs.Shift() + case errors.Is(err, core.ErrTxTypeNotSupported): + // Pop the unsupported transaction without shifting in the next from the account + log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) + txs.Pop() + default: // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain).
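The hunk above moves the miner's transaction-commit error handling from a direct "switch err" comparison to errors.Is, so that the core sentinel errors still match when they arrive wrapped, and adds a branch for core.ErrTxTypeNotSupported. A minimal, self-contained sketch of the same dispatch pattern; the "shift"/"pop" action strings are illustrative and not part of the patch:

package main

import (
	"errors"
	"fmt"

	"github.com/ethereum/go-ethereum/core"
)

// dispatch mirrors the style of the switch above: errors.Is matches the core
// sentinel errors even when they are wrapped via fmt.Errorf("...: %w", err).
func dispatch(err error) string {
	switch {
	case err == nil:
		return "shift" // tx applied, move to the sender's next transaction
	case errors.Is(err, core.ErrGasLimitReached):
		return "pop" // block is out of gas for this sender
	case errors.Is(err, core.ErrNonceTooLow):
		return "shift" // stale transaction, skip it
	case errors.Is(err, core.ErrNonceTooHigh):
		return "pop" // gapped nonce, skip the whole account
	case errors.Is(err, core.ErrTxTypeNotSupported):
		return "pop" // e.g. a typed transaction before the fork that enables it
	default:
		return "shift" // unknown error, drop the tx and carry on
	}
}

func main() {
	wrapped := fmt.Errorf("could not apply tx: %w", core.ErrNonceTooLow)
	fmt.Println(dispatch(wrapped)) // prints "shift" despite the wrapping
}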
@@ -857,13 +876,6 @@ func (w *worker) commitNewWork(interrupt *int32, noempty bool, timestamp int64) if parent.Time() >= uint64(timestamp) { timestamp = int64(parent.Time() + 1) } - // this will ensure we're not going off too far in the future - if now := time.Now().Unix(); timestamp > now+1 { - wait := time.Duration(timestamp-now) * time.Second - log.Info("Mining too far in the future", "wait", common.PrettyDuration(wait)) - time.Sleep(wait) - } - num := parent.Number() header := &types.Header{ ParentHash: parent.Hash(), diff --git a/miner/worker_test.go b/miner/worker_test.go index a5c558ba5f..0fe62316e1 100644 --- a/miner/worker_test.go +++ b/miner/worker_test.go @@ -81,10 +81,25 @@ func init() { Period: 10, Epoch: 30000, } - tx1, _ := types.SignTx(types.NewTransaction(0, testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + + signer := types.LatestSigner(params.TestChainConfig) + tx1 := types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{ + ChainID: params.TestChainConfig.ChainID, + Nonce: 0, + To: &testUserAddress, + Value: big.NewInt(1000), + Gas: params.TxGas, + }) pendingTxs = append(pendingTxs, tx1) - tx2, _ := types.SignTx(types.NewTransaction(1, testUserAddress, big.NewInt(1000), params.TxGas, nil, nil), types.HomesteadSigner{}, testBankKey) + + tx2 := types.MustSignNewTx(testBankKey, signer, &types.LegacyTx{ + Nonce: 1, + To: &testUserAddress, + Value: big.NewInt(1000), + Gas: params.TxGas, + }) newTxs = append(newTxs, tx2) + rand.Seed(time.Now().UnixNano()) } diff --git a/mobile/bind.go b/mobile/bind.go index afa97b5382..e32d864aa5 100644 --- a/mobile/bind.go +++ b/mobile/bind.go @@ -40,7 +40,7 @@ type MobileSigner struct { } func (s *MobileSigner) Sign(addr *Address, unsignedTx *Transaction) (signedTx *Transaction, _ error) { - sig, err := s.sign(types.EIP155Signer{}, addr.address, unsignedTx.tx) + sig, err := s.sign(addr.address, unsignedTx.tx) if err != nil { return nil, err } @@ -82,12 +82,16 @@ func NewTransactOpts() *TransactOpts { // NewKeyedTransactOpts is a utility method to easily create a transaction signer // from a single private key. 
-func NewKeyedTransactOpts(keyJson []byte, passphrase string) (*TransactOpts, error) { +func NewKeyedTransactOpts(keyJson []byte, passphrase string, chainID *big.Int) (*TransactOpts, error) { key, err := keystore.DecryptKey(keyJson, passphrase) if err != nil { return nil, err } - return &TransactOpts{*bind.NewKeyedTransactor(key.PrivateKey)}, nil + auth, err := bind.NewKeyedTransactorWithChainID(key.PrivateKey, chainID) + if err != nil { + return nil, err + } + return &TransactOpts{*auth}, nil } func (opts *TransactOpts) GetFrom() *Address { return &Address{opts.opts.From} } @@ -106,7 +110,7 @@ func (opts *TransactOpts) GetGasLimit() int64 { return int64(opts.opts.GasLimi func (opts *TransactOpts) SetFrom(from *Address) { opts.opts.From = from.address } func (opts *TransactOpts) SetNonce(nonce int64) { opts.opts.Nonce = big.NewInt(nonce) } func (opts *TransactOpts) SetSigner(s Signer) { - opts.opts.Signer = func(signer types.Signer, addr common.Address, tx *types.Transaction) (*types.Transaction, error) { + opts.opts.Signer = func(addr common.Address, tx *types.Transaction) (*types.Transaction, error) { sig, err := s.Sign(&Address{addr}, &Transaction{tx}) if err != nil { return nil, err diff --git a/mobile/discover.go b/mobile/discover.go index 9b3c93ccd9..2c699f08be 100644 --- a/mobile/discover.go +++ b/mobile/discover.go @@ -22,12 +22,12 @@ package geth import ( "errors" - "github.com/ethereum/go-ethereum/p2p/discv5" + "github.com/ethereum/go-ethereum/p2p/enode" ) // Enode represents a host on the network. type Enode struct { - node *discv5.Node + node *enode.Node } // NewEnode parses a node designator. @@ -53,8 +53,8 @@ type Enode struct { // and UDP discovery port 30301. // // enode://@10.3.58.6:30303?discport=30301 -func NewEnode(rawurl string) (enode *Enode, _ error) { - node, err := discv5.ParseNode(rawurl) +func NewEnode(rawurl string) (*Enode, error) { + node, err := enode.Parse(enode.ValidSchemes, rawurl) if err != nil { return nil, err } @@ -62,12 +62,12 @@ func NewEnode(rawurl string) (enode *Enode, _ error) { } // Enodes represents a slice of accounts. -type Enodes struct{ nodes []*discv5.Node } +type Enodes struct{ nodes []*enode.Node } // NewEnodes creates a slice of uninitialized enodes. 
func NewEnodes(size int) *Enodes { return &Enodes{ - nodes: make([]*discv5.Node, size), + nodes: make([]*enode.Node, size), } } diff --git a/mobile/ethereum.go b/mobile/ethereum.go index 59da852397..97c46ddca7 100644 --- a/mobile/ethereum.go +++ b/mobile/ethereum.go @@ -21,7 +21,7 @@ package geth import ( "errors" - ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" ) diff --git a/mobile/geth.go b/mobile/geth.go index b561e33675..704d432e04 100644 --- a/mobile/geth.go +++ b/mobile/geth.go @@ -25,8 +25,8 @@ import ( "path/filepath" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/eth/downloader" + "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethclient" "github.com/ethereum/go-ethereum/ethstats" "github.com/ethereum/go-ethereum/internal/debug" @@ -182,7 +182,7 @@ func NewNode(datadir string, config *NodeConfig) (stack *Node, _ error) { } // Register the Ethereum protocol if requested if config.EthereumEnabled { - ethConf := eth.DefaultConfig + ethConf := ethconfig.Defaults ethConf.Genesis = genesis ethConf.SyncMode = downloader.LightSync ethConf.NetworkId = uint64(config.EthereumNetworkID) diff --git a/mobile/params.go b/mobile/params.go index 43ac004740..0fc197c9e5 100644 --- a/mobile/params.go +++ b/mobile/params.go @@ -22,7 +22,7 @@ import ( "encoding/json" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/p2p/discv5" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" ) @@ -62,9 +62,13 @@ func GoerliGenesis() string { // FoundationBootnodes returns the enode URLs of the P2P bootstrap nodes operated // by the foundation running the V5 discovery protocol. func FoundationBootnodes() *Enodes { - nodes := &Enodes{nodes: make([]*discv5.Node, len(params.MainnetBootnodes))} + nodes := &Enodes{nodes: make([]*enode.Node, len(params.MainnetBootnodes))} for i, url := range params.MainnetBootnodes { - nodes.nodes[i] = discv5.MustParseNode(url) + var err error + nodes.nodes[i], err = enode.Parse(enode.ValidSchemes, url) + if err != nil { + panic("invalid node URL: " + err.Error()) + } } return nodes } diff --git a/node/api_test.go b/node/api_test.go index e4c08962c3..9c3fa3a31d 100644 --- a/node/api_test.go +++ b/node/api_test.go @@ -244,11 +244,13 @@ func TestStartRPC(t *testing.T) { } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { + t.Parallel() + // Apply some sane defaults. config := test.cfg // config.Logger = testlog.Logger(t, log.LvlDebug) - config.NoUSB = true config.P2P.NoDiscovery = true // Create Node. diff --git a/node/config.go b/node/config.go index 55532632cd..ef1da15d70 100644 --- a/node/config.go +++ b/node/config.go @@ -95,6 +95,9 @@ type Config struct { // NoUSB disables hardware wallet monitoring and connectivity. NoUSB bool `toml:",omitempty"` + // USB enables hardware wallet monitoring and connectivity. + USB bool `toml:",omitempty"` + // SmartCardDaemonPath is the path to the smartcard daemon's socket SmartCardDaemonPath string `toml:",omitempty"` @@ -136,6 +139,9 @@ type Config struct { // interface. HTTPTimeouts rpc.HTTPTimeouts + // HTTPPathPrefix specifies a path prefix on which http-rpc is to be served. + HTTPPathPrefix string `toml:",omitempty"` + // WSHost is the host interface on which to start the websocket RPC server. If // this field is empty, no websocket API endpoint will be started. 
WSHost string @@ -145,6 +151,9 @@ type Config struct { // ephemeral nodes). WSPort int `toml:",omitempty"` + // WSPathPrefix specifies a path prefix on which ws-rpc is to be served. + WSPathPrefix string `toml:",omitempty"` + // WSOrigins is the list of domain to accept websocket requests from. Please be // aware that the server can only act upon the HTTP request the client sends and // cannot verify the validity of the request header. @@ -182,6 +191,9 @@ type Config struct { staticNodesWarning bool trustedNodesWarning bool oldGethResourceWarning bool + + // AllowUnprotectedTxs allows non EIP-155 protected transactions to be send over RPC. + AllowUnprotectedTxs bool `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into @@ -476,7 +488,7 @@ func makeAccountManager(conf *Config) (*accounts.Manager, string, error) { // we can have both, but it's very confusing for the user to see the same // accounts in both externally and locally, plus very racey. backends = append(backends, keystore.NewKeyStore(keydir, scryptN, scryptP)) - if !conf.NoUSB { + if conf.USB { // Start a USB hub for Ledger hardware wallets if ledgerhub, err := usbwallet.NewLedgerHub(); err != nil { log.Warn(fmt.Sprintf("Failed to start Ledger hub, disabling: %v", err)) diff --git a/node/node.go b/node/node.go index c66ebb89d0..2ed4c31f60 100644 --- a/node/node.go +++ b/node/node.go @@ -135,6 +135,14 @@ func New(conf *Config) (*Node, error) { node.server.Config.NodeDatabase = node.config.NodeDB() } + // Check HTTP/WS prefixes are valid. + if err := validatePrefix("HTTP", conf.HTTPPathPrefix); err != nil { + return nil, err + } + if err := validatePrefix("WebSocket", conf.WSPathPrefix); err != nil { + return nil, err + } + // Configure RPC servers. node.http = newHTTPServer(node.log, conf.HTTPTimeouts) node.ws = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) @@ -159,12 +167,13 @@ func (n *Node) Start() error { return ErrNodeStopped } n.state = runningState - err := n.startNetworking() + // open networking and RPC endpoints + err := n.openEndpoints() lifecycles := make([]Lifecycle, len(n.lifecycles)) copy(lifecycles, n.lifecycles) n.lock.Unlock() - // Check if networking startup failed. + // Check if endpoint startup failed. if err != nil { n.doClose(nil) return err @@ -247,12 +256,14 @@ func (n *Node) doClose(errs []error) error { } } -// startNetworking starts all network endpoints. -func (n *Node) startNetworking() error { +// openEndpoints starts all network and RPC endpoints. 
+func (n *Node) openEndpoints() error { + // start networking endpoints n.log.Info("Starting peer-to-peer node", "instance", n.server.Name) if err := n.server.Start(); err != nil { return convertFileLockError(err) } + // start RPC endpoints err := n.startRPC() if err != nil { n.stopRPC() @@ -343,6 +354,7 @@ func (n *Node) startRPC() error { CorsAllowedOrigins: n.config.HTTPCors, Vhosts: n.config.HTTPVirtualHosts, Modules: n.config.HTTPModules, + prefix: n.config.HTTPPathPrefix, } if err := n.http.setListenAddr(n.config.HTTPHost, n.config.HTTPPort); err != nil { return err @@ -358,6 +370,7 @@ func (n *Node) startRPC() error { config := wsConfig{ Modules: n.config.WSModules, Origins: n.config.WSOrigins, + prefix: n.config.WSPathPrefix, } if err := server.setListenAddr(n.config.WSHost, n.config.WSPort); err != nil { return err @@ -454,6 +467,7 @@ func (n *Node) RegisterHandler(name, path string, handler http.Handler) { if n.state != initializingState { panic("can't register HTTP handler on running/stopped node") } + n.http.mux.Handle(path, handler) n.http.handlerNames[path] = name } @@ -510,17 +524,18 @@ func (n *Node) IPCEndpoint() string { return n.ipc.endpoint } -// HTTPEndpoint returns the URL of the HTTP server. +// HTTPEndpoint returns the URL of the HTTP server. Note that this URL does not +// contain the JSON-RPC path prefix set by HTTPPathPrefix. func (n *Node) HTTPEndpoint() string { return "http://" + n.http.listenAddr() } -// WSEndpoint retrieves the current WS endpoint used by the protocol stack. +// WSEndpoint returns the current JSON-RPC over WebSocket endpoint. func (n *Node) WSEndpoint() string { if n.http.wsAllowed() { - return "ws://" + n.http.listenAddr() + return "ws://" + n.http.listenAddr() + n.http.wsConfig.prefix } - return "ws://" + n.ws.listenAddr() + return "ws://" + n.ws.listenAddr() + n.ws.wsConfig.prefix } // EventMux retrieves the event multiplexer used by all the network services in diff --git a/node/node_test.go b/node/node_test.go index 8f306ef021..6731dbac1f 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -390,7 +390,7 @@ func TestLifecycleTerminationGuarantee(t *testing.T) { } // Tests whether a handler can be successfully mounted on the canonical HTTP server -// on the given path +// on the given prefix func TestRegisterHandler_Successful(t *testing.T) { node := createNode(t, 7878, 7979) @@ -483,7 +483,112 @@ func TestWebsocketHTTPOnSeparatePort_WSRequest(t *testing.T) { if !checkRPC(node.HTTPEndpoint()) { t.Fatalf("http request failed") } +} + +type rpcPrefixTest struct { + httpPrefix, wsPrefix string + // These lists paths on which JSON-RPC should be served / not served. 
+ wantHTTP []string + wantNoHTTP []string + wantWS []string + wantNoWS []string +} + +func TestNodeRPCPrefix(t *testing.T) { + t.Parallel() + + tests := []rpcPrefixTest{ + // both off + { + httpPrefix: "", wsPrefix: "", + wantHTTP: []string{"/", "/?p=1"}, + wantNoHTTP: []string{"/test", "/test?p=1"}, + wantWS: []string{"/", "/?p=1"}, + wantNoWS: []string{"/test", "/test?p=1"}, + }, + // only http prefix + { + httpPrefix: "/testprefix", wsPrefix: "", + wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, + wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"}, + wantWS: []string{"/", "/?p=1"}, + wantNoWS: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"}, + }, + // only ws prefix + { + httpPrefix: "", wsPrefix: "/testprefix", + wantHTTP: []string{"/", "/?p=1"}, + wantNoHTTP: []string{"/testprefix", "/testprefix?p=1", "/test", "/test?p=1"}, + wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, + wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"}, + }, + // both set + { + httpPrefix: "/testprefix", wsPrefix: "/testprefix", + wantHTTP: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, + wantNoHTTP: []string{"/", "/?p=1", "/test", "/test?p=1"}, + wantWS: []string{"/testprefix", "/testprefix?p=1", "/testprefix/x", "/testprefix/x?p=1"}, + wantNoWS: []string{"/", "/?p=1", "/test", "/test?p=1"}, + }, + } + + for _, test := range tests { + test := test + name := fmt.Sprintf("http=%s ws=%s", test.httpPrefix, test.wsPrefix) + t.Run(name, func(t *testing.T) { + cfg := &Config{ + HTTPHost: "127.0.0.1", + HTTPPathPrefix: test.httpPrefix, + WSHost: "127.0.0.1", + WSPathPrefix: test.wsPrefix, + } + node, err := New(cfg) + if err != nil { + t.Fatal("can't create node:", err) + } + defer node.Close() + if err := node.Start(); err != nil { + t.Fatal("can't start node:", err) + } + test.check(t, node) + }) + } +} + +func (test rpcPrefixTest) check(t *testing.T, node *Node) { + t.Helper() + httpBase := "http://" + node.http.listenAddr() + wsBase := "ws://" + node.http.listenAddr() + + if node.WSEndpoint() != wsBase+test.wsPrefix { + t.Errorf("Error: node has wrong WSEndpoint %q", node.WSEndpoint()) + } + + for _, path := range test.wantHTTP { + resp := rpcRequest(t, httpBase+path) + if resp.StatusCode != 200 { + t.Errorf("Error: %s: bad status code %d, want 200", path, resp.StatusCode) + } + } + for _, path := range test.wantNoHTTP { + resp := rpcRequest(t, httpBase+path) + if resp.StatusCode != 404 { + t.Errorf("Error: %s: bad status code %d, want 404", path, resp.StatusCode) + } + } + for _, path := range test.wantWS { + err := wsRequest(t, wsBase+path, "") + if err != nil { + t.Errorf("Error: %s: WebSocket connection failed: %v", path, err) + } + } + for _, path := range test.wantNoWS { + err := wsRequest(t, wsBase+path, "") + if err == nil { + t.Errorf("Error: %s: WebSocket connection succeeded for path in wantNoWS", path) + } + } } func createNode(t *testing.T, httpPort, wsPort int) *Node { diff --git a/node/rpcstack.go b/node/rpcstack.go index 731e807aca..56e23cc5c7 100644 --- a/node/rpcstack.go +++ b/node/rpcstack.go @@ -39,12 +39,14 @@ type httpConfig struct { Modules []string CorsAllowedOrigins []string Vhosts []string + prefix string // path prefix on which to mount http handler } // wsConfig is the JSON-RPC/Websocket configuration type wsConfig struct { Origins []string Modules []string + prefix string // path prefix on which to mount ws handler } type rpcHandler struct { 
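Taken together, the new HTTPPathPrefix/WSPathPrefix config fields and the prefix tests above mean a node can mount JSON-RPC under an arbitrary path instead of "/". A minimal client-side sketch under assumed defaults; the ports 8545/8546 and the "/rpc" prefix are illustrative, not part of the patch:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Serve JSON-RPC under /rpc instead of the root path on both transports.
	cfg := &node.Config{
		HTTPHost:       "127.0.0.1",
		HTTPPort:       8545,
		HTTPPathPrefix: "/rpc",
		WSHost:         "127.0.0.1",
		WSPort:         8546,
		WSPathPrefix:   "/rpc",
	}
	stack, err := node.New(cfg)
	if err != nil {
		panic(err)
	}
	defer stack.Close()
	if err := stack.Start(); err != nil {
		panic(err)
	}

	// WSEndpoint now includes the prefix; HTTPEndpoint deliberately does not.
	fmt.Println(stack.WSEndpoint()) // e.g. ws://127.0.0.1:8546/rpc

	// Clients must dial the prefixed path; "/" answers 404 once a prefix is set.
	client, err := rpc.Dial("http://127.0.0.1:8545/rpc")
	if err != nil {
		panic(err)
	}
	defer client.Close()
}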
@@ -62,6 +64,7 @@ type httpServer struct { listener net.Listener // non-nil when server is running // HTTP RPC handler things. + httpConfig httpConfig httpHandler atomic.Value // *rpcHandler @@ -79,6 +82,7 @@ type httpServer struct { func newHTTPServer(log log.Logger, timeouts rpc.HTTPTimeouts) *httpServer { h := &httpServer{log: log, timeouts: timeouts, handlerNames: make(map[string]string)} + h.httpHandler.Store((*rpcHandler)(nil)) h.wsHandler.Store((*rpcHandler)(nil)) return h @@ -140,14 +144,21 @@ func (h *httpServer) start() error { h.listener = listener go h.server.Serve(listener) + if h.wsAllowed() { + url := fmt.Sprintf("ws://%v", listener.Addr()) + if h.wsConfig.prefix != "" { + url += h.wsConfig.prefix + } + h.log.Info("WebSocket enabled", "url", url) + } // if server is websocket only, return after logging - if h.wsAllowed() && !h.rpcAllowed() { - h.log.Info("WebSocket enabled", "url", fmt.Sprintf("ws://%v", listener.Addr())) + if !h.rpcAllowed() { return nil } // Log http endpoint. h.log.Info("HTTP server started", "endpoint", listener.Addr(), + "prefix", h.httpConfig.prefix, "cors", strings.Join(h.httpConfig.CorsAllowedOrigins, ","), "vhosts", strings.Join(h.httpConfig.Vhosts, ","), ) @@ -170,26 +181,60 @@ func (h *httpServer) start() error { } func (h *httpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - rpc := h.httpHandler.Load().(*rpcHandler) - if r.RequestURI == "/" { - // Serve JSON-RPC on the root path. - ws := h.wsHandler.Load().(*rpcHandler) - if ws != nil && isWebsocket(r) { + // check if ws request and serve if ws enabled + ws := h.wsHandler.Load().(*rpcHandler) + if ws != nil && isWebsocket(r) { + if checkPath(r, h.wsConfig.prefix) { ws.ServeHTTP(w, r) + } + return + } + // if http-rpc is enabled, try to serve request + rpc := h.httpHandler.Load().(*rpcHandler) + if rpc != nil { + // First try to route in the mux. + // Requests to a path below root are handled by the mux, + // which has all the handlers registered via Node.RegisterHandler. + // These are made available when RPC is enabled. + muxHandler, pattern := h.mux.Handler(r) + if pattern != "" { + muxHandler.ServeHTTP(w, r) return } - if rpc != nil { + + if checkPath(r, h.httpConfig.prefix) { rpc.ServeHTTP(w, r) return } - } else if rpc != nil { - // Requests to a path below root are handled by the mux, - // which has all the handlers registered via Node.RegisterHandler. - // These are made available when RPC is enabled. - h.mux.ServeHTTP(w, r) - return } - w.WriteHeader(404) + w.WriteHeader(http.StatusNotFound) +} + +// checkPath checks whether a given request URL matches a given path prefix. +func checkPath(r *http.Request, path string) bool { + // if no prefix has been specified, request URL must be on root + if path == "" { + return r.URL.Path == "/" + } + // otherwise, check to make sure prefix matches + return len(r.URL.Path) >= len(path) && r.URL.Path[:len(path)] == path +} + +// validatePrefix checks if 'path' is a valid configuration value for the RPC prefix option. +func validatePrefix(what, path string) error { + if path == "" { + return nil + } + if path[0] != '/' { + return fmt.Errorf(`%s RPC path prefix %q does not contain leading "/"`, what, path) + } + if strings.ContainsAny(path, "?#") { + // This is just to avoid confusion. While these would match correctly (i.e. they'd + // match if URL-escaped into path), it's not easy to understand for users when + // setting that on the command line. 
+ return fmt.Errorf("%s RPC path prefix %q contains URL meta-characters", what, path) + } + return nil } // stop shuts down the HTTP server. @@ -448,6 +493,7 @@ func (is *ipcServer) start(apis []rpc.API) error { } listener, srv, err := rpc.StartIPCEndpoint(is.endpoint, apis) if err != nil { + is.log.Warn("IPC opening failed", "url", is.endpoint, "error", err) return err } is.log.Info("IPC endpoint opened", "url", is.endpoint) diff --git a/node/rpcstack_test.go b/node/rpcstack_test.go index 0ee120efd7..f92f0ba396 100644 --- a/node/rpcstack_test.go +++ b/node/rpcstack_test.go @@ -18,7 +18,11 @@ package node import ( "bytes" + "fmt" "net/http" + "net/url" + "strconv" + "strings" "testing" "github.com/ethereum/go-ethereum/internal/testlog" @@ -30,47 +34,129 @@ import ( // TestCorsHandler makes sure CORS are properly handled on the http server. func TestCorsHandler(t *testing.T) { - srv := createAndStartServer(t, httpConfig{CorsAllowedOrigins: []string{"test", "test.com"}}, false, wsConfig{}) + srv := createAndStartServer(t, &httpConfig{CorsAllowedOrigins: []string{"test", "test.com"}}, false, &wsConfig{}) defer srv.stop() + url := "http://" + srv.listenAddr() - resp := testRequest(t, "origin", "test.com", "", srv) + resp := rpcRequest(t, url, "origin", "test.com") assert.Equal(t, "test.com", resp.Header.Get("Access-Control-Allow-Origin")) - resp2 := testRequest(t, "origin", "bad", "", srv) + resp2 := rpcRequest(t, url, "origin", "bad") assert.Equal(t, "", resp2.Header.Get("Access-Control-Allow-Origin")) } // TestVhosts makes sure vhosts are properly handled on the http server. func TestVhosts(t *testing.T) { - srv := createAndStartServer(t, httpConfig{Vhosts: []string{"test"}}, false, wsConfig{}) + srv := createAndStartServer(t, &httpConfig{Vhosts: []string{"test"}}, false, &wsConfig{}) defer srv.stop() + url := "http://" + srv.listenAddr() - resp := testRequest(t, "", "", "test", srv) + resp := rpcRequest(t, url, "host", "test") assert.Equal(t, resp.StatusCode, http.StatusOK) - resp2 := testRequest(t, "", "", "bad", srv) + resp2 := rpcRequest(t, url, "host", "bad") assert.Equal(t, resp2.StatusCode, http.StatusForbidden) } +type originTest struct { + spec string + expOk []string + expFail []string +} + +// splitAndTrim splits input separated by a comma +// and trims excessive white space from the substrings. +// Copied over from flags.go +func splitAndTrim(input string) (ret []string) { + l := strings.Split(input, ",") + for _, r := range l { + r = strings.TrimSpace(r) + if len(r) > 0 { + ret = append(ret, r) + } + } + return ret +} + // TestWebsocketOrigins makes sure the websocket origins are properly handled on the websocket server. 
func TestWebsocketOrigins(t *testing.T) { - srv := createAndStartServer(t, httpConfig{}, true, wsConfig{Origins: []string{"test"}}) - defer srv.stop() - - dialer := websocket.DefaultDialer - _, _, err := dialer.Dial("ws://"+srv.listenAddr(), http.Header{ - "Content-type": []string{"application/json"}, - "Sec-WebSocket-Version": []string{"13"}, - "Origin": []string{"test"}, - }) - assert.NoError(t, err) - - _, _, err = dialer.Dial("ws://"+srv.listenAddr(), http.Header{ - "Content-type": []string{"application/json"}, - "Sec-WebSocket-Version": []string{"13"}, - "Origin": []string{"bad"}, - }) - assert.Error(t, err) + tests := []originTest{ + { + spec: "*", // allow all + expOk: []string{"", "http://test", "https://test", "http://test:8540", "https://test:8540", + "http://test.com", "https://foo.test", "http://testa", "http://atestb:8540", "https://atestb:8540"}, + }, + { + spec: "test", + expOk: []string{"http://test", "https://test", "http://test:8540", "https://test:8540"}, + expFail: []string{"http://test.com", "https://foo.test", "http://testa", "http://atestb:8540", "https://atestb:8540"}, + }, + // scheme tests + { + spec: "https://test", + expOk: []string{"https://test", "https://test:9999"}, + expFail: []string{ + "test", // no scheme, required by spec + "http://test", // wrong scheme + "http://test.foo", "https://a.test.x", // subdomain variatoins + "http://testx:8540", "https://xtest:8540"}, + }, + // ip tests + { + spec: "https://12.34.56.78", + expOk: []string{"https://12.34.56.78", "https://12.34.56.78:8540"}, + expFail: []string{ + "http://12.34.56.78", // wrong scheme + "http://12.34.56.78:443", // wrong scheme + "http://1.12.34.56.78", // wrong 'domain name' + "http://12.34.56.78.a", // wrong 'domain name' + "https://87.65.43.21", "http://87.65.43.21:8540", "https://87.65.43.21:8540"}, + }, + // port tests + { + spec: "test:8540", + expOk: []string{"http://test:8540", "https://test:8540"}, + expFail: []string{ + "http://test", "https://test", // spec says port required + "http://test:8541", "https://test:8541", // wrong port + "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"}, + }, + // scheme and port + { + spec: "https://test:8540", + expOk: []string{"https://test:8540"}, + expFail: []string{ + "https://test", // missing port + "http://test", // missing port, + wrong scheme + "http://test:8540", // wrong scheme + "http://test:8541", "https://test:8541", // wrong port + "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"}, + }, + // several allowed origins + { + spec: "localhost,http://127.0.0.1", + expOk: []string{"localhost", "http://localhost", "https://localhost:8443", + "http://127.0.0.1", "http://127.0.0.1:8080"}, + expFail: []string{ + "https://127.0.0.1", // wrong scheme + "http://bad", "https://bad", "http://bad:8540", "https://bad:8540"}, + }, + } + for _, tc := range tests { + srv := createAndStartServer(t, &httpConfig{}, true, &wsConfig{Origins: splitAndTrim(tc.spec)}) + url := fmt.Sprintf("ws://%v", srv.listenAddr()) + for _, origin := range tc.expOk { + if err := wsRequest(t, url, origin); err != nil { + t.Errorf("spec '%v', origin '%v': expected ok, got %v", tc.spec, origin, err) + } + } + for _, origin := range tc.expFail { + if err := wsRequest(t, url, origin); err == nil { + t.Errorf("spec '%v', origin '%v': expected not to allow, got ok", tc.spec, origin) + } + } + srv.stop() + } } // TestIsWebsocket tests if an incoming websocket upgrade request is handled properly. 
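The cases above encode how the server matches the browser Origin header against the configured allow-list: when the spec names a scheme or port, both must match as well as the host. From a Go client the origin can be set explicitly when dialing; a small sketch, with an illustrative endpoint and origin:

package main

import (
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// DialWebsocket sends the given origin during the handshake; the server
	// rejects the upgrade ("bad handshake") if that origin is not allowed.
	client, err := rpc.DialWebsocket(context.Background(), "ws://127.0.0.1:8546", "https://dapp.example")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	var version string
	if err := client.Call(&version, "web3_clientVersion"); err != nil {
		panic(err)
	}
	fmt.Println(version)
}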
@@ -88,36 +174,118 @@ func TestIsWebsocket(t *testing.T) { assert.True(t, isWebsocket(r)) } -func createAndStartServer(t *testing.T, conf httpConfig, ws bool, wsConf wsConfig) *httpServer { +func Test_checkPath(t *testing.T) { + tests := []struct { + req *http.Request + prefix string + expected bool + }{ + { + req: &http.Request{URL: &url.URL{Path: "/test"}}, + prefix: "/test", + expected: true, + }, + { + req: &http.Request{URL: &url.URL{Path: "/testing"}}, + prefix: "/test", + expected: true, + }, + { + req: &http.Request{URL: &url.URL{Path: "/"}}, + prefix: "/test", + expected: false, + }, + { + req: &http.Request{URL: &url.URL{Path: "/fail"}}, + prefix: "/test", + expected: false, + }, + { + req: &http.Request{URL: &url.URL{Path: "/"}}, + prefix: "", + expected: true, + }, + { + req: &http.Request{URL: &url.URL{Path: "/fail"}}, + prefix: "", + expected: false, + }, + { + req: &http.Request{URL: &url.URL{Path: "/"}}, + prefix: "/", + expected: true, + }, + { + req: &http.Request{URL: &url.URL{Path: "/testing"}}, + prefix: "/", + expected: true, + }, + } + + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + assert.Equal(t, tt.expected, checkPath(tt.req, tt.prefix)) + }) + } +} + +func createAndStartServer(t *testing.T, conf *httpConfig, ws bool, wsConf *wsConfig) *httpServer { t.Helper() srv := newHTTPServer(testlog.Logger(t, log.LvlDebug), rpc.DefaultHTTPTimeouts) - - assert.NoError(t, srv.enableRPC(nil, conf)) + assert.NoError(t, srv.enableRPC(nil, *conf)) if ws { - assert.NoError(t, srv.enableWS(nil, wsConf)) + assert.NoError(t, srv.enableWS(nil, *wsConf)) } assert.NoError(t, srv.setListenAddr("localhost", 0)) assert.NoError(t, srv.start()) - return srv } -func testRequest(t *testing.T, key, value, host string, srv *httpServer) *http.Response { +// wsRequest attempts to open a WebSocket connection to the given URL. +func wsRequest(t *testing.T, url, browserOrigin string) error { + t.Helper() + t.Logf("checking WebSocket on %s (origin %q)", url, browserOrigin) + + headers := make(http.Header) + if browserOrigin != "" { + headers.Set("Origin", browserOrigin) + } + conn, _, err := websocket.DefaultDialer.Dial(url, headers) + if conn != nil { + conn.Close() + } + return err +} + +// rpcRequest performs a JSON-RPC request to the given URL. +func rpcRequest(t *testing.T, url string, extraHeaders ...string) *http.Response { t.Helper() - body := bytes.NewReader([]byte(`{"jsonrpc":"2.0","id":1,method":"rpc_modules"}`)) - req, _ := http.NewRequest("POST", "http://"+srv.listenAddr(), body) + // Create the request. + body := bytes.NewReader([]byte(`{"jsonrpc":"2.0","id":1,"method":"rpc_modules","params":[]}`)) + req, err := http.NewRequest("POST", url, body) + if err != nil { + t.Fatal("could not create http request:", err) + } req.Header.Set("content-type", "application/json") - if key != "" && value != "" { - req.Header.Set(key, value) + + // Apply extra headers. + if len(extraHeaders)%2 != 0 { + panic("odd extraHeaders length") } - if host != "" { - req.Host = host + for i := 0; i < len(extraHeaders); i += 2 { + key, value := extraHeaders[i], extraHeaders[i+1] + if strings.ToLower(key) == "host" { + req.Host = value + } else { + req.Header.Set(key, value) + } } - client := http.DefaultClient - resp, err := client.Do(req) + // Perform the request. 
+ t.Logf("checking RPC/HTTP on %s %v", url, extraHeaders) + resp, err := http.DefaultClient.Do(req) if err != nil { t.Fatal(err) } diff --git a/node/utils_test.go b/node/utils_test.go index 44c83e22da..b7474bb706 100644 --- a/node/utils_test.go +++ b/node/utils_test.go @@ -82,11 +82,11 @@ func (f *FullService) Stop() error { return nil } func (f *FullService) Protocols() []p2p.Protocol { return []p2p.Protocol{ - p2p.Protocol{ + { Name: "test1", Version: uint(1), }, - p2p.Protocol{ + { Name: "test2", Version: uint(2), }, diff --git a/oss-fuzz.sh b/oss-fuzz.sh index 23fb4dd412..ac93a5a467 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -26,31 +26,92 @@ # $CFLAGS, $CXXFLAGS C and C++ compiler flags. # $LIB_FUZZING_ENGINE C++ compiler argument to link fuzz target against the prebuilt engine library (e.g. libFuzzer). +# This sets the -coverpgk for the coverage report when the corpus is executed through go test +coverpkg="github.com/ethereum/go-ethereum/..." + +function coverbuild { + path=$1 + function=$2 + fuzzer=$3 + tags="" + + if [[ $# -eq 4 ]]; then + tags="-tags $4" + fi + cd $path + fuzzed_package=`pwd | rev | cut -d'/' -f 1 | rev` + cp $GOPATH/ossfuzz_coverage_runner.go ./"${function,,}"_test.go + sed -i -e 's/FuzzFunction/'$function'/' ./"${function,,}"_test.go + sed -i -e 's/mypackagebeingfuzzed/'$fuzzed_package'/' ./"${function,,}"_test.go + sed -i -e 's/TestFuzzCorpus/Test'$function'Corpus/' ./"${function,,}"_test.go + +cat << DOG > $OUT/$fuzzer +#/bin/sh + + cd $OUT/$path + go test -run Test${function}Corpus -v $tags -coverprofile \$1 -coverpkg $coverpkg + +DOG + + chmod +x $OUT/$fuzzer + #echo "Built script $OUT/$fuzzer" + #cat $OUT/$fuzzer + cd - +} + function compile_fuzzer { - path=$SRC/go-ethereum/$1 + # Inputs: + # $1: The package to fuzz, within go-ethereum + # $2: The name of the fuzzing function + # $3: The name to give to the final fuzzing-binary + + path=$GOPATH/src/github.com/ethereum/go-ethereum/$1 func=$2 fuzzer=$3 + echo "Building $fuzzer" - (cd $path && \ + + # Do a coverage-build or a regular build + if [[ $SANITIZER = *coverage* ]]; then + coverbuild $path $func $fuzzer $coverpkg + else + (cd $path && \ go-fuzz -func $func -o $WORK/$fuzzer.a . 
&& \ - echo "First stage built OK" && \ - $CXX $CXXFLAGS $LIB_FUZZING_ENGINE $WORK/$fuzzer.a -o $OUT/$fuzzer && \ - echo "Second stage built ok" ) + $CXX $CXXFLAGS $LIB_FUZZING_ENGINE $WORK/$fuzzer.a -o $OUT/$fuzzer) + fi + ## Check if there exists a seed corpus file + corpusfile="${path}/testdata/${fuzzer}_seed_corpus.zip" + if [ -f $corpusfile ] + then + cp $corpusfile $OUT/ + echo "Found seed corpus: $corpusfile" + fi } -compile_fuzzer common/bitutil Fuzz fuzzBitutilCompress -compile_fuzzer crypto/bn256 FuzzAdd fuzzBn256Add -compile_fuzzer crypto/bn256 FuzzMul fuzzBn256Mul -compile_fuzzer crypto/bn256 FuzzPair fuzzBn256Pair -compile_fuzzer core/vm/runtime Fuzz fuzzVmRuntime -compile_fuzzer crypto/blake2b Fuzz fuzzBlake2b +compile_fuzzer tests/fuzzers/bitutil Fuzz fuzzBitutilCompress +compile_fuzzer tests/fuzzers/bn256 FuzzAdd fuzzBn256Add +compile_fuzzer tests/fuzzers/bn256 FuzzMul fuzzBn256Mul +compile_fuzzer tests/fuzzers/bn256 FuzzPair fuzzBn256Pair +compile_fuzzer tests/fuzzers/runtime Fuzz fuzzVmRuntime compile_fuzzer tests/fuzzers/keystore Fuzz fuzzKeystore compile_fuzzer tests/fuzzers/txfetcher Fuzz fuzzTxfetcher compile_fuzzer tests/fuzzers/rlp Fuzz fuzzRlp compile_fuzzer tests/fuzzers/trie Fuzz fuzzTrie compile_fuzzer tests/fuzzers/stacktrie Fuzz fuzzStackTrie +compile_fuzzer tests/fuzzers/difficulty Fuzz fuzzDifficulty +compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi +compile_fuzzer tests/fuzzers/les Fuzz fuzzLes -# This doesn't work very well @TODO -#compile_fuzzertests/fuzzers/abi Fuzz fuzzAbi +compile_fuzzer tests/fuzzers/bls12381 FuzzG1Add fuzz_g1_add +compile_fuzzer tests/fuzzers/bls12381 FuzzG1Mul fuzz_g1_mul +compile_fuzzer tests/fuzzers/bls12381 FuzzG1MultiExp fuzz_g1_multiexp +compile_fuzzer tests/fuzzers/bls12381 FuzzG2Add fuzz_g2_add +compile_fuzzer tests/fuzzers/bls12381 FuzzG2Mul fuzz_g2_mul +compile_fuzzer tests/fuzzers/bls12381 FuzzG2MultiExp fuzz_g2_multiexp +compile_fuzzer tests/fuzzers/bls12381 FuzzPairing fuzz_pairing +compile_fuzzer tests/fuzzers/bls12381 FuzzMapG1 fuzz_map_g1 +compile_fuzzer tests/fuzzers/bls12381 FuzzMapG2 fuzz_map_g2 +#TODO: move this to tests/fuzzers, if possible +compile_fuzzer crypto/blake2b Fuzz fuzzBlake2b diff --git a/p2p/discover/v5_udp.go b/p2p/discover/v5_udp.go index c95317a005..eb01d95e93 100644 --- a/p2p/discover/v5_udp.go +++ b/p2p/discover/v5_udp.go @@ -74,7 +74,7 @@ type UDPv5 struct { // talkreq handler registry trlock sync.Mutex - trhandlers map[string]func([]byte) []byte + trhandlers map[string]TalkRequestHandler // channels into dispatch packetInCh chan ReadPacket @@ -96,6 +96,9 @@ type UDPv5 struct { wg sync.WaitGroup } +// TalkRequestHandler callback processes a talk request and optionally returns a reply +type TalkRequestHandler func(enode.ID, *net.UDPAddr, []byte) []byte + // callV5 represents a remote procedure call against another node. type callV5 struct { node *enode.Node @@ -145,7 +148,7 @@ func newUDPv5(conn UDPConn, ln *enode.LocalNode, cfg Config) (*UDPv5, error) { log: cfg.Log, validSchemes: cfg.ValidSchemes, clock: cfg.Clock, - trhandlers: make(map[string]func([]byte) []byte), + trhandlers: make(map[string]TalkRequestHandler), // channels into dispatch packetInCh: make(chan ReadPacket, 1), readNextCh: make(chan struct{}, 1), @@ -233,7 +236,7 @@ func (t *UDPv5) LocalNode() *enode.LocalNode { // RegisterTalkHandler adds a handler for 'talk requests'. The handler function is called // whenever a request for the given protocol is received and should return the response // data or nil. 
-func (t *UDPv5) RegisterTalkHandler(protocol string, handler func([]byte) []byte) { +func (t *UDPv5) RegisterTalkHandler(protocol string, handler TalkRequestHandler) { t.trlock.Lock() defer t.trlock.Unlock() t.trhandlers[protocol] = handler @@ -454,9 +457,20 @@ func (t *UDPv5) call(node *enode.Node, responseType byte, packet v5wire.Packet) // callDone tells dispatch that the active call is done. func (t *UDPv5) callDone(c *callV5) { - select { - case t.callDoneCh <- c: - case <-t.closeCtx.Done(): + // This needs a loop because further responses may be incoming until the + // send to callDoneCh has completed. Such responses need to be discarded + // in order to avoid blocking the dispatch loop. + for { + select { + case <-c.ch: + // late response, discard. + case <-c.err: + // late error, discard. + case t.callDoneCh <- c: + return + case <-t.closeCtx.Done(): + return + } } } @@ -830,7 +844,7 @@ func (t *UDPv5) handleTalkRequest(p *v5wire.TalkRequest, fromID enode.ID, fromAd var response []byte if handler != nil { - response = handler(p.Message) + response = handler(fromID, fromAddr, p.Message) } resp := &v5wire.TalkResponse{ReqID: p.ReqID, Message: response} t.sendResponse(fromID, fromAddr, resp) diff --git a/p2p/discover/v5_udp_test.go b/p2p/discover/v5_udp_test.go index d91a2097db..292785bd51 100644 --- a/p2p/discover/v5_udp_test.go +++ b/p2p/discover/v5_udp_test.go @@ -435,7 +435,7 @@ func TestUDPv5_talkHandling(t *testing.T) { defer test.close() var recvMessage []byte - test.udp.RegisterTalkHandler("test", func(message []byte) []byte { + test.udp.RegisterTalkHandler("test", func(id enode.ID, addr *net.UDPAddr, message []byte) []byte { recvMessage = message return []byte("test response") }) diff --git a/p2p/discv5/README b/p2p/discv5/README deleted file mode 100644 index 617a473d7f..0000000000 --- a/p2p/discv5/README +++ /dev/null @@ -1,4 +0,0 @@ -This package is an early prototype of Discovery v5. Do not use this code. - -See https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md for the -current Discovery v5 specification. \ No newline at end of file diff --git a/p2p/discv5/database.go b/p2p/discv5/database.go deleted file mode 100644 index ca118e7f80..0000000000 --- a/p2p/discv5/database.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Contains the node database, storing previously seen nodes and any collected -// metadata about them for QoS purposes. 
- -package discv5 - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "fmt" - "os" - "sync" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/errors" - "github.com/syndtr/goleveldb/leveldb/iterator" - "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/storage" - "github.com/syndtr/goleveldb/leveldb/util" -) - -var ( - nodeDBNilNodeID = NodeID{} // Special node ID to use as a nil element. - nodeDBNodeExpiration = 24 * time.Hour // Time after which an unseen node should be dropped. - nodeDBCleanupCycle = time.Hour // Time period for running the expiration task. -) - -// nodeDB stores all nodes we know about. -type nodeDB struct { - lvl *leveldb.DB // Interface to the database itself - self NodeID // Own node id to prevent adding it into the database - runner sync.Once // Ensures we can start at most one expirer - quit chan struct{} // Channel to signal the expiring thread to stop -} - -// Schema layout for the node database -var ( - nodeDBVersionKey = []byte("version") // Version of the database to flush if changes - nodeDBItemPrefix = []byte("n:") // Identifier to prefix node entries with - - nodeDBDiscoverRoot = ":discover" - nodeDBDiscoverPing = nodeDBDiscoverRoot + ":lastping" - nodeDBDiscoverPong = nodeDBDiscoverRoot + ":lastpong" - nodeDBDiscoverFindFails = nodeDBDiscoverRoot + ":findfail" - nodeDBTopicRegTickets = ":tickets" -) - -// newNodeDB creates a new node database for storing and retrieving infos about -// known peers in the network. If no path is given, an in-memory, temporary -// database is constructed. -func newNodeDB(path string, version int, self NodeID) (*nodeDB, error) { - if path == "" { - return newMemoryNodeDB(self) - } - return newPersistentNodeDB(path, version, self) -} - -// newMemoryNodeDB creates a new in-memory node database without a persistent -// backend. -func newMemoryNodeDB(self NodeID) (*nodeDB, error) { - db, err := leveldb.Open(storage.NewMemStorage(), nil) - if err != nil { - return nil, err - } - return &nodeDB{ - lvl: db, - self: self, - quit: make(chan struct{}), - }, nil -} - -// newPersistentNodeDB creates/opens a leveldb backed persistent node database, -// also flushing its contents in case of a version mismatch. -func newPersistentNodeDB(path string, version int, self NodeID) (*nodeDB, error) { - opts := &opt.Options{OpenFilesCacheCapacity: 5} - db, err := leveldb.OpenFile(path, opts) - if _, iscorrupted := err.(*errors.ErrCorrupted); iscorrupted { - db, err = leveldb.RecoverFile(path, nil) - } - if err != nil { - return nil, err - } - // The nodes contained in the cache correspond to a certain protocol version. - // Flush all nodes if the version doesn't match. - currentVer := make([]byte, binary.MaxVarintLen64) - currentVer = currentVer[:binary.PutVarint(currentVer, int64(version))] - - blob, err := db.Get(nodeDBVersionKey, nil) - switch err { - case leveldb.ErrNotFound: - // Version not found (i.e. 
empty cache), insert it - if err := db.Put(nodeDBVersionKey, currentVer, nil); err != nil { - db.Close() - return nil, err - } - - case nil: - // Version present, flush if different - if !bytes.Equal(blob, currentVer) { - db.Close() - if err = os.RemoveAll(path); err != nil { - return nil, err - } - return newPersistentNodeDB(path, version, self) - } - } - return &nodeDB{ - lvl: db, - self: self, - quit: make(chan struct{}), - }, nil -} - -// makeKey generates the leveldb key-blob from a node id and its particular -// field of interest. -func makeKey(id NodeID, field string) []byte { - if bytes.Equal(id[:], nodeDBNilNodeID[:]) { - return []byte(field) - } - return append(nodeDBItemPrefix, append(id[:], field...)...) -} - -// splitKey tries to split a database key into a node id and a field part. -func splitKey(key []byte) (id NodeID, field string) { - // If the key is not of a node, return it plainly - if !bytes.HasPrefix(key, nodeDBItemPrefix) { - return NodeID{}, string(key) - } - // Otherwise split the id and field - item := key[len(nodeDBItemPrefix):] - copy(id[:], item[:len(id)]) - field = string(item[len(id):]) - - return id, field -} - -// fetchInt64 retrieves an integer instance associated with a particular -// database key. -func (db *nodeDB) fetchInt64(key []byte) int64 { - blob, err := db.lvl.Get(key, nil) - if err != nil { - return 0 - } - val, read := binary.Varint(blob) - if read <= 0 { - return 0 - } - return val -} - -// storeInt64 update a specific database entry to the current time instance as a -// unix timestamp. -func (db *nodeDB) storeInt64(key []byte, n int64) error { - blob := make([]byte, binary.MaxVarintLen64) - blob = blob[:binary.PutVarint(blob, n)] - return db.lvl.Put(key, blob, nil) -} - -func (db *nodeDB) storeRLP(key []byte, val interface{}) error { - blob, err := rlp.EncodeToBytes(val) - if err != nil { - return err - } - return db.lvl.Put(key, blob, nil) -} - -func (db *nodeDB) fetchRLP(key []byte, val interface{}) error { - blob, err := db.lvl.Get(key, nil) - if err != nil { - return err - } - err = rlp.DecodeBytes(blob, val) - if err != nil { - log.Warn(fmt.Sprintf("key %x (%T) %v", key, val, err)) - } - return err -} - -// node retrieves a node with a given id from the database. -func (db *nodeDB) node(id NodeID) *Node { - var node Node - if err := db.fetchRLP(makeKey(id, nodeDBDiscoverRoot), &node); err != nil { - return nil - } - node.sha = crypto.Keccak256Hash(node.ID[:]) - return &node -} - -// updateNode inserts - potentially overwriting - a node into the peer database. -func (db *nodeDB) updateNode(node *Node) error { - return db.storeRLP(makeKey(node.ID, nodeDBDiscoverRoot), node) -} - -// deleteNode deletes all information/keys associated with a node. -func (db *nodeDB) deleteNode(id NodeID) error { - deleter := db.lvl.NewIterator(util.BytesPrefix(makeKey(id, "")), nil) - for deleter.Next() { - if err := db.lvl.Delete(deleter.Key(), nil); err != nil { - return err - } - } - return nil -} - -// ensureExpirer is a small helper method ensuring that the data expiration -// mechanism is running. If the expiration goroutine is already running, this -// method simply returns. -// -// The goal is to start the data evacuation only after the network successfully -// bootstrapped itself (to prevent dumping potentially useful seed nodes). Since -// it would require significant overhead to exactly trace the first successful -// convergence, it's simpler to "ensure" the correct state when an appropriate -// condition occurs (i.e. 
a successful bonding), and discard further events. -func (db *nodeDB) ensureExpirer() { - db.runner.Do(func() { go db.expirer() }) -} - -// expirer should be started in a go routine, and is responsible for looping ad -// infinitum and dropping stale data from the database. -func (db *nodeDB) expirer() { - tick := time.NewTicker(nodeDBCleanupCycle) - defer tick.Stop() - for { - select { - case <-tick.C: - if err := db.expireNodes(); err != nil { - log.Error(fmt.Sprintf("Failed to expire nodedb items: %v", err)) - } - case <-db.quit: - return - } - } -} - -// expireNodes iterates over the database and deletes all nodes that have not -// been seen (i.e. received a pong from) for some allotted time. -func (db *nodeDB) expireNodes() error { - threshold := time.Now().Add(-nodeDBNodeExpiration) - - // Find discovered nodes that are older than the allowance - it := db.lvl.NewIterator(nil, nil) - defer it.Release() - - for it.Next() { - // Skip the item if not a discovery node - id, field := splitKey(it.Key()) - if field != nodeDBDiscoverRoot { - continue - } - // Skip the node if not expired yet (and not self) - if !bytes.Equal(id[:], db.self[:]) { - if seen := db.lastPong(id); seen.After(threshold) { - continue - } - } - // Otherwise delete all associated information - db.deleteNode(id) - } - return nil -} - -// lastPing retrieves the time of the last ping packet send to a remote node, -// requesting binding. -func (db *nodeDB) lastPing(id NodeID) time.Time { - return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPing)), 0) -} - -// updateLastPing updates the last time we tried contacting a remote node. -func (db *nodeDB) updateLastPing(id NodeID, instance time.Time) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverPing), instance.Unix()) -} - -// lastPong retrieves the time of the last successful contact from remote node. -func (db *nodeDB) lastPong(id NodeID) time.Time { - return time.Unix(db.fetchInt64(makeKey(id, nodeDBDiscoverPong)), 0) -} - -// updateLastPong updates the last time a remote node successfully contacted. -func (db *nodeDB) updateLastPong(id NodeID, instance time.Time) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverPong), instance.Unix()) -} - -// findFails retrieves the number of findnode failures since bonding. -func (db *nodeDB) findFails(id NodeID) int { - return int(db.fetchInt64(makeKey(id, nodeDBDiscoverFindFails))) -} - -// updateFindFails updates the number of findnode failures since bonding. -func (db *nodeDB) updateFindFails(id NodeID, fails int) error { - return db.storeInt64(makeKey(id, nodeDBDiscoverFindFails), int64(fails)) -} - -// querySeeds retrieves random nodes to be used as potential seed nodes -// for bootstrapping. -func (db *nodeDB) querySeeds(n int, maxAge time.Duration) []*Node { - var ( - now = time.Now() - nodes = make([]*Node, 0, n) - it = db.lvl.NewIterator(nil, nil) - id NodeID - ) - defer it.Release() - -seek: - for seeks := 0; len(nodes) < n && seeks < n*5; seeks++ { - // Seek to a random entry. The first byte is incremented by a - // random amount each time in order to increase the likelihood - // of hitting all existing nodes in very small databases. 
- ctr := id[0] - rand.Read(id[:]) - id[0] = ctr + id[0]%16 - it.Seek(makeKey(id, nodeDBDiscoverRoot)) - - n := nextNode(it) - if n == nil { - id[0] = 0 - continue seek // iterator exhausted - } - if n.ID == db.self { - continue seek - } - if now.Sub(db.lastPong(n.ID)) > maxAge { - continue seek - } - for i := range nodes { - if nodes[i].ID == n.ID { - continue seek // duplicate - } - } - nodes = append(nodes, n) - } - return nodes -} - -func (db *nodeDB) fetchTopicRegTickets(id NodeID) (issued, used uint32) { - key := makeKey(id, nodeDBTopicRegTickets) - blob, _ := db.lvl.Get(key, nil) - if len(blob) != 8 { - return 0, 0 - } - issued = binary.BigEndian.Uint32(blob[0:4]) - used = binary.BigEndian.Uint32(blob[4:8]) - return -} - -func (db *nodeDB) updateTopicRegTickets(id NodeID, issued, used uint32) error { - key := makeKey(id, nodeDBTopicRegTickets) - blob := make([]byte, 8) - binary.BigEndian.PutUint32(blob[0:4], issued) - binary.BigEndian.PutUint32(blob[4:8], used) - return db.lvl.Put(key, blob, nil) -} - -// reads the next node record from the iterator, skipping over other -// database entries. -func nextNode(it iterator.Iterator) *Node { - for end := false; !end; end = !it.Next() { - id, field := splitKey(it.Key()) - if field != nodeDBDiscoverRoot { - continue - } - var n Node - if err := rlp.DecodeBytes(it.Value(), &n); err != nil { - log.Warn(fmt.Sprintf("invalid node %x: %v", id, err)) - continue - } - return &n - } - return nil -} - -// close flushes and closes the database files. -func (db *nodeDB) close() { - close(db.quit) - db.lvl.Close() -} diff --git a/p2p/discv5/database_test.go b/p2p/discv5/database_test.go deleted file mode 100644 index 2b86dc9cec..0000000000 --- a/p2p/discv5/database_test.go +++ /dev/null @@ -1,380 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
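
The node database removed above stores every per-node record under a key built by concatenating a short item prefix, the raw node ID and a field suffix; splitKey reverses that layout, and the key tests that follow exercise exactly this. A minimal standalone sketch of the scheme, using a hypothetical 4-byte ID type and the prefix "n:" standing in for NodeID and nodeDBItemPrefix:

package main

import (
	"bytes"
	"fmt"
)

var itemPrefix = []byte("n:") // stands in for nodeDBItemPrefix

type shortID [4]byte // stands in for the 64-byte NodeID

// makeKey builds prefix|id|field; an all-zero ID marks global entries
// (such as the version key) that carry no node ID at all.
func makeKey(id shortID, field string) []byte {
	if id == (shortID{}) {
		return []byte(field)
	}
	key := append([]byte{}, itemPrefix...)
	key = append(key, id[:]...)
	return append(key, field...)
}

// splitKey recovers the ID and field from a key produced by makeKey.
func splitKey(key []byte) (id shortID, field string) {
	if !bytes.HasPrefix(key, itemPrefix) {
		return shortID{}, string(key)
	}
	item := key[len(itemPrefix):]
	copy(id[:], item[:len(id)])
	return id, string(item[len(id):])
}

func main() {
	key := makeKey(shortID{1, 2, 3, 4}, ":discover")
	id, field := splitKey(key)
	fmt.Printf("key=%x id=%x field=%q\n", key, id, field)
}
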
- -package discv5 - -import ( - "bytes" - "io/ioutil" - "net" - "os" - "path/filepath" - "reflect" - "testing" - "time" -) - -var nodeDBKeyTests = []struct { - id NodeID - field string - key []byte -}{ - { - id: NodeID{}, - field: "version", - key: []byte{0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e}, // field - }, - { - id: MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - field: ":discover", - key: []byte{0x6e, 0x3a, // prefix - 0x1d, 0xd9, 0xd6, 0x5c, 0x45, 0x52, 0xb5, 0xeb, // node id - 0x43, 0xd5, 0xad, 0x55, 0xa2, 0xee, 0x3f, 0x56, // - 0xc6, 0xcb, 0xc1, 0xc6, 0x4a, 0x5c, 0x8d, 0x65, // - 0x9f, 0x51, 0xfc, 0xd5, 0x1b, 0xac, 0xe2, 0x43, // - 0x51, 0x23, 0x2b, 0x8d, 0x78, 0x21, 0x61, 0x7d, // - 0x2b, 0x29, 0xb5, 0x4b, 0x81, 0xcd, 0xef, 0xb9, // - 0xb3, 0xe9, 0xc3, 0x7d, 0x7f, 0xd5, 0xf6, 0x32, // - 0x70, 0xbc, 0xc9, 0xe1, 0xa6, 0xf6, 0xa4, 0x39, // - 0x3a, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, // field - }, - }, -} - -func TestNodeDBKeys(t *testing.T) { - for i, tt := range nodeDBKeyTests { - if key := makeKey(tt.id, tt.field); !bytes.Equal(key, tt.key) { - t.Errorf("make test %d: key mismatch: have 0x%x, want 0x%x", i, key, tt.key) - } - id, field := splitKey(tt.key) - if !bytes.Equal(id[:], tt.id[:]) { - t.Errorf("split test %d: id mismatch: have 0x%x, want 0x%x", i, id, tt.id) - } - if field != tt.field { - t.Errorf("split test %d: field mismatch: have 0x%x, want 0x%x", i, field, tt.field) - } - } -} - -var nodeDBInt64Tests = []struct { - key []byte - value int64 -}{ - {key: []byte{0x01}, value: 1}, - {key: []byte{0x02}, value: 2}, - {key: []byte{0x03}, value: 3}, -} - -func TestNodeDBInt64(t *testing.T) { - db, _ := newNodeDB("", Version, NodeID{}) - defer db.close() - - tests := nodeDBInt64Tests - for i := 0; i < len(tests); i++ { - // Insert the next value - if err := db.storeInt64(tests[i].key, tests[i].value); err != nil { - t.Errorf("test %d: failed to store value: %v", i, err) - } - // Check all existing and non existing values - for j := 0; j < len(tests); j++ { - num := db.fetchInt64(tests[j].key) - switch { - case j <= i && num != tests[j].value: - t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, tests[j].value) - case j > i && num != 0: - t.Errorf("test %d, item %d: value mismatch: have %v, want %v", i, j, num, 0) - } - } - } -} - -func TestNodeDBFetchStore(t *testing.T) { - node := NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{192, 168, 0, 1}, - 30303, - 30303, - ) - inst := time.Now() - num := 314 - - db, _ := newNodeDB("", Version, NodeID{}) - defer db.close() - - // Check fetch/store operations on a node ping object - if stored := db.lastPing(node.ID); stored.Unix() != 0 { - t.Errorf("ping: non-existing object: %v", stored) - } - if err := db.updateLastPing(node.ID, inst); err != nil { - t.Errorf("ping: failed to update: %v", err) - } - if stored := db.lastPing(node.ID); stored.Unix() != inst.Unix() { - t.Errorf("ping: value mismatch: have %v, want %v", stored, inst) - } - // Check fetch/store operations on a node pong object - if stored := db.lastPong(node.ID); stored.Unix() != 0 { - t.Errorf("pong: non-existing object: %v", stored) - } - if err := db.updateLastPong(node.ID, inst); err != nil { - t.Errorf("pong: failed to update: %v", err) - } - if stored := db.lastPong(node.ID); stored.Unix() != inst.Unix() { - 
t.Errorf("pong: value mismatch: have %v, want %v", stored, inst) - } - // Check fetch/store operations on a node findnode-failure object - if stored := db.findFails(node.ID); stored != 0 { - t.Errorf("find-node fails: non-existing object: %v", stored) - } - if err := db.updateFindFails(node.ID, num); err != nil { - t.Errorf("find-node fails: failed to update: %v", err) - } - if stored := db.findFails(node.ID); stored != num { - t.Errorf("find-node fails: value mismatch: have %v, want %v", stored, num) - } - // Check fetch/store operations on an actual node object - if stored := db.node(node.ID); stored != nil { - t.Errorf("node: non-existing object: %v", stored) - } - if err := db.updateNode(node); err != nil { - t.Errorf("node: failed to update: %v", err) - } - if stored := db.node(node.ID); stored == nil { - t.Errorf("node: not found") - } else if !reflect.DeepEqual(stored, node) { - t.Errorf("node: data mismatch: have %v, want %v", stored, node) - } -} - -var nodeDBSeedQueryNodes = []struct { - node *Node - pong time.Time -}{ - // This one should not be in the result set because its last - // pong time is too far in the past. - { - node: NewNode( - MustHexID("0x84d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 3}, - 30303, - 30303, - ), - pong: time.Now().Add(-3 * time.Hour), - }, - // This one shouldn't be in the result set because its - // nodeID is the local node's ID. - { - node: NewNode( - MustHexID("0x57d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 3}, - 30303, - 30303, - ), - pong: time.Now().Add(-4 * time.Second), - }, - - // These should be in the result set. 
- { - node: NewNode( - MustHexID("0x22d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 1}, - 30303, - 30303, - ), - pong: time.Now().Add(-2 * time.Second), - }, - { - node: NewNode( - MustHexID("0x44d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 2}, - 30303, - 30303, - ), - pong: time.Now().Add(-3 * time.Second), - }, - { - node: NewNode( - MustHexID("0xe2d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 3}, - 30303, - 30303, - ), - pong: time.Now().Add(-1 * time.Second), - }, -} - -func TestNodeDBSeedQuery(t *testing.T) { - db, _ := newNodeDB("", Version, nodeDBSeedQueryNodes[1].node.ID) - defer db.close() - - // Insert a batch of nodes for querying - for i, seed := range nodeDBSeedQueryNodes { - if err := db.updateNode(seed.node); err != nil { - t.Fatalf("node %d: failed to insert: %v", i, err) - } - if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil { - t.Fatalf("node %d: failed to insert lastPong: %v", i, err) - } - } - - // Retrieve the entire batch and check for duplicates - seeds := db.querySeeds(len(nodeDBSeedQueryNodes)*2, time.Hour) - have := make(map[NodeID]struct{}) - for _, seed := range seeds { - have[seed.ID] = struct{}{} - } - want := make(map[NodeID]struct{}) - for _, seed := range nodeDBSeedQueryNodes[2:] { - want[seed.node.ID] = struct{}{} - } - if len(seeds) != len(want) { - t.Errorf("seed count mismatch: have %v, want %v", len(seeds), len(want)) - } - for id := range have { - if _, ok := want[id]; !ok { - t.Errorf("extra seed: %v", id) - } - } - for id := range want { - if _, ok := have[id]; !ok { - t.Errorf("missing seed: %v", id) - } - } -} - -func TestNodeDBPersistency(t *testing.T) { - root, err := ioutil.TempDir("", "nodedb-") - if err != nil { - t.Fatalf("failed to create temporary data folder: %v", err) - } - defer os.RemoveAll(root) - - var ( - testKey = []byte("somekey") - testInt = int64(314) - ) - - // Create a persistent database and store some values - db, err := newNodeDB(filepath.Join(root, "database"), Version, NodeID{}) - if err != nil { - t.Fatalf("failed to create persistent database: %v", err) - } - if err := db.storeInt64(testKey, testInt); err != nil { - t.Fatalf("failed to store value: %v.", err) - } - db.close() - - // Reopen the database and check the value - db, err = newNodeDB(filepath.Join(root, "database"), Version, NodeID{}) - if err != nil { - t.Fatalf("failed to open persistent database: %v", err) - } - if val := db.fetchInt64(testKey); val != testInt { - t.Fatalf("value mismatch: have %v, want %v", val, testInt) - } - db.close() - - // Change the database version and check flush - db, err = newNodeDB(filepath.Join(root, "database"), Version+1, NodeID{}) - if err != nil { - t.Fatalf("failed to open persistent database: %v", err) - } - if val := db.fetchInt64(testKey); val != 0 { - t.Fatalf("value mismatch: have %v, want %v", val, 0) - } - db.close() -} - -var nodeDBExpirationNodes = []struct { - node *Node - pong time.Time - exp bool -}{ - { - node: NewNode( - MustHexID("0x01d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 1}, - 30303, - 30303, - ), - pong: time.Now().Add(-nodeDBNodeExpiration + time.Minute), - exp: false, - 
}, { - node: NewNode( - MustHexID("0x02d9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{127, 0, 0, 2}, - 30303, - 30303, - ), - pong: time.Now().Add(-nodeDBNodeExpiration - time.Minute), - exp: true, - }, -} - -func TestNodeDBExpiration(t *testing.T) { - db, _ := newNodeDB("", Version, NodeID{}) - defer db.close() - - // Add all the test nodes and set their last pong time - for i, seed := range nodeDBExpirationNodes { - if err := db.updateNode(seed.node); err != nil { - t.Fatalf("node %d: failed to insert: %v", i, err) - } - if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil { - t.Fatalf("node %d: failed to update pong: %v", i, err) - } - } - // Expire some of them, and check the rest - if err := db.expireNodes(); err != nil { - t.Fatalf("failed to expire nodes: %v", err) - } - for i, seed := range nodeDBExpirationNodes { - node := db.node(seed.node.ID) - if (node == nil && !seed.exp) || (node != nil && seed.exp) { - t.Errorf("node %d: expiration mismatch: have %v, want %v", i, node, seed.exp) - } - } -} - -func TestNodeDBSelfExpiration(t *testing.T) { - // Find a node in the tests that shouldn't expire, and assign it as self - var self NodeID - for _, node := range nodeDBExpirationNodes { - if !node.exp { - self = node.node.ID - break - } - } - db, _ := newNodeDB("", Version, self) - defer db.close() - - // Add all the test nodes and set their last pong time - for i, seed := range nodeDBExpirationNodes { - if err := db.updateNode(seed.node); err != nil { - t.Fatalf("node %d: failed to insert: %v", i, err) - } - if err := db.updateLastPong(seed.node.ID, seed.pong); err != nil { - t.Fatalf("node %d: failed to update pong: %v", i, err) - } - } - // Expire the nodes and make sure self has been evacuated too - if err := db.expireNodes(); err != nil { - t.Fatalf("failed to expire nodes: %v", err) - } - node := db.node(self) - if node != nil { - t.Errorf("self not evacuated") - } -} diff --git a/p2p/discv5/net.go b/p2p/discv5/net.go deleted file mode 100644 index 53e00a3881..0000000000 --- a/p2p/discv5/net.go +++ /dev/null @@ -1,1269 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
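
These expiration tests exercise the sweep in expireNodes above: every stored node whose last pong is older than the expiration allowance is dropped, including the local node. A toy in-memory version of that policy, with hypothetical names and a map in place of the leveldb iterator:

package main

import (
	"fmt"
	"time"
)

// nodeExpiration stands in for nodeDBNodeExpiration; the real value is
// defined elsewhere in the package.
const nodeExpiration = 24 * time.Hour

// expire drops every entry that has not been heard from within the allowance.
func expire(lastPong map[string]time.Time, now time.Time) {
	threshold := now.Add(-nodeExpiration)
	for id, seen := range lastPong {
		if seen.Before(threshold) {
			delete(lastPong, id)
		}
	}
}

func main() {
	now := time.Now()
	seen := map[string]time.Time{
		"fresh": now.Add(-time.Hour),
		"stale": now.Add(-nodeExpiration - time.Minute),
	}
	expire(seen, now)
	fmt.Println(len(seen), "node(s) kept") // 1 node(s) kept
}
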
- -package discv5 - -import ( - "bytes" - "crypto/ecdsa" - "errors" - "fmt" - "net" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/netutil" - "github.com/ethereum/go-ethereum/rlp" - "golang.org/x/crypto/sha3" -) - -var ( - errInvalidEvent = errors.New("invalid in current state") - errNoQuery = errors.New("no pending query") -) - -const ( - autoRefreshInterval = 1 * time.Hour - bucketRefreshInterval = 1 * time.Minute - seedCount = 30 - seedMaxAge = 5 * 24 * time.Hour - lowPort = 1024 -) - -const testTopic = "foo" - -const ( - printTestImgLogs = false -) - -// Network manages the table and all protocol interaction. -type Network struct { - db *nodeDB // database of known nodes - conn transport - netrestrict *netutil.Netlist - - closed chan struct{} // closed when loop is done - closeReq chan struct{} // 'request to close' - refreshReq chan []*Node // lookups ask for refresh on this channel - refreshResp chan (<-chan struct{}) // ...and get the channel to block on from this one - read chan ingressPacket // ingress packets arrive here - timeout chan timeoutEvent - queryReq chan *findnodeQuery // lookups submit findnode queries on this channel - tableOpReq chan func() - tableOpResp chan struct{} - topicRegisterReq chan topicRegisterReq - topicSearchReq chan topicSearchReq - - // State of the main loop. - tab *Table - topictab *topicTable - ticketStore *ticketStore - nursery []*Node - nodes map[NodeID]*Node // tracks active nodes with state != known - timeoutTimers map[timeoutEvent]*time.Timer -} - -// transport is implemented by the UDP transport. -// it is an interface so we can test without opening lots of UDP -// sockets and without generating a private key. 
-type transport interface { - sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) (hash []byte) - sendNeighbours(remote *Node, nodes []*Node) - sendFindnodeHash(remote *Node, target common.Hash) - sendTopicRegister(remote *Node, topics []Topic, topicIdx int, pong []byte) - sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) - - send(remote *Node, ptype nodeEvent, p interface{}) (hash []byte) - - localAddr() *net.UDPAddr - Close() -} - -type findnodeQuery struct { - remote *Node - target common.Hash - reply chan<- []*Node -} - -type topicRegisterReq struct { - add bool - topic Topic -} - -type topicSearchReq struct { - topic Topic - found chan<- *Node - lookup chan<- bool - delay time.Duration -} - -type topicSearchResult struct { - target lookupInfo - nodes []*Node -} - -type timeoutEvent struct { - ev nodeEvent - node *Node -} - -func newNetwork(conn transport, ourPubkey ecdsa.PublicKey, dbPath string, netrestrict *netutil.Netlist) (*Network, error) { - ourID := PubkeyID(&ourPubkey) - - var db *nodeDB - if dbPath != "" { - var err error - if db, err = newNodeDB(dbPath, Version, ourID); err != nil { - return nil, err - } - } - - tab := newTable(ourID, conn.localAddr()) - net := &Network{ - db: db, - conn: conn, - netrestrict: netrestrict, - tab: tab, - topictab: newTopicTable(db, tab.self), - ticketStore: newTicketStore(), - refreshReq: make(chan []*Node), - refreshResp: make(chan (<-chan struct{})), - closed: make(chan struct{}), - closeReq: make(chan struct{}), - read: make(chan ingressPacket, 100), - timeout: make(chan timeoutEvent), - timeoutTimers: make(map[timeoutEvent]*time.Timer), - tableOpReq: make(chan func()), - tableOpResp: make(chan struct{}), - queryReq: make(chan *findnodeQuery), - topicRegisterReq: make(chan topicRegisterReq), - topicSearchReq: make(chan topicSearchReq), - nodes: make(map[NodeID]*Node), - } - go net.loop() - return net, nil -} - -// Close terminates the network listener and flushes the node database. -func (net *Network) Close() { - net.conn.Close() - select { - case <-net.closed: - case net.closeReq <- struct{}{}: - <-net.closed - } -} - -// Self returns the local node. -// The returned node should not be modified by the caller. -func (net *Network) Self() *Node { - return net.tab.self -} - -// ReadRandomNodes fills the given slice with random nodes from the -// table. It will not write the same node more than once. The nodes in -// the slice are copies and can be modified by the caller. -func (net *Network) ReadRandomNodes(buf []*Node) (n int) { - net.reqTableOp(func() { n = net.tab.readRandomNodes(buf) }) - return n -} - -// SetFallbackNodes sets the initial points of contact. These nodes -// are used to connect to the network if the table is empty and there -// are no known nodes in the database. -func (net *Network) SetFallbackNodes(nodes []*Node) error { - nursery := make([]*Node, 0, len(nodes)) - for _, n := range nodes { - if err := n.validateComplete(); err != nil { - return fmt.Errorf("bad bootstrap/fallback node %q (%v)", n, err) - } - // Recompute cpy.sha because the node might not have been - // created by NewNode or ParseNode. - cpy := *n - cpy.sha = crypto.Keccak256Hash(n.ID[:]) - nursery = append(nursery, &cpy) - } - net.reqRefresh(nursery) - return nil -} - -// Resolve searches for a specific node with the given ID. -// It returns nil if the node could not be found. 
-func (net *Network) Resolve(targetID NodeID) *Node { - result := net.lookup(crypto.Keccak256Hash(targetID[:]), true) - for _, n := range result { - if n.ID == targetID { - return n - } - } - return nil -} - -// Lookup performs a network search for nodes close -// to the given target. It approaches the target by querying -// nodes that are closer to it on each iteration. -// The given target does not need to be an actual node -// identifier. -// -// The local node may be included in the result. -func (net *Network) Lookup(targetID NodeID) []*Node { - return net.lookup(crypto.Keccak256Hash(targetID[:]), false) -} - -func (net *Network) lookup(target common.Hash, stopOnMatch bool) []*Node { - var ( - asked = make(map[NodeID]bool) - seen = make(map[NodeID]bool) - reply = make(chan []*Node, alpha) - result = nodesByDistance{target: target} - pendingQueries = 0 - ) - // Get initial answers from the local node. - result.push(net.tab.self, bucketSize) - for { - // Ask the α closest nodes that we haven't asked yet. - for i := 0; i < len(result.entries) && pendingQueries < alpha; i++ { - n := result.entries[i] - if !asked[n.ID] { - asked[n.ID] = true - pendingQueries++ - net.reqQueryFindnode(n, target, reply) - } - } - if pendingQueries == 0 { - // We have asked all closest nodes, stop the search. - break - } - // Wait for the next reply. - select { - case nodes := <-reply: - for _, n := range nodes { - if n != nil && !seen[n.ID] { - seen[n.ID] = true - result.push(n, bucketSize) - if stopOnMatch && n.sha == target { - return result.entries - } - } - } - pendingQueries-- - case <-time.After(respTimeout): - // forget all pending requests, start new ones - pendingQueries = 0 - reply = make(chan []*Node, alpha) - } - } - return result.entries -} - -func (net *Network) RegisterTopic(topic Topic, stop <-chan struct{}) { - select { - case net.topicRegisterReq <- topicRegisterReq{true, topic}: - case <-net.closed: - return - } - select { - case <-net.closed: - case <-stop: - select { - case net.topicRegisterReq <- topicRegisterReq{false, topic}: - case <-net.closed: - } - } -} - -func (net *Network) SearchTopic(topic Topic, setPeriod <-chan time.Duration, found chan<- *Node, lookup chan<- bool) { - for { - select { - case <-net.closed: - return - case delay, ok := <-setPeriod: - select { - case net.topicSearchReq <- topicSearchReq{topic: topic, found: found, lookup: lookup, delay: delay}: - case <-net.closed: - return - } - if !ok { - return - } - } - } -} - -func (net *Network) reqRefresh(nursery []*Node) <-chan struct{} { - select { - case net.refreshReq <- nursery: - return <-net.refreshResp - case <-net.closed: - return net.closed - } -} - -func (net *Network) reqQueryFindnode(n *Node, target common.Hash, reply chan []*Node) bool { - q := &findnodeQuery{remote: n, target: target, reply: reply} - select { - case net.queryReq <- q: - return true - case <-net.closed: - return false - } -} - -func (net *Network) reqReadPacket(pkt ingressPacket) { - select { - case net.read <- pkt: - case <-net.closed: - } -} - -func (net *Network) reqTableOp(f func()) (called bool) { - select { - case net.tableOpReq <- f: - <-net.tableOpResp - return true - case <-net.closed: - return false - } -} - -// TODO: external address handling. 
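
Network.lookup above is a Kademlia-style iterative search: keep a result set sorted by distance to the target, query the closest entries that have not been asked yet (at most alpha in flight), fold the replies back in, and stop once nothing new can be asked. The real loop runs those queries concurrently and handles reply timeouts; the sketch below serialises them and uses a hypothetical ask callback in place of the findnode round trip, just to show the convergence logic:

package main

import (
	"fmt"
	"sort"
)

const alpha = 3 // query concurrency factor, as in the removed code

type entry struct {
	id   string
	dist int // distance to the lookup target under some metric
}

// iterativeLookup asks up to alpha of the closest unasked entries per round
// and merges their answers into the result set until it stabilises.
func iterativeLookup(seed []entry, ask func(entry) []entry, max int) []entry {
	asked := map[string]bool{}
	seen := map[string]bool{}
	result := append([]entry(nil), seed...)
	for _, e := range seed {
		seen[e.id] = true
	}
	for {
		sort.Slice(result, func(i, j int) bool { return result[i].dist < result[j].dist })
		if len(result) > max {
			result = result[:max]
		}
		progressed := false
		pending := 0
		for i := 0; i < len(result) && pending < alpha; i++ {
			n := result[i]
			if asked[n.id] {
				continue
			}
			asked[n.id] = true
			pending++
			progressed = true
			for _, r := range ask(n) {
				if !seen[r.id] {
					seen[r.id] = true
					result = append(result, r)
				}
			}
		}
		if !progressed {
			return result // every entry within range has been asked
		}
	}
}

func main() {
	neighbours := map[string][]entry{
		"a": {{"b", 2}, {"c", 5}},
		"b": {{"d", 1}},
	}
	ask := func(e entry) []entry { return neighbours[e.id] }
	fmt.Println(iterativeLookup([]entry{{"a", 4}}, ask, 16))
}
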
- -type topicSearchInfo struct { - lookupChn chan<- bool - period time.Duration -} - -const maxSearchCount = 5 - -func (net *Network) loop() { - var ( - refreshTimer = time.NewTicker(autoRefreshInterval) - bucketRefreshTimer = time.NewTimer(bucketRefreshInterval) - refreshDone chan struct{} // closed when the 'refresh' lookup has ended - ) - defer refreshTimer.Stop() - defer bucketRefreshTimer.Stop() - - // Tracking the next ticket to register. - var ( - nextTicket *ticketRef - nextRegisterTimer *time.Timer - nextRegisterTime <-chan time.Time - ) - defer func() { - if nextRegisterTimer != nil { - nextRegisterTimer.Stop() - } - }() - resetNextTicket := func() { - ticket, timeout := net.ticketStore.nextFilteredTicket() - if nextTicket != ticket { - nextTicket = ticket - if nextRegisterTimer != nil { - nextRegisterTimer.Stop() - nextRegisterTime = nil - } - if ticket != nil { - nextRegisterTimer = time.NewTimer(timeout) - nextRegisterTime = nextRegisterTimer.C - } - } - } - - // Tracking registration and search lookups. - var ( - topicRegisterLookupTarget lookupInfo - topicRegisterLookupDone chan []*Node - topicRegisterLookupTick = time.NewTimer(0) - searchReqWhenRefreshDone []topicSearchReq - searchInfo = make(map[Topic]topicSearchInfo) - activeSearchCount int - ) - defer topicRegisterLookupTick.Stop() - topicSearchLookupDone := make(chan topicSearchResult, 100) - topicSearch := make(chan Topic, 100) - <-topicRegisterLookupTick.C - - statsDump := time.NewTicker(10 * time.Second) - defer statsDump.Stop() - -loop: - for { - resetNextTicket() - - select { - case <-net.closeReq: - log.Trace("<-net.closeReq") - break loop - - // Ingress packet handling. - case pkt := <-net.read: - //fmt.Println("read", pkt.ev) - log.Trace("<-net.read") - n := net.internNode(&pkt) - prestate := n.state - status := "ok" - if err := net.handle(n, pkt.ev, &pkt); err != nil { - status = err.Error() - } - log.Trace("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("<<< (%d) %v from %x@%v: %v -> %v (%v)", - net.tab.count, pkt.ev, pkt.remoteID[:8], pkt.remoteAddr, prestate, n.state, status) - }}) - // TODO: persist state if n.state goes >= known, delete if it goes <= known - - // State transition timeouts. - case timeout := <-net.timeout: - log.Trace("<-net.timeout") - if net.timeoutTimers[timeout] == nil { - // Stale timer (was aborted). - continue - } - delete(net.timeoutTimers, timeout) - prestate := timeout.node.state - status := "ok" - if err := net.handle(timeout.node, timeout.ev, nil); err != nil { - status = err.Error() - } - log.Trace("", "msg", log.Lazy{Fn: func() string { - return fmt.Sprintf("--- (%d) %v for %x@%v: %v -> %v (%v)", - net.tab.count, timeout.ev, timeout.node.ID[:8], timeout.node.addr(), prestate, timeout.node.state, status) - }}) - - // Querying. - case q := <-net.queryReq: - log.Trace("<-net.queryReq") - if !q.start(net) { - q.remote.deferQuery(q) - } - - // Interacting with the table. - case f := <-net.tableOpReq: - log.Trace("<-net.tableOpReq") - f() - net.tableOpResp <- struct{}{} - - // Topic registration stuff. - case req := <-net.topicRegisterReq: - log.Trace("<-net.topicRegisterReq") - if !req.add { - net.ticketStore.removeRegisterTopic(req.topic) - continue - } - net.ticketStore.addTopic(req.topic, true) - // If we're currently waiting idle (nothing to look up), give the ticket store a - // chance to start it sooner. This should speed up convergence of the radius - // determination for new topics. 
- // if topicRegisterLookupDone == nil { - if topicRegisterLookupTarget.target == (common.Hash{}) { - log.Trace("topicRegisterLookupTarget == null") - if topicRegisterLookupTick.Stop() { - <-topicRegisterLookupTick.C - } - target, delay := net.ticketStore.nextRegisterLookup() - topicRegisterLookupTarget = target - topicRegisterLookupTick.Reset(delay) - } - - case nodes := <-topicRegisterLookupDone: - log.Trace("<-topicRegisterLookupDone") - net.ticketStore.registerLookupDone(topicRegisterLookupTarget, nodes, func(n *Node) []byte { - net.ping(n, n.addr()) - return n.pingEcho - }) - target, delay := net.ticketStore.nextRegisterLookup() - topicRegisterLookupTarget = target - topicRegisterLookupTick.Reset(delay) - topicRegisterLookupDone = nil - - case <-topicRegisterLookupTick.C: - log.Trace("<-topicRegisterLookupTick") - if (topicRegisterLookupTarget.target == common.Hash{}) { - target, delay := net.ticketStore.nextRegisterLookup() - topicRegisterLookupTarget = target - topicRegisterLookupTick.Reset(delay) - topicRegisterLookupDone = nil - } else { - topicRegisterLookupDone = make(chan []*Node) - target := topicRegisterLookupTarget.target - go func() { topicRegisterLookupDone <- net.lookup(target, false) }() - } - - case <-nextRegisterTime: - log.Trace("<-nextRegisterTime") - net.ticketStore.ticketRegistered(*nextTicket) - //fmt.Println("sendTopicRegister", nextTicket.t.node.addr().String(), nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong) - net.conn.sendTopicRegister(nextTicket.t.node, nextTicket.t.topics, nextTicket.idx, nextTicket.t.pong) - - case req := <-net.topicSearchReq: - if refreshDone == nil { - log.Trace("<-net.topicSearchReq") - info, ok := searchInfo[req.topic] - if ok { - if req.delay == time.Duration(0) { - delete(searchInfo, req.topic) - net.ticketStore.removeSearchTopic(req.topic) - } else { - info.period = req.delay - searchInfo[req.topic] = info - } - continue - } - if req.delay != time.Duration(0) { - var info topicSearchInfo - info.period = req.delay - info.lookupChn = req.lookup - searchInfo[req.topic] = info - net.ticketStore.addSearchTopic(req.topic, req.found) - topicSearch <- req.topic - } - } else { - searchReqWhenRefreshDone = append(searchReqWhenRefreshDone, req) - } - - case topic := <-topicSearch: - if activeSearchCount < maxSearchCount { - activeSearchCount++ - target := net.ticketStore.nextSearchLookup(topic) - go func() { - nodes := net.lookup(target.target, false) - topicSearchLookupDone <- topicSearchResult{target: target, nodes: nodes} - }() - } - period := searchInfo[topic].period - if period != time.Duration(0) { - go func() { - time.Sleep(period) - topicSearch <- topic - }() - } - - case res := <-topicSearchLookupDone: - activeSearchCount-- - if lookupChn := searchInfo[res.target.topic].lookupChn; lookupChn != nil { - lookupChn <- net.ticketStore.radius[res.target.topic].converged - } - net.ticketStore.searchLookupDone(res.target, res.nodes, func(n *Node, topic Topic) []byte { - if n.state != nil && n.state.canQuery { - return net.conn.send(n, topicQueryPacket, topicQuery{Topic: topic}) // TODO: set expiration - } - if n.state == unknown { - net.ping(n, n.addr()) - } - return nil - }) - - case <-statsDump.C: - log.Trace("<-statsDump.C") - /*r, ok := net.ticketStore.radius[testTopic] - if !ok { - fmt.Printf("(%x) no radius @ %v\n", net.tab.self.ID[:8], time.Now()) - } else { - topics := len(net.ticketStore.tickets) - tickets := len(net.ticketStore.nodes) - rad := r.radius / (maxRadius/10000+1) - fmt.Printf("(%x) topics:%d radius:%d tickets:%d @ 
%v\n", net.tab.self.ID[:8], topics, rad, tickets, time.Now()) - }*/ - - tm := mclock.Now() - for topic, r := range net.ticketStore.radius { - if printTestImgLogs { - rad := r.radius / (maxRadius/1000000 + 1) - minrad := r.minRadius / (maxRadius/1000000 + 1) - fmt.Printf("*R %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], rad) - fmt.Printf("*MR %d %v %016x %v\n", tm/1000000, topic, net.tab.self.sha[:8], minrad) - } - } - for topic, t := range net.topictab.topics { - wp := t.wcl.nextWaitPeriod(tm) - if printTestImgLogs { - fmt.Printf("*W %d %v %016x %d\n", tm/1000000, topic, net.tab.self.sha[:8], wp/1000000) - } - } - - // Periodic / lookup-initiated bucket refresh. - case <-refreshTimer.C: - log.Trace("<-refreshTimer.C") - // TODO: ideally we would start the refresh timer after - // fallback nodes have been set for the first time. - if refreshDone == nil { - refreshDone = make(chan struct{}) - net.refresh(refreshDone) - } - case <-bucketRefreshTimer.C: - target := net.tab.chooseBucketRefreshTarget() - go func() { - net.lookup(target, false) - bucketRefreshTimer.Reset(bucketRefreshInterval) - }() - case newNursery := <-net.refreshReq: - log.Trace("<-net.refreshReq") - if newNursery != nil { - net.nursery = newNursery - } - if refreshDone == nil { - refreshDone = make(chan struct{}) - net.refresh(refreshDone) - } - net.refreshResp <- refreshDone - case <-refreshDone: - log.Trace("<-net.refreshDone", "table size", net.tab.count) - if net.tab.count != 0 { - refreshDone = nil - list := searchReqWhenRefreshDone - searchReqWhenRefreshDone = nil - go func() { - for _, req := range list { - net.topicSearchReq <- req - } - }() - } else { - refreshDone = make(chan struct{}) - net.refresh(refreshDone) - } - } - } - log.Trace("loop stopped") - - log.Debug("shutting down") - if net.conn != nil { - net.conn.Close() - } - // TODO: wait for pending refresh. - // if refreshDone != nil { - // <-refreshResults - // } - // Cancel all pending timeouts. - for _, timer := range net.timeoutTimers { - timer.Stop() - } - if net.db != nil { - net.db.close() - } - close(net.closed) -} - -// Everything below runs on the Network.loop goroutine -// and can modify Node, Table and Network at any time without locking. - -func (net *Network) refresh(done chan<- struct{}) { - var seeds []*Node - if net.db != nil { - seeds = net.db.querySeeds(seedCount, seedMaxAge) - } - if len(seeds) == 0 { - seeds = net.nursery - } - if len(seeds) == 0 { - log.Trace("no seed nodes found") - time.AfterFunc(time.Second*10, func() { close(done) }) - return - } - for _, n := range seeds { - log.Debug("", "msg", log.Lazy{Fn: func() string { - var age string - if net.db != nil { - age = time.Since(net.db.lastPong(n.ID)).String() - } else { - age = "unknown" - } - return fmt.Sprintf("seed node (age %s): %v", age, n) - }}) - n = net.internNodeFromDB(n) - if n.state == unknown { - net.transition(n, verifyinit) - } - // Force-add the seed node so Lookup does something. - // It will be deleted again if verification fails. - net.tab.add(n) - } - // Start self lookup to fill up the buckets. - go func() { - net.Lookup(net.tab.self.ID) - close(done) - }() -} - -// Node Interning. 
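
"Node interning" below means the event loop keeps exactly one *Node object per NodeID, so the state-machine fields hanging off a node are never duplicated; internNode also refreshes the endpoint recorded on the shared object. The pattern, reduced to a map with hypothetical types:

package main

import "fmt"

type nodeID string

type node struct {
	id   nodeID
	addr string
}

type internTable map[nodeID]*node

// intern returns the one object tracked for id, refreshing its address,
// or creates and remembers a new one.
func (t internTable) intern(id nodeID, addr string) *node {
	if n, ok := t[id]; ok {
		n.addr = addr
		return n
	}
	n := &node{id: id, addr: addr}
	t[id] = n
	return n
}

func main() {
	t := internTable{}
	a := t.intern("n1", "10.0.0.1:30303")
	b := t.intern("n1", "10.0.0.2:30303") // same object, updated address
	fmt.Println(a == b, a.addr)           // true 10.0.0.2:30303
}
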
- -func (net *Network) internNode(pkt *ingressPacket) *Node { - if n := net.nodes[pkt.remoteID]; n != nil { - n.IP = pkt.remoteAddr.IP - n.UDP = uint16(pkt.remoteAddr.Port) - n.TCP = uint16(pkt.remoteAddr.Port) - return n - } - n := NewNode(pkt.remoteID, pkt.remoteAddr.IP, uint16(pkt.remoteAddr.Port), uint16(pkt.remoteAddr.Port)) - n.state = unknown - net.nodes[pkt.remoteID] = n - return n -} - -func (net *Network) internNodeFromDB(dbn *Node) *Node { - if n := net.nodes[dbn.ID]; n != nil { - return n - } - n := NewNode(dbn.ID, dbn.IP, dbn.UDP, dbn.TCP) - n.state = unknown - net.nodes[n.ID] = n - return n -} - -func (net *Network) internNodeFromNeighbours(sender *net.UDPAddr, rn rpcNode) (n *Node, err error) { - if rn.ID == net.tab.self.ID { - return nil, errors.New("is self") - } - if rn.UDP <= lowPort { - return nil, errors.New("low port") - } - n = net.nodes[rn.ID] - if n == nil { - // We haven't seen this node before. - n, err = nodeFromRPC(sender, rn) - if net.netrestrict != nil && !net.netrestrict.Contains(n.IP) { - return n, errors.New("not contained in netrestrict whitelist") - } - if err == nil { - n.state = unknown - net.nodes[n.ID] = n - } - return n, err - } - if !n.IP.Equal(rn.IP) || n.UDP != rn.UDP || n.TCP != rn.TCP { - if n.state == known { - // reject address change if node is known by us - err = fmt.Errorf("metadata mismatch: got %v, want %v", rn, n) - } else { - // accept otherwise; this will be handled nicer with signed ENRs - n.IP = rn.IP - n.UDP = rn.UDP - n.TCP = rn.TCP - } - } - return n, err -} - -// nodeNetGuts is embedded in Node and contains fields. -type nodeNetGuts struct { - // This is a cached copy of sha3(ID) which is used for node - // distance calculations. This is part of Node in order to make it - // possible to write tests that need a node at a certain distance. - // In those tests, the content of sha will not actually correspond - // with ID. - sha common.Hash - - // State machine fields. Access to these fields - // is restricted to the Network.loop goroutine. - state *nodeState - pingEcho []byte // hash of last ping sent by us - pingTopics []Topic // topic set sent by us in last ping - deferredQueries []*findnodeQuery // queries that can't be sent yet - pendingNeighbours *findnodeQuery // current query, waiting for reply - queryTimeouts int -} - -func (n *nodeNetGuts) deferQuery(q *findnodeQuery) { - n.deferredQueries = append(n.deferredQueries, q) -} - -func (n *nodeNetGuts) startNextQuery(net *Network) { - if len(n.deferredQueries) == 0 { - return - } - nextq := n.deferredQueries[0] - if nextq.start(net) { - n.deferredQueries = append(n.deferredQueries[:0], n.deferredQueries[1:]...) - } -} - -func (q *findnodeQuery) start(net *Network) bool { - // Satisfy queries against the local node directly. - if q.remote == net.tab.self { - closest := net.tab.closest(q.target, bucketSize) - q.reply <- closest.entries - return true - } - if q.remote.state.canQuery && q.remote.pendingNeighbours == nil { - net.conn.sendFindnodeHash(q.remote, q.target) - net.timedEvent(respTimeout, q.remote, neighboursTimeout) - q.remote.pendingNeighbours = q - return true - } - // If the node is not known yet, it won't accept queries. - // Initiate the transition to known. - // The request will be sent later when the node reaches known state. - if q.remote.state == unknown { - net.transition(q.remote, verifyinit) - } - return false -} - -// Node Events (the input to the state machine). 
- -type nodeEvent uint - -//go:generate stringer -type=nodeEvent - -const ( - - // Packet type events. - // These correspond to packet types in the UDP protocol. - pingPacket = iota + 1 - pongPacket - findnodePacket - neighborsPacket - findnodeHashPacket - topicRegisterPacket - topicQueryPacket - topicNodesPacket - - // Non-packet events. - // Event values in this category are allocated outside - // the packet type range (packet types are encoded as a single byte). - pongTimeout nodeEvent = iota + 256 - pingTimeout - neighboursTimeout -) - -// Node State Machine. - -type nodeState struct { - name string - handle func(*Network, *Node, nodeEvent, *ingressPacket) (next *nodeState, err error) - enter func(*Network, *Node) - canQuery bool -} - -func (s *nodeState) String() string { - return s.name -} - -var ( - unknown *nodeState - verifyinit *nodeState - verifywait *nodeState - remoteverifywait *nodeState - known *nodeState - contested *nodeState - unresponsive *nodeState -) - -func init() { - unknown = &nodeState{ - name: "unknown", - enter: func(net *Network, n *Node) { - net.tab.delete(n) - n.pingEcho = nil - // Abort active queries. - for _, q := range n.deferredQueries { - q.reply <- nil - } - n.deferredQueries = nil - if n.pendingNeighbours != nil { - n.pendingNeighbours.reply <- nil - n.pendingNeighbours = nil - } - n.queryTimeouts = 0 - }, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - net.ping(n, pkt.remoteAddr) - return verifywait, nil - default: - return unknown, errInvalidEvent - } - }, - } - - verifyinit = &nodeState{ - name: "verifyinit", - enter: func(net *Network, n *Node) { - net.ping(n, n.addr()) - }, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - return verifywait, nil - case pongPacket: - err := net.handleKnownPong(n, pkt) - return remoteverifywait, err - case pongTimeout: - return unknown, nil - default: - return verifyinit, errInvalidEvent - } - }, - } - - verifywait = &nodeState{ - name: "verifywait", - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - return verifywait, nil - case pongPacket: - err := net.handleKnownPong(n, pkt) - return known, err - case pongTimeout: - return unknown, nil - default: - return verifywait, errInvalidEvent - } - }, - } - - remoteverifywait = &nodeState{ - name: "remoteverifywait", - enter: func(net *Network, n *Node) { - net.timedEvent(respTimeout, n, pingTimeout) - }, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - return remoteverifywait, nil - case pingTimeout: - return known, nil - default: - return remoteverifywait, errInvalidEvent - } - }, - } - - known = &nodeState{ - name: "known", - canQuery: true, - enter: func(net *Network, n *Node) { - n.queryTimeouts = 0 - n.startNextQuery(net) - // Insert into the table and start revalidation of the last node - // in the bucket if it is full. 
- last := net.tab.add(n) - if last != nil && last.state == known { - // TODO: do this asynchronously - net.transition(last, contested) - } - }, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - return known, nil - case pongPacket: - err := net.handleKnownPong(n, pkt) - return known, err - default: - return net.handleQueryEvent(n, ev, pkt) - } - }, - } - - contested = &nodeState{ - name: "contested", - canQuery: true, - enter: func(net *Network, n *Node) { - net.ping(n, n.addr()) - }, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pongPacket: - // Node is still alive. - err := net.handleKnownPong(n, pkt) - return known, err - case pongTimeout: - net.tab.deleteReplace(n) - return unresponsive, nil - case pingPacket: - net.handlePing(n, pkt) - return contested, nil - default: - return net.handleQueryEvent(n, ev, pkt) - } - }, - } - - unresponsive = &nodeState{ - name: "unresponsive", - canQuery: true, - handle: func(net *Network, n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case pingPacket: - net.handlePing(n, pkt) - return known, nil - case pongPacket: - err := net.handleKnownPong(n, pkt) - return known, err - default: - return net.handleQueryEvent(n, ev, pkt) - } - }, - } -} - -// handle processes packets sent by n and events related to n. -func (net *Network) handle(n *Node, ev nodeEvent, pkt *ingressPacket) error { - //fmt.Println("handle", n.addr().String(), n.state, ev) - if pkt != nil { - if err := net.checkPacket(n, ev, pkt); err != nil { - //fmt.Println("check err:", err) - return err - } - // Start the background expiration goroutine after the first - // successful communication. Subsequent calls have no effect if it - // is already running. We do this here instead of somewhere else - // so that the search for seed nodes also considers older nodes - // that would otherwise be removed by the expirer. - if net.db != nil { - net.db.ensureExpirer() - } - } - if ev == pongTimeout { - n.pingEcho = nil // clean up if pongtimeout - } - if n.state == nil { - n.state = unknown //??? - } - next, err := n.state.handle(net, n, ev, pkt) - net.transition(n, next) - //fmt.Println("new state:", n.state) - return err -} - -func (net *Network) checkPacket(n *Node, ev nodeEvent, pkt *ingressPacket) error { - // Replay prevention checks. - switch ev { - case pingPacket, findnodeHashPacket, neighborsPacket: - // TODO: check date is > last date seen - // TODO: check ping version - case pongPacket: - if !bytes.Equal(pkt.data.(*pong).ReplyTok, n.pingEcho) { - // fmt.Println("pong reply token mismatch") - return fmt.Errorf("pong reply token mismatch") - } - n.pingEcho = nil - } - // Address validation. - // TODO: Ideally we would do the following: - // - reject all packets with wrong address except ping. - // - for ping with new address, transition to verifywait but keep the - // previous node (with old address) around. if the new one reaches known, - // swap it out. 
- return nil -} - -func (net *Network) transition(n *Node, next *nodeState) { - if n.state != next { - n.state = next - if next.enter != nil { - next.enter(net, n) - } - } - - // TODO: persist/unpersist node -} - -func (net *Network) timedEvent(d time.Duration, n *Node, ev nodeEvent) { - timeout := timeoutEvent{ev, n} - net.timeoutTimers[timeout] = time.AfterFunc(d, func() { - select { - case net.timeout <- timeout: - case <-net.closed: - } - }) -} - -func (net *Network) abortTimedEvent(n *Node, ev nodeEvent) { - timer := net.timeoutTimers[timeoutEvent{ev, n}] - if timer != nil { - timer.Stop() - delete(net.timeoutTimers, timeoutEvent{ev, n}) - } -} - -func (net *Network) ping(n *Node, addr *net.UDPAddr) { - //fmt.Println("ping", n.addr().String(), n.ID.String(), n.sha.Hex()) - if n.pingEcho != nil || n.ID == net.tab.self.ID { - //fmt.Println(" not sent") - return - } - log.Trace("Pinging remote node", "node", n.ID) - n.pingTopics = net.ticketStore.regTopicSet() - n.pingEcho = net.conn.sendPing(n, addr, n.pingTopics) - net.timedEvent(respTimeout, n, pongTimeout) -} - -func (net *Network) handlePing(n *Node, pkt *ingressPacket) { - log.Trace("Handling remote ping", "node", n.ID) - ping := pkt.data.(*ping) - n.TCP = ping.From.TCP - t := net.topictab.getTicket(n, ping.Topics) - - pong := &pong{ - To: makeEndpoint(n.addr(), n.TCP), // TODO: maybe use known TCP port from DB - ReplyTok: pkt.hash, - Expiration: uint64(time.Now().Add(expiration).Unix()), - } - ticketToPong(t, pong) - net.conn.send(n, pongPacket, pong) -} - -func (net *Network) handleKnownPong(n *Node, pkt *ingressPacket) error { - log.Trace("Handling known pong", "node", n.ID) - net.abortTimedEvent(n, pongTimeout) - now := mclock.Now() - ticket, err := pongToTicket(now, n.pingTopics, n, pkt) - if err == nil { - // fmt.Printf("(%x) ticket: %+v\n", net.tab.self.ID[:8], pkt.data) - net.ticketStore.addTicket(now, pkt.data.(*pong).ReplyTok, ticket) - } else { - log.Trace("Failed to convert pong to ticket", "err", err) - } - n.pingEcho = nil - n.pingTopics = nil - return err -} - -func (net *Network) handleQueryEvent(n *Node, ev nodeEvent, pkt *ingressPacket) (*nodeState, error) { - switch ev { - case findnodePacket: - target := crypto.Keccak256Hash(pkt.data.(*findnode).Target[:]) - results := net.tab.closest(target, bucketSize).entries - net.conn.sendNeighbours(n, results) - return n.state, nil - case neighborsPacket: - err := net.handleNeighboursPacket(n, pkt) - return n.state, err - case neighboursTimeout: - if n.pendingNeighbours != nil { - n.pendingNeighbours.reply <- nil - n.pendingNeighbours = nil - } - n.queryTimeouts++ - if n.queryTimeouts > maxFindnodeFailures && n.state == known { - return contested, errors.New("too many timeouts") - } - return n.state, nil - - // v5 - - case findnodeHashPacket: - results := net.tab.closest(pkt.data.(*findnodeHash).Target, bucketSize).entries - net.conn.sendNeighbours(n, results) - return n.state, nil - case topicRegisterPacket: - //fmt.Println("got topicRegisterPacket") - regdata := pkt.data.(*topicRegister) - pong, err := net.checkTopicRegister(regdata) - if err != nil { - //fmt.Println(err) - return n.state, fmt.Errorf("bad waiting ticket: %v", err) - } - net.topictab.useTicket(n, pong.TicketSerial, regdata.Topics, int(regdata.Idx), pong.Expiration, pong.WaitPeriods) - return n.state, nil - case topicQueryPacket: - // TODO: handle expiration - topic := pkt.data.(*topicQuery).Topic - results := net.topictab.getEntries(topic) - if _, ok := net.ticketStore.tickets[topic]; ok { - results = 
append(results, net.tab.self) // we're not registering in our own table but if we're advertising, return ourselves too - } - if len(results) > 10 { - results = results[:10] - } - var hash common.Hash - copy(hash[:], pkt.hash) - net.conn.sendTopicNodes(n, hash, results) - return n.state, nil - case topicNodesPacket: - p := pkt.data.(*topicNodes) - if net.ticketStore.gotTopicNodes(n, p.Echo, p.Nodes) { - n.queryTimeouts++ - if n.queryTimeouts > maxFindnodeFailures && n.state == known { - return contested, errors.New("too many timeouts") - } - } - return n.state, nil - - default: - return n.state, errInvalidEvent - } -} - -func (net *Network) checkTopicRegister(data *topicRegister) (*pong, error) { - var pongpkt ingressPacket - if err := decodePacket(data.Pong, &pongpkt); err != nil { - return nil, err - } - if pongpkt.ev != pongPacket { - return nil, errors.New("is not pong packet") - } - if pongpkt.remoteID != net.tab.self.ID { - return nil, errors.New("not signed by us") - } - // check that we previously authorised all topics - // that the other side is trying to register. - if rlpHash(data.Topics) != pongpkt.data.(*pong).TopicHash { - return nil, errors.New("topic hash mismatch") - } - if data.Idx >= uint(len(data.Topics)) { - return nil, errors.New("topic index out of range") - } - return pongpkt.data.(*pong), nil -} - -func rlpHash(x interface{}) (h common.Hash) { - hw := sha3.NewLegacyKeccak256() - rlp.Encode(hw, x) - hw.Sum(h[:0]) - return h -} - -func (net *Network) handleNeighboursPacket(n *Node, pkt *ingressPacket) error { - if n.pendingNeighbours == nil { - return errNoQuery - } - net.abortTimedEvent(n, neighboursTimeout) - - req := pkt.data.(*neighbors) - nodes := make([]*Node, len(req.Nodes)) - for i, rn := range req.Nodes { - nn, err := net.internNodeFromNeighbours(pkt.remoteAddr, rn) - if err != nil { - log.Debug(fmt.Sprintf("invalid neighbour (%v) from %x@%v: %v", rn.IP, n.ID[:8], pkt.remoteAddr, err)) - continue - } - nodes[i] = nn - // Start validation of query results immediately. - // This fills the table quickly. - // TODO: generates way too many packets, maybe do it via queue. - if nn.state == unknown { - net.transition(nn, verifyinit) - } - } - // TODO: don't ignore second packet - n.pendingNeighbours.reply <- nodes - n.pendingNeighbours = nil - // Now that this query is done, start the next one. - n.startNextQuery(net) - return nil -} diff --git a/p2p/discv5/net_test.go b/p2p/discv5/net_test.go deleted file mode 100644 index 29321bc86f..0000000000 --- a/p2p/discv5/net_test.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
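
The node states removed in net.go above (unknown, verifyinit, verifywait, known, contested, unresponsive) are plain structs carrying a name, an optional enter hook and a handle function that returns the follow-up state; transition only runs enter when the state actually changes, and the mutually referring states are wired up in an init function. A self-contained sketch of that pattern with two hypothetical states:

package main

import "fmt"

type event int

const (
	evPing event = iota
	evPong
)

type state struct {
	name   string
	enter  func()             // optional side effect when the state is entered
	handle func(event) *state // returns the next state for an incoming event
}

var unverified, verified *state

// Wiring the states up in init breaks the circular references between the
// package-level variables, as the removed code does for its node states.
func init() {
	unverified = &state{
		name: "unverified",
		handle: func(ev event) *state {
			if ev == evPong {
				return verified
			}
			return unverified
		},
	}
	verified = &state{
		name:   "verified",
		enter:  func() { fmt.Println("endpoint proof completed") },
		handle: func(ev event) *state { return verified },
	}
}

// transition mirrors the removed Network.transition: enter runs only when
// the state actually changes.
func transition(cur, next *state) *state {
	if cur != next {
		cur = next
		if next.enter != nil {
			next.enter()
		}
	}
	return cur
}

func main() {
	cur := unverified
	for _, ev := range []event{evPing, evPong, evPong} {
		cur = transition(cur, cur.handle(ev))
		fmt.Println("state:", cur.name)
	}
}
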
- -package discv5 - -import ( - "net" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -func TestNetwork_Lookup(t *testing.T) { - key, _ := crypto.GenerateKey() - network, err := newNetwork(lookupTestnet, key.PublicKey, "", nil) - if err != nil { - t.Fatal(err) - } - lookupTestnet.net = network - defer network.Close() - - // lookup on empty table returns no nodes - // if results := network.Lookup(lookupTestnet.target, false); len(results) > 0 { - // t.Fatalf("lookup on empty table returned %d results: %#v", len(results), results) - // } - // seed table with initial node (otherwise lookup will terminate immediately) - seeds := []*Node{NewNode(lookupTestnet.dists[256][0], net.IP{10, 0, 2, 99}, lowPort+256, 999)} - if err := network.SetFallbackNodes(seeds); err != nil { - t.Fatal(err) - } - time.Sleep(3 * time.Second) - - results := network.Lookup(lookupTestnet.target) - t.Logf("results:") - for _, e := range results { - t.Logf(" ld=%d, %x", logdist(lookupTestnet.targetSha, e.sha), e.sha[:]) - } - if len(results) != bucketSize { - t.Errorf("wrong number of results: got %d, want %d", len(results), bucketSize) - } - if hasDuplicates(results) { - t.Errorf("result set contains duplicate entries") - } - if !sortedByDistanceTo(lookupTestnet.targetSha, results) { - t.Errorf("result set not sorted by distance to target") - } - // TODO: check result nodes are actually closest -} - -// This is the test network for the Lookup test. -// The nodes were obtained by running testnet.mine with a random NodeID as target. -var lookupTestnet = &preminedTestnet{ - target: MustHexID("166aea4f556532c6d34e8b740e5d314af7e9ac0ca79833bd751d6b665f12dfd38ec563c363b32f02aef4a80b44fd3def94612d497b99cb5f17fd24de454927ec"), - targetSha: common.Hash{0x5c, 0x94, 0x4e, 0xe5, 0x1c, 0x5a, 0xe9, 0xf7, 0x2a, 0x95, 0xec, 0xcb, 0x8a, 0xed, 0x3, 0x74, 0xee, 0xcb, 0x51, 0x19, 0xd7, 0x20, 0xcb, 0xea, 0x68, 0x13, 0xe8, 0xe0, 0xd6, 0xad, 0x92, 0x61}, - dists: [257][]NodeID{ - 240: { - MustHexID("2001ad5e3e80c71b952161bc0186731cf5ffe942d24a79230a0555802296238e57ea7a32f5b6f18564eadc1c65389448481f8c9338df0a3dbd18f708cbc2cbcb"), - MustHexID("6ba3f4f57d084b6bf94cc4555b8c657e4a8ac7b7baf23c6874efc21dd1e4f56b7eb2721e07f5242d2f1d8381fc8cae535e860197c69236798ba1ad231b105794"), - }, - 244: { - MustHexID("696ba1f0a9d55c59246f776600542a9e6432490f0cd78f8bb55a196918df2081a9b521c3c3ba48e465a75c10768807717f8f689b0b4adce00e1c75737552a178"), - }, - 246: { - MustHexID("d6d32178bdc38416f46ffb8b3ec9e4cb2cfff8d04dd7e4311a70e403cb62b10be1b447311b60b4f9ee221a8131fc2cbd45b96dd80deba68a949d467241facfa8"), - MustHexID("3ea3d04a43a3dfb5ac11cffc2319248cf41b6279659393c2f55b8a0a5fc9d12581a9d97ef5d8ff9b5abf3321a290e8f63a4f785f450dc8a672aba3ba2ff4fdab"), - MustHexID("2fc897f05ae585553e5c014effd3078f84f37f9333afacffb109f00ca8e7a3373de810a3946be971cbccdfd40249f9fe7f322118ea459ac71acca85a1ef8b7f4"), - }, - 247: { - MustHexID("3155e1427f85f10a5c9a7755877748041af1bcd8d474ec065eb33df57a97babf54bfd2103575fa829115d224c523596b401065a97f74010610fce76382c0bf32"), - MustHexID("312c55512422cf9b8a4097e9a6ad79402e87a15ae909a4bfefa22398f03d20951933beea1e4dfa6f968212385e829f04c2d314fc2d4e255e0d3bc08792b069db"), - MustHexID("38643200b172dcfef857492156971f0e6aa2c538d8b74010f8e140811d53b98c765dd2d96126051913f44582e8c199ad7c6d6819e9a56483f637feaac9448aac"), - MustHexID("8dcab8618c3253b558d459da53bd8fa68935a719aff8b811197101a4b2b47dd2d47295286fc00cc081bb542d760717d1bdd6bec2c37cd72eca367d6dd3b9df73"), - 
MustHexID("8b58c6073dd98bbad4e310b97186c8f822d3a5c7d57af40e2136e88e315afd115edb27d2d0685a908cfe5aa49d0debdda6e6e63972691d6bd8c5af2d771dd2a9"), - MustHexID("2cbb718b7dc682da19652e7d9eb4fefaf7b7147d82c1c2b6805edf77b85e29fde9f6da195741467ff2638dc62c8d3e014ea5686693c15ed0080b6de90354c137"), - MustHexID("e84027696d3f12f2de30a9311afea8fbd313c2360daff52bb5fc8c7094d5295758bec3134e4eef24e4cdf377b40da344993284628a7a346eba94f74160998feb"), - MustHexID("f1357a4f04f9d33753a57c0b65ba20a5d8777abbffd04e906014491c9103fb08590e45548d37aa4bd70965e2e81ddba94f31860348df01469eec8c1829200a68"), - MustHexID("4ab0a75941b12892369b4490a1928c8ca52a9ad6d3dffbd1d8c0b907bc200fe74c022d011ec39b64808a39c0ca41f1d3254386c3e7733e7044c44259486461b6"), - MustHexID("d45150a72dc74388773e68e03133a3b5f51447fe91837d566706b3c035ee4b56f160c878c6273394daee7f56cc398985269052f22f75a8057df2fe6172765354"), - }, - 248: { - MustHexID("6aadfce366a189bab08ac84721567483202c86590642ea6d6a14f37ca78d82bdb6509eb7b8b2f6f63c78ae3ae1d8837c89509e41497d719b23ad53dd81574afa"), - MustHexID("a605ecfd6069a4cf4cf7f5840e5bc0ce10d23a3ac59e2aaa70c6afd5637359d2519b4524f56fc2ca180cdbebe54262f720ccaae8c1b28fd553c485675831624d"), - MustHexID("29701451cb9448ca33fc33680b44b840d815be90146eb521641efbffed0859c154e8892d3906eae9934bfacee72cd1d2fa9dd050fd18888eea49da155ab0efd2"), - MustHexID("3ed426322dee7572b08592e1e079f8b6c6b30e10e6243edd144a6a48fdbdb83df73a6e41b1143722cb82604f2203a32758610b5d9544f44a1a7921ba001528c1"), - MustHexID("b2e2a2b7fdd363572a3256e75435fab1da3b16f7891a8bd2015f30995dae665d7eabfd194d87d99d5df628b4bbc7b04e5b492c596422dd8272746c7a1b0b8e4f"), - MustHexID("0c69c9756162c593e85615b814ce57a2a8ca2df6c690b9c4e4602731b61e1531a3bbe3f7114271554427ffabea80ad8f36fa95a49fa77b675ae182c6ccac1728"), - MustHexID("8d28be21d5a97b0876442fa4f5e5387f5bf3faad0b6f13b8607b64d6e448c0991ca28dd7fe2f64eb8eadd7150bff5d5666aa6ed868b84c71311f4ba9a38569dd"), - MustHexID("2c677e1c64b9c9df6359348a7f5f33dc79e22f0177042486d125f8b6ca7f0dc756b1f672aceee5f1746bcff80aaf6f92a8dc0c9fbeb259b3fa0da060de5ab7e8"), - MustHexID("3994880f94a8678f0cd247a43f474a8af375d2a072128da1ad6cae84a244105ff85e94fc7d8496f639468de7ee998908a91c7e33ef7585fff92e984b210941a1"), - MustHexID("b45a9153c08d002a48090d15d61a7c7dad8c2af85d4ff5bd36ce23a9a11e0709bf8d56614c7b193bc028c16cbf7f20dfbcc751328b64a924995d47b41e452422"), - MustHexID("057ab3a9e53c7a84b0f3fc586117a525cdd18e313f52a67bf31798d48078e325abe5cfee3f6c2533230cb37d0549289d692a29dd400e899b8552d4b928f6f907"), - MustHexID("0ddf663d308791eb92e6bd88a2f8cb45e4f4f35bb16708a0e6ff7f1362aa6a73fedd0a1b1557fb3365e38e1b79d6918e2fae2788728b70c9ab6b51a3b94a4338"), - MustHexID("f637e07ff50cc1e3731735841c4798411059f2023abcf3885674f3e8032531b0edca50fd715df6feb489b6177c345374d64f4b07d257a7745de393a107b013a5"), - MustHexID("e24ec7c6eec094f63c7b3239f56d311ec5a3e45bc4e622a1095a65b95eea6fe13e29f3b6b7a2cbfe40906e3989f17ac834c3102dd0cadaaa26e16ee06d782b72"), - MustHexID("b76ea1a6fd6506ef6e3506a4f1f60ed6287fff8114af6141b2ff13e61242331b54082b023cfea5b3083354a4fb3f9eb8be01fb4a518f579e731a5d0707291a6b"), - MustHexID("9b53a37950ca8890ee349b325032d7b672cab7eced178d3060137b24ef6b92a43977922d5bdfb4a3409a2d80128e02f795f9dae6d7d99973ad0e23a2afb8442f"), - }, - 249: { - MustHexID("675ae65567c3c72c50c73bc0fd4f61f202ea5f93346ca57b551de3411ccc614fad61cb9035493af47615311b9d44ee7a161972ee4d77c28fe1ec029d01434e6a"), - MustHexID("8eb81408389da88536ae5800392b16ef5109d7ea132c18e9a82928047ecdb502693f6e4a4cdd18b54296caf561db937185731456c456c98bfe7de0baf0eaa495"), - 
MustHexID("2adba8b1612a541771cb93a726a38a4b88e97b18eced2593eb7daf82f05a5321ca94a72cc780c306ff21e551a932fc2c6d791e4681907b5ceab7f084c3fa2944"), - MustHexID("b1b4bfbda514d9b8f35b1c28961da5d5216fe50548f4066f69af3b7666a3b2e06eac646735e963e5c8f8138a2fb95af15b13b23ff00c6986eccc0efaa8ee6fb4"), - MustHexID("d2139281b289ad0e4d7b4243c4364f5c51aac8b60f4806135de06b12b5b369c9e43a6eb494eab860d115c15c6fbb8c5a1b0e382972e0e460af395b8385363de7"), - MustHexID("4a693df4b8fc5bdc7cec342c3ed2e228d7c5b4ab7321ddaa6cccbeb45b05a9f1d95766b4002e6d4791c2deacb8a667aadea6a700da28a3eea810a30395701bbc"), - MustHexID("ab41611195ec3c62bb8cd762ee19fb182d194fd141f4a66780efbef4b07ce916246c022b841237a3a6b512a93431157edd221e854ed2a259b72e9c5351f44d0c"), - MustHexID("68e8e26099030d10c3c703ae7045c0a48061fb88058d853b3e67880014c449d4311014da99d617d3150a20f1a3da5e34bf0f14f1c51fe4dd9d58afd222823176"), - MustHexID("3fbcacf546fb129cd70fc48de3b593ba99d3c473798bc309292aca280320e0eacc04442c914cad5c4cf6950345ba79b0d51302df88285d4e83ee3fe41339eee7"), - MustHexID("1d4a623659f7c8f80b6c3939596afdf42e78f892f682c768ad36eb7bfba402dbf97aea3a268f3badd8fe7636be216edf3d67ee1e08789ebbc7be625056bd7109"), - MustHexID("a283c474ab09da02bbc96b16317241d0627646fcc427d1fe790b76a7bf1989ced90f92101a973047ae9940c92720dffbac8eff21df8cae468a50f72f9e159417"), - MustHexID("dbf7e5ad7f87c3dfecae65d87c3039e14ed0bdc56caf00ce81931073e2e16719d746295512ff7937a15c3b03603e7c41a4f9df94fcd37bb200dd8f332767e9cb"), - MustHexID("caaa070a26692f64fc77f30d7b5ae980d419b4393a0f442b1c821ef58c0862898b0d22f74a4f8c5d83069493e3ec0b92f17dc1fe6e4cd437c1ec25039e7ce839"), - MustHexID("874cc8d1213beb65c4e0e1de38ef5d8165235893ac74ab5ea937c885eaab25c8d79dad0456e9fd3e9450626cac7e107b004478fb59842f067857f39a47cee695"), - MustHexID("d94193f236105010972f5df1b7818b55846592a0445b9cdc4eaed811b8c4c0f7c27dc8cc9837a4774656d6b34682d6d329d42b6ebb55da1d475c2474dc3dfdf4"), - MustHexID("edd9af6aded4094e9785637c28fccbd3980cbe28e2eb9a411048a23c2ace4bd6b0b7088a7817997b49a3dd05fc6929ca6c7abbb69438dbdabe65e971d2a794b2"), - }, - 250: { - MustHexID("53a5bd1215d4ab709ae8fdc2ced50bba320bced78bd9c5dc92947fb402250c914891786db0978c898c058493f86fc68b1c5de8a5cb36336150ac7a88655b6c39"), - MustHexID("b7f79e3ab59f79262623c9ccefc8f01d682323aee56ffbe295437487e9d5acaf556a9c92e1f1c6a9601f2b9eb6b027ae1aeaebac71d61b9b78e88676efd3e1a3"), - MustHexID("d374bf7e8d7ffff69cc00bebff38ef5bc1dcb0a8d51c1a3d70e61ac6b2e2d6617109254b0ac224354dfbf79009fe4239e09020c483cc60c071e00b9238684f30"), - MustHexID("1e1eac1c9add703eb252eb991594f8f5a173255d526a855fab24ae57dc277e055bc3c7a7ae0b45d437c4f47a72d97eb7b126f2ba344ba6c0e14b2c6f27d4b1e6"), - MustHexID("ae28953f63d4bc4e706712a59319c111f5ff8f312584f65d7436b4cd3d14b217b958f8486bad666b4481fe879019fb1f767cf15b3e3e2711efc33b56d460448a"), - MustHexID("934bb1edf9c7a318b82306aca67feb3d6b434421fa275d694f0b4927afd8b1d3935b727fd4ff6e3d012e0c82f1824385174e8c6450ade59c2a43281a4b3446b6"), - MustHexID("9eef3f28f70ce19637519a0916555bf76d26de31312ac656cf9d3e379899ea44e4dd7ffcce923b4f3563f8a00489a34bd6936db0cbb4c959d32c49f017e07d05"), - MustHexID("82200872e8f871c48f1fad13daec6478298099b591bb3dbc4ef6890aa28ebee5860d07d70be62f4c0af85085a90ae8179ee8f937cf37915c67ea73e704b03ee7"), - MustHexID("6c75a5834a08476b7fc37ff3dc2011dc3ea3b36524bad7a6d319b18878fad813c0ba76d1f4555cacd3890c865438c21f0e0aed1f80e0a157e642124c69f43a11"), - MustHexID("995b873742206cb02b736e73a88580c2aacb0bd4a3c97a647b647bcab3f5e03c0e0736520a8b3600da09edf4248991fb01091ec7ff3ec7cdc8a1beae011e7aae"), - 
MustHexID("c773a056594b5cdef2e850d30891ff0e927c3b1b9c35cd8e8d53a1017001e237468e1ece3ae33d612ca3e6abb0a9169aa352e9dcda358e5af2ad982b577447db"), - MustHexID("2b46a5f6923f475c6be99ec6d134437a6d11f6bb4b4ac6bcd94572fa1092639d1c08aeefcb51f0912f0a060f71d4f38ee4da70ecc16010b05dd4a674aab14c3a"), - MustHexID("af6ab501366debbaa0d22e20e9688f32ef6b3b644440580fd78de4fe0e99e2a16eb5636bbae0d1c259df8ddda77b35b9a35cbc36137473e9c68fbc9d203ba842"), - MustHexID("c9f6f2dd1a941926f03f770695bda289859e85fabaf94baaae20b93e5015dc014ba41150176a36a1884adb52f405194693e63b0c464a6891cc9cc1c80d450326"), - MustHexID("5b116f0751526868a909b61a30b0c5282c37df6925cc03ddea556ef0d0602a9595fd6c14d371f8ed7d45d89918a032dcd22be4342a8793d88fdbeb3ca3d75bd7"), - MustHexID("50f3222fb6b82481c7c813b2172e1daea43e2710a443b9c2a57a12bd160dd37e20f87aa968c82ad639af6972185609d47036c0d93b4b7269b74ebd7073221c10"), - }, - 251: { - MustHexID("9b8f702a62d1bee67bedfeb102eca7f37fa1713e310f0d6651cc0c33ea7c5477575289ccd463e5a2574a00a676a1fdce05658ba447bb9d2827f0ba47b947e894"), - MustHexID("b97532eb83054ed054b4abdf413bb30c00e4205545c93521554dbe77faa3cfaa5bd31ef466a107b0b34a71ec97214c0c83919720142cddac93aa7a3e928d4708"), - MustHexID("2f7a5e952bfb67f2f90b8441b5fadc9ee13b1dcde3afeeb3dd64bf937f86663cc5c55d1fa83952b5422763c7df1b7f2794b751c6be316ebc0beb4942e65ab8c1"), - MustHexID("42c7483781727051a0b3660f14faf39e0d33de5e643702ae933837d036508ab856ce7eec8ec89c4929a4901256e5233a3d847d5d4893f91bcf21835a9a880fee"), - MustHexID("873bae27bf1dc854408fba94046a53ab0c965cebe1e4e12290806fc62b88deb1f4a47f9e18f78fc0e7913a0c6e42ac4d0fc3a20cea6bc65f0c8a0ca90b67521e"), - MustHexID("a7e3a370bbd761d413f8d209e85886f68bf73d5c3089b2dc6fa42aab1ecb5162635497eed95dee2417f3c9c74a3e76319625c48ead2e963c7de877cd4551f347"), - MustHexID("528597534776a40df2addaaea15b6ff832ce36b9748a265768368f657e76d58569d9f30dbb91e91cf0ae7efe8f402f17aa0ae15f5c55051ba03ba830287f4c42"), - MustHexID("461d8bd4f13c3c09031fdb84f104ed737a52f630261463ce0bdb5704259bab4b737dda688285b8444dbecaecad7f50f835190b38684ced5e90c54219e5adf1bc"), - MustHexID("6ec50c0be3fd232737090fc0111caaf0bb6b18f72be453428087a11a97fd6b52db0344acbf789a689bd4f5f50f79017ea784f8fd6fe723ad6ae675b9e3b13e21"), - MustHexID("12fc5e2f77a83fdcc727b79d8ae7fe6a516881138d3011847ee136b400fed7cfba1f53fd7a9730253c7aa4f39abeacd04f138417ba7fcb0f36cccc3514e0dab6"), - MustHexID("4fdbe75914ccd0bce02101606a1ccf3657ec963e3b3c20239d5fec87673fe446d649b4f15f1fe1a40e6cfbd446dda2d31d40bb602b1093b8fcd5f139ba0eb46a"), - MustHexID("3753668a0f6281e425ea69b52cb2d17ab97afbe6eb84cf5d25425bc5e53009388857640668fadd7c110721e6047c9697803bd8a6487b43bb343bfa32ebf24039"), - MustHexID("2e81b16346637dec4410fd88e527346145b9c0a849dbf2628049ac7dae016c8f4305649d5659ec77f1e8a0fac0db457b6080547226f06283598e3740ad94849a"), - MustHexID("802c3cc27f91c89213223d758f8d2ecd41135b357b6d698f24d811cdf113033a81c38e0bdff574a5c005b00a8c193dc2531f8c1fa05fa60acf0ab6f2858af09f"), - MustHexID("fcc9a2e1ac3667026ff16192876d1813bb75abdbf39b929a92863012fe8b1d890badea7a0de36274d5c1eb1e8f975785532c50d80fd44b1a4b692f437303393f"), - MustHexID("6d8b3efb461151dd4f6de809b62726f5b89e9b38e9ba1391967f61cde844f7528fecf821b74049207cee5a527096b31f3ad623928cd3ce51d926fa345a6b2951"), - }, - 252: { - MustHexID("f1ae93157cc48c2075dd5868fbf523e79e06caf4b8198f352f6e526680b78ff4227263de92612f7d63472bd09367bb92a636fff16fe46ccf41614f7a72495c2a"), - MustHexID("587f482d111b239c27c0cb89b51dd5d574db8efd8de14a2e6a1400c54d4567e77c65f89c1da52841212080b91604104768350276b6682f2f961cdaf4039581c7"), - 
MustHexID("e3f88274d35cefdaabdf205afe0e80e936cc982b8e3e47a84ce664c413b29016a4fb4f3a3ebae0a2f79671f8323661ed462bf4390af94c424dc8ace0c301b90f"), - MustHexID("0ddc736077da9a12ba410dc5ea63cbcbe7659dd08596485b2bff3435221f82c10d263efd9af938e128464be64a178b7cd22e19f400d5802f4c9df54bf89f2619"), - MustHexID("784aa34d833c6ce63fcc1279630113c3272e82c4ae8c126c5a52a88ac461b6baeed4244e607b05dc14e5b2f41c70a273c3804dea237f14f7a1e546f6d1309d14"), - MustHexID("f253a2c354ee0e27cfcae786d726753d4ad24be6516b279a936195a487de4a59dbc296accf20463749ff55293263ed8c1b6365eecb248d44e75e9741c0d18205"), - MustHexID("a1910b80357b3ad9b4593e0628922939614dc9056a5fbf477279c8b2c1d0b4b31d89a0c09d0d41f795271d14d3360ef08a3f821e65e7e1f56c07a36afe49c7c5"), - MustHexID("f1168552c2efe541160f0909b0b4a9d6aeedcf595cdf0e9b165c97e3e197471a1ee6320e93389edfba28af6eaf10de98597ad56e7ab1b504ed762451996c3b98"), - MustHexID("b0c8e5d2c8634a7930e1a6fd082e448c6cf9d2d8b7293558b59238815a4df926c286bf297d2049f14e8296a6eb3256af614ec1812c4f2bbe807673b58bf14c8c"), - MustHexID("0fb346076396a38badc342df3679b55bd7f40a609ab103411fe45082c01f12ea016729e95914b2b5540e987ff5c9b133e85862648e7f36abdfd23100d248d234"), - MustHexID("f736e0cc83417feaa280d9483f5d4d72d1b036cd0c6d9cbdeb8ac35ceb2604780de46dddaa32a378474e1d5ccdf79b373331c30c7911ade2ae32f98832e5de1f"), - MustHexID("8b02991457602f42b38b342d3f2259ae4100c354b3843885f7e4e07bd644f64dab94bb7f38a3915f8b7f11d8e3f81c28e07a0078cf79d7397e38a7b7e0c857e2"), - MustHexID("9221d9f04a8a184993d12baa91116692bb685f887671302999d69300ad103eb2d2c75a09d8979404c6dd28f12362f58a1a43619c493d9108fd47588a23ce5824"), - MustHexID("652797801744dada833fff207d67484742eea6835d695925f3e618d71b68ec3c65bdd85b4302b2cdcb835ad3f94fd00d8da07e570b41bc0d2bcf69a8de1b3284"), - MustHexID("d84f06fe64debc4cd0625e36d19b99014b6218375262cc2209202bdbafd7dffcc4e34ce6398e182e02fd8faeed622c3e175545864902dfd3d1ac57647cddf4c6"), - MustHexID("d0ed87b294f38f1d741eb601020eeec30ac16331d05880fe27868f1e454446de367d7457b41c79e202eaf9525b029e4f1d7e17d85a55f83a557c005c68d7328a"), - }, - 253: { - MustHexID("ad4485e386e3cc7c7310366a7c38fb810b8896c0d52e55944bfd320ca294e7912d6c53c0a0cf85e7ce226e92491d60430e86f8f15cda0161ed71893fb4a9e3a1"), - MustHexID("36d0e7e5b7734f98c6183eeeb8ac5130a85e910a925311a19c4941b1290f945d4fc3996b12ef4966960b6fa0fb29b1604f83a0f81bd5fd6398d2e1a22e46af0c"), - MustHexID("7d307d8acb4a561afa23bdf0bd945d35c90245e26345ec3a1f9f7df354222a7cdcb81339c9ed6744526c27a1a0c8d10857e98df942fa433602facac71ac68a31"), - MustHexID("d97bf55f88c83fae36232661af115d66ca600fc4bd6d1fb35ff9bb4dad674c02cf8c8d05f317525b5522250db58bb1ecafb7157392bf5aa61b178c61f098d995"), - MustHexID("7045d678f1f9eb7a4613764d17bd5698796494d0bf977b16f2dbc272b8a0f7858a60805c022fc3d1fe4f31c37e63cdaca0416c0d053ef48a815f8b19121605e0"), - MustHexID("14e1f21418d445748de2a95cd9a8c3b15b506f86a0acabd8af44bb968ce39885b19c8822af61b3dd58a34d1f265baec30e3ae56149dc7d2aa4a538f7319f69c8"), - MustHexID("b9453d78281b66a4eac95a1546017111eaaa5f92a65d0de10b1122940e92b319728a24edf4dec6acc412321b1c95266d39c7b3a5d265c629c3e49a65fb022c09"), - MustHexID("e8a49248419e3824a00d86af422f22f7366e2d4922b304b7169937616a01d9d6fa5abf5cc01061a352dc866f48e1fa2240dbb453d872b1d7be62bdfc1d5e248c"), - MustHexID("bebcff24b52362f30e0589ee573ce2d86f073d58d18e6852a592fa86ceb1a6c9b96d7fb9ec7ed1ed98a51b6743039e780279f6bb49d0a04327ac7a182d9a56f6"), - MustHexID("d0835e5a4291db249b8d2fca9f503049988180c7d247bedaa2cf3a1bad0a76709360a85d4f9a1423b2cbc82bb4d94b47c0cde20afc430224834c49fe312a9ae3"), - 
MustHexID("6b087fe2a2da5e4f0b0f4777598a4a7fb66bf77dbd5bfc44e8a7eaa432ab585a6e226891f56a7d4f5ed11a7c57b90f1661bba1059590ca4267a35801c2802913"), - MustHexID("d901e5bde52d1a0f4ddf010a686a53974cdae4ebe5c6551b3c37d6b6d635d38d5b0e5f80bc0186a2c7809dbf3a42870dd09643e68d32db896c6da8ba734579e7"), - MustHexID("96419fb80efae4b674402bb969ebaab86c1274f29a83a311e24516d36cdf148fe21754d46c97688cdd7468f24c08b13e4727c29263393638a3b37b99ff60ebca"), - MustHexID("7b9c1889ae916a5d5abcdfb0aaedcc9c6f9eb1c1a4f68d0c2d034fe79ac610ce917c3abc670744150fa891bfcd8ab14fed6983fca964de920aa393fa7b326748"), - MustHexID("7a369b2b8962cc4c65900be046482fbf7c14f98a135bbbae25152c82ad168fb2097b3d1429197cf46d3ce9fdeb64808f908a489cc6019725db040060fdfe5405"), - MustHexID("47bcae48288da5ecc7f5058dfa07cf14d89d06d6e449cb946e237aa6652ea050d9f5a24a65efdc0013ccf232bf88670979eddef249b054f63f38da9d7796dbd8"), - }, - 254: { - MustHexID("099739d7abc8abd38ecc7a816c521a1168a4dbd359fa7212a5123ab583ffa1cf485a5fed219575d6475dbcdd541638b2d3631a6c7fce7474e7fe3cba1d4d5853"), - MustHexID("c2b01603b088a7182d0cf7ef29fb2b04c70acb320fccf78526bf9472e10c74ee70b3fcfa6f4b11d167bd7d3bc4d936b660f2c9bff934793d97cb21750e7c3d31"), - MustHexID("20e4d8f45f2f863e94b45548c1ef22a11f7d36f263e4f8623761e05a64c4572379b000a52211751e2561b0f14f4fc92dd4130410c8ccc71eb4f0e95a700d4ca9"), - MustHexID("27f4a16cc085e72d86e25c98bd2eca173eaaee7565c78ec5a52e9e12b2211f35de81b5b45e9195de2ebfe29106742c59112b951a04eb7ae48822911fc1f9389e"), - MustHexID("55db5ee7d98e7f0b1c3b9d5be6f2bc619a1b86c3cdd513160ad4dcf267037a5fffad527ac15d50aeb32c59c13d1d4c1e567ebbf4de0d25236130c8361f9aac63"), - MustHexID("883df308b0130fc928a8559fe50667a0fff80493bc09685d18213b2db241a3ad11310ed86b0ef662b3ce21fc3d9aa7f3fc24b8d9afe17c7407e9afd3345ae548"), - MustHexID("c7af968cc9bc8200c3ee1a387405f7563be1dce6710a3439f42ea40657d0eae9d2b3c16c42d779605351fcdece4da637b9804e60ca08cfb89aec32c197beffa6"), - MustHexID("3e66f2b788e3ff1d04106b80597915cd7afa06c405a7ae026556b6e583dca8e05cfbab5039bb9a1b5d06083ffe8de5780b1775550e7218f5e98624bf7af9a0a8"), - MustHexID("4fc7f53764de3337fdaec0a711d35d3a923e72fa65025444d12230b3552ed43d9b2d1ad08ccb11f2d50c58809e6dd74dde910e195294fca3b47ae5a3967cc479"), - MustHexID("bafdfdcf6ccaa989436752fa97c77477b6baa7deb374b16c095492c529eb133e8e2f99e1977012b64767b9d34b2cf6d2048ed489bd822b5139b523f6a423167b"), - MustHexID("7f5d78008a4312fe059104ce80202c82b8915c2eb4411c6b812b16f7642e57c00f2c9425121f5cbac4257fe0b3e81ef5dea97ea2dbaa98f6a8b6fd4d1e5980bb"), - MustHexID("598c37fe78f922751a052f463aeb0cb0bc7f52b7c2a4cf2da72ec0931c7c32175d4165d0f8998f7320e87324ac3311c03f9382a5385c55f0407b7a66b2acd864"), - MustHexID("f758c4136e1c148777a7f3275a76e2db0b2b04066fd738554ec398c1c6cc9fb47e14a3b4c87bd47deaeab3ffd2110514c3855685a374794daff87b605b27ee2e"), - MustHexID("0307bb9e4fd865a49dcf1fe4333d1b944547db650ab580af0b33e53c4fef6c789531110fac801bbcbce21fc4d6f61b6d5b24abdf5b22e3030646d579f6dca9c2"), - MustHexID("82504b6eb49bb2c0f91a7006ce9cefdbaf6df38706198502c2e06601091fc9dc91e4f15db3410d45c6af355bc270b0f268d3dff560f956985c7332d4b10bd1ed"), - MustHexID("b39b5b677b45944ceebe76e76d1f051de2f2a0ec7b0d650da52135743e66a9a5dba45f638258f9a7545d9a790c7fe6d3fdf82c25425c7887323e45d27d06c057"), - }, - 255: { - MustHexID("5c4d58d46e055dd1f093f81ee60a675e1f02f54da6206720adee4dccef9b67a31efc5c2a2949c31a04ee31beadc79aba10da31440a1f9ff2a24093c63c36d784"), - MustHexID("ea72161ffdd4b1e124c7b93b0684805f4c4b58d617ed498b37a145c670dbc2e04976f8785583d9c805ffbf343c31d492d79f841652bbbd01b61ed85640b23495"), - 
MustHexID("51caa1d93352d47a8e531692a3612adac1e8ac68d0a200d086c1c57ae1e1a91aa285ab242e8c52ef9d7afe374c9485b122ae815f1707b875569d0433c1c3ce85"), - MustHexID("c08397d5751b47bd3da044b908be0fb0e510d3149574dff7aeab33749b023bb171b5769990fe17469dbebc100bc150e798aeda426a2dcc766699a225fddd75c6"), - MustHexID("0222c1c194b749736e593f937fad67ee348ac57287a15c7e42877aa38a9b87732a408bca370f812efd0eedbff13e6d5b854bf3ba1dec431a796ed47f32552b09"), - MustHexID("03d859cd46ef02d9bfad5268461a6955426845eef4126de6be0fa4e8d7e0727ba2385b78f1a883a8239e95ebb814f2af8379632c7d5b100688eebc5841209582"), - MustHexID("64d5004b7e043c39ff0bd10cb20094c287721d5251715884c280a612b494b3e9e1c64ba6f67614994c7d969a0d0c0295d107d53fc225d47c44c4b82852d6f960"), - MustHexID("b0a5eefb2dab6f786670f35bf9641eefe6dd87fd3f1362bcab4aaa792903500ab23d88fae68411372e0813b057535a601d46e454323745a948017f6063a47b1f"), - MustHexID("0cc6df0a3433d448b5684d2a3ffa9d1a825388177a18f44ad0008c7bd7702f1ec0fc38b83506f7de689c3b6ecb552599927e29699eed6bb867ff08f80068b287"), - MustHexID("50772f7b8c03a4e153355fbbf79c8a80cf32af656ff0c7873c99911099d04a0dae0674706c357e0145ad017a0ade65e6052cb1b0d574fcd6f67da3eee0ace66b"), - MustHexID("1ae37829c9ef41f8b508b82259ebac76b1ed900d7a45c08b7970f25d2d48ddd1829e2f11423a18749940b6dab8598c6e416cef0efd47e46e51f29a0bc65b37cd"), - MustHexID("ba973cab31c2af091fc1644a93527d62b2394999e2b6ccbf158dd5ab9796a43d408786f1803ef4e29debfeb62fce2b6caa5ab2b24d1549c822a11c40c2856665"), - MustHexID("bc413ad270dd6ea25bddba78f3298b03b8ba6f8608ac03d06007d4116fa78ef5a0cfe8c80155089382fc7a193243ee5500082660cb5d7793f60f2d7d18650964"), - MustHexID("5a6a9ef07634d9eec3baa87c997b529b92652afa11473dfee41ef7037d5c06e0ddb9fe842364462d79dd31cff8a59a1b8d5bc2b810dea1d4cbbd3beb80ecec83"), - MustHexID("f492c6ee2696d5f682f7f537757e52744c2ae560f1090a07024609e903d334e9e174fc01609c5a229ddbcac36c9d21adaf6457dab38a25bfd44f2f0ee4277998"), - MustHexID("459e4db99298cb0467a90acee6888b08bb857450deac11015cced5104853be5adce5b69c740968bc7f931495d671a70cad9f48546d7cd203357fe9af0e8d2164"), - }, - 256: { - MustHexID("a8593af8a4aef7b806b5197612017951bac8845a1917ca9a6a15dd6086d608505144990b245785c4cd2d67a295701c7aac2aa18823fb0033987284b019656268"), - MustHexID("d2eebef914928c3aad77fc1b2a495f52d2294acf5edaa7d8a530b540f094b861a68fe8348a46a7c302f08ab609d85912a4968eacfea0740847b29421b4795d9e"), - MustHexID("b14bfcb31495f32b650b63cf7d08492e3e29071fdc73cf2da0da48d4b191a70ba1a65f42ad8c343206101f00f8a48e8db4b08bf3f622c0853e7323b250835b91"), - MustHexID("7feaee0d818c03eb30e4e0bf03ade0f3c21ca38e938a761aa1781cf70bda8cc5cd631a6cc53dd44f1d4a6d3e2dae6513c6c66ee50cb2f0e9ad6f7e319b309fd9"), - MustHexID("4ca3b657b139311db8d583c25dd5963005e46689e1317620496cc64129c7f3e52870820e0ec7941d28809311df6db8a2867bbd4f235b4248af24d7a9c22d1232"), - MustHexID("1181defb1d16851d42dd951d84424d6bd1479137f587fa184d5a8152be6b6b16ed08bcdb2c2ed8539bcde98c80c432875f9f724737c316a2bd385a39d3cab1d8"), - MustHexID("d9dd818769fa0c3ec9f553c759b92476f082817252a04a47dc1777740b1731d280058c66f982812f173a294acf4944a85ba08346e2de153ba3ba41ce8a62cb64"), - MustHexID("bd7c4f8a9e770aa915c771b15e107ca123d838762da0d3ffc53aa6b53e9cd076cffc534ec4d2e4c334c683f1f5ea72e0e123f6c261915ed5b58ac1b59f003d88"), - MustHexID("3dd5739c73649d510456a70e9d6b46a855864a4a3f744e088fd8c8da11b18e4c9b5f2d7da50b1c147b2bae5ca9609ae01f7a3cdea9dce34f80a91d29cd82f918"), - MustHexID("f0d7df1efc439b4bcc0b762118c1cfa99b2a6143a9f4b10e3c9465125f4c9fca4ab88a2504169bbcad65492cf2f50da9dd5d077c39574a944f94d8246529066b"), - 
MustHexID("dd598b9ba441448e5fb1a6ec6c5f5aa9605bad6e223297c729b1705d11d05f6bfd3d41988b694681ae69bb03b9a08bff4beab5596503d12a39bffb5cd6e94c7c"), - MustHexID("3fce284ac97e567aebae681b15b7a2b6df9d873945536335883e4bbc26460c064370537f323fd1ada828ea43154992d14ac0cec0940a2bd2a3f42ec156d60c83"), - MustHexID("7c8dfa8c1311cb14fb29a8ac11bca23ecc115e56d9fcf7b7ac1db9066aa4eb39f8b1dabf46e192a65be95ebfb4e839b5ab4533fef414921825e996b210dd53bd"), - MustHexID("cafa6934f82120456620573d7f801390ed5e16ed619613a37e409e44ab355ef755e83565a913b48a9466db786f8d4fbd590bfec474c2524d4a2608d4eafd6abd"), - MustHexID("9d16600d0dd310d77045769fed2cb427f32db88cd57d86e49390c2ba8a9698cfa856f775be2013237226e7bf47b248871cf865d23015937d1edeb20db5e3e760"), - MustHexID("17be6b6ba54199b1d80eff866d348ea11d8a4b341d63ad9a6681d3ef8a43853ac564d153eb2a8737f0afc9ab320f6f95c55aa11aaa13bbb1ff422fd16bdf8188"), - }, - }, -} - -type preminedTestnet struct { - target NodeID - targetSha common.Hash // sha3(target) - dists [hashBits + 1][]NodeID - net *Network -} - -func (tn *preminedTestnet) sendFindnodeHash(to *Node, target common.Hash) { - // current log distance is encoded in port number - // fmt.Println("findnode query at dist", toaddr.Port) - if to.UDP <= lowPort { - panic("query to node at or below distance 0") - } - next := to.UDP - 1 - var result []rpcNode - for i, id := range tn.dists[to.UDP-lowPort] { - result = append(result, nodeToRPC(NewNode(id, net.ParseIP("10.0.2.99"), next, uint16(i)+1+lowPort))) - } - injectResponse(tn.net, to, neighborsPacket, &neighbors{Nodes: result}) -} - -func (tn *preminedTestnet) sendPing(to *Node, addr *net.UDPAddr, topics []Topic) []byte { - injectResponse(tn.net, to, pongPacket, &pong{ReplyTok: []byte{1}}) - return []byte{1} -} - -func (tn *preminedTestnet) send(to *Node, ptype nodeEvent, data interface{}) (hash []byte) { - switch ptype { - case pingPacket: - injectResponse(tn.net, to, pongPacket, &pong{ReplyTok: []byte{1}}) - case pongPacket: - // ignored - case findnodeHashPacket: - // current log distance is encoded in port number - // fmt.Println("findnode query at dist", toaddr.Port-lowPort) - if to.UDP <= lowPort { - panic("query to node at or below distance 0") - } - next := to.UDP - 1 - var result []rpcNode - for i, id := range tn.dists[to.UDP-lowPort] { - result = append(result, nodeToRPC(NewNode(id, net.ParseIP("10.0.2.99"), next, uint16(i)+1+lowPort))) - } - injectResponse(tn.net, to, neighborsPacket, &neighbors{Nodes: result}) - default: - panic("send(" + ptype.String() + ")") - } - return []byte{2} -} - -func (tn *preminedTestnet) sendNeighbours(to *Node, nodes []*Node) { - panic("sendNeighbours called") -} - -func (tn *preminedTestnet) sendTopicNodes(to *Node, queryHash common.Hash, nodes []*Node) { - panic("sendTopicNodes called") -} - -func (tn *preminedTestnet) sendTopicRegister(to *Node, topics []Topic, idx int, pong []byte) { - panic("sendTopicRegister called") -} - -func (*preminedTestnet) Close() {} - -func (*preminedTestnet) localAddr() *net.UDPAddr { - return &net.UDPAddr{IP: net.ParseIP("10.0.1.1"), Port: 40000} -} - -func injectResponse(net *Network, from *Node, ev nodeEvent, packet interface{}) { - go net.reqReadPacket(ingressPacket{remoteID: from.ID, remoteAddr: from.addr(), ev: ev, data: packet}) -} diff --git a/p2p/discv5/node.go b/p2p/discv5/node.go deleted file mode 100644 index 44d3025b70..0000000000 --- a/p2p/discv5/node.go +++ /dev/null @@ -1,413 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "encoding/hex" - "errors" - "fmt" - "math/big" - "math/rand" - "net" - "net/url" - "regexp" - "strconv" - "strings" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -// Node represents a host on the network. -// The public fields of Node may not be modified. -type Node struct { - IP net.IP // len 4 for IPv4 or 16 for IPv6 - UDP, TCP uint16 // port numbers - ID NodeID // the node's public key - - // Network-related fields are contained in nodeNetGuts. - // These fields are not supposed to be used off the - // Network.loop goroutine. - nodeNetGuts -} - -// NewNode creates a new node. It is mostly meant to be used for -// testing purposes. -func NewNode(id NodeID, ip net.IP, udpPort, tcpPort uint16) *Node { - if ipv4 := ip.To4(); ipv4 != nil { - ip = ipv4 - } - return &Node{ - IP: ip, - UDP: udpPort, - TCP: tcpPort, - ID: id, - nodeNetGuts: nodeNetGuts{sha: crypto.Keccak256Hash(id[:])}, - } -} - -func (n *Node) addr() *net.UDPAddr { - return &net.UDPAddr{IP: n.IP, Port: int(n.UDP)} -} - -// Incomplete returns true for nodes with no IP address. -func (n *Node) Incomplete() bool { - return n.IP == nil -} - -// checks whether n is a valid complete node. -func (n *Node) validateComplete() error { - if n.Incomplete() { - return errors.New("incomplete node") - } - if n.UDP == 0 { - return errors.New("missing UDP port") - } - if n.TCP == 0 { - return errors.New("missing TCP port") - } - if n.IP.IsMulticast() || n.IP.IsUnspecified() { - return errors.New("invalid IP (multicast/unspecified)") - } - _, err := n.ID.Pubkey() // validate the key (on curve, etc.) - return err -} - -// The string representation of a Node is a URL. -// Please see ParseNode for a description of the format. -func (n *Node) String() string { - u := url.URL{Scheme: "enode"} - if n.Incomplete() { - u.Host = fmt.Sprintf("%x", n.ID[:]) - } else { - addr := net.TCPAddr{IP: n.IP, Port: int(n.TCP)} - u.User = url.User(fmt.Sprintf("%x", n.ID[:])) - u.Host = addr.String() - if n.UDP != n.TCP { - u.RawQuery = "discport=" + strconv.Itoa(int(n.UDP)) - } - } - return u.String() -} - -var incompleteNodeURL = regexp.MustCompile("(?i)^(?:enode://)?([0-9a-f]+)$") - -// ParseNode parses a node designator. -// -// There are two basic forms of node designators -// - incomplete nodes, which only have the public key (node ID) -// - complete nodes, which contain the public key and IP/Port information -// -// For incomplete nodes, the designator must look like one of these -// -// enode:// -// -// -// For complete nodes, the node ID is encoded in the username portion -// of the URL, separated from the host by an @ sign. The hostname can -// only be given as an IP address, DNS domain names are not allowed. -// The port in the host name section is the TCP listening port. 
If the -// TCP and UDP (discovery) ports differ, the UDP port is specified as -// query parameter "discport". -// -// In the following example, the node URL describes -// a node with IP address 10.3.58.6, TCP listening port 30303 -// and UDP discovery port 30301. -// -// enode://@10.3.58.6:30303?discport=30301 -func ParseNode(rawurl string) (*Node, error) { - if m := incompleteNodeURL.FindStringSubmatch(rawurl); m != nil { - id, err := HexID(m[1]) - if err != nil { - return nil, fmt.Errorf("invalid node ID (%v)", err) - } - return NewNode(id, nil, 0, 0), nil - } - return parseComplete(rawurl) -} - -func parseComplete(rawurl string) (*Node, error) { - var ( - id NodeID - ip net.IP - tcpPort, udpPort uint64 - ) - u, err := url.Parse(rawurl) - if err != nil { - return nil, err - } - if u.Scheme != "enode" { - return nil, errors.New("invalid URL scheme, want \"enode\"") - } - // Parse the Node ID from the user portion. - if u.User == nil { - return nil, errors.New("does not contain node ID") - } - if id, err = HexID(u.User.String()); err != nil { - return nil, fmt.Errorf("invalid node ID (%v)", err) - } - // Parse the IP address. - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return nil, fmt.Errorf("invalid host: %v", err) - } - if ip = net.ParseIP(host); ip == nil { - return nil, errors.New("invalid IP address") - } - // Ensure the IP is 4 bytes long for IPv4 addresses. - if ipv4 := ip.To4(); ipv4 != nil { - ip = ipv4 - } - // Parse the port numbers. - if tcpPort, err = strconv.ParseUint(port, 10, 16); err != nil { - return nil, errors.New("invalid port") - } - udpPort = tcpPort - qv := u.Query() - if qv.Get("discport") != "" { - udpPort, err = strconv.ParseUint(qv.Get("discport"), 10, 16) - if err != nil { - return nil, errors.New("invalid discport in query") - } - } - return NewNode(id, ip, uint16(udpPort), uint16(tcpPort)), nil -} - -// MustParseNode parses a node URL. It panics if the URL is not valid. -func MustParseNode(rawurl string) *Node { - n, err := ParseNode(rawurl) - if err != nil { - panic("invalid node URL: " + err.Error()) - } - return n -} - -// MarshalText implements encoding.TextMarshaler. -func (n *Node) MarshalText() ([]byte, error) { - return []byte(n.String()), nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (n *Node) UnmarshalText(text []byte) error { - dec, err := ParseNode(string(text)) - if err == nil { - *n = *dec - } - return err -} - -// type nodeQueue []*Node -// -// // pushNew adds n to the end if it is not present. -// func (nl *nodeList) appendNew(n *Node) { -// for _, entry := range n { -// if entry == n { -// return -// } -// } -// *nq = append(*nq, n) -// } -// -// // popRandom removes a random node. Nodes closer to -// // to the head of the beginning of the have a slightly higher probability. -// func (nl *nodeList) popRandom() *Node { -// ix := rand.Intn(len(*nq)) -// //TODO: probability as mentioned above. -// nl.removeIndex(ix) -// } -// -// func (nl *nodeList) removeIndex(i int) *Node { -// slice = *nl -// if len(*slice) <= i { -// return nil -// } -// *nl = append(slice[:i], slice[i+1:]...) -// } - -const nodeIDBits = 512 - -// NodeID is a unique identifier for each node. -// The node identifier is a marshaled elliptic curve public key. -type NodeID [nodeIDBits / 8]byte - -// NodeID prints as a long hexadecimal number. -func (n NodeID) String() string { - return fmt.Sprintf("%x", n[:]) -} - -// The Go syntax representation of a NodeID is a call to HexID. 
-func (n NodeID) GoString() string { - return fmt.Sprintf("discover.HexID(\"%x\")", n[:]) -} - -// TerminalString returns a shortened hex string for terminal logging. -func (n NodeID) TerminalString() string { - return hex.EncodeToString(n[:8]) -} - -// HexID converts a hex string to a NodeID. -// The string may be prefixed with 0x. -func HexID(in string) (NodeID, error) { - var id NodeID - b, err := hex.DecodeString(strings.TrimPrefix(in, "0x")) - if err != nil { - return id, err - } else if len(b) != len(id) { - return id, fmt.Errorf("wrong length, want %d hex chars", len(id)*2) - } - copy(id[:], b) - return id, nil -} - -// MustHexID converts a hex string to a NodeID. -// It panics if the string is not a valid NodeID. -func MustHexID(in string) NodeID { - id, err := HexID(in) - if err != nil { - panic(err) - } - return id -} - -// PubkeyID returns a marshaled representation of the given public key. -func PubkeyID(pub *ecdsa.PublicKey) NodeID { - var id NodeID - pbytes := elliptic.Marshal(pub.Curve, pub.X, pub.Y) - if len(pbytes)-1 != len(id) { - panic(fmt.Errorf("need %d bit pubkey, got %d bits", (len(id)+1)*8, len(pbytes))) - } - copy(id[:], pbytes[1:]) - return id -} - -// Pubkey returns the public key represented by the node ID. -// It returns an error if the ID is not a point on the curve. -func (n NodeID) Pubkey() (*ecdsa.PublicKey, error) { - p := &ecdsa.PublicKey{Curve: crypto.S256(), X: new(big.Int), Y: new(big.Int)} - half := len(n) / 2 - p.X.SetBytes(n[:half]) - p.Y.SetBytes(n[half:]) - if !p.Curve.IsOnCurve(p.X, p.Y) { - return nil, errors.New("id is invalid secp256k1 curve point") - } - return p, nil -} - -// recoverNodeID computes the public key used to sign the -// given hash from the signature. -func recoverNodeID(hash, sig []byte) (id NodeID, err error) { - pubkey, err := crypto.Ecrecover(hash, sig) - if err != nil { - return id, err - } - if len(pubkey)-1 != len(id) { - return id, fmt.Errorf("recovered pubkey has %d bits, want %d bits", len(pubkey)*8, (len(id)+1)*8) - } - for i := range id { - id[i] = pubkey[i+1] - } - return id, nil -} - -// distcmp compares the distances a->target and b->target. -// Returns -1 if a is closer to target, 1 if b is closer to target -// and 0 if they are equal. -func distcmp(target, a, b common.Hash) int { - for i := range target { - da := a[i] ^ target[i] - db := b[i] ^ target[i] - if da > db { - return 1 - } else if da < db { - return -1 - } - } - return 0 -} - -// table of leading zero counts for bytes [0..255] -var lzcount = [256]int{ - 8, 7, 6, 6, 5, 5, 5, 5, - 4, 4, 4, 4, 4, 4, 4, 4, - 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, -} - -// logdist returns the logarithmic distance between a and b, log2(a ^ b). 
-func logdist(a, b common.Hash) int { - lz := 0 - for i := range a { - x := a[i] ^ b[i] - if x == 0 { - lz += 8 - } else { - lz += lzcount[x] - break - } - } - return len(a)*8 - lz -} - -// hashAtDistance returns a random hash such that logdist(a, b) == n -func hashAtDistance(a common.Hash, n int) (b common.Hash) { - if n == 0 { - return a - } - // flip bit at position n, fill the rest with random bits - b = a - pos := len(a) - n/8 - 1 - bit := byte(0x01) << (byte(n%8) - 1) - if bit == 0 { - pos++ - bit = 0x80 - } - b[pos] = a[pos]&^bit | ^a[pos]&bit // TODO: randomize end bits - for i := pos + 1; i < len(a); i++ { - b[i] = byte(rand.Intn(255)) - } - return b -} diff --git a/p2p/discv5/node_test.go b/p2p/discv5/node_test.go deleted file mode 100644 index 4e0fdbe3db..0000000000 --- a/p2p/discv5/node_test.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2015 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "fmt" - "math/big" - "math/rand" - "net" - "reflect" - "strings" - "testing" - "testing/quick" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -func ExampleNewNode() { - id := MustHexID("1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439") - - // Complete nodes contain UDP and TCP endpoints: - n1 := NewNode(id, net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), 52150, 30303) - fmt.Println("n1:", n1) - fmt.Println("n1.Incomplete() ->", n1.Incomplete()) - - // An incomplete node can be created by passing zero values - // for all parameters except id. - n2 := NewNode(id, nil, 0, 0) - fmt.Println("n2:", n2) - fmt.Println("n2.Incomplete() ->", n2.Incomplete()) - - // Output: - // n1: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:30303?discport=52150 - // n1.Incomplete() -> false - // n2: enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439 - // n2.Incomplete() -> true -} - -var parseNodeTests = []struct { - rawurl string - wantError string - wantResult *Node -}{ - { - rawurl: "http://foobar", - wantError: `invalid URL scheme, want "enode"`, - }, - { - rawurl: "enode://01010101@123.124.125.126:3", - wantError: `invalid node ID (wrong length, want 128 hex chars)`, - }, - // Complete nodes with IP address. 
- { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@hostname:3", - wantError: `invalid IP address`, - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:foo", - wantError: `invalid port`, - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:3?discport=foo", - wantError: `invalid discport in query`, - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{0x7f, 0x0, 0x0, 0x1}, - 52150, - 52150, - ), - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[::]:52150", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.ParseIP("::"), - 52150, - 52150, - ), - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@[2001:db8:3c4d:15::abcd:ef12]:52150", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.ParseIP("2001:db8:3c4d:15::abcd:ef12"), - 52150, - 52150, - ), - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439@127.0.0.1:52150?discport=22334", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - net.IP{0x7f, 0x0, 0x0, 0x1}, - 22334, - 52150, - ), - }, - // Incomplete nodes with no address. - { - rawurl: "1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - nil, 0, 0, - ), - }, - { - rawurl: "enode://1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439", - wantResult: NewNode( - MustHexID("0x1dd9d65c4552b5eb43d5ad55a2ee3f56c6cbc1c64a5c8d659f51fcd51bace24351232b8d7821617d2b29b54b81cdefb9b3e9c37d7fd5f63270bcc9e1a6f6a439"), - nil, 0, 0, - ), - }, - // Invalid URLs - { - rawurl: "01010101", - wantError: `invalid node ID (wrong length, want 128 hex chars)`, - }, - { - rawurl: "enode://01010101", - wantError: `invalid node ID (wrong length, want 128 hex chars)`, - }, - { - // This test checks that errors from url.Parse are handled. 
- rawurl: "://foo", - wantError: `missing protocol scheme`, - }, -} - -func TestParseNode(t *testing.T) { - for _, test := range parseNodeTests { - n, err := ParseNode(test.rawurl) - if test.wantError != "" { - if err == nil { - t.Errorf("test %q:\n got nil error, expected %#q", test.rawurl, test.wantError) - continue - } else if !strings.Contains(err.Error(), test.wantError) { - t.Errorf("test %q:\n got error %#q, expected %#q", test.rawurl, err.Error(), test.wantError) - continue - } - } else { - if err != nil { - t.Errorf("test %q:\n unexpected error: %v", test.rawurl, err) - continue - } - if !reflect.DeepEqual(n, test.wantResult) { - t.Errorf("test %q:\n result mismatch:\ngot: %#v, want: %#v", test.rawurl, n, test.wantResult) - } - } - } -} - -func TestNodeString(t *testing.T) { - for i, test := range parseNodeTests { - if test.wantError == "" && strings.HasPrefix(test.rawurl, "enode://") { - str := test.wantResult.String() - if str != test.rawurl { - t.Errorf("test %d: Node.String() mismatch:\ngot: %s\nwant: %s", i, str, test.rawurl) - } - } - } -} - -func TestHexID(t *testing.T) { - ref := NodeID{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 128, 106, 217, 182, 31, 165, 174, 1, 67, 7, 235, 220, 150, 66, 83, 173, 205, 159, 44, 10, 57, 42, 161, 26, 188} - id1 := MustHexID("0x000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") - id2 := MustHexID("000000000000000000000000000000000000000000000000000000000000000000000000000000806ad9b61fa5ae014307ebdc964253adcd9f2c0a392aa11abc") - - if id1 != ref { - t.Errorf("wrong id1\ngot %v\nwant %v", id1[:], ref[:]) - } - if id2 != ref { - t.Errorf("wrong id2\ngot %v\nwant %v", id2[:], ref[:]) - } -} - -func TestNodeID_recover(t *testing.T) { - prv := newkey() - hash := make([]byte, 32) - sig, err := crypto.Sign(hash, prv) - if err != nil { - t.Fatalf("signing error: %v", err) - } - - pub := PubkeyID(&prv.PublicKey) - recpub, err := recoverNodeID(hash, sig) - if err != nil { - t.Fatalf("recovery error: %v", err) - } - if pub != recpub { - t.Errorf("recovered wrong pubkey:\ngot: %v\nwant: %v", recpub, pub) - } - - ecdsa, err := pub.Pubkey() - if err != nil { - t.Errorf("Pubkey error: %v", err) - } - if !reflect.DeepEqual(ecdsa, &prv.PublicKey) { - t.Errorf("Pubkey mismatch:\n got: %#v\n want: %#v", ecdsa, &prv.PublicKey) - } -} - -func TestNodeID_pubkeyBad(t *testing.T) { - ecdsa, err := NodeID{}.Pubkey() - if err == nil { - t.Error("expected error for zero ID") - } - if ecdsa != nil { - t.Error("expected nil result") - } -} - -func TestNodeID_distcmp(t *testing.T) { - distcmpBig := func(target, a, b common.Hash) int { - tbig := new(big.Int).SetBytes(target[:]) - abig := new(big.Int).SetBytes(a[:]) - bbig := new(big.Int).SetBytes(b[:]) - return new(big.Int).Xor(tbig, abig).Cmp(new(big.Int).Xor(tbig, bbig)) - } - if err := quick.CheckEqual(distcmp, distcmpBig, quickcfg()); err != nil { - t.Error(err) - } -} - -// the random tests is likely to miss the case where they're equal. 
-func TestNodeID_distcmpEqual(t *testing.T) { - base := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - x := common.Hash{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0} - if distcmp(base, x, x) != 0 { - t.Errorf("distcmp(base, x, x) != 0") - } -} - -func TestNodeID_logdist(t *testing.T) { - logdistBig := func(a, b common.Hash) int { - abig, bbig := new(big.Int).SetBytes(a[:]), new(big.Int).SetBytes(b[:]) - return new(big.Int).Xor(abig, bbig).BitLen() - } - if err := quick.CheckEqual(logdist, logdistBig, quickcfg()); err != nil { - t.Error(err) - } -} - -// the random tests is likely to miss the case where they're equal. -func TestNodeID_logdistEqual(t *testing.T) { - x := common.Hash{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} - if logdist(x, x) != 0 { - t.Errorf("logdist(x, x) != 0") - } -} - -func TestNodeID_hashAtDistance(t *testing.T) { - // we don't use quick.Check here because its output isn't - // very helpful when the test fails. - cfg := quickcfg() - for i := 0; i < cfg.MaxCount; i++ { - a := gen(common.Hash{}, cfg.Rand).(common.Hash) - dist := cfg.Rand.Intn(len(common.Hash{}) * 8) - result := hashAtDistance(a, dist) - actualdist := logdist(result, a) - - if dist != actualdist { - t.Log("a: ", a) - t.Log("result:", result) - t.Fatalf("#%d: distance of result is %d, want %d", i, actualdist, dist) - } - } -} - -func quickcfg() *quick.Config { - return &quick.Config{ - MaxCount: 5000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - } -} - -// TODO: The Generate method can be dropped when we require Go >= 1.5 -// because testing/quick learned to generate arrays in 1.5. - -func (NodeID) Generate(rand *rand.Rand, size int) reflect.Value { - var id NodeID - m := rand.Intn(len(id)) - for i := len(id) - 1; i > m; i-- { - id[i] = byte(rand.Uint32()) - } - return reflect.ValueOf(id) -} diff --git a/p2p/discv5/nodeevent_string.go b/p2p/discv5/nodeevent_string.go deleted file mode 100644 index 38c1993bac..0000000000 --- a/p2p/discv5/nodeevent_string.go +++ /dev/null @@ -1,17 +0,0 @@ -// Code generated by "stringer -type=nodeEvent"; DO NOT EDIT. - -package discv5 - -import "strconv" - -const _nodeEvent_name = "pongTimeoutpingTimeoutneighboursTimeout" - -var _nodeEvent_index = [...]uint8{0, 11, 22, 39} - -func (i nodeEvent) String() string { - i -= 264 - if i >= nodeEvent(len(_nodeEvent_index)-1) { - return "nodeEvent(" + strconv.FormatInt(int64(i+264), 10) + ")" - } - return _nodeEvent_name[_nodeEvent_index[i]:_nodeEvent_index[i+1]] -} diff --git a/p2p/discv5/sim_run_test.go b/p2p/discv5/sim_run_test.go deleted file mode 100644 index bded0cc023..0000000000 --- a/p2p/discv5/sim_run_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discv5 - -import ( - "bufio" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "strings" - "testing" -) - -func getnacl() (string, error) { - switch runtime.GOARCH { - case "amd64": - _, err := exec.LookPath("sel_ldr_x86_64") - return "amd64p32", err - case "i386": - _, err := exec.LookPath("sel_ldr_i386") - return "i386", err - default: - return "", errors.New("nacl is not supported on " + runtime.GOARCH) - } -} - -// runWithPlaygroundTime executes the caller -// in the NaCl sandbox with faketime enabled. -// -// This function must be called from a Test* function -// and the caller must skip the actual test when isHost is true. -func runWithPlaygroundTime(t *testing.T) (isHost bool) { - if runtime.GOOS == "nacl" { - return false - } - - // Get the caller. - callerPC, _, _, ok := runtime.Caller(1) - if !ok { - panic("can't get caller") - } - callerFunc := runtime.FuncForPC(callerPC) - if callerFunc == nil { - panic("can't get caller") - } - callerName := callerFunc.Name()[strings.LastIndexByte(callerFunc.Name(), '.')+1:] - if !strings.HasPrefix(callerName, "Test") { - panic("must be called from witin a Test* function") - } - testPattern := "^" + callerName + "$" - - // Unfortunately runtime.faketime (playground time mode) only works on NaCl. The NaCl - // SDK must be installed and linked into PATH for this to work. - arch, err := getnacl() - if err != nil { - t.Skip(err) - } - - // Compile and run the calling test using NaCl. - // The extra tag ensures that the TestMain function in sim_main_test.go is used. - cmd := exec.Command("go", "test", "-v", "-tags", "faketime_simulation", "-timeout", "100h", "-run", testPattern, ".") - cmd.Env = append([]string{"GOOS=nacl", "GOARCH=" + arch}, os.Environ()...) - stdout, _ := cmd.StdoutPipe() - stderr, _ := cmd.StderrPipe() - go skipPlaygroundOutputHeaders(os.Stdout, stdout) - go skipPlaygroundOutputHeaders(os.Stderr, stderr) - if err := cmd.Run(); err != nil { - t.Error(err) - } - - // Ensure that the test function doesn't run in the (non-NaCl) host process. - return true -} - -func skipPlaygroundOutputHeaders(out io.Writer, in io.Reader) { - // Additional output can be printed without the headers - // before the NaCl binary starts running (e.g. compiler error messages). - bufin := bufio.NewReader(in) - output, err := bufin.ReadBytes(0) - output = bytes.TrimSuffix(output, []byte{0}) - if len(output) > 0 { - out.Write(output) - } - if err != nil { - return - } - bufin.UnreadByte() - - // Playback header: 0 0 P B <8-byte time> <4-byte data length> - head := make([]byte, 4+8+4) - for { - if _, err := io.ReadFull(bufin, head); err != nil { - if err != io.EOF { - fmt.Fprintln(out, "read error:", err) - } - return - } - if !bytes.HasPrefix(head, []byte{0x00, 0x00, 'P', 'B'}) { - fmt.Fprintf(out, "expected playback header, got %q\n", head) - io.Copy(out, bufin) - return - } - // Copy data until next header. - size := binary.BigEndian.Uint32(head[12:]) - io.CopyN(out, bufin, int64(size)) - } -} diff --git a/p2p/discv5/sim_test.go b/p2p/discv5/sim_test.go deleted file mode 100644 index 3d1e610d3a..0000000000 --- a/p2p/discv5/sim_test.go +++ /dev/null @@ -1,432 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "crypto/ecdsa" - "encoding/binary" - "fmt" - "math/rand" - "net" - "strconv" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" -) - -// In this test, nodes try to randomly resolve each other. -func TestSimRandomResolve(t *testing.T) { - t.Skip("boring") - if runWithPlaygroundTime(t) { - return - } - - sim := newSimulation() - bootnode := sim.launchNode(false) - - // A new node joins every 10s. - launcher := time.NewTicker(10 * time.Second) - defer launcher.Stop() - go func() { - for range launcher.C { - net := sim.launchNode(false) - go randomResolves(t, sim, net) - if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { - panic(err) - } - t.Logf("launched @ %v: %x\n", time.Now(), net.Self().ID[:16]) - } - }() - - time.Sleep(3 * time.Hour) - sim.shutdown() - sim.printStats() -} - -func TestSimTopics(t *testing.T) { - t.Skip("NaCl test") - if runWithPlaygroundTime(t) { - return - } - sim := newSimulation() - bootnode := sim.launchNode(false) - - go func() { - nets := make([]*Network, 1024) - for i := range nets { - net := sim.launchNode(false) - nets[i] = net - if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { - panic(err) - } - time.Sleep(time.Second * 5) - } - - for i, net := range nets { - if i < 256 { - stop := make(chan struct{}) - go net.RegisterTopic(testTopic, stop) - go func() { - //time.Sleep(time.Second * 36000) - time.Sleep(time.Second * 40000) - close(stop) - }() - time.Sleep(time.Millisecond * 100) - } - // time.Sleep(time.Second * 10) - //time.Sleep(time.Second) - /*if i%500 == 499 { - time.Sleep(time.Second * 9501) - } else { - time.Sleep(time.Second) - }*/ - } - }() - - // A new node joins every 10s. 
- /* launcher := time.NewTicker(5 * time.Second) - cnt := 0 - var printNet *Network - go func() { - for range launcher.C { - cnt++ - if cnt <= 1000 { - log := false //(cnt == 500) - net := sim.launchNode(log) - if log { - printNet = net - } - if cnt > 500 { - go net.RegisterTopic(testTopic, nil) - } - if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { - panic(err) - } - } - //fmt.Printf("launched @ %v: %x\n", time.Now(), net.Self().ID[:16]) - } - }() - */ - time.Sleep(55000 * time.Second) - //launcher.Stop() - sim.shutdown() - //sim.printStats() - //printNet.log.printLogs() -} - -/*func testHierarchicalTopics(i int) []Topic { - digits := strconv.FormatInt(int64(256+i/4), 4) - res := make([]Topic, 5) - for i, _ := range res { - res[i] = Topic("foo" + digits[1:i+1]) - } - return res -}*/ - -func testHierarchicalTopics(i int) []Topic { - digits := strconv.FormatInt(int64(128+i/8), 2) - res := make([]Topic, 8) - for i := range res { - res[i] = Topic("foo" + digits[1:i+1]) - } - return res -} - -func TestSimTopicHierarchy(t *testing.T) { - t.Skip("NaCl test") - if runWithPlaygroundTime(t) { - return - } - sim := newSimulation() - bootnode := sim.launchNode(false) - - go func() { - nets := make([]*Network, 1024) - for i := range nets { - net := sim.launchNode(false) - nets[i] = net - if err := net.SetFallbackNodes([]*Node{bootnode.Self()}); err != nil { - panic(err) - } - time.Sleep(time.Second * 5) - } - - stop := make(chan struct{}) - for i, net := range nets { - //if i < 256 { - for _, topic := range testHierarchicalTopics(i)[:5] { - //fmt.Println("reg", topic) - go net.RegisterTopic(topic, stop) - } - time.Sleep(time.Millisecond * 100) - //} - } - time.Sleep(time.Second * 90000) - close(stop) - }() - - time.Sleep(100000 * time.Second) - sim.shutdown() -} - -func randomResolves(t *testing.T, s *simulation, net *Network) { - randtime := func() time.Duration { - return time.Duration(rand.Intn(50)+20) * time.Second - } - lookup := func(target NodeID) bool { - result := net.Resolve(target) - return result != nil && result.ID == target - } - - timer := time.NewTimer(randtime()) - defer timer.Stop() - for { - select { - case <-timer.C: - target := s.randomNode().Self().ID - if !lookup(target) { - t.Errorf("node %x: target %x not found", net.Self().ID[:8], target[:8]) - } - timer.Reset(randtime()) - case <-net.closed: - return - } - } -} - -type simulation struct { - mu sync.RWMutex - nodes map[NodeID]*Network - nodectr uint32 -} - -func newSimulation() *simulation { - return &simulation{nodes: make(map[NodeID]*Network)} -} - -func (s *simulation) shutdown() { - s.mu.RLock() - alive := make([]*Network, 0, len(s.nodes)) - for _, n := range s.nodes { - alive = append(alive, n) - } - defer s.mu.RUnlock() - - for _, n := range alive { - n.Close() - } -} - -func (s *simulation) printStats() { - s.mu.Lock() - defer s.mu.Unlock() - fmt.Println("node counter:", s.nodectr) - fmt.Println("alive nodes:", len(s.nodes)) - - // for _, n := range s.nodes { - // fmt.Printf("%x\n", n.tab.self.ID[:8]) - // transport := n.conn.(*simTransport) - // fmt.Println(" joined:", transport.joinTime) - // fmt.Println(" sends:", transport.hashctr) - // fmt.Println(" table size:", n.tab.count) - // } - - /*for _, n := range s.nodes { - fmt.Println() - fmt.Printf("*** Node %x\n", n.tab.self.ID[:8]) - n.log.printLogs() - }*/ - -} - -func (s *simulation) randomNode() *Network { - s.mu.Lock() - defer s.mu.Unlock() - - n := rand.Intn(len(s.nodes)) - for _, net := range s.nodes { - if n == 0 { - return net - } - n-- 
- } - return nil -} - -func (s *simulation) launchNode(log bool) *Network { - var ( - num = s.nodectr - key = newkey() - id = PubkeyID(&key.PublicKey) - ip = make(net.IP, 4) - ) - s.nodectr++ - binary.BigEndian.PutUint32(ip, num) - ip[0] = 10 - addr := &net.UDPAddr{IP: ip, Port: 30303} - - transport := &simTransport{joinTime: time.Now(), sender: id, senderAddr: addr, sim: s, priv: key} - net, err := newNetwork(transport, key.PublicKey, "", nil) - if err != nil { - panic("cannot launch new node: " + err.Error()) - } - - s.mu.Lock() - s.nodes[id] = net - s.mu.Unlock() - - return net -} - -type simTransport struct { - joinTime time.Time - sender NodeID - senderAddr *net.UDPAddr - sim *simulation - hashctr uint64 - priv *ecdsa.PrivateKey -} - -func (st *simTransport) localAddr() *net.UDPAddr { - return st.senderAddr -} - -func (st *simTransport) Close() {} - -func (st *simTransport) send(remote *Node, ptype nodeEvent, data interface{}) (hash []byte) { - hash = st.nextHash() - var raw []byte - if ptype == pongPacket { - var err error - raw, _, err = encodePacket(st.priv, byte(ptype), data) - if err != nil { - panic(err) - } - } - - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: hash, - ev: ptype, - data: data, - rawData: raw, - }) - return hash -} - -func (st *simTransport) sendPing(remote *Node, remoteAddr *net.UDPAddr, topics []Topic) []byte { - hash := st.nextHash() - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: hash, - ev: pingPacket, - data: &ping{ - Version: 4, - From: rpcEndpoint{IP: st.senderAddr.IP, UDP: uint16(st.senderAddr.Port), TCP: 30303}, - To: rpcEndpoint{IP: remoteAddr.IP, UDP: uint16(remoteAddr.Port), TCP: 30303}, - Expiration: uint64(time.Now().Unix() + int64(expiration)), - Topics: topics, - }, - }) - return hash -} - -func (st *simTransport) sendFindnodeHash(remote *Node, target common.Hash) { - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: st.nextHash(), - ev: findnodeHashPacket, - data: &findnodeHash{ - Target: target, - Expiration: uint64(time.Now().Unix() + int64(expiration)), - }, - }) -} - -func (st *simTransport) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) { - //fmt.Println("send", topics, pong) - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: st.nextHash(), - ev: topicRegisterPacket, - data: &topicRegister{ - Topics: topics, - Idx: uint(idx), - Pong: pong, - }, - }) -} - -func (st *simTransport) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) { - rnodes := make([]rpcNode, len(nodes)) - for i := range nodes { - rnodes[i] = nodeToRPC(nodes[i]) - } - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: st.nextHash(), - ev: topicNodesPacket, - data: &topicNodes{Echo: queryHash, Nodes: rnodes}, - }) -} - -func (st *simTransport) sendNeighbours(remote *Node, nodes []*Node) { - // TODO: send multiple packets - rnodes := make([]rpcNode, len(nodes)) - for i := range nodes { - rnodes[i] = nodeToRPC(nodes[i]) - } - st.sendPacket(remote.ID, ingressPacket{ - remoteID: st.sender, - remoteAddr: st.senderAddr, - hash: st.nextHash(), - ev: neighborsPacket, - data: &neighbors{ - Nodes: rnodes, - Expiration: uint64(time.Now().Unix() + int64(expiration)), - }, - }) -} - -func (st *simTransport) nextHash() []byte { - v := atomic.AddUint64(&st.hashctr, 1) - var hash common.Hash - 
binary.BigEndian.PutUint64(hash[:], v) - return hash[:] -} - -const packetLoss = 0 // 1/1000 - -func (st *simTransport) sendPacket(remote NodeID, p ingressPacket) { - if rand.Int31n(1000) >= packetLoss { - st.sim.mu.RLock() - recipient := st.sim.nodes[remote] - st.sim.mu.RUnlock() - - time.AfterFunc(200*time.Millisecond, func() { - recipient.reqReadPacket(p) - }) - } -} diff --git a/p2p/discv5/table.go b/p2p/discv5/table.go deleted file mode 100644 index 64c3ecd1c7..0000000000 --- a/p2p/discv5/table.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -// Package discv5 is a prototype implementation of Discvery v5. -// Deprecated: do not use this package. -package discv5 - -import ( - "crypto/rand" - "encoding/binary" - "fmt" - "net" - "sort" - - "github.com/ethereum/go-ethereum/common" -) - -const ( - alpha = 3 // Kademlia concurrency factor - bucketSize = 16 // Kademlia bucket size - hashBits = len(common.Hash{}) * 8 - nBuckets = hashBits + 1 // Number of buckets - - maxFindnodeFailures = 5 -) - -type Table struct { - count int // number of nodes - buckets [nBuckets]*bucket // index of known nodes by distance - nodeAddedHook func(*Node) // for testing - self *Node // metadata of the local node -} - -// bucket contains nodes, ordered by their last activity. the entry -// that was most recently active is the first element in entries. -type bucket struct { - entries []*Node - replacements []*Node -} - -func newTable(ourID NodeID, ourAddr *net.UDPAddr) *Table { - self := NewNode(ourID, ourAddr.IP, uint16(ourAddr.Port), uint16(ourAddr.Port)) - tab := &Table{self: self} - for i := range tab.buckets { - tab.buckets[i] = new(bucket) - } - return tab -} - -const printTable = false - -// chooseBucketRefreshTarget selects random refresh targets to keep all Kademlia -// buckets filled with live connections and keep the network topology healthy. -// This requires selecting addresses closer to our own with a higher probability -// in order to refresh closer buckets too. -// -// This algorithm approximates the distance distribution of existing nodes in the -// table by selecting a random node from the table and selecting a target address -// with a distance less than twice of that of the selected node. -// This algorithm will be improved later to specifically target the least recently -// used buckets. 
-func (tab *Table) chooseBucketRefreshTarget() common.Hash { - entries := 0 - if printTable { - fmt.Println() - } - for i, b := range &tab.buckets { - entries += len(b.entries) - if printTable { - for _, e := range b.entries { - fmt.Println(i, e.state, e.addr().String(), e.ID.String(), e.sha.Hex()) - } - } - } - - prefix := binary.BigEndian.Uint64(tab.self.sha[0:8]) - dist := ^uint64(0) - entry := int(randUint(uint32(entries + 1))) - for _, b := range &tab.buckets { - if entry < len(b.entries) { - n := b.entries[entry] - dist = binary.BigEndian.Uint64(n.sha[0:8]) ^ prefix - break - } - entry -= len(b.entries) - } - - ddist := ^uint64(0) - if dist+dist > dist { - ddist = dist - } - targetPrefix := prefix ^ randUint64n(ddist) - - var target common.Hash - binary.BigEndian.PutUint64(target[0:8], targetPrefix) - rand.Read(target[8:]) - return target -} - -// readRandomNodes fills the given slice with random nodes from the -// table. It will not write the same node more than once. The nodes in -// the slice are copies and can be modified by the caller. -func (tab *Table) readRandomNodes(buf []*Node) (n int) { - // TODO: tree-based buckets would help here - // Find all non-empty buckets and get a fresh slice of their entries. - var buckets [][]*Node - for _, b := range &tab.buckets { - if len(b.entries) > 0 { - buckets = append(buckets, b.entries) - } - } - if len(buckets) == 0 { - return 0 - } - // Shuffle the buckets. - for i := uint32(len(buckets)) - 1; i > 0; i-- { - j := randUint(i) - buckets[i], buckets[j] = buckets[j], buckets[i] - } - // Move head of each bucket into buf, removing buckets that become empty. - var i, j int - for ; i < len(buf); i, j = i+1, (j+1)%len(buckets) { - b := buckets[j] - buf[i] = &(*b[0]) - buckets[j] = b[1:] - if len(b) == 1 { - buckets = append(buckets[:j], buckets[j+1:]...) - } - if len(buckets) == 0 { - break - } - } - return i + 1 -} - -func randUint(max uint32) uint32 { - if max < 2 { - return 0 - } - var b [4]byte - rand.Read(b[:]) - return binary.BigEndian.Uint32(b[:]) % max -} - -func randUint64n(max uint64) uint64 { - if max < 2 { - return 0 - } - var b [8]byte - rand.Read(b[:]) - return binary.BigEndian.Uint64(b[:]) % max -} - -// closest returns the n nodes in the table that are closest to the -// given id. The caller must hold tab.mutex. -func (tab *Table) closest(target common.Hash, nresults int) *nodesByDistance { - // This is a very wasteful way to find the closest nodes but - // obviously correct. I believe that tree-based buckets would make - // this easier to implement efficiently. - close := &nodesByDistance{target: target} - for _, b := range &tab.buckets { - for _, n := range b.entries { - close.push(n, nresults) - } - } - return close -} - -// add attempts to add the given node its corresponding bucket. If the -// bucket has space available, adding the node succeeds immediately. -// Otherwise, the node is added to the replacement cache for the bucket. -func (tab *Table) add(n *Node) (contested *Node) { - //fmt.Println("add", n.addr().String(), n.ID.String(), n.sha.Hex()) - if n.ID == tab.self.ID { - return - } - b := tab.buckets[logdist(tab.self.sha, n.sha)] - switch { - case b.bump(n): - // n exists in b. - return nil - case len(b.entries) < bucketSize: - // b has space available. - b.addFront(n) - tab.count++ - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(n) - } - return nil - default: - // b has no space left, add to replacement cache - // and revalidate the last entry. 
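`Table.add` and `Table.closest` above rely on the XOR-metric helpers `logdist` (used directly as the bucket index) and `distcmp` (relative closeness), which are defined elsewhere in the package. As a reading aid, here is a minimal sketch of the standard forms of these two helpers; it is a reconstruction for illustration, not the code removed by this patch.

```go
package main

import (
	"fmt"
	"math/bits"

	"github.com/ethereum/go-ethereum/common"
)

// logdist returns the logarithmic XOR distance between a and b: the bit length
// of a XOR b, i.e. 256 minus the number of leading zero bits.
func logdist(a, b common.Hash) int {
	lz := 0
	for i := range a {
		x := a[i] ^ b[i]
		if x == 0 {
			lz += 8
			continue
		}
		lz += bits.LeadingZeros8(x)
		break
	}
	return len(a)*8 - lz
}

// distcmp reports which of a and b is closer to target under the XOR metric:
// -1 if a is closer, 1 if b is closer, 0 if they are equidistant.
func distcmp(target, a, b common.Hash) int {
	for i := range target {
		da := a[i] ^ target[i]
		db := b[i] ^ target[i]
		switch {
		case da > db:
			return 1
		case da < db:
			return -1
		}
	}
	return 0
}

func main() {
	var self, n common.Hash
	n[0] = 0x80 // differs from self in the most significant bit
	fmt.Println(logdist(self, n))    // 256: lands in the farthest bucket
	fmt.Println(distcmp(self, n, n)) // 0
}
```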
- // TODO: drop previous node - b.replacements = append(b.replacements, n) - if len(b.replacements) > bucketSize { - copy(b.replacements, b.replacements[1:]) - b.replacements = b.replacements[:len(b.replacements)-1] - } - return b.entries[len(b.entries)-1] - } -} - -// stuff adds nodes the table to the end of their corresponding bucket -// if the bucket is not full. -func (tab *Table) stuff(nodes []*Node) { -outer: - for _, n := range nodes { - if n.ID == tab.self.ID { - continue // don't add self - } - bucket := tab.buckets[logdist(tab.self.sha, n.sha)] - for i := range bucket.entries { - if bucket.entries[i].ID == n.ID { - continue outer // already in bucket - } - } - if len(bucket.entries) < bucketSize { - bucket.entries = append(bucket.entries, n) - tab.count++ - if tab.nodeAddedHook != nil { - tab.nodeAddedHook(n) - } - } - } -} - -// delete removes an entry from the node table (used to evacuate -// failed/non-bonded discovery peers). -func (tab *Table) delete(node *Node) { - //fmt.Println("delete", node.addr().String(), node.ID.String(), node.sha.Hex()) - bucket := tab.buckets[logdist(tab.self.sha, node.sha)] - for i := range bucket.entries { - if bucket.entries[i].ID == node.ID { - bucket.entries = append(bucket.entries[:i], bucket.entries[i+1:]...) - tab.count-- - return - } - } -} - -func (tab *Table) deleteReplace(node *Node) { - b := tab.buckets[logdist(tab.self.sha, node.sha)] - i := 0 - for i < len(b.entries) { - if b.entries[i].ID == node.ID { - b.entries = append(b.entries[:i], b.entries[i+1:]...) - tab.count-- - } else { - i++ - } - } - // refill from replacement cache - // TODO: maybe use random index - if len(b.entries) < bucketSize && len(b.replacements) > 0 { - ri := len(b.replacements) - 1 - b.addFront(b.replacements[ri]) - tab.count++ - b.replacements[ri] = nil - b.replacements = b.replacements[:ri] - } -} - -func (b *bucket) addFront(n *Node) { - b.entries = append(b.entries, nil) - copy(b.entries[1:], b.entries) - b.entries[0] = n -} - -func (b *bucket) bump(n *Node) bool { - for i := range b.entries { - if b.entries[i].ID == n.ID { - // move it to the front - copy(b.entries[1:], b.entries[:i]) - b.entries[0] = n - return true - } - } - return false -} - -// nodesByDistance is a list of nodes, ordered by -// distance to target. -type nodesByDistance struct { - entries []*Node - target common.Hash -} - -// push adds the given node to the list, keeping the total size below maxElems. -func (h *nodesByDistance) push(n *Node, maxElems int) { - ix := sort.Search(len(h.entries), func(i int) bool { - return distcmp(h.target, h.entries[i].sha, n.sha) > 0 - }) - if len(h.entries) < maxElems { - h.entries = append(h.entries, n) - } - if ix == len(h.entries) { - // farther away than all nodes we already have. - // if there was room for it, the node is now the last element. - } else { - // slide existing entries down to make room - // this will overwrite the entry we just appended. - copy(h.entries[ix+1:], h.entries[ix:]) - h.entries[ix] = n - } -} diff --git a/p2p/discv5/table_test.go b/p2p/discv5/table_test.go deleted file mode 100644 index 872a4f6836..0000000000 --- a/p2p/discv5/table_test.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. 
-// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "crypto/ecdsa" - "fmt" - "math/rand" - - "net" - "reflect" - "testing" - "testing/quick" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" -) - -func TestBucket_bumpNoDuplicates(t *testing.T) { - t.Parallel() - cfg := &quick.Config{ - MaxCount: 1000, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - Values: func(args []reflect.Value, rand *rand.Rand) { - // generate a random list of nodes. this will be the content of the bucket. - n := rand.Intn(bucketSize-1) + 1 - nodes := make([]*Node, n) - for i := range nodes { - nodes[i] = nodeAtDistance(common.Hash{}, 200) - } - args[0] = reflect.ValueOf(nodes) - // generate random bump positions. - bumps := make([]int, rand.Intn(100)) - for i := range bumps { - bumps[i] = rand.Intn(len(nodes)) - } - args[1] = reflect.ValueOf(bumps) - }, - } - - prop := func(nodes []*Node, bumps []int) (ok bool) { - b := &bucket{entries: make([]*Node, len(nodes))} - copy(b.entries, nodes) - for i, pos := range bumps { - b.bump(b.entries[pos]) - if hasDuplicates(b.entries) { - t.Logf("bucket has duplicates after %d/%d bumps:", i+1, len(bumps)) - for _, n := range b.entries { - t.Logf(" %p", n) - } - return false - } - } - return true - } - if err := quick.Check(prop, cfg); err != nil { - t.Error(err) - } -} - -// nodeAtDistance creates a node for which logdist(base, n.sha) == ld. -// The node's ID does not correspond to n.sha. -func nodeAtDistance(base common.Hash, ld int) (n *Node) { - n = new(Node) - n.sha = hashAtDistance(base, ld) - copy(n.ID[:], n.sha[:]) // ensure the node still has a unique ID - return n -} - -func TestTable_closest(t *testing.T) { - t.Parallel() - - test := func(test *closeTest) bool { - // for any node table, Target and N - tab := newTable(test.Self, &net.UDPAddr{}) - tab.stuff(test.All) - - // check that doClosest(Target, N) returns nodes - result := tab.closest(test.Target, test.N).entries - if hasDuplicates(result) { - t.Errorf("result contains duplicates") - return false - } - if !sortedByDistanceTo(test.Target, result) { - t.Errorf("result is not sorted by distance to target") - return false - } - - // check that the number of results is min(N, tablen) - wantN := test.N - if tab.count < test.N { - wantN = tab.count - } - if len(result) != wantN { - t.Errorf("wrong number of nodes: got %d, want %d", len(result), wantN) - return false - } else if len(result) == 0 { - return true // no need to check distance - } - - // check that the result nodes have minimum distance to target. 
- for _, b := range tab.buckets { - for _, n := range b.entries { - if contains(result, n.ID) { - continue // don't run the check below for nodes in result - } - farthestResult := result[len(result)-1].sha - if distcmp(test.Target, n.sha, farthestResult) < 0 { - t.Errorf("table contains node that is closer to target but it's not in result") - t.Logf(" Target: %v", test.Target) - t.Logf(" Farthest Result: %v", farthestResult) - t.Logf(" ID: %v", n.ID) - return false - } - } - } - return true - } - if err := quick.Check(test, quickcfg()); err != nil { - t.Error(err) - } -} - -func TestTable_ReadRandomNodesGetAll(t *testing.T) { - cfg := &quick.Config{ - MaxCount: 200, - Rand: rand.New(rand.NewSource(time.Now().Unix())), - Values: func(args []reflect.Value, rand *rand.Rand) { - args[0] = reflect.ValueOf(make([]*Node, rand.Intn(1000))) - }, - } - test := func(buf []*Node) bool { - tab := newTable(NodeID{}, &net.UDPAddr{}) - for i := 0; i < len(buf); i++ { - ld := cfg.Rand.Intn(len(tab.buckets)) - tab.stuff([]*Node{nodeAtDistance(tab.self.sha, ld)}) - } - gotN := tab.readRandomNodes(buf) - if gotN != tab.count { - t.Errorf("wrong number of nodes, got %d, want %d", gotN, tab.count) - return false - } - if hasDuplicates(buf[:gotN]) { - t.Errorf("result contains duplicates") - return false - } - return true - } - if err := quick.Check(test, cfg); err != nil { - t.Error(err) - } -} - -type closeTest struct { - Self NodeID - Target common.Hash - All []*Node - N int -} - -func (*closeTest) Generate(rand *rand.Rand, size int) reflect.Value { - t := &closeTest{ - Self: gen(NodeID{}, rand).(NodeID), - Target: gen(common.Hash{}, rand).(common.Hash), - N: rand.Intn(bucketSize), - } - for _, id := range gen([]NodeID{}, rand).([]NodeID) { - t.All = append(t.All, &Node{ID: id}) - } - return reflect.ValueOf(t) -} - -func hasDuplicates(slice []*Node) bool { - seen := make(map[NodeID]bool) - for i, e := range slice { - if e == nil { - panic(fmt.Sprintf("nil *Node at %d", i)) - } - if seen[e.ID] { - return true - } - seen[e.ID] = true - } - return false -} - -func sortedByDistanceTo(distbase common.Hash, slice []*Node) bool { - var last common.Hash - for i, e := range slice { - if i > 0 && distcmp(distbase, e.sha, last) < 0 { - return false - } - last = e.sha - } - return true -} - -func contains(ns []*Node, id NodeID) bool { - for _, n := range ns { - if n.ID == id { - return true - } - } - return false -} - -// gen wraps quick.Value so it's easier to use. -// it generates a random value of the given value's type. -func gen(typ interface{}, rand *rand.Rand) interface{} { - v, ok := quick.Value(reflect.TypeOf(typ), rand) - if !ok { - panic(fmt.Sprintf("couldn't generate random value of type %T", typ)) - } - return v.Interface() -} - -func newkey() *ecdsa.PrivateKey { - key, err := crypto.GenerateKey() - if err != nil { - panic("couldn't generate key: " + err.Error()) - } - return key -} diff --git a/p2p/discv5/ticket.go b/p2p/discv5/ticket.go deleted file mode 100644 index c5e3d6c08f..0000000000 --- a/p2p/discv5/ticket.go +++ /dev/null @@ -1,884 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "bytes" - "encoding/binary" - "fmt" - "math" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" -) - -const ( - ticketTimeBucketLen = time.Minute - collectFrequency = time.Second * 30 - registerFrequency = time.Second * 60 - maxCollectDebt = 10 - maxRegisterDebt = 5 - keepTicketConst = time.Minute * 10 - keepTicketExp = time.Minute * 5 - targetWaitTime = time.Minute * 10 - topicQueryTimeout = time.Second * 5 - topicQueryResend = time.Minute - // topic radius detection - maxRadius = 0xffffffffffffffff - radiusTC = time.Minute * 20 - radiusBucketsPerBit = 8 - minSlope = 1 - minPeakSize = 40 - maxNoAdjust = 20 - lookupWidth = 8 - minRightSum = 20 - searchForceQuery = 4 -) - -// timeBucket represents absolute monotonic time in minutes. -// It is used as the index into the per-topic ticket buckets. -type timeBucket int - -type ticket struct { - topics []Topic - regTime []mclock.AbsTime // Per-topic local absolute time when the ticket can be used. - - // The serial number that was issued by the server. - serial uint32 - // Used by registrar, tracks absolute time when the ticket was created. - issueTime mclock.AbsTime - - // Fields used only by registrants - node *Node // the registrar node that signed this ticket - refCnt int // tracks number of topics that will be registered using this ticket - pong []byte // encoded pong packet signed by the registrar -} - -// ticketRef refers to a single topic in a ticket. -type ticketRef struct { - t *ticket - idx int // index of the topic in t.topics and t.regTime -} - -func (ref ticketRef) topic() Topic { - return ref.t.topics[ref.idx] -} - -func (ref ticketRef) topicRegTime() mclock.AbsTime { - return ref.t.regTime[ref.idx] -} - -func pongToTicket(localTime mclock.AbsTime, topics []Topic, node *Node, p *ingressPacket) (*ticket, error) { - wps := p.data.(*pong).WaitPeriods - if len(topics) != len(wps) { - return nil, fmt.Errorf("bad wait period list: got %d values, want %d", len(topics), len(wps)) - } - if rlpHash(topics) != p.data.(*pong).TopicHash { - return nil, fmt.Errorf("bad topic hash") - } - t := &ticket{ - issueTime: localTime, - node: node, - topics: topics, - pong: p.rawData, - regTime: make([]mclock.AbsTime, len(wps)), - } - // Convert wait periods to local absolute time. 
- for i, wp := range wps { - t.regTime[i] = localTime + mclock.AbsTime(time.Second*time.Duration(wp)) - } - return t, nil -} - -func ticketToPong(t *ticket, pong *pong) { - pong.Expiration = uint64(t.issueTime / mclock.AbsTime(time.Second)) - pong.TopicHash = rlpHash(t.topics) - pong.TicketSerial = t.serial - pong.WaitPeriods = make([]uint32, len(t.regTime)) - for i, regTime := range t.regTime { - pong.WaitPeriods[i] = uint32(time.Duration(regTime-t.issueTime) / time.Second) - } -} - -type ticketStore struct { - // radius detector and target address generator - // exists for both searched and registered topics - radius map[Topic]*topicRadius - - // Contains buckets (for each absolute minute) of tickets - // that can be used in that minute. - // This is only set if the topic is being registered. - tickets map[Topic]*topicTickets - - regQueue []Topic // Topic registration queue for round robin attempts - regSet map[Topic]struct{} // Topic registration queue contents for fast filling - - nodes map[*Node]*ticket - nodeLastReq map[*Node]reqInfo - - lastBucketFetched timeBucket - nextTicketCached *ticketRef - - searchTopicMap map[Topic]searchTopic - nextTopicQueryCleanup mclock.AbsTime - queriesSent map[*Node]map[common.Hash]sentQuery -} - -type searchTopic struct { - foundChn chan<- *Node -} - -type sentQuery struct { - sent mclock.AbsTime - lookup lookupInfo -} - -type topicTickets struct { - buckets map[timeBucket][]ticketRef - nextLookup mclock.AbsTime - nextReg mclock.AbsTime -} - -func newTicketStore() *ticketStore { - return &ticketStore{ - radius: make(map[Topic]*topicRadius), - tickets: make(map[Topic]*topicTickets), - regSet: make(map[Topic]struct{}), - nodes: make(map[*Node]*ticket), - nodeLastReq: make(map[*Node]reqInfo), - searchTopicMap: make(map[Topic]searchTopic), - queriesSent: make(map[*Node]map[common.Hash]sentQuery), - } -} - -// addTopic starts tracking a topic. If register is true, -// the local node will register the topic and tickets will be collected. -func (s *ticketStore) addTopic(topic Topic, register bool) { - log.Trace("Adding discovery topic", "topic", topic, "register", register) - if s.radius[topic] == nil { - s.radius[topic] = newTopicRadius(topic) - } - if register && s.tickets[topic] == nil { - s.tickets[topic] = &topicTickets{buckets: make(map[timeBucket][]ticketRef)} - } -} - -func (s *ticketStore) addSearchTopic(t Topic, foundChn chan<- *Node) { - s.addTopic(t, false) - if s.searchTopicMap[t].foundChn == nil { - s.searchTopicMap[t] = searchTopic{foundChn: foundChn} - } -} - -func (s *ticketStore) removeSearchTopic(t Topic) { - if st := s.searchTopicMap[t]; st.foundChn != nil { - delete(s.searchTopicMap, t) - } -} - -// removeRegisterTopic deletes all tickets for the given topic. -func (s *ticketStore) removeRegisterTopic(topic Topic) { - log.Trace("Removing discovery topic", "topic", topic) - if s.tickets[topic] == nil { - log.Warn("Removing non-existent discovery topic", "topic", topic) - return - } - for _, list := range s.tickets[topic].buckets { - for _, ref := range list { - ref.t.refCnt-- - if ref.t.refCnt == 0 { - delete(s.nodes, ref.t.node) - delete(s.nodeLastReq, ref.t.node) - } - } - } - delete(s.tickets, topic) -} - -func (s *ticketStore) regTopicSet() []Topic { - topics := make([]Topic, 0, len(s.tickets)) - for topic := range s.tickets { - topics = append(topics, topic) - } - return topics -} - -// nextRegisterLookup returns the target of the next lookup for ticket collection. 
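`pongToTicket` and `ticketToPong` above are inverse conversions: the first turns the relative `WaitPeriods` (seconds) carried in a pong packet into absolute local registration times, the second turns those absolute times back into wait periods. A minimal round-trip sketch with hypothetical values, using the same arithmetic as the deleted code:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

func main() {
	issueTime := mclock.Now()
	waitPeriods := []uint32{60, 600} // seconds, as carried in the pong packet

	// pongToTicket direction: wait periods become absolute registration times.
	regTime := make([]mclock.AbsTime, len(waitPeriods))
	for i, wp := range waitPeriods {
		regTime[i] = issueTime + mclock.AbsTime(time.Second*time.Duration(wp))
	}

	// ticketToPong direction: absolute times become wait periods again.
	back := make([]uint32, len(regTime))
	for i, rt := range regTime {
		back[i] = uint32(time.Duration(rt-issueTime) / time.Second)
	}
	fmt.Println(back) // [60 600]
}
```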
-func (s *ticketStore) nextRegisterLookup() (lookupInfo, time.Duration) { - // Queue up any new topics (or discarded ones), preserving iteration order - for topic := range s.tickets { - if _, ok := s.regSet[topic]; !ok { - s.regQueue = append(s.regQueue, topic) - s.regSet[topic] = struct{}{} - } - } - // Iterate over the set of all topics and look up the next suitable one - for len(s.regQueue) > 0 { - // Fetch the next topic from the queue, and ensure it still exists - topic := s.regQueue[0] - s.regQueue = s.regQueue[1:] - delete(s.regSet, topic) - - if s.tickets[topic] == nil { - continue - } - // If the topic needs more tickets, return it - if s.tickets[topic].nextLookup < mclock.Now() { - next, delay := s.radius[topic].nextTarget(false), 100*time.Millisecond - log.Trace("Found discovery topic to register", "topic", topic, "target", next.target, "delay", delay) - return next, delay - } - } - // No registration topics found or all exhausted, sleep - delay := 40 * time.Second - log.Trace("No topic found to register", "delay", delay) - return lookupInfo{}, delay -} - -func (s *ticketStore) nextSearchLookup(topic Topic) lookupInfo { - tr := s.radius[topic] - target := tr.nextTarget(tr.radiusLookupCnt >= searchForceQuery) - if target.radiusLookup { - tr.radiusLookupCnt++ - } else { - tr.radiusLookupCnt = 0 - } - return target -} - -func (s *ticketStore) addTicketRef(r ticketRef) { - topic := r.t.topics[r.idx] - tickets := s.tickets[topic] - if tickets == nil { - log.Warn("Adding ticket to non-existent topic", "topic", topic) - return - } - bucket := timeBucket(r.t.regTime[r.idx] / mclock.AbsTime(ticketTimeBucketLen)) - tickets.buckets[bucket] = append(tickets.buckets[bucket], r) - r.t.refCnt++ - - min := mclock.Now() - mclock.AbsTime(collectFrequency)*maxCollectDebt - if tickets.nextLookup < min { - tickets.nextLookup = min - } - tickets.nextLookup += mclock.AbsTime(collectFrequency) - - //s.removeExcessTickets(topic) -} - -func (s *ticketStore) nextFilteredTicket() (*ticketRef, time.Duration) { - now := mclock.Now() - for { - ticket, wait := s.nextRegisterableTicket() - if ticket == nil { - return ticket, wait - } - log.Trace("Found discovery ticket to register", "node", ticket.t.node, "serial", ticket.t.serial, "wait", wait) - - regTime := now + mclock.AbsTime(wait) - topic := ticket.t.topics[ticket.idx] - if s.tickets[topic] != nil && regTime >= s.tickets[topic].nextReg { - return ticket, wait - } - s.removeTicketRef(*ticket) - } -} - -func (s *ticketStore) ticketRegistered(ref ticketRef) { - now := mclock.Now() - - topic := ref.t.topics[ref.idx] - tickets := s.tickets[topic] - min := now - mclock.AbsTime(registerFrequency)*maxRegisterDebt - if min > tickets.nextReg { - tickets.nextReg = min - } - tickets.nextReg += mclock.AbsTime(registerFrequency) - s.tickets[topic] = tickets - - s.removeTicketRef(ref) -} - -// nextRegisterableTicket returns the next ticket that can be used -// to register. -// -// If the returned wait time <= zero the ticket can be used. For a positive -// wait time, the caller should requery the next ticket later. -// -// A ticket can be returned more than once with <= zero wait time in case -// the ticket contains multiple topics. 
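`addTicketRef` and `ticketRegistered` above pace their respective activities (ticket collection and registration) with the same pattern: a `next` timestamp is advanced by a fixed interval per event, but is first clamped so it can lag behind the present by at most a bounded number of intervals (`maxCollectDebt`, `maxRegisterDebt`). That bound caps how many events can fire back-to-back when catching up. A standalone sketch of the pattern, with hypothetical names:

```go
package main

import (
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
)

// pace returns the earliest time the next event may run: it advances next by
// one interval per event, but never lets it fall more than maxDebt intervals
// behind now, which limits how large a backlog can be worked off in a burst.
func pace(next, now mclock.AbsTime, interval time.Duration, maxDebt int) mclock.AbsTime {
	min := now - mclock.AbsTime(interval)*mclock.AbsTime(maxDebt)
	if next < min {
		next = min
	}
	return next + mclock.AbsTime(interval)
}

func main() {
	now := mclock.Now()
	var next mclock.AbsTime // zero value: far in the past, i.e. maximum debt
	for i := 0; i < 3; i++ {
		next = pace(next, now, 30*time.Second, 10)
		// Prints -4m30s, -4m0s, -3m30s: still in the past (events may run
		// immediately) while the bounded debt is being worked off.
		fmt.Println(time.Duration(next - now))
	}
}
```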
-func (s *ticketStore) nextRegisterableTicket() (*ticketRef, time.Duration) { - now := mclock.Now() - if s.nextTicketCached != nil { - return s.nextTicketCached, time.Duration(s.nextTicketCached.topicRegTime() - now) - } - - for bucket := s.lastBucketFetched; ; bucket++ { - var ( - empty = true // true if there are no tickets - nextTicket ticketRef // uninitialized if this bucket is empty - ) - for _, tickets := range s.tickets { - //s.removeExcessTickets(topic) - if len(tickets.buckets) != 0 { - empty = false - - list := tickets.buckets[bucket] - for _, ref := range list { - //debugLog(fmt.Sprintf(" nrt bucket = %d node = %x sn = %v wait = %v", bucket, ref.t.node.ID[:8], ref.t.serial, time.Duration(ref.topicRegTime()-now))) - if nextTicket.t == nil || ref.topicRegTime() < nextTicket.topicRegTime() { - nextTicket = ref - } - } - } - } - if empty { - return nil, 0 - } - if nextTicket.t != nil { - s.nextTicketCached = &nextTicket - return &nextTicket, time.Duration(nextTicket.topicRegTime() - now) - } - s.lastBucketFetched = bucket - } -} - -// removeTicket removes a ticket from the ticket store -func (s *ticketStore) removeTicketRef(ref ticketRef) { - log.Trace("Removing discovery ticket reference", "node", ref.t.node.ID, "serial", ref.t.serial) - - // Make nextRegisterableTicket return the next available ticket. - s.nextTicketCached = nil - - topic := ref.topic() - tickets := s.tickets[topic] - - if tickets == nil { - log.Trace("Removing tickets from unknown topic", "topic", topic) - return - } - bucket := timeBucket(ref.t.regTime[ref.idx] / mclock.AbsTime(ticketTimeBucketLen)) - list := tickets.buckets[bucket] - idx := -1 - for i, bt := range list { - if bt.t == ref.t { - idx = i - break - } - } - if idx == -1 { - panic(nil) - } - list = append(list[:idx], list[idx+1:]...) 
- if len(list) != 0 { - tickets.buckets[bucket] = list - } else { - delete(tickets.buckets, bucket) - } - ref.t.refCnt-- - if ref.t.refCnt == 0 { - delete(s.nodes, ref.t.node) - delete(s.nodeLastReq, ref.t.node) - } -} - -type lookupInfo struct { - target common.Hash - topic Topic - radiusLookup bool -} - -type reqInfo struct { - pingHash []byte - lookup lookupInfo - time mclock.AbsTime -} - -// returns -1 if not found -func (t *ticket) findIdx(topic Topic) int { - for i, tt := range t.topics { - if tt == topic { - return i - } - } - return -1 -} - -func (s *ticketStore) registerLookupDone(lookup lookupInfo, nodes []*Node, ping func(n *Node) []byte) { - now := mclock.Now() - for i, n := range nodes { - if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius { - if lookup.radiusLookup { - if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC { - s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now} - } - } else { - if s.nodes[n] == nil { - s.nodeLastReq[n] = reqInfo{pingHash: ping(n), lookup: lookup, time: now} - } - } - } - } -} - -func (s *ticketStore) searchLookupDone(lookup lookupInfo, nodes []*Node, query func(n *Node, topic Topic) []byte) { - now := mclock.Now() - for i, n := range nodes { - if i == 0 || (binary.BigEndian.Uint64(n.sha[:8])^binary.BigEndian.Uint64(lookup.target[:8])) < s.radius[lookup.topic].minRadius { - if lookup.radiusLookup { - if lastReq, ok := s.nodeLastReq[n]; !ok || time.Duration(now-lastReq.time) > radiusTC { - s.nodeLastReq[n] = reqInfo{pingHash: nil, lookup: lookup, time: now} - } - } // else { - if s.canQueryTopic(n, lookup.topic) { - hash := query(n, lookup.topic) - if hash != nil { - s.addTopicQuery(common.BytesToHash(hash), n, lookup) - } - } - //} - } - } -} - -func (s *ticketStore) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t *ticket) { - for i, topic := range t.topics { - if tt, ok := s.radius[topic]; ok { - tt.adjustWithTicket(now, targetHash, ticketRef{t, i}) - } - } -} - -func (s *ticketStore) addTicket(localTime mclock.AbsTime, pingHash []byte, ticket *ticket) { - log.Trace("Adding discovery ticket", "node", ticket.node.ID, "serial", ticket.serial) - - lastReq, ok := s.nodeLastReq[ticket.node] - if !(ok && bytes.Equal(pingHash, lastReq.pingHash)) { - return - } - s.adjustWithTicket(localTime, lastReq.lookup.target, ticket) - - if lastReq.lookup.radiusLookup || s.nodes[ticket.node] != nil { - return - } - - topic := lastReq.lookup.topic - topicIdx := ticket.findIdx(topic) - if topicIdx == -1 { - return - } - - bucket := timeBucket(localTime / mclock.AbsTime(ticketTimeBucketLen)) - if s.lastBucketFetched == 0 || bucket < s.lastBucketFetched { - s.lastBucketFetched = bucket - } - - if _, ok := s.tickets[topic]; ok { - wait := ticket.regTime[topicIdx] - localTime - rnd := rand.ExpFloat64() - if rnd > 10 { - rnd = 10 - } - if float64(wait) < float64(keepTicketConst)+float64(keepTicketExp)*rnd { - // use the ticket to register this topic - //fmt.Println("addTicket", ticket.node.ID[:8], ticket.node.addr().String(), ticket.serial, ticket.pong) - s.addTicketRef(ticketRef{ticket, topicIdx}) - } - } - - if ticket.refCnt > 0 { - s.nextTicketCached = nil - s.nodes[ticket.node] = ticket - } -} - -func (s *ticketStore) canQueryTopic(node *Node, topic Topic) bool { - qq := s.queriesSent[node] - if qq != nil { - now := mclock.Now() - for _, sq := range qq { - if sq.lookup.topic == topic && sq.sent > 
now-mclock.AbsTime(topicQueryResend) { - return false - } - } - } - return true -} - -func (s *ticketStore) addTopicQuery(hash common.Hash, node *Node, lookup lookupInfo) { - now := mclock.Now() - qq := s.queriesSent[node] - if qq == nil { - qq = make(map[common.Hash]sentQuery) - s.queriesSent[node] = qq - } - qq[hash] = sentQuery{sent: now, lookup: lookup} - s.cleanupTopicQueries(now) -} - -func (s *ticketStore) cleanupTopicQueries(now mclock.AbsTime) { - if s.nextTopicQueryCleanup > now { - return - } - exp := now - mclock.AbsTime(topicQueryResend) - for n, qq := range s.queriesSent { - for h, q := range qq { - if q.sent < exp { - delete(qq, h) - } - } - if len(qq) == 0 { - delete(s.queriesSent, n) - } - } - s.nextTopicQueryCleanup = now + mclock.AbsTime(topicQueryTimeout) -} - -func (s *ticketStore) gotTopicNodes(from *Node, hash common.Hash, nodes []rpcNode) (timeout bool) { - now := mclock.Now() - //fmt.Println("got", from.addr().String(), hash, len(nodes)) - qq := s.queriesSent[from] - if qq == nil { - return true - } - q, ok := qq[hash] - if !ok || now > q.sent+mclock.AbsTime(topicQueryTimeout) { - return true - } - inside := float64(0) - if len(nodes) > 0 { - inside = 1 - } - s.radius[q.lookup.topic].adjust(now, q.lookup.target, from.sha, inside) - chn := s.searchTopicMap[q.lookup.topic].foundChn - if chn == nil { - //fmt.Println("no channel") - return false - } - for _, node := range nodes { - ip := node.IP - if ip.IsUnspecified() || ip.IsLoopback() { - ip = from.IP - } - n := NewNode(node.ID, ip, node.UDP, node.TCP) - select { - case chn <- n: - default: - return false - } - } - return false -} - -type topicRadius struct { - topic Topic - topicHashPrefix uint64 - radius, minRadius uint64 - buckets []topicRadiusBucket - converged bool - radiusLookupCnt int -} - -type topicRadiusEvent int - -const ( - trOutside topicRadiusEvent = iota - trInside - trNoAdjust - trCount -) - -type topicRadiusBucket struct { - weights [trCount]float64 - lastTime mclock.AbsTime - value float64 - lookupSent map[common.Hash]mclock.AbsTime -} - -func (b *topicRadiusBucket) update(now mclock.AbsTime) { - if now == b.lastTime { - return - } - exp := math.Exp(-float64(now-b.lastTime) / float64(radiusTC)) - for i, w := range b.weights { - b.weights[i] = w * exp - } - b.lastTime = now - - for target, tm := range b.lookupSent { - if now-tm > mclock.AbsTime(respTimeout) { - b.weights[trNoAdjust] += 1 - delete(b.lookupSent, target) - } - } -} - -func (b *topicRadiusBucket) adjust(now mclock.AbsTime, inside float64) { - b.update(now) - if inside <= 0 { - b.weights[trOutside] += 1 - } else { - if inside >= 1 { - b.weights[trInside] += 1 - } else { - b.weights[trInside] += inside - b.weights[trOutside] += 1 - inside - } - } -} - -func newTopicRadius(t Topic) *topicRadius { - topicHash := crypto.Keccak256Hash([]byte(t)) - topicHashPrefix := binary.BigEndian.Uint64(topicHash[0:8]) - - return &topicRadius{ - topic: t, - topicHashPrefix: topicHashPrefix, - radius: maxRadius, - minRadius: maxRadius, - } -} - -func (r *topicRadius) getBucketIdx(addrHash common.Hash) int { - prefix := binary.BigEndian.Uint64(addrHash[0:8]) - var log2 float64 - if prefix != r.topicHashPrefix { - log2 = math.Log2(float64(prefix ^ r.topicHashPrefix)) - } - bucket := int((64 - log2) * radiusBucketsPerBit) - max := 64*radiusBucketsPerBit - 1 - if bucket > max { - return max - } - if bucket < 0 { - return 0 - } - return bucket -} - -func (r *topicRadius) targetForBucket(bucket int) common.Hash { - min := math.Pow(2, 
64-float64(bucket+1)/radiusBucketsPerBit) - max := math.Pow(2, 64-float64(bucket)/radiusBucketsPerBit) - a := uint64(min) - b := randUint64n(uint64(max - min)) - xor := a + b - if xor < a { - xor = ^uint64(0) - } - prefix := r.topicHashPrefix ^ xor - var target common.Hash - binary.BigEndian.PutUint64(target[0:8], prefix) - globalRandRead(target[8:]) - return target -} - -// package rand provides a Read function in Go 1.6 and later, but -// we can't use it yet because we still support Go 1.5. -func globalRandRead(b []byte) { - pos := 0 - val := 0 - for n := 0; n < len(b); n++ { - if pos == 0 { - val = rand.Int() - pos = 7 - } - b[n] = byte(val) - val >>= 8 - pos-- - } -} - -func (r *topicRadius) chooseLookupBucket(a, b int) int { - if a < 0 { - a = 0 - } - if a > b { - return -1 - } - c := 0 - for i := a; i <= b; i++ { - if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust { - c++ - } - } - if c == 0 { - return -1 - } - rnd := randUint(uint32(c)) - for i := a; i <= b; i++ { - if i >= len(r.buckets) || r.buckets[i].weights[trNoAdjust] < maxNoAdjust { - if rnd == 0 { - return i - } - rnd-- - } - } - panic(nil) // should never happen -} - -func (r *topicRadius) needMoreLookups(a, b int, maxValue float64) bool { - var max float64 - if a < 0 { - a = 0 - } - if b >= len(r.buckets) { - b = len(r.buckets) - 1 - if r.buckets[b].value > max { - max = r.buckets[b].value - } - } - if b >= a { - for i := a; i <= b; i++ { - if r.buckets[i].value > max { - max = r.buckets[i].value - } - } - } - return maxValue-max < minPeakSize -} - -func (r *topicRadius) recalcRadius() (radius uint64, radiusLookup int) { - maxBucket := 0 - maxValue := float64(0) - now := mclock.Now() - v := float64(0) - for i := range r.buckets { - r.buckets[i].update(now) - v += r.buckets[i].weights[trOutside] - r.buckets[i].weights[trInside] - r.buckets[i].value = v - //fmt.Printf("%v %v | ", v, r.buckets[i].weights[trNoAdjust]) - } - //fmt.Println() - slopeCross := -1 - for i, b := range r.buckets { - v := b.value - if v < float64(i)*minSlope { - slopeCross = i - break - } - if v > maxValue { - maxValue = v - maxBucket = i + 1 - } - } - - minRadBucket := len(r.buckets) - sum := float64(0) - for minRadBucket > 0 && sum < minRightSum { - minRadBucket-- - b := r.buckets[minRadBucket] - sum += b.weights[trInside] + b.weights[trOutside] - } - r.minRadius = uint64(math.Pow(2, 64-float64(minRadBucket)/radiusBucketsPerBit)) - - lookupLeft := -1 - if r.needMoreLookups(0, maxBucket-lookupWidth-1, maxValue) { - lookupLeft = r.chooseLookupBucket(maxBucket-lookupWidth, maxBucket-1) - } - lookupRight := -1 - if slopeCross != maxBucket && (minRadBucket <= maxBucket || r.needMoreLookups(maxBucket+lookupWidth, len(r.buckets)-1, maxValue)) { - for len(r.buckets) <= maxBucket+lookupWidth { - r.buckets = append(r.buckets, topicRadiusBucket{lookupSent: make(map[common.Hash]mclock.AbsTime)}) - } - lookupRight = r.chooseLookupBucket(maxBucket, maxBucket+lookupWidth-1) - } - if lookupLeft == -1 { - radiusLookup = lookupRight - } else { - if lookupRight == -1 { - radiusLookup = lookupLeft - } else { - if randUint(2) == 0 { - radiusLookup = lookupLeft - } else { - radiusLookup = lookupRight - } - } - } - - //fmt.Println("mb", maxBucket, "sc", slopeCross, "mrb", minRadBucket, "ll", lookupLeft, "lr", lookupRight, "mv", maxValue) - - if radiusLookup == -1 { - // no more radius lookups needed at the moment, return a radius - r.converged = true - rad := maxBucket - if minRadBucket < rad { - rad = minRadBucket - } - radius = ^uint64(0) - if rad 
> 0 { - radius = uint64(math.Pow(2, 64-float64(rad)/radiusBucketsPerBit)) - } - r.radius = radius - } - - return -} - -func (r *topicRadius) nextTarget(forceRegular bool) lookupInfo { - if !forceRegular { - _, radiusLookup := r.recalcRadius() - if radiusLookup != -1 { - target := r.targetForBucket(radiusLookup) - r.buckets[radiusLookup].lookupSent[target] = mclock.Now() - return lookupInfo{target: target, topic: r.topic, radiusLookup: true} - } - } - - radExt := r.radius / 2 - if radExt > maxRadius-r.radius { - radExt = maxRadius - r.radius - } - rnd := randUint64n(r.radius) + randUint64n(2*radExt) - if rnd > radExt { - rnd -= radExt - } else { - rnd = radExt - rnd - } - - prefix := r.topicHashPrefix ^ rnd - var target common.Hash - binary.BigEndian.PutUint64(target[0:8], prefix) - globalRandRead(target[8:]) - return lookupInfo{target: target, topic: r.topic, radiusLookup: false} -} - -func (r *topicRadius) adjustWithTicket(now mclock.AbsTime, targetHash common.Hash, t ticketRef) { - wait := t.t.regTime[t.idx] - t.t.issueTime - inside := float64(wait)/float64(targetWaitTime) - 0.5 - if inside > 1 { - inside = 1 - } - if inside < 0 { - inside = 0 - } - r.adjust(now, targetHash, t.t.node.sha, inside) -} - -func (r *topicRadius) adjust(now mclock.AbsTime, targetHash, addrHash common.Hash, inside float64) { - bucket := r.getBucketIdx(addrHash) - //fmt.Println("adjust", bucket, len(r.buckets), inside) - if bucket >= len(r.buckets) { - return - } - r.buckets[bucket].adjust(now, inside) - delete(r.buckets[bucket].lookupSent, targetHash) -} diff --git a/p2p/discv5/topic.go b/p2p/discv5/topic.go deleted file mode 100644 index 609a41297f..0000000000 --- a/p2p/discv5/topic.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discv5 - -import ( - "container/heap" - "fmt" - "math" - "math/rand" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/log" -) - -const ( - maxEntries = 10000 - maxEntriesPerTopic = 50 - - fallbackRegistrationExpiry = 1 * time.Hour -) - -type Topic string - -type topicEntry struct { - topic Topic - fifoIdx uint64 - node *Node - expire mclock.AbsTime -} - -type topicInfo struct { - entries map[uint64]*topicEntry - fifoHead, fifoTail uint64 - rqItem *topicRequestQueueItem - wcl waitControlLoop -} - -// removes tail element from the fifo -func (t *topicInfo) getFifoTail() *topicEntry { - for t.entries[t.fifoTail] == nil { - t.fifoTail++ - } - tail := t.entries[t.fifoTail] - t.fifoTail++ - return tail -} - -type nodeInfo struct { - entries map[Topic]*topicEntry - lastIssuedTicket, lastUsedTicket uint32 - // you can't register a ticket newer than lastUsedTicket before noRegUntil (absolute time) - noRegUntil mclock.AbsTime -} - -type topicTable struct { - db *nodeDB - self *Node - nodes map[*Node]*nodeInfo - topics map[Topic]*topicInfo - globalEntries uint64 - requested topicRequestQueue - requestCnt uint64 - lastGarbageCollection mclock.AbsTime -} - -func newTopicTable(db *nodeDB, self *Node) *topicTable { - if printTestImgLogs { - fmt.Printf("*N %016x\n", self.sha[:8]) - } - return &topicTable{ - db: db, - nodes: make(map[*Node]*nodeInfo), - topics: make(map[Topic]*topicInfo), - self: self, - } -} - -func (t *topicTable) getOrNewTopic(topic Topic) *topicInfo { - ti := t.topics[topic] - if ti == nil { - rqItem := &topicRequestQueueItem{ - topic: topic, - priority: t.requestCnt, - } - ti = &topicInfo{ - entries: make(map[uint64]*topicEntry), - rqItem: rqItem, - } - t.topics[topic] = ti - heap.Push(&t.requested, rqItem) - } - return ti -} - -func (t *topicTable) checkDeleteTopic(topic Topic) { - ti := t.topics[topic] - if ti == nil { - return - } - if len(ti.entries) == 0 && ti.wcl.hasMinimumWaitPeriod() { - delete(t.topics, topic) - heap.Remove(&t.requested, ti.rqItem.index) - } -} - -func (t *topicTable) getOrNewNode(node *Node) *nodeInfo { - n := t.nodes[node] - if n == nil { - //fmt.Printf("newNode %016x %016x\n", t.self.sha[:8], node.sha[:8]) - var issued, used uint32 - if t.db != nil { - issued, used = t.db.fetchTopicRegTickets(node.ID) - } - n = &nodeInfo{ - entries: make(map[Topic]*topicEntry), - lastIssuedTicket: issued, - lastUsedTicket: used, - } - t.nodes[node] = n - } - return n -} - -func (t *topicTable) checkDeleteNode(node *Node) { - if n, ok := t.nodes[node]; ok && len(n.entries) == 0 && n.noRegUntil < mclock.Now() { - //fmt.Printf("deleteNode %016x %016x\n", t.self.sha[:8], node.sha[:8]) - delete(t.nodes, node) - } -} - -func (t *topicTable) storeTicketCounters(node *Node) { - n := t.getOrNewNode(node) - if t.db != nil { - t.db.updateTopicRegTickets(node.ID, n.lastIssuedTicket, n.lastUsedTicket) - } -} - -func (t *topicTable) getEntries(topic Topic) []*Node { - t.collectGarbage() - - te := t.topics[topic] - if te == nil { - return nil - } - nodes := make([]*Node, len(te.entries)) - i := 0 - for _, e := range te.entries { - nodes[i] = e.node - i++ - } - t.requestCnt++ - t.requested.update(te.rqItem, t.requestCnt) - return nodes -} - -func (t *topicTable) addEntry(node *Node, topic Topic) { - n := t.getOrNewNode(node) - // clear previous entries by the same node - for _, e := range n.entries { - t.deleteEntry(e) - } - // *** - n = t.getOrNewNode(node) - - tm := mclock.Now() - te := t.getOrNewTopic(topic) - - if 
len(te.entries) == maxEntriesPerTopic { - t.deleteEntry(te.getFifoTail()) - } - - if t.globalEntries == maxEntries { - t.deleteEntry(t.leastRequested()) // not empty, no need to check for nil - } - - fifoIdx := te.fifoHead - te.fifoHead++ - entry := &topicEntry{ - topic: topic, - fifoIdx: fifoIdx, - node: node, - expire: tm + mclock.AbsTime(fallbackRegistrationExpiry), - } - if printTestImgLogs { - fmt.Printf("*+ %d %v %016x %016x\n", tm/1000000, topic, t.self.sha[:8], node.sha[:8]) - } - te.entries[fifoIdx] = entry - n.entries[topic] = entry - t.globalEntries++ - te.wcl.registered(tm) -} - -// removes least requested element from the fifo -func (t *topicTable) leastRequested() *topicEntry { - for t.requested.Len() > 0 && t.topics[t.requested[0].topic] == nil { - heap.Pop(&t.requested) - } - if t.requested.Len() == 0 { - return nil - } - return t.topics[t.requested[0].topic].getFifoTail() -} - -// entry should exist -func (t *topicTable) deleteEntry(e *topicEntry) { - if printTestImgLogs { - fmt.Printf("*- %d %v %016x %016x\n", mclock.Now()/1000000, e.topic, t.self.sha[:8], e.node.sha[:8]) - } - ne := t.nodes[e.node].entries - delete(ne, e.topic) - if len(ne) == 0 { - t.checkDeleteNode(e.node) - } - te := t.topics[e.topic] - delete(te.entries, e.fifoIdx) - if len(te.entries) == 0 { - t.checkDeleteTopic(e.topic) - } - t.globalEntries-- -} - -// It is assumed that topics and waitPeriods have the same length. -func (t *topicTable) useTicket(node *Node, serialNo uint32, topics []Topic, idx int, issueTime uint64, waitPeriods []uint32) (registered bool) { - log.Trace("Using discovery ticket", "serial", serialNo, "topics", topics, "waits", waitPeriods) - //fmt.Println("useTicket", serialNo, topics, waitPeriods) - t.collectGarbage() - - n := t.getOrNewNode(node) - if serialNo < n.lastUsedTicket { - return false - } - - tm := mclock.Now() - if serialNo > n.lastUsedTicket && tm < n.noRegUntil { - return false - } - if serialNo != n.lastUsedTicket { - n.lastUsedTicket = serialNo - n.noRegUntil = tm + mclock.AbsTime(noRegTimeout()) - t.storeTicketCounters(node) - } - - currTime := uint64(tm / mclock.AbsTime(time.Second)) - regTime := issueTime + uint64(waitPeriods[idx]) - relTime := int64(currTime - regTime) - if relTime >= -1 && relTime <= regTimeWindow+1 { // give clients a little security margin on both ends - if e := n.entries[topics[idx]]; e == nil { - t.addEntry(node, topics[idx]) - } else { - // if there is an active entry, don't move to the front of the FIFO but prolong expire time - e.expire = tm + mclock.AbsTime(fallbackRegistrationExpiry) - } - return true - } - - return false -} - -func (t *topicTable) getTicket(node *Node, topics []Topic) *ticket { - t.collectGarbage() - - now := mclock.Now() - n := t.getOrNewNode(node) - n.lastIssuedTicket++ - t.storeTicketCounters(node) - - tic := &ticket{ - issueTime: now, - topics: topics, - serial: n.lastIssuedTicket, - regTime: make([]mclock.AbsTime, len(topics)), - } - for i, topic := range topics { - var waitPeriod time.Duration - if topic := t.topics[topic]; topic != nil { - waitPeriod = topic.wcl.waitPeriod - } else { - waitPeriod = minWaitPeriod - } - - tic.regTime[i] = now + mclock.AbsTime(waitPeriod) - } - return tic -} - -const gcInterval = time.Minute - -func (t *topicTable) collectGarbage() { - tm := mclock.Now() - if time.Duration(tm-t.lastGarbageCollection) < gcInterval { - return - } - t.lastGarbageCollection = tm - - for node, n := range t.nodes { - for _, e := range n.entries { - if e.expire <= tm { - t.deleteEntry(e) - } - } - - 
t.checkDeleteNode(node) - } - - for topic := range t.topics { - t.checkDeleteTopic(topic) - } -} - -const ( - minWaitPeriod = time.Minute - regTimeWindow = 10 // seconds - avgnoRegTimeout = time.Minute * 10 - // target average interval between two incoming ad requests - wcTargetRegInterval = time.Minute * 10 / maxEntriesPerTopic - // - wcTimeConst = time.Minute * 10 -) - -// initialization is not required, will set to minWaitPeriod at first registration -type waitControlLoop struct { - lastIncoming mclock.AbsTime - waitPeriod time.Duration -} - -func (w *waitControlLoop) registered(tm mclock.AbsTime) { - w.waitPeriod = w.nextWaitPeriod(tm) - w.lastIncoming = tm -} - -func (w *waitControlLoop) nextWaitPeriod(tm mclock.AbsTime) time.Duration { - period := tm - w.lastIncoming - wp := time.Duration(float64(w.waitPeriod) * math.Exp((float64(wcTargetRegInterval)-float64(period))/float64(wcTimeConst))) - if wp < minWaitPeriod { - wp = minWaitPeriod - } - return wp -} - -func (w *waitControlLoop) hasMinimumWaitPeriod() bool { - return w.nextWaitPeriod(mclock.Now()) == minWaitPeriod -} - -func noRegTimeout() time.Duration { - e := rand.ExpFloat64() - if e > 100 { - e = 100 - } - return time.Duration(float64(avgnoRegTimeout) * e) -} - -type topicRequestQueueItem struct { - topic Topic - priority uint64 - index int -} - -// A topicRequestQueue implements heap.Interface and holds topicRequestQueueItems. -type topicRequestQueue []*topicRequestQueueItem - -func (tq topicRequestQueue) Len() int { return len(tq) } - -func (tq topicRequestQueue) Less(i, j int) bool { - return tq[i].priority < tq[j].priority -} - -func (tq topicRequestQueue) Swap(i, j int) { - tq[i], tq[j] = tq[j], tq[i] - tq[i].index = i - tq[j].index = j -} - -func (tq *topicRequestQueue) Push(x interface{}) { - n := len(*tq) - item := x.(*topicRequestQueueItem) - item.index = n - *tq = append(*tq, item) -} - -func (tq *topicRequestQueue) Pop() interface{} { - old := *tq - n := len(old) - item := old[n-1] - item.index = -1 - *tq = old[0 : n-1] - return item -} - -func (tq *topicRequestQueue) update(item *topicRequestQueueItem, priority uint64) { - item.priority = priority - heap.Fix(tq, item.index) -} diff --git a/p2p/discv5/topic_test.go b/p2p/discv5/topic_test.go deleted file mode 100644 index ba79993f29..0000000000 --- a/p2p/discv5/topic_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package discv5 - -import ( - "encoding/binary" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/mclock" -) - -func TestTopicRadius(t *testing.T) { - now := mclock.Now() - topic := Topic("qwerty") - rad := newTopicRadius(topic) - targetRad := (^uint64(0)) / 100 - - waitFn := func(addr common.Hash) time.Duration { - prefix := binary.BigEndian.Uint64(addr[0:8]) - dist := prefix ^ rad.topicHashPrefix - relDist := float64(dist) / float64(targetRad) - relTime := (1 - relDist/2) * 2 - if relTime < 0 { - relTime = 0 - } - return time.Duration(float64(targetWaitTime) * relTime) - } - - bcnt := 0 - cnt := 0 - var sum float64 - for cnt < 100 { - addr := rad.nextTarget(false).target - wait := waitFn(addr) - ticket := &ticket{ - topics: []Topic{topic}, - regTime: []mclock.AbsTime{mclock.AbsTime(wait)}, - node: &Node{nodeNetGuts: nodeNetGuts{sha: addr}}, - } - rad.adjustWithTicket(now, addr, ticketRef{ticket, 0}) - if rad.radius != maxRadius { - cnt++ - sum += float64(rad.radius) - } else { - bcnt++ - if bcnt > 500 { - t.Errorf("Radius did not converge in 500 iterations") - } - } - } - avgRel := sum / float64(cnt) / float64(targetRad) - if avgRel > 1.05 || avgRel < 0.95 { - t.Errorf("Average/target ratio is too far from 1 (%v)", avgRel) - } -} diff --git a/p2p/discv5/udp.go b/p2p/discv5/udp.go deleted file mode 100644 index 088f95cac6..0000000000 --- a/p2p/discv5/udp.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package discv5 - -import ( - "bytes" - "crypto/ecdsa" - "errors" - "fmt" - "net" - "time" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/netutil" - "github.com/ethereum/go-ethereum/rlp" -) - -const Version = 4 - -// Errors -var ( - errPacketTooSmall = errors.New("too small") - errBadPrefix = errors.New("bad prefix") -) - -// Timeouts -const ( - respTimeout = 500 * time.Millisecond - expiration = 20 * time.Second -) - -// RPC request structures -type ( - ping struct { - Version uint - From, To rpcEndpoint - Expiration uint64 - - // v5 - Topics []Topic - - // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` - } - - // pong is the reply to ping. - pong struct { - // This field should mirror the UDP envelope address - // of the ping packet, which provides a way to discover the - // the external address (after NAT). - To rpcEndpoint - - ReplyTok []byte // This contains the hash of the ping packet. - Expiration uint64 // Absolute timestamp at which the packet becomes invalid. - - // v5 - TopicHash common.Hash - TicketSerial uint32 - WaitPeriods []uint32 - - // Ignore additional fields (for forward compatibility). 
- Rest []rlp.RawValue `rlp:"tail"` - } - - // findnode is a query for nodes close to the given target. - findnode struct { - Target NodeID // doesn't need to be an actual public key - Expiration uint64 - // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` - } - - // findnode is a query for nodes close to the given target. - findnodeHash struct { - Target common.Hash - Expiration uint64 - // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` - } - - // reply to findnode - neighbors struct { - Nodes []rpcNode - Expiration uint64 - // Ignore additional fields (for forward compatibility). - Rest []rlp.RawValue `rlp:"tail"` - } - - topicRegister struct { - Topics []Topic - Idx uint - Pong []byte - } - - topicQuery struct { - Topic Topic - Expiration uint64 - } - - // reply to topicQuery - topicNodes struct { - Echo common.Hash - Nodes []rpcNode - } - - rpcNode struct { - IP net.IP // len 4 for IPv4 or 16 for IPv6 - UDP uint16 // for discovery protocol - TCP uint16 // for RLPx protocol - ID NodeID - } - - rpcEndpoint struct { - IP net.IP // len 4 for IPv4 or 16 for IPv6 - UDP uint16 // for discovery protocol - TCP uint16 // for RLPx protocol - } -) - -var ( - versionPrefix = []byte("temporary discovery v5") - versionPrefixSize = len(versionPrefix) - sigSize = 520 / 8 - headSize = versionPrefixSize + sigSize // space of packet frame data -) - -// Neighbors replies are sent across multiple packets to -// stay below the 1280 byte limit. We compute the maximum number -// of entries by stuffing a packet until it grows too large. -var maxNeighbors = func() int { - p := neighbors{Expiration: ^uint64(0)} - maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)} - for n := 0; ; n++ { - p.Nodes = append(p.Nodes, maxSizeNode) - size, _, err := rlp.EncodeToReader(p) - if err != nil { - // If this ever happens, it will be caught by the unit tests. - panic("cannot encode: " + err.Error()) - } - if headSize+size+1 >= 1280 { - return n - } - } -}() - -var maxTopicNodes = func() int { - p := topicNodes{} - maxSizeNode := rpcNode{IP: make(net.IP, 16), UDP: ^uint16(0), TCP: ^uint16(0)} - for n := 0; ; n++ { - p.Nodes = append(p.Nodes, maxSizeNode) - size, _, err := rlp.EncodeToReader(p) - if err != nil { - // If this ever happens, it will be caught by the unit tests. - panic("cannot encode: " + err.Error()) - } - if headSize+size+1 >= 1280 { - return n - } - } -}() - -func makeEndpoint(addr *net.UDPAddr, tcpPort uint16) rpcEndpoint { - ip := addr.IP.To4() - if ip == nil { - ip = addr.IP.To16() - } - return rpcEndpoint{IP: ip, UDP: uint16(addr.Port), TCP: tcpPort} -} - -func nodeFromRPC(sender *net.UDPAddr, rn rpcNode) (*Node, error) { - if err := netutil.CheckRelayIP(sender.IP, rn.IP); err != nil { - return nil, err - } - n := NewNode(rn.ID, rn.IP, rn.UDP, rn.TCP) - err := n.validateComplete() - return n, err -} - -func nodeToRPC(n *Node) rpcNode { - return rpcNode{ID: n.ID, IP: n.IP, UDP: n.UDP, TCP: n.TCP} -} - -type ingressPacket struct { - remoteID NodeID - remoteAddr *net.UDPAddr - ev nodeEvent - hash []byte - data interface{} // one of the RPC structs - rawData []byte -} - -type conn interface { - ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) - WriteToUDP(b []byte, addr *net.UDPAddr) (n int, err error) - Close() error - LocalAddr() net.Addr -} - -// udp implements the RPC protocol. 
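The constants just above fix the layout of every packet in this prototype protocol: a 22-byte plaintext version prefix, a 65-byte recoverable secp256k1 signature, one packet-type byte, then the RLP-encoded payload, with whole frames kept below 1280 bytes (hence the `maxNeighbors`/`maxTopicNodes` probing). A tiny sketch that just spells out the resulting offsets, for illustration only:

```go
package main

import "fmt"

func main() {
	versionPrefix := []byte("temporary discovery v5")
	sigSize := 520 / 8 // 65 bytes: r || s || recovery id
	headSize := len(versionPrefix) + sigSize

	fmt.Println(len(versionPrefix), sigSize, headSize) // 22 65 87
	// Frame layout:
	//   [0:22)  version prefix (plaintext)
	//   [22:87) signature over Keccak256(packet[87:])
	//   [87]    packet type byte
	//   [88:]   RLP-encoded payload
	// The packet hash echoed in replies is Keccak256(packet[22:]).
}
```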
-type udp struct { - conn conn - priv *ecdsa.PrivateKey - ourEndpoint rpcEndpoint - net *Network -} - -// ListenUDP returns a new table that listens for UDP packets on laddr. -func ListenUDP(priv *ecdsa.PrivateKey, conn conn, nodeDBPath string, netrestrict *netutil.Netlist) (*Network, error) { - realaddr := conn.LocalAddr().(*net.UDPAddr) - transport, err := listenUDP(priv, conn, realaddr) - if err != nil { - return nil, err - } - net, err := newNetwork(transport, priv.PublicKey, nodeDBPath, netrestrict) - if err != nil { - return nil, err - } - log.Info("UDP listener up", "net", net.tab.self) - transport.net = net - go transport.readLoop() - return net, nil -} - -func listenUDP(priv *ecdsa.PrivateKey, conn conn, realaddr *net.UDPAddr) (*udp, error) { - return &udp{conn: conn, priv: priv, ourEndpoint: makeEndpoint(realaddr, uint16(realaddr.Port))}, nil -} - -func (t *udp) localAddr() *net.UDPAddr { - return t.conn.LocalAddr().(*net.UDPAddr) -} - -func (t *udp) Close() { - t.conn.Close() -} - -func (t *udp) send(remote *Node, ptype nodeEvent, data interface{}) (hash []byte) { - hash, _ = t.sendPacket(remote.ID, remote.addr(), byte(ptype), data) - return hash -} - -func (t *udp) sendPing(remote *Node, toaddr *net.UDPAddr, topics []Topic) (hash []byte) { - hash, _ = t.sendPacket(remote.ID, toaddr, byte(pingPacket), ping{ - Version: Version, - From: t.ourEndpoint, - To: makeEndpoint(toaddr, uint16(toaddr.Port)), // TODO: maybe use known TCP port from DB - Expiration: uint64(time.Now().Add(expiration).Unix()), - Topics: topics, - }) - return hash -} - -func (t *udp) sendNeighbours(remote *Node, results []*Node) { - // Send neighbors in chunks with at most maxNeighbors per packet - // to stay below the 1280 byte limit. - p := neighbors{Expiration: uint64(time.Now().Add(expiration).Unix())} - for i, result := range results { - p.Nodes = append(p.Nodes, nodeToRPC(result)) - if len(p.Nodes) == maxNeighbors || i == len(results)-1 { - t.sendPacket(remote.ID, remote.addr(), byte(neighborsPacket), p) - p.Nodes = p.Nodes[:0] - } - } -} - -func (t *udp) sendFindnodeHash(remote *Node, target common.Hash) { - t.sendPacket(remote.ID, remote.addr(), byte(findnodeHashPacket), findnodeHash{ - Target: target, - Expiration: uint64(time.Now().Add(expiration).Unix()), - }) -} - -func (t *udp) sendTopicRegister(remote *Node, topics []Topic, idx int, pong []byte) { - t.sendPacket(remote.ID, remote.addr(), byte(topicRegisterPacket), topicRegister{ - Topics: topics, - Idx: uint(idx), - Pong: pong, - }) -} - -func (t *udp) sendTopicNodes(remote *Node, queryHash common.Hash, nodes []*Node) { - p := topicNodes{Echo: queryHash} - var sent bool - for _, result := range nodes { - if result.IP.Equal(t.net.tab.self.IP) || netutil.CheckRelayIP(remote.IP, result.IP) == nil { - p.Nodes = append(p.Nodes, nodeToRPC(result)) - } - if len(p.Nodes) == maxTopicNodes { - t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p) - p.Nodes = p.Nodes[:0] - sent = true - } - } - if !sent || len(p.Nodes) > 0 { - t.sendPacket(remote.ID, remote.addr(), byte(topicNodesPacket), p) - } -} - -func (t *udp) sendPacket(toid NodeID, toaddr *net.UDPAddr, ptype byte, req interface{}) (hash []byte, err error) { - //fmt.Println("sendPacket", nodeEvent(ptype), toaddr.String(), toid.String()) - packet, hash, err := encodePacket(t.priv, ptype, req) - if err != nil { - //fmt.Println(err) - return hash, err - } - log.Trace(fmt.Sprintf(">>> %v to %x@%v", nodeEvent(ptype), toid[:8], toaddr)) - if nbytes, err := t.conn.WriteToUDP(packet, toaddr); err != 
nil { - log.Trace(fmt.Sprint("UDP send failed:", err)) - } else { - egressTrafficMeter.Mark(int64(nbytes)) - } - //fmt.Println(err) - return hash, err -} - -// zeroed padding space for encodePacket. -var headSpace = make([]byte, headSize) - -func encodePacket(priv *ecdsa.PrivateKey, ptype byte, req interface{}) (p, hash []byte, err error) { - b := new(bytes.Buffer) - b.Write(headSpace) - b.WriteByte(ptype) - if err := rlp.Encode(b, req); err != nil { - log.Error(fmt.Sprint("error encoding packet:", err)) - return nil, nil, err - } - packet := b.Bytes() - sig, err := crypto.Sign(crypto.Keccak256(packet[headSize:]), priv) - if err != nil { - log.Error(fmt.Sprint("could not sign packet:", err)) - return nil, nil, err - } - copy(packet, versionPrefix) - copy(packet[versionPrefixSize:], sig) - hash = crypto.Keccak256(packet[versionPrefixSize:]) - return packet, hash, nil -} - -// readLoop runs in its own goroutine. it injects ingress UDP packets -// into the network loop. -func (t *udp) readLoop() { - defer t.conn.Close() - // Discovery packets are defined to be no larger than 1280 bytes. - // Packets larger than this size will be cut at the end and treated - // as invalid because their hash won't match. - buf := make([]byte, 1280) - for { - nbytes, from, err := t.conn.ReadFromUDP(buf) - ingressTrafficMeter.Mark(int64(nbytes)) - if netutil.IsTemporaryError(err) { - // Ignore temporary read errors. - log.Debug(fmt.Sprintf("Temporary read error: %v", err)) - continue - } else if err != nil { - // Shut down the loop for permament errors. - log.Debug(fmt.Sprintf("Read error: %v", err)) - return - } - t.handlePacket(from, buf[:nbytes]) - } -} - -func (t *udp) handlePacket(from *net.UDPAddr, buf []byte) error { - pkt := ingressPacket{remoteAddr: from} - if err := decodePacket(buf, &pkt); err != nil { - log.Debug(fmt.Sprintf("Bad packet from %v: %v", from, err)) - //fmt.Println("bad packet", err) - return err - } - t.net.reqReadPacket(pkt) - return nil -} - -func decodePacket(buffer []byte, pkt *ingressPacket) error { - if len(buffer) < headSize+1 { - return errPacketTooSmall - } - buf := make([]byte, len(buffer)) - copy(buf, buffer) - prefix, sig, sigdata := buf[:versionPrefixSize], buf[versionPrefixSize:headSize], buf[headSize:] - if !bytes.Equal(prefix, versionPrefix) { - return errBadPrefix - } - fromID, err := recoverNodeID(crypto.Keccak256(buf[headSize:]), sig) - if err != nil { - return err - } - pkt.rawData = buf - pkt.hash = crypto.Keccak256(buf[versionPrefixSize:]) - pkt.remoteID = fromID - switch pkt.ev = nodeEvent(sigdata[0]); pkt.ev { - case pingPacket: - pkt.data = new(ping) - case pongPacket: - pkt.data = new(pong) - case findnodePacket: - pkt.data = new(findnode) - case neighborsPacket: - pkt.data = new(neighbors) - case findnodeHashPacket: - pkt.data = new(findnodeHash) - case topicRegisterPacket: - pkt.data = new(topicRegister) - case topicQueryPacket: - pkt.data = new(topicQuery) - case topicNodesPacket: - pkt.data = new(topicNodes) - default: - return fmt.Errorf("unknown packet type: %d", sigdata[0]) - } - s := rlp.NewStream(bytes.NewReader(sigdata[1:]), 0) - err = s.Decode(pkt.data) - return err -} diff --git a/p2p/dnsdisc/client.go b/p2p/dnsdisc/client.go index b872784828..d3e8111ab5 100644 --- a/p2p/dnsdisc/client.go +++ b/p2p/dnsdisc/client.go @@ -217,8 +217,11 @@ type randomIterator struct { c *Client mu sync.Mutex - trees map[string]*clientTree // all trees lc linkCache // tracks tree dependencies + trees map[string]*clientTree // all trees + // buffers for syncableTrees + 
syncableList []*clientTree + disabledList []*clientTree } func (c *Client) newRandomIterator() *randomIterator { @@ -238,10 +241,10 @@ func (it *randomIterator) Node() *enode.Node { // Close closes the iterator. func (it *randomIterator) Close() { + it.cancelFn() + it.mu.Lock() defer it.mu.Unlock() - - it.cancelFn() it.trees = nil } @@ -264,7 +267,7 @@ func (it *randomIterator) addTree(url string) error { // nextNode syncs random tree entries until it finds a node. func (it *randomIterator) nextNode() *enode.Node { for { - ct := it.nextTree() + ct := it.pickTree() if ct == nil { return nil } @@ -282,26 +285,79 @@ func (it *randomIterator) nextNode() *enode.Node { } } -// nextTree returns a random tree. -func (it *randomIterator) nextTree() *clientTree { +// pickTree returns a random tree to sync from. +func (it *randomIterator) pickTree() *clientTree { it.mu.Lock() defer it.mu.Unlock() + // Rebuild the trees map if any links have changed. if it.lc.changed { it.rebuildTrees() it.lc.changed = false } - if len(it.trees) == 0 { - return nil + + for { + canSync, trees := it.syncableTrees() + switch { + case canSync: + // Pick a random tree. + return trees[rand.Intn(len(trees))] + case len(trees) > 0: + // No sync action can be performed on any tree right now. The only meaningful + // thing to do is waiting for any root record to get updated. + if !it.waitForRootUpdates(trees) { + // Iterator was closed while waiting. + return nil + } + default: + // There are no trees left, the iterator was closed. + return nil + } } - limit := rand.Intn(len(it.trees)) +} + +// syncableTrees finds trees on which any meaningful sync action can be performed. +func (it *randomIterator) syncableTrees() (canSync bool, trees []*clientTree) { + // Resize tree lists. + it.syncableList = it.syncableList[:0] + it.disabledList = it.disabledList[:0] + + // Partition them into the two lists. for _, ct := range it.trees { - if limit == 0 { - return ct + if ct.canSyncRandom() { + it.syncableList = append(it.syncableList, ct) + } else { + it.disabledList = append(it.disabledList, ct) } - limit-- } - return nil + if len(it.syncableList) > 0 { + return true, it.syncableList + } + return false, it.disabledList +} + +// waitForRootUpdates waits for the closest scheduled root check time on the given trees. +func (it *randomIterator) waitForRootUpdates(trees []*clientTree) bool { + var minTree *clientTree + var nextCheck mclock.AbsTime + for _, ct := range trees { + check := ct.nextScheduledRootCheck() + if minTree == nil || check < nextCheck { + minTree = ct + nextCheck = check + } + } + + sleep := nextCheck.Sub(it.c.clock.Now()) + it.c.cfg.Logger.Debug("DNS iterator waiting for root updates", "sleep", sleep, "tree", minTree.loc.domain) + timeout := it.c.clock.NewTimer(sleep) + defer timeout.Stop() + select { + case <-timeout.C(): + return true + case <-it.ctx.Done(): + return false // Iterator was closed. + } } // rebuildTrees rebuilds the 'trees' map. diff --git a/p2p/dnsdisc/client_test.go b/p2p/dnsdisc/client_test.go index 6a6705abf2..741bee4230 100644 --- a/p2p/dnsdisc/client_test.go +++ b/p2p/dnsdisc/client_test.go @@ -231,6 +231,53 @@ func TestIteratorRootRecheckOnFail(t *testing.T) { checkIterator(t, it, nodes) } +// This test checks that the iterator works correctly when the tree is initially empty. 
+func TestIteratorEmptyTree(t *testing.T) { + var ( + clock = new(mclock.Simulated) + nodes = testNodes(nodesSeed1, 1) + resolver = newMapResolver() + c = NewClient(Config{ + Resolver: resolver, + Logger: testlog.Logger(t, log.LvlTrace), + RecheckInterval: 20 * time.Minute, + RateLimit: 500, + }) + ) + c.clock = clock + tree1, url := makeTestTree("n", nil, nil) + tree2, _ := makeTestTree("n", nodes, nil) + resolver.add(tree1.ToTXT("n")) + + // Start the iterator. + node := make(chan *enode.Node) + it, err := c.NewIterator(url) + if err != nil { + t.Fatal(err) + } + go func() { + it.Next() + node <- it.Node() + }() + + // Wait for the client to get stuck in waitForRootUpdates. + clock.WaitForTimers(1) + + // Now update the root. + resolver.add(tree2.ToTXT("n")) + + // Wait for it to pick up the root change. + clock.Run(c.cfg.RecheckInterval) + select { + case n := <-node: + if n.ID() != nodes[0].ID() { + t.Fatalf("wrong node returned") + } + case <-time.After(5 * time.Second): + t.Fatal("it.Next() did not unblock within 5s of real time") + } +} + // updateSomeNodes applies ENR updates to some of the given nodes. func updateSomeNodes(keySeed int64, nodes []*enode.Node) { keys := testKeys(nodesSeed1, len(nodes)) diff --git a/p2p/dnsdisc/sync.go b/p2p/dnsdisc/sync.go index 36f02acba6..073547c90d 100644 --- a/p2p/dnsdisc/sync.go +++ b/p2p/dnsdisc/sync.go @@ -25,9 +25,9 @@ import ( "github.com/ethereum/go-ethereum/p2p/enode" ) -const ( - rootRecheckFailCount = 5 // update root if this many leaf requests fail -) +// This is the number of consecutive leaf requests that may fail before +// we consider re-resolving the tree root. +const rootRecheckFailCount = 5 // clientTree is a full tree being synced. type clientTree struct { @@ -89,13 +89,22 @@ func (ct *clientTree) syncRandom(ctx context.Context) (n *enode.Node, err error) ct.gcLinks() // Sync next random entry in ENR tree. Once every node has been visited, we simply - // start over. This is fine because entries are cached. + // start over. This is fine because entries are cached internally by the client LRU + // also by DNS resolvers. if ct.enrs.done() { ct.enrs = newSubtreeSync(ct.c, ct.loc, ct.root.eroot, false) } return ct.syncNextRandomENR(ctx) } +// canSyncRandom checks if any meaningful action can be performed by syncRandom. +func (ct *clientTree) canSyncRandom() bool { + // Note: the check for non-zero leaf count is very important here. + // If we're done syncing all nodes, and no leaves were found, the tree + // is empty and we can't use it for sync. + return ct.rootUpdateDue() || !ct.links.done() || !ct.enrs.done() || ct.enrs.leaves != 0 +} + // gcLinks removes outdated links from the global link cache. GC runs once // when the link sync finishes. func (ct *clientTree) gcLinks() { @@ -184,10 +193,14 @@ func (ct *clientTree) updateRoot(ctx context.Context) error { // rootUpdateDue returns true when a root update is needed. func (ct *clientTree) rootUpdateDue() bool { tooManyFailures := ct.leafFailCount > rootRecheckFailCount - scheduledCheck := ct.c.clock.Now().Sub(ct.lastRootCheck) > ct.c.cfg.RecheckInterval + scheduledCheck := ct.c.clock.Now() >= ct.nextScheduledRootCheck() return ct.root == nil || tooManyFailures || scheduledCheck } +func (ct *clientTree) nextScheduledRootCheck() mclock.AbsTime { + return ct.lastRootCheck.Add(ct.c.cfg.RecheckInterval) +} + // slowdownRootUpdate applies a delay to root resolution if is tried // too frequently. This avoids busy polling when the client is offline. 
// Returns true if the timeout passed, false if sync was canceled. @@ -218,10 +231,11 @@ type subtreeSync struct { root string missing []string // missing tree node hashes link bool // true if this sync is for the link tree + leaves int // counter of synced leaves } func newSubtreeSync(c *Client, loc *linkEntry, root string, link bool) *subtreeSync { - return &subtreeSync{c, loc, root, []string{root}, link} + return &subtreeSync{c, loc, root, []string{root}, link, 0} } func (ts *subtreeSync) done() bool { @@ -253,10 +267,12 @@ func (ts *subtreeSync) resolveNext(ctx context.Context, hash string) (entry, err if ts.link { return nil, errENRInLinkTree } + ts.leaves++ case *linkEntry: if !ts.link { return nil, errLinkInENRTree } + ts.leaves++ case *branchEntry: ts.missing = append(ts.missing, e.children...) } diff --git a/p2p/enode/nodedb.go b/p2p/enode/nodedb.go index bd066ce857..d62f383f0b 100644 --- a/p2p/enode/nodedb.go +++ b/p2p/enode/nodedb.go @@ -61,6 +61,10 @@ const ( dbVersion = 9 ) +var ( + errInvalidIP = errors.New("invalid IP") +) + var zeroIP = make(net.IP, 16) // DB is the node database, storing previously seen nodes and any collected metadata about @@ -163,7 +167,7 @@ func splitNodeItemKey(key []byte) (id ID, ip net.IP, field string) { } key = key[len(dbDiscoverRoot)+1:] // Split out the IP. - ip = net.IP(key[:16]) + ip = key[:16] if ip4 := ip.To4(); ip4 != nil { ip = ip4 } @@ -359,16 +363,25 @@ func (db *DB) expireNodes() { // LastPingReceived retrieves the time of the last ping packet received from // a remote node. func (db *DB) LastPingReceived(id ID, ip net.IP) time.Time { + if ip = ip.To16(); ip == nil { + return time.Time{} + } return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePing)), 0) } // UpdateLastPingReceived updates the last time we tried contacting a remote node. func (db *DB) UpdateLastPingReceived(id ID, ip net.IP, instance time.Time) error { + if ip = ip.To16(); ip == nil { + return errInvalidIP + } return db.storeInt64(nodeItemKey(id, ip, dbNodePing), instance.Unix()) } // LastPongReceived retrieves the time of the last successful pong from remote node. func (db *DB) LastPongReceived(id ID, ip net.IP) time.Time { + if ip = ip.To16(); ip == nil { + return time.Time{} + } // Launch expirer db.ensureExpirer() return time.Unix(db.fetchInt64(nodeItemKey(id, ip, dbNodePong)), 0) @@ -376,26 +389,41 @@ func (db *DB) LastPongReceived(id ID, ip net.IP) time.Time { // UpdateLastPongReceived updates the last pong time of a node. func (db *DB) UpdateLastPongReceived(id ID, ip net.IP, instance time.Time) error { + if ip = ip.To16(); ip == nil { + return errInvalidIP + } return db.storeInt64(nodeItemKey(id, ip, dbNodePong), instance.Unix()) } // FindFails retrieves the number of findnode failures since bonding. func (db *DB) FindFails(id ID, ip net.IP) int { + if ip = ip.To16(); ip == nil { + return 0 + } return int(db.fetchInt64(nodeItemKey(id, ip, dbNodeFindFails))) } // UpdateFindFails updates the number of findnode failures since bonding. func (db *DB) UpdateFindFails(id ID, ip net.IP, fails int) error { + if ip = ip.To16(); ip == nil { + return errInvalidIP + } return db.storeInt64(nodeItemKey(id, ip, dbNodeFindFails), int64(fails)) } // FindFailsV5 retrieves the discv5 findnode failure counter. func (db *DB) FindFailsV5(id ID, ip net.IP) int { + if ip = ip.To16(); ip == nil { + return 0 + } return int(db.fetchInt64(v5Key(id, ip, dbNodeFindFails))) } // UpdateFindFailsV5 stores the discv5 findnode failure counter. 
func (db *DB) UpdateFindFailsV5(id ID, ip net.IP, fails int) error { + if ip = ip.To16(); ip == nil { + return errInvalidIP + } return db.storeInt64(v5Key(id, ip, dbNodeFindFails), int64(fails)) } diff --git a/p2p/message.go b/p2p/message.go index 10b55a939c..bd048138c3 100644 --- a/p2p/message.go +++ b/p2p/message.go @@ -70,6 +70,10 @@ func (msg Msg) Discard() error { return err } +func (msg Msg) Time() time.Time { + return msg.ReceivedAt +} + type MsgReader interface { ReadMsg() (Msg, error) } diff --git a/p2p/nat/nat.go b/p2p/nat/nat.go index f47534784c..9d5519b9c4 100644 --- a/p2p/nat/nat.go +++ b/p2p/nat/nat.go @@ -219,9 +219,8 @@ func (n *autodisc) String() string { defer n.mu.Unlock() if n.found == nil { return n.what - } else { - return n.found.String() } + return n.found.String() } // wait blocks until auto-discovery has been performed. diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go index ab28b47a15..d3166f1d87 100644 --- a/p2p/nodestate/nodestate.go +++ b/p2p/nodestate/nodestate.go @@ -599,6 +599,7 @@ func (ns *NodeStateMachine) updateEnode(n *enode.Node) (enode.ID, *nodeInfo) { node := ns.nodes[id] if node != nil && n.Seq() > node.node.Seq() { node.node = n + node.dirty = true } return id, node } @@ -725,7 +726,7 @@ func (ns *NodeStateMachine) opFinish() { } ns.opPending = nil ns.opFlag = false - ns.opWait.Signal() + ns.opWait.Broadcast() } // Operation calls the given function as an operation callback. This allows the caller diff --git a/p2p/nodestate/nodestate_test.go b/p2p/nodestate/nodestate_test.go index 5f99a3da74..d06ad755e2 100644 --- a/p2p/nodestate/nodestate_test.go +++ b/p2p/nodestate/nodestate_test.go @@ -240,9 +240,8 @@ func uint64FieldEnc(field interface{}) ([]byte, error) { if u, ok := field.(uint64); ok { enc, err := rlp.EncodeToBytes(&u) return enc, err - } else { - return nil, errors.New("invalid field type") } + return nil, errors.New("invalid field type") } func uint64FieldDec(enc []byte) (interface{}, error) { @@ -254,9 +253,8 @@ func uint64FieldDec(enc []byte) (interface{}, error) { func stringFieldEnc(field interface{}) ([]byte, error) { if s, ok := field.(string); ok { return []byte(s), nil - } else { - return nil, errors.New("invalid field type") } + return nil, errors.New("invalid field type") } func stringFieldDec(enc []byte) (interface{}, error) { diff --git a/p2p/peer.go b/p2p/peer.go index a9c3cf01da..8ebc858392 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -158,6 +158,20 @@ func (p *Peer) Caps() []Cap { return p.rw.caps } +// RunningCap returns true if the peer is actively connected using any of the +// enumerated versions of a specific protocol, meaning that at least one of the +// versions is supported by both this node and the peer p. +func (p *Peer) RunningCap(protocol string, versions []uint) bool { + if proto, ok := p.running[protocol]; ok { + for _, ver := range versions { + if proto.Version == ver { + return true + } + } + } + return false +} + // RemoteAddr returns the remote address of the network connection. 
func (p *Peer) RemoteAddr() net.Addr { return p.rw.fd.RemoteAddr() diff --git a/p2p/server.go b/p2p/server.go index dd52297f8a..fc71548554 100644 --- a/p2p/server.go +++ b/p2p/server.go @@ -35,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/p2p/discover" - "github.com/ethereum/go-ethereum/p2p/discv5" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nat" @@ -105,7 +104,7 @@ type Config struct { // BootstrapNodesV5 are used to establish connectivity // with the rest of the network using the V5 discovery // protocol. - BootstrapNodesV5 []*discv5.Node `toml:",omitempty"` + BootstrapNodesV5 []*enode.Node `toml:",omitempty"` // Static nodes are used as pre-configured connections which are always // maintained and re-connected on disconnects. @@ -182,7 +181,7 @@ type Server struct { nodedb *enode.DB localnode *enode.LocalNode ntab *discover.UDPv4 - DiscV5 *discv5.Network + DiscV5 *discover.UDPv5 discmix *enode.FairMix dialsched *dialScheduler @@ -413,7 +412,7 @@ type sharedUDPConn struct { unhandled chan discover.ReadPacket } -// ReadFromUDP implements discv5.conn +// ReadFromUDP implements discover.UDPConn func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err error) { packet, ok := <-s.unhandled if !ok { @@ -427,7 +426,7 @@ func (s *sharedUDPConn) ReadFromUDP(b []byte) (n int, addr *net.UDPAddr, err err return l, packet.Addr, nil } -// Close implements discv5.conn +// Close implements discover.UDPConn func (s *sharedUDPConn) Close() error { return nil } @@ -586,7 +585,7 @@ func (srv *Server) setupDiscovery() error { Unhandled: unhandled, Log: srv.log, } - ntab, err := discover.ListenUDP(conn, srv.localnode, cfg) + ntab, err := discover.ListenV4(conn, srv.localnode, cfg) if err != nil { return err } @@ -596,20 +595,21 @@ func (srv *Server) setupDiscovery() error { // Discovery V5 if srv.DiscoveryV5 { - var ntab *discv5.Network + cfg := discover.Config{ + PrivateKey: srv.PrivateKey, + NetRestrict: srv.NetRestrict, + Bootnodes: srv.BootstrapNodesV5, + Log: srv.log, + } var err error if sconn != nil { - ntab, err = discv5.ListenUDP(srv.PrivateKey, sconn, "", srv.NetRestrict) + srv.DiscV5, err = discover.ListenV5(sconn, srv.localnode, cfg) } else { - ntab, err = discv5.ListenUDP(srv.PrivateKey, conn, "", srv.NetRestrict) + srv.DiscV5, err = discover.ListenV5(conn, srv.localnode, cfg) } if err != nil { return err } - if err := ntab.SetFallbackNodes(srv.BootstrapNodesV5); err != nil { - return err - } - srv.DiscV5 = ntab } return nil } @@ -854,13 +854,18 @@ func (srv *Server) listenLoop() { <-slots var ( - fd net.Conn - err error + fd net.Conn + err error + lastLog time.Time ) for { fd, err = srv.listener.Accept() if netutil.IsTemporaryError(err) { - srv.log.Debug("Temporary read error", "err", err) + if time.Since(lastLog) > 1*time.Second { + srv.log.Debug("Temporary read error", "err", err) + lastLog = time.Now() + } + time.Sleep(time.Millisecond * 200) continue } else if err != nil { srv.log.Debug("Read error", "err", err) diff --git a/p2p/simulations/adapters/exec.go b/p2p/simulations/adapters/exec.go index 0ed3deab38..35ccdfb068 100644 --- a/p2p/simulations/adapters/exec.go +++ b/p2p/simulations/adapters/exec.go @@ -115,7 +115,6 @@ func (e *ExecAdapter) NewNode(config *NodeConfig) (Node, error) { conf.Stack.P2P.EnableMsgEvents = config.EnableMsgEvents conf.Stack.P2P.NoDiscovery = true conf.Stack.P2P.NAT = nil - 
conf.Stack.NoUSB = true // Listen on a localhost port, which we set when we // initialise NodeConfig (usually a random port) diff --git a/p2p/simulations/adapters/inproc.go b/p2p/simulations/adapters/inproc.go index 4fc7abc06a..1cb26a8ea0 100644 --- a/p2p/simulations/adapters/inproc.go +++ b/p2p/simulations/adapters/inproc.go @@ -100,7 +100,6 @@ func (s *SimAdapter) NewNode(config *NodeConfig) (Node, error) { EnableMsgEvents: config.EnableMsgEvents, }, ExternalSigner: config.ExternalSigner, - NoUSB: true, Logger: log.New("node.id", id.String()), }) if err != nil { diff --git a/p2p/simulations/network.go b/p2p/simulations/network.go index a54db4ea68..9b5e2c37f5 100644 --- a/p2p/simulations/network.go +++ b/p2p/simulations/network.go @@ -454,9 +454,8 @@ func (net *Network) getNodeIDs(excludeIDs []enode.ID) []enode.ID { if len(excludeIDs) > 0 { // Return the difference of nodeIDs and excludeIDs return filterIDs(nodeIDs, excludeIDs) - } else { - return nodeIDs } + return nodeIDs } // GetNodes returns the existing nodes. @@ -472,9 +471,8 @@ func (net *Network) getNodes(excludeIDs []enode.ID) []*Node { if len(excludeIDs) > 0 { nodeIDs := net.getNodeIDs(excludeIDs) return net.getNodesByID(nodeIDs) - } else { - return net.Nodes } + return net.Nodes } // GetNodesByID returns existing nodes with the given enode.IDs. @@ -1098,7 +1096,6 @@ func (net *Network) executeNodeEvent(e *Event) error { func (net *Network) executeConnEvent(e *Event) error { if e.Conn.Up { return net.Connect(e.Conn.One, e.Conn.Other) - } else { - return net.Disconnect(e.Conn.One, e.Conn.Other) } + return net.Disconnect(e.Conn.One, e.Conn.Other) } diff --git a/params/bootnodes.go b/params/bootnodes.go index d4512bf789..f36ad61729 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -67,12 +67,31 @@ var GoerliBootnodes = []string{ "enode://a59e33ccd2b3e52d578f1fbd70c6f9babda2650f0760d6ff3b37742fdcdfdb3defba5d56d315b40c46b70198c7621e63ffa3f987389c7118634b0fefbbdfa7fd@51.15.119.157:40303", } -// YoloV2Bootnodes are the enode URLs of the P2P bootstrap nodes running on the -// YOLOv2 ephemeral test network. -var YoloV2Bootnodes = []string{ +// YoloV3Bootnodes are the enode URLs of the P2P bootstrap nodes running on the +// YOLOv3 ephemeral test network. 
+// TODO: Set Yolov3 bootnodes +var YoloV3Bootnodes = []string{ "enode://9e1096aa59862a6f164994cb5cb16f5124d6c992cdbf4535ff7dea43ea1512afe5448dca9df1b7ab0726129603f1a3336b631e4d7a1a44c94daddd03241587f9@3.9.20.133:30303", } +var V5Bootnodes = []string{ + // Teku team's bootnode + "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2Gxb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNlY3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA", + "enr:-KG4QDyytgmE4f7AnvW-ZaUOIi9i79qX4JwjRAiXBZCU65wOfBu-3Nb5I7b_Rmg3KCOcZM_C3y5pg7EBU5XGrcLTduQEhGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQ2_DUbiXNlY3AyNTZrMaEDKnz_-ps3UUOfHWVYaskI5kWYO_vtYMGYCQRAR3gHDouDdGNwgiMog3VkcIIjKA", + // Prylab team's bootnodes + "enr:-Ku4QImhMc1z8yCiNJ1TyUxdcfNucje3BGwEHzodEZUan8PherEo4sF7pPHPSIB1NNuSg5fZy7qFsjmUKs2ea1Whi0EBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQOVphkDqal4QzPMksc5wnpuC3gvSC8AfbFOnZY_On34wIN1ZHCCIyg", + "enr:-Ku4QP2xDnEtUXIjzJ_DhlCRN9SN99RYQPJL92TMlSv7U5C1YnYLjwOQHgZIUXw6c-BvRg2Yc2QsZxxoS_pPRVe0yK8Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMeFF5GrS7UZpAH2Ly84aLK-TyvH-dRo0JM1i8yygH50YN1ZHCCJxA", + "enr:-Ku4QPp9z1W4tAO8Ber_NQierYaOStqhDqQdOPY3bB3jDgkjcbk6YrEnVYIiCBbTxuar3CzS528d2iE7TdJsrL-dEKoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpD1pf1CAAAAAP__________gmlkgnY0gmlwhBLf22SJc2VjcDI1NmsxoQMw5fqqkw2hHC4F5HZZDPsNmPdB1Gi8JPQK7pRc9XHh-oN1ZHCCKvg", + // Lighthouse team's bootnodes + "enr:-IS4QLkKqDMy_ExrpOEWa59NiClemOnor-krjp4qoeZwIw2QduPC-q7Kz4u1IOWf3DDbdxqQIgC4fejavBOuUPy-HE4BgmlkgnY0gmlwhCLzAHqJc2VjcDI1NmsxoQLQSJfEAHZApkm5edTCZ_4qps_1k_ub2CxHFxi-gr2JMIN1ZHCCIyg", + "enr:-IS4QDAyibHCzYZmIYZCjXwU9BqpotWmv2BsFlIq1V31BwDDMJPFEbox1ijT5c2Ou3kvieOKejxuaCqIcjxBjJ_3j_cBgmlkgnY0gmlwhAMaHiCJc2VjcDI1NmsxoQJIdpj_foZ02MXz4It8xKD7yUHTBx7lVFn3oeRP21KRV4N1ZHCCIyg", + // EF bootnodes + "enr:-Ku4QHqVeJ8PPICcWk1vSn_XcSkjOkNiTg6Fmii5j6vUQgvzMc9L1goFnLKgXqBJspJjIsB91LTOleFmyWWrFVATGngBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhAMRHkWJc2VjcDI1NmsxoQKLVXFOhp2uX6jeT0DvvDpPcU8FWMjQdR4wMuORMhpX24N1ZHCCIyg", + "enr:-Ku4QG-2_Md3sZIAUebGYT6g0SMskIml77l6yR-M_JXc-UdNHCmHQeOiMLbylPejyJsdAPsTHJyjJB2sYGDLe0dn8uYBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhBLY-NyJc2VjcDI1NmsxoQORcM6e19T1T9gi7jxEZjk_sjVLGFscUNqAY9obgZaxbIN1ZHCCIyg", + "enr:-Ku4QPn5eVhcoF1opaFEvg1b6JNFD2rqVkHQ8HApOKK61OIcIXD127bKWgAtbwI7pnxx6cDyk_nI88TrZKQaGMZj0q0Bh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDayLMaJc2VjcDI1NmsxoQK2sBOLGcUb4AwuYzFuAVCaNHA-dy24UuEKkeFNgCVCsIN1ZHCCIyg", + "enr:-Ku4QEWzdnVtXc2Q0ZVigfCGggOVB2Vc1ZCPEc6j21NIFLODSJbvNaef1g4PxhPwl_3kax86YPheFUSLXPRs98vvYsoBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpC1MD8qAAAAAP__________gmlkgnY0gmlwhDZBrP2Jc2VjcDI1NmsxoQM6jr8Rb1ktLEsVcKAPa08wCsKUmvoQ8khiOl_SLozf9IN1ZHCCIyg", +} + const dnsPrefix = "enrtree://AKA3AM6LPBYEUDMVNU3BSVQJ5AD45Y7YPOHJLEF6W26QOE4VTUDPE@" // KnownDNSNetwork returns the address of a public DNS-based node list for the given diff --git a/params/config.go b/params/config.go index ade81408a8..9b8c5cf4ed 100644 --- a/params/config.go +++ b/params/config.go @@ -31,8 +31,7 @@ var ( RopstenGenesisHash = common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d") RinkebyGenesisHash = common.HexToHash("0x6341fd3daf94b748c72ced5a5b26028f2474f5f00d824504e4fa37a75767e177") GoerliGenesisHash = common.HexToHash("0xbf7e331f7f7c1dd2e05159666b3bf8bc7a8a3a9eb1d518969eab529dd9b88c1a") - // TODO: update with 
yolov2 values - YoloV2GenesisHash = common.HexToHash("0x498a7239036dd2cd09e2bb8a80922b78632017958c332b42044c250d603a8a3e") + YoloV3GenesisHash = common.HexToHash("0x374f07cc7fa7c251fc5f36849f574b43db43600526410349efdca2bcea14101a") ) // TrustedCheckpoints associates each known checkpoint with the genesis hash of @@ -57,27 +56,27 @@ var ( // MainnetChainConfig is the chain parameters to run a node on the main network. MainnetChainConfig = &ChainConfig{ ChainID: big.NewInt(1), - HomesteadBlock: big.NewInt(1150000), - DAOForkBlock: big.NewInt(1920000), + HomesteadBlock: big.NewInt(1_150_000), + DAOForkBlock: big.NewInt(1_920_000), DAOForkSupport: true, - EIP150Block: big.NewInt(2463000), + EIP150Block: big.NewInt(2_463_000), EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), - EIP155Block: big.NewInt(2675000), - EIP158Block: big.NewInt(2675000), - ByzantiumBlock: big.NewInt(4370000), - ConstantinopleBlock: big.NewInt(7280000), - PetersburgBlock: big.NewInt(7280000), - IstanbulBlock: big.NewInt(9069000), - MuirGlacierBlock: big.NewInt(9200000), + EIP155Block: big.NewInt(2_675_000), + EIP158Block: big.NewInt(2_675_000), + ByzantiumBlock: big.NewInt(4_370_000), + ConstantinopleBlock: big.NewInt(7_280_000), + PetersburgBlock: big.NewInt(7_280_000), + IstanbulBlock: big.NewInt(9_069_000), + MuirGlacierBlock: big.NewInt(9_200_000), Ethash: new(EthashConfig), } // MainnetTrustedCheckpoint contains the light client trusted checkpoint for the main network. MainnetTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 336, - SectionHead: common.HexToHash("0xd42b78902b6527a80337bf1bc372a3ccc3db97e9cc7cf421ca047ae9076c716b"), - CHTRoot: common.HexToHash("0xd97f3b30f7e0cb958e4c67c53ec27745e5a165e33e56821b86523dfee62b783a"), - BloomRoot: common.HexToHash("0xf3cbfd070fababfe2adc9b23fc02c731f6ca2cce6646b3ede4ef2db06092ccce"), + SectionIndex: 364, + SectionHead: common.HexToHash("0x3fd20ff221f5e962bb66f57a61973bfc2ba959879a6509384a80a45d208b5afc"), + CHTRoot: common.HexToHash("0xe35b3b807f4e9427fb4e2929961c78a9dc10f503a538319031cc7d00946a0591"), + BloomRoot: common.HexToHash("0x340553b378b2db214b898be15c80ac5be7caffc2e6448fd6f7aff23290d89296"), } // MainnetCheckpointOracle contains a set of configs for the main network oracle. @@ -103,20 +102,20 @@ var ( EIP150Hash: common.HexToHash("0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d"), EIP155Block: big.NewInt(10), EIP158Block: big.NewInt(10), - ByzantiumBlock: big.NewInt(1700000), - ConstantinopleBlock: big.NewInt(4230000), - PetersburgBlock: big.NewInt(4939394), - IstanbulBlock: big.NewInt(6485846), - MuirGlacierBlock: big.NewInt(7117117), + ByzantiumBlock: big.NewInt(1_700_000), + ConstantinopleBlock: big.NewInt(4_230_000), + PetersburgBlock: big.NewInt(4_939_394), + IstanbulBlock: big.NewInt(6_485_846), + MuirGlacierBlock: big.NewInt(7_117_117), Ethash: new(EthashConfig), } // RopstenTrustedCheckpoint contains the light client trusted checkpoint for the Ropsten test network. 
RopstenTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 269, - SectionHead: common.HexToHash("0x290a9eb65e65c64601d1b05522533ed502098a246736b348502a170818a33d64"), - CHTRoot: common.HexToHash("0x530ebac02264227277d0a16b0819ef96a2011a6e1e66523ebff8040f4a3437ca"), - BloomRoot: common.HexToHash("0x480cd5b3198a0767022902130546854a2e8867cce573c1cf0ce54e67a7bf5efb"), + SectionIndex: 279, + SectionHead: common.HexToHash("0x4a4912848d4c06090097073357c10015d11c6f4544a0f93cbdd584701c3b7d58"), + CHTRoot: common.HexToHash("0x9053b7867ae921e80a4e2f5a4b15212e4af3d691ca712fb33dc150e9c6ea221c"), + BloomRoot: common.HexToHash("0x3dc04cb1be7ddc271f3f83469b47b76184a79d7209ef51d85b1539ea6d25a645"), } // RopstenCheckpointOracle contains a set of configs for the Ropsten test network oracle. @@ -142,10 +141,10 @@ var ( EIP150Hash: common.HexToHash("0x9b095b36c15eaf13044373aef8ee0bd3a382a5abb92e402afa44b8249c3a90e9"), EIP155Block: big.NewInt(3), EIP158Block: big.NewInt(3), - ByzantiumBlock: big.NewInt(1035301), - ConstantinopleBlock: big.NewInt(3660663), - PetersburgBlock: big.NewInt(4321234), - IstanbulBlock: big.NewInt(5435345), + ByzantiumBlock: big.NewInt(1_035_301), + ConstantinopleBlock: big.NewInt(3_660_663), + PetersburgBlock: big.NewInt(4_321_234), + IstanbulBlock: big.NewInt(5_435_345), MuirGlacierBlock: nil, Clique: &CliqueConfig{ Period: 15, @@ -155,10 +154,10 @@ var ( // RinkebyTrustedCheckpoint contains the light client trusted checkpoint for the Rinkeby test network. RinkebyTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 223, - SectionHead: common.HexToHash("0x03ca0d5e3a931c77cd7a97bbaa2d9e4edc4549c621dc1d223a29f10c86a4a16a"), - CHTRoot: common.HexToHash("0x6573dbdd91b2958b446bd04d67c23e5f14b2510ac96e8df1b6a894dc49e37c6c"), - BloomRoot: common.HexToHash("0x28a35042a4e88efbac55fe566faf7fce000dc436f17fd4cb4b081c9cd793e1a7"), + SectionIndex: 248, + SectionHead: common.HexToHash("0x26874cf023695778cc3175d1bec19894204d8d0b756b587e81e35f300dc5b33c"), + CHTRoot: common.HexToHash("0xc129d1ed6673c5d3e1068e9d97244e72952b7ca08acbd7b3bfa58bc3085c442c"), + BloomRoot: common.HexToHash("0x1dafe79dcd7d348782aa834a4a4397890d9ad90643736791132ed5c16879a037"), } // RinkebyCheckpointOracle contains a set of configs for the Rinkeby test network oracle. @@ -185,7 +184,7 @@ var ( ByzantiumBlock: big.NewInt(0), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), - IstanbulBlock: big.NewInt(1561651), + IstanbulBlock: big.NewInt(1_561_651), MuirGlacierBlock: nil, Clique: &CliqueConfig{ Period: 15, @@ -195,10 +194,10 @@ var ( // GoerliTrustedCheckpoint contains the light client trusted checkpoint for the Görli test network. GoerliTrustedCheckpoint = &TrustedCheckpoint{ - SectionIndex: 107, - SectionHead: common.HexToHash("0xff3ae39199fa191894de419e7f673c8627aa8cc7af924b90f36635b6add375f2"), - CHTRoot: common.HexToHash("0x27d59d60c652425b6b593a882f55a4ff57f24e470a810a6e3c8ba71833a20220"), - BloomRoot: common.HexToHash("0x3c14066d8bb3733780c06b8165768dbb9dd23b75f56012fe5f2fb3c2fb70cadb"), + SectionIndex: 132, + SectionHead: common.HexToHash("0x29fa240c97b47ecbfef3fea8b3cff035d93154d1d48b25e3333cf2f7067c5324"), + CHTRoot: common.HexToHash("0x85e5c59e5b202284291405dadc40dc36ab6417bd189fb18be24f6dcab6b80511"), + BloomRoot: common.HexToHash("0x0b7afdd200477f46e982e2cabc822ac454424986fa50d899685dfaeede1f882d"), } // GoerliCheckpointOracle contains a set of configs for the Goerli test network oracle. 
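The rewritten fork-block literals in the hunks above change only their presentation: Go (1.13 and later) accepts underscores as digit separators in numeric literals, so `big.NewInt(4_370_000)` and `big.NewInt(4370000)` produce the same value. A small standalone sketch of that equivalence, with variable names that are illustrative only and not part of the change:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	// Underscore digit separators (Go 1.13+) are purely cosmetic;
	// both literals denote the same integer value.
	plain := big.NewInt(4370000)
	grouped := big.NewInt(4_370_000)
	fmt.Println(plain.Cmp(grouped) == 0) // prints: true
}
```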
@@ -214,9 +213,9 @@ var ( Threshold: 2, } - // YoloV2ChainConfig contains the chain parameters to run a node on the YOLOv2 test network. - YoloV2ChainConfig = &ChainConfig{ - ChainID: big.NewInt(133519467574834), + // YoloV3ChainConfig contains the chain parameters to run a node on the YOLOv3 test network. + YoloV3ChainConfig = &ChainConfig{ + ChainID: new(big.Int).SetBytes([]byte("yolov3x")), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: true, @@ -228,7 +227,8 @@ var ( PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), MuirGlacierBlock: nil, - YoloV2Block: big.NewInt(0), + BerlinBlock: nil, // Don't enable Berlin directly, we're YOLOing it + YoloV3Block: big.NewInt(0), Clique: &CliqueConfig{ Period: 15, Epoch: 30000, @@ -240,16 +240,16 @@ var ( // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, new(EthashConfig), nil} + AllEthashProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} // AllCliqueProtocolChanges contains every protocol change (EIPs) introduced // and accepted by the Ethereum core developers into the Clique consensus. // // This configuration is intentionally not using keyed fields to force anyone // adding flags to the config to also have to set these fields. - AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} + AllCliqueProtocolChanges = &ChainConfig{big.NewInt(1337), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, &CliqueConfig{Period: 0, Epoch: 30000}} - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, new(EthashConfig), nil} + TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, new(EthashConfig), nil} TestRules = TestChainConfig.Rules(new(big.Int)) ) @@ -320,8 +320,9 @@ type ChainConfig struct { PetersburgBlock *big.Int `json:"petersburgBlock,omitempty"` // Petersburg switch block (nil = same as Constantinople) IstanbulBlock *big.Int `json:"istanbulBlock,omitempty"` // Istanbul switch block (nil = no fork, 0 = already on istanbul) MuirGlacierBlock *big.Int `json:"muirGlacierBlock,omitempty"` // Eip-2384 (bomb delay) switch block (nil = no fork, 0 = already activated) + BerlinBlock *big.Int `json:"berlinBlock,omitempty"` // Berlin switch block (nil = no fork, 0 = already on berlin) - YoloV2Block *big.Int `json:"yoloV2Block,omitempty"` // YOLO v2: Gas repricings TODO @holiman add EIP references + YoloV3Block *big.Int 
`json:"yoloV3Block,omitempty"` // YOLO v3: Gas repricings TODO @holiman add EIP references EWASMBlock *big.Int `json:"ewasmBlock,omitempty"` // EWASM switch block (nil = no fork, 0 = already activated) // Various consensus engines @@ -359,7 +360,7 @@ func (c *ChainConfig) String() string { default: engine = "unknown" } - return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, YOLO v2: %v, Engine: %v}", + return fmt.Sprintf("{ChainID: %v Homestead: %v DAO: %v DAOSupport: %v EIP150: %v EIP155: %v EIP158: %v Byzantium: %v Constantinople: %v Petersburg: %v Istanbul: %v, Muir Glacier: %v, Berlin: %v, YOLO v3: %v, Engine: %v}", c.ChainID, c.HomesteadBlock, c.DAOForkBlock, @@ -372,7 +373,8 @@ func (c *ChainConfig) String() string { c.PetersburgBlock, c.IstanbulBlock, c.MuirGlacierBlock, - c.YoloV2Block, + c.BerlinBlock, + c.YoloV3Block, engine, ) } @@ -429,9 +431,9 @@ func (c *ChainConfig) IsIstanbul(num *big.Int) bool { return isForked(c.IstanbulBlock, num) } -// IsYoloV2 returns whether num is either equal to the YoloV1 fork block or greater. -func (c *ChainConfig) IsYoloV2(num *big.Int) bool { - return isForked(c.YoloV2Block, num) +// IsBerlin returns whether num is either equal to the Berlin fork block or greater. +func (c *ChainConfig) IsBerlin(num *big.Int) bool { + return isForked(c.BerlinBlock, num) || isForked(c.YoloV3Block, num) } // IsEWASM returns whether num represents a block number after the EWASM fork @@ -477,7 +479,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "petersburgBlock", block: c.PetersburgBlock}, {name: "istanbulBlock", block: c.IstanbulBlock}, {name: "muirGlacierBlock", block: c.MuirGlacierBlock, optional: true}, - {name: "yoloV2Block", block: c.YoloV2Block}, + {name: "berlinBlock", block: c.BerlinBlock}, } { if lastFork.name != "" { // Next one must be higher number @@ -541,8 +543,11 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, head *big.Int) *Confi if isForkIncompatible(c.MuirGlacierBlock, newcfg.MuirGlacierBlock, head) { return newCompatError("Muir Glacier fork block", c.MuirGlacierBlock, newcfg.MuirGlacierBlock) } - if isForkIncompatible(c.YoloV2Block, newcfg.YoloV2Block, head) { - return newCompatError("YOLOv2 fork block", c.YoloV2Block, newcfg.YoloV2Block) + if isForkIncompatible(c.BerlinBlock, newcfg.BerlinBlock, head) { + return newCompatError("Berlin fork block", c.BerlinBlock, newcfg.BerlinBlock) + } + if isForkIncompatible(c.YoloV3Block, newcfg.YoloV3Block, head) { + return newCompatError("YOLOv3 fork block", c.YoloV3Block, newcfg.YoloV3Block) } if isForkIncompatible(c.EWASMBlock, newcfg.EWASMBlock, head) { return newCompatError("ewasm fork block", c.EWASMBlock, newcfg.EWASMBlock) @@ -614,7 +619,7 @@ type Rules struct { ChainID *big.Int IsHomestead, IsEIP150, IsEIP155, IsEIP158 bool IsByzantium, IsConstantinople, IsPetersburg, IsIstanbul bool - IsYoloV2 bool + IsBerlin bool } // Rules ensures c's ChainID is not nil. @@ -633,6 +638,6 @@ func (c *ChainConfig) Rules(num *big.Int) Rules { IsConstantinople: c.IsConstantinople(num), IsPetersburg: c.IsPetersburg(num), IsIstanbul: c.IsIstanbul(num), - IsYoloV2: c.IsYoloV2(num), + IsBerlin: c.IsBerlin(num), } } diff --git a/params/protocol_params.go b/params/protocol_params.go index fd5452bf15..88f1a06e12 100644 --- a/params/protocol_params.go +++ b/params/protocol_params.go @@ -60,20 +60,23 @@ const ( JumpdestGas uint64 = 1 // Once per JUMPDEST operation. 
EpochDuration uint64 = 30000 // Duration between proof-of-work epochs. - CreateDataGas uint64 = 200 // - CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. - ExpGas uint64 = 10 // Once per EXP instruction - LogGas uint64 = 375 // Per LOG* operation. - CopyGas uint64 = 3 // - StackLimit uint64 = 1024 // Maximum size of VM stack allowed. - TierStepGas uint64 = 0 // Once per operation, for a selection of them. - LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. - CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. - Create2Gas uint64 = 32000 // Once per CREATE2 operation - SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. - MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. - TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. - TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul) + CreateDataGas uint64 = 200 // + CallCreateDepth uint64 = 1024 // Maximum depth of call/create stack. + ExpGas uint64 = 10 // Once per EXP instruction + LogGas uint64 = 375 // Per LOG* operation. + CopyGas uint64 = 3 // + StackLimit uint64 = 1024 // Maximum size of VM stack allowed. + TierStepGas uint64 = 0 // Once per operation, for a selection of them. + LogTopicGas uint64 = 375 // Multiplied by the * of the LOG*, per LOG transaction. e.g. LOG0 incurs 0 * c_txLogTopicGas, LOG4 incurs 4 * c_txLogTopicGas. + CreateGas uint64 = 32000 // Once per CREATE operation & contract-creation transaction. + Create2Gas uint64 = 32000 // Once per CREATE2 operation + SelfdestructRefundGas uint64 = 24000 // Refunded following a selfdestruct operation. + MemoryGas uint64 = 3 // Times the address of the (highest referenced byte in memory + 1). NOTE: referencing happens on read, write and in instructions such as RETURN and CALL. + + TxDataNonZeroGasFrontier uint64 = 68 // Per byte of data attached to a transaction that is not equal to zero. NOTE: Not payable on data of calls between transactions. + TxDataNonZeroGasEIP2028 uint64 = 16 // Per byte of non zero data attached to a transaction after EIP 2028 (part in Istanbul) + TxAccessListAddressGas uint64 = 2400 // Per address specified in EIP 2930 access list + TxAccessListStorageKeyGas uint64 = 1900 // Per storage key specified in EIP 2930 access list // These have been changed during the course of the chain CallGasFrontier uint64 = 40 // Once per CALL operation & message call transaction. 
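The two access-list constants added in the hunk above carry the EIP-2930 charges: 2400 gas per address and 1900 gas per storage key listed in a transaction's access list. A minimal sketch of how they combine into the access-list share of intrinsic gas, using a hypothetical `accessListGas` helper rather than the actual intrinsic-gas code:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/params"
)

// accessListGas is a hypothetical helper illustrating how the new
// constants combine: each listed address and each listed storage key
// adds a flat charge to the transaction's intrinsic gas.
func accessListGas(addresses, storageKeys uint64) uint64 {
	return addresses*params.TxAccessListAddressGas +
		storageKeys*params.TxAccessListStorageKeyGas
}

func main() {
	// One address with two storage keys: 2400 + 2*1900 = 6200 gas.
	fmt.Println(accessListGas(1, 2))
}
```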
@@ -116,7 +119,6 @@ const ( Ripemd160PerWordGas uint64 = 120 // Per-word price for a RIPEMD160 operation IdentityBaseGas uint64 = 15 // Base price for a data copy operation IdentityPerWordGas uint64 = 3 // Per-work price for a data copy operation - ModExpQuadCoeffDiv uint64 = 20 // Divisor for the quadratic particle of the big int modular exponentiation Bn256AddGasByzantium uint64 = 500 // Byzantium gas needed for an elliptic curve addition Bn256AddGasIstanbul uint64 = 150 // Gas needed for an elliptic curve addition diff --git a/params/version.go b/params/version.go index c0f356889a..de5eac3af9 100644 --- a/params/version.go +++ b/params/version.go @@ -22,8 +22,8 @@ import ( const ( VersionMajor = 1 // Major version component of the current release - VersionMinor = 9 // Minor version component of the current release - VersionPatch = 24 // Patch version component of the current release + VersionMinor = 10 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release VersionMeta = "stable" // Version metadata to append to the version string ) diff --git a/rlp/decode.go b/rlp/decode.go index 5f3f5eedfd..79b7ef0626 100644 --- a/rlp/decode.go +++ b/rlp/decode.go @@ -952,7 +952,13 @@ func (s *Stream) readFull(buf []byte) (err error) { n += nn } if err == io.EOF { - err = io.ErrUnexpectedEOF + if n < len(buf) { + err = io.ErrUnexpectedEOF + } else { + // Readers are allowed to give EOF even though the read succeeded. + // In such cases, we discard the EOF, like io.ReadFull() does. + err = nil + } } return err } diff --git a/rlp/decode_test.go b/rlp/decode_test.go index 167e9974b9..d94c3969b2 100644 --- a/rlp/decode_test.go +++ b/rlp/decode_test.go @@ -665,6 +665,26 @@ func TestDecodeWithByteReader(t *testing.T) { }) } +func testDecodeWithEncReader(t *testing.T, n int) { + s := strings.Repeat("0", n) + _, r, _ := EncodeToReader(s) + var decoded string + err := Decode(r, &decoded) + if err != nil { + t.Errorf("Unexpected decode error with n=%v: %v", n, err) + } + if decoded != s { + t.Errorf("Decode mismatch with n=%v", n) + } +} + +// This is a regression test checking that decoding from encReader +// works for RLP values of size 8192 bytes or more. +func TestDecodeWithEncReader(t *testing.T) { + testDecodeWithEncReader(t, 8188) // length with header is 8191 + testDecodeWithEncReader(t, 8189) // length with header is 8192 +} + // plainReader reads from a byte slice but does not // implement ReadByte. It is also not recognized by the // size validation. This is useful to test how the decoder diff --git a/rlp/encode_test.go b/rlp/encode_test.go index 6803745197..418ee10a35 100644 --- a/rlp/encode_test.go +++ b/rlp/encode_test.go @@ -39,9 +39,8 @@ func (e *testEncoder) EncodeRLP(w io.Writer) error { } if e.err != nil { return e.err - } else { - w.Write([]byte{0, 1, 0, 1, 0, 1, 0, 1, 0, 1}) } + w.Write([]byte{0, 1, 0, 1, 0, 1, 0, 1, 0, 1}) return nil } diff --git a/rpc/client.go b/rpc/client.go index 91e68e73e6..198ce63573 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -404,9 +404,8 @@ func (c *Client) Notify(ctx context.Context, method string, args ...interface{}) if c.isHTTP { return c.sendHTTP(ctx, op, msg) - } else { - return c.send(ctx, op, msg) } + return c.send(ctx, op, msg) } // EthSubscribe registers a subscripion under the "eth" namespace. @@ -415,6 +414,7 @@ func (c *Client) EthSubscribe(ctx context.Context, channel interface{}, args ... } // ShhSubscribe registers a subscripion under the "shh" namespace. 
+// Deprecated: use Subscribe(ctx, "shh", ...). func (c *Client) ShhSubscribe(ctx context.Context, channel interface{}, args ...interface{}) (*ClientSubscription, error) { return c.Subscribe(ctx, "shh", channel, args...) } diff --git a/rpc/client_test.go b/rpc/client_test.go index 5b1f960352..5d301a07a7 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -427,7 +427,7 @@ func TestClientNotificationStorm(t *testing.T) { } doTest(8000, false) - doTest(23000, true) + doTest(24000, true) } func TestClientSetHeader(t *testing.T) { diff --git a/rpc/endpoints.go b/rpc/endpoints.go index 9fc0705172..d78ebe2858 100644 --- a/rpc/endpoints.go +++ b/rpc/endpoints.go @@ -18,6 +18,7 @@ package rpc import ( "net" + "strings" "github.com/ethereum/go-ethereum/log" ) @@ -25,13 +26,22 @@ import ( // StartIPCEndpoint starts an IPC endpoint. func StartIPCEndpoint(ipcEndpoint string, apis []API) (net.Listener, *Server, error) { // Register all the APIs exposed by the services. - handler := NewServer() + var ( + handler = NewServer() + regMap = make(map[string]struct{}) + registered []string + ) for _, api := range apis { if err := handler.RegisterName(api.Namespace, api.Service); err != nil { + log.Info("IPC registration failed", "namespace", api.Namespace, "error", err) return nil, nil, err } - log.Debug("IPC registered", "namespace", api.Namespace) + if _, ok := regMap[api.Namespace]; !ok { + registered = append(registered, api.Namespace) + regMap[api.Namespace] = struct{}{} + } } + log.Debug("IPCs registered", "namespaces", strings.Join(registered, ",")) // All APIs registered, start the IPC listener. listener, err := ipcListen(ipcEndpoint) if err != nil { diff --git a/rpc/http_test.go b/rpc/http_test.go index fc939ae48f..b75af67c52 100644 --- a/rpc/http_test.go +++ b/rpc/http_test.go @@ -98,3 +98,28 @@ func confirmHTTPRequestYieldsStatusCode(t *testing.T, method, contentType, body func TestHTTPResponseWithEmptyGet(t *testing.T) { confirmHTTPRequestYieldsStatusCode(t, http.MethodGet, "", "", http.StatusOK) } + +// This checks that maxRequestContentLength is not applied to the response of a request. +func TestHTTPRespBodyUnlimited(t *testing.T) { + const respLength = maxRequestContentLength * 3 + + s := NewServer() + defer s.Stop() + s.RegisterName("test", largeRespService{respLength}) + ts := httptest.NewServer(s) + defer ts.Close() + + c, err := DialHTTP(ts.URL) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + var r string + if err := c.Call(&r, "test_largeResp"); err != nil { + t.Fatal(err) + } + if len(r) != respLength { + t.Fatalf("response has wrong length %d, want %d", len(r), respLength) + } +} diff --git a/rpc/testservice_test.go b/rpc/testservice_test.go index 6f948a1bac..62afc1df44 100644 --- a/rpc/testservice_test.go +++ b/rpc/testservice_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/binary" "errors" + "strings" "sync" "time" ) @@ -194,3 +195,12 @@ func (s *notificationTestService) HangSubscription(ctx context.Context, val int) }() return subscription, nil } + +// largeRespService generates arbitrary-size JSON responses. 
+type largeRespService struct { + length int +} + +func (x largeRespService) LargeResp() string { + return strings.Repeat("x", x.length) +} diff --git a/rpc/websocket.go b/rpc/websocket.go index a716383be9..ab55ae69c1 100644 --- a/rpc/websocket.go +++ b/rpc/websocket.go @@ -37,6 +37,7 @@ const ( wsWriteBuffer = 1024 wsPingInterval = 60 * time.Second wsPingWriteTimeout = 5 * time.Second + wsMessageSizeLimit = 15 * 1024 * 1024 ) var wsBufferPool = new(sync.Pool) @@ -75,14 +76,14 @@ func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool { allowAllOrigins = true } if origin != "" { - origins.Add(strings.ToLower(origin)) + origins.Add(origin) } } // allow localhost if no allowedOrigins are specified. if len(origins.ToSlice()) == 0 { origins.Add("http://localhost") if hostname, err := os.Hostname(); err == nil { - origins.Add("http://" + strings.ToLower(hostname)) + origins.Add("http://" + hostname) } } log.Debug(fmt.Sprintf("Allowed origin(s) for WS RPC interface %v", origins.ToSlice())) @@ -97,7 +98,7 @@ func wsHandshakeValidator(allowedOrigins []string) func(*http.Request) bool { } // Verify origin against whitelist. origin := strings.ToLower(req.Header.Get("Origin")) - if allowAllOrigins || origins.Contains(origin) { + if allowAllOrigins || originIsAllowed(origins, origin) { return true } log.Warn("Rejected WebSocket connection", "origin", origin) @@ -120,6 +121,65 @@ func (e wsHandshakeError) Error() string { return s } +func originIsAllowed(allowedOrigins mapset.Set, browserOrigin string) bool { + it := allowedOrigins.Iterator() + for origin := range it.C { + if ruleAllowsOrigin(origin.(string), browserOrigin) { + return true + } + } + return false +} + +func ruleAllowsOrigin(allowedOrigin string, browserOrigin string) bool { + var ( + allowedScheme, allowedHostname, allowedPort string + browserScheme, browserHostname, browserPort string + err error + ) + allowedScheme, allowedHostname, allowedPort, err = parseOriginURL(allowedOrigin) + if err != nil { + log.Warn("Error parsing allowed origin specification", "spec", allowedOrigin, "error", err) + return false + } + browserScheme, browserHostname, browserPort, err = parseOriginURL(browserOrigin) + if err != nil { + log.Warn("Error parsing browser 'Origin' field", "Origin", browserOrigin, "error", err) + return false + } + if allowedScheme != "" && allowedScheme != browserScheme { + return false + } + if allowedHostname != "" && allowedHostname != browserHostname { + return false + } + if allowedPort != "" && allowedPort != browserPort { + return false + } + return true +} + +func parseOriginURL(origin string) (string, string, string, error) { + parsedURL, err := url.Parse(strings.ToLower(origin)) + if err != nil { + return "", "", "", err + } + var scheme, hostname, port string + if strings.Contains(origin, "://") { + scheme = parsedURL.Scheme + hostname = parsedURL.Hostname() + port = parsedURL.Port() + } else { + scheme = "" + hostname = parsedURL.Scheme + port = parsedURL.Opaque + if hostname == "" { + hostname = origin + } + } + return scheme, hostname, port, nil +} + // DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server // that is listening on the given endpoint using the provided dialer. 
func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) { @@ -180,7 +240,7 @@ type websocketCodec struct { } func newWebsocketCodec(conn *websocket.Conn) ServerCodec { - conn.SetReadLimit(maxRequestContentLength) + conn.SetReadLimit(wsMessageSizeLimit) wc := &websocketCodec{ jsonCodec: NewFuncCodec(conn, conn.WriteJSON, conn.ReadJSON).(*jsonCodec), conn: conn, diff --git a/rpc/websocket_test.go b/rpc/websocket_test.go index f54fc3cd54..37ed19476f 100644 --- a/rpc/websocket_test.go +++ b/rpc/websocket_test.go @@ -157,6 +157,33 @@ func TestClientWebsocketPing(t *testing.T) { } } +// This checks that the websocket transport can deal with large messages. +func TestClientWebsocketLargeMessage(t *testing.T) { + var ( + srv = NewServer() + httpsrv = httptest.NewServer(srv.WebsocketHandler(nil)) + wsURL = "ws:" + strings.TrimPrefix(httpsrv.URL, "http:") + ) + defer srv.Stop() + defer httpsrv.Close() + + respLength := wsMessageSizeLimit - 50 + srv.RegisterName("test", largeRespService{respLength}) + + c, err := DialWebsocket(context.Background(), wsURL, "") + if err != nil { + t.Fatal(err) + } + + var r string + if err := c.Call(&r, "test_largeResp"); err != nil { + t.Fatal("call failed:", err) + } + if len(r) != respLength { + t.Fatalf("response has wrong length %d, want %d", len(r), respLength) + } +} + // wsPingTestServer runs a WebSocket server which accepts a single subscription request. // When a value arrives on sendPing, the server sends a ping frame, waits for a matching // pong and finally delivers a single subscription result. diff --git a/signer/core/api.go b/signer/core/api.go index 43926a75ff..968dcfb2ed 100644 --- a/signer/core/api.go +++ b/signer/core/api.go @@ -33,7 +33,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/internal/ethapi" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/signer/storage" ) @@ -322,62 +321,65 @@ func (api *SignerAPI) openTrezor(url accounts.URL) { // startUSBListener starts a listener for USB events, for hardware wallet interaction func (api *SignerAPI) startUSBListener() { - events := make(chan accounts.WalletEvent, 16) + eventCh := make(chan accounts.WalletEvent, 16) am := api.am - am.Subscribe(events) - go func() { + am.Subscribe(eventCh) + // Open any wallets already attached + for _, wallet := range am.Wallets() { + if err := wallet.Open(""); err != nil { + log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err) + if err == usbwallet.ErrTrezorPINNeeded { + go api.openTrezor(wallet.URL()) + } + } + } + go api.derivationLoop(eventCh) +} - // Open any wallets already attached - for _, wallet := range am.Wallets() { - if err := wallet.Open(""); err != nil { - log.Warn("Failed to open wallet", "url", wallet.URL(), "err", err) +// derivationLoop listens for wallet events +func (api *SignerAPI) derivationLoop(events chan accounts.WalletEvent) { + // Listen for wallet event till termination + for event := range events { + switch event.Kind { + case accounts.WalletArrived: + if err := event.Wallet.Open(""); err != nil { + log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err) if err == usbwallet.ErrTrezorPINNeeded { - go api.openTrezor(wallet.URL()) + go api.openTrezor(event.Wallet.URL()) } } - } - // Listen for wallet event till termination - for event := range events { - switch event.Kind { - case accounts.WalletArrived: - if err := 
event.Wallet.Open(""); err != nil { - log.Warn("New wallet appeared, failed to open", "url", event.Wallet.URL(), "err", err) - if err == usbwallet.ErrTrezorPINNeeded { - go api.openTrezor(event.Wallet.URL()) + case accounts.WalletOpened: + status, _ := event.Wallet.Status() + log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status) + var derive = func(limit int, next func() accounts.DerivationPath) { + // Derive first N accounts, hardcoded for now + for i := 0; i < limit; i++ { + path := next() + if acc, err := event.Wallet.Derive(path, true); err != nil { + log.Warn("Account derivation failed", "error", err) + } else { + log.Info("Derived account", "address", acc.Address, "path", path) } } - case accounts.WalletOpened: - status, _ := event.Wallet.Status() - log.Info("New wallet appeared", "url", event.Wallet.URL(), "status", status) - var derive = func(numToDerive int, base accounts.DerivationPath) { - // Derive first N accounts, hardcoded for now - var nextPath = make(accounts.DerivationPath, len(base)) - copy(nextPath[:], base[:]) - - for i := 0; i < numToDerive; i++ { - acc, err := event.Wallet.Derive(nextPath, true) - if err != nil { - log.Warn("Account derivation failed", "error", err) - } else { - log.Info("Derived account", "address", acc.Address, "path", nextPath) - } - nextPath[len(nextPath)-1]++ - } - } - if event.Wallet.URL().Scheme == "ledger" { - log.Info("Deriving ledger default paths") - derive(numberOfAccountsToDerive/2, accounts.DefaultBaseDerivationPath) - log.Info("Deriving ledger legacy paths") - derive(numberOfAccountsToDerive/2, accounts.LegacyLedgerBaseDerivationPath) - } else { - derive(numberOfAccountsToDerive, accounts.DefaultBaseDerivationPath) - } - case accounts.WalletDropped: - log.Info("Old wallet dropped", "url", event.Wallet.URL()) - event.Wallet.Close() } + log.Info("Deriving default paths") + derive(numberOfAccountsToDerive, accounts.DefaultIterator(accounts.DefaultBaseDerivationPath)) + if event.Wallet.URL().Scheme == "ledger" { + log.Info("Deriving ledger legacy paths") + derive(numberOfAccountsToDerive, accounts.DefaultIterator(accounts.LegacyLedgerBaseDerivationPath)) + log.Info("Deriving ledger live paths") + // For ledger live, since it's based off the same (DefaultBaseDerivationPath) + // as one we've already used, we need to step it forward one step to avoid + // hitting the same path again + nextFn := accounts.LedgerLiveIterator(accounts.DefaultBaseDerivationPath) + nextFn() + derive(numberOfAccountsToDerive, nextFn) + } + case accounts.WalletDropped: + log.Info("Old wallet dropped", "url", event.Wallet.URL()) + event.Wallet.Close() } - }() + } } // List returns the set of wallet this signer manages. 
Each wallet can contain @@ -436,7 +438,7 @@ func (api *SignerAPI) newAccount() (common.Address, error) { continue } if pwErr := ValidatePasswordFormat(resp.Text); pwErr != nil { - api.UI.ShowError(fmt.Sprintf("Account creation attempt #%d failed due to password requirements: %v", (i + 1), pwErr)) + api.UI.ShowError(fmt.Sprintf("Account creation attempt #%d failed due to password requirements: %v", i+1, pwErr)) } else { // No error acc, err := be[0].(*keystore.KeyStore).NewAccount(resp.Text) @@ -571,11 +573,11 @@ func (api *SignerAPI) SignTransaction(ctx context.Context, args SendTxArgs, meth return nil, err } - rlpdata, err := rlp.EncodeToBytes(signedTx) + data, err := signedTx.MarshalBinary() if err != nil { return nil, err } - response := ethapi.SignTransactionResult{Raw: rlpdata, Tx: signedTx} + response := ethapi.SignTransactionResult{Raw: data, Tx: signedTx} // Finally, send the signed tx to the UI api.UI.OnApprovedTx(response) diff --git a/signer/core/signed_data.go b/signer/core/signed_data.go index 19377a521b..3bff1e1f20 100644 --- a/signer/core/signed_data.go +++ b/signer/core/signed_data.go @@ -506,7 +506,7 @@ func parseBytes(encType interface{}) ([]byte, bool) { case []byte: return v, true case hexutil.Bytes: - return []byte(v), true + return v, true case string: bytes, err := hexutil.Decode(v) if err != nil { diff --git a/signer/fourbyte/abi_test.go b/signer/fourbyte/abi_test.go index 314c12735b..68c027ecea 100644 --- a/signer/fourbyte/abi_test.go +++ b/signer/fourbyte/abi_test.go @@ -68,7 +68,7 @@ func TestNewUnpacker(t *testing.T) { [10]byte{49, 50, 51, 52, 53, 54, 55, 56, 57, 48}, common.Hex2Bytes("48656c6c6f2c20776f726c6421"), }, - }, { // https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI#examples + }, { // https://docs.soliditylang.org/en/develop/abi-spec.html#examples `[{"type":"function","name":"sam","inputs":[{"type":"bytes"},{"type":"bool"},{"type":"uint256[]"}]}]`, // "dave", true and [1,2,3] "a5643bf20000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000464617665000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", @@ -124,7 +124,7 @@ func TestCalldataDecoding(t *testing.T) { "42958b5400000000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000042", // Too short compareAndApprove "a52c101e00ff0000000000000000000000000000000000000000000000000000000000120000000000000000000000000000000000000000000000000000000000000042", - // From https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI + // From https://docs.soliditylang.org/en/develop/abi-spec.html // contains a bool with illegal values 
"a5643bf20000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000464617665000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", } { @@ -135,7 +135,7 @@ func TestCalldataDecoding(t *testing.T) { } // Expected success for i, hexdata := range []string{ - // From https://github.com/ethereum/wiki/wiki/Ethereum-Contract-ABI + // From https://docs.soliditylang.org/en/develop/abi-spec.html "a5643bf20000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000464617665000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003", "a52c101e0000000000000000000000000000000000000000000000000000000000000012", "a52c101eFFffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", diff --git a/tests/block_test.go b/tests/block_test.go index 8fa90e3e39..2649bae85a 100644 --- a/tests/block_test.go +++ b/tests/block_test.go @@ -25,7 +25,11 @@ func TestBlockchain(t *testing.T) { bt := new(testMatcher) // General state tests are 'exported' as blockchain tests, but we can run them natively. - bt.skipLoad(`^GeneralStateTests/`) + // For speedier CI-runs, the line below can be uncommented, so those are skipped. 
+ // For now, in hardfork-times (Berlin), we run the tests both as StateTests and + // as blockchain tests, since the latter also covers things like receipt root + //bt.skipLoad(`^GeneralStateTests/`) + // Skip random failures due to selfish mining test bt.skipLoad(`.*bcForgedTest/bcForkUncle\.json`) @@ -43,7 +47,6 @@ func TestBlockchain(t *testing.T) { // test takes a lot for time and goes easily OOM because of sha3 calculation on a huge range, // using 4.6 TGas bt.skipLoad(`.*randomStatetest94.json.*`) - bt.walk(t, blockTestDir, func(t *testing.T, name string, test *BlockTest) { if err := bt.checkFailure(t, name+"/trie", test.Run(false)); err != nil { t.Errorf("test without snapshotter failed: %v", err) diff --git a/tests/block_test_util.go b/tests/block_test_util.go index be9cdb70cd..745208b5b8 100644 --- a/tests/block_test_util.go +++ b/tests/block_test_util.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" - "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/params" @@ -147,7 +146,7 @@ func (t *BlockTest) Run(snapshotter bool) error { } // Cross-check the snapshot-to-hash against the trie hash if snapshotter { - if err := snapshot.VerifyState(chain.Snapshot(), chain.CurrentBlock().Root()); err != nil { + if err := chain.Snapshots().Verify(chain.CurrentBlock().Root()); err != nil { return err } } diff --git a/tests/fuzzers/abi/abifuzzer.go b/tests/fuzzers/abi/abifuzzer.go index 76d3c800f7..60233d158a 100644 --- a/tests/fuzzers/abi/abifuzzer.go +++ b/tests/fuzzers/abi/abifuzzer.go @@ -17,38 +17,53 @@ package abi import ( - "bytes" "fmt" - "math/rand" "reflect" "strings" "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/crypto" fuzz "github.com/google/gofuzz" ) -func unpackPack(abi abi.ABI, method string, inputType []interface{}, input []byte) bool { - outptr := reflect.New(reflect.TypeOf(inputType)) - if err := abi.UnpackIntoInterface(outptr.Interface(), method, input); err == nil { - output, err := abi.Pack(method, input) +var ( + names = []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"} + stateMut = []string{"", "pure", "view", "payable"} + stateMutabilites = []*string{&stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]} + pays = []string{"", "true", "false"} + payables = []*string{&pays[0], &pays[1]} + vNames = []string{"a", "b", "c", "d", "e", "f", "g"} + varNames = append(vNames, names...) 
+ varTypes = []string{"bool", "address", "bytes", "string", + "uint8", "int8", "uint8", "int8", "uint16", "int16", + "uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56", + "uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96", + "uint104", "int104", "uint112", "int112", "uint120", "int120", "uint128", "int128", "uint136", "int136", + "uint144", "int144", "uint152", "int152", "uint160", "int160", "uint168", "int168", "uint176", "int176", + "uint184", "int184", "uint192", "int192", "uint200", "int200", "uint208", "int208", "uint216", "int216", + "uint224", "int224", "uint232", "int232", "uint240", "int240", "uint248", "int248", "uint256", "int256", + "bytes1", "bytes2", "bytes3", "bytes4", "bytes5", "bytes6", "bytes7", "bytes8", "bytes9", "bytes10", "bytes11", + "bytes12", "bytes13", "bytes14", "bytes15", "bytes16", "bytes17", "bytes18", "bytes19", "bytes20", "bytes21", + "bytes22", "bytes23", "bytes24", "bytes25", "bytes26", "bytes27", "bytes28", "bytes29", "bytes30", "bytes31", + "bytes32", "bytes"} +) + +func unpackPack(abi abi.ABI, method string, input []byte) ([]interface{}, bool) { + if out, err := abi.Unpack(method, input); err == nil { + _, err := abi.Pack(method, out...) if err != nil { // We have some false positives as we can unpack these type successfully, but not pack them if err.Error() == "abi: cannot use []uint8 as type [0]int8 as argument" || err.Error() == "abi: cannot use uint8 as type int8 as argument" { - return false + return out, false } panic(err) } - if !bytes.Equal(input, output[4:]) { - panic(fmt.Sprintf("unpackPack is not equal, \ninput : %x\noutput: %x", input, output[4:])) - } - return true + return out, true } - return false + return nil, false } -func packUnpack(abi abi.ABI, method string, input []interface{}) bool { +func packUnpack(abi abi.ABI, method string, input *[]interface{}) bool { if packed, err := abi.Pack(method, input); err == nil { outptr := reflect.New(reflect.TypeOf(input)) err := abi.UnpackIntoInterface(outptr.Interface(), method, packed) @@ -100,64 +115,23 @@ func createABI(name string, stateMutability, payable *string, inputs []args) (ab return abi.JSON(strings.NewReader(sig)) } -func fillStruct(structs []interface{}, data []byte) { - if structs != nil && len(data) != 0 { - fuzz.NewFromGoFuzz(data).Fuzz(&structs) - } -} - -func createStructs(args []args) []interface{} { - structs := make([]interface{}, len(args)) - for i, arg := range args { - t, err := abi.NewType(arg.typ, "", nil) - if err != nil { - panic(err) - } - structs[i] = reflect.New(t.GetType()).Elem() - } - return structs -} - func runFuzzer(input []byte) int { good := false + fuzzer := fuzz.NewFromGoFuzz(input) - names := []string{"_name", "name", "NAME", "name_", "__", "_name_", "n"} - stateMut := []string{"", "pure", "view", "payable"} - stateMutabilites := []*string{nil, &stateMut[0], &stateMut[1], &stateMut[2], &stateMut[3]} - pays := []string{"true", "false"} - payables := []*string{nil, &pays[0], &pays[1]} - varNames := []string{"a", "b", "c", "d", "e", "f", "g"} - varNames = append(varNames, names...) 
- varTypes := []string{"bool", "address", "bytes", "string", - "uint8", "int8", "uint8", "int8", "uint16", "int16", - "uint24", "int24", "uint32", "int32", "uint40", "int40", "uint48", "int48", "uint56", "int56", - "uint64", "int64", "uint72", "int72", "uint80", "int80", "uint88", "int88", "uint96", "int96", - "uint104", "int104", "uint112", "int112", "uint120", "int120", "uint128", "int128", "uint136", "int136", - "uint144", "int144", "uint152", "int152", "uint160", "int160", "uint168", "int168", "uint176", "int176", - "uint184", "int184", "uint192", "int192", "uint200", "int200", "uint208", "int208", "uint216", "int216", - "uint224", "int224", "uint232", "int232", "uint240", "int240", "uint248", "int248", "uint256", "int256", - "bytes1", "bytes2", "bytes3", "bytes4", "bytes5", "bytes6", "bytes7", "bytes8", "bytes9", "bytes10", "bytes11", - "bytes12", "bytes13", "bytes14", "bytes15", "bytes16", "bytes17", "bytes18", "bytes19", "bytes20", "bytes21", - "bytes22", "bytes23", "bytes24", "bytes25", "bytes26", "bytes27", "bytes28", "bytes29", "bytes30", "bytes31", - "bytes32", "bytes"} - rnd := rand.New(rand.NewSource(123456)) - if len(input) > 0 { - kec := crypto.Keccak256(input) - rnd = rand.New(rand.NewSource(int64(kec[0]))) - } - name := names[rnd.Intn(len(names))] - stateM := stateMutabilites[rnd.Intn(len(stateMutabilites))] - payable := payables[rnd.Intn(len(payables))] + name := names[getUInt(fuzzer)%len(names)] + stateM := stateMutabilites[getUInt(fuzzer)%len(stateMutabilites)] + payable := payables[getUInt(fuzzer)%len(payables)] maxLen := 5 for k := 1; k < maxLen; k++ { var arg []args for i := k; i > 0; i-- { argName := varNames[i] - argTyp := varTypes[rnd.Int31n(int32(len(varTypes)))] - if rnd.Int31n(10) == 0 { + argTyp := varTypes[getUInt(fuzzer)%len(varTypes)] + if getUInt(fuzzer)%10 == 0 { argTyp += "[]" - } else if rnd.Int31n(10) == 0 { - arrayArgs := rnd.Int31n(30) + 1 + } else if getUInt(fuzzer)%10 == 0 { + arrayArgs := getUInt(fuzzer)%30 + 1 argTyp += fmt.Sprintf("[%d]", arrayArgs) } arg = append(arg, args{ @@ -169,10 +143,8 @@ func runFuzzer(input []byte) int { if err != nil { continue } - structs := createStructs(arg) - b := unpackPack(abi, name, structs, input) - fillStruct(structs, input) - c := packUnpack(abi, name, structs) + structs, b := unpackPack(abi, name, input) + c := packUnpack(abi, name, &structs) good = good || b || c } if good { @@ -184,3 +156,15 @@ func runFuzzer(input []byte) int { func Fuzz(input []byte) int { return runFuzzer(input) } + +func getUInt(fuzzer *fuzz.Fuzzer) int { + var i int + fuzzer.Fuzz(&i) + if i < 0 { + i = -i + if i < 0 { + return 0 + } + } + return i +} diff --git a/tests/fuzzers/abi/abifuzzer_test.go b/tests/fuzzers/abi/abifuzzer_test.go index c72577e9ee..423a3cd232 100644 --- a/tests/fuzzers/abi/abifuzzer_test.go +++ b/tests/fuzzers/abi/abifuzzer_test.go @@ -23,14 +23,7 @@ import ( // TestReplicate can be used to replicate crashers from the fuzzing tests. 
// Just replace testString with the data in .quoted func TestReplicate(t *testing.T) { - testString := "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00 \x00\x00\x00\x00\x00\x00\x00\x00" + - "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" + - "\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00000000000" + - "00000000000000000000" + - "00000000000000000000" + - "00000001" - + testString := "\x20\x20\x20\x20\x20\x20\x20\x20\x80\x00\x00\x00\x20\x20\x20\x20\x00" data := []byte(testString) runFuzzer(data) } diff --git a/common/bitutil/compress_fuzz.go b/tests/fuzzers/bitutil/compress_fuzz.go similarity index 68% rename from common/bitutil/compress_fuzz.go rename to tests/fuzzers/bitutil/compress_fuzz.go index 1b87f50edc..5903cf2f93 100644 --- a/common/bitutil/compress_fuzz.go +++ b/tests/fuzzers/bitutil/compress_fuzz.go @@ -14,17 +14,19 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build gofuzz - package bitutil -import "bytes" +import ( + "bytes" + + "github.com/ethereum/go-ethereum/common/bitutil" +) // Fuzz implements a go-fuzz fuzzer method to test various encoding method // invocations. func Fuzz(data []byte) int { if len(data) == 0 { - return -1 + return 0 } if data[0]%2 == 0 { return fuzzEncode(data[1:]) @@ -35,22 +37,34 @@ func Fuzz(data []byte) int { // fuzzEncode implements a go-fuzz fuzzer method to test the bitset encoding and // decoding algorithm. func fuzzEncode(data []byte) int { - proc, _ := bitsetDecodeBytes(bitsetEncodeBytes(data), len(data)) + proc, _ := bitutil.DecompressBytes(bitutil.CompressBytes(data), len(data)) if !bytes.Equal(data, proc) { panic("content mismatch") } - return 0 + return 1 } // fuzzDecode implements a go-fuzz fuzzer method to test the bit decoding and // reencoding algorithm. func fuzzDecode(data []byte) int { - blob, err := bitsetDecodeBytes(data, 1024) + blob, err := bitutil.DecompressBytes(data, 1024) if err != nil { return 0 } - if comp := bitsetEncodeBytes(blob); !bytes.Equal(comp, data) { + // re-compress it (it's OK if the re-compressed differs from the + // original - the first input may not have been compressed at all) + comp := bitutil.CompressBytes(blob) + if len(comp) > len(blob) { + // After compression, it must be smaller or equal + panic("bad compression") + } + // But decompressing it once again should work + decomp, err := bitutil.DecompressBytes(data, 1024) + if err != nil { + panic(err) + } + if !bytes.Equal(decomp, blob) { panic("content mismatch") } - return 0 + return 1 } diff --git a/tests/fuzzers/bls12381/bls_fuzzer.go b/tests/fuzzers/bls12381/bls_fuzzer.go new file mode 100644 index 0000000000..bc3c456526 --- /dev/null +++ b/tests/fuzzers/bls12381/bls_fuzzer.go @@ -0,0 +1,101 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. 
+// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package bls + +import ( + "bytes" + "fmt" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/vm" +) + +const ( + blsG1Add = byte(10) + blsG1Mul = byte(11) + blsG1MultiExp = byte(12) + blsG2Add = byte(13) + blsG2Mul = byte(14) + blsG2MultiExp = byte(15) + blsPairing = byte(16) + blsMapG1 = byte(17) + blsMapG2 = byte(18) +) + +func FuzzG1Add(data []byte) int { return fuzz(blsG1Add, data) } +func FuzzG1Mul(data []byte) int { return fuzz(blsG1Mul, data) } +func FuzzG1MultiExp(data []byte) int { return fuzz(blsG1MultiExp, data) } +func FuzzG2Add(data []byte) int { return fuzz(blsG2Add, data) } +func FuzzG2Mul(data []byte) int { return fuzz(blsG2Mul, data) } +func FuzzG2MultiExp(data []byte) int { return fuzz(blsG2MultiExp, data) } +func FuzzPairing(data []byte) int { return fuzz(blsPairing, data) } +func FuzzMapG1(data []byte) int { return fuzz(blsMapG1, data) } +func FuzzMapG2(data []byte) int { return fuzz(blsMapG2, data) } + +func checkInput(id byte, inputLen int) bool { + switch id { + case blsG1Add: + return inputLen == 256 + case blsG1Mul: + return inputLen == 160 + case blsG1MultiExp: + return inputLen%160 == 0 + case blsG2Add: + return inputLen == 512 + case blsG2Mul: + return inputLen == 288 + case blsG2MultiExp: + return inputLen%288 == 0 + case blsPairing: + return inputLen%384 == 0 + case blsMapG1: + return inputLen == 64 + case blsMapG2: + return inputLen == 128 + } + panic("programmer error") +} + +// The fuzzer functions must return +// 1 if the fuzzer should increase priority of the +// given input during subsequent fuzzing (for example, the input is lexically +// correct and was parsed successfully); +// -1 if the input must not be added to corpus even if gives new coverage; and +// 0 otherwise +// other values are reserved for future use. 
+func fuzz(id byte, data []byte) int { + // Even on bad input, it should not crash, so we still test the gas calc + precompile := vm.PrecompiledContractsBLS[common.BytesToAddress([]byte{id})] + gas := precompile.RequiredGas(data) + if !checkInput(id, len(data)) { + return 0 + } + // If the gas cost is too large (25M), bail out + if gas > 25*1000*1000 { + return 0 + } + cpy := make([]byte, len(data)) + copy(cpy, data) + _, err := precompile.Run(cpy) + if !bytes.Equal(cpy, data) { + panic(fmt.Sprintf("input data modified, precompile %d: %x %x", id, data, cpy)) + } + if err != nil { + return 0 + } + return 1 +} diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g1_add_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g1_add_seed_corpus.zip new file mode 100644 index 0000000000..16498c1cba Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g1_add_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g1_mul_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g1_mul_seed_corpus.zip new file mode 100644 index 0000000000..57f9d6696d Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g1_mul_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g1_multiexp_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g1_multiexp_seed_corpus.zip new file mode 100644 index 0000000000..7271f040f3 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g1_multiexp_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g2_add_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g2_add_seed_corpus.zip new file mode 100644 index 0000000000..cd5206ca0b Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g2_add_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g2_mul_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g2_mul_seed_corpus.zip new file mode 100644 index 0000000000..f784a5a3d7 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g2_mul_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_g2_multiexp_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_g2_multiexp_seed_corpus.zip new file mode 100644 index 0000000000..c205117a46 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_g2_multiexp_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_map_g1_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_map_g1_seed_corpus.zip new file mode 100644 index 0000000000..70382fbe53 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_map_g1_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_map_g2_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_map_g2_seed_corpus.zip new file mode 100644 index 0000000000..67adc5b5e8 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_map_g2_seed_corpus.zip differ diff --git a/tests/fuzzers/bls12381/testdata/fuzz_pairing_seed_corpus.zip b/tests/fuzzers/bls12381/testdata/fuzz_pairing_seed_corpus.zip new file mode 100644 index 0000000000..e24d2b0a52 Binary files /dev/null and b/tests/fuzzers/bls12381/testdata/fuzz_pairing_seed_corpus.zip differ diff --git a/tests/fuzzers/bn256/bn256_fuzz.go b/tests/fuzzers/bn256/bn256_fuzz.go new file mode 100644 index 0000000000..c98fbc33ae --- /dev/null +++ b/tests/fuzzers/bn256/bn256_fuzz.go @@ -0,0 +1,154 @@ +// Copyright 2018 Péter Szilágyi. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +// +build gofuzz + +package bn256 + +import ( + "bytes" + "fmt" + "io" + "math/big" + + gurvy "github.com/consensys/gurvy/bn256" + cloudflare "github.com/ethereum/go-ethereum/crypto/bn256/cloudflare" + google "github.com/ethereum/go-ethereum/crypto/bn256/google" +) + +func getG1Points(input io.Reader) (*cloudflare.G1, *google.G1, *gurvy.G1Affine) { + _, xc, err := cloudflare.RandomG1(input) + if err != nil { + // insufficient input + return nil, nil, nil + } + xg := new(google.G1) + if _, err := xg.Unmarshal(xc.Marshal()); err != nil { + panic(fmt.Sprintf("Could not marshal cloudflare -> google:", err)) + } + xs := new(gurvy.G1Affine) + if err := xs.Unmarshal(xc.Marshal()); err != nil { + panic(fmt.Sprintf("Could not marshal cloudflare -> consensys:", err)) + } + return xc, xg, xs +} + +func getG2Points(input io.Reader) (*cloudflare.G2, *google.G2, *gurvy.G2Affine) { + _, xc, err := cloudflare.RandomG2(input) + if err != nil { + // insufficient input + return nil, nil, nil + } + xg := new(google.G2) + if _, err := xg.Unmarshal(xc.Marshal()); err != nil { + panic(fmt.Sprintf("Could not marshal cloudflare -> google:", err)) + } + xs := new(gurvy.G2Affine) + if err := xs.Unmarshal(xc.Marshal()); err != nil { + panic(fmt.Sprintf("Could not marshal cloudflare -> consensys:", err)) + } + return xc, xg, xs +} + +// FuzzAdd fuzzez bn256 addition between the Google and Cloudflare libraries. +func FuzzAdd(data []byte) int { + input := bytes.NewReader(data) + xc, xg, xs := getG1Points(input) + if xc == nil { + return 0 + } + yc, yg, ys := getG1Points(input) + if yc == nil { + return 0 + } + // Ensure both libs can parse the second curve point + // Add the two points and ensure they result in the same output + rc := new(cloudflare.G1) + rc.Add(xc, yc) + + rg := new(google.G1) + rg.Add(xg, yg) + + tmpX := new(gurvy.G1Jac).FromAffine(xs) + tmpY := new(gurvy.G1Jac).FromAffine(ys) + rs := new(gurvy.G1Affine).FromJacobian(tmpX.AddAssign(tmpY)) + + if !bytes.Equal(rc.Marshal(), rg.Marshal()) { + panic("add mismatch: cloudflare/google") + } + + if !bytes.Equal(rc.Marshal(), rs.Marshal()) { + panic("add mismatch: cloudflare/consensys") + } + return 1 +} + +// FuzzMul fuzzez bn256 scalar multiplication between the Google and Cloudflare +// libraries. +func FuzzMul(data []byte) int { + input := bytes.NewReader(data) + pc, pg, ps := getG1Points(input) + if pc == nil { + return 0 + } + // Add the two points and ensure they result in the same output + remaining := input.Len() + if remaining == 0 { + return 0 + } + if remaining > 128 { + // The evm only ever uses 32 byte integers, we need to cap this otherwise + // we run into slow exec. A 236Kb byte integer cause oss-fuzz to report it as slow. 
+ // 128 bytes should be fine though + return 0 + } + buf := make([]byte, remaining) + input.Read(buf) + + rc := new(cloudflare.G1) + rc.ScalarMult(pc, new(big.Int).SetBytes(buf)) + + rg := new(google.G1) + rg.ScalarMult(pg, new(big.Int).SetBytes(buf)) + + rs := new(gurvy.G1Jac) + psJac := new(gurvy.G1Jac).FromAffine(ps) + rs.ScalarMultiplication(psJac, new(big.Int).SetBytes(buf)) + rsAffine := new(gurvy.G1Affine).FromJacobian(rs) + + if !bytes.Equal(rc.Marshal(), rg.Marshal()) { + panic("scalar mul mismatch: cloudflare/google") + } + if !bytes.Equal(rc.Marshal(), rsAffine.Marshal()) { + panic("scalar mul mismatch: cloudflare/consensys") + } + return 1 +} + +func FuzzPair(data []byte) int { + input := bytes.NewReader(data) + pc, pg, ps := getG1Points(input) + if pc == nil { + return 0 + } + tc, tg, ts := getG2Points(input) + if tc == nil { + return 0 + } + // Pair the two points and ensure they result in the same output + clPair := cloudflare.PairingCheck([]*cloudflare.G1{pc}, []*cloudflare.G2{tc}) + if clPair != google.PairingCheck([]*google.G1{pg}, []*google.G2{tg}) { + panic("pairing mismatch: cloudflare/google") + } + + coPair, err := gurvy.PairingCheck([]gurvy.G1Affine{*ps}, []gurvy.G2Affine{*ts}) + if err != nil { + panic(fmt.Sprintf("gurvy encountered error: %v", err)) + } + if clPair != coPair { + panic("pairing mismatch: cloudflare/consensys") + } + + return 1 +} diff --git a/tests/fuzzers/difficulty/debug/main.go b/tests/fuzzers/difficulty/debug/main.go new file mode 100644 index 0000000000..23516b3a0d --- /dev/null +++ b/tests/fuzzers/difficulty/debug/main.go @@ -0,0 +1,23 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/ethereum/go-ethereum/tests/fuzzers/difficulty" +) + +func main() { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: debug ") + os.Exit(1) + } + crasher := os.Args[1] + data, err := ioutil.ReadFile(crasher) + if err != nil { + fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err) + os.Exit(1) + } + difficulty.Fuzz(data) +} diff --git a/tests/fuzzers/difficulty/difficulty-fuzz.go b/tests/fuzzers/difficulty/difficulty-fuzz.go new file mode 100644 index 0000000000..e4c5dcf57c --- /dev/null +++ b/tests/fuzzers/difficulty/difficulty-fuzz.go @@ -0,0 +1,145 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package difficulty + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "math/big" + + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core/types" +) + +type fuzzer struct { + input io.Reader + exhausted bool + debugging bool +} + +func (f *fuzzer) read(size int) []byte { + out := make([]byte, size) + if _, err := f.input.Read(out); err != nil { + f.exhausted = true + } + return out +} + +func (f *fuzzer) readSlice(min, max int) []byte { + var a uint16 + binary.Read(f.input, binary.LittleEndian, &a) + size := min + int(a)%(max-min) + out := make([]byte, size) + if _, err := f.input.Read(out); err != nil { + f.exhausted = true + } + return out +} + +func (f *fuzzer) readUint64(min, max uint64) uint64 { + if min == max { + return min + } + var a uint64 + if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { + f.exhausted = true + } + a = min + a%(max-min) + return a +} +func (f *fuzzer) readBool() bool { + return f.read(1)[0]&0x1 == 0 +} + +// The function must return +// 1 if the fuzzer should increase priority of the +// given input during subsequent fuzzing (for example, the input is lexically +// correct and was parsed successfully); +// -1 if the input must not be added to corpus even if gives new coverage; and +// 0 otherwise +// other values are reserved for future use. +func Fuzz(data []byte) int { + f := fuzzer{ + input: bytes.NewReader(data), + exhausted: false, + } + return f.fuzz() +} + +var minDifficulty = big.NewInt(0x2000) + +type calculator func(time uint64, parent *types.Header) *big.Int + +func (f *fuzzer) fuzz() int { + // A parent header + header := &types.Header{} + if f.readBool() { + header.UncleHash = types.EmptyUncleHash + } + // Difficulty can range between 0x2000 (2 bytes) and up to 32 bytes + { + diff := new(big.Int).SetBytes(f.readSlice(2, 32)) + if diff.Cmp(minDifficulty) < 0 { + diff.Set(minDifficulty) + } + header.Difficulty = diff + } + // Number can range between 0 and up to 32 bytes (but not so that the child exceeds it) + { + // However, if we use astronomic numbers, then the bomb exp karatsuba calculation + // in the legacy methods) + // times out, so we limit it to fit within reasonable bounds + number := new(big.Int).SetBytes(f.readSlice(0, 4)) // 4 bytes: 32 bits: block num max 4 billion + header.Number = number + } + // Both parent and child time must fit within uint64 + var time uint64 + { + childTime := f.readUint64(1, 0xFFFFFFFFFFFFFFFF) + //fmt.Printf("childTime: %x\n",childTime) + delta := f.readUint64(1, childTime) + //fmt.Printf("delta: %v\n", delta) + pTime := childTime - delta + header.Time = pTime + time = childTime + } + // Bomb delay will never exceed uint64 + bombDelay := new(big.Int).SetUint64(f.readUint64(1, 0xFFFFFFFFFFFFFFFe)) + + if f.exhausted { + return 0 + } + + for i, pair := range []struct { + bigFn calculator + u256Fn calculator + }{ + {ethash.FrontierDifficultyCalulator, ethash.CalcDifficultyFrontierU256}, + {ethash.HomesteadDifficultyCalulator, ethash.CalcDifficultyHomesteadU256}, + {ethash.DynamicDifficultyCalculator(bombDelay), ethash.MakeDifficultyCalculatorU256(bombDelay)}, + } { + want := pair.bigFn(time, header) + have := pair.u256Fn(time, header) + if want.Cmp(have) != 0 { + panic(fmt.Sprintf("pair %d: want %x have %x\nparent.Number: %x\np.Time: %x\nc.Time: %x\nBombdelay: %v\n", i, want, have, + header.Number, header.Time, time, bombDelay)) + } + } + return 1 +} diff --git a/tests/fuzzers/keystore/keystore-fuzzer.go 
b/tests/fuzzers/keystore/keystore-fuzzer.go index 704f29dc48..e3bcae92e1 100644 --- a/tests/fuzzers/keystore/keystore-fuzzer.go +++ b/tests/fuzzers/keystore/keystore-fuzzer.go @@ -33,5 +33,5 @@ func Fuzz(input []byte) int { panic(err) } os.Remove(a.URL.Path) - return 0 + return 1 } diff --git a/p2p/discv5/sim_testmain_test.go b/tests/fuzzers/les/debug/main.go similarity index 54% rename from p2p/discv5/sim_testmain_test.go rename to tests/fuzzers/les/debug/main.go index 77e751c419..09e087d4c8 100644 --- a/p2p/discv5/sim_testmain_test.go +++ b/tests/fuzzers/les/debug/main.go @@ -1,4 +1,4 @@ -// Copyright 2016 The go-ethereum Authors +// Copyright 2020 The go-ethereum Authors // This file is part of the go-ethereum library. // // The go-ethereum library is free software: you can redistribute it and/or modify @@ -14,30 +14,28 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build go1.4,nacl,faketime_simulation - -package discv5 +package main import ( + "fmt" + "io/ioutil" "os" - "runtime" - "testing" - "unsafe" -) - -// Enable fake time mode in the runtime, like on the go playground. -// There is a slight chance that this won't work because some go code -// might have executed before the variable is set. -//go:linkname faketime runtime.faketime -var faketime = 1 - -func TestMain(m *testing.M) { - // We need to use unsafe somehow in order to get access to go:linkname. - _ = unsafe.Sizeof(0) + "github.com/ethereum/go-ethereum/tests/fuzzers/les" +) - // Run the actual test. runWithPlaygroundTime ensures that the only test - // that runs is the one calling it. - runtime.GOMAXPROCS(8) - os.Exit(m.Run()) +func main() { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: debug \n") + fmt.Fprintf(os.Stderr, "Example\n") + fmt.Fprintf(os.Stderr, " $ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n") + os.Exit(1) + } + crasher := os.Args[1] + data, err := ioutil.ReadFile(crasher) + if err != nil { + fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err) + os.Exit(1) + } + les.Fuzz(data) } diff --git a/tests/fuzzers/les/les-fuzzer.go b/tests/fuzzers/les/les-fuzzer.go new file mode 100644 index 0000000000..9e896c2c1b --- /dev/null +++ b/tests/fuzzers/les/les-fuzzer.go @@ -0,0 +1,407 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package les + +import ( + "bytes" + "encoding/binary" + "io" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/ethash" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/core/vm" + "github.com/ethereum/go-ethereum/crypto" + l "github.com/ethereum/go-ethereum/les" + "github.com/ethereum/go-ethereum/params" + "github.com/ethereum/go-ethereum/rlp" + "github.com/ethereum/go-ethereum/trie" +) + +var ( + bankKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + bankAddr = crypto.PubkeyToAddress(bankKey.PublicKey) + bankFunds = new(big.Int).Mul(big.NewInt(100), big.NewInt(params.Ether)) + + testChainLen = 256 + testContractCode = common.Hex2Bytes("606060405260cc8060106000396000f360606040526000357c01000000000000000000000000000000000000000000000000000000009004806360cd2685146041578063c16431b914606b57603f565b005b6055600480803590602001909190505060a9565b6040518082815260200191505060405180910390f35b60886004808035906020019091908035906020019091905050608a565b005b80600060005083606481101560025790900160005b50819055505b5050565b6000600060005082606481101560025790900160005b5054905060c7565b91905056") + + chain *core.BlockChain + addrHashes []common.Hash + txHashes []common.Hash + + chtTrie *trie.Trie + bloomTrie *trie.Trie + chtKeys [][]byte + bloomKeys [][]byte +) + +func makechain() (bc *core.BlockChain, addrHashes, txHashes []common.Hash) { + db := rawdb.NewMemoryDatabase() + gspec := core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{bankAddr: {Balance: bankFunds}}, + GasLimit: 100000000, + } + genesis := gspec.MustCommit(db) + signer := types.HomesteadSigner{} + blocks, _ := core.GenerateChain(gspec.Config, genesis, ethash.NewFaker(), db, testChainLen, + func(i int, gen *core.BlockGen) { + var ( + tx *types.Transaction + addr common.Address + ) + nonce := uint64(i) + if i%4 == 0 { + tx, _ = types.SignTx(types.NewContractCreation(nonce, big.NewInt(0), 200000, big.NewInt(0), testContractCode), signer, bankKey) + addr = crypto.CreateAddress(bankAddr, nonce) + } else { + addr = common.BigToAddress(big.NewInt(int64(i))) + tx, _ = types.SignTx(types.NewTransaction(nonce, addr, big.NewInt(10000), params.TxGas, big.NewInt(params.GWei), nil), signer, bankKey) + } + gen.AddTx(tx) + addrHashes = append(addrHashes, crypto.Keccak256Hash(addr[:])) + txHashes = append(txHashes, tx.Hash()) + }) + bc, _ = core.NewBlockChain(db, nil, gspec.Config, ethash.NewFaker(), vm.Config{}, nil, nil) + if _, err := bc.InsertChain(blocks); err != nil { + panic(err) + } + return +} + +func makeTries() (chtTrie *trie.Trie, bloomTrie *trie.Trie, chtKeys, bloomKeys [][]byte) { + chtTrie, _ = trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase())) + bloomTrie, _ = trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase())) + for i := 0; i < testChainLen; i++ { + // The element in CHT is -> + key := make([]byte, 8) + binary.BigEndian.PutUint64(key, uint64(i+1)) + chtTrie.Update(key, []byte{0x1, 0xf}) + chtKeys = append(chtKeys, key) + + // The element in Bloom trie is <2 byte bit index> + -> bloom + key2 := make([]byte, 10) + binary.BigEndian.PutUint64(key2[2:], uint64(i+1)) + bloomTrie.Update(key2, []byte{0x2, 0xe}) + bloomKeys = append(bloomKeys, key2) + } + return +} + +func init() { + chain, addrHashes, txHashes = makechain() + chtTrie, bloomTrie, chtKeys, bloomKeys = makeTries() +} 
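// Editorial note, not part of the patch: makechain and makeTries run once in
// init() above, so the 256-block test chain, the CHT/bloom tries and the
// derived key/hash lists are shared package globals. Each fuzzing iteration
// then reuses that fixed state and draws all of its choices from the fuzz
// input itself (see the randomInt/randomX-style helpers below), which keeps a
// run deterministic for a given input and makes reported crashers reproducible.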
+ +type fuzzer struct { + chain *core.BlockChain + pool *core.TxPool + + chainLen int + addr, txs []common.Hash + nonce uint64 + + chtKeys [][]byte + bloomKeys [][]byte + chtTrie *trie.Trie + bloomTrie *trie.Trie + + input io.Reader + exhausted bool +} + +func newFuzzer(input []byte) *fuzzer { + return &fuzzer{ + chain: chain, + chainLen: testChainLen, + addr: addrHashes, + txs: txHashes, + chtTrie: chtTrie, + bloomTrie: bloomTrie, + chtKeys: chtKeys, + bloomKeys: bloomKeys, + nonce: uint64(len(txHashes)), + pool: core.NewTxPool(core.DefaultTxPoolConfig, params.TestChainConfig, chain), + input: bytes.NewReader(input), + } +} + +func (f *fuzzer) read(size int) []byte { + out := make([]byte, size) + if _, err := f.input.Read(out); err != nil { + f.exhausted = true + } + return out +} + +func (f *fuzzer) randomByte() byte { + d := f.read(1) + return d[0] +} + +func (f *fuzzer) randomBool() bool { + d := f.read(1) + return d[0]&1 == 1 +} + +func (f *fuzzer) randomInt(max int) int { + if max == 0 { + return 0 + } + if max <= 256 { + return int(f.randomByte()) % max + } + var a uint16 + if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { + f.exhausted = true + } + return int(a % uint16(max)) +} + +func (f *fuzzer) randomX(max int) uint64 { + var a uint16 + if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { + f.exhausted = true + } + if a < 0x8000 { + return uint64(a%uint16(max+1)) - 1 + } + return (uint64(1)<<(a%64+1) - 1) & (uint64(a) * 343897772345826595) +} + +func (f *fuzzer) randomBlockHash() common.Hash { + h := f.chain.GetCanonicalHash(uint64(f.randomInt(3 * f.chainLen))) + if h != (common.Hash{}) { + return h + } + return common.BytesToHash(f.read(common.HashLength)) +} + +func (f *fuzzer) randomAddrHash() []byte { + i := f.randomInt(3 * len(f.addr)) + if i < len(f.addr) { + return f.addr[i].Bytes() + } + return f.read(common.HashLength) +} + +func (f *fuzzer) randomCHTTrieKey() []byte { + i := f.randomInt(3 * len(f.chtKeys)) + if i < len(f.chtKeys) { + return f.chtKeys[i] + } + return f.read(8) +} + +func (f *fuzzer) randomBloomTrieKey() []byte { + i := f.randomInt(3 * len(f.bloomKeys)) + if i < len(f.bloomKeys) { + return f.bloomKeys[i] + } + return f.read(10) +} + +func (f *fuzzer) randomTxHash() common.Hash { + i := f.randomInt(3 * len(f.txs)) + if i < len(f.txs) { + return f.txs[i] + } + return common.BytesToHash(f.read(common.HashLength)) +} + +func (f *fuzzer) BlockChain() *core.BlockChain { + return f.chain +} + +func (f *fuzzer) TxPool() *core.TxPool { + return f.pool +} + +func (f *fuzzer) ArchiveMode() bool { + return false +} + +func (f *fuzzer) AddTxsSync() bool { + return false +} + +func (f *fuzzer) GetHelperTrie(typ uint, index uint64) *trie.Trie { + if typ == 0 { + return f.chtTrie + } else if typ == 1 { + return f.bloomTrie + } + return nil +} + +type dummyMsg struct { + data []byte +} + +func (d dummyMsg) Decode(val interface{}) error { + return rlp.DecodeBytes(d.data, val) +} + +func (f *fuzzer) doFuzz(msgCode uint64, packet interface{}) { + version := f.randomInt(3) + 2 // [LES2, LES3, LES4] + peer := l.NewFuzzerPeer(version) + enc, err := rlp.EncodeToBytes(packet) + if err != nil { + panic(err) + } + fn, _, _, err := l.Les3[msgCode].Handle(dummyMsg{enc}) + if err != nil { + panic(err) + } + fn(f, peer, func() bool { return true }) + +} + +func Fuzz(input []byte) int { + // We expect some large inputs + if len(input) < 100 { + return -1 + } + f := newFuzzer(input) + if f.exhausted { + return -1 + } + for !f.exhausted { + switch 
f.randomInt(8) { + case 0: + req := &l.GetBlockHeadersPacket{ + Query: l.GetBlockHeadersData{ + Amount: f.randomX(l.MaxHeaderFetch + 1), + Skip: f.randomX(10), + Reverse: f.randomBool(), + }, + } + if f.randomBool() { + req.Query.Origin.Hash = f.randomBlockHash() + } else { + req.Query.Origin.Number = uint64(f.randomInt(f.chainLen * 2)) + } + f.doFuzz(l.GetBlockHeadersMsg, req) + + case 1: + req := &l.GetBlockBodiesPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxBodyFetch+1))} + for i := range req.Hashes { + req.Hashes[i] = f.randomBlockHash() + } + f.doFuzz(l.GetBlockBodiesMsg, req) + + case 2: + req := &l.GetCodePacket{Reqs: make([]l.CodeReq, f.randomInt(l.MaxCodeFetch+1))} + for i := range req.Reqs { + req.Reqs[i] = l.CodeReq{ + BHash: f.randomBlockHash(), + AccKey: f.randomAddrHash(), + } + } + f.doFuzz(l.GetCodeMsg, req) + + case 3: + req := &l.GetReceiptsPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxReceiptFetch+1))} + for i := range req.Hashes { + req.Hashes[i] = f.randomBlockHash() + } + f.doFuzz(l.GetReceiptsMsg, req) + + case 4: + req := &l.GetProofsPacket{Reqs: make([]l.ProofReq, f.randomInt(l.MaxProofsFetch+1))} + for i := range req.Reqs { + if f.randomBool() { + req.Reqs[i] = l.ProofReq{ + BHash: f.randomBlockHash(), + AccKey: f.randomAddrHash(), + Key: f.randomAddrHash(), + FromLevel: uint(f.randomX(3)), + } + } else { + req.Reqs[i] = l.ProofReq{ + BHash: f.randomBlockHash(), + Key: f.randomAddrHash(), + FromLevel: uint(f.randomX(3)), + } + } + } + f.doFuzz(l.GetProofsV2Msg, req) + + case 5: + req := &l.GetHelperTrieProofsPacket{Reqs: make([]l.HelperTrieReq, f.randomInt(l.MaxHelperTrieProofsFetch+1))} + for i := range req.Reqs { + switch f.randomInt(3) { + case 0: + // Canonical hash trie + req.Reqs[i] = l.HelperTrieReq{ + Type: 0, + TrieIdx: f.randomX(3), + Key: f.randomCHTTrieKey(), + FromLevel: uint(f.randomX(3)), + AuxReq: uint(2), + } + case 1: + // Bloom trie + req.Reqs[i] = l.HelperTrieReq{ + Type: 1, + TrieIdx: f.randomX(3), + Key: f.randomBloomTrieKey(), + FromLevel: uint(f.randomX(3)), + AuxReq: 0, + } + default: + // Random trie + req.Reqs[i] = l.HelperTrieReq{ + Type: 2, + TrieIdx: f.randomX(3), + Key: f.randomCHTTrieKey(), + FromLevel: uint(f.randomX(3)), + AuxReq: 0, + } + } + } + f.doFuzz(l.GetHelperTrieProofsMsg, req) + + case 6: + req := &l.SendTxPacket{Txs: make([]*types.Transaction, f.randomInt(l.MaxTxSend+1))} + signer := types.HomesteadSigner{} + for i := range req.Txs { + var nonce uint64 + if f.randomBool() { + nonce = uint64(f.randomByte()) + } else { + nonce = f.nonce + f.nonce += 1 + } + req.Txs[i], _ = types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(10000), params.TxGas, big.NewInt(1000000000*int64(f.randomByte())), nil), signer, bankKey) + } + f.doFuzz(l.SendTxV2Msg, req) + + case 7: + req := &l.GetTxStatusPacket{Hashes: make([]common.Hash, f.randomInt(l.MaxTxStatus+1))} + for i := range req.Hashes { + req.Hashes[i] = f.randomTxHash() + } + f.doFuzz(l.GetTxStatusMsg, req) + } + } + return 0 +} diff --git a/tests/fuzzers/rangeproof/corpus/1c14030f26872e57bf1481084f151d71eed8161c-1 b/tests/fuzzers/rangeproof/corpus/1c14030f26872e57bf1481084f151d71eed8161c-1 new file mode 100644 index 0000000000..31c08bafaf Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/1c14030f26872e57bf1481084f151d71eed8161c-1 differ diff --git a/tests/fuzzers/rangeproof/corpus/27e54254422543060a13ea8a4bc913d768e4adb6-2 b/tests/fuzzers/rangeproof/corpus/27e54254422543060a13ea8a4bc913d768e4adb6-2 new file mode 100644 index 
0000000000..7bce13ef80 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/27e54254422543060a13ea8a4bc913d768e4adb6-2 differ diff --git a/tests/fuzzers/rangeproof/corpus/6bfc2cbe2d7a43361e240118439785445a0fdfb7-5 b/tests/fuzzers/rangeproof/corpus/6bfc2cbe2d7a43361e240118439785445a0fdfb7-5 new file mode 100644 index 0000000000..613e76a020 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/6bfc2cbe2d7a43361e240118439785445a0fdfb7-5 differ diff --git a/tests/fuzzers/rangeproof/corpus/a67e63bc0c0004bd009944a6061297cb7d4ac238-1 b/tests/fuzzers/rangeproof/corpus/a67e63bc0c0004bd009944a6061297cb7d4ac238-1 new file mode 100644 index 0000000000..805ad8df77 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/a67e63bc0c0004bd009944a6061297cb7d4ac238-1 differ diff --git a/tests/fuzzers/rangeproof/corpus/ae892bbae0a843950bc8316496e595b1a194c009-4 b/tests/fuzzers/rangeproof/corpus/ae892bbae0a843950bc8316496e595b1a194c009-4 new file mode 100644 index 0000000000..605acf81c1 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/ae892bbae0a843950bc8316496e595b1a194c009-4 differ diff --git a/tests/fuzzers/rangeproof/corpus/ee05d0d813f6261b3dba16506f9ea03d9c5e993d-2 b/tests/fuzzers/rangeproof/corpus/ee05d0d813f6261b3dba16506f9ea03d9c5e993d-2 new file mode 100644 index 0000000000..8f32dd775a Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/ee05d0d813f6261b3dba16506f9ea03d9c5e993d-2 differ diff --git a/tests/fuzzers/rangeproof/corpus/f50a6d57a46d30184aa294af5b252ab9701af7c9-2 b/tests/fuzzers/rangeproof/corpus/f50a6d57a46d30184aa294af5b252ab9701af7c9-2 new file mode 100644 index 0000000000..af96210f20 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/f50a6d57a46d30184aa294af5b252ab9701af7c9-2 differ diff --git a/tests/fuzzers/rangeproof/corpus/random.dat b/tests/fuzzers/rangeproof/corpus/random.dat new file mode 100644 index 0000000000..2c998ad812 Binary files /dev/null and b/tests/fuzzers/rangeproof/corpus/random.dat differ diff --git a/tests/fuzzers/rangeproof/debug/main.go b/tests/fuzzers/rangeproof/debug/main.go new file mode 100644 index 0000000000..a81c69fea5 --- /dev/null +++ b/tests/fuzzers/rangeproof/debug/main.go @@ -0,0 +1,41 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/ethereum/go-ethereum/tests/fuzzers/rangeproof" +) + +func main() { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: debug \n") + fmt.Fprintf(os.Stderr, "Example\n") + fmt.Fprintf(os.Stderr, " $ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n") + os.Exit(1) + } + crasher := os.Args[1] + data, err := ioutil.ReadFile(crasher) + if err != nil { + fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err) + os.Exit(1) + } + rangeproof.Fuzz(data) +} diff --git a/tests/fuzzers/rangeproof/rangeproof-fuzzer.go b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go new file mode 100644 index 0000000000..b82a380723 --- /dev/null +++ b/tests/fuzzers/rangeproof/rangeproof-fuzzer.go @@ -0,0 +1,218 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rangeproof + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "sort" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/trie" +) + +type kv struct { + k, v []byte + t bool +} + +type entrySlice []*kv + +func (p entrySlice) Len() int { return len(p) } +func (p entrySlice) Less(i, j int) bool { return bytes.Compare(p[i].k, p[j].k) < 0 } +func (p entrySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type fuzzer struct { + input io.Reader + exhausted bool +} + +func (f *fuzzer) randBytes(n int) []byte { + r := make([]byte, n) + if _, err := f.input.Read(r); err != nil { + f.exhausted = true + } + return r +} + +func (f *fuzzer) readInt() uint64 { + var x uint64 + if err := binary.Read(f.input, binary.LittleEndian, &x); err != nil { + f.exhausted = true + } + return x +} + +func (f *fuzzer) randomTrie(n int) (*trie.Trie, map[string]*kv) { + + trie := new(trie.Trie) + vals := make(map[string]*kv) + size := f.readInt() + // Fill it with some fluff + for i := byte(0); i < byte(size); i++ { + value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} + value2 := &kv{common.LeftPadBytes([]byte{i + 10}, 32), []byte{i}, false} + trie.Update(value.k, value.v) + trie.Update(value2.k, value2.v) + vals[string(value.k)] = value + vals[string(value2.k)] = value2 + } + if f.exhausted { + return nil, nil + } + // And now fill with some random + for i := 0; i < n; i++ { + k := f.randBytes(32) + v := f.randBytes(20) + value := &kv{k, v, false} + trie.Update(k, v) + vals[string(k)] = value + if f.exhausted { + return nil, nil + } + } + return trie, vals +} + +func (f *fuzzer) fuzz() int { + maxSize := 200 + tr, vals := f.randomTrie(1 + int(f.readInt())%maxSize) + if f.exhausted { + return 0 // input too short + } + var entries entrySlice + for _, kv := range vals { + entries = append(entries, kv) + } + if len(entries) <= 1 { + return 0 + } + sort.Sort(entries) + 
+ var ok = 0 + for { + start := int(f.readInt() % uint64(len(entries))) + end := 1 + int(f.readInt()%uint64(len(entries)-1)) + testcase := int(f.readInt() % uint64(6)) + index := int(f.readInt() & 0xFFFFFFFF) + index2 := int(f.readInt() & 0xFFFFFFFF) + if f.exhausted { + break + } + proof := memorydb.New() + if err := tr.Prove(entries[start].k, 0, proof); err != nil { + panic(fmt.Sprintf("Failed to prove the first node %v", err)) + } + if err := tr.Prove(entries[end-1].k, 0, proof); err != nil { + panic(fmt.Sprintf("Failed to prove the last node %v", err)) + } + var keys [][]byte + var vals [][]byte + for i := start; i < end; i++ { + keys = append(keys, entries[i].k) + vals = append(vals, entries[i].v) + } + if len(keys) == 0 { + return 0 + } + var first, last = keys[0], keys[len(keys)-1] + testcase %= 6 + switch testcase { + case 0: + // Modified key + keys[index%len(keys)] = f.randBytes(32) // In theory it can't be same + case 1: + // Modified val + vals[index%len(vals)] = f.randBytes(20) // In theory it can't be same + case 2: + // Gapped entry slice + index = index % len(keys) + keys = append(keys[:index], keys[index+1:]...) + vals = append(vals[:index], vals[index+1:]...) + case 3: + // Out of order + index1 := index % len(keys) + index2 := index2 % len(keys) + keys[index1], keys[index2] = keys[index2], keys[index1] + vals[index1], vals[index2] = vals[index2], vals[index1] + case 4: + // Set random key to nil, do nothing + keys[index%len(keys)] = nil + case 5: + // Set random value to nil, deletion + vals[index%len(vals)] = nil + + // Other cases: + // Modify something in the proof db + // add stuff to proof db + // drop stuff from proof db + + } + if f.exhausted { + break + } + ok = 1 + //nodes, subtrie + nodes, subtrie, notary, hasMore, err := trie.VerifyRangeProof(tr.Hash(), first, last, keys, vals, proof) + if err != nil { + if nodes != nil { + panic("err != nil && nodes != nil") + } + if subtrie != nil { + panic("err != nil && subtrie != nil") + } + if notary != nil { + panic("err != nil && notary != nil") + } + if hasMore { + panic("err != nil && hasMore == true") + } + } else { + if nodes == nil { + panic("err == nil && nodes == nil") + } + if subtrie == nil { + panic("err == nil && subtrie == nil") + } + if notary == nil { + panic("err == nil && subtrie == nil") + } + } + } + return ok +} + +// The function must return +// 1 if the fuzzer should increase priority of the +// given input during subsequent fuzzing (for example, the input is lexically +// correct and was parsed successfully); +// -1 if the input must not be added to corpus even if gives new coverage; and +// 0 otherwise; other values are reserved for future use. 
+func Fuzz(input []byte) int { + if len(input) < 100 { + return 0 + } + r := bytes.NewReader(input) + f := fuzzer{ + input: r, + exhausted: false, + } + return f.fuzz() +} diff --git a/tests/fuzzers/rlp/rlp_fuzzer.go b/tests/fuzzers/rlp/rlp_fuzzer.go index 534540476c..18b36287b5 100644 --- a/tests/fuzzers/rlp/rlp_fuzzer.go +++ b/tests/fuzzers/rlp/rlp_fuzzer.go @@ -37,17 +37,17 @@ func decodeEncode(input []byte, val interface{}, i int) { } func Fuzz(input []byte) int { + if len(input) == 0 { + return 0 + } + var i int { - if len(input) > 0 { - rlp.Split(input) - } + rlp.Split(input) } { - if len(input) > 0 { - if elems, _, err := rlp.SplitList(input); err == nil { - rlp.CountValues(elems) - } + if elems, _, err := rlp.SplitList(input); err == nil { + rlp.CountValues(elems) } } @@ -123,5 +123,5 @@ func Fuzz(input []byte) int { var rs types.Receipts decodeEncode(input, &rs, i) } - return 0 + return 1 } diff --git a/core/vm/runtime/fuzz.go b/tests/fuzzers/runtime/runtime_fuzz.go similarity index 82% rename from core/vm/runtime/fuzz.go rename to tests/fuzzers/runtime/runtime_fuzz.go index cb9ff08b5b..9b96045752 100644 --- a/core/vm/runtime/fuzz.go +++ b/tests/fuzzers/runtime/runtime_fuzz.go @@ -14,23 +14,23 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -// +build gofuzz - package runtime +import ( + "github.com/ethereum/go-ethereum/core/vm/runtime" +) + // Fuzz is the basic entry point for the go-fuzz tool // // This returns 1 for valid parsable/runable code, 0 // for invalid opcode. func Fuzz(input []byte) int { - _, _, err := Execute(input, input, &Config{ - GasLimit: 3000000, + _, _, err := runtime.Execute(input, input, &runtime.Config{ + GasLimit: 12000000, }) - // invalid opcode - if err != nil && len(err.Error()) > 6 && string(err.Error()[:7]) == "invalid" { + if err != nil && len(err.Error()) > 6 && err.Error()[:7] == "invalid" { return 0 } - return 1 } diff --git a/tests/fuzzers/stacktrie/trie_fuzzer.go b/tests/fuzzers/stacktrie/trie_fuzzer.go index a072ff772d..5cea7769c2 100644 --- a/tests/fuzzers/stacktrie/trie_fuzzer.go +++ b/tests/fuzzers/stacktrie/trie_fuzzer.go @@ -148,6 +148,8 @@ func (f *fuzzer) fuzz() int { vals kvs useful bool maxElements = 10000 + // operate on unique keys only + keys = make(map[string]struct{}) ) // Fill the trie with elements for i := 0; !f.exhausted && i < maxElements; i++ { @@ -158,6 +160,11 @@ func (f *fuzzer) fuzz() int { // thus 'deletion' which is not supported on stacktrie break } + if _, present := keys[string(k)]; present { + // This key is a duplicate, ignore it + continue + } + keys[string(k)] = struct{}{} vals = append(vals, kv{k: k, v: v}) trieA.Update(k, v) useful = true diff --git a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go index 10c7eb9424..d1d6fdc665 100644 --- a/tests/fuzzers/txfetcher/txfetcher_fuzzer.go +++ b/tests/fuzzers/txfetcher/txfetcher_fuzzer.go @@ -51,8 +51,9 @@ func init() { func Fuzz(input []byte) int { // Don't generate insanely large test cases, not much value in them if len(input) > 16*1024 { - return -1 + return 0 } + verbose := false r := bytes.NewReader(input) // Reduce the problem space for certain fuzz runs. 
Small tx space is better @@ -124,7 +125,9 @@ func Fuzz(input []byte) int { announceIdxs[i] = (int(annBuf[0])*256 + int(annBuf[1])) % len(txs) announces[i] = txs[announceIdxs[i]].Hash() } - fmt.Println("Notify", peer, announceIdxs) + if verbose { + fmt.Println("Notify", peer, announceIdxs) + } if err := f.Notify(peer, announces); err != nil { panic(err) } @@ -163,8 +166,9 @@ func Fuzz(input []byte) int { return 0 } direct := (directFlag % 2) == 0 - - fmt.Println("Enqueue", peer, deliverIdxs, direct) + if verbose { + fmt.Println("Enqueue", peer, deliverIdxs, direct) + } if err := f.Enqueue(peer, deliveries, direct); err != nil { panic(err) } @@ -177,8 +181,9 @@ func Fuzz(input []byte) int { return 0 } peer := peers[int(peerIdx)%len(peers)] - - fmt.Println("Drop", peer) + if verbose { + fmt.Println("Drop", peer) + } if err := f.Drop(peer); err != nil { panic(err) } @@ -191,8 +196,9 @@ func Fuzz(input []byte) int { return 0 } tick := time.Duration(tickCnt) * 100 * time.Millisecond - - fmt.Println("Sleep", tick) + if verbose { + fmt.Println("Sleep", tick) + } clock.Run(tick) } } diff --git a/tests/gen_stenv.go b/tests/gen_stenv.go index 1d4baf2fd7..bfecc145b4 100644 --- a/tests/gen_stenv.go +++ b/tests/gen_stenv.go @@ -13,6 +13,7 @@ import ( var _ = (*stEnvMarshaling)(nil) +// MarshalJSON marshals as JSON. func (s stEnv) MarshalJSON() ([]byte, error) { type stEnv struct { Coinbase common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` @@ -30,6 +31,7 @@ func (s stEnv) MarshalJSON() ([]byte, error) { return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. func (s *stEnv) UnmarshalJSON(input []byte) error { type stEnv struct { Coinbase *common.UnprefixedAddress `json:"currentCoinbase" gencodec:"required"` diff --git a/tests/gen_sttransaction.go b/tests/gen_sttransaction.go index 451ffcbf43..2670f4f9c8 100644 --- a/tests/gen_sttransaction.go +++ b/tests/gen_sttransaction.go @@ -8,25 +8,29 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/types" ) var _ = (*stTransactionMarshaling)(nil) +// MarshalJSON marshals as JSON. func (s stTransaction) MarshalJSON() ([]byte, error) { type stTransaction struct { - GasPrice *math.HexOrDecimal256 `json:"gasPrice"` - Nonce math.HexOrDecimal64 `json:"nonce"` - To string `json:"to"` - Data []string `json:"data"` - GasLimit []math.HexOrDecimal64 `json:"gasLimit"` - Value []string `json:"value"` - PrivateKey hexutil.Bytes `json:"secretKey"` + GasPrice *math.HexOrDecimal256 `json:"gasPrice"` + Nonce math.HexOrDecimal64 `json:"nonce"` + To string `json:"to"` + Data []string `json:"data"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` + GasLimit []math.HexOrDecimal64 `json:"gasLimit"` + Value []string `json:"value"` + PrivateKey hexutil.Bytes `json:"secretKey"` } var enc stTransaction enc.GasPrice = (*math.HexOrDecimal256)(s.GasPrice) enc.Nonce = math.HexOrDecimal64(s.Nonce) enc.To = s.To enc.Data = s.Data + enc.AccessLists = s.AccessLists if s.GasLimit != nil { enc.GasLimit = make([]math.HexOrDecimal64, len(s.GasLimit)) for k, v := range s.GasLimit { @@ -38,15 +42,17 @@ func (s stTransaction) MarshalJSON() ([]byte, error) { return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. 
func (s *stTransaction) UnmarshalJSON(input []byte) error { type stTransaction struct { - GasPrice *math.HexOrDecimal256 `json:"gasPrice"` - Nonce *math.HexOrDecimal64 `json:"nonce"` - To *string `json:"to"` - Data []string `json:"data"` - GasLimit []math.HexOrDecimal64 `json:"gasLimit"` - Value []string `json:"value"` - PrivateKey *hexutil.Bytes `json:"secretKey"` + GasPrice *math.HexOrDecimal256 `json:"gasPrice"` + Nonce *math.HexOrDecimal64 `json:"nonce"` + To *string `json:"to"` + Data []string `json:"data"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` + GasLimit []math.HexOrDecimal64 `json:"gasLimit"` + Value []string `json:"value"` + PrivateKey *hexutil.Bytes `json:"secretKey"` } var dec stTransaction if err := json.Unmarshal(input, &dec); err != nil { @@ -64,6 +70,9 @@ func (s *stTransaction) UnmarshalJSON(input []byte) error { if dec.Data != nil { s.Data = dec.Data } + if dec.AccessLists != nil { + s.AccessLists = dec.AccessLists + } if dec.GasLimit != nil { s.GasLimit = make([]uint64, len(dec.GasLimit)) for k, v := range dec.GasLimit { diff --git a/tests/gen_vmexec.go b/tests/gen_vmexec.go index a5f01cf456..2fe155152d 100644 --- a/tests/gen_vmexec.go +++ b/tests/gen_vmexec.go @@ -14,6 +14,7 @@ import ( var _ = (*vmExecMarshaling)(nil) +// MarshalJSON marshals as JSON. func (v vmExec) MarshalJSON() ([]byte, error) { type vmExec struct { Address common.UnprefixedAddress `json:"address" gencodec:"required"` @@ -37,6 +38,7 @@ func (v vmExec) MarshalJSON() ([]byte, error) { return json.Marshal(&enc) } +// UnmarshalJSON unmarshals from JSON. func (v *vmExec) UnmarshalJSON(input []byte) error { type vmExec struct { Address *common.UnprefixedAddress `json:"address" gencodec:"required"` diff --git a/tests/init.go b/tests/init.go index 607c69ddb3..67f706eb50 100644 --- a/tests/init.go +++ b/tests/init.go @@ -141,7 +141,7 @@ var Forks = map[string]*params.ChainConfig{ PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(5), }, - "YOLOv2": { + "YOLOv3": { ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), EIP150Block: big.NewInt(0), @@ -151,9 +151,9 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - YoloV2Block: big.NewInt(0), + YoloV3Block: big.NewInt(0), }, - // This specification is subject to change, but is for now identical to YOLOv2 + // This specification is subject to change, but is for now identical to YOLOv3 // for cross-client testing purposes "Berlin": { ChainID: big.NewInt(1), @@ -165,7 +165,7 @@ var Forks = map[string]*params.ChainConfig{ ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(0), - YoloV2Block: big.NewInt(0), + BerlinBlock: big.NewInt(0), }, } diff --git a/tests/state_test_util.go b/tests/state_test_util.go index 28a5313129..46834de6da 100644 --- a/tests/state_test_util.go +++ b/tests/state_test_util.go @@ -24,14 +24,13 @@ import ( "strconv" "strings" - "github.com/ethereum/go-ethereum/core/state/snapshot" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/state/snapshot" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/core/vm" "github.com/ethereum/go-ethereum/crypto" @@ -96,13 +95,14 @@ type stEnvMarshaling struct { 
//go:generate gencodec -type stTransaction -field-override stTransactionMarshaling -out gen_sttransaction.go type stTransaction struct { - GasPrice *big.Int `json:"gasPrice"` - Nonce uint64 `json:"nonce"` - To string `json:"to"` - Data []string `json:"data"` - GasLimit []uint64 `json:"gasLimit"` - Value []string `json:"value"` - PrivateKey []byte `json:"secretKey"` + GasPrice *big.Int `json:"gasPrice"` + Nonce uint64 `json:"nonce"` + To string `json:"to"` + Data []string `json:"data"` + AccessLists []*types.AccessList `json:"accessLists,omitempty"` + GasLimit []uint64 `json:"gasLimit"` + Value []string `json:"value"` + PrivateKey []byte `json:"secretKey"` } type stTransactionMarshaling struct { @@ -182,26 +182,21 @@ func (t *StateTest) RunNoVerify(subtest StateSubtest, vmconfig vm.Config, snapsh if err != nil { return nil, nil, common.Hash{}, err } - context := core.NewEVMContext(msg, block.Header(), nil, &t.json.Env.Coinbase) + + // Prepare the EVM. + txContext := core.NewEVMTxContext(msg) + context := core.NewEVMBlockContext(block.Header(), nil, &t.json.Env.Coinbase) context.GetHash = vmTestBlockHash - evm := vm.NewEVM(context, statedb, config, vmconfig) + evm := vm.NewEVM(context, txContext, statedb, config, vmconfig) - if config.IsYoloV2(context.BlockNumber) { - statedb.AddAddressToAccessList(msg.From()) - if dst := msg.To(); dst != nil { - statedb.AddAddressToAccessList(*dst) - // If it's a create-tx, the destination will be added inside evm.create - } - for _, addr := range evm.ActivePrecompiles() { - statedb.AddAddressToAccessList(addr) - } - } + // Execute the message. + snapshot := statedb.Snapshot() gaspool := new(core.GasPool) gaspool.AddGas(block.GasLimit()) - snapshot := statedb.Snapshot() if _, err := core.ApplyMessage(evm, msg, gaspool); err != nil { statedb.RevertToSnapshot(snapshot) } + // Commit block statedb.Commit(config.IsEIP158(block.Number())) // Add 0-value mining reward. 
This only makes a difference in the cases @@ -235,7 +230,7 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo var snaps *snapshot.Tree if snapshotter { - snaps = snapshot.New(db, sdb.TrieDB(), 1, root, false, false) + snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, root, false, true, false) } statedb, _ = state.New(root, sdb, snaps) return snaps, statedb @@ -298,8 +293,11 @@ func (tx *stTransaction) toMessage(ps stPostState) (core.Message, error) { if err != nil { return nil, fmt.Errorf("invalid tx data %q", dataHex) } - - msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, tx.GasPrice, data, true) + var accessList types.AccessList + if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil { + accessList = *tx.AccessLists[ps.Indexes.Data] + } + msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, tx.GasPrice, data, accessList, true) return msg, nil } diff --git a/tests/testdata b/tests/testdata index 7497b116a0..31d663076b 160000 --- a/tests/testdata +++ b/tests/testdata @@ -1 +1 @@ -Subproject commit 7497b116a019beb26215cbea4028df068dea06be +Subproject commit 31d663076b6678df18983d6da912d7cad4ad3416 diff --git a/tests/transaction_test_util.go b/tests/transaction_test_util.go index aea90535c3..82ee01de15 100644 --- a/tests/transaction_test_util.go +++ b/tests/transaction_test_util.go @@ -55,7 +55,7 @@ func (tt *TransactionTest) Run(config *params.ChainConfig) error { return nil, nil, err } // Intrinsic gas - requiredGas, err := core.IntrinsicGas(tx.Data(), tx.To() == nil, isHomestead, isIstanbul) + requiredGas, err := core.IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, isHomestead, isIstanbul) if err != nil { return nil, nil, err } diff --git a/tests/vm_test_util.go b/tests/vm_test_util.go index ad124b7b25..418cc67168 100644 --- a/tests/vm_test_util.go +++ b/tests/vm_test_util.go @@ -138,20 +138,22 @@ func (t *VMTest) newEVM(statedb *state.StateDB, vmconfig vm.Config) *vm.EVM { return core.CanTransfer(db, address, amount) } transfer := func(db vm.StateDB, sender, recipient common.Address, amount *big.Int) {} - context := vm.Context{ + txContext := vm.TxContext{ + Origin: t.json.Exec.Origin, + GasPrice: t.json.Exec.GasPrice, + } + context := vm.BlockContext{ CanTransfer: canTransfer, Transfer: transfer, GetHash: vmTestBlockHash, - Origin: t.json.Exec.Origin, Coinbase: t.json.Env.Coinbase, BlockNumber: new(big.Int).SetUint64(t.json.Env.Number), Time: new(big.Int).SetUint64(t.json.Env.Timestamp), GasLimit: t.json.Env.GasLimit, Difficulty: t.json.Env.Difficulty, - GasPrice: t.json.Exec.GasPrice, } vmconfig.NoRecursion = true - return vm.NewEVM(context, statedb, params.MainnetChainConfig, vmconfig) + return vm.NewEVM(context, txContext, statedb, params.MainnetChainConfig, vmconfig) } func vmTestBlockHash(n uint64) common.Hash { diff --git a/trie/committer.go b/trie/committer.go index 20c95bed08..33fd9e9823 100644 --- a/trie/committer.go +++ b/trie/committer.go @@ -267,6 +267,5 @@ func estimateSize(n node) int { return 1 + len(n) default: panic(fmt.Sprintf("node type %T", n)) - } } diff --git a/trie/database.go b/trie/database.go index c0c8870f8f..b18665770e 100644 --- a/trie/database.go +++ b/trie/database.go @@ -100,7 +100,7 @@ func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } func (n rawNode) EncodeRLP(w io.Writer) error { - _, err := w.Write([]byte(n)) + _, err := w.Write(n) return err } @@ -272,33 
+272,43 @@ func expandNode(hash hashNode, n node) node { } } +// Config defines all necessary options for database. +type Config struct { + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Journal string // Journal of clean cache to survive node restarts + Preimages bool // Flag whether the preimage of trie key is recorded +} + // NewDatabase creates a new trie database to store ephemeral trie content before // its written out to disk or garbage collected. No read cache is created, so all // data retrievals will hit the underlying disk database. func NewDatabase(diskdb ethdb.KeyValueStore) *Database { - return NewDatabaseWithCache(diskdb, 0, "") + return NewDatabaseWithConfig(diskdb, nil) } -// NewDatabaseWithCache creates a new trie database to store ephemeral trie content +// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content // before its written out to disk or garbage collected. It also acts as a read cache // for nodes loaded from disk. -func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int, journal string) *Database { +func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { var cleans *fastcache.Cache - if cache > 0 { - if journal == "" { - cleans = fastcache.New(cache * 1024 * 1024) + if config != nil && config.Cache > 0 { + if config.Journal == "" { + cleans = fastcache.New(config.Cache * 1024 * 1024) } else { - cleans = fastcache.LoadFromFileOrNew(journal, cache*1024*1024) + cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) } } - return &Database{ + db := &Database{ diskdb: diskdb, cleans: cleans, dirties: map[common.Hash]*cachedNode{{}: { children: make(map[common.Hash]uint16), }}, - preimages: make(map[common.Hash][]byte), } + if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future + db.preimages = make(map[common.Hash][]byte) + } + return db } // DiskDB retrieves the persistent storage backing the trie database. @@ -345,6 +355,11 @@ func (db *Database) insert(hash common.Hash, size int, node node) { // // Note, this method assumes that the database's lock is held! func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { + // Short circuit if preimage collection is disabled + if db.preimages == nil { + return + } + // Track the preimage if a yet unknown one if _, ok := db.preimages[hash]; ok { return } @@ -431,6 +446,10 @@ func (db *Database) Node(hash common.Hash) ([]byte, error) { // preimage retrieves a cached trie node pre-image from memory. If it cannot be // found cached, the method queries the persistent database for the content. func (db *Database) preimage(hash common.Hash) []byte { + // Short circuit if preimage collection is disabled + if db.preimages == nil { + return nil + } // Retrieve the node from cache if available db.lock.RLock() preimage := db.preimages[hash] @@ -588,12 +607,16 @@ func (db *Database) Cap(limit common.StorageSize) error { // leave for later to deduplicate writes. 
flushPreimages := db.preimagesSize > 4*1024*1024 if flushPreimages { - rawdb.WritePreimages(batch, db.preimages) - if batch.ValueSize() > ethdb.IdealBatchSize { - if err := batch.Write(); err != nil { - return err + if db.preimages == nil { + log.Error("Attempted to write preimages whilst disabled") + } else { + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return err + } + batch.Reset() } - batch.Reset() } } // Keep committing nodes from the flush-list until we're below allowance @@ -630,7 +653,11 @@ func (db *Database) Cap(limit common.StorageSize) error { defer db.lock.Unlock() if flushPreimages { - db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 + if db.preimages == nil { + log.Error("Attempted to reset preimage cache whilst disabled") + } else { + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 + } } for db.oldest != oldest { node := db.dirties[db.oldest] @@ -674,20 +701,21 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H batch := db.diskdb.NewBatch() // Move all of the accumulated preimages into a write batch - rawdb.WritePreimages(batch, db.preimages) - if batch.ValueSize() > ethdb.IdealBatchSize { + if db.preimages != nil { + rawdb.WritePreimages(batch, db.preimages) + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + } + // Since we're going to replay trie node writes into the clean cache, flush out + // any batched pre-images before continuing. if err := batch.Write(); err != nil { return err } batch.Reset() } - // Since we're going to replay trie node writes into the clean cache, flush out - // any batched pre-images before continuing. - if err := batch.Write(); err != nil { - return err - } - batch.Reset() - // Move the trie itself into the batch, flushing if enough data is accumulated nodes, storage := len(db.dirties), db.dirtiesSize @@ -708,9 +736,10 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H batch.Replay(uncacher) batch.Reset() - // Reset the storage counters and bumpd metrics - db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 - + // Reset the storage counters and bumped metrics + if db.preimages != nil { + db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0 + } memcacheCommitTimeTimer.Update(time.Since(start)) memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) diff --git a/trie/iterator.go b/trie/iterator.go index bb4025d8f3..76d437c403 100644 --- a/trie/iterator.go +++ b/trie/iterator.go @@ -173,7 +173,7 @@ func (it *nodeIterator) LeafKey() []byte { func (it *nodeIterator) LeafBlob() []byte { if len(it.stack) > 0 { if node, ok := it.stack[len(it.stack)-1].node.(valueNode); ok { - return []byte(node) + return node } } panic("not at leaf") diff --git a/trie/notary.go b/trie/notary.go new file mode 100644 index 0000000000..5a64727aa7 --- /dev/null +++ b/trie/notary.go @@ -0,0 +1,57 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package trie + +import ( + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/ethdb/memorydb" +) + +// KeyValueNotary tracks which keys have been accessed through a key-value reader +// with the scope of verifying if certain proof datasets are maliciously bloated. +type KeyValueNotary struct { + ethdb.KeyValueReader + reads map[string]struct{} +} + +// NewKeyValueNotary wraps a key-value database with an access notary to track +// which items have been accessed. +func NewKeyValueNotary(db ethdb.KeyValueReader) *KeyValueNotary { + return &KeyValueNotary{ + KeyValueReader: db, + reads: make(map[string]struct{}), + } +} + +// Get retrieves an item from the underlying database, but also tracks it as an +// accessed slot for bloat checks. +func (k *KeyValueNotary) Get(key []byte) ([]byte, error) { + k.reads[string(key)] = struct{}{} + return k.KeyValueReader.Get(key) +} + +// Accessed returns a snapshot of the original key-value store containing only the +// data accessed through the notary. +func (k *KeyValueNotary) Accessed() ethdb.KeyValueStore { + db := memorydb.New() + for keystr := range k.reads { + key := []byte(keystr) + val, _ := k.KeyValueReader.Get(key) + db.Put(key, val) + } + return db +} diff --git a/trie/proof.go b/trie/proof.go index 2f52438f98..61c35a8423 100644 --- a/trie/proof.go +++ b/trie/proof.go @@ -216,7 +216,7 @@ func proofToPath(rootHash common.Hash, root node, key []byte, proofDb ethdb.KeyV // // Note we have the assumption here the given boundary keys are different // and right is larger than left. -func unsetInternal(n node, left []byte, right []byte) error { +func unsetInternal(n node, left []byte, right []byte) (bool, error) { left, right = keybytesToHex(left), keybytesToHex(right) // Step down to the fork point. There are two scenarios can happen: @@ -278,45 +278,55 @@ findFork: // - left proof points to the shortnode, but right proof is greater // - right proof points to the shortnode, but left proof is less if shortForkLeft == -1 && shortForkRight == -1 { - return errors.New("empty range") + return false, errors.New("empty range") } if shortForkLeft == 1 && shortForkRight == 1 { - return errors.New("empty range") + return false, errors.New("empty range") } if shortForkLeft != 0 && shortForkRight != 0 { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } parent.(*fullNode).Children[left[pos-1]] = nil - return nil + return false, nil } // Only one proof points to non-existent key. if shortForkRight != 0 { - // Unset left proof's path if _, ok := rn.Val.(valueNode); ok { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } parent.(*fullNode).Children[left[pos-1]] = nil - return nil + return false, nil } - return unset(rn, rn.Val, left[pos:], len(rn.Key), false) + return false, unset(rn, rn.Val, left[pos:], len(rn.Key), false) } if shortForkLeft != 0 { - // Unset right proof's path.
if _, ok := rn.Val.(valueNode); ok { + // The fork point is root node, unset the entire trie + if parent == nil { + return true, nil + } parent.(*fullNode).Children[right[pos-1]] = nil - return nil + return false, nil } - return unset(rn, rn.Val, right[pos:], len(rn.Key), true) + return false, unset(rn, rn.Val, right[pos:], len(rn.Key), true) } - return nil + return false, nil case *fullNode: // unset all internal nodes in the forkpoint for i := left[pos] + 1; i < right[pos]; i++ { rn.Children[i] = nil } if err := unset(rn, rn.Children[left[pos]], left[pos:], 1, false); err != nil { - return err + return false, err } if err := unset(rn, rn.Children[right[pos]], right[pos:], 1, true); err != nil { - return err + return false, err } - return nil + return false, nil default: panic(fmt.Sprintf("%T: invalid node: %v", n, n)) } @@ -426,7 +436,7 @@ func hasRightElement(node node, key []byte) bool { // VerifyRangeProof checks whether the given leaf nodes and edge proof // can prove the given trie leaves range is matched with the specific root. -// Besides, the range should be consecutive(no gap inside) and monotonic +// Besides, the range should be consecutive (no gap inside) and monotonic // increasing. // // Note the given proof actually contains two edge proofs. Both of them can @@ -454,96 +464,140 @@ func hasRightElement(node node, key []byte) bool { // // Except returning the error to indicate the proof is valid or not, the function will // also return a flag to indicate whether there exists more accounts/slots in the trie. -func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (error, bool) { +func VerifyRangeProof(rootHash common.Hash, firstKey []byte, lastKey []byte, keys [][]byte, values [][]byte, proof ethdb.KeyValueReader) (ethdb.KeyValueStore, *Trie, *KeyValueNotary, bool, error) { if len(keys) != len(values) { - return fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values)), false + return nil, nil, nil, false, fmt.Errorf("inconsistent proof data, keys: %d, values: %d", len(keys), len(values)) } // Ensure the received batch is monotonic increasing. for i := 0; i < len(keys)-1; i++ { if bytes.Compare(keys[i], keys[i+1]) >= 0 { - return errors.New("range is not monotonically increasing"), false + return nil, nil, nil, false, errors.New("range is not monotonically increasing") } } + // Create a key-value notary to track which items from the given proof the + // range prover actually needed to verify the data + notary := NewKeyValueNotary(proof) + // Special case, there is no edge proof at all. The given range is expected // to be the whole leaf-set in the trie. 
if proof == nil { - emptytrie, err := New(common.Hash{}, NewDatabase(memorydb.New())) + var ( + diskdb = memorydb.New() + triedb = NewDatabase(diskdb) + ) + tr, err := New(common.Hash{}, triedb) if err != nil { - return err, false + return nil, nil, nil, false, err } for index, key := range keys { - emptytrie.TryUpdate(key, values[index]) + tr.TryUpdate(key, values[index]) + } + if tr.Hash() != rootHash { + return nil, nil, nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) + } + // Proof seems valid, serialize all the nodes into the database + if _, err := tr.Commit(nil); err != nil { + return nil, nil, nil, false, err } - if emptytrie.Hash() != rootHash { - return fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, emptytrie.Hash()), false + if err := triedb.Commit(rootHash, false, nil); err != nil { + return nil, nil, nil, false, err } - return nil, false // no more element. + return diskdb, tr, notary, false, nil // No more elements } // Special case, there is a provided edge proof but zero key/value // pairs, ensure there are no more accounts / slots in the trie. if len(keys) == 0 { - root, val, err := proofToPath(rootHash, nil, firstKey, proof, true) + root, val, err := proofToPath(rootHash, nil, firstKey, notary, true) if err != nil { - return err, false + return nil, nil, nil, false, err } if val != nil || hasRightElement(root, firstKey) { - return errors.New("more entries available"), false + return nil, nil, nil, false, errors.New("more entries available") } - return nil, false + // Since the entire proof is a single path, we can construct a trie and a + // node database directly out of the inputs, no need to generate them + diskdb := notary.Accessed() + tr := &Trie{ + db: NewDatabase(diskdb), + root: root, + } + return diskdb, tr, notary, hasRightElement(root, firstKey), nil } // Special case, there is only one element and two edge keys are same. // In this case, we can't construct two edge paths. So handle it here. if len(keys) == 1 && bytes.Equal(firstKey, lastKey) { - root, val, err := proofToPath(rootHash, nil, firstKey, proof, false) + root, val, err := proofToPath(rootHash, nil, firstKey, notary, false) if err != nil { - return err, false + return nil, nil, nil, false, err } if !bytes.Equal(firstKey, keys[0]) { - return errors.New("correct proof but invalid key"), false + return nil, nil, nil, false, errors.New("correct proof but invalid key") } if !bytes.Equal(val, values[0]) { - return errors.New("correct proof but invalid data"), false + return nil, nil, nil, false, errors.New("correct proof but invalid data") + } + // Since the entire proof is a single path, we can construct a trie and a + // node database directly out of the inputs, no need to generate them + diskdb := notary.Accessed() + tr := &Trie{ + db: NewDatabase(diskdb), + root: root, } - return nil, hasRightElement(root, firstKey) + return diskdb, tr, notary, hasRightElement(root, firstKey), nil } // Ok, in all other cases, we require two edge paths available. // First check the validity of edge keys. if bytes.Compare(firstKey, lastKey) >= 0 { - return errors.New("invalid edge keys"), false + return nil, nil, nil, false, errors.New("invalid edge keys") } // todo(rjl493456442) different length edge keys should be supported if len(firstKey) != len(lastKey) { - return errors.New("inconsistent edge keys"), false + return nil, nil, nil, false, errors.New("inconsistent edge keys") } // Convert the edge proofs to edge trie paths. 
Then we can // have the same tree architecture with the original one. // For the first edge proof, non-existent proof is allowed. - root, _, err := proofToPath(rootHash, nil, firstKey, proof, true) + root, _, err := proofToPath(rootHash, nil, firstKey, notary, true) if err != nil { - return err, false + return nil, nil, nil, false, err } // Pass the root node here, the second path will be merged // with the first one. For the last edge proof, non-existent // proof is also allowed. - root, _, err = proofToPath(rootHash, root, lastKey, proof, true) + root, _, err = proofToPath(rootHash, root, lastKey, notary, true) if err != nil { - return err, false + return nil, nil, nil, false, err } // Remove all internal references. All the removed parts should // be re-filled(or re-constructed) by the given leaves range. - if err := unsetInternal(root, firstKey, lastKey); err != nil { - return err, false + empty, err := unsetInternal(root, firstKey, lastKey) + if err != nil { + return nil, nil, nil, false, err } - // Rebuild the trie with the leave stream, the shape of trie + // Rebuild the trie with the leaf stream, the shape of trie // should be same with the original one. - newtrie := &Trie{root: root, db: NewDatabase(memorydb.New())} + var ( + diskdb = memorydb.New() + triedb = NewDatabase(diskdb) + ) + tr := &Trie{root: root, db: triedb} + if empty { + tr.root = nil + } for index, key := range keys { - newtrie.TryUpdate(key, values[index]) + tr.TryUpdate(key, values[index]) + } + if tr.Hash() != rootHash { + return nil, nil, nil, false, fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, tr.Hash()) + } + // Proof seems valid, serialize all the nodes into the database + if _, err := tr.Commit(nil); err != nil { + return nil, nil, nil, false, err } - if newtrie.Hash() != rootHash { - return fmt.Errorf("invalid proof, want hash %x, got %x", rootHash, newtrie.Hash()), false + if err := triedb.Commit(rootHash, false, nil); err != nil { + return nil, nil, nil, false, err } - return nil, hasRightElement(root, keys[len(keys)-1]) + return diskdb, tr, notary, hasRightElement(root, keys[len(keys)-1]), nil } // get returns the child of the given node. 
Return nil if the diff --git a/trie/proof_test.go b/trie/proof_test.go index 6cdc242d9a..304affa9f2 100644 --- a/trie/proof_test.go +++ b/trie/proof_test.go @@ -19,6 +19,7 @@ package trie import ( "bytes" crand "crypto/rand" + "encoding/binary" mrand "math/rand" "sort" "testing" @@ -181,7 +182,7 @@ func TestRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) if err != nil { t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err) } @@ -232,7 +233,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof) if err != nil { t.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err) } @@ -253,7 +254,7 @@ func TestRangeProofWithNonExistentProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), first, last, k, v, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), first, last, k, v, proof) if err != nil { t.Fatal("Failed to verify whole rang with non-existent edges") } @@ -288,7 +289,7 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), first, k[len(k)-1], k, v, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), first, k[len(k)-1], k, v, proof) if err == nil { t.Fatalf("Expected to detect the error, got nil") } @@ -310,7 +311,7 @@ func TestRangeProofWithInvalidNonExistentProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ = VerifyRangeProof(trie.Hash(), k[0], last, k, v, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), k[0], last, k, v, proof) if err == nil { t.Fatalf("Expected to detect the error, got nil") } @@ -334,7 +335,7 @@ func TestOneElementRangeProof(t *testing.T) { if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - err, _ := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), entries[start].k, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -349,7 +350,7 @@ func TestOneElementRangeProof(t *testing.T) { if err := trie.Prove(entries[start].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), first, entries[start].k, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -364,7 +365,7 @@ func TestOneElementRangeProof(t *testing.T) { if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), 
entries[start].k, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -379,7 +380,26 @@ func TestOneElementRangeProof(t *testing.T) { if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[start].k}, [][]byte{entries[start].v}, proof) + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + + // Test the mini trie with only a single element. + tinyTrie := new(Trie) + entry := &kv{randBytes(32), randBytes(20), false} + tinyTrie.Update(entry.k, entry.v) + + first = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000").Bytes() + last = entry.k + proof = memorydb.New() + if err := tinyTrie.Prove(first, 0, proof); err != nil { + t.Fatalf("Failed to prove the first node %v", err) + } + if err := tinyTrie.Prove(last, 0, proof); err != nil { + t.Fatalf("Failed to prove the last node %v", err) + } + _, _, _, _, err = VerifyRangeProof(tinyTrie.Hash(), first, last, [][]byte{entry.k}, [][]byte{entry.v}, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -401,7 +421,7 @@ func TestAllElementsProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), nil, nil, k, v, nil) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), nil, nil, k, v, nil) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -414,7 +434,7 @@ func TestAllElementsProof(t *testing.T) { if err := trie.Prove(entries[len(entries)-1].k, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), k[0], k[len(k)-1], k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -429,7 +449,7 @@ func TestAllElementsProof(t *testing.T) { if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), first, last, k, v, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), first, last, k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -462,7 +482,7 @@ func TestSingleSideRangeProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k[len(k)-1], k, v, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), common.Hash{}.Bytes(), k[len(k)-1], k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -498,7 +518,7 @@ func TestReverseSingleSideRangeProof(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), k[0], last.Bytes(), k, v, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), k[0], last.Bytes(), k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -570,7 +590,7 @@ func TestBadRangeProof(t *testing.T) { index = mrand.Intn(end - start) vals[index] = nil } - err, _ := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), first, last, keys, vals, proof) if err == nil { t.Fatalf("%d Case %d index %d range: (%d->%d) expect error, got nil", i, testcase, index, start, end-1) } @@ 
-604,7 +624,7 @@ func TestGappedRangeProof(t *testing.T) { keys = append(keys, entries[i].k) vals = append(vals, entries[i].v) } - err, _ := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) if err == nil { t.Fatal("expect error, got nil") } @@ -631,7 +651,7 @@ func TestSameSideProofs(t *testing.T) { if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) if err == nil { t.Fatalf("Expected error, got nil") } @@ -647,7 +667,7 @@ func TestSameSideProofs(t *testing.T) { if err := trie.Prove(last, 0, proof); err != nil { t.Fatalf("Failed to prove the last node %v", err) } - err, _ = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) + _, _, _, _, err = VerifyRangeProof(trie.Hash(), first, last, [][]byte{entries[pos].k}, [][]byte{entries[pos].v}, proof) if err == nil { t.Fatalf("Expected error, got nil") } @@ -715,7 +735,7 @@ func TestHasRightElement(t *testing.T) { k = append(k, entries[i].k) v = append(v, entries[i].v) } - err, hasMore := VerifyRangeProof(trie.Hash(), firstKey, lastKey, k, v, proof) + _, _, _, hasMore, err := VerifyRangeProof(trie.Hash(), firstKey, lastKey, k, v, proof) if err != nil { t.Fatalf("Expected no error, got %v", err) } @@ -748,13 +768,57 @@ func TestEmptyRangeProof(t *testing.T) { if err := trie.Prove(first, 0, proof); err != nil { t.Fatalf("Failed to prove the first node %v", err) } - err, _ := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof) + db, tr, not, _, err := VerifyRangeProof(trie.Hash(), first, nil, nil, nil, proof) if c.err && err == nil { t.Fatalf("Expected error, got nil") } if !c.err && err != nil { t.Fatalf("Expected no error, got %v", err) } + // If no error was returned, ensure the returned trie and database contains + // the entire proof, since there's no value + if !c.err { + if err := tr.Prove(first, 0, memorydb.New()); err != nil { + t.Errorf("returned trie doesn't contain original proof: %v", err) + } + if memdb := db.(*memorydb.Database); memdb.Len() != proof.Len() { + t.Errorf("database entry count mismatch: have %d, want %d", memdb.Len(), proof.Len()) + } + if not == nil { + t.Errorf("missing notary") + } + } + } +} + +// TestBloatedProof tests a malicious proof, where the proof is more or less the +// whole trie. 
+func TestBloatedProof(t *testing.T) { + // Use a small trie + trie, kvs := nonRandomTrie(100) + var entries entrySlice + for _, kv := range kvs { + entries = append(entries, kv) + } + sort.Sort(entries) + var keys [][]byte + var vals [][]byte + + proof := memorydb.New() + for i, entry := range entries { + trie.Prove(entry.k, 0, proof) + if i == 50 { + keys = append(keys, entry.k) + vals = append(vals, entry.v) + } + } + want := memorydb.New() + trie.Prove(keys[0], 0, want) + trie.Prove(keys[len(keys)-1], 0, want) + + _, _, notary, _, _ := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, vals, proof) + if used := notary.Accessed().(*memorydb.Database); used.Len() != want.Len() { + t.Fatalf("notary proof size mismatch: have %d, want %d", used.Len(), want.Len()) } } @@ -858,7 +922,7 @@ func benchmarkVerifyRangeProof(b *testing.B, size int) { b.ResetTimer() for i := 0; i < b.N; i++ { - err, _ := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, proof) + _, _, _, _, err := VerifyRangeProof(trie.Hash(), keys[0], keys[len(keys)-1], keys, values, proof) if err != nil { b.Fatalf("Case %d(%d->%d) expect no error, got %v", i, start, end-1, err) } @@ -889,3 +953,20 @@ func randBytes(n int) []byte { crand.Read(r) return r } + +func nonRandomTrie(n int) (*Trie, map[string]*kv) { + trie := new(Trie) + vals := make(map[string]*kv) + max := uint64(0xffffffffffffffff) + for i := uint64(0); i < uint64(n); i++ { + value := make([]byte, 32) + key := make([]byte, 32) + binary.LittleEndian.PutUint64(key, i) + binary.LittleEndian.PutUint64(value, i-max) + //value := &kv{common.LeftPadBytes([]byte{i}, 32), []byte{i}, false} + elem := &kv{key, value, false} + trie.Update(elem.k, elem.v) + vals[string(elem.k)] = elem + } + return trie, vals +} diff --git a/trie/secure_trie.go b/trie/secure_trie.go index 87b364fb1b..e38471c1b7 100644 --- a/trie/secure_trie.go +++ b/trie/secure_trie.go @@ -147,12 +147,13 @@ func (t *SecureTrie) GetKey(shaKey []byte) []byte { func (t *SecureTrie) Commit(onleaf LeafCallback) (root common.Hash, err error) { // Write all the pre-images to the actual disk database if len(t.getSecKeyCache()) > 0 { - t.trie.db.lock.Lock() - for hk, key := range t.secKeyCache { - t.trie.db.insertPreimage(common.BytesToHash([]byte(hk)), key) + if t.trie.db.preimages != nil { // Ugly direct check but avoids the below write lock + t.trie.db.lock.Lock() + for hk, key := range t.secKeyCache { + t.trie.db.insertPreimage(common.BytesToHash([]byte(hk)), key) + } + t.trie.db.lock.Unlock() } - t.trie.db.lock.Unlock() - t.secKeyCache = make(map[string][]byte) } // Commit the trie to its intermediate node database diff --git a/trie/stacktrie.go b/trie/stacktrie.go index 575a04022f..a198eb204b 100644 --- a/trie/stacktrie.go +++ b/trie/stacktrie.go @@ -35,7 +35,7 @@ var stPool = sync.Pool{ }, } -func stackTrieFromPool(db ethdb.KeyValueStore) *StackTrie { +func stackTrieFromPool(db ethdb.KeyValueWriter) *StackTrie { st := stPool.Get().(*StackTrie) st.db = db return st @@ -50,24 +50,23 @@ func returnToPool(st *StackTrie) { // in order. Once it determines that a subtree will no longer be inserted // into, it will hash it and free up the memory it uses. 
type StackTrie struct { - nodeType uint8 // node type (as in branch, ext, leaf) - val []byte // value contained by this node if it's a leaf - key []byte // key chunk covered by this (full|ext) node - keyOffset int // offset of the key chunk inside a full key - children [16]*StackTrie // list of children (for fullnodes and exts) - - db ethdb.KeyValueStore // Pointer to the commit db, can be nil + nodeType uint8 // node type (as in branch, ext, leaf) + val []byte // value contained by this node if it's a leaf + key []byte // key chunk covered by this (full|ext) node + keyOffset int // offset of the key chunk inside a full key + children [16]*StackTrie // list of children (for fullnodes and exts) + db ethdb.KeyValueWriter // Pointer to the commit db, can be nil } // NewStackTrie allocates and initializes an empty trie. -func NewStackTrie(db ethdb.KeyValueStore) *StackTrie { +func NewStackTrie(db ethdb.KeyValueWriter) *StackTrie { return &StackTrie{ nodeType: emptyNode, db: db, } } -func newLeaf(ko int, key, val []byte, db ethdb.KeyValueStore) *StackTrie { +func newLeaf(ko int, key, val []byte, db ethdb.KeyValueWriter) *StackTrie { st := stackTrieFromPool(db) st.nodeType = leafNode st.keyOffset = ko @@ -76,7 +75,7 @@ func newLeaf(ko int, key, val []byte, db ethdb.KeyValueStore) *StackTrie { return st } -func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueStore) *StackTrie { +func newExt(ko int, key []byte, child *StackTrie, db ethdb.KeyValueWriter) *StackTrie { st := stackTrieFromPool(db) st.nodeType = extNode st.keyOffset = ko diff --git a/trie/stacktrie_test.go b/trie/stacktrie_test.go index d4488b4029..29706f2e9d 100644 --- a/trie/stacktrie_test.go +++ b/trie/stacktrie_test.go @@ -1,16 +1,9 @@ package trie import ( - "bytes" - "fmt" - "math/big" - mrand "math/rand" "testing" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/ethdb/memorydb" ) @@ -78,169 +71,6 @@ func TestValLength56(t *testing.T) { } } -func genTxs(num uint64) (types.Transactions, error) { - key, err := crypto.HexToECDSA("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef") - if err != nil { - return nil, err - } - var addr = crypto.PubkeyToAddress(key.PublicKey) - newTx := func(i uint64) (*types.Transaction, error) { - signer := types.NewEIP155Signer(big.NewInt(18)) - tx, err := types.SignTx(types.NewTransaction(i, addr, new(big.Int), 0, new(big.Int).SetUint64(10000000), nil), signer, key) - return tx, err - } - var txs types.Transactions - for i := uint64(0); i < num; i++ { - tx, err := newTx(i) - if err != nil { - return nil, err - } - txs = append(txs, tx) - } - return txs, nil -} - -func TestDeriveSha(t *testing.T) { - txs, err := genTxs(0) - if err != nil { - t.Fatal(err) - } - for len(txs) < 1000 { - exp := types.DeriveSha(txs, newEmpty()) - got := types.DeriveSha(txs, NewStackTrie(nil)) - if !bytes.Equal(got[:], exp[:]) { - t.Fatalf("%d txs: got %x exp %x", len(txs), got, exp) - } - newTxs, err := genTxs(uint64(len(txs) + 1)) - if err != nil { - t.Fatal(err) - } - txs = append(txs, newTxs...) 
- } -} - -func BenchmarkDeriveSha200(b *testing.B) { - txs, err := genTxs(200) - if err != nil { - b.Fatal(err) - } - var exp common.Hash - var got common.Hash - b.Run("std_trie", func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - exp = types.DeriveSha(txs, newEmpty()) - } - }) - - b.Run("stack_trie", func(b *testing.B) { - b.ResetTimer() - b.ReportAllocs() - for i := 0; i < b.N; i++ { - got = types.DeriveSha(txs, NewStackTrie(nil)) - } - }) - if got != exp { - b.Errorf("got %x exp %x", got, exp) - } -} - -type dummyDerivableList struct { - len int - seed int -} - -func newDummy(seed int) *dummyDerivableList { - d := &dummyDerivableList{} - src := mrand.NewSource(int64(seed)) - // don't use lists longer than 4K items - d.len = int(src.Int63() & 0x0FFF) - d.seed = seed - return d -} - -func (d *dummyDerivableList) Len() int { - return d.len -} - -func (d *dummyDerivableList) GetRlp(i int) []byte { - src := mrand.NewSource(int64(d.seed + i)) - // max item size 256, at least 1 byte per item - size := 1 + src.Int63()&0x00FF - data := make([]byte, size) - _, err := mrand.New(src).Read(data) - if err != nil { - panic(err) - } - return data -} - -func printList(l types.DerivableList) { - fmt.Printf("list length: %d\n", l.Len()) - fmt.Printf("{\n") - for i := 0; i < l.Len(); i++ { - v := l.GetRlp(i) - fmt.Printf("\"0x%x\",\n", v) - } - fmt.Printf("},\n") -} - -func TestFuzzDeriveSha(t *testing.T) { - // increase this for longer runs -- it's set to quite low for travis - rndSeed := mrand.Int() - for i := 0; i < 10; i++ { - seed := rndSeed + i - exp := types.DeriveSha(newDummy(i), newEmpty()) - got := types.DeriveSha(newDummy(i), NewStackTrie(nil)) - if !bytes.Equal(got[:], exp[:]) { - printList(newDummy(seed)) - t.Fatalf("seed %d: got %x exp %x", seed, got, exp) - } - } -} - -type flatList struct { - rlpvals []string -} - -func newFlatList(rlpvals []string) *flatList { - return &flatList{rlpvals} -} -func (f *flatList) Len() int { - return len(f.rlpvals) -} -func (f *flatList) GetRlp(i int) []byte { - return hexutil.MustDecode(f.rlpvals[i]) -} - -// TestDerivableList contains testcases found via fuzzing -func TestDerivableList(t *testing.T) { - type tcase []string - tcs := []tcase{ - { - "0xc041", - }, - { - "0xf04cf757812428b0763112efb33b6f4fad7deb445e", - "0xf04cf757812428b0763112efb33b6f4fad7deb445e", - }, - { - "0xca410605310cdc3bb8d4977ae4f0143df54a724ed873457e2272f39d66e0460e971d9d", - "0x6cd850eca0a7ac46bb1748d7b9cb88aa3bd21c57d852c28198ad8fa422c4595032e88a4494b4778b36b944fe47a52b8c5cd312910139dfcb4147ab8e972cc456bcb063f25dd78f54c4d34679e03142c42c662af52947d45bdb6e555751334ace76a5080ab5a0256a1d259855dfc5c0b8023b25befbb13fd3684f9f755cbd3d63544c78ee2001452dd54633a7593ade0b183891a0a4e9c7844e1254005fbe592b1b89149a502c24b6e1dca44c158aebedf01beae9c30cabe16a", - "0x14abd5c47c0be87b0454596baad2", - "0xca410605310cdc3bb8d4977ae4f0143df54a724ed873457e2272f39d66e0460e971d9d", - }, - } - for i, tc := range tcs[1:] { - exp := types.DeriveSha(newFlatList(tc), newEmpty()) - got := types.DeriveSha(newFlatList(tc), NewStackTrie(nil)) - if !bytes.Equal(got[:], exp[:]) { - t.Fatalf("case %d: got %x exp %x", i, got, exp) - } - } -} - // TestUpdateSmallNodes tests a case where the leaves are small (both key and value), // which causes a lot of node-within-node. This case was found via fuzzing. 
func TestUpdateSmallNodes(t *testing.T) { diff --git a/trie/sync.go b/trie/sync.go index bc93ddd3fb..dd8279b665 100644 --- a/trie/sync.go +++ b/trie/sync.go @@ -313,11 +313,15 @@ func (s *Sync) Commit(dbw ethdb.Batch) error { // Dump the membatch into a database dbw for key, value := range s.membatch.nodes { rawdb.WriteTrieNode(dbw, key, value) - s.bloom.Add(key[:]) + if s.bloom != nil { + s.bloom.Add(key[:]) + } } for key, value := range s.membatch.codes { rawdb.WriteCode(dbw, key, value) - s.bloom.Add(key[:]) + if s.bloom != nil { + s.bloom.Add(key[:]) + } } // Drop the membatch data and return s.membatch = newSyncMemBatch() @@ -410,7 +414,7 @@ func (s *Sync) children(req *request, object node) ([]*request, error) { // Bloom filter says this might be a duplicate, double check. // If database says yes, then at least the trie node is present // and we hold the assumption that it's NOT legacy contract code. - if blob := rawdb.ReadTrieNode(s.database, common.BytesToHash(node)); len(blob) > 0 { + if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 { continue } // False positive, bump fault meter diff --git a/trie/sync_bloom.go b/trie/sync_bloom.go index 89f61d66d9..1afcce21da 100644 --- a/trie/sync_bloom.go +++ b/trie/sync_bloom.go @@ -19,7 +19,6 @@ package trie import ( "encoding/binary" "fmt" - "math" "sync" "sync/atomic" "time" @@ -29,7 +28,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" - "github.com/steakknife/bloomfilter" + bloomfilter "github.com/holiman/bloomfilter/v2" ) var ( @@ -41,18 +40,6 @@ var ( bloomErrorGauge = metrics.NewRegisteredGauge("trie/bloom/error", nil) ) -// syncBloomHasher is a wrapper around a byte blob to satisfy the interface API -// requirements of the bloom library used. It's used to convert a trie hash or -// contract code hash into a 64 bit mini hash. -type syncBloomHasher []byte - -func (f syncBloomHasher) Write(p []byte) (n int, err error) { panic("not implemented") } -func (f syncBloomHasher) Sum(b []byte) []byte { panic("not implemented") } -func (f syncBloomHasher) Reset() { panic("not implemented") } -func (f syncBloomHasher) BlockSize() int { panic("not implemented") } -func (f syncBloomHasher) Size() int { return 8 } -func (f syncBloomHasher) Sum64() uint64 { return binary.BigEndian.Uint64(f) } - // SyncBloom is a bloom filter used during fast sync to quickly decide if a trie // node or contract code already exists on disk or not. It self populates from the // provided disk database on creation in a background thread and will only start @@ -69,7 +56,7 @@ type SyncBloom struct { // initializes it from the database. The bloom is hard coded to use 3 filters. 
func NewSyncBloom(memory uint64, database ethdb.Iteratee) *SyncBloom { // Create the bloom filter to track known trie nodes - bloom, err := bloomfilter.New(memory*1024*1024*8, 3) + bloom, err := bloomfilter.New(memory*1024*1024*8, 4) if err != nil { panic(fmt.Sprintf("failed to create bloom: %v", err)) } @@ -110,12 +97,11 @@ func (b *SyncBloom) init(database ethdb.Iteratee) { // If the database entry is a trie node, add it to the bloom key := it.Key() if len(key) == common.HashLength { - b.bloom.Add(syncBloomHasher(key)) + b.bloom.AddHash(binary.BigEndian.Uint64(key)) bloomLoadMeter.Mark(1) - } - // If the database entry is a contract code, add it to the bloom - if ok, hash := rawdb.IsCodeKey(key); ok { - b.bloom.Add(syncBloomHasher(hash)) + } else if ok, hash := rawdb.IsCodeKey(key); ok { + // If the database entry is a contract code, add it to the bloom + b.bloom.AddHash(binary.BigEndian.Uint64(hash)) bloomLoadMeter.Mark(1) } // If enough time elapsed since the last iterator swap, restart @@ -125,14 +111,14 @@ func (b *SyncBloom) init(database ethdb.Iteratee) { it.Release() it = database.NewIterator(nil, key) - log.Info("Initializing fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate(), "elapsed", common.PrettyDuration(time.Since(start))) + log.Info("Initializing state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start))) swap = time.Now() } } it.Release() // Mark the bloom filter inited and return - log.Info("Initialized fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate(), "elapsed", common.PrettyDuration(time.Since(start))) + log.Info("Initialized state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability(), "elapsed", common.PrettyDuration(time.Since(start))) atomic.StoreUint32(&b.inited, 1) } @@ -141,7 +127,7 @@ func (b *SyncBloom) init(database ethdb.Iteratee) { func (b *SyncBloom) meter() { for { // Report the current error ration. No floats, lame, scale it up. - bloomErrorGauge.Update(int64(b.errorRate() * 100000)) + bloomErrorGauge.Update(int64(b.bloom.FalsePosititveProbability() * 100000)) // Wait one second, but check termination more frequently for i := 0; i < 10; i++ { @@ -162,7 +148,7 @@ func (b *SyncBloom) Close() error { b.pend.Wait() // Wipe the bloom, but mark it "uninited" just in case someone attempts an access - log.Info("Deallocated fast sync bloom", "items", b.bloom.N(), "errorrate", b.errorRate()) + log.Info("Deallocated state bloom", "items", b.bloom.N(), "errorrate", b.bloom.FalsePosititveProbability()) atomic.StoreUint32(&b.inited, 0) b.bloom = nil @@ -175,7 +161,7 @@ func (b *SyncBloom) Add(hash []byte) { if atomic.LoadUint32(&b.closed) == 1 { return } - b.bloom.Add(syncBloomHasher(hash)) + b.bloom.AddHash(binary.BigEndian.Uint64(hash)) bloomAddMeter.Mark(1) } @@ -193,22 +179,9 @@ func (b *SyncBloom) Contains(hash []byte) bool { return true } // Bloom initialized, check the real one and report any successful misses - maybe := b.bloom.Contains(syncBloomHasher(hash)) + maybe := b.bloom.ContainsHash(binary.BigEndian.Uint64(hash)) if !maybe { bloomMissMeter.Mark(1) } return maybe } - -// errorRate calculates the probability of a random containment test returning a -// false positive. -// -// We're calculating it ourselves because the bloom library we used missed a -// parentheses in the formula and calculates it wrong. And it's discontinued... 
-func (b *SyncBloom) errorRate() float64 { - k := float64(b.bloom.K()) - n := float64(b.bloom.N()) - m := float64(b.bloom.M()) - - return math.Pow(1.0-math.Exp((-k)*(n+0.5)/(m-1)), k) -} diff --git a/trie/sync_test.go b/trie/sync_test.go index 39e0f9575e..cb3283875d 100644 --- a/trie/sync_test.go +++ b/trie/sync_test.go @@ -377,7 +377,6 @@ func TestIncompleteSync(t *testing.T) { nodes, _, codes := sched.Missing(1) queue := append(append([]common.Hash{}, nodes...), codes...) - for len(queue) > 0 { // Fetch a batch of trie nodes results := make([]SyncResult, len(queue)) @@ -401,10 +400,8 @@ func TestIncompleteSync(t *testing.T) { batch.Write() for _, result := range results { added = append(added, result.Hash) - } - // Check that all known sub-tries in the synced trie are complete - for _, root := range added { - if err := checkTrieConsistency(triedb, root); err != nil { + // Check that all known sub-tries in the synced trie are complete + if err := checkTrieConsistency(triedb, result.Hash); err != nil { t.Fatalf("trie inconsistent: %v", err) } } diff --git a/trie/trie.go b/trie/trie.go index 6ddbbd78d3..87b72ecf17 100644 --- a/trie/trie.go +++ b/trie/trie.go @@ -19,13 +19,13 @@ package trie import ( "bytes" + "errors" "fmt" "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" ) var ( @@ -159,29 +159,26 @@ func (t *Trie) TryGetNode(path []byte) ([]byte, int, error) { if item == nil { return nil, resolved, nil } - enc, err := rlp.EncodeToBytes(item) - if err != nil { - log.Error("Encoding existing trie node failed", "err", err) - return nil, resolved, err - } - return enc, resolved, err + return item, resolved, err } -func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item node, newnode node, resolved int, err error) { +func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, newnode node, resolved int, err error) { // If we reached the requested path, return the current node if pos >= len(path) { - // Don't return collapsed hash nodes though - if _, ok := origNode.(hashNode); !ok { - // Short nodes have expanded keys, compact them before returning - item := origNode - if sn, ok := item.(*shortNode); ok { - item = &shortNode{ - Key: hexToCompact(sn.Key), - Val: sn.Val, - } - } - return item, origNode, 0, nil + // Although we most probably have the original node expanded, encoding + // that into consensus form can be nasty (needs to cascade down) and + // time consuming. Instead, just pull the hash up from disk directly. + var hash hashNode + if node, ok := origNode.(hashNode); ok { + hash = node + } else { + hash, _ = origNode.cache() + } + if hash == nil { + return nil, origNode, 0, errors.New("non-consensus node") } + blob, err := t.db.Node(common.BytesToHash(hash)) + return blob, origNode, 1, err } // Path still needs to be traversed, descend into children switch n := (origNode).(type) { @@ -491,7 +488,7 @@ func (t *Trie) resolveHash(n hashNode, prefix []byte) (node, error) { // Hash returns the root hash of the trie. It does not write to the // database and can be used even if the trie doesn't have one. 
diff --git a/trie/trie_test.go b/trie/trie_test.go
index 682dec157c..87bce9abca 100644
--- a/trie/trie_test.go
+++ b/trie/trie_test.go
@@ -322,7 +322,7 @@ func TestLargeValue(t *testing.T) {
 
 // TestRandomCases tests som cases that were found via random fuzzing
 func TestRandomCases(t *testing.T) {
-	var rt []randTestStep = []randTestStep{
+	var rt = []randTestStep{
 		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")},                                                                                                 // step 0
 		{op: 6, key: common.Hex2Bytes(""), value: common.Hex2Bytes("")},                                                                                                 // step 1
 		{op: 0, key: common.Hex2Bytes("d51b182b95d677e5f1c82508c0228de96b73092d78ce78b2230cd948674f66fd1483bd"), value: common.Hex2Bytes("0000000000000002")},           // step 2
@@ -594,21 +594,20 @@ func benchmarkCommitAfterHash(b *testing.B, onleaf LeafCallback) {
 
 func TestTinyTrie(t *testing.T) {
 	// Create a realistic account trie to hash
-	_, accounts := makeAccounts(10000)
+	_, accounts := makeAccounts(5)
 	trie := newEmpty()
 	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001337"), accounts[3])
-	if exp, root := common.HexToHash("4fa6efd292cffa2db0083b8bedd23add2798ae73802442f52486e95c3df7111c"), trie.Hash(); exp != root {
-		t.Fatalf("1: got %x, exp %x", root, exp)
+	if exp, root := common.HexToHash("8c6a85a4d9fda98feff88450299e574e5378e32391f75a055d470ac0653f1005"), trie.Hash(); exp != root {
+		t.Errorf("1: got %x, exp %x", root, exp)
 	}
 	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001338"), accounts[4])
-	if exp, root := common.HexToHash("cb5fb1213826dad9e604f095f8ceb5258fe6b5c01805ce6ef019a50699d2d479"), trie.Hash(); exp != root {
-		t.Fatalf("2: got %x, exp %x", root, exp)
+	if exp, root := common.HexToHash("ec63b967e98a5720e7f720482151963982890d82c9093c0d486b7eb8883a66b1"), trie.Hash(); exp != root {
+		t.Errorf("2: got %x, exp %x", root, exp)
 	}
 	trie.Update(common.Hex2Bytes("0000000000000000000000000000000000000000000000000000000000001339"), accounts[4])
-	if exp, root := common.HexToHash("ed7e06b4010057d8703e7b9a160a6d42cf4021f9020da3c8891030349a646987"), trie.Hash(); exp != root {
-		t.Fatalf("3: got %x, exp %x", root, exp)
+	if exp, root := common.HexToHash("0608c1d1dc3905fa22204c7a0e43644831c3b6d3def0f274be623a948197e64a"), trie.Hash(); exp != root {
+		t.Errorf("3: got %x, exp %x", root, exp)
 	}
-
 	checktr, _ := New(common.Hash{}, trie.db)
 	it := NewIterator(trie.NodeIterator(nil))
 	for it.Next() {
@@ -630,7 +629,7 @@ func TestCommitAfterHash(t *testing.T) {
 	trie.Hash()
 	trie.Commit(nil)
 	root := trie.Hash()
-	exp := common.HexToHash("e5e9c29bb50446a4081e6d1d748d2892c6101c1e883a1f77cf21d4094b697822")
+	exp := common.HexToHash("72f9d3f3fe1e1dd7b8936442e7642aef76371472d94319900790053c493f3fe6")
 	if exp != root {
 		t.Errorf("got %x, exp %x", root, exp)
 	}
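The updated root hashes in `TestTinyTrie` and `TestCommitAfterHash` above follow from the deterministic account generation introduced in the `makeAccounts` hunk below, and the switch from `t.Fatalf` to `t.Errorf` lets a single run report every mismatching root. For orientation, a small sketch (illustrative keys and values) of the property these tests lean on: `Hash` folds the in-memory nodes into a root without writing anything, and a subsequent `Commit` yields the same root while also persisting the nodes, consistent with `hashRoot` dropping the database parameter that `Hash` was already passing as nil:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/trie"
)

func main() {
	tr, _ := trie.New(common.Hash{}, trie.NewDatabase(rawdb.NewMemoryDatabase()))
	tr.Update([]byte("do"), []byte("verb"))
	tr.Update([]byte("dog"), []byte("puppy"))

	// Hash computes the root purely in memory; nothing is written yet.
	root := tr.Hash()

	// Commit re-derives the same root and also pushes the nodes into the
	// trie database (here backed by an in-memory key-value store).
	committed, _ := tr.Commit(nil)
	fmt.Println(root == committed) // true
}
```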
@@ -646,19 +645,27 @@ func makeAccounts(size int) (addresses [][20]byte, accounts [][]byte) {
 	// Create a realistic account trie to hash
 	addresses = make([][20]byte, size)
 	for i := 0; i < len(addresses); i++ {
-		for j := 0; j < len(addresses[i]); j++ {
-			addresses[i][j] = byte(random.Intn(256))
-		}
+		data := make([]byte, 20)
+		random.Read(data)
+		copy(addresses[i][:], data)
 	}
 	accounts = make([][]byte, len(addresses))
 	for i := 0; i < len(accounts); i++ {
 		var (
-			nonce   = uint64(random.Int63())
-			balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
-			root    = emptyRoot
-			code    = crypto.Keccak256(nil)
+			nonce = uint64(random.Int63())
+			root  = emptyRoot
+			code  = crypto.Keccak256(nil)
 		)
-		accounts[i], _ = rlp.EncodeToBytes(&account{nonce, balance, root, code})
+		// The big.Rand function is not deterministic with regards to 64 vs 32 bit systems,
+		// and will consume different amount of data from the rand source.
+		//balance = new(big.Int).Rand(random, new(big.Int).Exp(common.Big2, common.Big256, nil))
+		// Therefore, we instead just read via byte buffer
+		numBytes := random.Uint32() % 33 // [0, 32] bytes
+		balanceBytes := make([]byte, numBytes)
+		random.Read(balanceBytes)
+		balance := new(big.Int).SetBytes(balanceBytes)
+		data, _ := rlp.EncodeToBytes(&account{nonce, balance, root, code})
+		accounts[i] = data
 	}
 	return addresses, accounts
 }
@@ -714,12 +721,12 @@ func TestCommitSequence(t *testing.T) {
 		expWriteSeqHash    []byte
 		expCallbackSeqHash []byte
 	}{
-		{20, common.FromHex("68c495e45209e243eb7e4f4e8ca8f9f7be71003bd9cafb8061b4534373740193"),
-			common.FromHex("01783213033d6b7781a641ab499e680d959336d025ac16f44d02f4f0c021bbf5")},
-		{200, common.FromHex("3b20d16c13c4bc3eb3b8d0ad7a169fef3b1600e056c0665895d03d3d2b2ff236"),
-			common.FromHex("fb8db0ec82e8f02729f11228940885b181c3047ab0d654ed0110291ca57111a8")},
-		{2000, common.FromHex("34eff3d1048bebdf77e9ae8bd939f2e7c742edc3dcd1173cff1aad9dbd20451a"),
-			common.FromHex("1c981604b1a9f8ffa40e0ae66b14830a87f5a4ed8345146a3912e6b2dcb05e63")},
+		{20, common.FromHex("873c78df73d60e59d4a2bcf3716e8bfe14554549fea2fc147cb54129382a8066"),
+			common.FromHex("ff00f91ac05df53b82d7f178d77ada54fd0dca64526f537034a5dbe41b17df2a")},
+		{200, common.FromHex("ba03d891bb15408c940eea5ee3d54d419595102648d02774a0268d892add9c8e"),
+			common.FromHex("f3cd509064c8d319bbdd1c68f511850a902ad275e6ed5bea11547e23d492a926")},
+		{2000, common.FromHex("f7a184f20df01c94f09537401d11e68d97ad0c00115233107f51b9c287ce60c7"),
+			common.FromHex("ff795ea898ba1e4cfed4a33b4cf5535a347a02cf931f88d88719faf810f9a1c9")},
 	} {
 		addresses, accounts := makeAccounts(tc.count)
 		// This spongeDb is used to check the sequence of disk-db-writes
@@ -740,10 +747,10 @@ func TestCommitSequence(t *testing.T) {
 			callbackSponge.Write(c[:])
 		})
 		if got, exp := s.sponge.Sum(nil), tc.expWriteSeqHash; !bytes.Equal(got, exp) {
-			t.Fatalf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
+			t.Errorf("test %d, disk write sequence wrong:\ngot %x exp %x\n", i, got, exp)
 		}
		if got, exp := callbackSponge.Sum(nil), tc.expCallbackSeqHash; !bytes.Equal(got, exp) {
-			t.Fatalf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
+			t.Errorf("test %d, call back sequence wrong:\ngot: %x exp %x\n", i, got, exp)
 		}
 	}
 }
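The `makeAccounts` hunk above replaces `big.Int.Rand`, which consumes a platform-dependent amount of data from the random source on 32- vs 64-bit systems, with a length draw in [0, 32] followed by a raw byte read, so a fixed seed yields the same accounts, and therefore the same expected roots and commit sequences, everywhere. A self-contained sketch of that balance-generation pattern; the seed and printed fields are illustrative:

```go
package main

import (
	"fmt"
	"math/big"
	"math/rand"
)

func main() {
	// Fixed seed: the sequence of reads below is identical on 32- and
	// 64-bit platforms, which is the point of the makeAccounts change.
	random := rand.New(rand.NewSource(0))

	// Draw a length in [0, 32] and read exactly that many bytes for the
	// balance, instead of big.Int.Rand which consumes a platform-dependent
	// amount of data from the source.
	numBytes := random.Uint32() % 33
	balanceBytes := make([]byte, numBytes)
	random.Read(balanceBytes)
	balance := new(big.Int).SetBytes(balanceBytes)

	fmt.Println(numBytes, balance.BitLen())
}
```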