diff --git a/.circleci/codecov.sh b/.circleci/codecov.sh new file mode 100644 index 00000000000..1ef332b1b31 --- /dev/null +++ b/.circleci/codecov.sh @@ -0,0 +1,1550 @@ +#!/usr/bin/env bash + +# Apache License Version 2.0, January 2004 +# https://github.com/codecov/codecov-bash/blob/master/LICENSE + + +set -e +o pipefail + +VERSION="0b37652" + +url="https://codecov.io" +env="$CODECOV_ENV" +service="" +token="" +search_in="" +flags="" +exit_with=0 +curlargs="" +curlawsargs="" +dump="0" +clean="0" +curl_s="-s" +name="$CODECOV_NAME" +include_cov="" +exclude_cov="" +ddp="$(echo ~)/Library/Developer/Xcode/DerivedData" +xp="" +files="" +cacert="$CODECOV_CA_BUNDLE" +gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'" +gcov_include="" + +ft_gcov="1" +ft_coveragepy="1" +ft_fix="1" +ft_search="1" +ft_s3="1" +ft_network="1" +ft_xcodellvm="1" +ft_xcodeplist="0" + +_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo $PWD) +git_root="$_git_root" +codecov_yml="" +remote_addr="" +if [ "$git_root" = "$PWD" ]; +then + git_root="." +fi + +url_o="" +pr_o="" +build_o="" +commit_o="" +search_in_o="" +tag_o="" +branch_o="" +slug_o="" +prefix_o="" + +commit="$VCS_COMMIT_ID" +branch="$VCS_BRANCH_NAME" +pr="$VCS_PULL_REQUEST" +slug="$VCS_SLUG" +tag="$VCS_TAG" +build_url="$CI_BUILD_URL" +build="$CI_BUILD_ID" +job="$CI_JOB_ID" + +beta_xcode_partials="" + +proj_root="$git_root" +gcov_exe="gcov" +gcov_arg="" + +b="\033[0;36m" +g="\033[0;32m" +r="\033[0;31m" +e="\033[0;90m" +x="\033[0m" + +show_help() { +cat << EOF + + Codecov Bash $VERSION + + Global report uploading tool for Codecov + Documentation at https://docs.codecov.io/docs + Contribute at https://github.com/codecov/codecov-bash + + + -h Display this help and exit + -f FILE Target file(s) to upload + + -f "path/to/file" only upload this file + skips searching unless provided patterns below + + -f '!*.bar' ignore all files at pattern *.bar + -f '*.foo' include all files at pattern *.foo + Must use single quotes. + This is non-exclusive, use -s "*.foo" to match specific paths. + + -s DIR Directory to search for coverage reports. + Already searches project root and artifact folders. + -t TOKEN Set the private repository token + (option) set environment variable CODECOV_TOKEN=:uuid + + -t @/path/to/token_file + -t uuid + + -n NAME Custom defined name of the upload. Visible in Codecov UI + + -e ENV Specify environment variables to be included with this build + Also accepting environment variables: CODECOV_ENV=VAR,VAR2 + + -e VAR,VAR2 + + -X feature Toggle functionalities + + -X gcov Disable gcov + -X coveragepy Disable python coverage + -X fix Disable report fixing + -X search Disable searching for reports + -X xcode Disable xcode processing + -X network Disable uploading the file network + + -R root dir Used when not in git/hg project to identify project root directory + -y conf file Used to specify the location of the .codecov.yml config file + -F flag Flag the upload to group coverage metrics + + -F unittests This upload is only unittests + -F integration This upload is only integration tests + -F ui,chrome This upload is Chrome - UI tests + + -c Move discovered coverage reports to the trash + -Z Exit with 1 if not successful. Default will Exit with 0 + + -- xcode -- + -D Custom Derived Data Path for Coverage.profdata and gcov processing + Default '~/Library/Developer/Xcode/DerivedData' + -J Specify packages to build coverage. 
+ This can significantly reduce time to build coverage reports. + + -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests" + -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests" + + -- gcov -- + -g GLOB Paths to ignore during gcov gathering + -G GLOB Paths to include during gcov gathering + -p dir Project root directory + Also used when preparing gcov + -k prefix Prefix filepaths to help resolve path fixing: https://github.com/codecov/support/issues/472 + -x gcovexe gcov executable to run. Defaults to 'gcov' + -a gcovargs extra arguments to pass to gcov + + -- Override CI Environment Variables -- + These variables are automatically detected by popular CI providers + + -B branch Specify the branch name + -C sha Specify the commit sha + -P pr Specify the pull request number + -b build Specify the build number + -T tag Specify the git tag + + -- Enterprise -- + -u URL Set the target url for Enterprise customers + Not required when retrieving the bash uploader from your CCE + (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com + -r SLUG owner/repo slug used instead of the private repo token in Enterprise + (option) set environment variable CODECOV_SLUG=:owner/:repo + (option) set in your codecov.yml "codecov.slug" + -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional) + (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem" + -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy" + -A curlargs Extra curl arguments to communicate with AWS. + + -- Debugging -- + -d Don't upload, but dump upload file to stdout + -K Remove color from the output + -v Verbose mode + +EOF +} + + +say() { + echo -e "$1" +} + + +urlencode() { + echo "$1" | curl -Gso /dev/null -w %{url_effective} --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//' +} + + +swiftcov() { + _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g') + for _type in app framework xctest + do + find "$_dir" -name "*.$_type" | while read f + do + _proj=${f##*/} + _proj=${_proj%."$_type"} + if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ]; + then + say " $g+$x Building reports for $_proj $_type" + dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj") + _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g') + xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \ + || say " ${r}x>${x} llvm-cov failed to produce results for $dest" + fi + done + done +} + + +# Credits to: https://gist.github.com/pkuczynski/8665367 +parse_yaml() { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")} + printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3); + } + }' +} + + +if [ $# != 0 ]; +then + while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hJ:k:Kn:p:P:r:R:y:s:S:t:T:u:U:vx:X:Z" o + do + case "$o" in + "a") + gcov_arg=$OPTARG + ;; + "A") + curlawsargs="$OPTARG" + ;; + "b") + build_o="$OPTARG" + ;; + "B") + branch_o="$OPTARG" + ;; + "c") + clean="1" + ;; + "C") + commit_o="$OPTARG" + ;; + "d") + dump="1" + ;; + "D") + ddp="$OPTARG" + ;; + "e") + env="$env,$OPTARG" + ;; + "f") + if [ "${OPTARG::1}" = "!"
]; + then + exclude_cov="$exclude_cov -not -path '${OPTARG:1}'" + + elif [[ "$OPTARG" = *"*"* ]]; + then + include_cov="$include_cov -or -name '$OPTARG'" + + else + ft_search=0 + if [ "$files" = "" ]; + then + files="$OPTARG" + else + files="$files +$OPTARG" + fi + fi + ;; + "F") + if [ "$flags" = "" ]; + then + flags="$OPTARG" + else + flags="$flags,$OPTARG" + fi + ;; + "g") + gcov_ignore="$gcov_ignore -not -path '$OPTARG'" + ;; + "G") + gcov_include="$gcov_include -path '$OPTARG'" + ;; + "h") + show_help + exit 0; + ;; + "J") + ft_xcodellvm="1" + ft_xcodeplist="0" + if [ "$xp" = "" ]; + then + xp="$OPTARG" + else + xp="$xp\|$OPTARG" + fi + ;; + "k") + prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::') + ;; + "K") + b="" + g="" + r="" + e="" + x="" + ;; + "n") + name="$OPTARG" + ;; + "p") + proj_root="$OPTARG" + ;; + "P") + pr_o="$OPTARG" + ;; + "r") + slug_o="$OPTARG" + ;; + "R") + git_root="$OPTARG" + ;; + "s") + if [ "$search_in_o" = "" ]; + then + search_in_o="$OPTARG" + else + search_in_o="$search_in_o $OPTARG" + fi + ;; + "S") + cacert="--cacert \"$OPTARG\"" + ;; + "t") + if [ "${OPTARG::1}" = "@" ]; + then + token=$(cat "${OPTARG:1}" | tr -d ' \n') + else + token="$OPTARG" + fi + ;; + "T") + tag_o="$OPTARG" + ;; + "u") + url_o=$(echo "$OPTARG" | sed -e 's/\/$//') + ;; + "U") + curlargs="$OPTARG" + ;; + "v") + set -x + curl_s="" + ;; + "x") + gcov_exe=$OPTARG + ;; + "X") + if [ "$OPTARG" = "gcov" ]; + then + ft_gcov="0" + elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ]; + then + ft_coveragepy="0" + elif [ "$OPTARG" = "xcodellvm" ]; + then + ft_xcodellvm="1" + ft_xcodeplist="0" + elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ]; + then + ft_fix="0" + elif [ "$OPTARG" = "xcode" ]; + then + ft_xcodellvm="0" + ft_xcodeplist="0" + elif [ "$OPTARG" = "search" ]; + then + ft_search="0" + elif [ "$OPTARG" = "xcodepartials" ]; + then + beta_xcode_partials="-use-color" + elif [ "$OPTARG" = "network" ]; + then + ft_network="0" + elif [ "$OPTARG" = "s3" ]; + then + ft_s3="0" + fi + ;; + "y") + codecov_yml="$OPTARG" + ;; + "Z") + exit_with=1 + ;; + esac + done +fi + +say " + _____ _ + / ____| | | +| | ___ __| | ___ ___ _____ __ +| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / / +| |___| (_) | (_| | __/ (_| (_) \\ V / + \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/ + Bash-$VERSION + +" + +search_in="$proj_root" + +if [ "$JENKINS_URL" != "" ]; +then + say "$e==>$x Jenkins CI detected." + # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project + # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables + service="jenkins" + + if [ "$ghprbSourceBranch" != "" ]; + then + branch="$ghprbSourceBranch" + elif [ "$GIT_BRANCH" != "" ]; + then + branch="$GIT_BRANCH" + elif [ "$BRANCH_NAME" != "" ]; + then + branch="$BRANCH_NAME" + fi + + if [ "$ghprbActualCommit" != "" ]; + then + commit="$ghprbActualCommit" + elif [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + fi + + if [ "$ghprbPullId" != "" ]; + then + pr="$ghprbPullId" + elif [ "$CHANGE_ID" != "" ]; + then + pr="$CHANGE_ID" + fi + + build="$BUILD_NUMBER" + build_url=$(urlencode "$BUILD_URL") + +elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ]; +then + say "$e==>$x Travis CI detected." 
+ # https://docs.travis-ci.com/user/environment-variables/ + service="travis" + commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}" + build="$TRAVIS_JOB_NUMBER" + pr="$TRAVIS_PULL_REQUEST" + job="$TRAVIS_JOB_ID" + slug="$TRAVIS_REPO_SLUG" + env="$env,TRAVIS_OS_NAME" + tag="$TRAVIS_TAG" + if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ]; + then + branch="$TRAVIS_BRANCH" + fi + + language=$(printenv | grep "TRAVIS_.*_VERSION" | head -1) + if [ "$language" != "" ]; + then + env="$env,${language%=*}" + fi + +elif [ "$DOCKER_REPO" != "" ]; +then + say "$e==>$x Docker detected." + # https://docs.docker.com/docker-cloud/builds/advanced/ + service="docker" + branch="$SOURCE_BRANCH" + commit="$SOURCE_COMMIT" + slug="$DOCKER_REPO" + tag="$CACHE_TAG" + env="$env,IMAGE_NAME" + +elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ]; +then + say "$e==>$x Codeship CI detected." + # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/ + service="codeship" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + build_url=$(urlencode "$CI_BUILD_URL") + commit="$CI_COMMIT_ID" + +elif [ ! -z "$CF_BUILD_URL" ] && [ ! -z "$CF_BUILD_ID" ]; +then + say "$e==>$x Codefresh CI detected." + # https://docs.codefresh.io/v1.0/docs/variables + service="codefresh" + branch="$CF_BRANCH" + build="$CF_BUILD_ID" + build_url=$(urlencode "$CF_BUILD_URL") + commit="$CF_REVISION" + +elif [ "$TEAMCITY_VERSION" != "" ]; +then + say "$e==>$x TeamCity CI detected." + # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters + # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298 + if [ "$TEAMCITY_BUILD_BRANCH" = '' ]; + then + echo " Teamcity does not automatically make build parameters available as environment variables." + echo " Add the following environment parameters to the build configuration" + echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%" + echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%" + echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%" + echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%" + echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%" + fi + service="teamcity" + branch="$TEAMCITY_BUILD_BRANCH" + build="$TEAMCITY_BUILD_ID" + build_url=$(urlencode "$TEAMCITY_BUILD_URL") + if [ "$TEAMCITY_BUILD_COMMIT" != "" ]; + then + commit="$TEAMCITY_BUILD_COMMIT" + else + commit="$BUILD_VCS_NUMBER" + fi + remote_addr="$TEAMCITY_BUILD_REPOSITORY" + +elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ]; +then + say "$e==>$x Circle CI detected." 
+ # https://circleci.com/docs/environment-variables + service="circleci" + branch="$CIRCLE_BRANCH" + build="$CIRCLE_BUILD_NUM" + job="$CIRCLE_NODE_INDEX" + if [ "$CIRCLE_PROJECT_REPONAME" != "" ]; + then + slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME" + else + # git@github.com:owner/repo.git + slug="${CIRCLE_REPOSITORY_URL##*:}" + # owner/repo.git + slug="${slug%%.git}" + fi + pr="$CIRCLE_PR_NUMBER" + commit="$CIRCLE_SHA1" + search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS" + +elif [ "$BUDDYBUILD_BRANCH" != "" ]; +then + say "$e==>$x buddybuild detected" + # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps + service="buddybuild" + branch="$BUDDYBUILD_BRANCH" + build="$BUDDYBUILD_BUILD_NUMBER" + build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID" + # BUDDYBUILD_TRIGGERED_BY + if [ "$ddp" = "$(echo ~)/Library/Developer/Xcode/DerivedData" ]; + then + ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest" + fi + +elif [ "${bamboo_planRepository_revision}" != "" ]; +then + say "$e==>$x Bamboo detected" + # https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables + service="bamboo" + commit="${bamboo_planRepository_revision}" + branch="${bamboo_planRepository_branch}" + build="${bamboo_buildNumber}" + build_url="${bamboo_buildResultsUrl}" + remote_addr="${bamboo_planRepository_repositoryUrl}" + +elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ]; +then + # http://devcenter.bitrise.io/faq/available-environment-variables/ + say "$e==>$x Bitrise CI detected." + service="bitrise" + branch="$BITRISE_GIT_BRANCH" + build="$BITRISE_BUILD_NUMBER" + build_url=$(urlencode "$BITRISE_BUILD_URL") + pr="$BITRISE_PULL_REQUEST" + if [ "$GIT_CLONE_COMMIT_HASH" != "" ]; + then + commit="$GIT_CLONE_COMMIT_HASH" + fi + +elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ]; +then + say "$e==>$x Semaphore CI detected." + # https://semaphoreapp.com/docs/available-environment-variables.html + service="semaphore" + branch="$BRANCH_NAME" + build="$SEMAPHORE_BUILD_NUMBER" + job="$SEMAPHORE_CURRENT_THREAD" + pr="$PULL_REQUEST_NUMBER" + slug="$SEMAPHORE_REPO_SLUG" + commit="$REVISION" + env="$env,SEMAPHORE_TRIGGER_SOURCE" + +elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ]; +then + say "$e==>$x Buildkite CI detected." + # https://buildkite.com/docs/guides/environment-variables + service="buildkite" + branch="$BUILDKITE_BRANCH" + build="$BUILDKITE_BUILD_NUMBER" + job="$BUILDKITE_JOB_ID" + build_url=$(urlencode "$BUILDKITE_BUILD_URL") + slug="$BUILDKITE_PROJECT_SLUG" + commit="$BUILDKITE_COMMIT" + if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then + pr="$BUILDKITE_PULL_REQUEST" + fi + tag="$BUILDKITE_TAG" + +elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ]; +then + say "$e==>$x Drone CI detected." + # http://docs.drone.io/env.html + # drone commits are not full shas + service="drone.io" + branch="$DRONE_BRANCH" + build="$DRONE_BUILD_NUMBER" + build_url=$(urlencode "${DRONE_BUILD_LINK}") + pr="$DRONE_PULL_REQUEST" + job="$DRONE_JOB_NUMBER" + tag="$DRONE_TAG" + +elif [ "$HEROKU_TEST_RUN_BRANCH" != "" ]; +then + say "$e==>$x Heroku CI detected." + # https://devcenter.heroku.com/articles/heroku-ci#environment-variables + service="heroku" + branch="$HEROKU_TEST_RUN_BRANCH" + build="$HEROKU_TEST_RUN_ID" + +elif [ "$CI" = "True" ] && [ "$APPVEYOR" = "True" ]; +then + say "$e==>$x Appveyor CI detected." 
+ # http://www.appveyor.com/docs/environment-variables + service="appveyor" + branch="$APPVEYOR_REPO_BRANCH" + build=$(urlencode "$APPVEYOR_JOB_ID") + pr="$APPVEYOR_PULL_REQUEST_NUMBER" + job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION" + slug="$APPVEYOR_REPO_NAME" + commit="$APPVEYOR_REPO_COMMIT" + +elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ]; +then + say "$e==>$x Wercker CI detected." + # http://devcenter.wercker.com/articles/steps/variables.html + service="wercker" + branch="$WERCKER_GIT_BRANCH" + build="$WERCKER_MAIN_PIPELINE_STARTED" + slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY" + commit="$WERCKER_GIT_COMMIT" + +elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ]; +then + say "$e==>$x Magnum CI detected." + # https://magnum-ci.com/docs/environment + service="magnum" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + commit="$CI_COMMIT" + +elif [ "$SHIPPABLE" = "true" ]; +then + say "$e==>$x Shippable CI detected." + # http://docs.shippable.com/ci_configure/ + service="shippable" + branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH") + build="$BUILD_NUMBER" + build_url=$(urlencode "$BUILD_URL") + pr="$PULL_REQUEST" + slug="$REPO_FULL_NAME" + commit="$COMMIT" + +elif [ "$TDDIUM" = "true" ]; +then + say "Solano CI detected." + # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/ + service="solano" + commit="$TDDIUM_CURRENT_COMMIT" + branch="$TDDIUM_CURRENT_BRANCH" + build="$TDDIUM_TID" + pr="$TDDIUM_PR_ID" + +elif [ "$GREENHOUSE" = "true" ]; +then + say "$e==>$x Greenhouse CI detected." + # http://docs.greenhouseci.com/docs/environment-variables-files + service="greenhouse" + branch="$GREENHOUSE_BRANCH" + build="$GREENHOUSE_BUILD_NUMBER" + build_url=$(urlencode "$GREENHOUSE_BUILD_URL") + pr="$GREENHOUSE_PULL_REQUEST" + commit="$GREENHOUSE_COMMIT" + search_in="$search_in $GREENHOUSE_EXPORT_DIR" + +elif [ "$GITLAB_CI" != "" ]; +then + say "$e==>$x GitLab CI detected." + # http://doc.gitlab.com/ce/ci/variables/README.html + service="gitlab" + branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}" + build="${CI_BUILD_ID:-$CI_JOB_ID}" + remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}" + commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}" + +else + say "${r}x>${x} No CI provider detected." + say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}" + say " Testing with Tox? 
${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}" + +fi + +say " ${e}project root:${x} $git_root" + +# find branch, commit, repo from git command +if [ "$GIT_BRANCH" != "" ]; +then + branch="$GIT_BRANCH" + +elif [ "$branch" = "" ]; +then + branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "") + if [ "$branch" = "HEAD" ]; + then + branch="" + fi +fi + +if [ "$commit_o" = "" ]; +then + # merge commit -> actual commit + mc= + if [ -n "$pr" ] && [ "$pr" != false ]; + then + mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") + fi + if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; + then + say " Fixing merge commit SHA" + commit=$(echo "$mc" | cut -d' ' -f2) + elif [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + elif [ "$commit" = "" ]; + then + commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "") + fi +else + commit="$commit_o" +fi + +if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ]; +then + say "${e}-->${x} token set from env" + token="$CODECOV_TOKEN" +fi + +if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ]; +then + say "${e}-->${x} url set from env" + url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//') +fi + +if [ "$CODECOV_SLUG" != "" ]; +then + say "${e}-->${x} slug set from env" + slug_o="$CODECOV_SLUG" + +elif [ "$slug" = "" ]; +then + if [ "$remote_addr" = "" ]; + then + remote_addr=$(git config --get remote.origin.url || hg paths default || echo '') + fi + if [ "$remote_addr" != "" ]; + then + if echo "$remote_addr" | grep -q "//"; then + # https + slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//') + else + # ssh + slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//') + fi + fi + if [ "$slug" = "/" ]; + then + slug="" + fi +fi + +yaml=$(test -n "$codecov_yml" && echo "$codecov_yml" \ + || cd "$git_root" && \ + git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || cd $proj_root && find . -type f -name '*codecov.y*ml' -depth 1 2>/dev/null \ + || echo '') +yaml=$(echo "$yaml" | head -1) + +if [ "$yaml" != "" ]; +then + say " ${e}Yaml found at:${x} $yaml" + config=$(parse_yaml "$git_root/$yaml" || echo '') + + # TODO validate the yaml here + + if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ]; + then + say "${e}-->${x} token set from yaml" + token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ]; + then + say "${e}-->${x} url set from yaml" + url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ]; + then + say "${e}-->${x} slug set from yaml" + slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')" + fi +else + say " ${g}Yaml not found, that's ok! 
Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}" + +fi + +if [ "$branch_o" != "" ]; +then + branch=$(urlencode "$branch_o") +else + branch=$(urlencode "$branch") +fi + +query="branch=$branch\ + &commit=$commit\ + &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\ + &build_url=$build_url\ + &name=$(urlencode "$name")\ + &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\ + &slug=$([ "$slug_o" = "" ] && urlencode "$slug" || urlencode "$slug_o")\ + &service=$service\ + &flags=$flags\ + &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\ + &job=$job" + +if [ "$ft_search" = "1" ]; +then + # detect bower components location + bower_components="bower_components" + bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "") + if [ "$bower_rc" != "" ]; + then + bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//') + if [ "$bower_components" = "" ]; + then + bower_components="bower_components" + fi + fi + + # Swift Coverage + if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode reports via llvm-cov" + say " DerivedData folder: $ddp" + profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '') + if [ "$profdata_files" != "" ]; + then + # xcode via profdata + if [ "$xp" = "" ]; + then + # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/') + # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}" + say " ${g}hint${x} Speed up Swift processing by using ${g}-J 'AppName'${x} (regexp accepted)" + say " ${g}hint${x} This will remove Pods/ from your report. Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}" + fi + while read -r profdata; + do + if [ "$profdata" != "" ]; + then + swiftcov "$profdata" "$xp" + fi + done <<< "$profdata_files" + else + say " ${e}->${x} No Swift coverage found" + fi + + # Obj-C Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say " ${e}->${x} Running $gcov_exe for Obj-C" + bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true + fi + fi + + if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode plists" + plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '') + if [ "$plists_files" != "" ]; + then + while read -r plist; + do + if [ "$plist" != "" ]; + then + say " ${g}Found${x} plist file at $plist" + plutil -convert xml1 -o "$(basename "$plist").plist" -- $plist + fi + done <<< "$plists_files" + fi + fi + + # Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say "${e}==>${x} Running gcov in $proj_root ${e}(disable via -X gcov)${x}" + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true + else + say "${e}==>${x} gcov disabled" + fi + + # Python Coverage + if [ "$ft_coveragepy" = "1" ]; + then + if [ ! -f coverage.xml ]; + then + if which coverage >/dev/null 2>&1; + then + say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}" + + dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '') + if [ "$dotcoverage" != "" ]; + then + cd "$(dirname "$dotcoverage")" + if [ ! -f .coverage ]; + then + say " ${e}->${x} Running coverage combine" + coverage combine -a + fi + say " ${e}->${x} Running coverage xml" + if [ "$(coverage xml -i)" != "No data to report."
]; + then + files="$files +$PWD/coverage.xml" + else + say " ${r}No data to report.${x}" + fi + cd "$proj_root" + else + say " ${r}No .coverage file found.${x}" + fi + else + say "${e}==>${x} Python coveragepy not found" + fi + fi + else + say "${e}==>${x} Python coveragepy disabled" + fi + + if [ "$search_in_o" != "" ]; + then + # location override + search_in="$search_in_o" + fi + + say "$e==>$x Searching for coverage reports in:" + for _path in $search_in + do + say " ${g}+${x} $_path" + done + + patterns="find $search_in \( \ + -name vendor \ + -or -name htmlcov \ + -or -name virtualenv \ + -or -name js/generated/coverage \ + -or -name .virtualenv \ + -or -name virtualenvs \ + -or -name .virtualenvs \ + -or -name .env \ + -or -name .envs \ + -or -name env \ + -or -name .yarn-cache \ + -or -name envs \ + -or -name .venv \ + -or -name .venvs \ + -or -name venv \ + -or -name venvs \ + -or -name .git \ + -or -name .hg \ + -or -name .tox \ + -or -name __pycache__ \ + -or -name '.egg-info*' \ + -or -name '$bower_components' \ + -or -name node_modules \ + -or -name 'conftest_*.c.gcov' \ + \) -prune -or \ + -type f \( -name '*coverage*.*' \ + -or -name 'nosetests.xml' \ + -or -name 'jacoco*.xml' \ + -or -name 'clover.xml' \ + -or -name 'report.xml' \ + -or -name '*.codecov.*' \ + -or -name 'codecov.*' \ + -or -name 'cobertura.xml' \ + -or -name 'excoveralls.json' \ + -or -name 'luacov.report.out' \ + -or -name 'coverage-final.json' \ + -or -name 'naxsi.info' \ + -or -name 'lcov.info' \ + -or -name 'lcov.dat' \ + -or -name '*.lcov' \ + -or -name '*.clover' \ + -or -name 'cover.out' \ + -or -name 'gcov.info' \ + -or -name '*.gcov' \ + -or -name '*.lst' \ + $include_cov \) \ + $exclude_cov \ + -not -name '*.profdata' \ + -not -name 'coverage-summary.json' \ + -not -name 'phpunit-code-coverage.xml' \ + -not -name '*/classycle/report.xml' \ + -not -name 'remapInstanbul.coverage*.json' \ + -not -name 'phpunit-coverage.xml' \ + -not -name '*codecov.yml' \ + -not -name '*.serialized' \ + -not -name '.coverage*' \ + -not -name '.*coveragerc' \ + -not -name '*.sh' \ + -not -name '*.bat' \ + -not -name '*.ps1' \ + -not -name '*.env' \ + -not -name '*.cmake' \ + -not -name '*.dox' \ + -not -name '*.ec' \ + -not -name '*.rst' \ + -not -name '*.h' \ + -not -name '*.scss' \ + -not -name '*.o' \ + -not -name '*.proto' \ + -not -name '*.sbt' \ + -not -name '*.xcoverage.*' \ + -not -name '*.gz' \ + -not -name '*.conf' \ + -not -name '*.p12' \ + -not -name '*.csv' \ + -not -name '*.rsp' \ + -not -name '*.m4' \ + -not -name '*.pem' \ + -not -name '*~' \ + -not -name '*.exe' \ + -not -name '*.am' \ + -not -name '*.template' \ + -not -name '*.cp' \ + -not -name '*.bw' \ + -not -name '*.crt' \ + -not -name '*.log' \ + -not -name '*.cmake' \ + -not -name '*.pth' \ + -not -name '*.in' \ + -not -name '*.jar*' \ + -not -name '*.pom*' \ + -not -name '*.png' \ + -not -name '*.jpg' \ + -not -name '*.sql' \ + -not -name '*.jpeg' \ + -not -name '*.svg' \ + -not -name '*.gif' \ + -not -name '*.csv' \ + -not -name '*.snapshot' \ + -not -name '*.mak*' \ + -not -name '*.bash' \ + -not -name '*.data' \ + -not -name '*.py' \ + -not -name '*.class' \ + -not -name '*.xcconfig' \ + -not -name '*.ec' \ + -not -name '*.coverage' \ + -not -name '*.pyc' \ + -not -name '*.cfg' \ + -not -name '*.egg' \ + -not -name '*.ru' \ + -not -name '*.css' \ + -not -name '*.less' \ + -not -name '*.pyo' \ + -not -name '*.whl' \ + -not -name '*.html' \ + -not -name '*.ftl' \ + -not -name '*.erb' \ + -not -name '*.rb' \ + -not -name '*.js' \ + -not -name 
'*.jade' \ + -not -name '*.db' \ + -not -name '*.md' \ + -not -name '*.cpp' \ + -not -name '*.gradle' \ + -not -name '*.tar.tz' \ + -not -name '*.scss' \ + -not -name 'include.lst' \ + -not -name 'fullLocaleNames.lst' \ + -not -name 'inputFiles.lst' \ + -not -name 'createdFiles.lst' \ + -not -name 'scoverage.measurements.*' \ + -not -name 'test_*_coverage.txt' \ + -not -name 'testrunner-coverage*' \ + -print 2>/dev/null" + files=$(eval "$patterns" || echo '') + +elif [ "$include_cov" != "" ]; +then + files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '') +fi + +num_of_files=$(echo "$files" | wc -l | tr -d ' ') +if [ "$num_of_files" != '' ] && [ "$files" != '' ]; +then + say " ${e}->${x} Found $num_of_files reports" +fi + +# no files found +if [ "$files" = "" ]; +then + say "${r}-->${x} No coverage report found." + say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + exit ${exit_with}; +fi + +if [ "$ft_network" == "1" ]; +then + say "${e}==>${x} Detecting git/mercurial file structure" + network=$(cd "$git_root" && git ls-files 2>/dev/null || hg locate 2>/dev/null || echo "") + if [ "$network" = "" ]; + then + network=$(find "$git_root" \( \ + -name virtualenv \ + -name .virtualenv \ + -name virtualenvs \ + -name .virtualenvs \ + -name '*.png' \ + -name '*.gif' \ + -name '*.jpg' \ + -name '*.jpeg' \ + -name '*.md' \ + -name .env \ + -name .envs \ + -name env \ + -name envs \ + -name .venv \ + -name .venvs \ + -name venv \ + -name venvs \ + -name .git \ + -name .egg-info \ + -name shunit2-2.1.6 \ + -name vendor \ + -name __pycache__ \ + -name node_modules \ + -path '*/$bower_components/*' \ + -path '*/target/delombok/*' \ + -path '*/build/lib/*' \ + -path '*/js/generated/coverage/*' \ + \) -prune -or \ + -type f -print 2>/dev/null || echo '') + fi + + if [ "$prefix_o" != "" ]; + then + network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}") + fi +fi + +upload_file=`mktemp /tmp/codecov.XXXXXX` +adjustments_file=`mktemp /tmp/codecov.adjustments.XXXXXX` + +cleanup() { + rm -f $upload_file $adjustments_file $upload_file.gz +} + +trap cleanup INT ABRT TERM + +if [ "$env" != "" ]; +then + inc_env="" + say "${e}==>${x} Appending build variables" + for varname in $(echo "$env" | tr ',' ' ') + do + if [ "$varname" != "" ]; + then + say " ${g}+${x} $varname" + inc_env="${inc_env}${varname}=$(eval echo "\$${varname}") +" + fi + done + +echo "$inc_env<<<<<< ENV" >> $upload_file +fi + +# Append git file list +# write discovered yaml location +echo "$yaml" >> $upload_file +if [ "$ft_network" == "1" ]; +then + i="woff|eot|otf" # fonts + i="$i|gif|png|jpg|jpeg|psd" # images + i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|html|csv" # docs + i="$i|yml|yaml|.gitignore" # supporting docs + echo "$network" | grep -vwE "($i)$" >> $upload_file +fi +echo "<<<<<< network" >> $upload_file + +fr=0 +say "${e}==>${x} Reading reports" +while IFS='' read -r file; +do + # read the coverage file + if [ "$(echo "$file" | tr -d ' ')" != '' ]; + then + if [ -f "$file" ]; + then + report_len=$(wc -c < "$file") + if [ "$report_len" -ne 0 ]; + then + say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" + # append to upload + _filename=$(basename "$file") + if [ "${_filename##*.}" = 'gcov' ]; + then + echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")" >> $upload_file + # get file name + head -1 $file >> $upload_file + # 1. remove source code + # 2. remove ending bracket lines + # 3. remove whitespace + # 4.
remove contextual lines + # 5. remove function names + awk -F': *' '{print $1":"$2":"}' $file \ + | sed '\/: *} *$/d' \ + | sed 's/^ *//' \ + | sed '/^-/d' \ + | sed 's/^function.*/func/' >> $upload_file + else + echo "# path=$(echo "$file" | sed "s|^$git_root/||")" >> $upload_file + cat "$file" >> $upload_file + fi + echo "<<<<<< EOF" >> $upload_file + fr=1 + if [ "$clean" = "1" ]; + then + rm "$file" + fi + else + say " ${r}-${x} Skipping empty file $file" + fi + else + say " ${r}-${x} file not found at $file" + fi + fi +done <<< "$(echo -e "$files")" + +if [ "$fr" = "0" ]; +then + say "${r}-->${x} No coverage data found." + say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + say " search for your projects language to learn how to collect reports." + exit ${exit_with}; +fi + +if [ "$ft_fix" = "1" ]; +then + say "${e}==>${x} Appending adjustments" + say " ${b}http://docs.codecov.io/docs/fixing-reports${x}" + + empty_line='^[[:space:]]*$' + # // + syntax_comment='^[[:space:]]*//.*' + # /* or */ + syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$' + # { or } + syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$' + # [ or ] + syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$' + + skip_dirs="-not -path '*/$bower_components/*' \ + -not -path '*/node_modules/*'" + + cut_and_join() { + awk 'BEGIN { FS=":" } + $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next } + $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next } + { out=out","$2 } + END { print out }' 2>/dev/null + } + + if echo "$network" | grep -m1 '.kt$' 1>/dev/null; + then + # skip brackets and comments + find "$git_root" -type f \ + -name '*.kt' \ + -exec \ + grep -nIHE -e $syntax_bracket \ + -e $syntax_comment_block {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + + # last line in file + find "$git_root" -type f \ + -name '*.kt' -exec \ + wc -l {} \; \ + | while read l; do echo "EOF: $l"; done \ + 2>/dev/null \ + >> $adjustments_file \ + || echo '' + + fi + + if echo "$network" | grep -m1 '.go$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + find "$git_root" -not -path '*/vendor/*' \ + -type f \ + -name '*.go' \ + -exec \ + grep -nIHE \ + -e $empty_line \ + -e $syntax_comment \ + -e $syntax_comment_block \ + -e $syntax_bracket \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '.dart$' 1>/dev/null; + then + # skip brackets + find "$git_root" -type f \ + -name '*.dart' \ + -exec \ + grep -nIHE \ + -e $syntax_bracket \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '.php$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + find "$git_root" -not -path "*/vendor/*" \ + -type f \ + -name '*.php' \ + -exec \ + grep -nIHE \ + -e $syntax_list \ + -e $syntax_bracket \ + -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '\(.cpp\|.h\|.cxx\|.c\|.hpp\|.m\)$' 1>/dev/null; + then + # skip brackets + find "$git_root" -type f \ + $skip_dirs \ + \( \ + -name '*.h' \ + -or -name '*.cpp' \ + -or -name '*.cxx' \ + -or -name '*.m' \ + -or -name '*.c' \ + -or -name '*.hpp' \ + \) -exec \ + grep -nIHE \ + -e $empty_line \ + -e $syntax_bracket \ + -e '// LCOV_EXCL' \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + + # skip brackets + find "$git_root" -type f \ + $skip_dirs \ + \( \ + -name '*.h' \ + -or -name '*.cpp' \ + -or 
-name '*.cxx' \ + -or -name '*.m' \ + -or -name '*.c' \ + -or -name '*.hpp' \ + \) -exec \ + grep -nIH '// LCOV_EXCL' \ + {} \; \ + >> $adjustments_file \ + || echo '' + + fi + + found=$(cat $adjustments_file | tr -d ' ') + + if [ "$found" != "" ]; + then + say " ${g}+${x} Found adjustments" + echo "# path=fixes" >> $upload_file + cat $adjustments_file >> $upload_file + echo "<<<<<< EOF" >> $upload_file + rm -rf $adjustments_file + else + say " ${e}->${x} No adjustments found" + fi +fi + +if [ "$url_o" != "" ]; +then + url="$url_o" +fi + +if [ "$dump" != "0" ]; +then + # trim whitespace from query + say " ${e}->${x} Dumping upload file (no upload)" + echo "$url/upload/v4?$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ')" + cat $upload_file +else + + say "${e}==>${x} Gzipping contents" + gzip -nf9 $upload_file + + query=$(echo "${query}" | tr -d ' ') + say "${e}==>${x} Uploading reports" + say " ${e}url:${x} $url" + say " ${e}query:${x} $query" + + # now add token to query + query=$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ') + + if [ "$ft_s3" = "1" ]; + then + i="0" + while [ $i -lt 4 ] + do + i=$[$i+1] + say " ${e}->${x} Pinging Codecov" + res=$(curl $curl_s -X POST $curlargs $cacert \ + -H 'X-Reduced-Redundancy: false' \ + -H 'X-Content-Type: application/x-gzip' \ + "$url/upload/v4?$query" || true) + # a good reply is "https://codecov.io" + "\n" + "https://codecov.s3.amazonaws.com/..." + status=$(echo "$res" | head -1 | grep 'HTTP ' | cut -d' ' -f2) + if [ "$status" = "" ]; + then + s3target=$(echo "$res" | sed -n 2p) + say " ${e}->${x} Uploading" + s3=$(curl $curl_s -fiX PUT $curlawsargs \ + --data-binary @$upload_file.gz \ + -H 'Content-Type: application/x-gzip' \ + -H 'Content-Encoding: gzip' \ + -H 'x-amz-acl: public-read' \ + "$s3target" || true) + if [ "$s3" != "" ]; + then + say " ${g}->${x} View reports at ${b}$(echo "$res" | sed -n 1p)${x}" + exit 0 + else + say " ${r}X>${x} Failed to upload" + fi + elif [ "$status" = "400" ]; + then + # 400 Error + say "${g}${res}${x}" + exit ${exit_with} + fi + say " ${e}->${x} Sleeping for 30s and trying again..." + sleep 30 + done + fi + + say " ${e}->${x} Uploading to Codecov" + i="0" + while [ $i -lt 4 ] + do + i=$[$i+1] + + res=$(curl $curl_s -X POST $curlargs $cacert \ + --data-binary @$upload_file.gz \ + -H 'Content-Type: text/plain' \ + -H 'Content-Encoding: gzip' \ + -H 'X-Content-Encoding: gzip' \ + -H 'Accept: text/plain' \ + "$url/upload/v2?$query" || echo 'HTTP 500') + # HTTP 200 + # http://.... + status=$(echo "$res" | head -1 | cut -d' ' -f2) + if [ "$status" = "" ]; + then + say " View reports at ${b}$(echo "$res" | head -2 | tail -1)${x}" + exit 0 + + elif [ "${status:0:1}" = "5" ]; + then + say " ${e}->${x} Sleeping for 30s and trying again..."
+ sleep 30 + + else + say " ${g}${res}${x}" + exit 0 + exit ${exit_with} + fi + + done + + say " ${r}X> Failed to upload coverage reports${x}" +fi + +exit ${exit_with} diff --git a/.circleci/config.yml b/.circleci/config.yml index 8d321e9e690..55a3da4f9ee 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,10 +41,10 @@ jobs: key: v3-pkg-cache paths: - /go/pkg - - save_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - paths: - - /go/src/github.com/tendermint/tendermint + # - save_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # paths: + # - /go/src/github.com/tendermint/tendermint build_slate: <<: *defaults @@ -53,8 +53,23 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2 + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: slate docs command: | @@ -69,8 +84,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: metalinter command: | @@ -91,8 +120,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run abci apps tests command: | @@ -108,8 +151,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run abci-cli tests command: | @@ -123,8 +180,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir 
-p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils - run: name: Run tests @@ -138,8 +209,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: mkdir -p /tmp/logs - run: name: Run tests @@ -163,12 +248,48 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run tests command: bash test/persist/test_failure_indices.sh + localnet: + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout + - run: + name: run localnet and exit on failure + command: | + set -x + make get_tools + make get_vendor_deps + make build-linux + make localnet-start & + ./scripts/localnet-blocks-test.sh 40 5 10 localhost + test_p2p: environment: GOBIN: /home/circleci/.go_workspace/bin @@ -180,14 +301,30 @@ jobs: - run: mkdir -p $GOPATH/src/github.com/tendermint - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint - run: bash test/p2p/circleci.sh + - store_artifacts: + path: /home/circleci/project/test/p2p/logs upload_coverage: <<: *defaults steps: - attach_workspace: at: /tmp/workspace - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: gather command: | @@ -199,7 +336,7 @@ jobs: done - run: name: upload - command: bash <(curl -s https://codecov.io/bash) -f coverage.txt + command: bash .circleci/codecov.sh -f coverage.txt workflows: version: 2 @@ -224,6 +361,9 @@ workflows: - test_persistence: requires: - setup_dependencies + - localnet: + requires: + - setup_dependencies - test_p2p - upload_coverage: requires: diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9586a870287..9d2fc15be57 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,4 +4,4 @@ * @ebuchman @melekes @xla # Precious documentation -/docs/ @zramsay @jolesbi +/docs/ @zramsay diff --git a/.gitignore b/.gitignore 
index 4f8481603bc..1cf9cdb9030 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,7 @@ test/p2p/data/ test/logs coverage.txt docs/_build +docs/dist *.log abci-cli docs/node_modules/ @@ -25,12 +26,19 @@ scripts/cutWALUntil/cutWALUntil .idea/ *.iml +.vscode/ + libs/pubsub/query/fuzz_test/output shunit2 +.tendermint-lite +addrbook.json + */vendor */.glide .terraform terraform.tfstate terraform.tfstate.backup terraform.tfstate.d + +.vscode diff --git a/CHANGELOG.md b/CHANGELOG.md index 364af117cc8..eb22e8b8452 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,339 @@ # Changelog +## v0.26.1 + +*November 11, 2018* + +Special thanks to external contributors on this release: @katakonst + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). + +### IMPROVEMENTS: + +- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic +- [docs] [\#2749](https://github.com/tendermint/tendermint/issues/2749) Deduplicate some ABCI docs +- [mempool] More detailed log messages + - [\#2724](https://github.com/tendermint/tendermint/issues/2724) + - [\#2762](https://github.com/tendermint/tendermint/issues/2762) + +### BUG FIXES: + +- [autofile] [\#2703](https://github.com/tendermint/tendermint/issues/2703) Do not panic when checking Head size +- [crypto/merkle] [\#2756](https://github.com/tendermint/tendermint/issues/2756) Fix crypto/merkle ProofOperators.Verify to check bounds on keypath parts. +- [mempool] fix a bug where we create a WAL despite `wal_dir` being empty +- [p2p] [\#2771](https://github.com/tendermint/tendermint/issues/2771) Fix `peer-id` label name to `peer_id` in prometheus metrics +- [p2p] [\#2797](https://github.com/tendermint/tendermint/pull/2797) Fix IDs in peer NodeInfo and require them for addresses + in AddressBook +- [p2p] [\#2797](https://github.com/tendermint/tendermint/pull/2797) Do not close conn immediately after sending pex addrs in seed mode. Partial fix for [\#2092](https://github.com/tendermint/tendermint/issues/2092). + +## v0.26.0 + +*November 2, 2018* + +Special thanks to external contributors on this release: +@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu, +@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu. + +Special thanks to @Slamper for a series of bug reports in our [bug bounty +program](https://hackerone.com/tendermint) which are fixed in this release. + +This release is primarily about adding Version fields to various data structures, +optimizing consensus messages for signing and verification in +restricted environments (like HSMs and the Ethereum Virtual Machine), and +aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938). +It also includes our first take at a generalized merkle proof system, and +changes the length of hashes used for hashing data structures from 20 to 32 +bytes. + +See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new +version. + +Please note that we are still making breaking changes to the protocols. +While the new Version fields should help us to keep the software backwards compatible +even while upgrading the protocols, we cannot guarantee that new releases will +be compatible with old chains just yet. We expect there will be another breaking +release or two before the Cosmos Hub launch, but we will otherwise be paying +increasing attention to backwards compatibility. Thanks for bearing with us! 
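To make the hash-width change described above concrete, here is a minimal shell sketch (an editorial illustration, not part of this diff), assuming the previous TMHASH was a SHA256 digest truncated to 20 bytes and the new TMHASH keeps the full 32-byte digest. PubKey addresses remain 20 bytes, as noted under Blockchain Protocol below.

```bash
# Illustration only: 20- vs 32-byte hash widths (assumed old/new TMHASH behavior).
echo -n "example" | sha256sum | cut -c1-40   # old style: SHA256 truncated to 20 bytes (40 hex chars)
echo -n "example" | sha256sum | cut -c1-64   # new style: full 32-byte SHA256 (64 hex chars)
```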
+ +### BREAKING CHANGES: + +* CLI/RPC/Config + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are now strings like "3s" and "100ms", not ints + * [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways) + * [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default + * [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer) + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as + encoded on disk. + * [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default + behaviour to `prove=false` + * [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and + `/net_info` + * [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove + `_params` suffix from fields in `consensus_params`. + +* Apps + * [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just + arbitrary bytes + * [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one + * [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for + `AppVersion` + * [abci] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Updates to ConsensusParams + * Remove `Params` suffix from field names + * Add `Params` suffix to message types + * Add new field and type, `Validator ValidatorParams`, to control what types of validator keys are allowed. + +* Go API + * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Timeouts are time.Duration, not ints + * [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accommodate General Merkle trees + * [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices + * [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported + * [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever + * [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`. + * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) + `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. + `PrevoteType`, `PrecommitType`.
+ * [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Rename fields in ConsensusParams to remove `Params` suffixes + * [types] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify Proposal message to align with spec + +* Blockchain Protocol + * [crypto/tmhash] [\#2732](https://github.com/tendermint/tendermint/issues/2732) TMHASH is now full 32-byte SHA256 + * All hashes in the block header and Merkle trees are now 32-bytes + * PubKey Addresses are still only 20-bytes + * [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the first block to be genesis time + * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version + * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`: + * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding. + * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field from `string` to `byte` and use new + `SignedMsgType` to enumerate. + * [types] [\#2730](https://github.com/tendermint/tendermint/issues/2730) Use + same order for fields in `Vote` as in the SignBytes + * [types] [\#2732](https://github.com/tendermint/tendermint/issues/2732) Remove the address field from the validator hash + * [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header + * [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded + struct instead of the Merkle tree of the fields + * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same + order they appear in the header, instead of sorting by field name + * [types] [\#2682](https://github.com/tendermint/tendermint/issues/2682) Use proto3 `varint` encoding for ints that are usually unsigned (instead of zigzag encoding). + * [types] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Add Validator field to ConsensusParams + (Used to control which pubkey types validators can use, by abci type).
+ +* P2P Protocol + * [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652) + Replace `CommitStepMessage` with `NewValidBlockMessage` + * [consensus] [\#2735](https://github.com/tendermint/tendermint/issues/2735) Simplify `Proposal` message to align with spec + * [consensus] [\#2730](https://github.com/tendermint/tendermint/issues/2730) + Add `Type` field to `Proposal` and use same order of fields as in the + SignBytes for both `Proposal` and `Vote` + * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of + DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake + + +### FEATURES: +- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}` +- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo` +- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together + +### IMPROVEMENTS: +- Additional Metrics + - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) + - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) +- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks +- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at + github.com/tendermint/crypto +- [libs/log] [\#2707](https://github.com/tendermint/tendermint/issues/2707) Add year to log format (@yutianwu) +- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit + +### BUG FIXES: +- [\#2711](https://github.com/tendermint/tendermint/issues/2711) Validate all incoming reactor messages. Fixes various bugs due to negative ints. 
+- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile needs to call Flush() before rename (@goolAdapter)
+- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method
+- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray)
+- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits
+- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for
+  timeoutPrecommit before starting next round
+- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for
+  Proposal or timeoutProposal before entering prevote
+- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock
+- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialize ValidRound and LockedRound to -1
+- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a
+  block
+- [consensus] [\#2652](https://github.com/tendermint/tendermint/issues/2652) Ensure the valid-block property holds even with a faulty proposer
+- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter)
+- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter)
+- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time
+- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when the genesis file's Validators field is nil
+- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter)
+- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to the originally dialed address (not the self-reported address) for persistent peers
+
+## v0.25.0
+
+*September 22, 2018*
+
+Special thanks to external contributors on this release:
+@scriptonist, @bradyjoestar, @WALL-E
+
+This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
+It also addresses some issues found via security audit, removes various unused
+functions from `libs/common`, and implements
+[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
+
+Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
+
+BREAKING CHANGES:
+
+* CLI/RPC/Config
+  * [rpc] [\#2391](https://github.com/tendermint/tendermint/issues/2391) /status `result.node_info.other` became a map
+  * [types] [\#2364](https://github.com/tendermint/tendermint/issues/2364) Remove `TxSize` and `BlockGossip` from `ConsensusParams`
+    * Maximum tx size is now set implicitly via `BlockSize.MaxBytes`
+    * The size of block parts in the consensus is now fixed to 64kB
+
+* Apps
+  * [mempool] [\#2360](https://github.com/tendermint/tendermint/issues/2360) Mempool tracks the `ResponseCheckTx.GasWanted` and
+    `ConsensusParams.BlockSize.MaxGas` and enforces:
+    - `GasWanted <= MaxGas` for every tx
+    - `(sum of GasWanted in block) <= MaxGas` for block proposal
+
+* Go API
+  * [libs/common] [\#2431](https://github.com/tendermint/tendermint/issues/2431) Remove Word256 due to lack of use
+  * [libs/common] [\#2452](https://github.com/tendermint/tendermint/issues/2452) Remove the following functions due to lack of use:
+    * byteslice.go: cmn.IsZeros, cmn.RightPadBytes, cmn.LeftPadBytes, cmn.PrefixEndBytes
+    * strings.go: cmn.IsHex, cmn.StripHex
+    * int.go: Uint64Slice, all put/get int64 methods
+
+FEATURES:
+- [rpc] [\#2415](https://github.com/tendermint/tendermint/issues/2415) New `/consensus_params?height=X` endpoint to query the consensus
+  params at any height (@scriptonist)
+- [types] [\#1714](https://github.com/tendermint/tendermint/issues/1714) Add Address to GenesisValidator
+- [metrics] [\#2337](https://github.com/tendermint/tendermint/issues/2337) `consensus.block_interval_metrics` is now a gauge, not a histogram (you will be able to see spikes, if any)
+- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.
+
+IMPROVEMENTS:
+- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output an error instead of panicking when the given `db_backend` is not initialised (@bradyjoestar)
+- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make the mempool cache a proper LRU (@bradyjoestar)
+- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
+- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
+
+BUG FIXES:
+- [node] [\#2294](https://github.com/tendermint/tendermint/issues/2294) Delay starting node until genesis time
+- [consensus] [\#2048](https://github.com/tendermint/tendermint/issues/2048) Correct peer statistics for marking a peer as good
+- [rpc] [\#2460](https://github.com/tendermint/tendermint/issues/2460) StartHTTPAndTLSServer() now passes StartTLS() errors back to the caller rather than hanging forever.
+- [p2p] [\#2047](https://github.com/tendermint/tendermint/issues/2047) Accept new connections asynchronously
+- [tm-bench] [\#2410](https://github.com/tendermint/tendermint/issues/2410) Enforce minimum transaction size (@WALL-E)
+
+## 0.24.0
+
+*September 6th, 2018*
+
+Special thanks to external contributors with PRs included in this release: ackratos, james-ray, bradyjoestar,
+peerlink, Ahmah2009, bluele, b00f.
+
+This release includes breaking upgrades in the block header,
+including the long-awaited changes for delaying validator set updates by one
+block to better support light clients.
+It also fixes enforcement of the maximum block size, and includes a BFT
+timestamp in each block that can be safely used by applications.
+There are also some minor breaking changes to the RPC, config, and ABCI.
+
+See [UPGRADING.md](UPGRADING.md#v0.24.0) for details on upgrading to the new
+version.
+
+From here on, breaking changes will be broken down by area to better reflect how
+users are affected by a change.
+
+A few more breaking changes are in the works - each will come with a clear
+Architecture Decision Record (ADR) explaining the change. You can review ADRs
+[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
+or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
+You can also check in on the [issues marked as
+breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
+
+BREAKING CHANGES:
+
+* CLI/RPC/Config
+  - [config] [\#2169](https://github.com/tendermint/tendermint/issues/2169) Replace MaxNumPeers with MaxNumInboundPeers and MaxNumOutboundPeers
+  - [config] [\#2300](https://github.com/tendermint/tendermint/issues/2300) Reduce default mempool size from 100k to 5k, until ABCI rechecking is implemented.
+  - [rpc] [\#1815](https://github.com/tendermint/tendermint/issues/1815) `/commit` returns a `signed_header` field instead of everything being top-level
+
+* Apps
+  - [abci] Added the address of the original proposer of the block to Header
+  - [abci] Change ABCI Header to match Tendermint exactly
+  - [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
+    [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
+    - Remove PubKey from `Validator` (so it's just Address and Power)
+    - Introduce `ValidatorUpdate` (with just PubKey and Power)
+    - InitChain and EndBlock use ValidatorUpdate
+    - Update field names and types in BeginBlock
+  - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block
+    - updates returned in ResponseEndBlock for block H will be included in RequestBeginBlock for block H+2
+
+* Go API
+  - [lite] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Complete refactor of the package
+  - [node] [\#2212](https://github.com/tendermint/tendermint/issues/2212) NewNode now accepts a `*p2p.NodeKey` (@bradyjoestar)
+  - [libs/common] [\#2199](https://github.com/tendermint/tendermint/issues/2199) Remove Fmt, in favor of fmt.Sprintf
+  - [libs/common] SplitAndTrim was deleted
+  - [libs/common] [\#2274](https://github.com/tendermint/tendermint/issues/2274) Remove unused Math functions like MaxInt, MaxInt64,
+    MinInt, MinInt64 (@Ahmah2009)
+  - [libs/clist] Panics if list extends beyond MaxLength
+  - [crypto] [\#2205](https://github.com/tendermint/tendermint/issues/2205) Rename AminoRoute variables to no longer be prefixed by signature type.
+
+* Blockchain Protocol
+  - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
+    - Add NextValidatorSet to State, changes on-disk representation of state
+  - [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (see
+    [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
+    - Remove ConsensusParams.BlockSize.MaxTxs
+    - Introduce maximum sizes for all components of a block, including ChainID
+  - [types] Updates to the block Header:
+    - [\#1815](https://github.com/tendermint/tendermint/issues/1815) NextValidatorsHash - hash of the validator set for the next block,
+      so the current validators actually sign over the hash for the new
+      validators
+    - [\#2106](https://github.com/tendermint/tendermint/issues/2106) ProposerAddress - address of the block's original proposer
+  - [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
+    - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
+  - [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (see
+    [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
+    - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
+    - malleability removed by requiring `s` to be in canonical form.
+
+* P2P Protocol
+  - [p2p] [\#2263](https://github.com/tendermint/tendermint/issues/2263) Update secret connection to use a little endian encoded nonce
+  - [blockchain] [\#2213](https://github.com/tendermint/tendermint/issues/2213) Fix Amino routes for blockchain reactor messages
+    (@peerlink)
+
+
+FEATURES:
+- [types] [\#2015](https://github.com/tendermint/tendermint/issues/2015) Allow genesis file to have 0 validators (@b00f)
+  - Initial validator set can be determined by the app in ResponseInitChain
+- [rpc] [\#2161](https://github.com/tendermint/tendermint/issues/2161) New event `ValidatorSetUpdates` for when the validator set changes
+- [crypto/multisig] [\#2164](https://github.com/tendermint/tendermint/issues/2164) Introduce multisig pubkey and signature format
+- [libs/db] [\#2293](https://github.com/tendermint/tendermint/issues/2293) Allow passing options through when creating instances of leveldb dbs
+
+IMPROVEMENTS:
+- [docs] Lint documentation with `write-good` and `stop-words`.
+- [docs] [\#2249](https://github.com/tendermint/tendermint/issues/2249) Refactor, deduplicate, and improve the ABCI docs and spec (with thanks to @ttmc).
+- [scripts] [\#2196](https://github.com/tendermint/tendermint/issues/2196) Added the json2wal tool, which is supposed to help our users restore
+  corrupted WAL files and compose test WAL files (@bradyjoestar)
+- [mempool] [\#2234](https://github.com/tendermint/tendermint/issues/2234) Now stores txs by hash inside of the cache, to mitigate memory leakage
+- [mempool] [\#2166](https://github.com/tendermint/tendermint/issues/2166) Set explicit capacity for map when updating txs (@bluele)
+
+BUG FIXES:
+- [config] [\#2284](https://github.com/tendermint/tendermint/issues/2284) Replace `db_path` with `db_dir` in automatically generated configuration files.
+- [mempool] [\#2188](https://github.com/tendermint/tendermint/issues/2188) Fix OOM issue from cache map and list getting out of sync
+- [state] [\#2051](https://github.com/tendermint/tendermint/issues/2051) KV store index supports searching by `tx.height` (@ackratos)
+- [rpc] [\#2327](https://github.com/tendermint/tendermint/issues/2327) `/dial_peers` does not try to dial existing peers
+- [node] [\#2323](https://github.com/tendermint/tendermint/issues/2323) Filter empty strings from config lists (@james-ray)
+- [abci/client] [\#2236](https://github.com/tendermint/tendermint/issues/2236) Fix closing GRPC connection (@bradyjoestar)
+
+## 0.23.1
+
+*August 22nd, 2018*
+
+BUG FIXES:
+- [libs/autofile] [\#2261](https://github.com/tendermint/tendermint/issues/2261) Fix log rotation so it actually happens.
+  - Fixes issues with the consensus WAL growing unbounded, à la [\#2259](https://github.com/tendermint/tendermint/issues/2259)
+
 ## 0.23.0

 *August 5th, 2018*

@@ -638,7 +972,7 @@ BREAKING CHANGES:
   - use scripts/wal2json to convert to json for debugging

 FEATURES:
-  - new `certifiers` pkg contains the tendermint light-client library (name subject to change)!
+  - new `Verifiers` pkg contains the tendermint light-client library (name subject to change)!
   - rpc: `/genesis` includes the `app_options` .
   - rpc: `/abci_query` takes an additional `height` parameter to support historical queries.
   - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height.
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
new file mode 100644
index 00000000000..ea3b975994d
--- /dev/null
+++ b/CHANGELOG_PENDING.md
@@ -0,0 +1,28 @@
+# Pending
+
+## v0.26.2
+
+*TBA*
+
+Special thanks to external contributors on this release:
+
+Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
+
+### BREAKING CHANGES:
+
+* CLI/RPC/Config
+
+* Apps
+
+* Go API
+
+* Blockchain Protocol
+
+* P2P Protocol
+
+### FEATURES:
+
+### IMPROVEMENTS:
+
+### BUG FIXES:
+
diff --git a/Gopkg.lock b/Gopkg.lock
index 557e2b181e5..35542bf6279 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -11,11 +11,11 @@
 [[projects]]
   branch = "master"
-  digest = "1:2c00f064ba355903866cbfbf3f7f4c0fe64af6638cc7d1b8bdcf3181bc67f1d8"
+  digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
   name = "github.com/btcsuite/btcd"
   packages = ["btcec"]
   pruneopts = "UT"
-  revision = "f5e261fc9ec3437697fb31d8b38453c293204b29"
+  revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"

 [[projects]]
   digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
@@ -28,19 +28,12 @@
   revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

 [[projects]]
-  digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39"
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
   packages = ["spew"]
   pruneopts = "UT"
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
-[[projects]]
-  digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b"
-  name = "github.com/ebuchman/fail-test"
-  packages = ["."]
-  pruneopts = "UT"
-  revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"

 [[projects]]
   digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
@@ -83,12 +76,12 @@
   version = "v0.3.0"

 [[projects]]
-  digest = "1:c4a2528ccbcabf90f9f3c464a5fc9e302d592861bbfd0b7135a7de8a943d0406"
+  digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
   name = "github.com/go-stack/stack"
   packages = ["."]
   pruneopts = "UT"
-  revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc"
-  version = "v1.7.0"
+  revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
+  version = "v1.8.0"

 [[projects]]
   digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
@@ -136,8 +129,7 @@
   version = "v1.2.0"

 [[projects]]
-  branch = "master"
-  digest = "1:12247a2e99a060cc692f6680e5272c8adf0b8f572e6bce0d7095e624c958a240"
+  digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
   name = "github.com/hashicorp/hcl"
   packages = [
     ".",
     "hcl/ast",
     "hcl/parser",
     "hcl/printer",
     "hcl/scanner",
     "hcl/strconv",
     "hcl/token",
     "json/parser",
     "json/scanner",
     "json/token",
   ]
   pruneopts = "UT"
-  revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168"
+  revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
+  version = "v1.0.0"

 [[projects]]
   digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
@@ -193,12 +186,12 @@
   version = "v1.0.1"

 [[projects]]
-  branch = "master"
-  digest = "1:5ab79470a1d0fb19b041a624415612f8236b3c06070161a910562f2b2d064355"
+  digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
   name = "github.com/mitchellh/mapstructure"
   packages = ["."]
   pruneopts = "UT"
-  revision = "f15292f7a699fcc1a38a80977f80a046874ba8ac"
+  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
+  version = "v1.1.2"

 [[projects]]
   digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
@@ -244,7 +237,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5"
+  digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
   name = "github.com/prometheus/common"
   packages = [
     "expfmt",
@@ -252,11 +245,11 @@
     "model",
   ]
   pruneopts = "UT"
-  revision = "c7de2306084e37d54b8be01f3541a8464345e9a5"
+  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"

 [[projects]]
   branch = "master"
-  digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290"
+  digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
   name = "github.com/prometheus/procfs"
   packages = [
     ".",
@@ -265,7 +258,7 @@
     "xfs",
   ]
   pruneopts = "UT"
-  revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92"
+  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"

 [[projects]]
   digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
@@ -275,23 +268,23 @@
   revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

 [[projects]]
-  digest = "1:bd1ae00087d17c5a748660b8e89e1043e1e5479d0fea743352cda2f8dd8c4f84"
+  digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
   name = "github.com/spf13/afero"
   packages = [
     ".",
     "mem",
   ]
   pruneopts = "UT"
-  revision = "787d034dfe70e44075ccc060d346146ef53270ad"
-  version = "v1.1.1"
+  revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
+  version = "v1.1.2"

 [[projects]]
-  digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f"
+  digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc"
   name = "github.com/spf13/cast"
   packages = ["."]
   pruneopts = "UT"
-  revision = "8965335b8c7107321228e3e3702cab9832751bac"
-  version = "v1.2.0"
+  revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
+  version = "v1.3.0"

 [[projects]]
   digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
@@ -302,20 +295,20 @@
   version = "v0.0.1"

 [[projects]]
-  branch = "master"
-  digest = "1:080e5f630945ad754f4b920e60b4d3095ba0237ebf88dc462eb28002932e3805"
+  digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
   name = "github.com/spf13/jwalterweatherman"
   packages = ["."]
   pruneopts = "UT"
-  revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394"
+  revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
+  version = "v1.0.0"

 [[projects]]
-  digest = "1:9424f440bba8f7508b69414634aef3b2b3a877e522d8a4624692412805407bb7"
+  digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
   name = "github.com/spf13/pflag"
   packages = ["."]
   pruneopts = "UT"
-  revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
-  version = "v1.0.1"
+  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
+  version = "v1.0.3"

 [[projects]]
   digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
@@ -338,7 +331,7 @@

 [[projects]]
   branch = "master"
-  digest = "1:b3cfb8d82b1601a846417c3f31c03a7961862cb2c98dcf0959c473843e6d9a2b"
+  digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
   name = "github.com/syndtr/goleveldb"
   packages = [
     "leveldb",
@@ -355,37 +348,33 @@
     "leveldb/util",
   ]
   pruneopts = "UT"
-  revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445"
+  revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"

 [[projects]]
-  branch = "master"
-  digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722"
-  name = "github.com/tendermint/ed25519"
-  packages = [
-    ".",
-    "edwards25519",
-    "extra25519",
-  ]
+  digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
+  name = "github.com/tendermint/btcd"
+  packages = ["btcec"]
   pruneopts = "UT"
-  revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057"
+  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

 [[projects]]
-  digest = "1:e9113641c839c21d8eaeb2c907c7276af1eddeed988df8322168c56b7e06e0e1"
+  digest = "1:10b3a599325740c84a7c81f3f3cb2e1fdb70b3ea01b7fa28495567a2519df431"
   name = "github.com/tendermint/go-amino"
   packages = ["."]
   pruneopts = "UT"
-  revision = "2106ca61d91029c931fd54968c2bb02dc96b1412"
-  version = "0.10.1"
+  revision = "6dcc6ddc143e116455c94b25c1004c99e0d0ca12"
+  version = "v0.14.0"

 [[projects]]
-  branch = "master"
-  digest = "1:c31a37cafc12315b8bd745c8ad6a006ac25350472488162a821e557b3e739d67"
+  digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
   name = "golang.org/x/crypto"
   packages = [
     "bcrypt",
     "blowfish",
     "chacha20poly1305",
     "curve25519",
+    "ed25519",
+    "ed25519/internal/edwards25519",
     "hkdf",
     "internal/chacha20",
     "internal/subtle",
@@ -398,7 +387,8 @@
     "salsa20/salsa",
   ]
   pruneopts = "UT"
-  revision = "c126467f60eb25f8f27e5a981f32a87e3965053f"
+  revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
+  source = "github.com/tendermint/crypto"

 [[projects]]
   digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
@@ -418,14 +408,14 @@

 [[projects]]
   branch = "master"
-  digest = "1:bb0fe59917bdd5b89f49b9a8b26e5f465e325d9223b3a8e32254314bdf51e0f1"
+  digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca"
   name = "golang.org/x/sys"
   packages = [
     "cpu",
     "unix",
   ]
   pruneopts = "UT"
-  revision = "3dc4335d56c789b04b0ba99b7a37249d9b614314"
+  revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4"

 [[projects]]
   digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
@@ -452,11 +442,11 @@

 [[projects]]
   branch = "master"
-  digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c"
+  digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
   name = "google.golang.org/genproto"
   packages = ["googleapis/rpc/status"]
   pruneopts = "UT"
-  revision = "daca94659cb50e9f37c1b834680f2e46358f10b0"
+  revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"

 [[projects]]
   digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
@@ -504,10 +494,8 @@
   analyzer-name = "dep"
   analyzer-version = 1
   input-imports = [
-    "github.com/btcsuite/btcd/btcec",
     "github.com/btcsuite/btcutil/base58",
     "github.com/btcsuite/btcutil/bech32",
-    "github.com/ebuchman/fail-test",
     "github.com/fortytw2/leaktest",
     "github.com/go-kit/kit/log",
     "github.com/go-kit/kit/log/level",
@@ -536,12 +524,12 @@
     "github.com/syndtr/goleveldb/leveldb/errors",
     "github.com/syndtr/goleveldb/leveldb/iterator",
     "github.com/syndtr/goleveldb/leveldb/opt",
-    "github.com/tendermint/ed25519",
-    "github.com/tendermint/ed25519/extra25519",
+    "github.com/tendermint/btcd/btcec",
     "github.com/tendermint/go-amino",
     "golang.org/x/crypto/bcrypt",
     "golang.org/x/crypto/chacha20poly1305",
     "golang.org/x/crypto/curve25519",
+    "golang.org/x/crypto/ed25519",
     "golang.org/x/crypto/hkdf",
     "golang.org/x/crypto/nacl/box",
     "golang.org/x/crypto/nacl/secretbox",
diff --git a/Gopkg.toml b/Gopkg.toml
index 9d605f6cce1..47418bef371 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -58,7 +58,7 @@

 [[constraint]]
   name = "github.com/tendermint/go-amino"
-  version = "=v0.10.1"
+  version = "v0.14.0"

 [[constraint]]
   name = "google.golang.org/grpc"
@@ -72,19 +72,24 @@
 ## Some repos dont have releases.
 ## Pin to revision

+[[constraint]]
+  name = "golang.org/x/crypto"
+  source = "github.com/tendermint/crypto"
+  revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
+
 [[override]]
   name = "github.com/jmhodges/levigo"
   revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

-[[constraint]]
-  name = "github.com/ebuchman/fail-test"
-  revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"
-
 # last revision used by go-crypto
 [[constraint]]
   name = "github.com/btcsuite/btcutil"
   revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

+[[constraint]]
+  name = "github.com/tendermint/btcd"
+  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"
+
 # Haven't made a release since 2016.
 [[constraint]]
   name = "github.com/prometheus/client_golang"
diff --git a/Makefile b/Makefile
index 22e9288612f..4390b1fbbe2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,16 +1,18 @@
 GOTOOLS = \
 	github.com/mitchellh/gox \
 	github.com/golang/dep/cmd/dep \
-	gopkg.in/alecthomas/gometalinter.v2 \
+	github.com/alecthomas/gometalinter \
 	github.com/gogo/protobuf/protoc-gen-gogo \
-	github.com/gogo/protobuf/gogoproto \
 	github.com/square/certstrap
+GOBIN?=${GOPATH}/bin
 PACKAGES=$(shell go list ./...)
 INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
 BUILD_TAGS?='tendermint'
 BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
+LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
+
 all: check build test install

 check: check_tools get_vendor_deps
@@ -22,30 +24,36 @@ check: check_tools get_vendor_deps
 build:
 	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/

+build_c:
+	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
+
 build_race:
 	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint

 install:
-	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
+	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

 ########################################
 ### Protobuf

-protoc_all: protoc_libs protoc_abci protoc_grpc
+protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types

 %.pb.go: %.proto
 	## If you get the following error,
 	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
 	## See https://stackoverflow.com/a/25518702
+	## Note the $< here is substituted for the %.proto
+	## Note the $@ here is substituted for the %.pb.go
 	protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
-	@echo "--> adding nolint declarations to protobuf generated files"
-	@awk -i inplace '/^\s*package \w+/ { print "//nolint" }1' $@

 ########################################
 ### Build ABCI

+# see protobuf section above
 protoc_abci: abci/types/types.pb.go

+protoc_proto3types: types/proto3/block.pb.go
+
 build_abci:
 	@go build -i ./abci/cmd/...
@@ -70,12 +78,13 @@ check_tools:

 get_tools:
 	@echo "--> Installing tools"
-	go get -u -v $(GOTOOLS)
-	@gometalinter.v2 --install
+	./scripts/get_tools.sh
+	@echo "--> Downloading linters (this may take awhile)"
+	$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)

 update_tools:
 	@echo "--> Updating tools"
-	@go get -u $(GOTOOLS)
+	./scripts/get_tools.sh

 #Update dependencies
 get_vendor_deps:
@@ -85,13 +94,15 @@ get_vendor_deps:

 #For ABCI and libs
 get_protoc:
 	@# https://github.com/google/protobuf/releases
-	curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
-		cd protobuf-3.4.1 && \
+	curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
+		cd protobuf-3.6.1 && \
 		DIST_LANG=cpp ./configure && \
 		make && \
-		make install && \
+		make check && \
+		sudo make install && \
+		sudo ldconfig && \
 		cd .. && \
-		rm -rf protobuf-3.4.1
+		rm -rf protobuf-3.6.1

 draw_deps:
 	@# requires brew install graphviz or apt-get install graphviz
@@ -130,6 +141,8 @@ grpc_dbserver:

 protoc_grpc: rpc/grpc/types.pb.go

+protoc_merkle: crypto/merkle/merkle.pb.go
+
 ########################################
 ### Testing

@@ -173,6 +186,9 @@ test_p2p:
 	cd ..
 	# requires 'tester' the image from above
 	bash test/p2p/test.sh tester
+	# the `docker cp` takes a really long time; uncomment for debugging
+	#
+	# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs

 test_integrations:
 	make build_docker_test_image
@@ -200,11 +216,11 @@ vagrant_test:
 ### go tests
 test:
 	@echo "--> Running go test"
-	@GOCACHE=off go test $(PACKAGES)
+	@GOCACHE=off go test -p 1 $(PACKAGES)

 test_race:
 	@echo "--> Running go test --race"
-	@go test -v -race $(PACKAGES)
+	@GOCACHE=off go test -p 1 -v -race $(PACKAGES)


 ########################################
@@ -215,7 +231,7 @@ fmt:

 metalinter:
 	@echo "--> Running linter"
-	@gometalinter.v2 --vendor --deadline=600s --disable-all \
+	@gometalinter $(LINT_FLAGS) --disable-all \
 		--enable=deadcode \
 		--enable=gosimple \
 		--enable=misspell \
@@ -244,7 +260,7 @@ metalinter:

 metalinter_all:
 	@echo "--> Running linter (all)"
-	gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
+	gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...

 DESTINATION = ./index.html.md
diff --git a/README.md b/README.md
index 94d8d7f01b9..328557ae322 100644
--- a/README.md
+++ b/README.md
@@ -8,7 +8,7 @@ Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.
 [![API Reference](
 https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
 )](https://godoc.org/github.com/tendermint/tendermint)
-[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm)
+[![Go version](https://img.shields.io/badge/go-1.10.4-blue.svg)](https://github.com/moovweb/gvm)
 [![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
 [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
 [![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@@ -22,7 +22,10 @@ develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/deve

 Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
 and securely replicates it on many machines.

-For protocol details, see [the specification](/docs/spec). For a consensus proof and detailed protocol analysis checkout our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
+For protocol details, see [the specification](/docs/spec).
+
+For detailed analysis of the consensus protocol, including safety and liveness proofs,
+see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".

 ## A Note on Production Readiness

@@ -30,10 +33,10 @@ While Tendermint is being used in production in private, permissioned
 environments, we are still working actively to harden and audit it in preparation
 for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
 We are also still making breaking changes to the protocol and the APIs.
-Thus we tag the releases as *alpha software*.
+Thus, we tag the releases as *alpha software*.

 In any case, if you intend to run Tendermint in production,
-please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :)
+please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).

 ## Security

@@ -46,7 +49,7 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.

 Requirement|Notes
 ---|---
-Go version | Go1.9 or higher
+Go version | Go1.10 or higher

 ## Install

@@ -54,10 +57,10 @@ See the [install instructions](/docs/introduction/install.md)

 ## Quick Start

-- [Single node](/docs/using-tendermint.md)
+- [Single node](/docs/tendermint-core/using-tendermint.md)
 - [Local cluster using docker-compose](/networks/local)
 - [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
-- [Join the public testnet](https://cosmos.network/testnet)
+- [Join the Cosmos testnet](https://cosmos.network/testnet)

 ## Resources

@@ -66,30 +69,31 @@ See the [install instructions](/docs/introduction/install.md)

 For details about the blockchain data structures and the p2p protocols, see
 the [Tendermint specification](/docs/spec).

-For details on using the software, [Read The Docs](https://tendermint.readthedocs.io/en/master/).
-Additional information about some - and eventually all - of the sub-projects below, can be found at Read The Docs.
+For details on using the software, see the [documentation](/docs/) which is also
+hosted at: https://tendermint.com/docs/
+
+### Tools
+Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
+Their code is found [here](/tools) and these binaries need to be built separately.
+Additional documentation is found [here](/docs/tools).

 ### Sub-projects

 * [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
 * [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation

-### Tools
-* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools)
-
 ### Applications

 * [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
-* [Ethermint](http://github.com/tendermint/ethermint); Ethereum on Tendermint
-* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications)
+* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
+* [Many more](https://tendermint.com/ecosystem)

-### More
+### Research

 * [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
 * [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
-* [Tendermint Blog](https://blog.cosmos.network/tendermint/home)
-* [Cosmos Blog](https://blog.cosmos.network)
+* [Blog](https://blog.cosmos.network/tendermint/home)

 ## Contributing

@@ -114,6 +118,12 @@ CHANGELOG even if they don't lead to MINOR version bumps:
 - rpc/client
 - config
 - node
+- libs
+  - bech32
+  - common
+  - db
+  - errors
+  - log

 Exported objects in these packages that are not covered by the versioning scheme
 are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any
@@ -130,6 +140,8 @@ data into the new chain.
 However, any bump in the PATCH version should be compatible with existing
 histories (if not please open an [issue](https://github.com/tendermint/tendermint/issues)).

+For more information on upgrading, see [here](./UPGRADING.md).
+
 ## Code of Conduct

 Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
diff --git a/UPGRADING.md b/UPGRADING.md
new file mode 100644
index 00000000000..055dbec475e
--- /dev/null
+++ b/UPGRADING.md
@@ -0,0 +1,152 @@
+# Upgrading Tendermint Core
+
+This guide provides steps to be followed when you upgrade your applications to
+a newer version of Tendermint Core.
+
+## v0.26.0
+
+The 0.26.0 release contains a lot of changes to core data types and protocols.
+It is not compatible with older versions, and there is no straightforward way
+to migrate old data to the new version.
+
+To reset the state do:
+
+```
+$ tendermint unsafe_reset_all
+```
+
+Here we summarize some other notable changes to be mindful of.
+
+### Config Changes
+
+All timeouts must be changed from integers to strings with their duration, for
+instance `flush_throttle_timeout = 100` would be changed to
+`flush_throttle_timeout = "100ms"` and `timeout_propose = 3000` would be changed
+to `timeout_propose = "3s"`.
+
+### RPC Changes
+
+The default behaviour of `/abci_query` has been changed to not return a proof,
+and the name of the parameter that controls this has been changed from `trusted`
+to `prove`. To get proofs with your queries, ensure you set `prove=true`.
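+
+For example, a client-side query might then look like the following. This is a
+minimal sketch, not part of this release itself: it assumes the Go `rpc/client`
+package with the query option renamed from `Trusted` to `Prove` to match the
+RPC change, and a local node on the default port; the path and key are
+placeholders.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/tendermint/tendermint/rpc/client"
+)
+
+func main() {
+	c := client.NewHTTP("tcp://localhost:26657", "/websocket")
+	// Setting Prove requests a Merkle proof along with the value.
+	res, err := c.ABCIQueryWithOptions("/key", []byte("mykey"),
+		client.ABCIQueryOptions{Prove: true})
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("value: %X, proof: %v\n", res.Response.Value, res.Response.Proof)
+}
+```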
+
+Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
+and `rpc_version` have been removed from `node_info.other` and are
+consolidated under the tendermint semantic version (i.e. `node_info.version`) and
+the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
+
+### ABCI Changes
+
+Field numbers were bumped in the `Header` and `ResponseInfo` messages to make
+room for new `version` fields. It should be straightforward to recompile the
+protobuf file for these changes.
+
+#### Proofs
+
+The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
+generalized Merkle tree constructions where the leaves of one Merkle tree are
+the root of another. If you don't need this functionality, and you used to
+return `<proof>` here, you should instead return a single `ProofOp` with
+just the `Data` field set:
+
+```
+[]ProofOp{
+  ProofOp{
+    Data: <proof>,
+  },
+}
+```
+
+For more information, see:
+
+- [ADR-026](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/architecture/adr-026-general-merkle-proof.md)
+- [Relevant ABCI
+  documentation](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/docs/spec/abci/apps.md#query-proofs)
+- [Description of
+  keys](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/crypto/merkle/proof_key_path.go#L14)
+
+### Go API Changes
+
+#### crypto.merkle
+
+The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
+now simply take `[]byte`. This means that any objects being Merklized should be
+serialized before they are passed in.
+
+#### node
+
+The `node.RunForever` function was removed. Signal handling and running forever
+should instead be explicitly configured by the caller. See how we do it
+[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).
+
+### Other
+
+All hashes, except for public key addresses, are now 32 bytes.
+
+## v0.25.0
+
+This release has minimal impact.
+
+If you use GasWanted in ABCI and want to enforce it, set MaxGas in the genesis file (the default is no max).
+
+## v0.24.0
+
+The 0.24.0 release contains a lot of changes to the state and types. It is not
+compatible with older versions, and there is no straightforward way to migrate
+old data to the new version.
+
+To reset the state do:
+
+```
+$ tendermint unsafe_reset_all
+```
+
+Here we summarize some other notable changes to be mindful of.
+
+### Config changes
+
+`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
+`p2p.max_num_outbound_peers`.
+
+```
+# Maximum number of inbound peers
+max_num_inbound_peers = 40
+
+# Maximum number of outbound peers to connect to, excluding persistent peers
+max_num_outbound_peers = 10
+```
+
+As you can see, the default ratio of inbound to outbound peers is 4:1. The
+reason is that we want it to be easier for new nodes to connect to the network.
+You can tweak these parameters to alter the network topology.
+
+### RPC Changes
+
+The result of `/commit` used to contain `header` and `commit` fields at the top level. These are now contained under the `signed_header` field.
+
+### ABCI Changes
+
+The header has been upgraded and contains new fields, but none of the existing
+fields were changed, except their order.
+
+The `Validator` type was split into two, one containing an `Address` and one
+containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
+type, which contains just the `Address`. When returning `ResponseEndBlock`, use
+the `ValidatorUpdate` type, which contains just the `PubKey`.
+
+### Validator Set Updates
+
+Validator set updates returned in ResponseEndBlock for height `H` used to take
+effect immediately at height `H+1`. Now they will be delayed one block, to take
+effect at height `H+2`. Note this means that the change will be seen by the ABCI
+app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
+required to maintain a map from validator addresses to pubkeys since v0.23 (when
+pubkeys were removed from RequestBeginBlock), but now they may need to track
+multiple validator sets at once to accommodate this delay (a sketch of this
+bookkeeping appears below).
+
+### Block Size
+
+The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of
+`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
+are limited only by byte size, not by number of transactions.
diff --git a/Vagrantfile b/Vagrantfile
index 095a6b06187..320f3b1c329 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -29,10 +29,10 @@ Vagrant.configure("2") do |config|
     usermod -a -G docker vagrant

     # install go
-    wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz
-    tar -xvf go1.10.1.linux-amd64.tar.gz
+    wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
+    tar -xvf go1.11.linux-amd64.tar.gz
     mv go /usr/local
-    rm -f go1.10.1.linux-amd64.tar.gz
+    rm -f go1.11.linux-amd64.tar.gz

     # cleanup
     apt-get autoremove -y
diff --git a/abci/README.md b/abci/README.md
index 493d862f03a..110ad40e959 100644
--- a/abci/README.md
+++ b/abci/README.md
@@ -1,7 +1,5 @@
 # Application BlockChain Interface (ABCI)

-[![CircleCI](https://circleci.com/gh/tendermint/abci.svg?style=svg)](https://circleci.com/gh/tendermint/abci)
-
 Blockchains are systems for multi-master state machine replication.
 **ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
 and the state machine (the application).
@@ -12,157 +10,28 @@ Previously, the ABCI was referred to as TMSP.

 The community has provided a number of additional implementations, see the [Tendermint Ecosystem](https://tendermint.com/ecosystem)

-## Specification
-
-A detailed description of the ABCI methods and message types is contained in:
+## Installation & Usage

-- [A prose specification](specification.md)
-- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
-- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go).
+To get up and running quickly, see the [getting started guide](../docs/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.

-For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
-The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.
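Returning to the validator-set delay described in the UPGRADING notes above, here is a hedged sketch of the extra bookkeeping an application might keep; the tracker type and its keying scheme are hypothetical, while `types.PubKey`, `types.ValidatorUpdate`, and `types.Ed25519ValidatorUpdate` come from the `abci/types` package touched later in this diff.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// valTracker remembers pubkeys for updates that are still in flight:
// an update returned at height H takes effect at H+2 and first shows
// up in RequestBeginBlock.LastCommitInfo at H+3.
type valTracker struct {
	pubkeys map[string]types.PubKey // keyed by hex pubkey here for simplicity
}

func (vt *valTracker) record(updates []types.ValidatorUpdate) {
	for _, u := range updates {
		// NOTE: deriving the 20-byte address from the pubkey is
		// app-specific; keying by raw pubkey hex is a simplification.
		vt.pubkeys[fmt.Sprintf("%X", u.PubKey.Data)] = u.PubKey
	}
}

func main() {
	vt := &valTracker{pubkeys: make(map[string]types.PubKey)}
	vt.record([]types.ValidatorUpdate{types.Ed25519ValidatorUpdate(make([]byte, 32), 10)})
	fmt.Println(len(vt.pubkeys)) // 1
}
```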
+## Specification
+
+A detailed description of the ABCI methods and message types is contained in:
+
+- [The main spec](../docs/spec/abci/abci.md)
+- [A protobuf file](./types/types.proto)
+- [A Go interface](./types/application.go)

-## Protocl Buffers
+## Protocol Buffers

-To compile the protobuf file, run:
+To compile the protobuf file, run (from the root of the repo):

 ```
-cd $GOPATH/src/github.com/tendermint/tendermint/; make protoc_abci
+make protoc_abci
 ```

 See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
-for details on compiling for other languages. Note we also include a [GRPC](http://www.grpc.io/docs)
+for details on compiling for other languages. Note we also include a [GRPC](https://www.grpc.io/docs)
 service definition.

-## Install ABCI-CLI
-
-The `abci-cli` is a simple tool for debugging ABCI servers and running some
-example apps. To install it:
-
-```
-go get github.com/tendermint/abci
-cd $GOPATH/src/github.com/tendermint/abci
-make get_vendor_deps
-make install
-```
-
-## Implementation
-
-We provide three implementations of the ABCI in Go:
-
-- Golang in-process
-- ABCI-socket
-- GRPC
-
-Note the GRPC version is maintained primarily to simplify onboarding and prototyping and is not receiving the same
-attention to security and performance as the others
-
-### In Process
-
-The simplest implementation just uses function calls within Go.
-This means ABCI applications written in Golang can be compiled with TendermintCore and run as a single binary.
-
-See the [examples](#examples) below for more information.
-
-### Socket (TSP)
-
-ABCI is best implemented as a streaming protocol.
-The socket implementation provides for asynchronous, ordered message passing over unix or tcp.
-Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers)
-
-For example, if the Protobuf3 encoded ABCI message is `0xDEADBEEF` (4 bytes), the length-prefixed message is `0x08DEADBEEF`, since `0x08` is the signed varint
-encoding of `4`. If the Protobuf3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like `0xFEFF07...`.
-
-Note the benefit of using this `varint` encoding over the old version (where integers were encoded as `<len of len><big endian len>`) is that
-it is the standard way to encode integers in Protobuf. It is also generally shorter.
-
-### GRPC
-
-GRPC is an rpc framework native to Protocol Buffers with support in many languages.
-Implementing the ABCI using GRPC can allow for faster prototyping, but is expected to be much slower than
-the ordered, asynchronous socket protocol. The implementation has also not received as much testing or review.
-
-Note the length-prefixing used in the socket implementation does not apply for GRPC.
-
-## Usage
-
-The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
-For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
-It can also be used to run some example applications.
-See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details.
-
-### Examples
-
-Check out the variety of example applications in the [example directory](example/).
-It also contains the code refered to by the `counter` and `kvstore` apps; these apps come
-built into the `abci-cli` binary.
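To make the framing concrete, here is a minimal sketch of the signed-varint length prefix described in the socket-protocol notes above, using only the Go standard library; `frame` and its caller are illustrative names, and `msg` stands for any Protobuf3-encoded ABCI message.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the zigzag-encoded (signed) varint length, so a
// 4-byte message gets the single prefix byte 0x08, matching the
// 0x08DEADBEEF example in the text.
func frame(msg []byte) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutVarint(buf, int64(len(msg)))
	return append(buf[:n], msg...)
}

func main() {
	fmt.Printf("% X\n", frame([]byte{0xDE, 0xAD, 0xBE, 0xEF})) // 08 DE AD BE EF
}
```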
-
-#### Counter
-
-The `abci-cli counter` application illustrates nonce checking in transactions. It's code looks like:
-
-```golang
-func cmdCounter(cmd *cobra.Command, args []string) error {
-
-	app := counter.NewCounterApplication(flagSerial)
-
-	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
-
-	// Start the listener
-	srv, err := server.NewServer(flagAddrC, flagAbci, app)
-	if err != nil {
-		return err
-	}
-	srv.SetLogger(logger.With("module", "abci-server"))
-	if err := srv.Start(); err != nil {
-		return err
-	}
-
-	// Wait forever
-	cmn.TrapSignal(func() {
-		// Cleanup
-		srv.Stop()
-	})
-	return nil
-}
-```
-
-and can be found in [this file](cmd/abci-cli/abci-cli.go).
-
-#### kvstore
-
-The `abci-cli kvstore` application, which illustrates a simple key-value Merkle tree
-
-```golang
-func cmdKVStore(cmd *cobra.Command, args []string) error {
-	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
-
-	// Create the application - in memory or persisted to disk
-	var app types.Application
-	if flagPersist == "" {
-		app = kvstore.NewKVStoreApplication()
-	} else {
-		app = kvstore.NewPersistentKVStoreApplication(flagPersist)
-		app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
-	}
-
-	// Start the listener
-	srv, err := server.NewServer(flagAddrD, flagAbci, app)
-	if err != nil {
-		return err
-	}
-	srv.SetLogger(logger.With("module", "abci-server"))
-	if err := srv.Start(); err != nil {
-		return err
-	}
-
-	// Wait forever
-	cmn.TrapSignal(func() {
-		// Cleanup
-		srv.Stop()
-	})
-	return nil
-}
-```
diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go
index a1f0994684f..4f37b17b660 100644
--- a/abci/client/grpc_client.go
+++ b/abci/client/grpc_client.go
@@ -22,6 +22,7 @@ type grpcClient struct {
 	mustConnect bool

 	client types.ABCIApplicationClient
+	conn   *grpc.ClientConn

 	mtx  sync.Mutex
 	addr string
@@ -60,6 +61,7 @@ RETRY_LOOP:

 		cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
 		client := types.NewABCIApplicationClient(conn)
+		cli.conn = conn

 	ENSURE_CONNECTED:
 		for {
@@ -78,12 +80,10 @@ RETRY_LOOP:

 func (cli *grpcClient) OnStop() {
 	cli.BaseService.OnStop()
-	cli.mtx.Lock()
-	defer cli.mtx.Unlock()
-	// TODO: how to close conn? its not a net.Conn and grpc doesn't expose a Close()
-	/*if cli.client.conn != nil {
-		cli.client.conn.Close()
-	}*/
+
+	if cli.conn != nil {
+		cli.conn.Close()
+	}
 }

 func (cli *grpcClient) StopForError(err error) {
diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go
index 00bceec216d..50972ec30a8 100644
--- a/abci/cmd/abci-cli/abci-cli.go
+++ b/abci/cmd/abci-cli/abci-cli.go
@@ -22,6 +22,7 @@ import (
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
 	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/abci/version"
+	"github.com/tendermint/tendermint/crypto/merkle"
 )

 // client is a global variable so it can be reused by the console
@@ -100,7 +101,7 @@ type queryResponse struct {
 	Key    []byte
 	Value  []byte
 	Height int64
-	Proof  []byte
+	Proof  *merkle.Proof
 }

 func Execute() error {
@@ -477,11 +478,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
 }

 func cmdUnimplemented(cmd *cobra.Command, args []string) error {
-	// TODO: Print out all the sub-commands available
 	msg := "unimplemented command"
-	if err := cmd.Help(); err != nil {
-		msg = err.Error()
-	}
+
 	if len(args) > 0 {
 		msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " "))
 	}
@@ -489,6 +487,17 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
 		Code: codeBad,
 		Log:  msg,
 	})
+
+	fmt.Println("Available commands:")
+	fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
+	fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
+	fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
+	fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
+	fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
+	fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
+	fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
+	fmt.Println("Use \"[command] --help\" for more information about a command.")
+
 	return nil
 }

@@ -740,7 +749,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) {
 			fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
 		}
 		if rsp.Query.Proof != nil {
-			fmt.Printf("-> proof: %X\n", rsp.Query.Proof)
+			fmt.Printf("-> proof: %#v\n", rsp.Query.Proof)
 		}
 	}
 }
diff --git a/abci/example/code/code.go b/abci/example/code/code.go
index 94e9d015edd..988b2a93eae 100644
--- a/abci/example/code/code.go
+++ b/abci/example/code/code.go
@@ -6,4 +6,5 @@ const (
 	CodeTypeEncodingError uint32 = 1
 	CodeTypeBadNonce      uint32 = 2
 	CodeTypeUnauthorized  uint32 = 3
+	CodeTypeUnknownError  uint32 = 4
 )
diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go
index 857e82bafdf..a77e7821f4a 100644
--- a/abci/example/counter/counter.go
+++ b/abci/example/counter/counter.go
@@ -6,7 +6,6 @@ import (

 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
 )

 type CounterApplication struct {
@@ -22,7 +21,7 @@ func NewCounterApplication(serial bool) *CounterApplication {
 }

 func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
-	return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
+	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
 }

 func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
@@ -34,7 +33,7 @@
 	TODO Panic and have the ABCI server pass an exception.
 	The client can call SetOptionSync() and get an `error`.
 	return types.ResponseSetOption{
-		Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value),
+		Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
 	}
 	*/
 	return types.ResponseSetOption{}
@@ -95,10 +94,10 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
 func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
 	switch reqQuery.Path {
 	case "hash":
-		return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))}
+		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
 	case "tx":
-		return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))}
+		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
 	default:
-		return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
+		return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
 	}
 }
diff --git a/abci/example/example_test.go b/abci/example/example_test.go
index 8fa3ae021b2..677a2a481a8 100644
--- a/abci/example/example_test.go
+++ b/abci/example/example_test.go
@@ -39,7 +39,7 @@ func TestGRPC(t *testing.T) {
 }

 func testStream(t *testing.T, app types.Application) {
-	numDeliverTxs := 200000
+	numDeliverTxs := 20000

 	// Start the listener
 	server := abciserver.NewSocketServer("unix://test.sock", app)
@@ -72,7 +72,7 @@
 			}
 			if counter == numDeliverTxs {
 				go func() {
-					time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
+					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
 					close(done)
 				}()
 				return
@@ -148,7 +148,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
 		t.Log("response", counter)
 		if counter == numDeliverTxs {
 			go func() {
-				time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow
+				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
 			}()
 		}
diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go
index 0e69fab9f6c..bb086dec0b9 100644
--- a/abci/example/kvstore/helpers.go
+++ b/abci/example/kvstore/helpers.go
@@ -7,12 +7,10 @@ import (

 // RandVal creates one random validator, with a key derived
 // from the input value
-func RandVal(i int) types.Validator {
-	addr := cmn.RandBytes(20)
+func RandVal(i int) types.ValidatorUpdate {
 	pubkey := cmn.RandBytes(32)
 	power := cmn.RandUint16() + 1
-	v := types.Ed25519Validator(pubkey, int64(power))
-	v.Address = addr
+	v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
 	return v
 }

@@ -20,8 +18,8 @@
 // the application. Note that the keys are deterministically
 // derived from the index in the array, while the power is
 // random (Change this if not desired)
-func RandVals(cnt int) []types.Validator {
-	res := make([]types.Validator, cnt)
+func RandVals(cnt int) []types.ValidatorUpdate {
+	res := make([]types.ValidatorUpdate, cnt)
 	for i := 0; i < cnt; i++ {
 		res[i] = RandVal(i)
 	}
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go
index 0f72b44eafa..955baefb4db 100644
--- a/abci/example/kvstore/kvstore.go
+++ b/abci/example/kvstore/kvstore.go
@@ -10,11 +10,14 @@ import (
 	"github.com/tendermint/tendermint/abci/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/version"
 )

 var (
 	stateKey        = []byte("stateKey")
 	kvPairPrefixKey = []byte("kvPairKey:")
+
+	ProtocolVersion version.Protocol = 0x1
 )

 type State struct {
@@ -65,7 +68,11 @@ func NewKVStoreApplication() *KVStoreApplication {
 }

 func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
-	return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size)}
+	return types.ResponseInfo{
+		Data:       fmt.Sprintf("{\"size\":%v}", app.state.Size),
+		Version:    version.ABCIVersion,
+		AppVersion: ProtocolVersion.Uint64(),
+	}
 }

 // tx is either "key=value" or just arbitrary bytes
@@ -81,14 +88,14 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
 	app.state.Size += 1

 	tags := []cmn.KVPair{
-		{[]byte("app.creator"), []byte("jae")},
-		{[]byte("app.key"), key},
+		{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
+		{Key: []byte("app.key"), Value: key},
 	}
 	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
 }

 func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
-	return types.ResponseCheckTx{Code: code.CodeTypeOK}
+	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
 }

 func (app *KVStoreApplication) Commit() types.ResponseCommit {
@@ -114,6 +121,7 @@
 		}
 		return
 	} else {
+		resQuery.Key = reqQuery.Data
 		value := app.state.db.Get(prefixKey(reqQuery.Data))
 		resQuery.Value = value
 		if value != nil {
diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go
index 6ef5a08f986..a18fb8d3c77 100644
--- a/abci/example/kvstore/kvstore_test.go
+++ b/abci/example/kvstore/kvstore_test.go
@@ -2,6 +2,7 @@ package kvstore

 import (
 	"bytes"
+	"fmt"
 	"io/ioutil"
 	"sort"
 	"testing"
@@ -121,11 +122,11 @@ func TestValUpdates(t *testing.T) {
 	vals1, vals2 := vals[:nInit], kvstore.Validators()
 	valsEqual(t, vals1, vals2)

-	var v1, v2, v3 types.Validator
+	var v1, v2, v3 types.ValidatorUpdate

 	// add some validators
 	v1, v2 = vals[nInit], vals[nInit+1]
-	diff := []types.Validator{v1, v2}
+	diff := []types.ValidatorUpdate{v1, v2}
 	tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
 	tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)

@@ -139,7 +140,7 @@
 	v1.Power = 0
 	v2.Power = 0
 	v3.Power = 0
-	diff = []types.Validator{v1, v2, v3}
+	diff = []types.ValidatorUpdate{v1, v2, v3}
 	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
 	tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
 	tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
@@ -157,18 +158,18 @@
 	} else {
 		v1.Power = 5
 	}
-	diff = []types.Validator{v1}
+	diff = []types.ValidatorUpdate{v1}
 	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)

 	makeApplyBlock(t, kvstore, 3, diff, tx1)

-	vals1 = append([]types.Validator{v1}, vals1[1:]...)
+	vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
 	vals2 = kvstore.Validators()
 	valsEqual(t, vals1, vals2)

 }

-func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) {
+func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
 	// make and apply block
 	height := int64(heightInt)
 	hash := []byte("foo")
@@ -190,12 +191,12 @@
 }

 // order doesn't matter
-func valsEqual(t *testing.T, vals1, vals2 []types.Validator) {
+func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
 	if len(vals1) != len(vals2) {
 		t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
 	}
-	sort.Sort(types.Validators(vals1))
-	sort.Sort(types.Validators(vals2))
+	sort.Sort(types.ValidatorUpdates(vals1))
+	sort.Sort(types.ValidatorUpdates(vals2))
 	for i, v1 := range vals1 {
 		v2 := vals2[i]
 		if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) ||
@@ -207,7 +208,7 @@

 func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
 	// Start the listener
-	socket := cmn.Fmt("unix://%s.sock", name)
+	socket := fmt.Sprintf("unix://%s.sock", name)
 	logger := log.TestingLogger()

 	server := abciserver.NewSocketServer(socket, app)
@@ -229,7 +230,7 @@

 func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
 	// Start the listener
-	socket := cmn.Fmt("unix://%s.sock", name)
+	socket := fmt.Sprintf("unix://%s.sock", name)
 	logger := log.TestingLogger()

 	gapp := types.NewGRPCApplication(app)
diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go
index 12ccbab782b..f969eebfefa 100644
--- a/abci/example/kvstore/persistent_kvstore.go
+++ b/abci/example/kvstore/persistent_kvstore.go
@@ -9,7 +9,6 @@ import (

 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
 )
@@ -26,7 +25,7 @@ type PersistentKVStoreApplication struct {
 	app *KVStoreApplication

 	// validator set
-	ValUpdates []types.Validator
+	ValUpdates []types.ValidatorUpdate

 	logger log.Logger
 }
@@ -102,7 +101,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 // Track the block hash and header information
 func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	// reset valset changes
-	app.ValUpdates = make([]types.Validator, 0)
+	app.ValUpdates = make([]types.ValidatorUpdate, 0)
 	return types.ResponseBeginBlock{}
 }

@@ -114,11 +113,11 @@
 //---------------------------------------------
 // update validators

-func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) {
+func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
 	itr := app.app.state.db.Iterator(nil, nil)
 	for ; itr.Valid(); itr.Next() {
 		if isValidatorTx(itr.Key()) {
-			validator := new(types.Validator)
+			validator := new(types.ValidatorUpdate)
 			err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
 			if err != nil {
 				panic(err)
@@ -130,7 +129,7 @@
 }

 func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
-	return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power))
+	return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
 }

 func isValidatorTx(tx []byte) bool {
@@ -168,11 +167,11 @@
 	}

 	// update
-	return app.updateValidator(types.Ed25519Validator(pubkey, int64(power)))
+	return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
 }

 // add, update, or remove a validator
-func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
 	key := []byte("val:" + string(v.PubKey.Data))
 	if v.Power == 0 {
 		// remove validator
diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go
index f67297cd700..5daa1e6af10 100644
--- a/abci/tests/server/client.go
+++ b/abci/tests/server/client.go
@@ -12,11 +12,11 @@

 func InitChain(client abcicli.Client) error {
 	total := 10
-	vals := make([]types.Validator, total)
+	vals := make([]types.ValidatorUpdate, total)
 	for i := 0; i < total; i++ {
 		pubkey := cmn.RandBytes(33)
 		power := cmn.RandInt()
-		vals[i] = types.Ed25519Validator(pubkey, int64(power))
+		vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
 	}
 	_, err := client.InitChainSync(types.RequestInitChain{
 		Validators: vals,
diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out
index 5d4c196dcbc..0cdd43df6a0 100644
--- a/abci/tests/test_cli/ex1.abci.out
+++ b/abci/tests/test_cli/ex1.abci.out
@@ -28,6 +28,8 @@
 -> code: OK
 -> log: exists
 -> height: 0
+-> key: abc
+-> key.hex: 616263
 -> value: abc
 -> value.hex: 616263

@@ -42,6 +44,8 @@
 -> code: OK
 -> log: exists
 -> height: 0
+-> key: def
+-> key.hex: 646566
 -> value: xyz
 -> value.hex: 78797A

diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go
index 603e602aef2..14bc5718f1e 100644
--- a/abci/types/messages_test.go
+++ b/abci/types/messages_test.go
@@ -22,7 +22,7 @@ func TestMarshalJSON(t *testing.T) {
 		Data:      []byte("hello"),
 		GasWanted: 43,
 		Tags: []cmn.KVPair{
-			{[]byte("pho"), []byte("bo")},
+			{Key: []byte("pho"), Value: []byte("bo")},
 		},
 	}
 	b, err = json.Marshal(&r1)
@@ -83,7 +83,7 @@ func TestWriteReadMessage2(t *testing.T) {
 			Log:       phrase,
 			GasWanted: 10,
 			Tags: []cmn.KVPair{
-				cmn.KVPair{[]byte("abc"), []byte("def")},
+				cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
 			},
 		},
 		// TODO: add the rest
diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go
index e5cd5fbf3e4..46cd8c5e887 100644
--- a/abci/types/pubkey.go
+++ b/abci/types/pubkey.go
@@ -4,8 +4,8 @@ const (
 	PubKeyEd25519 = "ed25519"
 )

-func Ed25519Validator(pubkey []byte, power int64) Validator {
-	return Validator{
+func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate {
+	return ValidatorUpdate{
 		// Address:
 		PubKey: PubKey{
 			Type: PubKeyEd25519,
diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go
index ac71d91c8f9..c867dffc81e 100644
--- a/abci/types/types.pb.go
+++ b/abci/types/types.pb.go
@@ -1,7 +1,6 @@
 // Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: abci/types/types.proto -//nolint package types import proto "github.com/gogo/protobuf/proto" @@ -10,14 +9,17 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import merkle "github.com/tendermint/tendermint/crypto/merkle" import common "github.com/tendermint/tendermint/libs/common" import time "time" import bytes "bytes" -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" @@ -59,7 +61,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{0} + return fileDescriptor_types_5b877df1938afe10, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -481,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{1} + return fileDescriptor_types_5b877df1938afe10, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{2} + return fileDescriptor_types_5b877df1938afe10, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -558,6 +560,8 @@ var xxx_messageInfo_RequestFlush proto.InternalMessageInfo type RequestInfo struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + BlockVersion uint64 `protobuf:"varint,2,opt,name=block_version,json=blockVersion,proto3" json:"block_version,omitempty"` + P2PVersion uint64 `protobuf:"varint,3,opt,name=p2p_version,json=p2pVersion,proto3" json:"p2p_version,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -567,7 +571,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{3} + return fileDescriptor_types_5b877df1938afe10, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -603,6 +607,20 @@ func (m *RequestInfo) GetVersion() string { return "" } +func (m *RequestInfo) GetBlockVersion() uint64 { + if m != nil { + return m.BlockVersion + } + return 0 +} + +func (m *RequestInfo) GetP2PVersion() uint64 { + if m != nil { + return m.P2PVersion + } + return 0 +} + // nondeterministic type RequestSetOption struct { Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` @@ -616,7 +634,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func 
(*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{4} + return fileDescriptor_types_5b877df1938afe10, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,21 +678,21 @@ func (m *RequestSetOption) GetValue() string { } type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,stdtime" json:"time"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` - Validators []Validator `protobuf:"bytes,4,rep,name=validators" json:"validators"` - AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Time time.Time `protobuf:"bytes,1,opt,name=time,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{5} + return fileDescriptor_types_5b877df1938afe10, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +742,7 @@ func (m *RequestInitChain) GetConsensusParams() *ConsensusParams { return nil } -func (m *RequestInitChain) GetValidators() []Validator { +func (m *RequestInitChain) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } @@ -752,7 +770,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{6} + return fileDescriptor_types_5b877df1938afe10, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -809,7 +827,6 @@ func (m *RequestQuery) GetProve() bool { return false } -// NOTE: validators here have empty pubkeys. 
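// A minimal sketch (assumed caller-side code, not part of the generated
// file) of building the reshaped RequestInitChain: genesis validators are
// now supplied as ValidatorUpdate values rather than Validator, and
// pubkeyBytes stands in for a raw 32-byte ed25519 public key:
//
//	req := types.RequestInitChain{
//		ChainId: "test-chain",
//		Validators: []types.ValidatorUpdate{
//			types.Ed25519ValidatorUpdate(pubkeyBytes, 10),
//		},
//	}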
type RequestBeginBlock struct { Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` Header Header `protobuf:"bytes,2,opt,name=header" json:"header"` @@ -824,7 +841,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{7} + return fileDescriptor_types_5b877df1938afe10, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +909,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{8} + return fileDescriptor_types_5b877df1938afe10, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -939,7 +956,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{9} + return fileDescriptor_types_5b877df1938afe10, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -986,7 +1003,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{10} + return fileDescriptor_types_5b877df1938afe10, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1049,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{11} + return fileDescriptor_types_5b877df1938afe10, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1085,7 +1102,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{12} + return fileDescriptor_types_5b877df1938afe10, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1538,7 +1555,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{13} + return fileDescriptor_types_5b877df1938afe10, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1585,7 +1602,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) 
Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{14} + return fileDescriptor_types_5b877df1938afe10, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1631,7 +1648,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{15} + return fileDescriptor_types_5b877df1938afe10, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1663,8 +1680,9 @@ var xxx_messageInfo_ResponseFlush proto.InternalMessageInfo type ResponseInfo struct { Data string `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - LastBlockHeight int64 `protobuf:"varint,3,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` - LastBlockAppHash []byte `protobuf:"bytes,4,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` + AppVersion uint64 `protobuf:"varint,3,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + LastBlockHeight int64 `protobuf:"varint,4,opt,name=last_block_height,json=lastBlockHeight,proto3" json:"last_block_height,omitempty"` + LastBlockAppHash []byte `protobuf:"bytes,5,opt,name=last_block_app_hash,json=lastBlockAppHash,proto3" json:"last_block_app_hash,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1674,7 +1692,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{16} + return fileDescriptor_types_5b877df1938afe10, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1717,6 +1735,13 @@ func (m *ResponseInfo) GetVersion() string { return "" } +func (m *ResponseInfo) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + func (m *ResponseInfo) GetLastBlockHeight() int64 { if m != nil { return m.LastBlockHeight @@ -1746,7 +1771,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{17} + return fileDescriptor_types_5b877df1938afe10, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1797,18 +1822,18 @@ func (m *ResponseSetOption) GetInfo() string { } type ResponseInitChain struct { - ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` - Validators []Validator `protobuf:"bytes,2,rep,name=validators" json:"validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators" 
json:"validators"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{18} + return fileDescriptor_types_5b877df1938afe10, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1869,7 @@ func (m *ResponseInitChain) GetConsensusParams() *ConsensusParams { return nil } -func (m *ResponseInitChain) GetValidators() []Validator { +func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } @@ -1854,23 +1879,24 @@ func (m *ResponseInitChain) GetValidators() []Validator { type ResponseQuery struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - Proof []byte `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof" json:"proof,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + Codespace string `protobuf:"bytes,10,opt,name=codespace,proto3" json:"codespace,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{19} + return fileDescriptor_types_5b877df1938afe10, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1941,7 +1967,7 @@ func (m *ResponseQuery) GetValue() []byte { return nil } -func (m *ResponseQuery) GetProof() []byte { +func (m *ResponseQuery) GetProof() *merkle.Proof { if m != nil { return m.Proof } @@ -1955,6 +1981,13 @@ func (m *ResponseQuery) GetHeight() int64 { return 0 } +func (m *ResponseQuery) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseBeginBlock struct { Tags []common.KVPair `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -1966,7 +1999,7 @@ func (m *ResponseBeginBlock) Reset() { *m = 
ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{20} + return fileDescriptor_types_5b877df1938afe10, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2010,6 +2043,7 @@ type ResponseCheckTx struct { GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2019,7 +2053,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{21} + return fileDescriptor_types_5b877df1938afe10, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2097,6 +2131,13 @@ func (m *ResponseCheckTx) GetTags() []common.KVPair { return nil } +func (m *ResponseCheckTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseDeliverTx struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` @@ -2105,6 +2146,7 @@ type ResponseDeliverTx struct { GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` Tags []common.KVPair `protobuf:"bytes,7,rep,name=tags" json:"tags,omitempty"` + Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2114,7 +2156,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{22} + return fileDescriptor_types_5b877df1938afe10, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2192,20 +2234,27 @@ func (m *ResponseDeliverTx) GetTags() []common.KVPair { return nil } +func (m *ResponseDeliverTx) GetCodespace() string { + if m != nil { + return m.Codespace + } + return "" +} + type ResponseEndBlock struct { - ValidatorUpdates []Validator `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` - ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` - Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ValidatorUpdates []ValidatorUpdate 
`protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` + ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` + Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{23} + return fileDescriptor_types_5b877df1938afe10, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2234,7 +2283,7 @@ func (m *ResponseEndBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo -func (m *ResponseEndBlock) GetValidatorUpdates() []Validator { +func (m *ResponseEndBlock) GetValidatorUpdates() []ValidatorUpdate { if m != nil { return m.ValidatorUpdates } @@ -2267,7 +2316,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{24} + return fileDescriptor_types_5b877df1938afe10, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2306,19 +2355,19 @@ func (m *ResponseCommit) GetData() []byte { // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { - BlockSize *BlockSize `protobuf:"bytes,1,opt,name=block_size,json=blockSize" json:"block_size,omitempty"` - TxSize *TxSize `protobuf:"bytes,2,opt,name=tx_size,json=txSize" json:"tx_size,omitempty"` - BlockGossip *BlockGossip `protobuf:"bytes,3,opt,name=block_gossip,json=blockGossip" json:"block_gossip,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + BlockSize *BlockSizeParams `protobuf:"bytes,1,opt,name=block_size,json=blockSize" json:"block_size,omitempty"` + Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence" json:"evidence,omitempty"` + Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator" json:"validator,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{25} + return fileDescriptor_types_5b877df1938afe10, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2347,49 +2396,50 @@ func (m *ConsensusParams) XXX_DiscardUnknown() { var xxx_messageInfo_ConsensusParams proto.InternalMessageInfo -func (m *ConsensusParams) GetBlockSize() *BlockSize { +func (m *ConsensusParams) GetBlockSize() *BlockSizeParams { if m != nil { return m.BlockSize } return nil } -func (m *ConsensusParams) GetTxSize() *TxSize { +func (m *ConsensusParams) GetEvidence() *EvidenceParams { if m != 
nil { - return m.TxSize + return m.Evidence } return nil } -func (m *ConsensusParams) GetBlockGossip() *BlockGossip { +func (m *ConsensusParams) GetValidator() *ValidatorParams { if m != nil { - return m.BlockGossip + return m.Validator } return nil } // BlockSize contains limits on the block size. -type BlockSize struct { - MaxBytes int32 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - MaxTxs int32 `protobuf:"varint,2,opt,name=max_txs,json=maxTxs,proto3" json:"max_txs,omitempty"` - MaxGas int64 `protobuf:"varint,3,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` +type BlockSizeParams struct { + // Note: must be greater than 0 + MaxBytes int64 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` + // Note: must be greater or equal to -1 + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *BlockSize) Reset() { *m = BlockSize{} } -func (m *BlockSize) String() string { return proto.CompactTextString(m) } -func (*BlockSize) ProtoMessage() {} -func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{26} +func (m *BlockSizeParams) Reset() { *m = BlockSizeParams{} } +func (m *BlockSizeParams) String() string { return proto.CompactTextString(m) } +func (*BlockSizeParams) ProtoMessage() {} +func (*BlockSizeParams) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{26} } -func (m *BlockSize) XXX_Unmarshal(b []byte) error { +func (m *BlockSizeParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *BlockSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *BlockSizeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_BlockSize.Marshal(b, m, deterministic) + return xxx_messageInfo_BlockSizeParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2399,60 +2449,53 @@ func (m *BlockSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *BlockSize) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockSize.Merge(dst, src) +func (dst *BlockSizeParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockSizeParams.Merge(dst, src) } -func (m *BlockSize) XXX_Size() int { +func (m *BlockSizeParams) XXX_Size() int { return m.Size() } -func (m *BlockSize) XXX_DiscardUnknown() { - xxx_messageInfo_BlockSize.DiscardUnknown(m) +func (m *BlockSizeParams) XXX_DiscardUnknown() { + xxx_messageInfo_BlockSizeParams.DiscardUnknown(m) } -var xxx_messageInfo_BlockSize proto.InternalMessageInfo +var xxx_messageInfo_BlockSizeParams proto.InternalMessageInfo -func (m *BlockSize) GetMaxBytes() int32 { +func (m *BlockSizeParams) GetMaxBytes() int64 { if m != nil { return m.MaxBytes } return 0 } -func (m *BlockSize) GetMaxTxs() int32 { - if m != nil { - return m.MaxTxs - } - return 0 -} - -func (m *BlockSize) GetMaxGas() int64 { +func (m *BlockSizeParams) GetMaxGas() int64 { if m != nil { return m.MaxGas } return 0 } -// TxSize contains limits on the tx size. 
-type TxSize struct { - MaxBytes int32 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` +// EvidenceParams contains limits on the evidence. +type EvidenceParams struct { + // Note: must be greater than 0 + MaxAge int64 `protobuf:"varint,1,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *TxSize) Reset() { *m = TxSize{} } -func (m *TxSize) String() string { return proto.CompactTextString(m) } -func (*TxSize) ProtoMessage() {} -func (*TxSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{27} +func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } +func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } +func (*EvidenceParams) ProtoMessage() {} +func (*EvidenceParams) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{27} } -func (m *TxSize) XXX_Unmarshal(b []byte) error { +func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *TxSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EvidenceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_TxSize.Marshal(b, m, deterministic) + return xxx_messageInfo_EvidenceParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2462,54 +2505,45 @@ func (m *TxSize) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return b[:n], nil } } -func (dst *TxSize) XXX_Merge(src proto.Message) { - xxx_messageInfo_TxSize.Merge(dst, src) +func (dst *EvidenceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_EvidenceParams.Merge(dst, src) } -func (m *TxSize) XXX_Size() int { +func (m *EvidenceParams) XXX_Size() int { return m.Size() } -func (m *TxSize) XXX_DiscardUnknown() { - xxx_messageInfo_TxSize.DiscardUnknown(m) +func (m *EvidenceParams) XXX_DiscardUnknown() { + xxx_messageInfo_EvidenceParams.DiscardUnknown(m) } -var xxx_messageInfo_TxSize proto.InternalMessageInfo - -func (m *TxSize) GetMaxBytes() int32 { - if m != nil { - return m.MaxBytes - } - return 0 -} +var xxx_messageInfo_EvidenceParams proto.InternalMessageInfo -func (m *TxSize) GetMaxGas() int64 { +func (m *EvidenceParams) GetMaxAge() int64 { if m != nil { - return m.MaxGas + return m.MaxAge } return 0 } -// BlockGossip determine consensus critical -// elements of how blocks are gossiped -type BlockGossip struct { - // Note: must not be 0 - BlockPartSizeBytes int32 `protobuf:"varint,1,opt,name=block_part_size_bytes,json=blockPartSizeBytes,proto3" json:"block_part_size_bytes,omitempty"` +// ValidatorParams contains limits on validators. 
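// A minimal sketch (assumed caller-side code with illustrative values, not
// defaults defined by this change) of the reshaped ConsensusParams, which
// now groups block-size, evidence, and validator limits in place of the old
// BlockSize/TxSize/BlockGossip trio:
//
//	params := &types.ConsensusParams{
//		BlockSize: &types.BlockSizeParams{MaxBytes: 1048576, MaxGas: -1},
//		Evidence:  &types.EvidenceParams{MaxAge: 100000},
//		Validator: &types.ValidatorParams{PubKeyTypes: []string{types.PubKeyEd25519}},
//	}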
+type ValidatorParams struct { + PubKeyTypes []string `protobuf:"bytes,1,rep,name=pub_key_types,json=pubKeyTypes" json:"pub_key_types,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } -func (m *BlockGossip) Reset() { *m = BlockGossip{} } -func (m *BlockGossip) String() string { return proto.CompactTextString(m) } -func (*BlockGossip) ProtoMessage() {} -func (*BlockGossip) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{28} +func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } +func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } +func (*ValidatorParams) ProtoMessage() {} +func (*ValidatorParams) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{28} } -func (m *BlockGossip) XXX_Unmarshal(b []byte) error { +func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *BlockGossip) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ValidatorParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_BlockGossip.Marshal(b, m, deterministic) + return xxx_messageInfo_ValidatorParams.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2519,38 +2553,38 @@ func (m *BlockGossip) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (dst *BlockGossip) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlockGossip.Merge(dst, src) +func (dst *ValidatorParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorParams.Merge(dst, src) } -func (m *BlockGossip) XXX_Size() int { +func (m *ValidatorParams) XXX_Size() int { return m.Size() } -func (m *BlockGossip) XXX_DiscardUnknown() { - xxx_messageInfo_BlockGossip.DiscardUnknown(m) +func (m *ValidatorParams) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorParams.DiscardUnknown(m) } -var xxx_messageInfo_BlockGossip proto.InternalMessageInfo +var xxx_messageInfo_ValidatorParams proto.InternalMessageInfo -func (m *BlockGossip) GetBlockPartSizeBytes() int32 { +func (m *ValidatorParams) GetPubKeyTypes() []string { if m != nil { - return m.BlockPartSizeBytes + return m.PubKeyTypes } - return 0 + return nil } type LastCommitInfo struct { - CommitRound int32 `protobuf:"varint,1,opt,name=commit_round,json=commitRound,proto3" json:"commit_round,omitempty"` - Validators []SigningValidator `protobuf:"bytes,2,rep,name=validators" json:"validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes" json:"votes"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{29} + return fileDescriptor_types_5b877df1938afe10, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2579,45 +2613,52 @@ func (m *LastCommitInfo) XXX_DiscardUnknown() { var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo -func (m *LastCommitInfo) GetCommitRound() int32 { +func (m 
*LastCommitInfo) GetRound() int32 { if m != nil { - return m.CommitRound + return m.Round } return 0 } -func (m *LastCommitInfo) GetValidators() []SigningValidator { +func (m *LastCommitInfo) GetVotes() []VoteInfo { if m != nil { - return m.Validators + return m.Votes } return nil } -// just the minimum the app might need type Header struct { - // basics - ChainID string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,3,opt,name=time,stdtime" json:"time"` - // txs - NumTxs int32 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` - TotalTxs int64 `protobuf:"varint,5,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` - // hashes - LastBlockHash []byte `protobuf:"bytes,6,opt,name=last_block_hash,json=lastBlockHash,proto3" json:"last_block_hash,omitempty"` - ValidatorsHash []byte `protobuf:"bytes,7,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` - AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - // consensus - Proposer Validator `protobuf:"bytes,9,opt,name=proposer" json:"proposer"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // basic block info + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + ChainID string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,4,opt,name=time,stdtime" json:"time"` + NumTxs int64 `protobuf:"varint,5,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` + TotalTxs int64 `protobuf:"varint,6,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` + // prev block info + LastBlockId BlockID `protobuf:"bytes,7,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"` + // hashes of block data + LastCommitHash []byte `protobuf:"bytes,8,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` + DataHash []byte `protobuf:"bytes,9,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // hashes from the app output from the prev block + ValidatorsHash []byte `protobuf:"bytes,10,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,11,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,12,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` + AppHash []byte `protobuf:"bytes,13,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,14,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // consensus info + EvidenceHash []byte `protobuf:"bytes,15,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,16,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } 
func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{30} + return fileDescriptor_types_5b877df1938afe10, []int{30} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2646,6 +2687,13 @@ func (m *Header) XXX_DiscardUnknown() { var xxx_messageInfo_Header proto.InternalMessageInfo +func (m *Header) GetVersion() Version { + if m != nil { + return m.Version + } + return Version{} +} + func (m *Header) GetChainID() string { if m != nil { return m.ChainID @@ -2667,7 +2715,7 @@ func (m *Header) GetTime() time.Time { return time.Time{} } -func (m *Header) GetNumTxs() int32 { +func (m *Header) GetNumTxs() int64 { if m != nil { return m.NumTxs } @@ -2681,9 +2729,23 @@ func (m *Header) GetTotalTxs() int64 { return 0 } -func (m *Header) GetLastBlockHash() []byte { +func (m *Header) GetLastBlockId() BlockID { + if m != nil { + return m.LastBlockId + } + return BlockID{} +} + +func (m *Header) GetLastCommitHash() []byte { + if m != nil { + return m.LastCommitHash + } + return nil +} + +func (m *Header) GetDataHash() []byte { if m != nil { - return m.LastBlockHash + return m.DataHash } return nil } @@ -2695,6 +2757,20 @@ func (m *Header) GetValidatorsHash() []byte { return nil } +func (m *Header) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *Header) GetConsensusHash() []byte { + if m != nil { + return m.ConsensusHash + } + return nil +} + func (m *Header) GetAppHash() []byte { if m != nil { return m.AppHash @@ -2702,17 +2778,196 @@ func (m *Header) GetAppHash() []byte { return nil } -func (m *Header) GetProposer() Validator { +func (m *Header) GetLastResultsHash() []byte { if m != nil { - return m.Proposer + return m.LastResultsHash } - return Validator{} + return nil +} + +func (m *Header) GetEvidenceHash() []byte { + if m != nil { + return m.EvidenceHash + } + return nil +} + +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +type Version struct { + Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{31} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return m.Size() +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *Version) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + +type BlockID struct { + Hash []byte 
`protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader" json:"parts_header"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{32} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) +} +func (m *BlockID) XXX_Size() int { + return m.Size() +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockID proto.InternalMessageInfo + +func (m *BlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *BlockID) GetPartsHeader() PartSetHeader { + if m != nil { + return m.PartsHeader + } + return PartSetHeader{} +} + +type PartSetHeader struct { + Total int32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{33} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) +} +func (m *PartSetHeader) XXX_Size() int { + return m.Size() +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo + +func (m *PartSetHeader) GetTotal() int32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *PartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil } // Validator type Validator struct { - Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - PubKey PubKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey" json:"pub_key"` + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2723,7 +2978,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { 
return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{31} + return fileDescriptor_types_5b877df1938afe10, []int{34} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2759,22 +3014,71 @@ func (m *Validator) GetAddress() []byte { return nil } -func (m *Validator) GetPubKey() PubKey { +func (m *Validator) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +// ValidatorUpdate +type ValidatorUpdate struct { + PubKey PubKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } +func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } +func (*ValidatorUpdate) ProtoMessage() {} +func (*ValidatorUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{35} +} +func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ValidatorUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorUpdate.Merge(dst, src) +} +func (m *ValidatorUpdate) XXX_Size() int { + return m.Size() +} +func (m *ValidatorUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo + +func (m *ValidatorUpdate) GetPubKey() PubKey { if m != nil { return m.PubKey } return PubKey{} } -func (m *Validator) GetPower() int64 { +func (m *ValidatorUpdate) GetPower() int64 { if m != nil { return m.Power } return 0 } -// Validator with an extra bool -type SigningValidator struct { +// VoteInfo +type VoteInfo struct { Validator Validator `protobuf:"bytes,1,opt,name=validator" json:"validator"` SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2782,18 +3086,18 @@ type SigningValidator struct { XXX_sizecache int32 `json:"-"` } -func (m *SigningValidator) Reset() { *m = SigningValidator{} } -func (m *SigningValidator) String() string { return proto.CompactTextString(m) } -func (*SigningValidator) ProtoMessage() {} -func (*SigningValidator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{32} +func (m *VoteInfo) Reset() { *m = VoteInfo{} } +func (m *VoteInfo) String() string { return proto.CompactTextString(m) } +func (*VoteInfo) ProtoMessage() {} +func (*VoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_types_5b877df1938afe10, []int{36} } -func (m *SigningValidator) XXX_Unmarshal(b []byte) error { +func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SigningValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_SigningValidator.Marshal(b, m, deterministic) + return 
xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2803,26 +3107,26 @@ func (m *SigningValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (dst *SigningValidator) XXX_Merge(src proto.Message) { - xxx_messageInfo_SigningValidator.Merge(dst, src) +func (dst *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(dst, src) } -func (m *SigningValidator) XXX_Size() int { +func (m *VoteInfo) XXX_Size() int { return m.Size() } -func (m *SigningValidator) XXX_DiscardUnknown() { - xxx_messageInfo_SigningValidator.DiscardUnknown(m) +func (m *VoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_VoteInfo.DiscardUnknown(m) } -var xxx_messageInfo_SigningValidator proto.InternalMessageInfo +var xxx_messageInfo_VoteInfo proto.InternalMessageInfo -func (m *SigningValidator) GetValidator() Validator { +func (m *VoteInfo) GetValidator() Validator { if m != nil { return m.Validator } return Validator{} } -func (m *SigningValidator) GetSignedLastBlock() bool { +func (m *VoteInfo) GetSignedLastBlock() bool { if m != nil { return m.SignedLastBlock } @@ -2841,7 +3145,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{33} + return fileDescriptor_types_5b877df1938afe10, []int{37} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2899,7 +3203,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{34} + return fileDescriptor_types_5b877df1938afe10, []int{38} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3016,20 +3320,28 @@ func init() { golang_proto.RegisterType((*ResponseCommit)(nil), "types.ResponseCommit") proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") golang_proto.RegisterType((*ConsensusParams)(nil), "types.ConsensusParams") - proto.RegisterType((*BlockSize)(nil), "types.BlockSize") - golang_proto.RegisterType((*BlockSize)(nil), "types.BlockSize") - proto.RegisterType((*TxSize)(nil), "types.TxSize") - golang_proto.RegisterType((*TxSize)(nil), "types.TxSize") - proto.RegisterType((*BlockGossip)(nil), "types.BlockGossip") - golang_proto.RegisterType((*BlockGossip)(nil), "types.BlockGossip") + proto.RegisterType((*BlockSizeParams)(nil), "types.BlockSizeParams") + golang_proto.RegisterType((*BlockSizeParams)(nil), "types.BlockSizeParams") + proto.RegisterType((*EvidenceParams)(nil), "types.EvidenceParams") + golang_proto.RegisterType((*EvidenceParams)(nil), "types.EvidenceParams") + proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") + golang_proto.RegisterType((*ValidatorParams)(nil), "types.ValidatorParams") proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") proto.RegisterType((*Header)(nil), "types.Header") golang_proto.RegisterType((*Header)(nil), "types.Header") + proto.RegisterType((*Version)(nil), "types.Version") + golang_proto.RegisterType((*Version)(nil), "types.Version") + proto.RegisterType((*BlockID)(nil), "types.BlockID") + golang_proto.RegisterType((*BlockID)(nil), "types.BlockID") + 
proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader") + golang_proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader") proto.RegisterType((*Validator)(nil), "types.Validator") golang_proto.RegisterType((*Validator)(nil), "types.Validator") - proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") - golang_proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") + proto.RegisterType((*ValidatorUpdate)(nil), "types.ValidatorUpdate") + golang_proto.RegisterType((*ValidatorUpdate)(nil), "types.ValidatorUpdate") + proto.RegisterType((*VoteInfo)(nil), "types.VoteInfo") + golang_proto.RegisterType((*VoteInfo)(nil), "types.VoteInfo") proto.RegisterType((*PubKey)(nil), "types.PubKey") golang_proto.RegisterType((*PubKey)(nil), "types.PubKey") proto.RegisterType((*Evidence)(nil), "types.Evidence") @@ -3405,6 +3717,12 @@ func (this *RequestInfo) Equal(that interface{}) bool { if this.Version != that1.Version { return false } + if this.BlockVersion != that1.BlockVersion { + return false + } + if this.P2PVersion != that1.P2PVersion { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4090,6 +4408,9 @@ func (this *ResponseInfo) Equal(that interface{}) bool { if this.Version != that1.Version { return false } + if this.AppVersion != that1.AppVersion { + return false + } if this.LastBlockHeight != that1.LastBlockHeight { return false } @@ -4206,12 +4527,15 @@ func (this *ResponseQuery) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } - if !bytes.Equal(this.Proof, that1.Proof) { + if !this.Proof.Equal(that1.Proof) { return false } if this.Height != that1.Height { return false } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4294,6 +4618,9 @@ func (this *ResponseCheckTx) Equal(that interface{}) bool { return false } } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4344,6 +4671,9 @@ func (this *ResponseDeliverTx) Equal(that interface{}) bool { return false } } + if this.Codespace != that1.Codespace { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -4441,10 +4771,10 @@ func (this *ConsensusParams) Equal(that interface{}) bool { if !this.BlockSize.Equal(that1.BlockSize) { return false } - if !this.TxSize.Equal(that1.TxSize) { + if !this.Evidence.Equal(that1.Evidence) { return false } - if !this.BlockGossip.Equal(that1.BlockGossip) { + if !this.Validator.Equal(that1.Validator) { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { @@ -4452,14 +4782,14 @@ func (this *ConsensusParams) Equal(that interface{}) bool { } return true } -func (this *BlockSize) Equal(that interface{}) bool { +func (this *BlockSizeParams) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*BlockSize) + that1, ok := that.(*BlockSizeParams) if !ok { - that2, ok := that.(BlockSize) + that2, ok := that.(BlockSizeParams) if ok { that1 = &that2 } else { @@ -4474,9 +4804,6 @@ func (this *BlockSize) Equal(that interface{}) bool { if this.MaxBytes != that1.MaxBytes { return false } - if this.MaxTxs != that1.MaxTxs { - return false - } if this.MaxGas != that1.MaxGas { return false } @@ -4485,14 +4812,14 @@ func (this *BlockSize) Equal(that interface{}) bool { } return true } -func (this *TxSize) 
Equal(that interface{}) bool { +func (this *EvidenceParams) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*TxSize) + that1, ok := that.(*EvidenceParams) if !ok { - that2, ok := that.(TxSize) + that2, ok := that.(EvidenceParams) if ok { that1 = &that2 } else { @@ -4504,10 +4831,7 @@ func (this *TxSize) Equal(that interface{}) bool { } else if this == nil { return false } - if this.MaxBytes != that1.MaxBytes { - return false - } - if this.MaxGas != that1.MaxGas { + if this.MaxAge != that1.MaxAge { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { @@ -4515,14 +4839,14 @@ func (this *TxSize) Equal(that interface{}) bool { } return true } -func (this *BlockGossip) Equal(that interface{}) bool { +func (this *ValidatorParams) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*BlockGossip) + that1, ok := that.(*ValidatorParams) if !ok { - that2, ok := that.(BlockGossip) + that2, ok := that.(ValidatorParams) if ok { that1 = &that2 } else { @@ -4534,10 +4858,15 @@ func (this *BlockGossip) Equal(that interface{}) bool { } else if this == nil { return false } - if this.BlockPartSizeBytes != that1.BlockPartSizeBytes { + if len(this.PubKeyTypes) != len(that1.PubKeyTypes) { return false } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + for i := range this.PubKeyTypes { + if this.PubKeyTypes[i] != that1.PubKeyTypes[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } return true @@ -4561,14 +4890,14 @@ func (this *LastCommitInfo) Equal(that interface{}) bool { } else if this == nil { return false } - if this.CommitRound != that1.CommitRound { + if this.Round != that1.Round { return false } - if len(this.Validators) != len(that1.Validators) { + if len(this.Votes) != len(that1.Votes) { return false } - for i := range this.Validators { - if !this.Validators[i].Equal(&that1.Validators[i]) { + for i := range this.Votes { + if !this.Votes[i].Equal(&that1.Votes[i]) { return false } } @@ -4596,6 +4925,9 @@ func (this *Header) Equal(that interface{}) bool { } else if this == nil { return false } + if !this.Version.Equal(&that1.Version) { + return false + } if this.ChainID != that1.ChainID { return false } @@ -4611,16 +4943,124 @@ func (this *Header) Equal(that interface{}) bool { if this.TotalTxs != that1.TotalTxs { return false } - if !bytes.Equal(this.LastBlockHash, that1.LastBlockHash) { + if !this.LastBlockId.Equal(&that1.LastBlockId) { + return false + } + if !bytes.Equal(this.LastCommitHash, that1.LastCommitHash) { + return false + } + if !bytes.Equal(this.DataHash, that1.DataHash) { return false } if !bytes.Equal(this.ValidatorsHash, that1.ValidatorsHash) { return false } + if !bytes.Equal(this.NextValidatorsHash, that1.NextValidatorsHash) { + return false + } + if !bytes.Equal(this.ConsensusHash, that1.ConsensusHash) { + return false + } if !bytes.Equal(this.AppHash, that1.AppHash) { return false } - if !this.Proposer.Equal(&that1.Proposer) { + if !bytes.Equal(this.LastResultsHash, that1.LastResultsHash) { + return false + } + if !bytes.Equal(this.EvidenceHash, that1.EvidenceHash) { + return false + } + if !bytes.Equal(this.ProposerAddress, that1.ProposerAddress) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Version) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Version) + if 
!ok { + that2, ok := that.(Version) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Block != that1.Block { + return false + } + if this.App != that1.App { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BlockID) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockID) + if !ok { + that2, ok := that.(BlockID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Hash, that1.Hash) { + return false + } + if !this.PartsHeader.Equal(&that1.PartsHeader) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *PartSetHeader) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartSetHeader) + if !ok { + that2, ok := that.(PartSetHeader) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Total != that1.Total { + return false + } + if !bytes.Equal(this.Hash, that1.Hash) { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { @@ -4650,6 +5090,33 @@ func (this *Validator) Equal(that interface{}) bool { if !bytes.Equal(this.Address, that1.Address) { return false } + if this.Power != that1.Power { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ValidatorUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorUpdate) + if !ok { + that2, ok := that.(ValidatorUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } if !this.PubKey.Equal(&that1.PubKey) { return false } @@ -4661,14 +5128,14 @@ func (this *Validator) Equal(that interface{}) bool { } return true } -func (this *SigningValidator) Equal(that interface{}) bool { +func (this *VoteInfo) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SigningValidator) + that1, ok := that.(*VoteInfo) if !ok { - that2, ok := that.(SigningValidator) + that2, ok := that.(VoteInfo) if ok { that1 = &that2 } else { @@ -4769,8 +5236,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for ABCIApplication service - +// ABCIApplicationClient is the client API for ABCIApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ABCIApplicationClient interface { Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) @@ -4892,8 +5360,7 @@ func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBloc return out, nil } -// Server API for ABCIApplication service - +// ABCIApplicationServer is the server API for ABCIApplication service. 
type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) Flush(context.Context, *RequestFlush) (*ResponseFlush, error) @@ -5416,6 +5883,16 @@ func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } + if m.BlockVersion != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.P2PVersion)) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6020,13 +6497,18 @@ func (m *ResponseInfo) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Version))) i += copy(dAtA[i:], m.Version) } - if m.LastBlockHeight != 0 { + if m.AppVersion != 0 { dAtA[i] = 0x18 i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + } + if m.LastBlockHeight != 0 { + dAtA[i] = 0x20 + i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockHeight)) } if len(m.LastBlockAppHash) > 0 { - dAtA[i] = 0x22 + dAtA[i] = 0x2a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockAppHash))) i += copy(dAtA[i:], m.LastBlockAppHash) @@ -6167,17 +6649,27 @@ func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } - if len(m.Proof) > 0 { + if m.Proof != nil { dAtA[i] = 0x42 i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.Proof))) - i += copy(dAtA[i:], m.Proof) + i = encodeVarintTypes(dAtA, i, uint64(m.Proof.Size())) + n31, err := m.Proof.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } if m.Height != 0 { dAtA[i] = 0x48 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } + if len(m.Codespace) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6277,6 +6769,12 @@ func (m *ResponseCheckTx) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6343,6 +6841,12 @@ func (m *ResponseDeliverTx) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Codespace) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Codespace))) + i += copy(dAtA[i:], m.Codespace) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6380,11 +6884,11 @@ func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) - n31, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + n32, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.Tags) > 0 { for _, msg := range m.Tags { @@ -6450,31 +6954,31 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize.Size())) - n32, err := m.BlockSize.MarshalTo(dAtA[i:]) + n33, err := m.BlockSize.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 } - if m.TxSize != nil { + if m.Evidence != nil { dAtA[i] = 0x12 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.TxSize.Size())) - n33, err := m.TxSize.MarshalTo(dAtA[i:]) + i = encodeVarintTypes(dAtA, i, 
uint64(m.Evidence.Size())) + n34, err := m.Evidence.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 } - if m.BlockGossip != nil { + if m.Validator != nil { dAtA[i] = 0x1a i++ - i = encodeVarintTypes(dAtA, i, uint64(m.BlockGossip.Size())) - n34, err := m.BlockGossip.MarshalTo(dAtA[i:]) + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n35, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -6482,7 +6986,7 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *BlockSize) Marshal() (dAtA []byte, err error) { +func (m *BlockSizeParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6492,7 +6996,7 @@ func (m *BlockSize) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { +func (m *BlockSizeParams) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6502,13 +7006,8 @@ func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) } - if m.MaxTxs != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxs)) - } if m.MaxGas != 0 { - dAtA[i] = 0x18 + dAtA[i] = 0x10 i++ i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) } @@ -6518,7 +7017,7 @@ func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *TxSize) Marshal() (dAtA []byte, err error) { +func (m *EvidenceParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6528,20 +7027,15 @@ func (m *TxSize) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TxSize) MarshalTo(dAtA []byte) (int, error) { +func (m *EvidenceParams) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.MaxBytes != 0 { + if m.MaxAge != 0 { dAtA[i] = 0x8 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) - } - if m.MaxGas != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) + i = encodeVarintTypes(dAtA, i, uint64(m.MaxAge)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -6549,7 +7043,7 @@ func (m *TxSize) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *BlockGossip) Marshal() (dAtA []byte, err error) { +func (m *ValidatorParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6559,15 +7053,25 @@ func (m *BlockGossip) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BlockGossip) MarshalTo(dAtA []byte) (int, error) { +func (m *ValidatorParams) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.BlockPartSizeBytes != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.BlockPartSizeBytes)) + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -6590,13 +7094,13 @@ func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.CommitRound != 0 { + if m.Round != 0 { dAtA[i] = 0x8 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.CommitRound)) + i = encodeVarintTypes(dAtA, i, 
uint64(m.Round)) } - if len(m.Validators) > 0 { - for _, msg := range m.Validators { + if len(m.Votes) > 0 { + for _, msg := range m.Votes { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -6628,68 +7132,114 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Version.Size())) + n36, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 if len(m.ChainID) > 0 { - dAtA[i] = 0xa + dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ChainID))) i += copy(dAtA[i:], m.ChainID) } if m.Height != 0 { - dAtA[i] = 0x10 + dAtA[i] = 0x18 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Height)) } - dAtA[i] = 0x1a + dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n35, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n37, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n37 if m.NumTxs != 0 { - dAtA[i] = 0x20 + dAtA[i] = 0x28 i++ i = encodeVarintTypes(dAtA, i, uint64(m.NumTxs)) } if m.TotalTxs != 0 { - dAtA[i] = 0x28 + dAtA[i] = 0x30 i++ i = encodeVarintTypes(dAtA, i, uint64(m.TotalTxs)) } - if len(m.LastBlockHash) > 0 { - dAtA[i] = 0x32 + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) + n38, err := m.LastBlockId.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + if len(m.LastCommitHash) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i += copy(dAtA[i:], m.LastCommitHash) + } + if len(m.DataHash) > 0 { + dAtA[i] = 0x4a i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockHash))) - i += copy(dAtA[i:], m.LastBlockHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i += copy(dAtA[i:], m.DataHash) } if len(m.ValidatorsHash) > 0 { - dAtA[i] = 0x3a + dAtA[i] = 0x52 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) i += copy(dAtA[i:], m.ValidatorsHash) } + if len(m.NextValidatorsHash) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i += copy(dAtA[i:], m.NextValidatorsHash) + } + if len(m.ConsensusHash) > 0 { + dAtA[i] = 0x62 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) + i += copy(dAtA[i:], m.ConsensusHash) + } if len(m.AppHash) > 0 { - dAtA[i] = 0x42 + dAtA[i] = 0x6a i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) i += copy(dAtA[i:], m.AppHash) } - dAtA[i] = 0x4a - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.Proposer.Size())) - n36, err := m.Proposer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err + if len(m.LastResultsHash) > 0 { + dAtA[i] = 0x72 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i += copy(dAtA[i:], m.LastResultsHash) + } + if len(m.EvidenceHash) > 0 { + dAtA[i] = 0x7a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i += copy(dAtA[i:], m.EvidenceHash) + } + if len(m.ProposerAddress) > 0 { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i += copy(dAtA[i:], m.ProposerAddress) } - i += n36 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func (m *Validator) Marshal() (dAtA []byte, err error) { +func (m *Version) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalTo(dAtA) @@ -6699,29 +7249,20 @@ func (m *Validator) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Validator) MarshalTo(dAtA []byte) (int, error) { +func (m *Version) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Address) > 0 { - dAtA[i] = 0xa + if m.Block != 0 { + dAtA[i] = 0x8 i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) + i = encodeVarintTypes(dAtA, i, uint64(m.Block)) } - dAtA[i] = 0x12 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n37, err := m.PubKey.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - if m.Power != 0 { - dAtA[i] = 0x18 + if m.App != 0 { + dAtA[i] = 0x10 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + i = encodeVarintTypes(dAtA, i, uint64(m.App)) } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -6729,7 +7270,7 @@ func (m *Validator) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SigningValidator) Marshal() (dAtA []byte, err error) { +func (m *BlockID) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6739,22 +7280,155 @@ func (m *SigningValidator) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SigningValidator) MarshalTo(dAtA []byte) (int, error) { +func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - dAtA[i] = 0xa + if len(m.Hash) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } + dAtA[i] = 0x12 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n38, err := m.Validator.MarshalTo(dAtA[i:]) + i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) + n39, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 - if m.SignedLastBlock { - dAtA[i] = 0x10 - i++ + i += n39 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Total != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + } + if len(m.Hash) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Validator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Validator) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Address) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + } + if m.Power != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) + n40, err := m.PubKey.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if m.Power != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *VoteInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) + n41, err := m.Validator.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + if m.SignedLastBlock { + dAtA[i] = 0x10 + i++ if m.SignedLastBlock { dAtA[i] = 1 } else { @@ -6825,11 +7499,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n39, err := m.Validator.MarshalTo(dAtA[i:]) + n42, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n42 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -6838,11 +7512,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n40, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n43, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n43 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -6971,8 +7645,10 @@ func NewPopulatedRequestFlush(r randyTypes, easy bool) *RequestFlush { func NewPopulatedRequestInfo(r randyTypes, easy bool) *RequestInfo { this := &RequestInfo{} this.Version = string(randStringTypes(r)) + this.BlockVersion = uint64(uint64(r.Uint32())) + this.P2PVersion = uint64(uint64(r.Uint32())) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) } return this } @@ -6997,9 +7673,9 @@ func NewPopulatedRequestInitChain(r randyTypes, easy bool) *RequestInitChain { } if r.Intn(10) != 0 { v2 := r.Intn(5) - this.Validators = make([]Validator, v2) + this.Validators = make([]ValidatorUpdate, v2) for i := 0; i < v2; i++ { - v3 := NewPopulatedValidator(r, easy) + v3 := NewPopulatedValidatorUpdate(r, easy) this.Validators[i] = *v3 } } @@ -7229,6 +7905,7 @@ func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { this := &ResponseInfo{} this.Data = string(randStringTypes(r)) this.Version = string(randStringTypes(r)) + this.AppVersion = uint64(uint64(r.Uint32())) this.LastBlockHeight = int64(r.Int63()) if r.Intn(2) == 0 { this.LastBlockHeight *= -1 @@ -7239,7 +7916,7 @@ func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { this.LastBlockAppHash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 5) + this.XXX_unrecognized = randUnrecognizedTypes(r, 6) } return this } @@ -7262,9 +7939,9 @@ func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { } if r.Intn(10) != 0 { v14 := r.Intn(5) - this.Validators = make([]Validator, v14) + this.Validators = make([]ValidatorUpdate, v14) for i := 
0; i < v14; i++ { - v15 := NewPopulatedValidator(r, easy) + v15 := NewPopulatedValidatorUpdate(r, easy) this.Validators[i] = *v15 } } @@ -7293,17 +7970,16 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { for i := 0; i < v17; i++ { this.Value[i] = byte(r.Intn(256)) } - v18 := r.Intn(100) - this.Proof = make([]byte, v18) - for i := 0; i < v18; i++ { - this.Proof[i] = byte(r.Intn(256)) + if r.Intn(10) != 0 { + this.Proof = merkle.NewPopulatedProof(r, easy) } this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 10) + this.XXX_unrecognized = randUnrecognizedTypes(r, 11) } return this } @@ -7311,11 +7987,11 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { this := &ResponseBeginBlock{} if r.Intn(10) != 0 { - v19 := r.Intn(5) - this.Tags = make([]common.KVPair, v19) - for i := 0; i < v19; i++ { - v20 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v20 + v18 := r.Intn(5) + this.Tags = make([]common.KVPair, v18) + for i := 0; i < v18; i++ { + v19 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v19 } } if !easy && r.Intn(10) != 0 { @@ -7327,9 +8003,9 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this := &ResponseCheckTx{} this.Code = uint32(r.Uint32()) - v21 := r.Intn(100) - this.Data = make([]byte, v21) - for i := 0; i < v21; i++ { + v20 := r.Intn(100) + this.Data = make([]byte, v20) + for i := 0; i < v20; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7343,15 +8019,16 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v22 := r.Intn(5) - this.Tags = make([]common.KVPair, v22) - for i := 0; i < v22; i++ { - v23 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v23 + v21 := r.Intn(5) + this.Tags = make([]common.KVPair, v21) + for i := 0; i < v21; i++ { + v22 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v22 } } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 8) + this.XXX_unrecognized = randUnrecognizedTypes(r, 9) } return this } @@ -7359,9 +8036,9 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this := &ResponseDeliverTx{} this.Code = uint32(r.Uint32()) - v24 := r.Intn(100) - this.Data = make([]byte, v24) - for i := 0; i < v24; i++ { + v23 := r.Intn(100) + this.Data = make([]byte, v23) + for i := 0; i < v23; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7375,15 +8052,16 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v25 := r.Intn(5) - this.Tags = make([]common.KVPair, v25) - for i := 0; i < v25; i++ { - v26 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v26 + v24 := r.Intn(5) + this.Tags = make([]common.KVPair, v24) + for i := 0; i < v24; i++ { + v25 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v25 } } + this.Codespace = string(randStringTypes(r)) if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 8) + 
this.XXX_unrecognized = randUnrecognizedTypes(r, 9) } return this } @@ -7391,22 +8069,22 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(10) != 0 { - v27 := r.Intn(5) - this.ValidatorUpdates = make([]Validator, v27) - for i := 0; i < v27; i++ { - v28 := NewPopulatedValidator(r, easy) - this.ValidatorUpdates[i] = *v28 + v26 := r.Intn(5) + this.ValidatorUpdates = make([]ValidatorUpdate, v26) + for i := 0; i < v26; i++ { + v27 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v27 } } if r.Intn(10) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } if r.Intn(10) != 0 { - v29 := r.Intn(5) - this.Tags = make([]common.KVPair, v29) - for i := 0; i < v29; i++ { - v30 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v30 + v28 := r.Intn(5) + this.Tags = make([]common.KVPair, v28) + for i := 0; i < v28; i++ { + v29 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v29 } } if !easy && r.Intn(10) != 0 { @@ -7417,9 +8095,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { this := &ResponseCommit{} - v31 := r.Intn(100) - this.Data = make([]byte, v31) - for i := 0; i < v31; i++ { + v30 := r.Intn(100) + this.Data = make([]byte, v30) + for i := 0; i < v30; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7431,13 +8109,13 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { this := &ConsensusParams{} if r.Intn(10) != 0 { - this.BlockSize = NewPopulatedBlockSize(r, easy) + this.BlockSize = NewPopulatedBlockSizeParams(r, easy) } if r.Intn(10) != 0 { - this.TxSize = NewPopulatedTxSize(r, easy) + this.Evidence = NewPopulatedEvidenceParams(r, easy) } if r.Intn(10) != 0 { - this.BlockGossip = NewPopulatedBlockGossip(r, easy) + this.Validator = NewPopulatedValidatorParams(r, easy) } if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 4) @@ -7445,47 +8123,40 @@ func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { return this } -func NewPopulatedBlockSize(r randyTypes, easy bool) *BlockSize { - this := &BlockSize{} - this.MaxBytes = int32(r.Int31()) +func NewPopulatedBlockSizeParams(r randyTypes, easy bool) *BlockSizeParams { + this := &BlockSizeParams{} + this.MaxBytes = int64(r.Int63()) if r.Intn(2) == 0 { this.MaxBytes *= -1 } - this.MaxTxs = int32(r.Int31()) - if r.Intn(2) == 0 { - this.MaxTxs *= -1 - } this.MaxGas = int64(r.Int63()) if r.Intn(2) == 0 { this.MaxGas *= -1 } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } -func NewPopulatedTxSize(r randyTypes, easy bool) *TxSize { - this := &TxSize{} - this.MaxBytes = int32(r.Int31()) - if r.Intn(2) == 0 { - this.MaxBytes *= -1 - } - this.MaxGas = int64(r.Int63()) +func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { + this := &EvidenceParams{} + this.MaxAge = int64(r.Int63()) if r.Intn(2) == 0 { - this.MaxGas *= -1 + this.MaxAge *= -1 } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) } return this } -func NewPopulatedBlockGossip(r randyTypes, easy bool) 
*BlockGossip { - this := &BlockGossip{} - this.BlockPartSizeBytes = int32(r.Int31()) - if r.Intn(2) == 0 { - this.BlockPartSizeBytes *= -1 +func NewPopulatedValidatorParams(r randyTypes, easy bool) *ValidatorParams { + this := &ValidatorParams{} + v31 := r.Intn(10) + this.PubKeyTypes = make([]string, v31) + for i := 0; i < v31; i++ { + this.PubKeyTypes[i] = string(randStringTypes(r)) } if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 2) @@ -7495,16 +8166,16 @@ func NewPopulatedBlockGossip(r randyTypes, easy bool) *BlockGossip { func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this := &LastCommitInfo{} - this.CommitRound = int32(r.Int31()) + this.Round = int32(r.Int31()) if r.Intn(2) == 0 { - this.CommitRound *= -1 + this.Round *= -1 } if r.Intn(10) != 0 { v32 := r.Intn(5) - this.Validators = make([]SigningValidator, v32) + this.Votes = make([]VoteInfo, v32) for i := 0; i < v32; i++ { - v33 := NewPopulatedSigningValidator(r, easy) - this.Validators[i] = *v33 + v33 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v33 } } if !easy && r.Intn(10) != 0 { @@ -7515,14 +8186,16 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} + v34 := NewPopulatedVersion(r, easy) + this.Version = *v34 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v34 - this.NumTxs = int32(r.Int31()) + v35 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v35 + this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 } @@ -7530,38 +8203,108 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v35 := r.Intn(100) - this.LastBlockHash = make([]byte, v35) - for i := 0; i < v35; i++ { - this.LastBlockHash[i] = byte(r.Intn(256)) + v36 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v36 + v37 := r.Intn(100) + this.LastCommitHash = make([]byte, v37) + for i := 0; i < v37; i++ { + this.LastCommitHash[i] = byte(r.Intn(256)) + } + v38 := r.Intn(100) + this.DataHash = make([]byte, v38) + for i := 0; i < v38; i++ { + this.DataHash[i] = byte(r.Intn(256)) } - v36 := r.Intn(100) - this.ValidatorsHash = make([]byte, v36) - for i := 0; i < v36; i++ { + v39 := r.Intn(100) + this.ValidatorsHash = make([]byte, v39) + for i := 0; i < v39; i++ { this.ValidatorsHash[i] = byte(r.Intn(256)) } - v37 := r.Intn(100) - this.AppHash = make([]byte, v37) - for i := 0; i < v37; i++ { + v40 := r.Intn(100) + this.NextValidatorsHash = make([]byte, v40) + for i := 0; i < v40; i++ { + this.NextValidatorsHash[i] = byte(r.Intn(256)) + } + v41 := r.Intn(100) + this.ConsensusHash = make([]byte, v41) + for i := 0; i < v41; i++ { + this.ConsensusHash[i] = byte(r.Intn(256)) + } + v42 := r.Intn(100) + this.AppHash = make([]byte, v42) + for i := 0; i < v42; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v38 := NewPopulatedValidator(r, easy) - this.Proposer = *v38 + v43 := r.Intn(100) + this.LastResultsHash = make([]byte, v43) + for i := 0; i < v43; i++ { + this.LastResultsHash[i] = byte(r.Intn(256)) + } + v44 := r.Intn(100) + this.EvidenceHash = make([]byte, v44) + for i := 0; i < v44; i++ { + this.EvidenceHash[i] = byte(r.Intn(256)) + } + v45 := r.Intn(100) + this.ProposerAddress = make([]byte, v45) + for i := 0; i < v45; i++ { + this.ProposerAddress[i] = byte(r.Intn(256)) 
+ } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 17) + } + return this +} + +func NewPopulatedVersion(r randyTypes, easy bool) *Version { + this := &Version{} + this.Block = uint64(uint64(r.Uint32())) + this.App = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { + this := &BlockID{} + v46 := r.Intn(100) + this.Hash = make([]byte, v46) + for i := 0; i < v46; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + v47 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v47 + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { + this := &PartSetHeader{} + this.Total = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Total *= -1 + } + v48 := r.Intn(100) + this.Hash = make([]byte, v48) + for i := 0; i < v48; i++ { + this.Hash[i] = byte(r.Intn(256)) + } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 10) + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v39 := r.Intn(100) - this.Address = make([]byte, v39) - for i := 0; i < v39; i++ { + v49 := r.Intn(100) + this.Address = make([]byte, v49) + for i := 0; i < v49; i++ { this.Address[i] = byte(r.Intn(256)) } - v40 := NewPopulatedPubKey(r, easy) - this.PubKey = *v40 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -7572,10 +8315,24 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { return this } -func NewPopulatedSigningValidator(r randyTypes, easy bool) *SigningValidator { - this := &SigningValidator{} - v41 := NewPopulatedValidator(r, easy) - this.Validator = *v41 +func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { + this := &ValidatorUpdate{} + v50 := NewPopulatedPubKey(r, easy) + this.PubKey = *v50 + this.Power = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Power *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { + this := &VoteInfo{} + v51 := NewPopulatedValidator(r, easy) + this.Validator = *v51 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -7586,9 +8343,9 @@ func NewPopulatedSigningValidator(r randyTypes, easy bool) *SigningValidator { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v42 := r.Intn(100) - this.Data = make([]byte, v42) - for i := 0; i < v42; i++ { + v52 := r.Intn(100) + this.Data = make([]byte, v52) + for i := 0; i < v52; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7600,14 +8357,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v43 := NewPopulatedValidator(r, easy) - this.Validator = *v43 + v53 := NewPopulatedValidator(r, easy) + this.Validator = *v53 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v44 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v44 + v54 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, 
easy) + this.Time = *v54 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -7637,9 +8394,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v45 := r.Intn(100) - tmps := make([]rune, v45) - for i := 0; i < v45; i++ { + v55 := r.Intn(100) + tmps := make([]rune, v55) + for i := 0; i < v55; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -7661,11 +8418,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v46 := r.Int63() + v56 := r.Int63() if r.Intn(2) == 0 { - v46 *= -1 + v56 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v46)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v56)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -7830,6 +8587,12 @@ func (m *RequestInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.BlockVersion != 0 { + n += 1 + sovTypes(uint64(m.BlockVersion)) + } + if m.P2PVersion != 0 { + n += 1 + sovTypes(uint64(m.P2PVersion)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8141,6 +8904,9 @@ func (m *ResponseInfo) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } if m.LastBlockHeight != 0 { n += 1 + sovTypes(uint64(m.LastBlockHeight)) } @@ -8218,13 +8984,17 @@ func (m *ResponseQuery) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Proof) - if l > 0 { + if m.Proof != nil { + l = m.Proof.Size() n += 1 + l + sovTypes(uint64(l)) } if m.Height != 0 { n += 1 + sovTypes(uint64(m.Height)) } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8276,6 +9046,10 @@ func (m *ResponseCheckTx) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8312,6 +9086,10 @@ func (m *ResponseDeliverTx) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + l = len(m.Codespace) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -8363,12 +9141,12 @@ func (m *ConsensusParams) Size() (n int) { l = m.BlockSize.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.TxSize != nil { - l = m.TxSize.Size() + if m.Evidence != nil { + l = m.Evidence.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.BlockGossip != nil { - l = m.BlockGossip.Size() + if m.Validator != nil { + l = m.Validator.Size() n += 1 + l + sovTypes(uint64(l)) } if m.XXX_unrecognized != nil { @@ -8377,15 +9155,12 @@ func (m *ConsensusParams) Size() (n int) { return n } -func (m *BlockSize) Size() (n int) { +func (m *BlockSizeParams) Size() (n int) { var l int _ = l if m.MaxBytes != 0 { n += 1 + sovTypes(uint64(m.MaxBytes)) } - if m.MaxTxs != 0 { - n += 1 + sovTypes(uint64(m.MaxTxs)) - } if m.MaxGas != 0 { n += 1 + sovTypes(uint64(m.MaxGas)) } @@ -8395,14 +9170,11 @@ func (m *BlockSize) Size() (n int) { return n } -func (m *TxSize) Size() (n int) { +func (m *EvidenceParams) Size() (n int) { var l int _ = l - if m.MaxBytes != 0 { - n += 1 + sovTypes(uint64(m.MaxBytes)) - } - if 
m.MaxGas != 0 { - n += 1 + sovTypes(uint64(m.MaxGas)) + if m.MaxAge != 0 { + n += 1 + sovTypes(uint64(m.MaxAge)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -8410,11 +9182,14 @@ func (m *TxSize) Size() (n int) { return n } -func (m *BlockGossip) Size() (n int) { +func (m *ValidatorParams) Size() (n int) { var l int _ = l - if m.BlockPartSizeBytes != 0 { - n += 1 + sovTypes(uint64(m.BlockPartSizeBytes)) + if len(m.PubKeyTypes) > 0 { + for _, s := range m.PubKeyTypes { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -8425,11 +9200,11 @@ func (m *BlockGossip) Size() (n int) { func (m *LastCommitInfo) Size() (n int) { var l int _ = l - if m.CommitRound != 0 { - n += 1 + sovTypes(uint64(m.CommitRound)) + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) } - if len(m.Validators) > 0 { - for _, e := range m.Validators { + if len(m.Votes) > 0 { + for _, e := range m.Votes { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -8443,6 +9218,8 @@ func (m *LastCommitInfo) Size() (n int) { func (m *Header) Size() (n int) { var l int _ = l + l = m.Version.Size() + n += 1 + l + sovTypes(uint64(l)) l = len(m.ChainID) if l > 0 { n += 1 + l + sovTypes(uint64(l)) @@ -8458,7 +9235,13 @@ func (m *Header) Size() (n int) { if m.TotalTxs != 0 { n += 1 + sovTypes(uint64(m.TotalTxs)) } - l = len(m.LastBlockHash) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.DataHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } @@ -8466,29 +9249,29 @@ func (m *Header) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } l = len(m.AppHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = m.Proposer.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) } - return n -} - -func (m *Validator) Size() (n int) { - var l int - _ = l - l = len(m.Address) + l = len(m.EvidenceHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = m.PubKey.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.Power != 0 { - n += 1 + sovTypes(uint64(m.Power)) + l = len(m.ProposerAddress) + if l > 0 { + n += 2 + l + sovTypes(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -8496,13 +9279,14 @@ func (m *Validator) Size() (n int) { return n } -func (m *SigningValidator) Size() (n int) { +func (m *Version) Size() (n int) { var l int _ = l - l = m.Validator.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.SignedLastBlock { - n += 2 + if m.Block != 0 { + n += 1 + sovTypes(uint64(m.Block)) + } + if m.App != 0 { + n += 1 + sovTypes(uint64(m.App)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -8510,10 +9294,85 @@ func (m *SigningValidator) Size() (n int) { return n } -func (m *PubKey) Size() (n int) { +func (m *BlockID) Size() (n int) { var l int _ = l - l = len(m.Type) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.PartsHeader.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PartSetHeader) Size() (n int) { + var l int + _ = l + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) + } + l = len(m.Hash) + if l > 
0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Validator) Size() (n int) { + var l int + _ = l + l = len(m.Address) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ValidatorUpdate) Size() (n int) { + var l int + _ = l + l = m.PubKey.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *VoteInfo) Size() (n int) { + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.SignedLastBlock { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *PubKey) Size() (n int) { + var l int + _ = l + l = len(m.Type) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } @@ -9155,6 +10014,44 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockVersion", wireType) + } + m.BlockVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BlockVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field P2PVersion", wireType) + } + m.P2PVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.P2PVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -9433,7 +10330,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, Validator{}) + m.Validators = append(m.Validators, ValidatorUpdate{}) if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -10833,6 +11730,25 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHeight", wireType) } @@ -10851,7 +11767,7 @@ func (m *ResponseInfo) Unmarshal(dAtA []byte) error { break } } - case 4: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field LastBlockAppHash", wireType) } @@ -11120,7 +12036,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, Validator{}) + m.Validators = append(m.Validators, ValidatorUpdate{}) if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -11338,7 
+12254,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11348,21 +12264,23 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) if m.Proof == nil { - m.Proof = []byte{} + m.Proof = &merkle.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 9: @@ -11384,6 +12302,35 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { break } } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11694,6 +12641,35 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11922,6 +12898,35 @@ func (m *ResponseDeliverTx) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Codespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Codespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -11999,7 +13004,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorUpdates = append(m.ValidatorUpdates, Validator{}) + m.ValidatorUpdates = append(m.ValidatorUpdates, 
ValidatorUpdate{}) if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12228,7 +13233,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.BlockSize == nil { - m.BlockSize = &BlockSize{} + m.BlockSize = &BlockSizeParams{} } if err := m.BlockSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -12236,7 +13241,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TxSize", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Evidence", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12260,16 +13265,16 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TxSize == nil { - m.TxSize = &TxSize{} + if m.Evidence == nil { + m.Evidence = &EvidenceParams{} } - if err := m.TxSize.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Evidence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockGossip", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12293,10 +13298,10 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.BlockGossip == nil { - m.BlockGossip = &BlockGossip{} + if m.Validator == nil { + m.Validator = &ValidatorParams{} } - if err := m.BlockGossip.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12322,7 +13327,7 @@ func (m *ConsensusParams) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockSize) Unmarshal(dAtA []byte) error { +func (m *BlockSizeParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12345,10 +13350,10 @@ func (m *BlockSize) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockSize: wiretype end group for non-group") + return fmt.Errorf("proto: BlockSizeParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockSize: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BlockSizeParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -12365,31 +13370,12 @@ func (m *BlockSize) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxBytes |= (int32(b) & 0x7F) << shift + m.MaxBytes |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxTxs", wireType) - } - m.MaxTxs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxTxs |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) } @@ -12430,7 +13416,7 @@ func (m *BlockSize) Unmarshal(dAtA []byte) error { } return nil } -func (m *TxSize) Unmarshal(dAtA []byte) error { +func (m *EvidenceParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12453,36 
+13439,17 @@ func (m *TxSize) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TxSize: wiretype end group for non-group") + return fmt.Errorf("proto: EvidenceParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TxSize: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EvidenceParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) - } - m.MaxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxBytes |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) } - m.MaxGas = 0 + m.MaxAge = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12492,7 +13459,7 @@ func (m *TxSize) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxGas |= (int64(b) & 0x7F) << shift + m.MaxAge |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12519,7 +13486,7 @@ func (m *TxSize) Unmarshal(dAtA []byte) error { } return nil } -func (m *BlockGossip) Unmarshal(dAtA []byte) error { +func (m *ValidatorParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -12542,17 +13509,17 @@ func (m *BlockGossip) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BlockGossip: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatorParams: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BlockGossip: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatorParams: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockPartSizeBytes", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKeyTypes", wireType) } - m.BlockPartSizeBytes = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12562,13 +13529,23 @@ func (m *BlockGossip) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BlockPartSizeBytes |= (int32(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PubKeyTypes = append(m.PubKeyTypes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) if err != nil { return err @@ -12620,9 +13597,9 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CommitRound", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } - m.CommitRound = 0 + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12632,14 +13609,14 
@@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CommitRound |= (int32(b) & 0x7F) << shift + m.Round |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12663,8 +13640,8 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, SigningValidator{}) - if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12720,6 +13697,36 @@ func (m *Header) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ChainID", wireType) } @@ -12748,7 +13755,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } m.ChainID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) } @@ -12767,7 +13774,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { break } } - case 3: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } @@ -12797,7 +13804,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field NumTxs", wireType) } @@ -12811,12 +13818,12 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumTxs |= (int32(b) & 0x7F) << shift + m.NumTxs |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - case 5: + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field TotalTxs", wireType) } @@ -12835,9 +13842,39 @@ func (m *Header) Unmarshal(dAtA []byte) error { break } } - case 6: + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field 
LastCommitHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12861,12 +13898,43 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.LastBlockHash = append(m.LastBlockHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastBlockHash == nil { - m.LastBlockHash = []byte{} + m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} } iNdEx = postIndex - case 7: + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) + if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) } @@ -12897,7 +13965,69 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.ValidatorsHash = []byte{} } iNdEx = postIndex - case 8: + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} + } + iNdEx = postIndex + case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } @@ -12928,11 +14058,11 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 9: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12942,20 +14072,83 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Proposer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) + if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} } iNdEx = postIndex default: @@ -12980,7 +14173,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } -func (m *Validator) Unmarshal(dAtA []byte) error { +func (m *Version) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13003,17 +14196,17 @@ func (m *Validator) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Validator: wiretype end group for non-group") + return fmt.Errorf("proto: Version: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) } - var byteLen int + m.Block = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13023,28 +14216,16 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + m.Block |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) - if m.Address == nil { - m.Address = []byte{} - } - iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field App", wireType) } - var msglen int + m.App = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -13054,22 +14235,306 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.App |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex > l { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartsHeader", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PartsHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Validator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Validator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Validator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Address = append(m.Address[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Address == nil { + m.Address = []byte{} + } + iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) @@ -13111,7 +14576,107 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } return nil } -func (m *SigningValidator) Unmarshal(dAtA []byte) error { +func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VoteInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13134,10 +14699,10 @@ func (m *SigningValidator) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SigningValidator: wiretype end group for non-group") + return fmt.Errorf("proto: VoteInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SigningValidator: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13606,134 +15171,150 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_d8da2202f45d32c0) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_5b877df1938afe10) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_d8da2202f45d32c0) -} - -var fileDescriptor_types_d8da2202f45d32c0 = []byte{ - // 1959 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x73, 0x1b, 0x49, - 0x15, 0xf7, 0xc8, 0xb2, 0xa4, 0x79, 0xb2, 0x2d, 0xa7, 0x9d, 0xd8, 0x8a, 0x16, 0xec, 0x30, 0x45, - 0x65, 0x1d, 0xd6, 0x2b, 0x83, 0x97, 0x6c, 0x39, 0xbb, 0xb0, 0x85, 0xe5, 0x0d, 0x2b, 0xd7, 0x2e, - 0x60, 0x26, 0xd9, 0x50, 0xc5, 0x45, 0xd5, 0xd2, 0xb4, 0x47, 0x53, 0x91, 0x66, 0x66, 0xa7, 0x5b, - 0x5e, 0x39, 0x1f, 0x81, 0xda, 0xa2, 0xb8, 0x71, 0xe6, 0xc6, 0x17, 0xa0, 0x8a, 0x23, 0x27, 0x6a, - 0x8f, 0x1c, 0xa0, 0xe0, 0x14, 0xc0, 0x5b, 0x5c, 0xf8, 0x04, 0x1c, 0xa9, 0xd7, 0xdd, 0xf3, 0xd7, - 0xa3, 0x54, 0x12, 0x6e, 0x5c, 0xa4, 0xee, 0x7e, 0xef, 0xf5, 0xf4, 0x7b, 0xfd, 0xde, 0xfb, 0xbd, - 0xd7, 0xb0, 0x45, 0x87, 0x23, 0xef, 0x40, 0x5c, 0x86, 0x8c, 0xab, 0xdf, 0x6e, 0x18, 0x05, 0x22, - 0x20, 0x2b, 0x72, 0xd2, 0x79, 0xdb, 0xf5, 0xc4, 0x78, 0x36, 0xec, 0x8e, 0x82, 0xe9, 0x81, 0x1b, - 0xb8, 0xc1, 0x81, 0xa4, 0x0e, 0x67, 0xe7, 0x72, 0x26, 0x27, 0x72, 0xa4, 0xa4, 0x3a, 0xbb, 0x6e, - 0x10, 0xb8, 0x13, 0x96, 0x72, 0x09, 0x6f, 0xca, 0xb8, 0xa0, 0xd3, 0x50, 0x33, 0x1c, 0x65, 0xf6, - 0x13, 0xcc, 0x77, 0x58, 0x34, 0xf5, 0x7c, 0x91, 0x1d, 0x4e, 0xbc, 0x21, 0x3f, 0x18, 0x05, 0xd3, - 0x69, 0xe0, 0x67, 0x0f, 0x64, 0xfd, 0xb1, 0x0a, 0x75, 0x9b, 0x7d, 0x36, 0x63, 0x5c, 0x90, 0x3d, - 0xa8, 0xb2, 0xd1, 0x38, 0x68, 0x57, 0xee, 0x18, 0x7b, 0xcd, 0x43, 0xd2, 0x55, 0x7c, 0x9a, 0xfa, - 0x70, 0x34, 0x0e, 0xfa, 0x4b, 0xb6, 0xe4, 0x20, 0x6f, 0xc1, 0xca, 0xf9, 0x64, 0xc6, 0xc7, 0xed, - 0x65, 0xc9, 0xba, 0x99, 0x67, 0xfd, 0x21, 0x92, 0xfa, 0x4b, 0xb6, 0xe2, 0xc1, 0x6d, 0x3d, 0xff, - 0x3c, 0x68, 0x57, 0xcb, 0xb6, 0x3d, 0xf5, 0xcf, 0xe5, 0xb6, 0xc8, 0x41, 0x8e, 0x00, 0x38, 0x13, - 0x83, 0x20, 0x14, 0x5e, 0xe0, 0xb7, 0x57, 0x24, 0xff, 0x76, 0x9e, 0xff, 0x11, 0x13, 0x3f, 0x91, - 0xe4, 0xfe, 0x92, 0x6d, 0xf2, 0x78, 0x82, 0x92, 0x9e, 0xef, 0x89, 0xc1, 0x68, 0x4c, 0x3d, 0xbf, - 0x5d, 0x2b, 0x93, 0x3c, 0xf5, 0x3d, 0x71, 0x82, 0x64, 0x94, 0xf4, 0xe2, 0x09, 0xaa, 0xf2, 0xd9, - 0x8c, 0x45, 0x97, 0xed, 0x7a, 0x99, 0x2a, 0x3f, 0x45, 0x12, 0xaa, 0x22, 0x79, 0xc8, 0xfb, 0xd0, - 0x1c, 0x32, 0xd7, 0xf3, 0x07, 0xc3, 0x49, 0x30, 0x7a, 0xda, 0x6e, 0x48, 0x91, 0x76, 0x5e, 0xa4, - 0x87, 0x0c, 0x3d, 0xa4, 0xf7, 0x97, 0x6c, 0x18, 0x26, 0x33, 0x72, 0x08, 0x8d, 0xd1, 0x98, 0x8d, - 0x9e, 0x0e, 0xc4, 0xbc, 0x6d, 0x4a, 0xc9, 0x5b, 0x79, 
0xc9, 0x13, 0xa4, 0x3e, 0x9e, 0xf7, 0x97, - 0xec, 0xfa, 0x48, 0x0d, 0xc9, 0x7d, 0x30, 0x99, 0xef, 0xe8, 0xcf, 0x35, 0xa5, 0xd0, 0x56, 0xe1, - 0x5e, 0x7c, 0x27, 0xfe, 0x58, 0x83, 0xe9, 0x31, 0xe9, 0x42, 0x0d, 0xef, 0xda, 0x13, 0xed, 0x55, - 0x29, 0x73, 0xb3, 0xf0, 0x21, 0x49, 0xeb, 0x2f, 0xd9, 0x9a, 0x0b, 0xcd, 0xe7, 0xb0, 0x89, 0x77, - 0xc1, 0x22, 0x3c, 0xdc, 0x66, 0x99, 0xf9, 0x3e, 0x54, 0x74, 0x79, 0x3c, 0xd3, 0x89, 0x27, 0xbd, - 0x3a, 0xac, 0x5c, 0xd0, 0xc9, 0x8c, 0x59, 0x6f, 0x42, 0x33, 0xe3, 0x29, 0xa4, 0x0d, 0xf5, 0x29, - 0xe3, 0x9c, 0xba, 0xac, 0x6d, 0xdc, 0x31, 0xf6, 0x4c, 0x3b, 0x9e, 0x5a, 0xeb, 0xb0, 0x9a, 0xf5, - 0x93, 0x8c, 0x20, 0xfa, 0x02, 0x0a, 0x5e, 0xb0, 0x88, 0xa3, 0x03, 0x68, 0x41, 0x3d, 0xb5, 0xde, - 0x83, 0x8d, 0xa2, 0x13, 0x90, 0x0d, 0x58, 0x7e, 0xca, 0x2e, 0x35, 0x27, 0x0e, 0xc9, 0x4d, 0x7d, - 0x20, 0xe9, 0xc5, 0xa6, 0xad, 0x4f, 0xf7, 0x8b, 0x4a, 0x22, 0x9c, 0xf8, 0x01, 0x39, 0x82, 0x2a, - 0x06, 0x92, 0x94, 0x6e, 0x1e, 0x76, 0xba, 0x2a, 0xca, 0xba, 0x71, 0x94, 0x75, 0x1f, 0xc7, 0x51, - 0xd6, 0x6b, 0x7c, 0xf9, 0x7c, 0x77, 0xe9, 0x57, 0x7f, 0xdf, 0x35, 0x6c, 0x29, 0x41, 0x6e, 0xe3, - 0x55, 0x52, 0xcf, 0x1f, 0x78, 0x8e, 0xfe, 0x4e, 0x5d, 0xce, 0x4f, 0x1d, 0x72, 0x0c, 0x1b, 0xa3, - 0xc0, 0xe7, 0xcc, 0xe7, 0x33, 0x3e, 0x08, 0x69, 0x44, 0xa7, 0x5c, 0x47, 0x49, 0x7c, 0x71, 0x27, - 0x31, 0xf9, 0x4c, 0x52, 0xed, 0xd6, 0x28, 0xbf, 0x40, 0xde, 0x05, 0xb8, 0xa0, 0x13, 0xcf, 0xa1, - 0x22, 0x88, 0x78, 0xbb, 0x7a, 0x67, 0x79, 0xaf, 0x79, 0xb8, 0xa1, 0x85, 0x9f, 0xc4, 0x84, 0x5e, - 0x15, 0xcf, 0x64, 0x67, 0x38, 0xc9, 0x5d, 0x68, 0xd1, 0x30, 0x1c, 0x70, 0x41, 0x05, 0x1b, 0x0c, - 0x2f, 0x05, 0xe3, 0x32, 0x86, 0x56, 0xed, 0x35, 0x1a, 0x86, 0x8f, 0x70, 0xb5, 0x87, 0x8b, 0x96, - 0x93, 0xdc, 0x80, 0x74, 0x6f, 0x42, 0xa0, 0xea, 0x50, 0x41, 0xa5, 0x1d, 0x56, 0x6d, 0x39, 0xc6, - 0xb5, 0x90, 0x8a, 0xb1, 0xd6, 0x4e, 0x8e, 0xc9, 0x16, 0xd4, 0xc6, 0xcc, 0x73, 0xc7, 0x42, 0x2a, - 0xb4, 0x6c, 0xeb, 0x19, 0x9a, 0x3c, 0x8c, 0x82, 0x0b, 0x26, 0x23, 0xbc, 0x61, 0xab, 0x89, 0xf5, - 0x2f, 0x03, 0x6e, 0x5c, 0x0b, 0x09, 0xdc, 0x77, 0x4c, 0xf9, 0x38, 0xfe, 0x16, 0x8e, 0xc9, 0x5b, - 0xb8, 0x2f, 0x75, 0x58, 0xa4, 0x33, 0xcf, 0x9a, 0xd6, 0xb5, 0x2f, 0x17, 0xb5, 0xa2, 0x9a, 0x85, - 0x3c, 0x84, 0x8d, 0x09, 0xe5, 0x62, 0xa0, 0x3c, 0x77, 0x20, 0x33, 0xcb, 0x72, 0x2e, 0x9a, 0x3e, - 0xa1, 0xb1, 0x87, 0xa3, 0x43, 0x69, 0xf1, 0xf5, 0x49, 0x6e, 0x95, 0xf4, 0xe1, 0xe6, 0xf0, 0xf2, - 0x19, 0xf5, 0x85, 0xe7, 0xb3, 0xc1, 0x35, 0x6b, 0xb7, 0xf4, 0x56, 0x0f, 0x2f, 0x3c, 0x87, 0xf9, - 0x23, 0xa6, 0x37, 0xd9, 0x4c, 0x44, 0x92, 0x6b, 0xe0, 0xd6, 0x1d, 0x58, 0xcf, 0xc7, 0x2f, 0x59, - 0x87, 0x8a, 0x98, 0x6b, 0x0d, 0x2b, 0x62, 0x6e, 0x59, 0x89, 0xef, 0x25, 0x41, 0x74, 0x8d, 0xe7, - 0x1e, 0xb4, 0x0a, 0x01, 0x9d, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x82, 0xb5, 0x5c, 0x1c, 0x5b, - 0x5f, 0xac, 0x40, 0xc3, 0x66, 0x3c, 0x44, 0x37, 0x22, 0x47, 0x60, 0xb2, 0xf9, 0x88, 0xa9, 0x14, - 0x6a, 0x14, 0x12, 0x94, 0xe2, 0x79, 0x18, 0xd3, 0x31, 0x94, 0x13, 0x66, 0x72, 0x2f, 0x97, 0xfe, - 0x37, 0x8b, 0x42, 0xd9, 0xfc, 0xbf, 0x9f, 0xcf, 0xff, 0x37, 0x0b, 0xbc, 0x05, 0x00, 0xb8, 0x97, - 0x03, 0x80, 0xe2, 0xc6, 0x39, 0x04, 0x78, 0x50, 0x82, 0x00, 0xc5, 0xe3, 0x2f, 0x80, 0x80, 0x07, - 0x25, 0x10, 0xd0, 0xbe, 0xf6, 0xad, 0x52, 0x0c, 0xd8, 0xcf, 0x63, 0x40, 0x51, 0x9d, 0x02, 0x08, - 0x7c, 0xaf, 0x0c, 0x04, 0x6e, 0x17, 0x64, 0x16, 0xa2, 0xc0, 0x3b, 0xd7, 0x50, 0x60, 0xab, 0x20, - 0x5a, 0x02, 0x03, 0x0f, 0x72, 0xf9, 0x19, 0x4a, 0x75, 0x2b, 0x4f, 0xd0, 0xe4, 0xdd, 0xeb, 0x08, - 0xb2, 0x5d, 0xbc, 0xda, 0x32, 0x08, 0x39, 0x28, 0x40, 0xc8, 0xad, 0xe2, 0x29, 
0x0b, 0x18, 0x92, - 0x22, 0xc1, 0x3d, 0x8c, 0xfb, 0x82, 0xa7, 0x61, 0x8e, 0x60, 0x51, 0x14, 0x44, 0x3a, 0x55, 0xab, - 0x89, 0xb5, 0x87, 0x99, 0x28, 0xf5, 0xaf, 0x17, 0xa0, 0x86, 0x74, 0xfa, 0x8c, 0x77, 0x59, 0xbf, - 0x36, 0x52, 0x59, 0x19, 0xd1, 0xd9, 0x2c, 0x66, 0xea, 0x2c, 0x96, 0x01, 0x93, 0x4a, 0x0e, 0x4c, - 0xc8, 0xb7, 0xe0, 0x86, 0x4c, 0x23, 0xd2, 0x2e, 0x83, 0x5c, 0x5a, 0x6b, 0x21, 0x41, 0x19, 0x44, - 0xe5, 0xb7, 0xb7, 0x61, 0x33, 0xc3, 0x8b, 0x29, 0x56, 0xa6, 0xb0, 0xaa, 0x0c, 0xde, 0x8d, 0x84, - 0xfb, 0x38, 0x0c, 0xfb, 0x94, 0x8f, 0xad, 0x1f, 0xa5, 0xfa, 0xa7, 0x40, 0x45, 0xa0, 0x3a, 0x0a, - 0x1c, 0xa5, 0xd6, 0x9a, 0x2d, 0xc7, 0x08, 0x5e, 0x93, 0xc0, 0x95, 0x5f, 0x35, 0x6d, 0x1c, 0x22, - 0x57, 0x12, 0x29, 0xa6, 0x0a, 0x09, 0xeb, 0x97, 0x46, 0xba, 0x5f, 0x8a, 0x5d, 0x65, 0x30, 0x63, - 0xfc, 0x2f, 0x30, 0x53, 0x79, 0x59, 0x98, 0xb1, 0x7e, 0x67, 0xa4, 0x77, 0x91, 0x00, 0xc8, 0xeb, - 0x29, 0x87, 0x6e, 0xe1, 0xf9, 0x0e, 0x9b, 0xcb, 0x50, 0x5f, 0xb6, 0xd5, 0x24, 0x46, 0xf5, 0x9a, - 0x34, 0x70, 0x1e, 0xd5, 0xeb, 0x72, 0x4d, 0x4d, 0x34, 0xf0, 0x04, 0xe7, 0x32, 0x06, 0x57, 0x6d, - 0x35, 0xc9, 0xe4, 0x4d, 0x33, 0x97, 0x37, 0xcf, 0x80, 0x5c, 0x8f, 0x4e, 0xf2, 0x1e, 0x54, 0x05, - 0x75, 0xd1, 0x78, 0xa8, 0xff, 0x7a, 0x57, 0xd5, 0xc8, 0xdd, 0x8f, 0x9f, 0x9c, 0x51, 0x2f, 0xea, - 0x6d, 0xa1, 0xf6, 0xff, 0x7e, 0xbe, 0xbb, 0x8e, 0x3c, 0xfb, 0xc1, 0xd4, 0x13, 0x6c, 0x1a, 0x8a, - 0x4b, 0x5b, 0xca, 0x58, 0x7f, 0x31, 0x30, 0x6b, 0xe7, 0xa2, 0xb6, 0xd4, 0x16, 0xb1, 0x6b, 0x56, - 0x32, 0x00, 0xfb, 0x72, 0xf6, 0xf9, 0x3a, 0x80, 0x4b, 0xf9, 0xe0, 0x73, 0xea, 0x0b, 0xe6, 0x68, - 0x23, 0x99, 0x2e, 0xe5, 0x3f, 0x93, 0x0b, 0x58, 0x87, 0x20, 0x79, 0xc6, 0x99, 0x23, 0xad, 0xb5, - 0x6c, 0xd7, 0x5d, 0xca, 0x3f, 0xe5, 0xcc, 0x49, 0xf4, 0xaa, 0xbf, 0x86, 0x5e, 0x7f, 0xcd, 0xb8, - 0x5c, 0x0a, 0x59, 0xff, 0x0f, 0x9a, 0x7d, 0x65, 0x20, 0x16, 0xe7, 0xd3, 0x1e, 0x39, 0x81, 0x1b, - 0x89, 0x7b, 0x0f, 0x66, 0xa1, 0x43, 0xb1, 0x72, 0x32, 0x5e, 0x18, 0x0f, 0x1b, 0x89, 0xc0, 0xa7, - 0x8a, 0x9f, 0xfc, 0x18, 0xb6, 0x0b, 0x01, 0x99, 0x6c, 0x55, 0x79, 0x61, 0x5c, 0xde, 0xca, 0xc7, - 0x65, 0xbc, 0x5f, 0xac, 0xe5, 0xf2, 0x6b, 0x68, 0xf9, 0x4d, 0x2c, 0x49, 0xb2, 0x69, 0xba, 0xec, - 0x9e, 0xac, 0xdf, 0x18, 0xd0, 0x2a, 0x1c, 0x86, 0x1c, 0x00, 0xa8, 0x2c, 0xc7, 0xbd, 0x67, 0x71, - 0x61, 0x1c, 0xdb, 0x40, 0x1a, 0xeb, 0x91, 0xf7, 0x8c, 0xd9, 0xe6, 0x30, 0x1e, 0x92, 0xbb, 0x50, - 0x17, 0x73, 0xc5, 0x9d, 0x2f, 0xde, 0x1e, 0xcf, 0x25, 0x6b, 0x4d, 0xc8, 0x7f, 0x72, 0x1f, 0x56, - 0xd5, 0xc6, 0x6e, 0xc0, 0xb9, 0x17, 0xea, 0xc2, 0x81, 0x64, 0xb7, 0xfe, 0x48, 0x52, 0xec, 0xe6, - 0x30, 0x9d, 0x58, 0x3f, 0x07, 0x33, 0xf9, 0x2c, 0x79, 0x03, 0xcc, 0x29, 0x9d, 0xeb, 0xca, 0x16, - 0xcf, 0xb6, 0x62, 0x37, 0xa6, 0x74, 0x2e, 0x8b, 0x5a, 0xb2, 0x0d, 0x75, 0x24, 0x8a, 0xb9, 0xb2, - 0xf7, 0x8a, 0x5d, 0x9b, 0xd2, 0xf9, 0xe3, 0x79, 0x42, 0x70, 0x29, 0x8f, 0xcb, 0xd6, 0x29, 0x9d, - 0x7f, 0x44, 0xb9, 0xf5, 0x01, 0xd4, 0xd4, 0x21, 0x5f, 0x6a, 0x63, 0x94, 0xaf, 0xe4, 0xe4, 0x7f, - 0x00, 0xcd, 0xcc, 0xb9, 0xc9, 0x77, 0xe0, 0x96, 0xd2, 0x30, 0xa4, 0x91, 0x90, 0x16, 0xc9, 0x6d, - 0x48, 0x24, 0xf1, 0x8c, 0x46, 0x02, 0x3f, 0xa9, 0x0a, 0xf1, 0x08, 0xd6, 0xf3, 0xc5, 0x2a, 0xf9, - 0x06, 0xac, 0xea, 0xc2, 0x36, 0x0a, 0x66, 0xbe, 0xa3, 0x65, 0x9b, 0x6a, 0xcd, 0xc6, 0x25, 0xf2, - 0xfd, 0x92, 0xb4, 0x1d, 0x23, 0xfa, 0x23, 0xcf, 0xf5, 0x3d, 0xdf, 0x7d, 0x51, 0xf6, 0xfe, 0x5b, - 0x05, 0x6a, 0xaa, 0xb0, 0x26, 0x77, 0x33, 0x5d, 0x8c, 0x44, 0xcd, 0x5e, 0xf3, 0xea, 0xf9, 0x6e, - 0x5d, 0x02, 0xcc, 0xe9, 0x87, 0x69, 0x4b, 0x93, 0x26, 0xd4, 0x4a, 0xae, 0xee, 0x8f, 0xfb, 0xa7, - 0xe5, 
0x57, 0xee, 0x9f, 0xb6, 0xa1, 0xee, 0xcf, 0xa6, 0xf2, 0xb2, 0xaa, 0xea, 0xb2, 0xfc, 0xd9, - 0x14, 0x2f, 0xeb, 0x0d, 0x30, 0x45, 0x20, 0xe8, 0x44, 0x92, 0x54, 0x52, 0x68, 0xc8, 0x05, 0x24, - 0xde, 0x85, 0x56, 0x16, 0xb3, 0x11, 0x83, 0x15, 0x44, 0xac, 0xa5, 0x88, 0x8d, 0xfd, 0xc4, 0x9b, - 0xd0, 0x4a, 0x15, 0x56, 0x7c, 0x0a, 0x36, 0xd6, 0xd3, 0x65, 0xc9, 0x78, 0x1b, 0x1a, 0x09, 0x9a, - 0x2b, 0x08, 0xa9, 0x53, 0x05, 0xe2, 0xd8, 0xac, 0x87, 0x51, 0x10, 0x06, 0x9c, 0x45, 0xba, 0x4c, - 0x5b, 0x94, 0x0a, 0x12, 0x3e, 0xcb, 0x03, 0x33, 0x21, 0x62, 0xe9, 0x41, 0x1d, 0x27, 0x62, 0x9c, - 0xeb, 0x2a, 0x3f, 0x9e, 0x92, 0x7d, 0xa8, 0x87, 0xb3, 0xe1, 0x00, 0x11, 0x2e, 0x1f, 0x32, 0x67, - 0xb3, 0xe1, 0xc7, 0xec, 0x32, 0xee, 0x77, 0x42, 0x39, 0x93, 0x18, 0x17, 0x7c, 0xce, 0x22, 0xed, - 0xbc, 0x6a, 0x62, 0x09, 0xd8, 0x28, 0xde, 0x35, 0xf9, 0x2e, 0x98, 0x89, 0x7e, 0x85, 0xd0, 0x2d, - 0x9e, 0x39, 0x65, 0xc4, 0x42, 0x88, 0x7b, 0xae, 0xcf, 0x9c, 0x41, 0x6a, 0x5b, 0x79, 0xae, 0x86, - 0xdd, 0x52, 0x84, 0x4f, 0x62, 0xe3, 0x5a, 0xdf, 0x86, 0x9a, 0x3a, 0x23, 0xe6, 0x13, 0xdc, 0x39, - 0x2e, 0xb6, 0x70, 0x5c, 0x9a, 0x63, 0xfe, 0x6c, 0x40, 0x23, 0x6e, 0xa2, 0x4a, 0x85, 0x72, 0x87, - 0xae, 0xbc, 0xec, 0xa1, 0x17, 0x75, 0xa2, 0xb1, 0x47, 0x56, 0x5f, 0xd9, 0x23, 0xf7, 0x81, 0x28, - 0xc7, 0xbb, 0x08, 0x84, 0xe7, 0xbb, 0x03, 0x65, 0x73, 0xe5, 0x81, 0x1b, 0x92, 0xf2, 0x44, 0x12, - 0xce, 0x70, 0xfd, 0xf0, 0x8b, 0x15, 0x68, 0x1d, 0xf7, 0x4e, 0x4e, 0x8f, 0xc3, 0x70, 0xe2, 0x8d, - 0xa8, 0xac, 0xf0, 0x0e, 0xa0, 0x2a, 0x6b, 0xd8, 0x92, 0x77, 0xb3, 0x4e, 0x59, 0x33, 0x45, 0x0e, - 0x61, 0x45, 0x96, 0xb2, 0xa4, 0xec, 0xf9, 0xac, 0x53, 0xda, 0x53, 0xe1, 0x47, 0x54, 0xb1, 0x7b, - 0xfd, 0x15, 0xad, 0x53, 0xd6, 0x58, 0x91, 0x0f, 0xc0, 0x4c, 0x8b, 0xd0, 0x45, 0x6f, 0x69, 0x9d, - 0x85, 0x2d, 0x16, 0xca, 0xa7, 0x15, 0xc0, 0xa2, 0x27, 0xa1, 0xce, 0xc2, 0x5e, 0x84, 0x1c, 0x41, - 0x3d, 0xae, 0x8c, 0xca, 0x5f, 0xbb, 0x3a, 0x0b, 0xda, 0x1f, 0x34, 0x8f, 0xaa, 0x2e, 0xcb, 0x9e, - 0xe4, 0x3a, 0xa5, 0x3d, 0x1a, 0xb9, 0x0f, 0x35, 0x0d, 0x78, 0xa5, 0x2f, 0x5e, 0x9d, 0xf2, 0x26, - 0x06, 0x95, 0x4c, 0x2b, 0xeb, 0x45, 0xcf, 0x86, 0x9d, 0x85, 0xcd, 0x24, 0x39, 0x06, 0xc8, 0x54, - 0x94, 0x0b, 0xdf, 0x03, 0x3b, 0x8b, 0x9b, 0x44, 0xf2, 0x3e, 0x34, 0xd2, 0xc6, 0xbf, 0xfc, 0x85, - 0xaf, 0xb3, 0xa8, 0x6f, 0xeb, 0x7d, 0xed, 0x3f, 0xff, 0xdc, 0x31, 0x7e, 0x7b, 0xb5, 0x63, 0xfc, - 0xfe, 0x6a, 0xc7, 0xf8, 0xf2, 0x6a, 0xc7, 0xf8, 0xd3, 0xd5, 0x8e, 0xf1, 0x8f, 0xab, 0x1d, 0xe3, - 0x0f, 0x5f, 0xed, 0x18, 0xc3, 0x9a, 0x74, 0xff, 0x77, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc1, - 0xc2, 0x93, 0xfb, 0x94, 0x16, 0x00, 0x00, + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_5b877df1938afe10) +} + +var fileDescriptor_types_5b877df1938afe10 = []byte{ + // 2214 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x73, 0x1b, 0xc7, + 0xd1, 0xe7, 0x82, 0x20, 0x81, 0x6d, 0x10, 0x0f, 0x8d, 0x28, 0x09, 0xc2, 0xe7, 0x8f, 0x54, 0xad, + 0x12, 0x5b, 0x8c, 0x65, 0xd0, 0xa6, 0xa3, 0x14, 0x65, 0x39, 0xa9, 0x22, 0x24, 0xc5, 0x64, 0xd9, + 0x49, 0x98, 0x95, 0xc4, 0x5c, 0x52, 0xb5, 0x35, 0xc0, 0x8e, 0x80, 0x2d, 0x02, 0xbb, 0xeb, 0xdd, + 0x01, 0x0d, 0xea, 0x98, 0xb3, 0x0f, 0x3e, 0xe4, 0x8f, 0xc8, 0x35, 0x37, 0x1f, 0x73, 0x4a, 0xf9, + 0x98, 0x43, 0xce, 0x4a, 0xc2, 0x54, 0x0e, 0xc9, 0x35, 0x95, 0xaa, 0x1c, 0x53, 0xd3, 0x33, 0xb3, + 0x2f, 0x2e, 0x14, 0xcb, 0xc9, 0x29, 0x17, 0x60, 0xa6, 0x1f, 0xf3, 0xe8, 0xed, 0xee, 0x5f, 0xf7, + 0xc0, 0x75, 0x3a, 0x1c, 0x79, 0xbb, 0xfc, 0x3c, 0x64, 0xb1, 0xfc, 0xed, 0x87, 
0x51, 0xc0, 0x03, + 0xb2, 0x86, 0x93, 0xde, 0x3b, 0x63, 0x8f, 0x4f, 0xe6, 0xc3, 0xfe, 0x28, 0x98, 0xed, 0x8e, 0x83, + 0x71, 0xb0, 0x8b, 0xdc, 0xe1, 0xfc, 0x39, 0xce, 0x70, 0x82, 0x23, 0xa9, 0xd5, 0xdb, 0x1e, 0x07, + 0xc1, 0x78, 0xca, 0x52, 0x29, 0xee, 0xcd, 0x58, 0xcc, 0xe9, 0x2c, 0x54, 0x02, 0xfb, 0x99, 0xf5, + 0x38, 0xf3, 0x5d, 0x16, 0xcd, 0x3c, 0x9f, 0x67, 0x87, 0x53, 0x6f, 0x18, 0xef, 0x8e, 0x82, 0xd9, + 0x2c, 0xf0, 0xb3, 0x07, 0xea, 0x3d, 0xf8, 0xb7, 0x9a, 0xa3, 0xe8, 0x3c, 0xe4, 0xc1, 0xee, 0x8c, + 0x45, 0xa7, 0x53, 0xa6, 0xfe, 0xa4, 0xb2, 0xf5, 0xdb, 0x2a, 0xd4, 0x6c, 0xf6, 0xe9, 0x9c, 0xc5, + 0x9c, 0xdc, 0x81, 0x2a, 0x1b, 0x4d, 0x82, 0x6e, 0xe5, 0x96, 0x71, 0xa7, 0xb1, 0x47, 0xfa, 0x72, + 0x13, 0xc5, 0x7d, 0x3c, 0x9a, 0x04, 0x87, 0x2b, 0x36, 0x4a, 0x90, 0xb7, 0x61, 0xed, 0xf9, 0x74, + 0x1e, 0x4f, 0xba, 0xab, 0x28, 0x7a, 0x35, 0x2f, 0xfa, 0x43, 0xc1, 0x3a, 0x5c, 0xb1, 0xa5, 0x8c, + 0x58, 0xd6, 0xf3, 0x9f, 0x07, 0xdd, 0x6a, 0xd9, 0xb2, 0x47, 0xfe, 0x73, 0x5c, 0x56, 0x48, 0x90, + 0x7d, 0x80, 0x98, 0x71, 0x27, 0x08, 0xb9, 0x17, 0xf8, 0xdd, 0x35, 0x94, 0xbf, 0x91, 0x97, 0x7f, + 0xc2, 0xf8, 0x4f, 0x90, 0x7d, 0xb8, 0x62, 0x9b, 0xb1, 0x9e, 0x08, 0x4d, 0xcf, 0xf7, 0xb8, 0x33, + 0x9a, 0x50, 0xcf, 0xef, 0xae, 0x97, 0x69, 0x1e, 0xf9, 0x1e, 0x7f, 0x28, 0xd8, 0x42, 0xd3, 0xd3, + 0x13, 0x71, 0x95, 0x4f, 0xe7, 0x2c, 0x3a, 0xef, 0xd6, 0xca, 0xae, 0xf2, 0x53, 0xc1, 0x12, 0x57, + 0x41, 0x19, 0xf2, 0x00, 0x1a, 0x43, 0x36, 0xf6, 0x7c, 0x67, 0x38, 0x0d, 0x46, 0xa7, 0xdd, 0x3a, + 0xaa, 0x74, 0xf3, 0x2a, 0x03, 0x21, 0x30, 0x10, 0xfc, 0xc3, 0x15, 0x1b, 0x86, 0xc9, 0x8c, 0xec, + 0x41, 0x7d, 0x34, 0x61, 0xa3, 0x53, 0x87, 0x2f, 0xba, 0x26, 0x6a, 0x5e, 0xcb, 0x6b, 0x3e, 0x14, + 0xdc, 0xa7, 0x8b, 0xc3, 0x15, 0xbb, 0x36, 0x92, 0x43, 0x72, 0x0f, 0x4c, 0xe6, 0xbb, 0x6a, 0xbb, + 0x06, 0x2a, 0x5d, 0x2f, 0x7c, 0x17, 0xdf, 0xd5, 0x9b, 0xd5, 0x99, 0x1a, 0x93, 0x3e, 0xac, 0x0b, + 0x47, 0xf1, 0x78, 0x77, 0x03, 0x75, 0x36, 0x0b, 0x1b, 0x21, 0xef, 0x70, 0xc5, 0x56, 0x52, 0xc2, + 0x7c, 0x2e, 0x9b, 0x7a, 0x67, 0x2c, 0x12, 0x87, 0xbb, 0x5a, 0x66, 0xbe, 0x47, 0x92, 0x8f, 0xc7, + 0x33, 0x5d, 0x3d, 0x19, 0xd4, 0x60, 0xed, 0x8c, 0x4e, 0xe7, 0xcc, 0x7a, 0x0b, 0x1a, 0x19, 0x4f, + 0x21, 0x5d, 0xa8, 0xcd, 0x58, 0x1c, 0xd3, 0x31, 0xeb, 0x1a, 0xb7, 0x8c, 0x3b, 0xa6, 0xad, 0xa7, + 0x56, 0x0b, 0x36, 0xb2, 0x7e, 0x62, 0xcd, 0x12, 0x45, 0xe1, 0x0b, 0x42, 0xf1, 0x8c, 0x45, 0xb1, + 0x70, 0x00, 0xa5, 0xa8, 0xa6, 0xe4, 0x36, 0x34, 0xd1, 0x0e, 0x8e, 0xe6, 0x0b, 0x3f, 0xad, 0xda, + 0x1b, 0x48, 0x3c, 0x51, 0x42, 0xdb, 0xd0, 0x08, 0xf7, 0xc2, 0x44, 0x64, 0x15, 0x45, 0x20, 0xdc, + 0x0b, 0x95, 0x80, 0xf5, 0x01, 0x74, 0x8a, 0xae, 0x44, 0x3a, 0xb0, 0x7a, 0xca, 0xce, 0xd5, 0x7e, + 0x62, 0x48, 0x36, 0xd5, 0xb5, 0x70, 0x0f, 0xd3, 0x56, 0x77, 0xfc, 0xa2, 0x92, 0x28, 0x27, 0xde, + 0x44, 0xf6, 0xa1, 0x2a, 0x62, 0x19, 0xb5, 0x1b, 0x7b, 0xbd, 0xbe, 0x0c, 0xf4, 0xbe, 0x0e, 0xf4, + 0xfe, 0x53, 0x1d, 0xe8, 0x83, 0xfa, 0x57, 0x2f, 0xb7, 0x57, 0xbe, 0xf8, 0xc3, 0xb6, 0x61, 0xa3, + 0x06, 0xb9, 0x29, 0x1c, 0x82, 0x7a, 0xbe, 0xe3, 0xb9, 0x6a, 0x9f, 0x1a, 0xce, 0x8f, 0x5c, 0x72, + 0x00, 0x9d, 0x51, 0xe0, 0xc7, 0xcc, 0x8f, 0xe7, 0xb1, 0x13, 0xd2, 0x88, 0xce, 0x62, 0x15, 0x6b, + 0xfa, 0xf3, 0x3f, 0xd4, 0xec, 0x63, 0xe4, 0xda, 0xed, 0x51, 0x9e, 0x40, 0x3e, 0x04, 0x38, 0xa3, + 0x53, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x6e, 0xf5, 0xd6, 0x6a, 0x46, 0xf9, 0x44, 0x33, 0x9e, 0x85, + 0x2e, 0xe5, 0x6c, 0x50, 0x15, 0x27, 0xb3, 0x33, 0xf2, 0xe4, 0x4d, 0x68, 0xd3, 0x30, 0x74, 0x62, + 0x4e, 0x39, 0x73, 0x86, 0xe7, 0x9c, 0xc5, 0x18, 0x8f, 0x1b, 0x76, 0x93, 0x86, 0xe1, 0x13, 0x41, + 0x1d, 
0x08, 0xa2, 0xe5, 0x26, 0x5f, 0x13, 0x43, 0x85, 0x10, 0xa8, 0xba, 0x94, 0x53, 0xb4, 0xc6, + 0x86, 0x8d, 0x63, 0x41, 0x0b, 0x29, 0x9f, 0xa8, 0x3b, 0xe2, 0x98, 0x5c, 0x87, 0xf5, 0x09, 0xf3, + 0xc6, 0x13, 0x8e, 0xd7, 0x5a, 0xb5, 0xd5, 0x4c, 0x18, 0x3e, 0x8c, 0x82, 0x33, 0x86, 0xd9, 0xa2, + 0x6e, 0xcb, 0x89, 0xf5, 0x17, 0x03, 0xae, 0x5c, 0x0a, 0x2f, 0xb1, 0xee, 0x84, 0xc6, 0x13, 0xbd, + 0x97, 0x18, 0x93, 0xb7, 0xc5, 0xba, 0xd4, 0x65, 0x91, 0xca, 0x62, 0x4d, 0x75, 0xe3, 0x43, 0x24, + 0xaa, 0x8b, 0x2a, 0x11, 0xf2, 0x18, 0x3a, 0x53, 0x1a, 0x73, 0x47, 0x46, 0x81, 0x83, 0x59, 0x6a, + 0x35, 0x17, 0x99, 0x9f, 0x50, 0x1d, 0x2d, 0xc2, 0x39, 0x95, 0x7a, 0x6b, 0x9a, 0xa3, 0x92, 0x43, + 0xd8, 0x1c, 0x9e, 0xbf, 0xa0, 0x3e, 0xf7, 0x7c, 0xe6, 0x5c, 0xb2, 0x79, 0x5b, 0x2d, 0xf5, 0xf8, + 0xcc, 0x73, 0x99, 0x3f, 0xd2, 0xc6, 0xbe, 0x9a, 0xa8, 0x24, 0x1f, 0x23, 0xb6, 0x6e, 0x41, 0x2b, + 0x9f, 0x0b, 0x48, 0x0b, 0x2a, 0x7c, 0xa1, 0x6e, 0x58, 0xe1, 0x0b, 0xcb, 0x4a, 0x3c, 0x30, 0x09, + 0xc8, 0x4b, 0x32, 0x3b, 0xd0, 0x2e, 0x24, 0x87, 0x8c, 0xb9, 0x8d, 0xac, 0xb9, 0xad, 0x36, 0x34, + 0x73, 0x39, 0xc1, 0xfa, 0x7c, 0x0d, 0xea, 0x36, 0x8b, 0x43, 0xe1, 0x4c, 0x64, 0x1f, 0x4c, 0xb6, + 0x18, 0x31, 0x99, 0x8e, 0x8d, 0x42, 0xb2, 0x93, 0x32, 0x8f, 0x35, 0x5f, 0xa4, 0x85, 0x44, 0x98, + 0xec, 0xe4, 0xa0, 0xe4, 0x6a, 0x51, 0x29, 0x8b, 0x25, 0x77, 0xf3, 0x58, 0xb2, 0x59, 0x90, 0x2d, + 0x80, 0xc9, 0x4e, 0x0e, 0x4c, 0x8a, 0x0b, 0xe7, 0xd0, 0xe4, 0x7e, 0x09, 0x9a, 0x14, 0x8f, 0xbf, + 0x04, 0x4e, 0xee, 0x97, 0xc0, 0x49, 0xf7, 0xd2, 0x5e, 0xa5, 0x78, 0x72, 0x37, 0x8f, 0x27, 0xc5, + 0xeb, 0x14, 0x00, 0xe5, 0xc3, 0x32, 0x40, 0xb9, 0x59, 0xd0, 0x59, 0x8a, 0x28, 0xef, 0x5f, 0x42, + 0x94, 0xeb, 0x05, 0xd5, 0x12, 0x48, 0xb9, 0x9f, 0xcb, 0xf5, 0x50, 0x7a, 0xb7, 0xf2, 0x64, 0x4f, + 0xbe, 0x77, 0x19, 0x8d, 0x6e, 0x14, 0x3f, 0x6d, 0x19, 0x1c, 0xed, 0x16, 0xe0, 0xe8, 0x5a, 0xf1, + 0x94, 0x05, 0x3c, 0x4a, 0x51, 0x65, 0x47, 0xc4, 0x7d, 0xc1, 0xd3, 0x44, 0x8e, 0x60, 0x51, 0x14, + 0x44, 0x2a, 0x61, 0xcb, 0x89, 0x75, 0x47, 0x64, 0xa2, 0xd4, 0xbf, 0x5e, 0x81, 0x40, 0xe8, 0xf4, + 0x19, 0xef, 0xb2, 0xbe, 0x34, 0x52, 0x5d, 0x8c, 0xe8, 0x6c, 0x16, 0x33, 0x55, 0x16, 0xcb, 0x00, + 0x53, 0x25, 0x0f, 0x4c, 0xdb, 0xd0, 0x10, 0xb9, 0xb2, 0x80, 0x39, 0x34, 0xd4, 0x98, 0x43, 0xbe, + 0x03, 0x57, 0x30, 0xcf, 0x48, 0xf8, 0x52, 0x81, 0x58, 0xc5, 0x40, 0x6c, 0x0b, 0x86, 0xb4, 0x98, + 0x4c, 0x80, 0xef, 0xc0, 0xd5, 0x8c, 0xac, 0x58, 0x17, 0x73, 0x9c, 0x4c, 0xbe, 0x9d, 0x44, 0xfa, + 0x20, 0x0c, 0x0f, 0x69, 0x3c, 0xb1, 0x7e, 0x94, 0x1a, 0x28, 0xc5, 0x33, 0x02, 0xd5, 0x51, 0xe0, + 0xca, 0x7b, 0x37, 0x6d, 0x1c, 0x0b, 0x8c, 0x9b, 0x06, 0x63, 0x3c, 0x9c, 0x69, 0x8b, 0xa1, 0x90, + 0x4a, 0x42, 0xc9, 0x94, 0x31, 0x63, 0xfd, 0xd2, 0x48, 0xd7, 0x4b, 0x21, 0xae, 0x0c, 0x8d, 0x8c, + 0xff, 0x04, 0x8d, 0x2a, 0xaf, 0x87, 0x46, 0xd6, 0x85, 0x91, 0x7e, 0xb2, 0x04, 0x67, 0xbe, 0xd9, + 0x15, 0x85, 0xf7, 0x78, 0xbe, 0xcb, 0x16, 0x68, 0xd2, 0x55, 0x5b, 0x4e, 0x74, 0x09, 0xb0, 0x8e, + 0x66, 0xce, 0x97, 0x00, 0x35, 0xa4, 0xc9, 0x09, 0xb9, 0x8d, 0xf8, 0x14, 0x3c, 0x57, 0xa1, 0xda, + 0xec, 0xab, 0x6a, 0xfa, 0x58, 0x10, 0x6d, 0xc9, 0xcb, 0x64, 0x5b, 0x33, 0x07, 0x6e, 0x6f, 0x80, + 0x29, 0x0e, 0x1a, 0x87, 0x74, 0xc4, 0x30, 0xf2, 0x4c, 0x3b, 0x25, 0x58, 0xc7, 0x40, 0x2e, 0x47, + 0x3c, 0xf9, 0x00, 0xaa, 0x9c, 0x8e, 0x85, 0xbd, 0x85, 0xc9, 0x5a, 0x7d, 0xd9, 0x00, 0xf4, 0x3f, + 0x3e, 0x39, 0xa6, 0x5e, 0x34, 0xb8, 0x2e, 0x4c, 0xf5, 0xb7, 0x97, 0xdb, 0x2d, 0x21, 0x73, 0x37, + 0x98, 0x79, 0x9c, 0xcd, 0x42, 0x7e, 0x6e, 0xa3, 0x8e, 0xf5, 0x77, 0x43, 0x20, 0x41, 0x2e, 0x13, + 0x94, 0x1a, 0x4e, 0xbb, 0x7b, 
0x25, 0x03, 0xda, 0x5f, 0xcf, 0x98, 0xff, 0x0f, 0x30, 0xa6, 0xb1, + 0xf3, 0x19, 0xf5, 0x39, 0x73, 0x95, 0x45, 0xcd, 0x31, 0x8d, 0x7f, 0x86, 0x04, 0x51, 0xe1, 0x08, + 0xf6, 0x3c, 0x66, 0x2e, 0x9a, 0x76, 0xd5, 0xae, 0x8d, 0x69, 0xfc, 0x2c, 0x66, 0x6e, 0x72, 0xaf, + 0xda, 0xeb, 0xdf, 0x2b, 0x6f, 0xc7, 0x7a, 0xd1, 0x8e, 0xff, 0xc8, 0xf8, 0x70, 0x0a, 0x92, 0xff, + 0xfb, 0xf7, 0xfe, 0xab, 0x21, 0x6a, 0x83, 0x7c, 0x1a, 0x26, 0x47, 0x70, 0x25, 0x89, 0x23, 0x67, + 0x8e, 0xf1, 0xa5, 0x7d, 0xe9, 0xd5, 0xe1, 0xd7, 0x39, 0xcb, 0x93, 0x63, 0xf2, 0x63, 0xb8, 0x51, + 0xc8, 0x02, 0xc9, 0x82, 0x95, 0x57, 0x26, 0x83, 0x6b, 0xf9, 0x64, 0xa0, 0xd7, 0xd3, 0x96, 0x58, + 0xfd, 0x06, 0x9e, 0xfd, 0x2d, 0x51, 0x28, 0x65, 0xc1, 0xa3, 0xec, 0x5b, 0x5a, 0xbf, 0x36, 0xa0, + 0x5d, 0x38, 0x0c, 0xb9, 0x07, 0x20, 0x53, 0x6b, 0xec, 0xbd, 0x60, 0x85, 0x2c, 0x86, 0x26, 0x7b, + 0xe2, 0xbd, 0x60, 0xea, 0xe0, 0xe6, 0x50, 0x13, 0xc8, 0x7b, 0x50, 0x67, 0xaa, 0x80, 0x53, 0xb7, + 0xbd, 0x56, 0xa8, 0xeb, 0x94, 0x4e, 0x22, 0x46, 0xbe, 0x0b, 0x66, 0x62, 0xc3, 0x42, 0xf1, 0x9e, + 0x98, 0x5c, 0x6f, 0x94, 0x08, 0x5a, 0x1f, 0x41, 0xbb, 0x70, 0x0c, 0xf2, 0x7f, 0x60, 0xce, 0xe8, + 0x42, 0x55, 0xe1, 0xb2, 0x7e, 0xab, 0xcf, 0xe8, 0x02, 0x0b, 0x70, 0x72, 0x03, 0x6a, 0x82, 0x39, + 0xa6, 0xf2, 0x2b, 0xac, 0xda, 0xeb, 0x33, 0xba, 0xf8, 0x88, 0xc6, 0xd6, 0x0e, 0xb4, 0xf2, 0x47, + 0xd3, 0xa2, 0x1a, 0x11, 0xa5, 0xe8, 0xc1, 0x98, 0x59, 0xf7, 0xa0, 0x5d, 0x38, 0x11, 0xb1, 0xa0, + 0x19, 0xce, 0x87, 0xce, 0x29, 0x3b, 0x77, 0xf0, 0xc8, 0xe8, 0x33, 0xa6, 0xdd, 0x08, 0xe7, 0xc3, + 0x8f, 0xd9, 0xf9, 0x53, 0x41, 0xb2, 0x9e, 0x40, 0x2b, 0x5f, 0x1f, 0x8b, 0x9c, 0x19, 0x05, 0x73, + 0xdf, 0xc5, 0xf5, 0xd7, 0x6c, 0x39, 0x11, 0x2d, 0xf6, 0x59, 0x20, 0xdd, 0x24, 0x5b, 0x10, 0x9f, + 0x04, 0x9c, 0x65, 0xaa, 0x6a, 0x29, 0x63, 0xfd, 0x62, 0x0d, 0xd6, 0x65, 0xb1, 0x4e, 0xfa, 0xf9, + 0x56, 0x50, 0xf8, 0x88, 0xd2, 0x94, 0x54, 0xa5, 0x98, 0xe0, 0xf0, 0x9b, 0xc5, 0x7e, 0x6a, 0xd0, + 0xb8, 0x78, 0xb9, 0x5d, 0x43, 0x0c, 0x3b, 0x7a, 0x94, 0x36, 0x57, 0xcb, 0x7a, 0x0f, 0xdd, 0xc9, + 0x55, 0x5f, 0xbb, 0x93, 0xbb, 0x01, 0x35, 0x7f, 0x3e, 0x73, 0xf8, 0x22, 0x56, 0xb9, 0x60, 0xdd, + 0x9f, 0xcf, 0x9e, 0x2e, 0xf0, 0xd3, 0xf1, 0x80, 0xd3, 0x29, 0xb2, 0x64, 0x26, 0xa8, 0x23, 0x41, + 0x30, 0xf7, 0xa1, 0x99, 0x81, 0x7a, 0xcf, 0x55, 0x25, 0x63, 0x2b, 0xeb, 0x8d, 0x47, 0x8f, 0xd4, + 0x2d, 0x1b, 0x09, 0xf4, 0x1f, 0xb9, 0xe4, 0x4e, 0xbe, 0x71, 0xc1, 0x0a, 0xa1, 0x8e, 0x8e, 0x9f, + 0xe9, 0x4d, 0x44, 0x7d, 0x20, 0x0e, 0x20, 0x42, 0x41, 0x8a, 0x98, 0x28, 0x52, 0x17, 0x04, 0x64, + 0xbe, 0x05, 0xed, 0x14, 0x64, 0xa5, 0x08, 0xc8, 0x55, 0x52, 0x32, 0x0a, 0xbe, 0x0b, 0x9b, 0x3e, + 0x5b, 0x70, 0xa7, 0x28, 0xdd, 0x40, 0x69, 0x22, 0x78, 0x27, 0x79, 0x8d, 0x6f, 0x43, 0x2b, 0x4d, + 0x16, 0x28, 0xbb, 0x21, 0xdb, 0xc7, 0x84, 0x8a, 0x62, 0x37, 0xa1, 0x9e, 0x94, 0x38, 0x4d, 0x14, + 0xa8, 0x51, 0x59, 0xd9, 0x24, 0x45, 0x53, 0xc4, 0xe2, 0xf9, 0x94, 0xab, 0x45, 0x5a, 0x28, 0x83, + 0x45, 0x93, 0x2d, 0xe9, 0x28, 0x7b, 0x1b, 0x9a, 0x3a, 0xec, 0xa4, 0x5c, 0x1b, 0xe5, 0x36, 0x34, + 0x11, 0x85, 0x76, 0xa0, 0x13, 0x46, 0x41, 0x18, 0xc4, 0x2c, 0x72, 0xa8, 0xeb, 0x46, 0x2c, 0x8e, + 0xbb, 0x1d, 0xb9, 0x9e, 0xa6, 0x1f, 0x48, 0xb2, 0xf5, 0x1e, 0xd4, 0x74, 0xed, 0xb6, 0x09, 0x6b, + 0x68, 0x75, 0x74, 0xc1, 0xaa, 0x2d, 0x27, 0x02, 0x25, 0x0e, 0xc2, 0x50, 0xbd, 0x40, 0x88, 0xa1, + 0xf5, 0x73, 0xa8, 0xa9, 0x0f, 0x56, 0xda, 0x97, 0x7e, 0x1f, 0x36, 0x42, 0x1a, 0x89, 0x6b, 0x64, + 0xbb, 0x53, 0xdd, 0x1d, 0x1c, 0xd3, 0x88, 0x3f, 0x61, 0x3c, 0xd7, 0xa4, 0x36, 0x50, 0x5e, 0x92, + 0xac, 0xfb, 0xd0, 0xcc, 0xc9, 0x88, 0x63, 0xa1, 0x1f, 
0xe9, 0x48, 0xc3, 0x49, 0xb2, 0x73, 0x25, + 0xdd, 0xd9, 0x7a, 0x00, 0x66, 0xf2, 0x6d, 0x44, 0x11, 0xab, 0xaf, 0x6e, 0x28, 0x73, 0xcb, 0x29, + 0x36, 0xde, 0xc1, 0x67, 0x2c, 0x52, 0x31, 0x21, 0x27, 0xd6, 0xb3, 0x4c, 0x66, 0x90, 0x79, 0x9b, + 0xdc, 0x85, 0x9a, 0xca, 0x0c, 0x2a, 0x2a, 0x75, 0x8b, 0x7d, 0x8c, 0xa9, 0x41, 0xb7, 0xd8, 0x32, + 0x51, 0xa4, 0xcb, 0x56, 0xb2, 0xcb, 0x4e, 0xa1, 0xae, 0xa3, 0x3f, 0x9f, 0x26, 0xe5, 0x8a, 0x9d, + 0x62, 0x9a, 0x54, 0x8b, 0xa6, 0x82, 0xc2, 0x3b, 0x62, 0x6f, 0xec, 0x33, 0xd7, 0x49, 0x43, 0x08, + 0xf7, 0xa8, 0xdb, 0x6d, 0xc9, 0xf8, 0x44, 0xc7, 0x8b, 0xf5, 0x2e, 0xac, 0xcb, 0xb3, 0x09, 0xfb, + 0x88, 0x95, 0x75, 0x5d, 0x2f, 0xc6, 0xa5, 0xc0, 0xf1, 0x7b, 0x03, 0xea, 0x3a, 0x79, 0x96, 0x2a, + 0xe5, 0x0e, 0x5d, 0xf9, 0xba, 0x87, 0xfe, 0xef, 0x27, 0x9e, 0xbb, 0x40, 0x64, 0x7e, 0x39, 0x0b, + 0xb8, 0xe7, 0x8f, 0x1d, 0x69, 0x6b, 0x99, 0x83, 0x3a, 0xc8, 0x39, 0x41, 0xc6, 0xb1, 0xa0, 0xef, + 0x7d, 0xbe, 0x06, 0xed, 0x83, 0xc1, 0xc3, 0xa3, 0x83, 0x30, 0x9c, 0x7a, 0x23, 0x8a, 0xbd, 0xc2, + 0x2e, 0x54, 0xb1, 0x5d, 0x2a, 0x79, 0xee, 0xed, 0x95, 0xf5, 0xed, 0x64, 0x0f, 0xd6, 0xb0, 0x6b, + 0x22, 0x65, 0xaf, 0xbe, 0xbd, 0xd2, 0xf6, 0x5d, 0x6c, 0x22, 0xfb, 0xaa, 0xcb, 0x8f, 0xbf, 0xbd, + 0xb2, 0x1e, 0x9e, 0xfc, 0x00, 0xcc, 0xb4, 0x9d, 0x59, 0xf6, 0x04, 0xdc, 0x5b, 0xda, 0xcd, 0x0b, + 0xfd, 0xb4, 0xf4, 0x5b, 0xf6, 0x92, 0xd9, 0x5b, 0xda, 0xf6, 0x92, 0x7d, 0xa8, 0xe9, 0x82, 0xb9, + 0xfc, 0x91, 0xb6, 0xb7, 0xa4, 0xd3, 0x16, 0xe6, 0x91, 0x1d, 0x4a, 0xd9, 0x4b, 0x72, 0xaf, 0xf4, + 0x39, 0x80, 0xdc, 0x83, 0x75, 0x55, 0xc5, 0x94, 0x3e, 0xd4, 0xf6, 0xca, 0xfb, 0x65, 0x71, 0xc9, + 0xb4, 0x47, 0x5b, 0xf6, 0xda, 0xdd, 0x5b, 0xfa, 0x6e, 0x41, 0x0e, 0x00, 0x32, 0x8d, 0xc6, 0xd2, + 0x67, 0xec, 0xde, 0xf2, 0xf7, 0x08, 0xf2, 0x00, 0xea, 0xe9, 0x1b, 0x53, 0xf9, 0xc3, 0x74, 0x6f, + 0xd9, 0x13, 0xc1, 0xe0, 0x8d, 0x7f, 0xfe, 0x69, 0xcb, 0xf8, 0xd5, 0xc5, 0x96, 0xf1, 0xe5, 0xc5, + 0x96, 0xf1, 0xd5, 0xc5, 0x96, 0xf1, 0xbb, 0x8b, 0x2d, 0xe3, 0x8f, 0x17, 0x5b, 0xc6, 0x6f, 0xfe, + 0xbc, 0x65, 0x0c, 0xd7, 0xd1, 0xfd, 0xdf, 0xff, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x8d, + 0xcb, 0x04, 0x88, 0x19, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index 6e6b1cd3671..b48ff1e8bdf 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -6,6 +6,7 @@ package types; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/tendermint/tendermint/libs/common/types.proto"; +import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. @@ -48,6 +49,8 @@ message RequestFlush { message RequestInfo { string version = 1; + uint64 block_version = 2; + uint64 p2p_version = 3; } // nondeterministic @@ -60,7 +63,7 @@ message RequestInitChain { google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true]; string chain_id = 2; ConsensusParams consensus_params = 3; - repeated Validator validators = 4 [(gogoproto.nullable)=false]; + repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false]; bytes app_state_bytes = 5; } @@ -71,7 +74,6 @@ message RequestQuery { bool prove = 4; } -// NOTE: validators here have empty pubkeys. 
 message RequestBeginBlock {
   bytes hash = 1;
   Header header = 2 [(gogoproto.nullable)=false];
@@ -128,9 +130,12 @@ message ResponseFlush {
 
 message ResponseInfo {
   string data = 1;
+
   string version = 2;
-  int64 last_block_height = 3;
-  bytes last_block_app_hash = 4;
+  uint64 app_version = 3;
+
+  int64 last_block_height = 4;
+  bytes last_block_app_hash = 5;
 }
 
 // nondeterministic
@@ -143,7 +148,7 @@ message ResponseSetOption {
 
 message ResponseInitChain {
   ConsensusParams consensus_params = 1;
-  repeated Validator validators = 2 [(gogoproto.nullable)=false];
+  repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
 }
 
 message ResponseQuery {
@@ -154,8 +159,9 @@ message ResponseQuery {
   int64 index = 5;
   bytes key = 6;
   bytes value = 7;
-  bytes proof = 8;
+  merkle.Proof proof = 8;
   int64 height = 9;
+  string codespace = 10;
 }
 
 message ResponseBeginBlock {
@@ -170,6 +176,7 @@ message ResponseCheckTx {
   int64 gas_wanted = 5;
   int64 gas_used = 6;
   repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  string codespace = 8;
 }
 
 message ResponseDeliverTx {
@@ -180,10 +187,11 @@ message ResponseDeliverTx {
   int64 gas_wanted = 5;
   int64 gas_used = 6;
   repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  string codespace = 8;
 }
 
 message ResponseEndBlock {
-  repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
+  repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
   ConsensusParams consensus_param_updates = 2;
   repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
 }
@@ -199,68 +207,97 @@ message ResponseCommit {
 
 // ConsensusParams contains all consensus-relevant parameters
 // that can be adjusted by the abci app
 message ConsensusParams {
-  BlockSize block_size = 1;
-  TxSize tx_size = 2;
-  BlockGossip block_gossip = 3;
+  BlockSizeParams block_size = 1;
+  EvidenceParams evidence = 2;
+  ValidatorParams validator = 3;
 }
 
 // BlockSize contains limits on the block size.
-message BlockSize {
-  int32 max_bytes = 1;
-  int32 max_txs = 2;
-  int64 max_gas = 3;
+message BlockSizeParams {
+  // Note: must be greater than 0
+  int64 max_bytes = 1;
+  // Note: must be greater than or equal to -1
+  int64 max_gas = 2;
 }
 
-// TxSize contains limits on the tx size.
-message TxSize {
-  int32 max_bytes = 1;
-  int64 max_gas = 2;
+// EvidenceParams contains limits on the evidence.
+message EvidenceParams {
+  // Note: must be greater than 0
+  int64 max_age = 1;
 }
 
-// BlockGossip determine consensus critical
-// elements of how blocks are gossiped
-message BlockGossip {
-  // Note: must not be 0
-  int32 block_part_size_bytes = 1;
+// ValidatorParams contains limits on validators.
+message ValidatorParams {
+  repeated string pub_key_types = 1;
 }
 
 message LastCommitInfo {
-  int32 commit_round = 1;
-  repeated SigningValidator validators = 2 [(gogoproto.nullable)=false];
+  int32 round = 1;
+  repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
 }
 
 //----------------------------------------
 // Blockchain Types
 
-// just the minimum the app might need
 message Header {
-  // basics
-  string chain_id = 1 [(gogoproto.customname)="ChainID"];
-  int64 height = 2;
-  google.protobuf.Timestamp time = 3 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
+  // basic block info
+  Version version = 1 [(gogoproto.nullable)=false];
+  string chain_id = 2 [(gogoproto.customname)="ChainID"];
+  int64 height = 3;
+  google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
+  int64 num_txs = 5;
+  int64 total_txs = 6;
+
+  // prev block info
+  BlockID last_block_id = 7 [(gogoproto.nullable)=false];
 
-  // txs
-  int32 num_txs = 4;
-  int64 total_txs = 5;
+  // hashes of block data
+  bytes last_commit_hash = 8; // commit from validators from the last block
+  bytes data_hash = 9; // transactions
 
-  // hashes
-  bytes last_block_hash = 6;
-  bytes validators_hash = 7;
-  bytes app_hash = 8;
+  // hashes from the app output from the prev block
+  bytes validators_hash = 10; // validators for the current block
+  bytes next_validators_hash = 11; // validators for the next block
+  bytes consensus_hash = 12; // consensus params for current block
+  bytes app_hash = 13; // state after txs from the previous block
+  bytes last_results_hash = 14; // root hash of all results from the txs from the previous block
+
+  // consensus info
+  bytes evidence_hash = 15; // evidence included in the block
+  bytes proposer_address = 16; // original proposer of the block
+}
 
-  // consensus
-  Validator proposer = 9 [(gogoproto.nullable)=false];
+message Version {
+  uint64 Block = 1;
+  uint64 App = 2;
+}
+
+
+message BlockID {
+  bytes hash = 1;
+  PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
+}
+
+message PartSetHeader {
+  int32 total = 1;
+  bytes hash = 2;
 }
 
 // Validator
 message Validator {
   bytes address = 1;
-  PubKey pub_key = 2 [(gogoproto.nullable)=false];
+  //PubKey pub_key = 2 [(gogoproto.nullable)=false];
   int64 power = 3;
 }
 
-// Validator with an extra bool
-message SigningValidator {
+// ValidatorUpdate
+message ValidatorUpdate {
+  PubKey pub_key = 1 [(gogoproto.nullable)=false];
+  int64 power = 2;
+}
+
+// VoteInfo
+message VoteInfo {
   Validator validator = 1 [(gogoproto.nullable)=false];
   bool signed_last_block = 2;
 }
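Taken together, the schema changes above split the old ConsensusParams trio (BlockSize, TxSize, BlockGossip) into BlockSizeParams, EvidenceParams, and ValidatorParams, and split the overloaded Validator message into Validator (address and power), ValidatorUpdate (pubkey and power), and VoteInfo (validator plus signed_last_block). Below is a minimal Go sketch of how an app might populate the reshaped types; the field names mirror the generated code above, while the concrete values and the "ed25519" key-type string are illustrative assumptions, not part of this diff.

// Sketch only: populating the post-rename ConsensusParams and returning
// ValidatorUpdate entries from EndBlock. Values are illustrative.
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

func exampleConsensusParams() *abci.ConsensusParams {
	return &abci.ConsensusParams{
		BlockSize: &abci.BlockSizeParams{
			MaxBytes: 1024 * 1024, // per the comment above, must be greater than 0
			MaxGas:   -1,          // must be greater than or equal to -1
		},
		Evidence: &abci.EvidenceParams{
			MaxAge: 100000, // must be greater than 0
		},
		Validator: &abci.ValidatorParams{
			PubKeyTypes: []string{"ed25519"}, // assumed key-type identifier
		},
	}
}

// EndBlock responses now carry ValidatorUpdate (pubkey + power) rather than
// the old Validator message.
func exampleEndBlock(pk abci.PubKey) abci.ResponseEndBlock {
	return abci.ResponseEndBlock{
		ValidatorUpdates: []abci.ValidatorUpdate{
			{PubKey: pk, Power: 10},
		},
	}
}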
assert.Nil(params.GetTxSize()) - assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0) - -} diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 33a368af443..9375cc7f190 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -14,6 +14,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/tendermint/tendermint/crypto/merkle" import _ "github.com/tendermint/tendermint/libs/common" // Reference imports to suppress errors if they are not otherwise used. @@ -1478,15 +1479,15 @@ func TestConsensusParamsMarshalTo(t *testing.T) { } } -func TestBlockSizeProto(t *testing.T) { +func TestBlockSizeParamsProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, false) + p := NewPopulatedBlockSizeParams(popr, false) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1509,10 +1510,10 @@ func TestBlockSizeProto(t *testing.T) { } } -func TestBlockSizeMarshalTo(t *testing.T) { +func TestBlockSizeParamsMarshalTo(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, false) + p := NewPopulatedBlockSizeParams(popr, false) size := p.Size() dAtA := make([]byte, size) for i := range dAtA { @@ -1522,7 +1523,7 @@ func TestBlockSizeMarshalTo(t *testing.T) { if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1534,15 +1535,15 @@ func TestBlockSizeMarshalTo(t *testing.T) { } } -func TestTxSizeProto(t *testing.T) { +func TestEvidenceParamsProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, false) + p := NewPopulatedEvidenceParams(popr, false) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &TxSize{} + msg := &EvidenceParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1565,10 +1566,10 @@ func TestTxSizeProto(t *testing.T) { } } -func TestTxSizeMarshalTo(t *testing.T) { +func TestEvidenceParamsMarshalTo(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, false) + p := NewPopulatedEvidenceParams(popr, false) size := p.Size() dAtA := make([]byte, size) for i := range dAtA { @@ -1578,7 +1579,7 @@ func TestTxSizeMarshalTo(t *testing.T) { if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &TxSize{} + msg := &EvidenceParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1590,15 +1591,15 @@ func TestTxSizeMarshalTo(t *testing.T) { } } -func TestBlockGossipProto(t *testing.T) { +func TestValidatorParamsProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, false) + p := NewPopulatedValidatorParams(popr, 
false) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockGossip{} + msg := &ValidatorParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1621,10 +1622,10 @@ func TestBlockGossipProto(t *testing.T) { } } -func TestBlockGossipMarshalTo(t *testing.T) { +func TestValidatorParamsMarshalTo(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, false) + p := NewPopulatedValidatorParams(popr, false) size := p.Size() dAtA := make([]byte, size) for i := range dAtA { @@ -1634,7 +1635,7 @@ func TestBlockGossipMarshalTo(t *testing.T) { if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockGossip{} + msg := &ValidatorParams{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1758,6 +1759,174 @@ func TestHeaderMarshalTo(t *testing.T) { } } +func TestVersionProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestVersionMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockIDProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockID{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = 
github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestBlockIDMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockID{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPartSetHeaderProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PartSetHeader{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestPartSetHeaderMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PartSetHeader{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestValidatorProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1814,15 +1983,15 @@ func TestValidatorMarshalTo(t *testing.T) { } } -func TestSigningValidatorProto(t *testing.T) { +func TestValidatorUpdateProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, false) + p := NewPopulatedValidatorUpdate(popr, false) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &SigningValidator{} + msg := &ValidatorUpdate{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -1845,10 +2014,10 @@ func TestSigningValidatorProto(t *testing.T) { } } -func TestSigningValidatorMarshalTo(t *testing.T) { +func TestValidatorUpdateMarshalTo(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, false) + p := NewPopulatedValidatorUpdate(popr, false) size := p.Size() dAtA := make([]byte, size) for i := range dAtA { @@ 
-1858,7 +2027,63 @@ func TestSigningValidatorMarshalTo(t *testing.T) { if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &SigningValidator{} + msg := &ValidatorUpdate{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestVoteInfoProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVoteInfo(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &VoteInfo{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestVoteInfoMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVoteInfo(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &VoteInfo{} if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -2450,16 +2675,16 @@ func TestConsensusParamsJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestBlockSizeJSON(t *testing.T) { +func TestBlockSizeParamsJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockSize{} + msg := &BlockSizeParams{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2468,16 +2693,16 @@ func TestBlockSizeJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestTxSizeJSON(t *testing.T) { +func TestEvidenceParamsJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, true) + p := NewPopulatedEvidenceParams(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &TxSize{} + msg := &EvidenceParams{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2486,16 +2711,16 @@ func TestTxSizeJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestBlockGossipJSON(t *testing.T) { +func TestValidatorParamsJSON(t 
*testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, true) + p := NewPopulatedValidatorParams(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &BlockGossip{} + msg := &ValidatorParams{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -2540,6 +2765,60 @@ func TestHeaderJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestVersionJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Version{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestBlockIDJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &BlockID{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestPartSetHeaderJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &PartSetHeader{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestValidatorJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2558,16 +2837,34 @@ func TestValidatorJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } -func TestSigningValidatorJSON(t *testing.T) { +func TestValidatorUpdateJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, true) + p := NewPopulatedValidatorUpdate(popr, true) marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } - msg := &SigningValidator{} + msg := &ValidatorUpdate{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestVoteInfoJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVoteInfo(popr, true) + marshaler := 
github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &VoteInfo{} err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) @@ -3340,12 +3637,12 @@ func TestConsensusParamsProtoCompactText(t *testing.T) { } } -func TestBlockSizeProtoText(t *testing.T) { +func TestBlockSizeParamsProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3354,12 +3651,12 @@ func TestBlockSizeProtoText(t *testing.T) { } } -func TestBlockSizeProtoCompactText(t *testing.T) { +func TestBlockSizeParamsProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &BlockSize{} + msg := &BlockSizeParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3368,12 +3665,12 @@ func TestBlockSizeProtoCompactText(t *testing.T) { } } -func TestTxSizeProtoText(t *testing.T) { +func TestEvidenceParamsProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, true) + p := NewPopulatedEvidenceParams(popr, true) dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &TxSize{} + msg := &EvidenceParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3382,12 +3679,12 @@ func TestTxSizeProtoText(t *testing.T) { } } -func TestTxSizeProtoCompactText(t *testing.T) { +func TestEvidenceParamsProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, true) + p := NewPopulatedEvidenceParams(popr, true) dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &TxSize{} + msg := &EvidenceParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3396,12 +3693,12 @@ func TestTxSizeProtoCompactText(t *testing.T) { } } -func TestBlockGossipProtoText(t *testing.T) { +func TestValidatorParamsProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, true) + p := NewPopulatedValidatorParams(popr, true) dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &BlockGossip{} + msg := &ValidatorParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3410,12 +3707,12 @@ func TestBlockGossipProtoText(t *testing.T) { } } -func TestBlockGossipProtoCompactText(t *testing.T) { +func TestValidatorParamsProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, true) + p := NewPopulatedValidatorParams(popr, true) dAtA := 
github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &BlockGossip{} + msg := &ValidatorParams{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3480,6 +3777,90 @@ func TestHeaderProtoCompactText(t *testing.T) { } } +func TestVersionProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &Version{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestVersionProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &Version{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockIDProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &BlockID{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestBlockIDProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &BlockID{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPartSetHeaderProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &PartSetHeader{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestPartSetHeaderProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &PartSetHeader{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestValidatorProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3508,12 +3889,40 @@ func TestValidatorProtoCompactText(t *testing.T) { } } -func TestSigningValidatorProtoText(t *testing.T) { +func TestValidatorUpdateProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := 
NewPopulatedValidatorUpdate(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ValidatorUpdate{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestValidatorUpdateProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorUpdate(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ValidatorUpdate{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestVoteInfoProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, true) + p := NewPopulatedVoteInfo(popr, true) dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) - msg := &SigningValidator{} + msg := &VoteInfo{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -3522,12 +3931,12 @@ func TestSigningValidatorProtoText(t *testing.T) { } } -func TestSigningValidatorProtoCompactText(t *testing.T) { +func TestVoteInfoProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, true) + p := NewPopulatedVoteInfo(popr, true) dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) - msg := &SigningValidator{} + msg := &VoteInfo{} if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -4164,10 +4573,10 @@ func TestConsensusParamsSize(t *testing.T) { } } -func TestBlockSizeSize(t *testing.T) { +func TestBlockSizeParamsSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockSize(popr, true) + p := NewPopulatedBlockSizeParams(popr, true) size2 := github_com_gogo_protobuf_proto.Size(p) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { @@ -4186,10 +4595,10 @@ func TestBlockSizeSize(t *testing.T) { } } -func TestTxSizeSize(t *testing.T) { +func TestEvidenceParamsSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedTxSize(popr, true) + p := NewPopulatedEvidenceParams(popr, true) size2 := github_com_gogo_protobuf_proto.Size(p) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { @@ -4208,10 +4617,10 @@ func TestTxSizeSize(t *testing.T) { } } -func TestBlockGossipSize(t *testing.T) { +func TestValidatorParamsSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedBlockGossip(popr, true) + p := NewPopulatedValidatorParams(popr, true) size2 := github_com_gogo_protobuf_proto.Size(p) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { @@ -4274,6 +4683,72 @@ func TestHeaderSize(t *testing.T) { } } +func TestVersionSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedVersion(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + 
t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestBlockIDSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedBlockID(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestPartSetHeaderSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedPartSetHeader(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestValidatorSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4296,10 +4771,32 @@ func TestValidatorSize(t *testing.T) { } } -func TestSigningValidatorSize(t *testing.T) { +func TestValidatorUpdateSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedValidatorUpdate(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestVoteInfoSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) - p := NewPopulatedSigningValidator(popr, true) + p := NewPopulatedVoteInfo(popr, true) size2 := github_com_gogo_protobuf_proto.Size(p) dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { diff --git a/abci/types/util.go b/abci/types/util.go index 458024c581b..3cde882320a 100644 --- a/abci/types/util.go +++ b/abci/types/util.go @@ -2,58 +2,33 @@ package types import ( "bytes" - "encoding/json" "sort" - - cmn "github.com/tendermint/tendermint/libs/common" ) 
//------------------------------------------------------------------------------ -// Validators is a list of validators that implements the Sort interface -type Validators []Validator +// ValidatorUpdates is a list of validators that implements the Sort interface +type ValidatorUpdates []ValidatorUpdate -var _ sort.Interface = (Validators)(nil) +var _ sort.Interface = (ValidatorUpdates)(nil) -// All these methods for Validators: +// All these methods for ValidatorUpdates: // Len, Less and Swap -// are for Validators to implement sort.Interface +// are for ValidatorUpdates to implement sort.Interface // which will be used by the sort package. // See Issue https://github.com/tendermint/abci/issues/212 -func (v Validators) Len() int { +func (v ValidatorUpdates) Len() int { return len(v) } // XXX: doesn't distinguish same validator with different power -func (v Validators) Less(i, j int) bool { +func (v ValidatorUpdates) Less(i, j int) bool { return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0 } -func (v Validators) Swap(i, j int) { +func (v ValidatorUpdates) Swap(i, j int) { v1 := v[i] v[i] = v[j] v[j] = v1 } - -func ValidatorsString(vs Validators) string { - s := make([]validatorPretty, len(vs)) - for i, v := range vs { - s[i] = validatorPretty{ - Address: v.Address, - PubKey: v.PubKey.Data, - Power: v.Power, - } - } - b, err := json.Marshal(s) - if err != nil { - panic(err.Error()) - } - return string(b) -} - -type validatorPretty struct { - Address cmn.HexBytes `json:"address"` - PubKey []byte `json:"pub_key"` - Power int64 `json:"power"` -} diff --git a/abci/version/version.go b/abci/version/version.go index 7223a86adaa..f4dc4d23586 100644 --- a/abci/version/version.go +++ b/abci/version/version.go @@ -1,9 +1,9 @@ package version -// NOTE: we should probably be versioning the ABCI and the abci-cli separately +import ( + "github.com/tendermint/tendermint/version" +) -const Maj = "0" -const Min = "12" -const Fix = "0" +// TODO: eliminate this after some version refactor -const Version = "0.12.0" +const Version = version.ABCIVersion diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go index 09487563a04..3e02702865f 100644 --- a/benchmarks/codec_test.go +++ b/benchmarks/codec_test.go @@ -12,20 +12,28 @@ import ( ctypes "github.com/tendermint/tendermint/rpc/core/types" ) +func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo { + return p2p.DefaultNodeInfo{ + ProtocolVersion: p2p.ProtocolVersion{1, 2, 3}, + ID_: id, + Moniker: "SOMENAME", + Network: "SOMENAME", + ListenAddr: "SOMEADDR", + Version: "SOMEVER", + Other: p2p.DefaultNodeInfoOther{ + TxIndex: "on", + RPCAddress: "0.0.0.0:26657", + }, + } +} + func BenchmarkEncodeStatusWire(b *testing.B) { b.StopTimer() cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} status := &ctypes.ResultStatus{ - NodeInfo: p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", "OTHERSTRING"}, - }, + NodeInfo: testNodeInfo(nodeKey.ID()), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: []byte("SOMEBYTES"), LatestBlockHeight: 123, @@ -53,14 +61,7 @@ func BenchmarkEncodeNodeInfoWire(b *testing.B) { cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", 
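Aside: with ValidatorsString and the validatorPretty JSON helper removed in util.go above, ValidatorUpdates is now purely a sort.Interface implementation, so callers order it through the standard library. A minimal sketch, assuming the generated PubKey type carries raw key bytes in its Data field as the Less method above implies:

```go
package main

import (
	"fmt"
	"sort"

	abci "github.com/tendermint/tendermint/abci/types"
)

func main() {
	updates := abci.ValidatorUpdates{
		{PubKey: abci.PubKey{Type: "ed25519", Data: []byte{0x02}}, Power: 10},
		{PubKey: abci.PubKey{Type: "ed25519", Data: []byte{0x01}}, Power: 5},
	}
	// Orders by pubkey bytes; as the XXX comment above notes, two entries
	// for the same validator with different power are not distinguished.
	sort.Sort(updates)
	fmt.Println(updates)
}
```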
"OTHERSTRING"}, - } + nodeInfo := testNodeInfo(nodeKey.ID()) b.StartTimer() counter := 0 @@ -78,14 +79,7 @@ func BenchmarkEncodeNodeInfoBinary(b *testing.B) { cdc := amino.NewCodec() ctypes.RegisterAmino(cdc) nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", "OTHERSTRING"}, - } + nodeInfo := testNodeInfo(nodeKey.ID()) b.StartTimer() counter := 0 diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index b7d2c4d6388..c24eddf8acc 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" cmn "github.com/tendermint/tendermint/libs/common" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" ) func main() { diff --git a/blockchain/pool.go b/blockchain/pool.go index a881c7cb736..c7864a64630 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -365,10 +365,10 @@ func (pool *BlockPool) debug() string { nextHeight := pool.height + pool.requestersLen() for h := pool.height; h < nextHeight; h++ { if pool.requesters[h] == nil { - str += cmn.Fmt("H(%v):X ", h) + str += fmt.Sprintf("H(%v):X ", h) } else { - str += cmn.Fmt("H(%v):", h) - str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil) + str += fmt.Sprintf("H(%v):", h) + str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) } } return str diff --git a/blockchain/reactor.go b/blockchain/reactor.go index f00df50c328..59318dcc518 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -1,6 +1,7 @@ package blockchain import ( + "errors" "fmt" "reflect" "time" @@ -180,6 +181,12 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) return } + if err = msg.ValidateBasic(); err != nil { + bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + bcR.Switch.StopPeerForError(src, err) + return + } + bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) switch msg := msg.(type) { @@ -188,7 +195,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Unfortunately not queued since the queue is full. } case *bcBlockResponseMessage: - // Got a block. bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) case *bcStatusRequestMessage: // Send peer our state. @@ -201,7 +207,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Got a peer status. Unverified. bcR.pool.SetPeerHeight(src.ID(), msg.Height) default: - bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } } @@ -290,7 +296,7 @@ FOR_LOOP: didProcessCh <- struct{}{} } - firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes) + firstParts := first.MakePartSet(types.BlockPartSizeBytes) firstPartsHeader := firstParts.Header() firstID := types.BlockID{first.Hash(), firstPartsHeader} // Finally, verify the first block using the second's commit @@ -321,7 +327,7 @@ FOR_LOOP: state, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? 
- cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", + cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } blocksSynced++ @@ -352,15 +358,17 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error { // Messages // BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface{} +type BlockchainMessage interface { + ValidateBasic() error +} func RegisterBlockchainMessages(cdc *amino.Codec) { cdc.RegisterInterface((*BlockchainMessage)(nil), nil) - cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil) - cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil) - cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil) - cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil) - cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil) + cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil) + cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil) + cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil) + cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil) + cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil) } func decodeMsg(bz []byte) (msg BlockchainMessage, err error) { @@ -377,16 +385,32 @@ type bcBlockRequestMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcBlockRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcBlockRequestMessage) String() string { - return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height) } type bcNoBlockResponseMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcNoBlockResponseMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (brm *bcNoBlockResponseMessage) String() string { - return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height) + return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height) } //------------------------------------- @@ -395,8 +419,17 @@ type bcBlockResponseMessage struct { Block *types.Block } +// ValidateBasic performs basic validation. +func (m *bcBlockResponseMessage) ValidateBasic() error { + if err := m.Block.ValidateBasic(); err != nil { + return err + } + + return nil +} + func (m *bcBlockResponseMessage) String() string { - return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height) + return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height) } //------------------------------------- @@ -405,8 +438,16 @@ type bcStatusRequestMessage struct { Height int64 } +// ValidateBasic performs basic validation. +func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcStatusRequestMessage) String() string { - return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) } //------------------------------------- @@ -415,6 +456,14 @@ type bcStatusResponseMessage struct { Height int64 } +// ValidateBasic performs basic validation. 
+func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Height < 0 { + return errors.New("Negative Height") + } + return nil +} + func (m *bcStatusResponseMessage) String() string { - return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) } diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 11991bda4c8..fca063e0ccf 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -42,13 +42,13 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe bcReactor.SetLogger(logger.With("module", "blockchain")) // Next: we need to set a switch in order for peers to be added in - bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) + bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig(), nil) // Lastly: let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { firstBlock := makeBlock(blockHeight, state) secondBlock := makeBlock(blockHeight+1, state) - firstParts := firstBlock.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) + firstParts := firstBlock.MakePartSet(types.BlockPartSizeBytes) blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit) } @@ -156,7 +156,7 @@ func makeTxs(height int64) (txs []types.Tx) { } func makeBlock(height int64, state sm.State) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil) + block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil, state.Validators.GetProposer().Address) return block } @@ -198,7 +198,7 @@ func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool { } func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) } -func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } +func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.DefaultNodeInfo{} } func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } func (tp *bcrTestPeer) IsOutbound() bool { return false } diff --git a/blockchain/store.go b/blockchain/store.go index f02d4facbcf..498cca68dba 100644 --- a/blockchain/store.go +++ b/blockchain/store.go @@ -63,7 +63,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { part := bs.LoadBlockPart(height, i) buf = append(buf, part.Bytes...) } - err := cdc.UnmarshalBinary(buf, block) + err := cdc.UnmarshalBinaryLengthPrefixed(buf, block) if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. @@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } height := block.Height if g, w := height, bs.Height()+1; g != w { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) + cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. 
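Aside: the blockchain reactor changes above add a ValidateBasic method to the BlockchainMessage interface and call it in Receive before dispatching on message type, stopping the peer on failure. The sketch below re-creates the pattern with a hypothetical message type so the control flow is visible outside the reactor:

```go
package main

import (
	"errors"
	"fmt"
)

// BlockchainMessage mirrors the interface from the diff above.
type BlockchainMessage interface {
	ValidateBasic() error
}

// bcExampleMessage is a hypothetical stand-in for the concrete messages.
type bcExampleMessage struct {
	Height int64
}

// ValidateBasic performs basic validation, in the same style as the diff.
func (m *bcExampleMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("Negative Height")
	}
	return nil
}

func main() {
	var msg BlockchainMessage = &bcExampleMessage{Height: -1}
	if err := msg.ValidateBasic(); err != nil {
		// In the reactor, this is where StopPeerForError would be called.
		fmt.Println("invalid msg:", err)
		return
	}
	// ...only now dispatch on the concrete message type.
}
```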
Wanted %v, got %v", w, g)) } if !blockParts.IsComplete() { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets")) + cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets")) } // Save block meta @@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { if height != bs.Height()+1 { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) + cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) } partBytes := cdc.MustMarshalBinaryBare(part) bs.db.Set(calcBlockPartKey(height, index), partBytes) @@ -224,7 +224,7 @@ type BlockStoreStateJSON struct { func (bsj BlockStoreStateJSON) Save(db dbm.DB) { bytes, err := cdc.MarshalJSON(bsj) if err != nil { - cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) + cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err)) } db.SetSync(blockStoreKey, bytes) } diff --git a/blockchain/store_test.go b/blockchain/store_test.go index c816540050f..9c8fdb23c7f 100644 --- a/blockchain/store_test.go +++ b/blockchain/store_test.go @@ -6,7 +6,6 @@ import ( "runtime/debug" "strings" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -14,6 +13,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestLoadBlockStoreStateJSON(t *testing.T) { @@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) { return nil, nil }) require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) - assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data) + assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data) } db.Set(blockStoreKey, nil) @@ -70,7 +70,7 @@ var ( part1 = partSet.GetPart(0) part2 = partSet.GetPart(1) seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10, - Timestamp: time.Now().UTC()}}} + Timestamp: tmtime.Now()}}} ) // TODO: This test should be simplified ... 
@@ -91,7 +91,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { block := makeBlock(bs.Height()+1, state) validPartSet := block.MakePartSet(2) seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10, - Timestamp: time.Now().UTC()}}} + Timestamp: tmtime.Now()}}} bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -103,7 +103,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { Height: 1, NumTxs: 100, ChainID: "block_test", - Time: time.Now(), + Time: tmtime.Now(), } header2 := header1 header2.Height = 4 @@ -111,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { // End of setup, test data commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10, - Timestamp: time.Now().UTC()}}} + Timestamp: tmtime.Now()}}} tuples := []struct { block *types.Block parts *types.PartSet @@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { if subStr := tuple.wantPanic; subStr != "" { if panicErr == nil { t.Errorf("#%d: want a non-nil panic", i) - } else if got := panicErr.Error(); !strings.Contains(got, subStr) { + } else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) { t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr) } continue @@ -335,7 +335,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet := block.MakePartSet(2) seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10, - Timestamp: time.Now().UTC()}}} + Timestamp: tmtime.Now()}}} bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index 7aedcd0dcc1..38dc5c66d8a 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -5,8 +5,8 @@ import ( "github.com/spf13/cobra" - "github.com/tendermint/tendermint/p2p" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p" ) // GenNodeKeyCmd allows the generation of a node key. It prints node's ID to diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index a44c73ebf12..85ee449164b 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -1,15 +1,16 @@ package commands import ( - "time" + "fmt" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) // InitFilesCmd initialises a fresh Tendermint Core instance. 
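Aside: the store tests above consistently swap time.Now().UTC() for tmtime.Now() from the new types/time package. A minimal before/after sketch; the exact canonicalization tmtime applies (UTC plus some truncation) is an assumption here, not spelled out in this diff:

```go
package main

import (
	"fmt"
	"time"

	tmtime "github.com/tendermint/tendermint/types/time"
)

func main() {
	// Before: timestamps built ad hoc from the standard library.
	before := time.Now().UTC()

	// After: one helper so every subsystem stamps commits the same way.
	after := tmtime.Now()

	fmt.Println(before, after)
}
```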
@@ -52,13 +53,14 @@ func initFilesWithConfig(config *cfg.Config) error { logger.Info("Found genesis file", "path", genFile) } else { genDoc := types.GenesisDoc{ - ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), - GenesisTime: time.Now(), + ChainID: fmt.Sprintf("test-chain-%v", cmn.RandStr(6)), + GenesisTime: tmtime.Now(), ConsensusParams: types.DefaultConsensusParams(), } genDoc.Validators = []types.GenesisValidator{{ - PubKey: pv.GetPubKey(), - Power: 10, + Address: pv.GetPubKey().Address(), + PubKey: pv.GetPubKey(), + Power: 10, }} if err := genDoc.SaveAs(genFile); err != nil { diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index d5759881654..eb2817b601c 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -7,7 +7,6 @@ import ( "github.com/spf13/cobra" cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/lite/proxy" rpcclient "github.com/tendermint/tendermint/rpc/client" ) @@ -27,10 +26,12 @@ just with added trust and running locally.`, } var ( - listenAddr string - nodeAddr string - chainID string - home string + listenAddr string + nodeAddr string + chainID string + home string + maxOpenConnections int + cacheSize int ) func init() { @@ -38,6 +39,8 @@ func init() { LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") + LiteCmd.Flags().IntVar(&maxOpenConnections, "max-open-connections", 900, "Maximum number of simultaneous connections (including WebSocket).") + LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size") } func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) { @@ -66,17 +69,21 @@ func runProxy(cmd *cobra.Command, args []string) error { } // First, connect a client + logger.Info("Connecting to source HTTP client...") node := rpcclient.NewHTTP(nodeAddr, "/websocket") - cert, err := proxy.GetCertifier(chainID, home, nodeAddr) + logger.Info("Constructing Verifier...") + cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize) if err != nil { - return err + return cmn.ErrorWrap(err, "constructing Verifier") } + cert.SetLogger(logger) sc := proxy.SecureClient(node, cert) - err = proxy.StartProxy(sc, listenAddr, logger) + logger.Info("Starting proxy...") + err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections) if err != nil { - return err + return cmn.ErrorWrap(err, "starting proxy") } cmn.TrapSignal(func() { diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index ef0ba301912..53d3471294b 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -5,8 +5,8 @@ import ( "github.com/spf13/cobra" - "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" ) // ResetAllCmd removes the database of this Tendermint core diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 3c67ddc14b5..89ffbe74923 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -1,6 +1,7 @@ package commands import ( + "fmt" "os" "github.com/spf13/cobra" @@ -35,6 +36,9 @@ func ParseConfig() 
(*cfg.Config, error) { } conf.SetRoot(conf.RootDir) cfg.EnsureRoot(conf.RootDir) + if err = conf.ValidateBasic(); err != nil { + return nil, fmt.Errorf("Error in config file: %v", err) + } return conf, err } diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 542e5c99196..6dabacb1f0a 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -2,6 +2,9 @@ package commands import ( "fmt" + "os" + "os/signal" + "syscall" "github.com/spf13/cobra" @@ -49,19 +52,31 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command { Use: "node", Short: "Run the tendermint node", RunE: func(cmd *cobra.Command, args []string) error { - // Create & start node n, err := nodeProvider(config, logger) if err != nil { return fmt.Errorf("Failed to create node: %v", err) } + // Stop upon receiving SIGTERM or CTRL-C + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + logger.Error(fmt.Sprintf("captured %v, exiting...", sig)) + if n.IsRunning() { + n.Stop() + } + os.Exit(1) + } + }() + if err := n.Start(); err != nil { return fmt.Errorf("Failed to start node: %v", err) } logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo()) - // Trap signal, run forever. - n.RunForever() + // Run forever + select {} return nil }, diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index f7639fb275d..0f7dd79a4e4 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -6,15 +6,15 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/spf13/cobra" cfg "github.com/tendermint/tendermint/config" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) var ( @@ -76,7 +76,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { genVals := make([]types.GenesisValidator, nValidators) for i := 0; i < nValidators; i++ { - nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i) + nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) nodeDir := filepath.Join(outputDir, nodeDirName) config.SetRoot(nodeDir) @@ -91,14 +91,15 @@ func testnetFiles(cmd *cobra.Command, args []string) error { pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator) pv := privval.LoadFilePV(pvFile) genVals[i] = types.GenesisValidator{ - PubKey: pv.GetPubKey(), - Power: 1, - Name: nodeDirName, + Address: pv.GetPubKey().Address(), + PubKey: pv.GetPubKey(), + Power: 1, + Name: nodeDirName, } } for i := 0; i < nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators)) + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators)) config.SetRoot(nodeDir) err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) @@ -112,14 +113,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error { // Generate genesis doc from generated validators genDoc := &types.GenesisDoc{ - GenesisTime: time.Now(), + GenesisTime: tmtime.Now(), ChainID: "chain-" + cmn.RandStr(6), Validators: genVals, } // Write genesis file. 
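Aside: the run_node change above drops n.RunForever() in favor of an explicit signal trap plus a blocking select. Extracted into a standalone sketch (the node's IsRunning/Stop pair is stubbed out as a comment):

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	// Stop upon receiving SIGTERM or CTRL-C, mirroring the command above.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		for sig := range c {
			fmt.Printf("captured %v, exiting...\n", sig)
			// The real command checks n.IsRunning() and calls n.Stop() here.
			os.Exit(1)
		}
	}()

	// Run forever, as the command now does.
	select {}
}
```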
for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { _ = os.RemoveAll(outputDir) return err @@ -159,7 +160,7 @@ func hostnameOrIP(i int) string { func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error { persistentPeers := make([]string, nValidators+nNonValidators) for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) if err != nil { @@ -170,7 +171,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error { persistentPeersList := strings.Join(persistentPeers, ",") for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) + nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) config.P2P.PersistentPeers = persistentPeersList config.P2P.AddrBookStrict = false diff --git a/config/config.go b/config/config.go index fb8e79086d0..fa6182114ad 100644 --- a/config/config.go +++ b/config/config.go @@ -5,6 +5,8 @@ import ( "os" "path/filepath" "time" + + "github.com/pkg/errors" ) const ( @@ -19,7 +21,7 @@ const ( // generate the config.toml. Please reflect any changes // made here in the defaultConfigTemplate constant in // config/toml.go -// NOTE: tmlibs/cli must know to look in the config dir! +// NOTE: libs/cli must know to look in the config dir! var ( DefaultTendermintDir = ".tendermint" defaultConfigDir = "config" @@ -89,12 +91,32 @@ func (cfg *Config) SetRoot(root string) *Config { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
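With the ParseConfig change earlier in this diff, an invalid config file is rejected at startup rather than surfacing as runtime misbehaviour. A rough usage sketch against this config package as of this change:

package main

import (
    "fmt"

    cfg "github.com/tendermint/tendermint/config"
)

func main() {
    conf := cfg.DefaultConfig()
    // Defaults satisfy every per-section check.
    fmt.Println("default config valid:", conf.ValidateBasic() == nil) // true

    // A negative timeout now fails fast, wrapped with its section name.
    conf.Consensus.TimeoutPropose = -1
    fmt.Println(conf.ValidateBasic()) // Error in [consensus] section: ...
}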
+func (cfg *Config) ValidateBasic() error { + if err := cfg.RPC.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [rpc] section") + } + if err := cfg.P2P.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [p2p] section") + } + if err := cfg.Mempool.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [mempool] section") + } + if err := cfg.Consensus.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [consensus] section") + } + return errors.Wrap( + cfg.Instrumentation.ValidateBasic(), + "Error in [instrumentation] section", + ) +} + //----------------------------------------------------------------------------- // BaseConfig // BaseConfig defines the base configuration for a Tendermint node type BaseConfig struct { - // chainID is unexposed and immutable but here for convenience chainID string @@ -102,49 +124,49 @@ type BaseConfig struct { // This should be set in viper so it can unmarshal into this struct RootDir string `mapstructure:"home"` + // TCP or UNIX socket address of the ABCI application, + // or the name of an ABCI application compiled in with the Tendermint binary + ProxyApp string `mapstructure:"proxy_app"` + + // A custom human readable name for this node + Moniker string `mapstructure:"moniker"` + + // If this node is many blocks behind the tip of the chain, FastSync + // allows them to catchup quickly by downloading blocks in parallel + // and verifying their commits + FastSync bool `mapstructure:"fast_sync"` + + // Database backend: leveldb | memdb | cleveldb + DBBackend string `mapstructure:"db_backend"` + + // Database directory + DBPath string `mapstructure:"db_dir"` + + // Output level for logging + LogLevel string `mapstructure:"log_level"` + // Path to the JSON file containing the initial validator set and other meta data Genesis string `mapstructure:"genesis_file"` // Path to the JSON file containing the private key to use as a validator in the consensus protocol PrivValidator string `mapstructure:"priv_validator_file"` - // A JSON file containing the private key to use for p2p authenticated encryption - NodeKey string `mapstructure:"node_key_file"` - - // A custom human readable name for this node - Moniker string `mapstructure:"moniker"` - // TCP or UNIX socket address for Tendermint to listen on for // connections from an external PrivValidator process PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"` - // TCP or UNIX socket address of the ABCI application, - // or the name of an ABCI application compiled in with the Tendermint binary - ProxyApp string `mapstructure:"proxy_app"` + // A JSON file containing the private key to use for p2p authenticated encryption + NodeKey string `mapstructure:"node_key_file"` // Mechanism to connect to the ABCI application: socket | grpc ABCI string `mapstructure:"abci"` - // Output level for logging - LogLevel string `mapstructure:"log_level"` - // TCP or UNIX socket address for the profiling server to listen on ProfListenAddress string `mapstructure:"prof_laddr"` - // If this node is many blocks behind the tip of the chain, FastSync - // allows them to catchup quickly by downloading blocks in parallel - // and verifying their commits - FastSync bool `mapstructure:"fast_sync"` - // If true, query the ABCI app on connecting to a new peer // so the app can decide if we should keep the connection or not FilterPeers bool `mapstructure:"filter_peers"` // false - - // Database backend: leveldb | memdb - DBBackend string `mapstructure:"db_backend"` - - // 
Database directory - DBPath string `mapstructure:"db_dir"` } // DefaultBaseConfig returns a default base configuration for a Tendermint node @@ -239,6 +261,8 @@ type RPCConfig struct { // If you want to accept more significant number than the default, make sure // you increase your OS limits. // 0 - unlimited. + // Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} + // 1024 - 40 - 10 - 50 = 924 = ~900 MaxOpenConnections int `mapstructure:"max_open_connections"` } @@ -248,11 +272,9 @@ func DefaultRPCConfig() *RPCConfig { ListenAddress: "tcp://0.0.0.0:26657", GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, // no ipv4 + GRPCMaxOpenConnections: 900, - Unsafe: false, - // should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files} - // 1024 - 50 - 50 = 924 = ~900 + Unsafe: false, MaxOpenConnections: 900, } } @@ -266,6 +288,18 @@ func TestRPCConfig() *RPCConfig { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *RPCConfig) ValidateBasic() error { + if cfg.GRPCMaxOpenConnections < 0 { + return errors.New("grpc_max_open_connections can't be negative") + } + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // P2PConfig @@ -293,13 +327,17 @@ type P2PConfig struct { AddrBook string `mapstructure:"addr_book_file"` // Set true for strict address routability rules + // Set false for private or local networks AddrBookStrict bool `mapstructure:"addr_book_strict"` - // Maximum number of peers to connect to - MaxNumPeers int `mapstructure:"max_num_peers"` + // Maximum number of inbound peers + MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"` + + // Maximum number of outbound peers to connect to, excluding persistent peers + MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"` - // Time to wait before flushing messages out on the connection, in ms - FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"` // Maximum size of a message packet payload, in bytes MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` @@ -346,8 +384,9 @@ func DefaultP2PConfig() *P2PConfig { UPNP: false, AddrBook: defaultAddrBookPath, AddrBookStrict: true, - MaxNumPeers: 50, - FlushThrottleTimeout: 100, + MaxNumInboundPeers: 40, + MaxNumOutboundPeers: 10, + FlushThrottleTimeout: 100 * time.Millisecond, MaxPacketMsgPayloadSize: 1024, // 1 kB SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s @@ -366,7 +405,7 @@ func DefaultP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://0.0.0.0:36656" - cfg.FlushThrottleTimeout = 10 + cfg.FlushThrottleTimeout = 10 * time.Millisecond cfg.AllowDuplicateIP = true return cfg } @@ -376,6 +415,30 @@ func (cfg *P2PConfig) AddrBookFile() string { return rootify(cfg.AddrBook, cfg.RootDir) } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
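The new max_open_connections comments do the arithmetic against a fixed 1024-descriptor soft limit. Below is a sketch of the same calculation against the process's actual limit (Unix-only; the constants mirror the new defaults):

package main

import (
    "fmt"
    "syscall"
)

func main() {
    var lim syscall.Rlimit
    if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
        panic(err)
    }
    const (
        maxInbound  = 40 // max_num_inbound_peers default
        maxOutbound = 10 // max_num_outbound_peers default
        otherFiles  = 50 // rough allowance for wal, db and other open files
    )
    // With the default soft limit: 1024 - 40 - 10 - 50 = 924 = ~900.
    fmt.Println("max_open_connections should be <",
        int64(lim.Cur)-maxInbound-maxOutbound-otherFiles)
}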
+func (cfg *P2PConfig) ValidateBasic() error { + if cfg.MaxNumInboundPeers < 0 { + return errors.New("max_num_inbound_peers can't be negative") + } + if cfg.MaxNumOutboundPeers < 0 { + return errors.New("max_num_outbound_peers can't be negative") + } + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush_throttle_timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max_packet_msg_payload_size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send_rate can't be negative") + } + if cfg.RecvRate < 0 { + return errors.New("recv_rate can't be negative") + } + return nil +} + // FuzzConnConfig is a FuzzedConnection configuration. type FuzzConnConfig struct { Mode int @@ -401,24 +464,24 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { // MempoolConfig defines the configuration options for the Tendermint mempool type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - RecheckEmpty bool `mapstructure:"recheck_empty"` - Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` - Size int `mapstructure:"size"` - CacheSize int `mapstructure:"cache_size"` + RootDir string `mapstructure:"home"` + Recheck bool `mapstructure:"recheck"` + Broadcast bool `mapstructure:"broadcast"` + WalPath string `mapstructure:"wal_dir"` + Size int `mapstructure:"size"` + CacheSize int `mapstructure:"cache_size"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ - Recheck: true, - RecheckEmpty: true, - Broadcast: true, - WalPath: filepath.Join(defaultDataDir, "mempool.wal"), - Size: 100000, - CacheSize: 100000, + Recheck: true, + Broadcast: true, + WalPath: "", + // Each signature verification takes .5ms, size reduced until we implement + // ABCI Recheck + Size: 5000, + CacheSize: 10000, } } @@ -434,6 +497,23 @@ func (cfg *MempoolConfig) WalDir() string { return rootify(cfg.WalPath, cfg.RootDir) } +// WalEnabled returns true if the WAL is enabled. +func (cfg *MempoolConfig) WalEnabled() bool { + return cfg.WalPath != "" +} + +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
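Since WalPath now defaults to empty, mempool WAL setup is expected to be gated on the new WalEnabled accessor. A minimal sketch (initMempoolWAL is an illustrative helper, not part of the diff):

package main

import (
    "fmt"
    "os"

    cfg "github.com/tendermint/tendermint/config"
)

// initMempoolWAL only touches the filesystem when a WAL path was configured.
func initMempoolWAL(mcfg *cfg.MempoolConfig) error {
    if !mcfg.WalEnabled() {
        return nil // WAL is off by default now (WalPath == "")
    }
    return os.MkdirAll(mcfg.WalDir(), 0700)
}

func main() {
    mcfg := cfg.DefaultMempoolConfig()
    fmt.Println("wal enabled by default:", mcfg.WalEnabled()) // false
    if err := initMempoolWAL(mcfg); err != nil {
        panic(err)
    }
}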
+func (cfg *MempoolConfig) ValidateBasic() error { + if cfg.Size < 0 { + return errors.New("size can't be negative") + } + if cfg.CacheSize < 0 { + return errors.New("cache_size can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // ConsensusConfig @@ -444,100 +524,101 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal_file"` walFile string // overrides WalPath if set - // All timeouts are in milliseconds - TimeoutPropose int `mapstructure:"timeout_propose"` - TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` - TimeoutPrevote int `mapstructure:"timeout_prevote"` - TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` - TimeoutPrecommit int `mapstructure:"timeout_precommit"` - TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` - TimeoutCommit int `mapstructure:"timeout_commit"` + TimeoutPropose time.Duration `mapstructure:"timeout_propose"` + TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` + TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` + TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` + TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` + TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` + TimeoutCommit time.Duration `mapstructure:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - // EmptyBlocks mode and possible interval between empty blocks in seconds - CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` - CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` + // EmptyBlocks mode and possible interval between empty blocks + CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` + CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"` + + // Reactor sleep duration parameters + PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"` + PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"` - // Reactor sleep duration parameters are in milliseconds - PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"` - PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"` + // Block time parameters. Corresponds to the minimum time increment between consecutive blocks. 
+ BlockTimeIota time.Duration `mapstructure:"blocktime_iota"` } // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 3000, - TimeoutProposeDelta: 500, - TimeoutPrevote: 1000, - TimeoutPrevoteDelta: 500, - TimeoutPrecommit: 1000, - TimeoutPrecommitDelta: 500, - TimeoutCommit: 1000, + TimeoutPropose: 3000 * time.Millisecond, + TimeoutProposeDelta: 500 * time.Millisecond, + TimeoutPrevote: 1000 * time.Millisecond, + TimeoutPrevoteDelta: 500 * time.Millisecond, + TimeoutPrecommit: 1000 * time.Millisecond, + TimeoutPrecommitDelta: 500 * time.Millisecond, + TimeoutCommit: 1000 * time.Millisecond, SkipTimeoutCommit: false, CreateEmptyBlocks: true, - CreateEmptyBlocksInterval: 0, - PeerGossipSleepDuration: 100, - PeerQueryMaj23SleepDuration: 2000, + CreateEmptyBlocksInterval: 0 * time.Second, + PeerGossipSleepDuration: 100 * time.Millisecond, + PeerQueryMaj23SleepDuration: 2000 * time.Millisecond, + BlockTimeIota: 1000 * time.Millisecond, } } // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 100 - cfg.TimeoutProposeDelta = 1 - cfg.TimeoutPrevote = 10 - cfg.TimeoutPrevoteDelta = 1 - cfg.TimeoutPrecommit = 10 - cfg.TimeoutPrecommitDelta = 1 - cfg.TimeoutCommit = 10 + cfg.TimeoutPropose = 40 * time.Millisecond + cfg.TimeoutProposeDelta = 1 * time.Millisecond + cfg.TimeoutPrevote = 10 * time.Millisecond + cfg.TimeoutPrevoteDelta = 1 * time.Millisecond + cfg.TimeoutPrecommit = 10 * time.Millisecond + cfg.TimeoutPrecommitDelta = 1 * time.Millisecond + cfg.TimeoutCommit = 10 * time.Millisecond cfg.SkipTimeoutCommit = true - cfg.PeerGossipSleepDuration = 5 - cfg.PeerQueryMaj23SleepDuration = 250 + cfg.PeerGossipSleepDuration = 5 * time.Millisecond + cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond + cfg.BlockTimeIota = 10 * time.Millisecond return cfg } +// MinValidVoteTime returns the minimum acceptable block time. +// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md). 
+func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time { + return lastBlockTime.Add(cfg.BlockTimeIota) +} + // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step func (cfg *ConsensusConfig) WaitForTxs() bool { return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 } -// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available -func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration { - return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second -} - // Propose returns the amount of time to wait for a proposal func (cfg *ConsensusConfig) Propose(round int) time.Duration { - return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes func (cfg *ConsensusConfig) Prevote(round int) time.Duration { - return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits func (cfg *ConsensusConfig) Precommit(round int) time.Duration { - return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit). func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { - return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond) -} - -// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor -func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration { - return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond -} - -// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor -func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration { - return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond + return t.Add(cfg.TimeoutCommit) } // WalFile returns the full path to the write-ahead log file @@ -553,11 +634,50 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) { cfg.walFile = walFile } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
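The Propose/Prevote/Precommit accessors above keep the old base-plus-delta-per-round escalation, just computed on time.Duration now. A quick arithmetic check with the new defaults:

package main

import (
    "fmt"
    "time"
)

func main() {
    timeoutPropose := 3000 * time.Millisecond
    timeoutProposeDelta := 500 * time.Millisecond

    // Same formula as ConsensusConfig.Propose: base + delta*round.
    propose := func(round int) time.Duration {
        return time.Duration(
            timeoutPropose.Nanoseconds()+timeoutProposeDelta.Nanoseconds()*int64(round),
        ) * time.Nanosecond
    }

    for round := 0; round <= 2; round++ {
        fmt.Printf("Propose(%d) = %v\n", round, propose(round)) // 3s, 3.5s, 4s
    }
}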
+func (cfg *ConsensusConfig) ValidateBasic() error { + if cfg.TimeoutPropose < 0 { + return errors.New("timeout_propose can't be negative") + } + if cfg.TimeoutProposeDelta < 0 { + return errors.New("timeout_propose_delta can't be negative") + } + if cfg.TimeoutPrevote < 0 { + return errors.New("timeout_prevote can't be negative") + } + if cfg.TimeoutPrevoteDelta < 0 { + return errors.New("timeout_prevote_delta can't be negative") + } + if cfg.TimeoutPrecommit < 0 { + return errors.New("timeout_precommit can't be negative") + } + if cfg.TimeoutPrecommitDelta < 0 { + return errors.New("timeout_precommit_delta can't be negative") + } + if cfg.TimeoutCommit < 0 { + return errors.New("timeout_commit can't be negative") + } + if cfg.CreateEmptyBlocksInterval < 0 { + return errors.New("create_empty_blocks_interval can't be negative") + } + if cfg.PeerGossipSleepDuration < 0 { + return errors.New("peer_gossip_sleep_duration can't be negative") + } + if cfg.PeerQueryMaj23SleepDuration < 0 { + return errors.New("peer_query_maj23_sleep_duration can't be negative") + } + if cfg.BlockTimeIota < 0 { + return errors.New("blocktime_iota can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // TxIndexConfig -// TxIndexConfig defines the configuration for the transaction -// indexer, including tags to index. +// TxIndexConfig defines the configuration for the transaction indexer, +// including tags to index. type TxIndexConfig struct { // What indexer to use for transactions // @@ -566,16 +686,21 @@ type TxIndexConfig struct { // 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). Indexer string `mapstructure:"indexer"` - // Comma-separated list of tags to index (by default the only tag is tx hash) + // Comma-separated list of tags to index (by default the only tag is "tx.hash") + // + // You can also index transactions by height by adding "tx.height" tag here. // // It's recommended to index only a subset of tags due to possible memory // bloat. This is, of course, depends on the indexer's DB and the volume of // transactions. IndexTags string `mapstructure:"index_tags"` - // When set to true, tells indexer to index all tags. Note this may be not - // desirable (see the comment above). IndexTags has a precedence over - // IndexAllTags (i.e. when given both, IndexTags will be indexed). + // When set to true, tells indexer to index all tags (predefined tags: + // "tx.hash", "tx.height" and all tags from DeliverTx responses). + // + // Note this may be not desirable (see the comment above). IndexTags has a + // precedence over IndexAllTags (i.e. when given both, IndexTags will be + // indexed). IndexAllTags bool `mapstructure:"index_all_tags"` } @@ -611,6 +736,9 @@ type InstrumentationConfig struct { // you increase your OS limits. // 0 - unlimited. MaxOpenConnections int `mapstructure:"max_open_connections"` + + // Tendermint instrumentation namespace. + Namespace string `mapstructure:"namespace"` } // DefaultInstrumentationConfig returns a default configuration for metrics @@ -620,6 +748,7 @@ func DefaultInstrumentationConfig() *InstrumentationConfig { Prometheus: false, PrometheusListenAddr: ":26660", MaxOpenConnections: 3, + Namespace: "tendermint", } } @@ -629,6 +758,15 @@ func TestInstrumentationConfig() *InstrumentationConfig { return DefaultInstrumentationConfig() } +// ValidateBasic performs basic validation (checking param bounds, etc.) 
and +// returns an error if any check fails. +func (cfg *InstrumentationConfig) ValidateBasic() error { + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // Utils diff --git a/config/config_test.go b/config/config_test.go index 6379960fae0..afdbed181c8 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,6 +2,7 @@ package config import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -26,3 +27,12 @@ func TestDefaultConfig(t *testing.T) { assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) } + +func TestConfigValidateBasic(t *testing.T) { + cfg := DefaultConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with timeout_propose + cfg.Consensus.TimeoutPropose = -10 * time.Second + assert.Error(t, cfg.ValidateBasic()) +} diff --git a/config/toml.go b/config/toml.go index 60ce15de84e..d73b9c81de2 100644 --- a/config/toml.go +++ b/config/toml.go @@ -77,11 +77,11 @@ moniker = "{{ .BaseConfig.Moniker }}" # and verifying their commits fast_sync = {{ .BaseConfig.FastSync }} -# Database backend: leveldb | memdb +# Database backend: leveldb | memdb | cleveldb db_backend = "{{ .BaseConfig.DBBackend }}" # Database directory -db_path = "{{ js .BaseConfig.DBPath }}" +db_dir = "{{ js .BaseConfig.DBPath }}" # Output level for logging, including package level options log_level = "{{ .BaseConfig.LogLevel }}" @@ -94,8 +94,12 @@ genesis_file = "{{ js .BaseConfig.Genesis }}" # Path to the JSON file containing the private key to use as a validator in the consensus protocol priv_validator_file = "{{ js .BaseConfig.PrivValidator }}" +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" + # Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey}}" +node_key_file = "{{ js .BaseConfig.NodeKey }}" # Mechanism to connect to the ABCI application: socket | grpc abci = "{{ .BaseConfig.ABCI }}" @@ -124,6 +128,8 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}" # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }} # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool @@ -134,6 +140,8 @@ unsafe = {{ .RPC.Unsafe }} # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 max_open_connections = {{ .RPC.MaxOpenConnections }} ##### peer to peer configuration options ##### @@ -161,13 +169,17 @@ upnp = {{ .P2P.UPNP }} addr_book_file = "{{ js .P2P.AddrBook }}" # Set true for strict address routability rules +# Set false for private or local networks addr_book_strict = {{ .P2P.AddrBookStrict }} -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }} +# Maximum number of inbound peers +max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} -# Maximum number of peers to connect to -max_num_peers = {{ .P2P.MaxNumPeers }} +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} @@ -190,11 +202,17 @@ seed_mode = {{ .P2P.SeedMode }} # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} + +# Peer connection configuration. +handshake_timeout = "{{ .P2P.HandshakeTimeout }}" +dial_timeout = "{{ .P2P.DialTimeout }}" + ##### mempool configuration options ##### [mempool] recheck = {{ .Mempool.Recheck }} -recheck_empty = {{ .Mempool.RecheckEmpty }} broadcast = {{ .Mempool.Broadcast }} wal_dir = "{{ js .Mempool.WalPath }}" @@ -209,25 +227,24 @@ cache_size = {{ .Mempool.CacheSize }} wal_file = "{{ js .Consensus.WalPath }}" -# All timeouts are in milliseconds -timeout_propose = {{ .Consensus.TimeoutPropose }} -timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }} -timeout_prevote = {{ .Consensus.TimeoutPrevote }} -timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }} -timeout_precommit = {{ .Consensus.TimeoutPrecommit }} -timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }} -timeout_commit = {{ .Consensus.TimeoutCommit }} +timeout_propose = "{{ .Consensus.TimeoutPropose }}" +timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" +timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" +timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" +timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" +timeout_commit = "{{ .Consensus.TimeoutCommit }}" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} -create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }} +create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }} -peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }} +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" 
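Durations are now written into config.toml as quoted strings (e.g. flush_throttle_timeout = "100ms"), i.e. the time.Duration string form, which parses back losslessly:

package main

import (
    "fmt"
    "time"
)

func main() {
    // What the template renders for a time.Duration field.
    written := (100 * time.Millisecond).String()
    fmt.Println("rendered:", written) // "100ms"

    // What comes back when the quoted TOML value is decoded (e.g. via a
    // string-to-duration decode hook, or manually as here).
    parsed, err := time.ParseDuration(written)
    if err != nil {
        panic(err)
    }
    fmt.Println("round-trips:", parsed == 100*time.Millisecond) // true
}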
+peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" ##### transactions indexer configuration options ##### [tx_index] @@ -239,16 +256,21 @@ peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }} # 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). indexer = "{{ .TxIndex.Indexer }}" -# Comma-separated list of tags to index (by default the only tag is tx hash) +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. # # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "{{ .TxIndex.IndexTags }}" -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). index_all_tags = {{ .TxIndex.IndexAllTags }} ##### instrumentation configuration options ##### @@ -267,6 +289,9 @@ prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}" # you increase your OS limits. # 0 - unlimited. max_open_connections = {{ .Instrumentation.MaxOpenConnections }} + +# Instrumentation namespace +namespace = "{{ .Instrumentation.Namespace }}" ` /****** these are for test settings ***********/ @@ -317,7 +342,7 @@ func ResetTestRoot(testName string) *Config { } var testGenesis = `{ - "genesis_time": "0001-01-01T00:00:00.000Z", + "genesis_time": "2018-10-10T08:20:13.695936996Z", "chain_id": "tendermint_test", "validators": [ { diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 5360a92c945..6f46c04d39c 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -2,14 +2,15 @@ package consensus import ( "context" + "fmt" "sync" "testing" "time" "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) func init() { @@ -38,7 +39,13 @@ func TestByzantine(t *testing.T) { switches := make([]*p2p.Switch, N) p2pLogger := logger.With("module", "p2p") for i := 0; i < N; i++ { - switches[i] = p2p.NewSwitch(config.P2P) + switches[i] = p2p.MakeSwitch( + config.P2P, + i, + "foo", "1.0.0", + func(i int, sw *p2p.Switch) *p2p.Switch { + return sw + }) switches[i].SetLogger(p2pLogger.With("validator", i)) } @@ -156,8 +163,8 @@ func TestByzantine(t *testing.T) { case <-done: case <-tick.C: for i, reactor := range reactors { - t.Log(cmn.Fmt("Consensus Reactor %v", i)) - t.Log(cmn.Fmt("%v", reactor)) + t.Log(fmt.Sprintf("Consensus Reactor %v", i)) + t.Log(fmt.Sprintf("%v", reactor)) } t.Fatalf("Timed out waiting for all validators to commit first block") } @@ -172,16 +179,16 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons // Create a new proposal block from state/txs from the mempool. 
block1, blockParts1 := cs.createProposalBlock() - polRound, polBlockID := cs.Votes.POLInfo() - proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) + polRound, propBlockID := cs.ValidRound, types.BlockID{block1.Hash(), blockParts1.Header()} + proposal1 := types.NewProposal(height, round, polRound, propBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { t.Error(err) } // Create a new proposal block from state/txs from the mempool. block2, blockParts2 := cs.createProposalBlock() - polRound, polBlockID = cs.Votes.POLInfo() - proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) + polRound, propBlockID = cs.ValidRound, types.BlockID{block2.Hash(), blockParts2.Header()} + proposal2 := types.NewProposal(height, round, polRound, propBlockID) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { t.Error(err) } @@ -219,8 +226,8 @@ func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p. // votes cs.mtx.Lock() - prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header()) - precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) + prevote, _ := cs.signVote(types.PrevoteType, blockHash, parts.Header()) + precommit, _ := cs.signVote(types.PrecommitType, blockHash, parts.Header()) cs.mtx.Unlock() peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote})) @@ -256,7 +263,7 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { // Send our state to peer. // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). if !br.reactor.fastSync { - br.reactor.sendNewRoundStepMessages(peer) + br.reactor.sendNewRoundStepMessage(peer) } } func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { diff --git a/consensus/common_test.go b/consensus/common_test.go index f4855992d80..4f48f44244a 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -7,6 +7,7 @@ import ( "io/ioutil" "os" "path" + "reflect" "sort" "sync" "testing" @@ -25,6 +26,7 @@ import ( "github.com/tendermint/tendermint/privval" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" @@ -37,8 +39,8 @@ const ( ) // genesis, chain_id, priv_val -var config *cfg.Config // NOTE: must be reset for each _test.go file -var ensureTimeout = time.Second * 1 // must be in seconds because CreateEmptyBlocksInterval is +var config *cfg.Config // NOTE: must be reset for each _test.go file +var ensureTimeout = time.Millisecond * 100 func ensureDir(dir string, mode os.FileMode) { if err := cmn.EnsureDir(dir, mode); err != nil { @@ -69,13 +71,13 @@ func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validato } } -func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { +func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { vote := &types.Vote{ ValidatorIndex: vs.Index, ValidatorAddress: vs.PrivValidator.GetAddress(), Height: vs.Height, Round: vs.Round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: voteType, BlockID: types.BlockID{hash, header}, } @@ -84,7 +86,7 @@ func (vs *validatorStub) signVote(voteType byte, 
hash []byte, header types.PartS } // Sign vote for type/hash/header -func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { +func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { v, err := vs.signVote(voteType, hash, header) if err != nil { panic(fmt.Errorf("failed to sign vote: %v", err)) } @@ -92,7 +94,7 @@ func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSe return v } -func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { +func signVotes(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { votes := make([]*types.Vote, len(vss)) for i, vs := range vss { votes[i] = signVote(vs, voteType, hash, header) @@ -128,8 +130,8 @@ func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round } // Make proposal - polRound, polBlockID := cs1.Votes.POLInfo() - proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + polRound, propBlockID := cs1.ValidRound, types.BlockID{block.Hash(), blockParts.Header()} + proposal = types.NewProposal(height, round, polRound, propBlockID) if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil { panic(err) } @@ -142,7 +144,7 @@ func addVotes(to *ConsensusState, votes ...*types.Vote) { } } -func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { +func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { votes := signVotes(voteType, hash, header, vss...) addVotes(to, votes...) } @@ -305,23 +307,204 @@ func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { //------------------------------------------------------------------------------- -func ensureNoNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) +func ensureNoNewEvent(ch <-chan interface{}, timeout time.Duration, + errorMessage string) { select { - case <-timer.C: + case <-time.After(timeout): break - case <-stepCh: - panic("We should be stuck waiting, not moving to the next step") + case <-ch: + panic(errorMessage) } } -func ensureNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) +func ensureNoNewEventOnChannel(ch <-chan interface{}) { + ensureNoNewEvent( + ch, + ensureTimeout, + "We should be stuck waiting, not receiving new event on the channel") +} + +func ensureNoNewRoundStep(stepCh <-chan interface{}) { + ensureNoNewEvent( + stepCh, + ensureTimeout, + "We should be stuck waiting, not receiving NewRoundStep event") +} + +func ensureNoNewUnlock(unlockCh <-chan interface{}) { + ensureNoNewEvent( + unlockCh, + ensureTimeout, + "We should be stuck waiting, not receiving Unlock event") +} + +func ensureNoNewTimeout(stepCh <-chan interface{}, timeout int64) { + timeoutDuration := time.Duration(timeout*5) * time.Nanosecond + ensureNoNewEvent( + stepCh, + timeoutDuration, + "We should be stuck waiting, not receiving NewTimeout event") +} + +func ensureNewEvent( + ch <-chan interface{}, + height int64, + round int, + timeout time.Duration, + errorMessage string) { + + select { + case <-time.After(timeout): + panic(errorMessage) + case ev := <-ch: + rs, ok := ev.(types.EventDataRoundState) + if !ok { + panic( + fmt.Sprintf( + "expected an EventDataRoundState, got %v. Wrong subscription channel?", +
reflect.TypeOf(rs))) + } + if rs.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, rs.Height)) + } + if rs.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, rs.Round)) + } + // TODO: We could check also for a step at this point! + } +} + +func ensureNewRoundStep(stepCh <-chan interface{}, height int64, round int) { + ensureNewEvent( + stepCh, + height, + round, + ensureTimeout, + "Timeout expired while waiting for NewStep event") +} + +func ensureNewVote(voteCh <-chan interface{}, height int64, round int) { select { - case <-timer.C: - panic("We shouldnt be stuck waiting") - case <-stepCh: + case <-time.After(ensureTimeout): break + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, "+ + "got %v. wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + } +} + +func ensureNewRound(roundCh <-chan interface{}, height int64, round int) { + ensureNewEvent(roundCh, height, round, ensureTimeout, + "Timeout expired while waiting for NewRound event") +} + +func ensureNewTimeout(timeoutCh <-chan interface{}, height int64, round int, timeout int64) { + timeoutDuration := time.Duration(timeout*3) * time.Nanosecond + ensureNewEvent(timeoutCh, height, round, timeoutDuration, + "Timeout expired while waiting for NewTimeout event") +} + +func ensureNewProposal(proposalCh <-chan interface{}, height int64, round int) { + ensureNewEvent(proposalCh, height, round, ensureTimeout, + "Timeout expired while waiting for NewProposal event") +} + +func ensureNewValidBlock(validBlockCh <-chan interface{}, height int64, round int) { + ensureNewEvent(validBlockCh, height, round, ensureTimeout, + "Timeout expired while waiting for NewValidBlock event") +} + +func ensureNewBlock(blockCh <-chan interface{}, height int64) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for NewBlock event") + case ev := <-blockCh: + block, ok := ev.(types.EventDataNewBlock) + if !ok { + panic(fmt.Sprintf("expected a *types.EventDataNewBlock, "+ + "got %v. wrong subscription channel?", + reflect.TypeOf(block))) + } + if block.Block.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, block.Block.Height)) + } + } +} + +func ensureNewBlockHeader(blockCh <-chan interface{}, height int64, blockHash cmn.HexBytes) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for NewBlockHeader event") + case ev := <-blockCh: + blockHeader, ok := ev.(types.EventDataNewBlockHeader) + if !ok { + panic(fmt.Sprintf("expected a *types.EventDataNewBlockHeader, "+ + "got %v. 
wrong subscription channel?", + reflect.TypeOf(blockHeader))) + } + if blockHeader.Header.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, blockHeader.Header.Height)) + } + if !bytes.Equal(blockHeader.Header.Hash(), blockHash) { + panic(fmt.Sprintf("expected header %X, got %X", blockHash, blockHeader.Header.Hash())) + } + } +} + +func ensureNewUnlock(unlockCh <-chan interface{}, height int64, round int) { + ensureNewEvent(unlockCh, height, round, ensureTimeout, + "Timeout expired while waiting for NewUnlock event") +} + +func ensureVote(voteCh <-chan interface{}, height int64, round int, + voteType types.SignedMsgType) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for NewVote event") + case v := <-voteCh: + edv, ok := v.(types.EventDataVote) + if !ok { + panic(fmt.Sprintf("expected a *types.Vote, "+ + "got %v. wrong subscription channel?", + reflect.TypeOf(v))) + } + vote := edv.Vote + if vote.Height != height { + panic(fmt.Sprintf("expected height %v, got %v", height, vote.Height)) + } + if vote.Round != round { + panic(fmt.Sprintf("expected round %v, got %v", round, vote.Round)) + } + if vote.Type != voteType { + panic(fmt.Sprintf("expected type %v, got %v", voteType, vote.Type)) + } + } +} + +func ensurePrecommit(voteCh <-chan interface{}, height int64, round int) { + ensureVote(voteCh, height, round, types.PrecommitType) +} + +func ensurePrevote(voteCh <-chan interface{}, height int64, round int) { + ensureVote(voteCh, height, round, types.PrevoteType) +} + +func ensureNewEventOnChannel(ch <-chan interface{}) { + select { + case <-time.After(ensureTimeout): + panic("Timeout expired while waiting for new activity on the channel") + case <-ch: } } @@ -348,13 +531,13 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) + thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) for _, opt := range configOpts { opt(thisConfig) } ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal app := appFunc() - vals := types.TM2PB.Validators(state.Validators) + vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app) @@ -372,7 +555,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) + thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal var privVal types.PrivValidator if i < nValidators { @@ -386,7 +569,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF } app := appFunc() - vals := types.TM2PB.Validators(state.Validators) + vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) @@ -398,7 +581,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { for i, s := range switches { - if 
peer.NodeInfo().ID == s.NodeInfo().ID { + if peer.NodeInfo().ID() == s.NodeInfo().ID() { return i } } @@ -423,7 +606,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G sort.Sort(types.PrivValidatorsByAddress(privValidators)) return &types.GenesisDoc{ - GenesisTime: time.Now(), + GenesisTime: tmtime.Now(), ChainID: config.ChainID(), Validators: validators, }, privValidators @@ -432,8 +615,6 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) s0, _ := sm.MakeGenesisState(genDoc) - db := dbm.NewMemDB() // remove this ? - sm.SaveState(db, s0) return s0, privValidators } diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index a811de731b0..3dc1cd5ff43 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -10,8 +10,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/types" ) @@ -29,17 +27,17 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) + ensureNewEventOnChannel(newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(newBlockCh) deliverTxsRange(cs, 0, 1) - ensureNewStep(newBlockCh) // commit txs - ensureNewStep(newBlockCh) // commit updated app hash - ensureNoNewStep(newBlockCh) + ensureNewEventOnChannel(newBlockCh) // commit txs + ensureNewEventOnChannel(newBlockCh) // commit updated app hash + ensureNoNewEventOnChannel(newBlockCh) } func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds()) + config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, false, 10) cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() @@ -47,9 +45,9 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) startTestRound(cs, height, round) - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) // then we dont make a block ... - ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed + ensureNewEventOnChannel(newBlockCh) // first block gets committed + ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ... 
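Genesis and vote timestamps now come from tmtime.Now() instead of time.Now(). Assuming types/time provides a canonical clock (UTC, with the monotonic reading stripped, which is what deterministic serialization needs), the practical difference looks roughly like:

package main

import (
    "fmt"
    "time"
)

// canonicalNow mimics what tmtime.Now is assumed to return: a UTC wall-clock
// timestamp with the monotonic reading stripped, so values compare and
// serialize identically across nodes.
func canonicalNow() time.Time {
    return time.Now().Round(0).UTC()
}

func main() {
    local := time.Now()     // local zone, carries a monotonic reading
    canon := canonicalNow() // UTC, wall clock only

    fmt.Println("local in UTC:", local.Location() == time.UTC)     // usually false
    fmt.Println("canonical in UTC:", canon.Location() == time.UTC) // true
}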
+ ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed } func TestMempoolProgressInHigherRound(t *testing.T) { @@ -73,13 +71,19 @@ func TestMempoolProgressInHigherRound(t *testing.T) { } startTestRound(cs, height, round) - ensureNewStep(newRoundCh) // first round at first height - ensureNewStep(newBlockCh) // first block gets committed - ensureNewStep(newRoundCh) // first round at next height - deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round - <-timeoutCh - ensureNewStep(newRoundCh) // wait for the next round - ensureNewStep(newBlockCh) // now we can commit the block + ensureNewRoundStep(newRoundCh, height, round) // first round at first height + ensureNewEventOnChannel(newBlockCh) // first block gets committed + + height = height + 1 // moving to the next height + round = 0 + + ensureNewRoundStep(newRoundCh, height, round) // first round at next height + deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round + ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) + + round = round + 1 // moving to the next round + ensureNewRoundStep(newRoundCh, height, round) // wait for the next round + ensureNewEventOnChannel(newBlockCh) // now we can commit the block } func deliverTxsRange(cs *ConsensusState, start, end int) { @@ -89,7 +93,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) { binary.BigEndian.PutUint64(txBytes, uint64(i)) err := cs.mempool.CheckTx(txBytes, nil) if err != nil { - panic(cmn.Fmt("Error after CheckTx: %v", err)) + panic(fmt.Sprintf("Error after CheckTx: %v", err)) } } } @@ -100,7 +104,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - NTxs := 10000 + NTxs := 3000 go deliverTxsRange(cs, 0, NTxs) startTestRound(cs, height, round) @@ -126,7 +130,7 @@ func TestMempoolRmBadTx(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(0)) resDeliver := app.DeliverTx(txBytes) - assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver)) + assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver)) resCommit := app.Commit() assert.True(t, len(resCommit.Data) > 0) @@ -149,7 +153,7 @@ func TestMempoolRmBadTx(t *testing.T) { // check for the tx for { - txs := cs.mempool.Reap(1) + txs := cs.mempool.ReapMaxBytesMaxGas(int64(len(txBytes)), -1) if len(txs) == 0 { emptyMempoolCh <- struct{}{} return @@ -190,7 +194,7 @@ func NewCounterApplication() *CounterApplication { } func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { - return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)} + return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)} } func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx { diff --git a/consensus/metrics.go b/consensus/metrics.go index 253880e84e4..7b4a3fbc996 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -8,6 +8,8 @@ import ( stdprometheus "github.com/prometheus/client_golang/prometheus" ) +const MetricsSubsystem = "consensus" + // Metrics contains metrics exposed by this package. type Metrics struct { // Height of the chain. @@ -30,7 +32,7 @@ type Metrics struct { ByzantineValidatorsPower metrics.Gauge // Time between this and the last block. - BlockIntervalSeconds metrics.Histogram + BlockIntervalSeconds metrics.Gauge // Number of transactions. 
NumTxs metrics.Gauge @@ -38,75 +40,111 @@ type Metrics struct { BlockSizeBytes metrics.Gauge // Total number of transactions. TotalTxs metrics.Gauge + // The latest block height. + CommittedHeight metrics.Gauge + // Whether or not a node is fast syncing. 1 if yes, 0 if no. + FastSyncing metrics.Gauge + + // Number of blockparts transmitted by peer. + BlockParts metrics.Counter } // PrometheusMetrics returns Metrics build using Prometheus client library. -func PrometheusMetrics() *Metrics { +func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "height", Help: "Height of the chain.", }, []string{}), Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "rounds", Help: "Number of rounds.", }, []string{}), Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "validators", Help: "Number of validators.", }, []string{}), ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "validators_power", Help: "Total power of all validators.", }, []string{}), MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "missing_validators", Help: "Number of validators who did not sign.", }, []string{}), MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "missing_validators_power", Help: "Total power of the missing validators.", }, []string{}), ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "byzantine_validators", Help: "Number of validators who tried to double sign.", }, []string{}), ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "byzantine_validators_power", Help: "Total power of the byzantine validators.", }, []string{}), - BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Subsystem: "consensus", + BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "block_interval_seconds", Help: "Time between this and the last block.", - Buckets: []float64{1, 2.5, 5, 10, 60}, }, []string{}), NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "num_txs", Help: "Number of transactions.", }, []string{}), BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "block_size_bytes", Help: "Size of the block.", }, []string{}), TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "total_txs", Help: "Total number of transactions.", }, []string{}), + CommittedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: 
"latest_block_height", + Help: "The latest block height.", + }, []string{}), + FastSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "fast_syncing", + Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", + }, []string{}), + BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_parts", + Help: "Number of blockparts transmitted by peer.", + }, []string{"peer_id"}), } } @@ -124,10 +162,13 @@ func NopMetrics() *Metrics { ByzantineValidators: discard.NewGauge(), ByzantineValidatorsPower: discard.NewGauge(), - BlockIntervalSeconds: discard.NewHistogram(), + BlockIntervalSeconds: discard.NewGauge(), - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewGauge(), - TotalTxs: discard.NewGauge(), + NumTxs: discard.NewGauge(), + BlockSizeBytes: discard.NewGauge(), + TotalTxs: discard.NewGauge(), + CommittedHeight: discard.NewGauge(), + FastSyncing: discard.NewGauge(), + BlockParts: discard.NewCounter(), } } diff --git a/consensus/reactor.go b/consensus/reactor.go index 58ff42ae2b7..12e8e0f1496 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -9,7 +9,6 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" - cstypes "github.com/tendermint/tendermint/consensus/types" cmn "github.com/tendermint/tendermint/libs/common" tmevents "github.com/tendermint/tendermint/libs/events" @@ -17,6 +16,7 @@ import ( "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) const ( @@ -28,6 +28,7 @@ const ( maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. blocksToContributeToBecomeGoodPeer = 10000 + votesToContributeToBecomeGoodPeer = 10000 ) //----------------------------------------------------------------------------- @@ -41,16 +42,27 @@ type ConsensusReactor struct { mtx sync.RWMutex fastSync bool eventBus *types.EventBus + + metrics *Metrics } +type ReactorOption func(*ConsensusReactor) + // NewConsensusReactor returns a new ConsensusReactor with the given // consensusState. -func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor { +func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options ...ReactorOption) *ConsensusReactor { conR := &ConsensusReactor{ conS: consensusState, fastSync: fastSync, + metrics: NopMetrics(), } + conR.updateFastSyncingMetric() conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR) + + for _, option := range options { + option(conR) + } + return conR } @@ -59,6 +71,9 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens func (conR *ConsensusReactor) OnStart() error { conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync()) + // start routine that computes peer statistics for evaluating peer quality + go conR.peerStatsRoutine() + conR.subscribeToBroadcastEvents() if !conR.FastSync() { @@ -93,6 +108,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int conR.mtx.Lock() conR.fastSync = false conR.mtx.Unlock() + conR.metrics.FastSyncing.Set(0) if blocksSynced > 0 { // dont bother with the WAL if we fast synced @@ -157,7 +173,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) { // Send our state to peer. 
diff --git a/consensus/reactor.go b/consensus/reactor.go
index 58ff42ae2b7..12e8e0f1496 100644
--- a/consensus/reactor.go
+++ b/consensus/reactor.go
@@ -9,7 +9,6 @@ import (
 	"github.com/pkg/errors"
 
 	amino "github.com/tendermint/go-amino"
-	cstypes "github.com/tendermint/tendermint/consensus/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	tmevents "github.com/tendermint/tendermint/libs/events"
@@ -17,6 +16,7 @@ import (
 	"github.com/tendermint/tendermint/p2p"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
+	tmtime "github.com/tendermint/tendermint/types/time"
 )
 
 const (
@@ -28,6 +28,7 @@ const (
 	maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes.
 
 	blocksToContributeToBecomeGoodPeer = 10000
+	votesToContributeToBecomeGoodPeer  = 10000
 )
 
 //-----------------------------------------------------------------------------
@@ -41,16 +42,27 @@ type ConsensusReactor struct {
 	mtx      sync.RWMutex
 	fastSync bool
 	eventBus *types.EventBus
+
+	metrics *Metrics
 }
 
+type ReactorOption func(*ConsensusReactor)
+
 // NewConsensusReactor returns a new ConsensusReactor with the given
 // consensusState.
-func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor {
+func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options ...ReactorOption) *ConsensusReactor {
 	conR := &ConsensusReactor{
 		conS:     consensusState,
 		fastSync: fastSync,
+		metrics:  NopMetrics(),
 	}
+	conR.updateFastSyncingMetric()
 	conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
+
+	for _, option := range options {
+		option(conR)
+	}
+
 	return conR
 }
 
@@ -59,6 +71,9 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *Consens
 func (conR *ConsensusReactor) OnStart() error {
 	conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync())
 
+	// start routine that computes peer statistics for evaluating peer quality
+	go conR.peerStatsRoutine()
+
 	conR.subscribeToBroadcastEvents()
 
 	if !conR.FastSync() {
@@ -93,6 +108,7 @@ func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int
 	conR.mtx.Lock()
 	conR.fastSync = false
 	conR.mtx.Unlock()
+	conR.metrics.FastSyncing.Set(0)
 
 	if blocksSynced > 0 {
 		// dont bother with the WAL if we fast synced
@@ -157,7 +173,7 @@ func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) {
 	// Send our state to peer.
 	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
 	if !conR.FastSync() {
-		conR.sendNewRoundStepMessages(peer)
+		conR.sendNewRoundStepMessage(peer)
 	}
 }
 
@@ -188,6 +204,13 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		conR.Switch.StopPeerForError(src, err)
 		return
 	}
+
+	if err = msg.ValidateBasic(); err != nil {
+		conR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
+		conR.Switch.StopPeerForError(src, err)
+		return
+	}
+
 	conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
 
 	// Get peer states
@@ -198,8 +221,8 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		switch msg := msg.(type) {
 		case *NewRoundStepMessage:
 			ps.ApplyNewRoundStepMessage(msg)
-		case *CommitStepMessage:
-			ps.ApplyCommitStepMessage(msg)
+		case *NewValidBlockMessage:
+			ps.ApplyNewValidBlockMessage(msg)
 		case *HasVoteMessage:
 			ps.ApplyHasVoteMessage(msg)
 		case *VoteSetMaj23Message:
@@ -220,13 +243,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			// (and consequently shows which we don't have)
 			var ourVotes *cmn.BitArray
 			switch msg.Type {
-			case types.VoteTypePrevote:
+			case types.PrevoteType:
 				ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
-			case types.VoteTypePrecommit:
+			case types.PrecommitType:
 				ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
 			default:
-				conR.Logger.Error("Bad VoteSetBitsMessage field Type")
-				return
+				panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
 			}
 			src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{
 				Height: msg.Height,
@@ -241,7 +263,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 				"height", hb.Height, "round", hb.Round, "sequence", hb.Sequence,
 				"valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress)
 		default:
-			conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 		}
 
 	case DataChannel:
@@ -257,12 +279,10 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			ps.ApplyProposalPOLMessage(msg)
 		case *BlockPartMessage:
 			ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index)
-			if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 {
-				conR.Switch.MarkPeerAsGood(src)
-			}
+			conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1)
 			conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()}
 		default:
-			conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 		}
 
 	case VoteChannel:
@@ -279,15 +299,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			ps.EnsureVoteBitArrays(height, valSize)
 			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
 			ps.SetHasVote(msg.Vote)
-			if blocks := ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
-				conR.Switch.MarkPeerAsGood(src)
-			}
 
 			cs.peerMsgQueue <- msgInfo{msg, src.ID()}
 
 		default:
 			// don't punish (leave room for soft upgrades)
-			conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 		}
 
 	case VoteSetBitsChannel:
@@ -305,13 +322,12 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			if height == msg.Height {
 				var ourVotes *cmn.BitArray
 				switch msg.Type {
-				case types.VoteTypePrevote:
+				case types.PrevoteType:
 					ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID)
-				case types.VoteTypePrecommit:
+				case types.PrecommitType:
 					ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID)
 				default:
-					conR.Logger.Error("Bad VoteSetBitsMessage field Type")
-					return
+					panic("Bad VoteSetBitsMessage field Type. Forgot to add a check in ValidateBasic?")
 				}
 				ps.ApplyVoteSetBitsMessage(msg, ourVotes)
 			} else {
@@ -319,11 +335,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 			}
 		default:
 			// don't punish (leave room for soft upgrades)
-			conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+			conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 		}
 
 	default:
-		conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID))
+		conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID))
 	}
 
 	if err != nil {
@@ -353,7 +369,12 @@ func (conR *ConsensusReactor) subscribeToBroadcastEvents() {
 	const subscriber = "consensus-reactor"
 	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep,
 		func(data tmevents.EventData) {
-			conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState))
+			conR.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
+		})
+
+	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventValidBlock,
+		func(data tmevents.EventData) {
+			conR.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
 		})
 
 	conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote,
@@ -379,14 +400,20 @@ func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartb
 	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg))
 }
 
-func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) {
-	nrsMsg, csMsg := makeRoundStepMessages(rs)
-	if nrsMsg != nil {
-		conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
-	}
-	if csMsg != nil {
-		conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
+func (conR *ConsensusReactor) broadcastNewRoundStepMessage(rs *cstypes.RoundState) {
+	nrsMsg := makeRoundStepMessage(rs)
+	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
+}
+
+func (conR *ConsensusReactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
+	csMsg := &NewValidBlockMessage{
+		Height:           rs.Height,
+		Round:            rs.Round,
+		BlockPartsHeader: rs.ProposalBlockParts.Header(),
+		BlockParts:       rs.ProposalBlockParts.BitArray(),
+		IsCommit:         rs.Step == cstypes.RoundStepCommit,
 	}
+	conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
 }
 
 // Broadcasts HasVoteMessage to peers that care.
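The reactor is now configured through functional options applied on top of the NopMetrics() default. A hedged sketch of the wiring, not part of the patch — csState and fastSync are assumed to come from node setup, and ReactorMetrics is defined further down in this file's diff:

    package main

    import "github.com/tendermint/tendermint/consensus"

    // newReactor is illustrative only. Options run after the NopMetrics()
    // default is assigned, so omitting ReactorMetrics leaves metrics a no-op.
    func newReactor(csState *consensus.ConsensusState, fastSync bool) *consensus.ConsensusReactor {
        metrics := consensus.PrometheusMetrics("tendermint")
        return consensus.NewConsensusReactor(csState, fastSync,
            consensus.ReactorMetrics(metrics))
    }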
@@ -415,7 +442,7 @@ func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) {
 	*/
 }
 
-func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) {
+func makeRoundStepMessage(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage) {
 	nrsMsg = &NewRoundStepMessage{
 		Height: rs.Height,
 		Round:  rs.Round,
@@ -423,25 +450,13 @@ func makeRoundStepMessages(rs *cstypes.RoundState,
 		SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()),
 		LastCommitRound:       rs.LastCommit.Round(),
 	}
-	if rs.Step == cstypes.RoundStepCommit {
-		csMsg = &CommitStepMessage{
-			Height:           rs.Height,
-			BlockPartsHeader: rs.ProposalBlockParts.Header(),
-			BlockParts:       rs.ProposalBlockParts.BitArray(),
-		}
-	}
 	return
 }
 
-func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) {
+func (conR *ConsensusReactor) sendNewRoundStepMessage(peer p2p.Peer) {
 	rs := conR.conS.GetRoundState()
-	nrsMsg, csMsg := makeRoundStepMessages(rs)
-	if nrsMsg != nil {
-		peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
-	}
-	if csMsg != nil {
-		peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg))
-	}
+	nrsMsg := makeRoundStepMessage(rs)
+	peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg))
 }
 
 func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) {
@@ -482,7 +497,7 @@ OUTER_LOOP:
 			if prs.ProposalBlockParts == nil {
 				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
 				if blockMeta == nil {
-					cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d",
+					cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
 						prs.Height, conR.conS.blockStore.Height()))
 				}
 				ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
@@ -496,7 +511,7 @@ OUTER_LOOP:
 		// If height and round don't match, sleep.
 		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
 			//logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
-			time.Sleep(conR.conS.config.PeerGossipSleep())
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 			continue OUTER_LOOP
 		}
@@ -512,6 +527,7 @@ OUTER_LOOP:
 				msg := &ProposalMessage{Proposal: rs.Proposal}
 				logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round)
 				if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) {
+					// NOTE[ZM]: A peer might have received different proposal msg so this Proposal msg will be rejected!
 					ps.SetHasProposal(rs.Proposal)
 				}
 			}
@@ -532,7 +548,7 @@ OUTER_LOOP:
 		}
 
 		// Nothing to do. Sleep.
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		continue OUTER_LOOP
 	}
 }
@@ -546,12 +562,12 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 		if blockMeta == nil {
 			logger.Error("Failed to load block meta",
 				"ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height())
-			time.Sleep(conR.conS.config.PeerGossipSleep())
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 			return
 		} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
 			logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
 				"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
-			time.Sleep(conR.conS.config.PeerGossipSleep())
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 			return
 		}
 		// Load the part
@@ -559,7 +575,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 		if part == nil {
 			logger.Error("Could not load part", "index", index,
 				"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
-			time.Sleep(conR.conS.config.PeerGossipSleep())
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 			return
 		}
 		// Send the part
@@ -577,7 +593,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 		return
 	}
 	//logger.Info("No parts to send in catch-up, sleeping")
-	time.Sleep(conR.conS.config.PeerGossipSleep())
+	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 }
 
 func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -646,7 +662,7 @@ OUTER_LOOP:
 			sleeping = 1
 		}
 
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		continue OUTER_LOOP
 	}
 }
@@ -727,10 +743,10 @@ OUTER_LOOP:
 				peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
 					Height:  prs.Height,
 					Round:   prs.Round,
-					Type:    types.VoteTypePrevote,
+					Type:    types.PrevoteType,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -744,10 +760,10 @@ OUTER_LOOP:
 				peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
 					Height:  prs.Height,
 					Round:   prs.Round,
-					Type:    types.VoteTypePrecommit,
+					Type:    types.PrecommitType,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -761,10 +777,10 @@ OUTER_LOOP:
 				peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
 					Height:  prs.Height,
 					Round:   prs.ProposalPOLRound,
-					Type:    types.VoteTypePrevote,
+					Type:    types.PrevoteType,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -780,19 +796,56 @@ OUTER_LOOP:
 			peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{
 				Height:  prs.Height,
 				Round:   commit.Round(),
-				Type:    types.VoteTypePrecommit,
+				Type:    types.PrecommitType,
 				BlockID: commit.BlockID,
 			}))
-			time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+			time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 		}
 
-		time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 
 		continue OUTER_LOOP
 	}
 }
 
+func (conR *ConsensusReactor) peerStatsRoutine() {
+	for {
+		if !conR.IsRunning() {
+			conR.Logger.Info("Stopping peerStatsRoutine")
+			return
+		}
+
+		select {
+		case msg := <-conR.conS.statsMsgQueue:
+			// Get peer
+			peer := conR.Switch.Peers().Get(msg.PeerID)
+			if peer == nil {
+				conR.Logger.Debug("Attempt to update stats for non-existent peer",
+					"peer", msg.PeerID)
+				continue
+			}
+			// Get peer state
+			ps := peer.Get(types.PeerStateKey).(*PeerState)
+			switch msg.Msg.(type) {
+			case *VoteMessage:
+				if numVotes := ps.RecordVote(); numVotes%votesToContributeToBecomeGoodPeer == 0 {
+					conR.Switch.MarkPeerAsGood(peer)
+				}
+			case *BlockPartMessage:
+				if numParts := ps.RecordBlockPart(); numParts%blocksToContributeToBecomeGoodPeer == 0 {
+					conR.Switch.MarkPeerAsGood(peer)
+				}
+			}
+		case <-conR.conS.Quit():
+			return
+
+		case <-conR.Quit():
+			return
+		}
+	}
+}
+
 // String returns a string representation of the ConsensusReactor.
 // NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables.
 // TODO: improve!
@@ -813,6 +866,21 @@ func (conR *ConsensusReactor) StringIndented(indent string) string {
 	return s
 }
 
+func (conR *ConsensusReactor) updateFastSyncingMetric() {
+	var fastSyncing float64
+	if conR.fastSync {
+		fastSyncing = 1
+	} else {
+		fastSyncing = 0
+	}
+	conR.metrics.FastSyncing.Set(fastSyncing)
+}
+
+// ReactorMetrics sets the metrics
+func ReactorMetrics(metrics *Metrics) ReactorOption {
+	return func(conR *ConsensusReactor) { conR.metrics = metrics }
+}
+
 //-----------------------------------------------------------------------------
 
 var (
@@ -835,15 +903,13 @@ type PeerState struct {
 
 // peerStateStats holds internal statistics for a peer.
 type peerStateStats struct {
-	LastVoteHeight      int64 `json:"last_vote_height"`
-	Votes               int   `json:"votes"`
-	LastBlockPartHeight int64 `json:"last_block_part_height"`
-	BlockParts          int   `json:"block_parts"`
+	Votes      int `json:"votes"`
+	BlockParts int `json:"block_parts"`
 }
 
 func (pss peerStateStats) String() string {
-	return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}",
-		pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts)
+	return fmt.Sprintf("peerStateStats{votes: %d, blockParts: %d}",
+		pss.Votes, pss.BlockParts)
 }
 
 // NewPeerState returns a new PeerState for the given Peer
@@ -902,13 +968,20 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
 	if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round {
 		return
 	}
+
 	if ps.PRS.Proposal {
 		return
 	}
 
 	ps.PRS.Proposal = true
-	ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader
-	ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total)
+
+	// ps.PRS.ProposalBlockParts is set due to NewValidBlockMessage
+	if ps.PRS.ProposalBlockParts != nil {
+		return
+	}
+
+	ps.PRS.ProposalBlockPartsHeader = proposal.BlockID.PartsHeader
+	ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockID.PartsHeader.Total)
 	ps.PRS.ProposalPOLRound = proposal.POLRound
 	ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received.
 }
@@ -944,7 +1017,11 @@ func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool {
 	if vote, ok := ps.PickVoteToSend(votes); ok {
 		msg := &VoteMessage{vote}
 		ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote)
-		return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg))
+		if ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) {
+			ps.SetHasVote(vote)
+			return true
+		}
+		return false
 	}
 	return false
 }
@@ -960,7 +1037,7 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote
 		return nil, false
 	}
 
-	height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()
+	height, round, type_, size := votes.Height(), votes.Round(), types.SignedMsgType(votes.Type()), votes.Size()
 
 	// Lazily set data using 'votes'.
 	if votes.IsCommit() {
@@ -973,13 +1050,12 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote
 		return nil, false // Not something worth sending
 	}
 	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
-		ps.setHasVote(height, round, type_, index)
 		return votes.GetByIndex(index), true
 	}
 	return nil, false
 }
 
-func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray {
+func (ps *PeerState) getVoteBitArray(height int64, round int, type_ types.SignedMsgType) *cmn.BitArray {
 	if !types.IsVoteTypeValid(type_) {
 		return nil
 	}
@@ -987,25 +1063,25 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
 	if ps.PRS.Height == height {
 		if ps.PRS.Round == round {
 			switch type_ {
-			case types.VoteTypePrevote:
+			case types.PrevoteType:
 				return ps.PRS.Prevotes
-			case types.VoteTypePrecommit:
+			case types.PrecommitType:
 				return ps.PRS.Precommits
 			}
 		}
 		if ps.PRS.CatchupCommitRound == round {
 			switch type_ {
-			case types.VoteTypePrevote:
+			case types.PrevoteType:
 				return nil
-			case types.VoteTypePrecommit:
+			case types.PrecommitType:
 				return ps.PRS.CatchupCommit
 			}
 		}
 		if ps.PRS.ProposalPOLRound == round {
 			switch type_ {
-			case types.VoteTypePrevote:
+			case types.PrevoteType:
 				return ps.PRS.ProposalPOL
-			case types.VoteTypePrecommit:
+			case types.PrecommitType:
 				return nil
 			}
 		}
@@ -1014,9 +1090,9 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.B
 	if ps.PRS.Height == height+1 {
 		if ps.PRS.LastCommitRound == round {
 			switch type_ {
-			case types.VoteTypePrevote:
+			case types.PrevoteType:
 				return nil
-			case types.VoteTypePrecommit:
+			case types.PrecommitType:
 				return ps.PRS.LastCommit
 			}
 		}
@@ -1034,7 +1110,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
 		NOTE: This is wrong, 'round' could change.
 		e.g. if orig round is not the same as block LastCommit round.
 		if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
-			cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
+			cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
 		}
 	*/
 	if ps.PRS.CatchupCommitRound == round {
@@ -1079,18 +1155,14 @@ func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) {
 	}
 }
 
-// RecordVote updates internal statistics for this peer by recording the vote.
-// It returns the total number of votes (1 per block). This essentially means
-// the number of blocks for which peer has been sending us votes.
-func (ps *PeerState) RecordVote(vote *types.Vote) int {
+// RecordVote increments internal votes related statistics for this peer.
+// It returns the total number of added votes.
+func (ps *PeerState) RecordVote() int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 
-	if ps.Stats.LastVoteHeight >= vote.Height {
-		return ps.Stats.Votes
-	}
-	ps.Stats.LastVoteHeight = vote.Height
 	ps.Stats.Votes++
+
 	return ps.Stats.Votes
 }
@@ -1103,25 +1175,17 @@ func (ps *PeerState) VotesSent() int {
 	return ps.Stats.Votes
 }
 
-// RecordBlockPart updates internal statistics for this peer by recording the
-// block part. It returns the total number of block parts (1 per block). This
-// essentially means the number of blocks for which peer has been sending us
-// block parts.
-func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int {
+// RecordBlockPart increments internal block part related statistics for this peer.
+// It returns the total number of added block parts.
+func (ps *PeerState) RecordBlockPart() int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 
-	if ps.Stats.LastBlockPartHeight >= bp.Height {
-		return ps.Stats.BlockParts
-	}
-
-	ps.Stats.LastBlockPartHeight = bp.Height
 	ps.Stats.BlockParts++
 	return ps.Stats.BlockParts
 }
 
-// BlockPartsSent returns the number of blocks for which peer has been sending
-// us block parts.
+// BlockPartsSent returns the number of useful block parts the peer has sent us.
 func (ps *PeerState) BlockPartsSent() int {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
@@ -1137,8 +1201,8 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
 	ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex)
 }
 
-func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) {
-	logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round))
+func (ps *PeerState) setHasVote(height int64, round int, type_ types.SignedMsgType, index int) {
+	logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round))
 	logger.Debug("setHasVote", "type", type_, "index", index)
 
 	// NOTE: some may be nil BitArrays -> no side effects.
@@ -1161,11 +1225,10 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
 	// Just remember these values.
 	psHeight := ps.PRS.Height
 	psRound := ps.PRS.Round
-	//psStep := ps.PRS.Step
 	psCatchupCommitRound := ps.PRS.CatchupCommitRound
 	psCatchupCommit := ps.PRS.CatchupCommit
 
-	startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
+	startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
 	ps.PRS.Height = msg.Height
 	ps.PRS.Round = msg.Round
 	ps.PRS.Step = msg.Step
@@ -1202,8 +1265,8 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
 	}
 }
 
-// ApplyCommitStepMessage updates the peer state for the new commit.
-func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
+// ApplyNewValidBlockMessage updates the peer state for the new valid block.
+func (ps *PeerState) ApplyNewValidBlockMessage(msg *NewValidBlockMessage) {
 	ps.mtx.Lock()
 	defer ps.mtx.Unlock()
 
@@ -1211,6 +1274,10 @@
 		return
 	}
 
+	if ps.PRS.Round != msg.Round && !msg.IsCommit {
+		return
+	}
+
 	ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader
 	ps.PRS.ProposalBlockParts = msg.BlockParts
 }
@@ -1289,12 +1356,14 @@ func (ps *PeerState) StringIndented(indent string) string {
 // Messages
 
 // ConsensusMessage is a message that can be sent and received on the ConsensusReactor
-type ConsensusMessage interface{}
+type ConsensusMessage interface {
+	ValidateBasic() error
+}
 
 func RegisterConsensusMessages(cdc *amino.Codec) {
 	cdc.RegisterInterface((*ConsensusMessage)(nil), nil)
 	cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil)
-	cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil)
+	cdc.RegisterConcrete(&NewValidBlockMessage{}, "tendermint/NewValidBlockMessage", nil)
 	cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil)
 	cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil)
 	cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil)
@@ -1325,6 +1394,27 @@ type NewRoundStepMessage struct {
 	LastCommitRound       int
 }
 
+// ValidateBasic performs basic validation.
+func (m *NewRoundStepMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if !m.Step.IsValid() {
+		return errors.New("Invalid Step")
+	}
+
+	// NOTE: SecondsSinceStartTime may be negative
+
+	if (m.Height == 1 && m.LastCommitRound != -1) ||
+		(m.Height > 1 && m.LastCommitRound < -1) { // TODO: #2737 LastCommitRound should always be >= 0 for heights > 1
+		return errors.New("Invalid LastCommitRound (for 1st block: -1, for others: >= 0)")
+	}
+	return nil
+}
+
 // String returns a string representation.
 func (m *NewRoundStepMessage) String() string {
 	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
@@ -1333,16 +1423,40 @@ func (m *NewRoundStepMessage) String() string {
 
 //-------------------------------------
 
-// CommitStepMessage is sent when a block is committed.
-type CommitStepMessage struct {
+// NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
+// i.e., there is a Proposal for block B and 2/3+ prevotes for the block B in the round r.
+// In case the block is also committed, then IsCommit flag is set to true.
+type NewValidBlockMessage struct {
 	Height           int64
+	Round            int
 	BlockPartsHeader types.PartSetHeader
 	BlockParts       *cmn.BitArray
+	IsCommit         bool
+}
+
+// ValidateBasic performs basic validation.
+func (m *NewValidBlockMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if err := m.BlockPartsHeader.ValidateBasic(); err != nil {
+		return fmt.Errorf("Wrong BlockPartsHeader: %v", err)
+	}
+	if m.BlockParts.Size() != m.BlockPartsHeader.Total {
+		return fmt.Errorf("BlockParts bit array size %d not equal to BlockPartsHeader.Total %d",
+			m.BlockParts.Size(),
+			m.BlockPartsHeader.Total)
+	}
+	return nil
 }
 
 // String returns a string representation.
-func (m *CommitStepMessage) String() string {
-	return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts)
+func (m *NewValidBlockMessage) String() string {
+	return fmt.Sprintf("[ValidBlockMessage H:%v R:%v BP:%v BA:%v IsCommit:%v]",
+		m.Height, m.Round, m.BlockPartsHeader, m.BlockParts, m.IsCommit)
 }
 
 //-------------------------------------
@@ -1352,6 +1466,11 @@ type ProposalMessage struct {
 	Proposal *types.Proposal
 }
 
+// ValidateBasic performs basic validation.
+func (m *ProposalMessage) ValidateBasic() error {
+	return m.Proposal.ValidateBasic()
+}
+
 // String returns a string representation.
func (m *ProposalMessage) String() string {
 	return fmt.Sprintf("[Proposal %v]", m.Proposal)
@@ -1366,6 +1485,20 @@ type ProposalPOLMessage struct {
 	ProposalPOL      *cmn.BitArray
 }
 
+// ValidateBasic performs basic validation.
+func (m *ProposalPOLMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.ProposalPOLRound < 0 {
+		return errors.New("Negative ProposalPOLRound")
+	}
+	if m.ProposalPOL.Size() == 0 {
+		return errors.New("Empty ProposalPOL bit array")
+	}
+	return nil
+}
+
 // String returns a string representation.
 func (m *ProposalPOLMessage) String() string {
 	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
@@ -1380,6 +1513,20 @@ type BlockPartMessage struct {
 	Part   *types.Part
 }
 
+// ValidateBasic performs basic validation.
+func (m *BlockPartMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if err := m.Part.ValidateBasic(); err != nil {
+		return fmt.Errorf("Wrong Part: %v", err)
+	}
+	return nil
+}
+
 // String returns a string representation.
 func (m *BlockPartMessage) String() string {
 	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
@@ -1392,6 +1539,11 @@ type VoteMessage struct {
 	Vote *types.Vote
 }
 
+// ValidateBasic performs basic validation.
+func (m *VoteMessage) ValidateBasic() error {
+	return m.Vote.ValidateBasic()
+}
+
 // String returns a string representation.
 func (m *VoteMessage) String() string {
 	return fmt.Sprintf("[Vote %v]", m.Vote)
@@ -1403,10 +1555,27 @@ func (m *VoteMessage) String() string {
 type HasVoteMessage struct {
 	Height int64
 	Round  int
-	Type   byte
+	Type   types.SignedMsgType
 	Index  int
 }
 
+// ValidateBasic performs basic validation.
+func (m *HasVoteMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if !types.IsVoteTypeValid(m.Type) {
+		return errors.New("Invalid Type")
+	}
+	if m.Index < 0 {
+		return errors.New("Negative Index")
+	}
+	return nil
+}
+
 // String returns a string representation.
 func (m *HasVoteMessage) String() string {
 	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type)
@@ -1418,10 +1587,27 @@ func (m *HasVoteMessage) String() string {
 type VoteSetMaj23Message struct {
 	Height  int64
 	Round   int
-	Type    byte
+	Type    types.SignedMsgType
 	BlockID types.BlockID
 }
 
+// ValidateBasic performs basic validation.
+func (m *VoteSetMaj23Message) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if !types.IsVoteTypeValid(m.Type) {
+		return errors.New("Invalid Type")
+	}
+	if err := m.BlockID.ValidateBasic(); err != nil {
+		return fmt.Errorf("Wrong BlockID: %v", err)
+	}
+	return nil
+}
+
 // String returns a string representation.
 func (m *VoteSetMaj23Message) String() string {
 	return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID)
@@ -1433,11 +1619,29 @@ func (m *VoteSetMaj23Message) String() string {
 type VoteSetBitsMessage struct {
 	Height  int64
 	Round   int
-	Type    byte
+	Type    types.SignedMsgType
 	BlockID types.BlockID
 	Votes   *cmn.BitArray
 }
 
+// ValidateBasic performs basic validation.
+func (m *VoteSetBitsMessage) ValidateBasic() error {
+	if m.Height < 0 {
+		return errors.New("Negative Height")
+	}
+	if m.Round < 0 {
+		return errors.New("Negative Round")
+	}
+	if !types.IsVoteTypeValid(m.Type) {
+		return errors.New("Invalid Type")
+	}
+	if err := m.BlockID.ValidateBasic(); err != nil {
+		return fmt.Errorf("Wrong BlockID: %v", err)
+	}
+	// NOTE: Votes.Size() can be zero if the node does not have any
+	return nil
+}
+
 // String returns a string representation.
 func (m *VoteSetBitsMessage) String() string {
 	return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes)
@@ -1450,6 +1654,11 @@ type ProposalHeartbeatMessage struct {
 	Heartbeat *types.Heartbeat
 }
 
+// ValidateBasic performs basic validation.
+func (m *ProposalHeartbeatMessage) ValidateBasic() error {
+	return m.Heartbeat.ValidateBasic()
+}
+
 // String returns a string representation.
 func (m *ProposalHeartbeatMessage) String() string {
 	return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat)
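Every wire message now implements ValidateBasic, and Receive drops peers whose messages fail it before any peer state is touched. A sketch of the convention for a hypothetical new message type — the type itself is illustrative, not part of the patch, and assumes the same "github.com/pkg/errors" import reactor.go already uses:

    package consensus

    import "github.com/pkg/errors"

    // HypotheticalMessage shows the shape a new wire message must have to
    // satisfy ConsensusMessage: stateless sanity checks only.
    type HypotheticalMessage struct {
        Height int64
        Round  int
    }

    // ValidateBasic performs basic validation, mirroring the messages above.
    func (m *HypotheticalMessage) ValidateBasic() error {
        if m.Height < 0 {
            return errors.New("Negative Height")
        }
        if m.Round < 0 {
            return errors.New("Negative Round")
        }
        return nil
    }

Note the pattern in Receive: a Type value that slips past ValidateBasic now panics rather than being logged, which is why the checks must stay exhaustive.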
diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go
index 63dd9075fdb..2758f3fabcf 100644
--- a/consensus/reactor_test.go
+++ b/consensus/reactor_test.go
@@ -11,23 +11,20 @@ import (
 	"testing"
 	"time"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
 	bc "github.com/tendermint/tendermint/blockchain"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	cfg "github.com/tendermint/tendermint/config"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
 	mempl "github.com/tendermint/tendermint/mempool"
-	sm "github.com/tendermint/tendermint/state"
-
-	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/p2p"
-	p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
+	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
 )
 
 func init() {
@@ -100,6 +97,9 @@ func TestReactorBasic(t *testing.T) {
 
 // Ensure we can process blocks with evidence
 func TestReactorWithEvidence(t *testing.T) {
+	types.RegisterMockEvidences(cdc)
+	types.RegisterMockEvidences(types.GetCodec())
+
 	nValidators := 4
 	testName := "consensus_reactor_test"
 	tickerFunc := newMockTickerFunc(true)
@@ -115,10 +115,10 @@ func TestReactorWithEvidence(t *testing.T) {
 	for i := 0; i < nValidators; i++ {
 		stateDB := dbm.NewMemDB() // each state needs its own db
 		state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
-		thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i))
+		thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
 		ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
 		app := appFunc()
-		vals := types.TM2PB.Validators(state.Validators)
+		vals := types.TM2PB.ValidatorUpdates(state.Validators)
 		app.InitChain(abci.RequestInitChain{Validators: vals})
 
 		pv := privVals[i]
@@ -194,7 +194,8 @@ func newMockEvidencePool(val []byte) *mockEvidencePool {
 	}
 }
 
-func (m *mockEvidencePool) PendingEvidence() []types.Evidence {
+// NOTE: maxBytes is ignored
+func (m *mockEvidencePool) PendingEvidence(maxBytes int64) []types.Evidence {
 	if m.height > 0 {
 		return m.ev
 	}
@@ -207,7 +208,7 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) {
 			panic("block has no evidence")
 		}
 	}
-	m.height += 1
+	m.height++
 }
 
 //------------------------------------
@@ -244,110 +245,25 @@ func TestReactorProposalHeartbeats(t *testing.T) {
 	}, css)
 }
 
-// Test we record block parts from other peers
-func TestReactorRecordsBlockParts(t *testing.T) {
-	// create dummy peer
-	peer := p2pdummy.NewPeer()
-	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
-	peer.Set(types.PeerStateKey, ps)
-
-	// create reactor
-	css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore)
-	reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
-	reactor.SetEventBus(css[0].eventBus)
-	reactor.SetLogger(log.TestingLogger())
-	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
-	reactor.SetSwitch(sw)
-	err := reactor.Start()
-	require.NoError(t, err)
-	defer reactor.Stop()
-
-	// 1) new block part
-	parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
-	msg := &BlockPartMessage{
-		Height: 2,
-		Round:  0,
-		Part:   parts.GetPart(0),
-	}
-	bz, err := cdc.MarshalBinaryBare(msg)
-	require.NoError(t, err)
-
-	reactor.Receive(DataChannel, peer, bz)
-	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1")
-
-	// 2) block part with the same height, but different round
-	msg.Round = 1
-
-	bz, err = cdc.MarshalBinaryBare(msg)
-	require.NoError(t, err)
-
-	reactor.Receive(DataChannel, peer, bz)
-	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
-
-	// 3) block part from earlier height
-	msg.Height = 1
-	msg.Round = 0
-
-	bz, err = cdc.MarshalBinaryBare(msg)
-	require.NoError(t, err)
-
-	reactor.Receive(DataChannel, peer, bz)
-	require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same")
-}
-
-// Test we record votes from other peers
-func TestReactorRecordsVotes(t *testing.T) {
-	// create dummy peer
-	peer := p2pdummy.NewPeer()
-	ps := NewPeerState(peer).SetLogger(log.TestingLogger())
-	peer.Set(types.PeerStateKey, ps)
-
-	// create reactor
-	css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore)
-	reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states
-	reactor.SetEventBus(css[0].eventBus)
-	reactor.SetLogger(log.TestingLogger())
-	sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw })
-	reactor.SetSwitch(sw)
-	err := reactor.Start()
-	require.NoError(t, err)
-	defer reactor.Stop()
-	_, val := css[0].state.Validators.GetByIndex(0)
-
-	// 1) new vote
-	vote := &types.Vote{
-		ValidatorIndex:   0,
-		ValidatorAddress: val.Address,
-		Height:           2,
-		Round:            0,
-		Timestamp:        time.Now().UTC(),
-		Type:             types.VoteTypePrevote,
-		BlockID:          types.BlockID{},
-	}
-	bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote})
-	require.NoError(t, err)
-
-	reactor.Receive(VoteChannel, peer, bz)
-	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1")
-
-	// 2) vote with the same height, but different round
-	vote.Round = 1
-
-	bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
-	require.NoError(t, err)
-
-	reactor.Receive(VoteChannel, peer, bz)
-	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
+// Test we record stats about votes and block parts from other peers.
+func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
+	N := 4
+	css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
+	reactors, eventChans, eventBuses := startConsensusNet(t, css, N)
+	defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
 
-	// 3) vote from earlier height
-	vote.Height = 1
-	vote.Round = 0
+	// wait till everyone makes the first new block
+	timeoutWaitGroup(t, N, func(j int) {
+		<-eventChans[j]
+	}, css)
 
-	bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote})
-	require.NoError(t, err)
+	// Get peer
+	peer := reactors[1].Switch.Peers().List()[0]
+	// Get peer state
+	ps := peer.Get(types.PeerStateKey).(*PeerState)
 
-	reactor.Receive(VoteChannel, peer, bz)
-	assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same")
+	assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
+	assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
 }
 
 //-------------------------------------------------------------
@@ -540,7 +456,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}
 		err := validateBlock(newBlock, activeVals)
 		assert.Nil(t, err)
 		for _, tx := range txs {
-			css[j].mempool.CheckTx(tx, nil)
+			err := css[j].mempool.CheckTx(tx, nil)
 			assert.Nil(t, err)
 		}
 	}, css)
diff --git a/consensus/replay.go b/consensus/replay.go
index bb1f2e46db9..abc43eb578d 100644
--- a/consensus/replay.go
+++ b/consensus/replay.go
@@ -11,6 +11,7 @@ import (
 	"time"
 
 	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/version"
 	//auto "github.com/tendermint/tendermint/libs/autofile"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
@@ -19,7 +20,6 @@ import (
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	"github.com/tendermint/tendermint/version"
 )
 
 var crc32c = crc32.MakeTable(crc32.Castagnoli)
@@ -73,7 +73,7 @@ func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan
 		case *ProposalMessage:
 			p := msg.Proposal
 			cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
-				p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID)
+				p.BlockID.PartsHeader, "pol", p.POLRound, "peer", peerID)
 		case *BlockPartMessage:
 			cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID)
 		case *VoteMessage:
@@ -227,7 +227,7 @@ func (h *Handshaker) NBlocks() int {
 func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 
 	// Handshake is done via ABCI Info on the query conn.
-	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: version.Version})
+	res, err := proxyApp.Query().InfoSync(proxy.RequestInfo)
 	if err != nil {
 		return fmt.Errorf("Error calling Info: %v", err)
 	}
@@ -238,9 +238,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 	}
 	appHash := res.LastBlockAppHash
 
-	h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
+	h.logger.Info("ABCI Handshake App Info",
+		"height", blockHeight,
+		"hash", fmt.Sprintf("%X", appHash),
+		"software-version", res.Version,
+		"protocol-version", res.AppVersion,
+	)
 
-	// TODO: check app version.
+	// Set AppVersion on the state.
+	h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
 
 	// Replay blocks up to the latest in the blockstore.
 	_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
@@ -258,21 +264,25 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
 
 // Replay all blocks since appBlockHeight and ensure the result matches the current state.
 // Returns the final AppHash or an error.
-func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) {
-
+func (h *Handshaker) ReplayBlocks(
	state sm.State,
	appHash []byte,
	appBlockHeight int64,
	proxyApp proxy.AppConns,
+) ([]byte, error) {
 	storeBlockHeight := h.store.Height()
 	stateBlockHeight := state.LastBlockHeight
 	h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
 
-	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain
+	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
 	if appBlockHeight == 0 {
-		validators := types.TM2PB.Validators(state.Validators)
+		nextVals := types.TM2PB.ValidatorUpdates(state.NextValidators) // state.Validators would work too.
 		csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
 		req := abci.RequestInitChain{
 			Time:            h.genDoc.GenesisTime,
 			ChainId:         h.genDoc.ChainID,
 			ConsensusParams: csParams,
-			Validators:      validators,
+			Validators:      nextVals,
 			AppStateBytes:   h.genDoc.AppState,
 		}
 		res, err := proxyApp.Consensus().InitChainSync(req)
@@ -280,15 +290,14 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 			return nil, err
 		}
 
-		// if the app returned validators
-		// or consensus params, update the state
-		// with the them
+		// If the app returned validators or consensus params, update the state.
 		if len(res.Validators) > 0 {
-			vals, err := types.PB2TM.Validators(res.Validators)
+			vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
 			if err != nil {
 				return nil, err
 			}
 			state.Validators = types.NewValidatorSet(vals)
+			state.NextValidators = types.NewValidatorSet(vals)
 		}
 		if res.ConsensusParams != nil {
 			state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams)
@@ -296,7 +305,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 		sm.SaveState(h.stateDB, state)
 	}
 
-	// First handle edge cases and constraints on the storeBlockHeight
+	// First handle edge cases and constraints on the storeBlockHeight.
 	if storeBlockHeight == 0 {
 		return appHash, checkAppHash(state, appHash)
 
@@ -306,11 +315,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight
 	} else if storeBlockHeight < stateBlockHeight {
 		// the state should never be ahead of the store (this is under tendermint's control)
-		cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
+		cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
 
 	} else if storeBlockHeight > stateBlockHeight+1 {
 		// store should be at most one ahead of the state (this is under tendermint's control)
-		cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
+		cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
 	}
 
 	var err error
diff --git a/consensus/replay_file.go b/consensus/replay_file.go
index 0c0b0dcb169..685eb71f2c3 100644
--- a/consensus/replay_file.go
+++ b/consensus/replay_file.go
@@ -13,12 +13,12 @@ import (
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/types"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/proxy"
+	sm "github.com/tendermint/tendermint/state"
+	"github.com/tendermint/tendermint/types"
 )
 
 const (
@@ -34,7 +34,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console
 	consensusState := newConsensusStateForReplay(config, csConfig)
 
 	if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
-		cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err))
+		cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
 	}
 }
@@ -298,16 +298,21 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
 	// Create proxyAppConn connection (consensus, mempool, query)
 	clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
-	proxyApp := proxy.NewAppConns(clientCreator,
-		NewHandshaker(stateDB, state, blockStore, gdoc))
+	proxyApp := proxy.NewAppConns(clientCreator)
 	err = proxyApp.Start()
 	if err != nil {
-		cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err))
+		cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
+	}
+
+	handshaker := NewHandshaker(stateDB, state, blockStore, gdoc)
+	err = handshaker.Handshake(proxyApp)
+	if err != nil {
+		cmn.Exit(fmt.Sprintf("Error on handshake: %v", err))
 	}
 
 	eventBus := types.NewEventBus()
 	if err := eventBus.Start(); err != nil {
-		cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err))
+		cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
 	}
 
 	mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{}
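The handshake is no longer run implicitly inside proxy.NewAppConns; callers start the app connections first and then handshake explicitly, as replay_file.go now does. A sketch distilling the new startup order — the parameter types are assumptions drawn from the calls in this diff, not a definitive API:

    // startApp mirrors the sequence used in newConsensusStateForReplay above.
    func startApp(clientCreator proxy.ClientCreator, stateDB dbm.DB, state sm.State,
        blockStore sm.BlockStore, genDoc *types.GenesisDoc) (proxy.AppConns, error) {
        proxyApp := proxy.NewAppConns(clientCreator) // no handshaker argument anymore
        if err := proxyApp.Start(); err != nil {
            return nil, err
        }
        // Handshake is now an explicit, separate step; only after it returns is
        // the app state guaranteed to be caught up with the block store.
        if err := NewHandshaker(stateDB, state, blockStore, genDoc).Handshake(proxyApp); err != nil {
            return nil, err
        }
        return proxyApp, nil
    }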
"github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/version" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/libs/log" ) var consensusReplayConfig *cfg.Config @@ -104,14 +103,6 @@ func TestWALCrash(t *testing.T) { {"empty block", func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {}, 1}, - {"block with a smaller part size", - func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { - // XXX: is there a better way to change BlockPartSizeBytes? - cs.state.ConsensusParams.BlockPartSizeBytes = 512 - sm.SaveState(stateDB, cs.state) - go sendTxs(cs, ctx) - }, - 1}, {"many non-empty blocks", func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { go sendTxs(cs, ctx) @@ -347,7 +338,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { t.Fatalf(err.Error()) } - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) store.chain = chain store.commits = commits @@ -361,19 +352,22 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { if nBlocks > 0 { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, nil) - stateDB, state, _ := stateAndStore(config, privVal.GetPubKey()) + proxyApp := proxy.NewAppConns(clientCreator2) + stateDB, state, _ := stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) } // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) handshaker := NewHandshaker(stateDB, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2, handshaker) + proxyApp := proxy.NewAppConns(clientCreator2) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } defer proxyApp.Stop() + if err := handshaker.Handshake(proxyApp); err != nil { + t.Fatalf("Error on abci handshake: %v", err) + } // get the latest app hash from the app res, err := proxyApp.Query().InfoSync(abci.RequestInfo{Version: ""}) @@ -399,7 +393,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { } func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { - testPartSize := st.ConsensusParams.BlockPartSizeBytes + testPartSize := types.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} @@ -418,7 +412,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } defer proxyApp.Stop() - validators := types.TM2PB.Validators(state.Validators) + validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -449,13 +443,13 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) 
sm.State { // run the whole chain against this client to build up the tendermint state clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1"))) - proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock)) + proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { panic(err) } defer proxyApp.Stop() - validators := types.TM2PB.Validators(state.Validators) + validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -494,7 +488,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { return nil, nil, err } if !found { - return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1)) + return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1) } defer gr.Close() // nolint: errcheck @@ -526,16 +520,16 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // if its not the first one, we have a full block if thisBlockParts != nil { var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) if err != nil { panic(err) } if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) + panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) } commitHeight := thisBlockCommit.Precommits[0].Height if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) @@ -549,7 +543,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { return nil, nil, err } case *types.Vote: - if p.Type == types.VoteTypePrecommit { + if p.Type == types.PrecommitType { thisBlockCommit = &types.Commit{ BlockID: p.BlockID, Precommits: []*types.Vote{p}, @@ -559,16 +553,16 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { } // grab the last block too var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(thisBlockParts.GetReader(), block, 0) if err != nil { panic(err) } if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) + panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) } commitHeight := thisBlockCommit.Precommits[0].Height if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesnt match. 
got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) @@ -581,7 +575,7 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { case msgInfo: switch msg := m.Msg.(type) { case *ProposalMessage: - return &msg.Proposal.BlockPartsHeader + return &msg.Proposal.BlockID.PartsHeader case *BlockPartMessage: return msg.Part case *VoteMessage: @@ -595,9 +589,10 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { } // fresh state and mock store -func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) { +func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state.Version.Consensus.App = appVersion store := NewMockBlockStore(config, state.ConsensusParams) return stateDB, state, store } @@ -622,7 +617,7 @@ func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] return &types.BlockMeta{ - BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()}, + BlockID: types.BlockID{block.Hash(), block.MakePartSet(types.BlockPartSizeBytes).Header()}, Header: block.Header, } } @@ -641,23 +636,26 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { func TestInitChainUpdateValidators(t *testing.T) { val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) - app := &initChainApp{vals: types.TM2PB.Validators(vals)} + app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := proxy.NewLocalClientCreator(app) config := ResetConfig("proxy_test_") privVal := privval.LoadFilePV(config.PrivValidatorFile()) - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), 0x0) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) handshaker := NewHandshaker(stateDB, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator, handshaker) + proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) } defer proxyApp.Stop() + if err := handshaker.Handshake(proxyApp); err != nil { + t.Fatalf("Error on abci handshake: %v", err) + } // reload the state, check the validator set was updated state = sm.LoadState(stateDB) @@ -668,7 +666,7 @@ func TestInitChainUpdateValidators(t *testing.T) { assert.Equal(t, newValAddr, expectValAddr) } -func newInitChainApp(vals []abci.Validator) *initChainApp { +func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp { return &initChainApp{ vals: vals, } @@ -677,7 +675,7 @@ func newInitChainApp(vals []abci.Validator) *initChainApp { // returns the vals on InitChain type initChainApp struct { abci.BaseApplication - vals []abci.Validator + vals []abci.ValidatorUpdate } func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { diff --git a/consensus/state.go b/consensus/state.go index 6ffe6ef6431..8c2e292c8e6 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -9,9 +9,10 @@ import ( "sync" "time" - fail "github.com/ebuchman/fail-test" cmn 
"github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/types/time" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" @@ -74,7 +75,6 @@ type ConsensusState struct { privValidator types.PrivValidator // for signing votes // services for creating and executing blocks - // TODO: encapsulate all of this in one "BlockManager" blockExec *sm.BlockExecutor blockStore sm.BlockStore mempool sm.Mempool @@ -83,7 +83,8 @@ type ConsensusState struct { // internal state mtx sync.RWMutex cstypes.RoundState - state sm.State // State until height-1. + triggeredTimeoutPrecommit bool + state sm.State // State until height-1. // state changes may be triggered by: msgs from peers, // msgs from ourself, or by timeouts @@ -91,6 +92,10 @@ type ConsensusState struct { internalMsgQueue chan msgInfo timeoutTicker TimeoutTicker + // information about about added votes and block parts are written on this channel + // so statistics can be computed by reactor + statsMsgQueue chan msgInfo + // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket eventBus *types.EventBus @@ -120,8 +125,8 @@ type ConsensusState struct { metrics *Metrics } -// CSOption sets an optional parameter on the ConsensusState. -type CSOption func(*ConsensusState) +// StateOption sets an optional parameter on the ConsensusState. +type StateOption func(*ConsensusState) // NewConsensusState returns a new ConsensusState. func NewConsensusState( @@ -131,7 +136,7 @@ func NewConsensusState( blockStore sm.BlockStore, mempool sm.Mempool, evpool sm.EvidencePool, - options ...CSOption, + options ...StateOption, ) *ConsensusState { cs := &ConsensusState{ config: config, @@ -141,6 +146,7 @@ func NewConsensusState( peerMsgQueue: make(chan msgInfo, msgQueueSize), internalMsgQueue: make(chan msgInfo, msgQueueSize), timeoutTicker: NewTimeoutTicker(), + statsMsgQueue: make(chan msgInfo, msgQueueSize), done: make(chan struct{}), doWALCatchup: true, wal: nilWAL{}, @@ -154,6 +160,7 @@ func NewConsensusState( cs.setProposal = cs.defaultSetProposal cs.updateToState(state) + // Don't call scheduleRound0 yet. // We do that upon Start(). cs.reconstructLastCommit(state) @@ -179,15 +186,15 @@ func (cs *ConsensusState) SetEventBus(b *types.EventBus) { cs.blockExec.SetEventBus(b) } -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) CSOption { +// StateMetrics sets the metrics. +func StateMetrics(metrics *Metrics) StateOption { return func(cs *ConsensusState) { cs.metrics = metrics } } // String returns a string. func (cs *ConsensusState) String() string { // better not to access shared variables - return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) + return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) } // GetState returns a copy of the chain state. @@ -197,6 +204,15 @@ func (cs *ConsensusState) GetState() sm.State { return cs.state.Copy() } +// GetLastHeight returns the last height committed. +// If there were no blocks, returns 0. +func (cs *ConsensusState) GetLastHeight() int64 { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + return cs.RoundState.Height - 1 +} + // GetRoundState returns a shallow copy of the internal consensus state. 
func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { cs.mtx.RLock() @@ -413,8 +429,8 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) // enterNewRound(height, 0) at cs.StartTime. func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) { - //cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime) - sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple + //cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } @@ -444,14 +460,14 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) + lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.PrecommitType, state.LastValidators) for _, precommit := range seenCommit.Precommits { if precommit == nil { continue } added, err := lastPrecommits.AddVote(precommit) if !added || err != nil { - cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err)) + cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) } } if !lastPrecommits.HasTwoThirdsMajority() { @@ -464,13 +480,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. func (cs *ConsensusState) updateToState(state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { - cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v", + cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { // This might happen when someone else is mutating cs.state. // Someone forgot to pass in state.Copy() somewhere?! - cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", cs.state.LastBlockHeight+1, cs.Height)) } @@ -507,7 +523,7 @@ func (cs *ConsensusState) updateToState(state sm.State) { // to be gathered for the first block. 
// An alternative solution that relies on clocks: // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - cs.StartTime = cs.config.Commit(time.Now()) + cs.StartTime = cs.config.Commit(tmtime.Now()) } else { cs.StartTime = cs.config.Commit(cs.CommitTime) } @@ -516,10 +532,10 @@ func (cs *ConsensusState) updateToState(state sm.State) { cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil - cs.ValidRound = 0 + cs.ValidRound = -1 cs.ValidBlock = nil cs.ValidBlockParts = nil cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) @@ -629,7 +645,11 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { err = cs.setProposal(msg.Proposal) case *BlockPartMessage: // if the proposal is complete, we'll enterPrevote or tryFinalizeCommit - _, err = cs.addProposalBlockPart(msg, peerID) + added, err := cs.addProposalBlockPart(msg, peerID) + if added { + cs.statsMsgQueue <- mi + } + if err != nil && msg.Round != cs.Round { cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round) err = nil @@ -637,7 +657,11 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) { case *VoteMessage: // attempt to add the vote and dupeout the validator if it's a duplicate signature // if the vote gives us a 2/3-any or 2/3-one, we transition - err := cs.tryAddVote(msg.Vote, peerID) + added, err := cs.tryAddVote(msg.Vote, peerID) + if added { + cs.statsMsgQueue <- mi + } + if err == ErrAddingVote { // TODO: punish peer // We probably don't want to stop the peer here. The vote does not @@ -688,9 +712,10 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { cs.enterPrecommit(ti.Height, ti.Round) case cstypes.RoundStepPrecommitWait: cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) + cs.enterPrecommit(ti.Height, ti.Round) cs.enterNewRound(ti.Height, ti.Round+1) default: - panic(cmn.Fmt("Invalid timeout step: %v", ti.Step)) + panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step)) } } @@ -716,15 +741,15 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - if now := time.Now(); cs.StartTime.After(now) { + if now := tmtime.Now(); cs.StartTime.After(now) { logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) } - logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterNewRound(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Increment validators if necessary validators := cs.Validators @@ -749,6 +774,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { cs.ProposalBlockParts = nil } cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping + cs.triggeredTimeoutPrecommit = false cs.eventBus.PublishEventNewRound(cs.RoundStateEvent()) cs.metrics.Rounds.Set(float64(round)) @@ -759,7 +785,8 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) if waitForTxs { if cs.config.CreateEmptyBlocksInterval > 0 { - cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound) + cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, + cstypes.RoundStepNewRound) } go cs.proposalHeartbeat(height, round) } else { @@ -811,10 +838,10 @@ func (cs *ConsensusState) enterPropose(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPropose: @@ -862,10 +889,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { var blockParts *types.PartSet // Decide on block - if cs.LockedBlock != nil { - // If we're locked onto a block, just choose that. - block, blockParts = cs.LockedBlock, cs.LockedBlockParts - } else if cs.ValidBlock != nil { + if cs.ValidBlock != nil { // If there is valid block, choose that. 
block, blockParts = cs.ValidBlock, cs.ValidBlockParts } else { @@ -877,15 +901,9 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { } // Make proposal - polRound, polBlockID := cs.Votes.POLInfo() - proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) + propBlockId := types.BlockID{block.Hash(), blockParts.Header()} + proposal := types.NewProposal(height, round, cs.ValidRound, propBlockId) if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { - // Set fields - /* fields set by setProposal and addBlockPart - cs.Proposal = proposal - cs.ProposalBlock = block - cs.ProposalBlockParts = blockParts - */ // send proposal and block parts on internal msg queue cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) @@ -894,7 +912,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) } cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block)) + cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) } else { if !cs.replayMode { cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) @@ -938,21 +956,29 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts return } + maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes + maxGas := cs.state.ConsensusParams.BlockSize.MaxGas + // bound evidence to 1/10th of the block + evidence := cs.evpool.PendingEvidence(types.MaxEvidenceBytesPerBlock(maxBytes)) // Mempool validated transactions - txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs) - evidence := cs.evpool.PendingEvidence() - block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence) + txs := cs.mempool.ReapMaxBytesMaxGas(types.MaxDataBytes( + maxBytes, + cs.state.Validators.Size(), + len(evidence), + ), maxGas) + proposerAddr := cs.privValidator.GetAddress() + block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr) + return block, parts } // Enter: `timeoutPropose` after entering Propose. // Enter: proposal block and POL is ready. -// Enter: any +2/3 prevotes for future round. // Prevote for LockedBlock if we're locked, or ProposalBlock if valid. // Otherwise vote nil. func (cs *ConsensusState) enterPrevote(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } @@ -962,15 +988,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) { cs.newStep() }() - // fire event for how we got here - if cs.isProposalComplete() { - cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) - } else { - // we received +2/3 prevotes for a future round - // TODO: catchup event? - } - - cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Sign and broadcast vote as necessary cs.doPrevote(height, round) @@ -981,17 +999,18 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) { func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) + // If a block is locked, prevote that. if cs.LockedBlock != nil { logger.Info("enterPrevote: Block was locked") - cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) + cs.signAddVote(types.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) return } // If ProposalBlock is nil, prevote nil. if cs.ProposalBlock == nil { logger.Info("enterPrevote: ProposalBlock is nil") - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1000,7 +1019,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrevoteType, nil, types.PartSetHeader{}) return } @@ -1008,7 +1027,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { // NOTE: the proposal signature is validated when it is received, // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) logger.Info("enterPrevote: ProposalBlock is valid") - cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) + cs.signAddVote(types.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) } // Enter: any +2/3 prevotes at next round. @@ -1016,13 +1035,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) } - logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrevoteWait: @@ -1035,8 +1054,8 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { } // Enter: `timeoutPrevote` after any +2/3 prevotes. +// Enter: `timeoutPrecommit` after any +2/3 precommits. // Enter: +2/3 precomits for block or nil. -// Enter: any +2/3 precommits for next round. // Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round) // else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil, // else, precommit nil otherwise. 
@@ -1044,11 +1063,11 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommit: @@ -1066,7 +1085,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { } else { logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.") } - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1076,7 +1095,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // the latest POLRound should be this round. polRound, _ := cs.Votes.POLInfo() if polRound < round { - cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound)) + cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) } // +2/3 prevoted nil. Unlock and precommit nil. @@ -1085,12 +1104,12 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted for nil.") } else { logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking") - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) } - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) return } @@ -1101,7 +1120,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted locked block. Relocking") cs.LockedRound = round cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) return } @@ -1110,13 +1129,13 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock cs.LockedBlockParts = cs.ProposalBlockParts cs.eventBus.PublishEventLock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) + cs.signAddVote(types.PrecommitType, blockID.Hash, blockID.PartsHeader) return } @@ -1124,7 +1143,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // Fetch that block, unlock, and precommit nil. // The +2/3 prevotes for this round is the POL for our unlock. // TODO: In the future save the POL prevotes for justification. 
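Throughout this file the "no lock" / "no valid block" sentinel moves from 0 to -1 (the LockedRound and ValidRound assignments just below and earlier in updateToState). Rounds are 0-indexed, so round 0 is a legitimate round and 0 was an ambiguous sentinel. A tiny sketch of why -1 is safer; noRound and lockInfo are illustrative names, the diff itself uses the literal -1:

    package main

    import "fmt"

    const noRound = -1 // hypothetical named constant for the sentinel

    type lockInfo struct{ round int }

    // With -1 as the sentinel, a lock taken in round 0 is no longer ambiguous.
    func (l lockInfo) isLocked() bool { return l.round > noRound }

    func main() {
    	fmt.Println(lockInfo{round: 0}.isLocked())       // true: round 0 is a real round
    	fmt.Println(lockInfo{round: noRound}.isLocked()) // false: nothing locked
    }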
- cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { @@ -1132,25 +1151,29 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) } cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) + cs.signAddVote(types.PrecommitType, nil, types.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) - if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + if cs.Height != height || round < cs.Round || (cs.Round == round && cs.triggeredTimeoutPrecommit) { + logger.Debug( + fmt.Sprintf( + "enterPrecommitWait(%v/%v): Invalid args. "+ + "Current state is Height/Round: %v/%v/, triggeredTimeoutPrecommit:%v", + height, round, cs.Height, cs.Round, cs.triggeredTimeoutPrecommit)) return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) } - logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommitWait: - cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) + cs.triggeredTimeoutPrecommit = true cs.newStep() }() @@ -1164,17 +1187,17 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { logger := cs.Logger.With("height", height, "commitRound", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { - logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterCommit: // keep cs.Round the same, commitRound points to the right Precommits set. cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) cs.CommitRound = commitRound - cs.CommitTime = time.Now() + cs.CommitTime = tmtime.Now() cs.newStep() // Maybe finalize immediately. @@ -1203,6 +1226,8 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) } else { // We just need to keep waiting. 
} @@ -1214,7 +1239,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { logger := cs.Logger.With("height", height) if cs.Height != height { - cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) + cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() @@ -1236,7 +1261,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { // Increment height and goto cstypes.RoundStepNewHeight func (cs *ConsensusState) finalizeCommit(height int64) { if cs.Height != height || cs.Step != cstypes.RoundStepCommit { - cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step)) + cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step)) return } @@ -1244,21 +1269,21 @@ func (cs *ConsensusState) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority")) + cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) } if !blockParts.HasHeader(blockID.PartsHeader) { - cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header")) + cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) } if !block.HashesTo(blockID.Hash) { - cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) } if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { - cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err)) + cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) } - cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs), + cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs), "height", block.Height, "hash", block.Hash(), "root", block.AppHash) - cs.Logger.Info(cmn.Fmt("%v", block)) + cs.Logger.Info(fmt.Sprintf("%v", block)) fail.Fail() // XXX @@ -1357,7 +1382,7 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) { if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - cs.metrics.BlockIntervalSeconds.Observe( + cs.metrics.BlockIntervalSeconds.Set( block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), ) } @@ -1365,6 +1390,8 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) { cs.metrics.NumTxs.Set(float64(block.NumTxs)) cs.metrics.BlockSizeBytes.Set(float64(block.Size())) cs.metrics.TotalTxs.Set(float64(block.TotalTxs)) + cs.metrics.CommittedHeight.Set(float64(block.Height)) + } //----------------------------------------------------------------------------- @@ -1381,14 +1408,9 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { return nil } - // We don't care about the proposal if we're already in cstypes.RoundStepCommit. - if cstypes.RoundStepCommit <= cs.Step { - return nil - } - - // Verify POLRound, which must be -1 or between 0 and proposal.Round exclusive. - if proposal.POLRound != -1 && - (proposal.POLRound < 0 || proposal.Round <= proposal.POLRound) { + // Verify POLRound, which must be -1 or in range [0, proposal.Round). 
+ if proposal.POLRound < -1 || + (proposal.POLRound >= 0 && proposal.POLRound >= proposal.Round) { return ErrInvalidProposalPOLRound } @@ -1398,7 +1420,12 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { } cs.Proposal = proposal - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) + // We don't update cs.ProposalBlockParts if it is already set. + // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. + // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! + if cs.ProposalBlockParts == nil { + cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartsHeader) + } cs.Logger.Info("Received proposal", "proposal", proposal) return nil } @@ -1429,12 +1456,17 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p } if added && cs.ProposalBlockParts.IsComplete() { // Added and completed! - _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes)) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader( + cs.ProposalBlockParts.GetReader(), + &cs.ProposalBlock, + int64(cs.state.ConsensusParams.BlockSize.MaxBytes), + ) if err != nil { - return true, err + return added, err } // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) + cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) // Update Valid* if we can. prevotes := cs.Votes.Prevotes(cs.Round) @@ -1457,39 +1489,42 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p if cs.Step <= cstypes.RoundStepPropose && cs.isProposalComplete() { // Move onto the next step cs.enterPrevote(height, cs.Round) + if hasTwoThirds { // this is optimisation as this will be triggered when prevote is added + cs.enterPrecommit(height, cs.Round) + } } else if cs.Step == cstypes.RoundStepCommit { // If we're waiting on the proposal block... cs.tryFinalizeCommit(height) } - return true, nil + return added, nil } return added, nil } // Attempt to add the vote. if its a duplicate signature, dupeout the validator -func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) error { - _, err := cs.addVote(vote, peerID) +func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { + added, err := cs.addVote(vote, peerID) if err != nil { // If the vote height is off, we'll just ignore it, // But if it's a conflicting sig, add it to the cs.evpool. // If it's otherwise invalid, punish peer. if err == ErrVoteHeightMismatch { - return err + return added, err } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) { cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type) - return err + return added, err } cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence) - return err + return added, err } else { // Probably an invalid signature / Bad peer. // Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ? 
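tryAddVote, reworked in the hunk around this point, now returns (added, err) instead of only err, so handleMsg can forward just the genuinely new votes onto statsMsgQueue. A minimal, self-contained sketch of that add-and-report pattern, with a hypothetical voteSet and stats channel standing in for the real bookkeeping:

    package main

    import "fmt"

    // voteSet is a stand-in for the real vote bookkeeping.
    type voteSet map[string]bool

    // tryAdd mirrors tryAddVote's contract: report whether the vote was newly
    // added; a duplicate is not an error, but it is not "added" either.
    func (vs voteSet) tryAdd(sig string) (added bool, err error) {
    	if vs[sig] {
    		return false, nil
    	}
    	vs[sig] = true
    	return true, nil
    }

    func main() {
    	stats := make(chan string, 8) // buffered, like statsMsgQueue
    	vs := voteSet{}
    	for _, sig := range []string{"a", "b", "a"} {
    		if added, _ := vs.tryAdd(sig); added {
    			stats <- sig // only genuinely new votes feed the stats pipeline
    		}
    	}
    	close(stats)
    	for s := range stats {
    		fmt.Println("counted:", s)
    	}
    }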
cs.Logger.Error("Error attempting to add vote", "err", err) - return ErrAddingVote + return added, ErrAddingVote } } - return nil + return added, nil } //----------------------------------------------------------------------------- @@ -1500,7 +1535,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // A precommit for the previous height? // These come in while we wait timeoutCommit if vote.Height+1 == cs.Height { - if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) { + if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.PrecommitType) { // TODO: give the reason .. // fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.") return added, ErrVoteHeightMismatch @@ -1510,7 +1545,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, return added, err } - cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) + cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) cs.eventBus.PublishEventVote(types.EventDataVote{vote}) cs.evsw.FireEvent(types.EventVote, vote) @@ -1543,7 +1578,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, cs.evsw.FireEvent(types.EventVote, vote) switch vote.Type { - case types.VoteTypePrevote: + case types.PrevoteType: prevotes := cs.Votes.Prevotes(vote.Round) cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) @@ -1562,7 +1597,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, !cs.LockedBlock.HashesTo(blockID.Hash) { cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) - cs.LockedRound = 0 + cs.LockedRound = -1 cs.LockedBlock = nil cs.LockedBlockParts = nil cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) @@ -1570,27 +1605,38 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, // Update Valid* if we can. // NOTE: our proposal block may be nil or not what received a polka.. - // TODO: we may want to still update the ValidBlock and obtain it via gossipping - if !blockID.IsZero() && - (cs.ValidRound < vote.Round) && - (vote.Round <= cs.Round) && - cs.ProposalBlock.HashesTo(blockID.Hash) { - - cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) - cs.ValidRound = vote.Round - cs.ValidBlock = cs.ProposalBlock - cs.ValidBlockParts = cs.ProposalBlockParts + if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) { + + if cs.ProposalBlock.HashesTo(blockID.Hash) { + cs.Logger.Info( + "Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) + cs.ValidRound = vote.Round + cs.ValidBlock = cs.ProposalBlock + cs.ValidBlockParts = cs.ProposalBlockParts + } else { + cs.Logger.Info( + "Valid block we don't know about. Set ProposalBlock=nil", + "proposal", cs.ProposalBlock.Hash(), "blockId", blockID.Hash) + // We're getting the wrong block. + cs.ProposalBlock = nil + } + if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { + cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) + } + cs.evsw.FireEvent(types.EventValidBlock, &cs.RoundState) + cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()) } } - // If +2/3 prevotes for *anything* for this or future round: - if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() { - // Round-skip over to PrevoteWait or goto Precommit. 
- cs.enterNewRound(height, vote.Round) // if the vote is ahead of us - if prevotes.HasTwoThirdsMajority() { + // If +2/3 prevotes for *anything* for future round: + if cs.Round < vote.Round && prevotes.HasTwoThirdsAny() { + // Round-skip if there is any 2/3+ of votes ahead of us + cs.enterNewRound(height, vote.Round) + } else if cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step { // current round + blockID, ok := prevotes.TwoThirdsMajority() + if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) { cs.enterPrecommit(height, vote.Round) - } else { - cs.enterPrevote(height, vote.Round) // if the vote is ahead of us + } else if prevotes.HasTwoThirdsAny() { cs.enterPrevoteWait(height, vote.Round) } } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { @@ -1600,47 +1646,45 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, } } - case types.VoteTypePrecommit: + case types.PrecommitType: precommits := cs.Votes.Precommits(vote.Round) cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) + blockID, ok := precommits.TwoThirdsMajority() if ok { - if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round+1) - } else { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) + // Executed as TwoThirdsMajority could be from a higher round + cs.enterNewRound(height, vote.Round) + cs.enterPrecommit(height, vote.Round) + if len(blockID.Hash) != 0 { cs.enterCommit(height, vote.Round) - if cs.config.SkipTimeoutCommit && precommits.HasAll() { - // if we have all the votes now, - // go straight to new round (skip timeout commit) - // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) cs.enterNewRound(cs.Height, 0) } - + } else { + cs.enterPrecommitWait(height, vote.Round) } } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) cs.enterPrecommitWait(height, vote.Round) } + default: - panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. + panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. } return } -func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { +func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { addr := cs.privValidator.GetAddress() valIndex, _ := cs.Validators.GetByAddress(addr) + vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: valIndex, Height: cs.Height, Round: cs.Round, - Timestamp: time.Now().UTC(), + Timestamp: cs.voteTime(), Type: type_, BlockID: types.BlockID{hash, header}, } @@ -1648,8 +1692,25 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet return vote, err } +func (cs *ConsensusState) voteTime() time.Time { + now := tmtime.Now() + minVoteTime := now + // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, + // even if cs.LockedBlock != nil. See https://github.com/tendermint/spec. 
+ if cs.LockedBlock != nil { + minVoteTime = cs.config.MinValidVoteTime(cs.LockedBlock.Time) + } else if cs.ProposalBlock != nil { + minVoteTime = cs.config.MinValidVoteTime(cs.ProposalBlock.Time) + } + + if now.After(minVoteTime) { + return now + } + return minVoteTime +} + // sign the vote and publish on internalMsgQueue -func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote { +func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { // if we don't have a key or we're not in the validator set, do nothing if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { return nil diff --git a/consensus/state_test.go b/consensus/state_test.go index 6a14e17b56e..9bf4fada5b9 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -7,19 +7,23 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cstypes "github.com/tendermint/tendermint/consensus/types" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" + tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + p2pdummy "github.com/tendermint/tendermint/p2p/dummy" + "github.com/tendermint/tendermint/types" ) func init() { config = ResetConfig("consensus_state_test") } -func ensureProposeTimeout(timeoutPropose int) time.Duration { - return time.Duration(timeoutPropose*2) * time.Millisecond +func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration { + return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond } /* @@ -64,54 +68,56 @@ func TestStateProposerSelection0(t *testing.T) { startTestRound(cs1, height, round) - // wait for new round so proposer is set - <-newRoundCh + // Wait for new round so proposer is set. + ensureNewRound(newRoundCh, height, round) - // lets commit a block and ensure proposer for the next height is correct + // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } - // wait for complete proposal - <-proposalCh + // Wait for complete proposal. + ensureNewProposal(proposalCh, height, round) rs := cs1.GetRoundState() - signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) + signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...) - // wait for new round so next validator is set - <-newRoundCh + // Wait for new round so next validator is set. + ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, vss[1].GetAddress()) { - panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address)) + panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) } } // Now let's do it all again, but starting from round 2 instead of 0 func TestStateProposerSelection2(t *testing.T) { cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators - + height := cs1.Height newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) 
incrementRound(vss[1:]...) - startTestRound(cs1, cs1.Height, 2) - <-newRoundCh // wait for the new round + round := 2 + startTestRound(cs1, height, round) + + ensureNewRound(newRoundCh, height, round) // wait for the new round // everyone just votes nil. we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) { - panic(cmn.Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address)) + correctProposer := vss[(i+round)%len(vss)].GetAddress() + if !bytes.Equal(prop.Address, correctProposer) { + panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address)) } rs := cs1.GetRoundState() - signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) - <-newRoundCh // wait for the new round event each round - + signAddVotes(cs1, types.PrecommitType, nil, rs.ProposalBlockParts.Header(), vss[1:]...) + ensureNewRound(newRoundCh, height, i+round+1) // wait for the new round event each round incrementRound(vss[1:]...) } @@ -129,13 +135,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { startTestRound(cs, height, round) // if we're not a validator, EnterPropose should timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - case <-ticker.C: - panic("Expected EnterPropose to timeout") - - } + ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds()) if cs.GetRoundState().Proposal != nil { t.Error("Expected to make no proposal, since no privValidator") @@ -155,7 +155,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { cs.enterNewRound(height, round) cs.startRoutines(3) - <-proposalCh + ensureNewProposal(proposalCh, height, round) // Check that Proposal, ProposalBlock, ProposalBlockParts are set. 
rs := cs.GetRoundState() @@ -170,13 +170,7 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { } // if we're a validator, enterPropose should not timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - panic("Expected EnterPropose not to timeout") - case <-ticker.C: - - } + ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds()) } func TestStateBadProposal(t *testing.T) { @@ -184,7 +178,7 @@ func TestStateBadProposal(t *testing.T) { height, round := cs1.Height, cs1.Round vs2 := vss[1] - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) voteCh := subscribe(cs1.eventBus, types.EventQueryVote) @@ -203,7 +197,9 @@ func TestStateBadProposal(t *testing.T) { stateHash[0] = byte((stateHash[0] + 1) % 255) propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) - proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{}) + proposal := types.NewProposal( + vs2.Height, round, -1, + types.BlockID{propBlock.Hash(), propBlockParts.Header()}) if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { t.Fatal("failed to sign bad proposal", err) } @@ -217,22 +213,20 @@ func TestStateBadProposal(t *testing.T) { startTestRound(cs1, height, round) // wait for proposal - <-proposalCh + ensureNewProposal(proposalCh, height, round) // wait for prevote - <-voteCh - + ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], nil) // add bad prevote from vs2 and wait for it - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) // wait for precommit - <-voteCh - - validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) } //---------------------------------------------------------------------------------------------------- @@ -255,21 +249,21 @@ func TestStateFullRound1(t *testing.T) { propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + // Maybe it would be better to call explicitly startRoutines(4) startTestRound(cs, height, round) - <-newRoundCh + ensureNewRound(newRoundCh, height, round) - // grab proposal - re := <-propCh - propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() + ensureNewProposal(propCh, height, round) + propBlockHash := cs.GetRoundState().ProposalBlock.Hash() - <-voteCh // wait for prevote + ensurePrevote(voteCh, height, round) // wait for prevote validatePrevote(t, cs, round, vss[0], propBlockHash) - <-voteCh // wait for precommit + ensurePrecommit(voteCh, height, round) // wait for precommit // we're going to roll right into new height - <-newRoundCh + ensureNewRound(newRoundCh, height+1, 0) validateLastPrecommit(t, cs, vss[0], propBlockHash) } @@ -284,11 +278,11 @@ func TestStateFullRoundNil(t *testing.T) { cs.enterPrevote(height, round) cs.startRoutines(4) - <-voteCh // 
prevote - <-voteCh // precommit + ensurePrevote(voteCh, height, round) // prevote + ensurePrecommit(voteCh, height, round) // precommit // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) + validatePrevoteAndPrecommit(t, cs, round, -1, vss[0], nil, nil) } // run through propose, prevote, precommit commit with two validators @@ -304,29 +298,28 @@ func TestStateFullRound2(t *testing.T) { // start round and wait for propose and prevote startTestRound(cs1, height, round) - <-voteCh // prevote + ensurePrevote(voteCh, height, round) // prevote // we should be stuck in limbo waiting for more prevotes rs := cs1.GetRoundState() propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) - <-voteCh - - <-voteCh //precommit + signAddVotes(cs1, types.PrevoteType, propBlockHash, propPartsHeader, vs2) + ensurePrevote(voteCh, height, round) // prevote + ensurePrecommit(voteCh, height, round) //precommit // the proposed block should now be locked and our precommit added validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) // we should be stuck in limbo waiting for more precommits // precommit arrives from vs2: - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) - <-voteCh + signAddVotes(cs1, types.PrecommitType, propBlockHash, propPartsHeader, vs2) + ensurePrecommit(voteCh, height, round) // wait to finish commit, propose in next height - <-newBlockCh + ensureNewBlock(newBlockCh, height) } //------------------------------------------------------------------------------------------ @@ -337,9 +330,9 @@ func TestStateFullRound2(t *testing.T) { func TestStateLockNoPOL(t *testing.T) { cs1, vss := randConsensusState(2) vs2 := vss[1] - height := cs1.Height + height, round := cs1.Height, cs1.Round - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) @@ -352,41 +345,43 @@ func TestStateLockNoPOL(t *testing.T) { */ // start round and wait for prevote - cs1.enterNewRound(height, 0) + cs1.enterNewRound(height, round) cs1.startRoutines(0) - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - theBlockHash := rs.ProposalBlock.Hash() + ensureNewRound(newRoundCh, height, round) - <-voteCh // prevote + ensureNewProposal(proposalCh, height, round) + roundState := cs1.GetRoundState() + theBlockHash := roundState.ProposalBlock.Hash() + thePartSetHeader := roundState.ProposalBlockParts.Header() + + ensurePrevote(voteCh, height, round) // prevote // we should now be stuck in limbo forever, waiting for more prevotes // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2) - <-voteCh // prevote - - <-voteCh // precommit + signAddVotes(cs1, types.PrevoteType, theBlockHash, thePartSetHeader, vs2) + ensurePrevote(voteCh, height, round) // prevote + ensurePrecommit(voteCh, height, round) // precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // we should now be stuck in limbo forever, waiting for more precommits // lets add one 
for a different block - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round hash := make([]byte, len(theBlockHash)) copy(hash, theBlockHash) hash[0] = byte((hash[0] + 1) % 255) - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh // precommit + signAddVotes(cs1, types.PrecommitType, hash, thePartSetHeader, vs2) + ensurePrecommit(voteCh, height, round) // precommit // (note we're entering precommit for a second time this round) // but with invalid args. then we enterPrecommitWait, and the timeout to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) /// - <-newRoundCh + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* Round2 (cs1, B) // B B2 @@ -395,43 +390,42 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) // now we're on a new round and not the proposer, so wait for timeout - re = <-timeoutProposeCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + rs := cs1.GetRoundState() if rs.ProposalBlock != nil { panic("Expected proposal block to be nil") } // wait to finish prevote - <-voteCh - + ensurePrevote(voteCh, height, round) // we should have prevoted our locked block - validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // add a conflicting prevote from the other validator - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + signAddVotes(cs1, types.PrevoteType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) // now we're going to enter prevote again, but with invalid args // and then prevote wait, which should timeout. 
then wait for precommit - <-timeoutWaitCh - - <-voteCh // precommit + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensurePrecommit(voteCh, height, round) // precommit // the proposed block should still be locked and our precommit added // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash) + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // add conflicting precommit from vs2 - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + signAddVotes(cs1, types.PrecommitType, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) + ensurePrecommit(voteCh, height, round) // (note we're entering precommit for a second time this round, but with invalid args // then we enterPrecommitWait and timeout into NewRound - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) - <-newRoundCh + round = round + 1 // entering new round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 2") /* Round3 (vs2, _) // B, B2 @@ -439,40 +433,41 @@ func TestStateLockNoPOL(t *testing.T) { incrementRound(vs2) - re = <-proposalCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewProposal(proposalCh, height, round) + rs = cs1.GetRoundState() // now we're on a new round and are the proposer if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) + panic(fmt.Sprintf("Expected proposal block to be locked block. 
Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } - <-voteCh // prevote - - validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash()) + ensurePrevote(voteCh, height, round) // prevote + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh + signAddVotes(cs1, types.PrevoteType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) - <-timeoutWaitCh // prevote wait - <-voteCh // precommit + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensurePrecommit(voteCh, height, round) // precommit - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh + signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + ensurePrecommit(voteCh, height, round) - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + cs2, _ := randConsensusState(2) // needed so generated block is different than locked block // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + prop, propBlock := decideProposal(cs2, vs2, vs2.Height, vs2.Round+1) if prop == nil || propBlock == nil { t.Fatal("Failed to create proposal block with vs2") } incrementRound(vs2) - <-newRoundCh + round = round + 1 // entering new round + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 3") /* Round4 (vs2, C) // B C // B C @@ -484,35 +479,34 @@ func TestStateLockNoPOL(t *testing.T) { t.Fatal(err) } - <-proposalCh - <-voteCh // prevote - + ensureNewProposal(proposalCh, height, round) + ensurePrevote(voteCh, height, round) // prevote // prevote for locked block (not proposal) - validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) + validatePrevote(t, cs1, 3, vss[0], cs1.LockedBlock.Hash()) - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - <-timeoutWaitCh - <-voteCh + // prevote for proposed block + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) + ensurePrevote(voteCh, height, round) - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh + signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height + ensurePrecommit(voteCh, height, round) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestStateLockPOLRelock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], 
vss[2], vss[3] + height, round := cs1.Height, cs1.Round - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) @@ -525,28 +519,25 @@ func TestStateLockPOLRelock(t *testing.T) { */ // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) + startTestRound(cs1, height, round) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewRound(newRoundCh, height, round) + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() theBlockHash := rs.ProposalBlock.Hash() + theBlockParts := rs.ProposalBlockParts.Header() - <-voteCh // prevote + ensurePrevote(voteCh, height, round) // prevote - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) - // prevotes - discardFromChan(voteCh, 3) + signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) - <-voteCh // our precommit + ensurePrecommit(voteCh, height, round) // our precommit // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3) - // precommites - discardFromChan(voteCh, 3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) // before we timeout to the new round set the new proposal prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) @@ -556,14 +547,15 @@ func TestStateLockPOLRelock(t *testing.T) { incrementRound(vs2, vs3, vs4) // timeout to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + round = round + 1 // moving to the next round //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
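The test rewrites in this file replace bare channel receives such as <-voteCh with ensure* helpers (ensurePrevote, ensureNewRound, ensureNewTimeout, ...) that assert the expected height and round and fail fast instead of hanging when an event never arrives. A minimal sketch of that helper shape; the int event type and the timeout value are illustrative, not the real helpers' signatures:

    package main

    import (
    	"fmt"
    	"time"
    )

    // ensureEvent receives one event, checks it matches, and errors on timeout
    // rather than blocking the test forever.
    func ensureEvent(ch <-chan int, want int, timeout time.Duration) error {
    	select {
    	case got := <-ch:
    		if got != want {
    			return fmt.Errorf("got event %d, want %d", got, want)
    		}
    		return nil
    	case <-time.After(timeout):
    		return fmt.Errorf("timed out after %v waiting for %d", timeout, want)
    	}
    }

    func main() {
    	ch := make(chan int, 1)
    	ch <- 42
    	fmt.Println(ensureEvent(ch, 42, time.Second))         // <nil>
    	fmt.Println(ensureEvent(ch, 42, 10*time.Millisecond)) // timeout error
    }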
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - <-newRoundCh + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 1") /* @@ -574,58 +566,34 @@ func TestStateLockPOLRelock(t *testing.T) { // now we're on a new round and not the proposer // but we should receive the proposal - select { - case <-proposalCh: - case <-timeoutProposeCh: - <-proposalCh - } + ensureNewProposal(proposalCh, height, round) // go to prevote, prevote for locked block (not proposal), move on - <-voteCh - validatePrevote(t, cs1, 0, vss[0], theBlockHash) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], theBlockHash) // now lets add prevotes from everyone else for the new block - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - // prevotes - discardFromChan(voteCh, 3) - - // now either we go to PrevoteWait or Precommit - select { - case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit - // XXX: there's no guarantee we see the polka, this might be a precommit for nil, - // in which case the test fails! - <-voteCh - case <-voteCh: // we went straight to Precommit - } + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) // we should have unlocked and locked on the new block - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) - - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3) - discardFromChan(voteCh, 2) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - be := <-newBlockCh - b := be.(types.EventDataNewBlockHeader) - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - if rs.Height != 2 { - panic("Expected height to increment") - } + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3) + ensureNewBlockHeader(newBlockCh, height, propBlockHash) - if !bytes.Equal(b.Header.Hash(), propBlockHash) { - panic("Expected new block to be proposal block") - } + ensureNewRound(newRoundCh, height+1, 0) } // 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka func TestStateLockPOLUnlock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) @@ -640,75 +608,71 @@ func TestStateLockPOLUnlock(t *testing.T) { */ // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - theBlockHash := rs.ProposalBlock.Hash() + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) - <-voteCh // prevote + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + theBlockHash := rs.ProposalBlock.Hash() + theBlockParts := rs.ProposalBlockParts.Header() - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), 
cs1.ProposalBlockParts.Header(), vs2, vs3, vs4) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], theBlockHash) - <-voteCh //precommit + signAddVotes(cs1, types.PrevoteType, theBlockHash, theBlockParts, vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) rs = cs1.GetRoundState() // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, theBlockHash, theBlockParts, vs3) // before we time out into new round, set next proposal block prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockParts := propBlock.MakePartSet(partSize) - incrementRound(vs2, vs3, vs4) - // timeout to new round - re = <-timeoutWaitCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + rs = cs1.GetRoundState() lockedBlockHash := rs.LockedBlock.Hash() - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } + incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round - <-newRoundCh + ensureNewRound(newRoundCh, height, round) t.Log("#### ONTO ROUND 1") /* Round2 (vs2, C) // B nil nil nil // nil nil nil _ cs1 unlocks! */ - - // now we're on a new round and not the proposer, - // but we should receive the proposal - select { - case <-proposalCh: - case <-timeoutProposeCh: - <-proposalCh + //XXX: this isnt guaranteed to get there before the timeoutPropose ... + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) } + ensureNewProposal(proposalCh, height, round) + // go to prevote, prevote for locked block (not proposal) - <-voteCh - validatePrevote(t, cs1, 0, vss[0], lockedBlockHash) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], lockedBlockHash) // now lets add prevotes from everyone else for nil (a polka!) 
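The nil polka added next is what forces cs1 to drop its lock. A toy model of the rule the following assertions encode, with stand-in types; the real transition lives in the consensus state machine, so treat this as an illustration only.

package main

import "fmt"

// lockState is an assumed stand-in for the locked-block fields of RoundState.
type lockState struct {
	LockedRound int // -1 is the "not locked" sentinel these tests now expect
	LockedHash  []byte
}

// onNilPolka models seeing 2/3+ prevotes for nil: the validator unlocks and,
// because it never relocks on nil, the lock round resets to -1.
func onNilPolka(ls *lockState) {
	ls.LockedHash = nil
	ls.LockedRound = -1
}

func main() {
	ls := lockState{LockedRound: 0, LockedHash: []byte{0xde, 0xad}}
	onNilPolka(&ls)
	fmt.Println(ls.LockedRound, ls.LockedHash == nil) // -1 true
}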
- signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil - <-unlockCh - <-voteCh // precommit + ensureNewUnlock(unlockCh, height, round) + ensurePrecommit(voteCh, height, round) // we should have unlocked and committed nil - // NOTE: since we don't relock on nil, the lock round is 0 - validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil) + // NOTE: since we don't relock on nil, the lock round is -1 + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - <-newRoundCh + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) + ensureNewRound(newRoundCh, height, round+1) } // 4 vals @@ -718,8 +682,9 @@ func TestStateLockPOLUnlock(t *testing.T) { func TestStateLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) @@ -728,32 +693,29 @@ func TestStateLockPOLSafety1(t *testing.T) { voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - propBlock := rs.ProposalBlock + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) - <-voteCh // prevote + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock - validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlock.Hash()) // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) - - // before we time out into new round, set next proposer - // and next proposal block - /* - _, v1 := cs1.Validators.GetByAddress(vss[0].Address) - v1.VotingPower = 1 - if updated := cs1.Validators.Update(v1); !updated { - panic("failed to update validator") - }*/ + prevotes := signVotes(types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) // we do see them precommit nil - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + // cs1 precommit nil + ensurePrecommit(voteCh, height, round) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + t.Log("### ONTO ROUND 1") prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash := propBlock.Hash() @@ -761,51 +723,46 @@ func TestStateLockPOLSafety1(t *testing.T) { incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + //XXX: this isnt guaranteed to get there before the timeoutPropose ... 
if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { t.Fatal(err) } - - <-newRoundCh - t.Log("### ONTO ROUND 1") /*Round2 // we timeout and prevote our lock // a polka happened but we didn't see it! */ - // now we're on a new round and not the proposer, - // but we should receive the proposal - select { - case re = <-proposalCh: - case <-timeoutProposeCh: - re = <-proposalCh - } + ensureNewProposal(proposalCh, height, round) - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + rs = cs1.GetRoundState() if rs.LockedBlock != nil { panic("we should not be locked!") } t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) + // go to prevote, prevote for proposal block - <-voteCh - validatePrevote(t, cs1, 1, vss[0], propBlockHash) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) // now we see the others prevote for it, so we should lock on it - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - - <-voteCh // precommit + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) // we should have precommitted - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round - <-newRoundCh + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO ROUND 2") /*Round3 @@ -813,22 +770,22 @@ func TestStateLockPOLSafety1(t *testing.T) { */ // timeout of propose - <-timeoutProposeCh + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) // finish prevote - <-voteCh - + ensurePrevote(voteCh, height, round) // we should prevote what we're locked on - validatePrevote(t, cs1, 2, vss[0], propBlockHash) + validatePrevote(t, cs1, round, vss[0], propBlockHash) newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + // before prevotes from the previous round are added // add prevotes from the earlier round addVotes(cs1, prevotes...) t.Log("Done adding prevotes!") - ensureNoNewStep(newStepCh) + ensureNoNewRoundStep(newStepCh) } // 4 vals. 
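ensureNoNewRoundStep just above is the mirror image of the ensure* helpers: receiving anything within the window is the failure case. A self-contained sketch of that shape; the name and the wait window are assumptions.

package main

import (
	"fmt"
	"time"
)

// ensureNoEvent asserts silence on a channel: where ensure* treats a timeout
// as failure, ensureNo* treats a receive as failure.
func ensureNoEvent(ch <-chan interface{}, within time.Duration) error {
	select {
	case ev := <-ch:
		return fmt.Errorf("unexpected event: %#v", ev)
	case <-time.After(within):
		return nil // nothing arrived, e.g. the stale prevotes were ignored
	}
}

func main() {
	ch := make(chan interface{})
	fmt.Println(ensureNoEvent(ch, 10*time.Millisecond)) // <nil>
}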
@@ -841,11 +798,11 @@ func TestStateLockPOLSafety1(t *testing.T) { func TestStateLockPOLSafety2(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) @@ -853,53 +810,53 @@ func TestStateLockPOLSafety2(t *testing.T) { // the block for R0: gets polkad but we miss it // (even though we signed it, shhh) - _, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round) + _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) + propBlockID0 := types.BlockID{propBlockHash0, propBlockParts0.Header()} // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) + prevotes := signVotes(types.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) // the block for round 1 prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) propBlockHash1 := propBlock1.Hash() propBlockParts1 := propBlock1.MakePartSet(partSize) - propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()} incrementRound(vs2, vs3, vs4) - cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait) - + round = round + 1 // moving to the next round t.Log("### ONTO Round 1") // jump in at round 1 - height := cs1.Height - startTestRound(cs1, height, 1) - <-newRoundCh + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { t.Fatal(err) } - <-proposalCh + ensureNewProposal(proposalCh, height, round) - <-voteCh // prevote + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash1) - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) + signAddVotes(cs1, types.PrevoteType, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - <-voteCh // precommit + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1) + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(cs1, types.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) incrementRound(vs2, vs3, vs4) // timeout of precommit wait to new round - <-timeoutWaitCh + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + round = round + 1 // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1) + newProp := types.NewProposal(height, round, 0, propBlockID0) if err := 
vs3.SignProposal(config.ChainID(), newProp); err != nil { t.Fatal(err) } @@ -910,26 +867,404 @@ func TestStateLockPOLSafety2(t *testing.T) { // Add the pol votes addVotes(cs1, prevotes...) - <-newRoundCh + ensureNewRound(newRoundCh, height, round) t.Log("### ONTO Round 2") /*Round2 // now we see the polka from round 1, but we shouldnt unlock */ + ensureNewProposal(proposalCh, height, round) - select { - case <-timeoutProposeCh: - <-proposalCh - case <-proposalCh: + ensureNoNewUnlock(unlockCh) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash1) + +} + +// 4 vals. +// polka P0 at R0 for B0. We lock B0 on P0 at R0. P0 unlocks value at R1. + +// What we want: +// P0 proposes B0 at R3. +func TestProposeValidBlock(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock + propBlockHash := propBlock.Hash() + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + // the others sign a polka + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) + + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + incrementRound(vs2, vs3, vs4) + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + + t.Log("### ONTO ROUND 2") + + // timeout of propose + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewUnlock(unlockCh, height, round) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + incrementRound(vs2, vs3, vs4) + incrementRound(vs2, vs3, vs4) + + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 2 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + t.Log("### ONTO ROUND 3") + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + + t.Log("### ONTO ROUND 4") + + ensureNewProposal(proposalCh, height, round) + + rs = cs1.GetRoundState() + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), propBlockHash)) + assert.True(t, bytes.Equal(rs.ProposalBlock.Hash(), 
rs.ValidBlock.Hash())) + assert.True(t, rs.Proposal.POLRound == rs.ValidRound) + assert.True(t, bytes.Equal(rs.Proposal.BlockID.Hash, rs.ValidBlock.Hash())) +} + +// What we want: +// P0 miss to lock B but set valid block to B after receiving delayed prevote. +func TestSetValidBlockOnDelayedPrevote(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round and wait for propose and prevote + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + propBlock := rs.ProposalBlock + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], propBlockHash) + + // vs2 send prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) + + // vs3 send prevote nil + signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs3) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + + ensurePrecommit(voteCh, height, round) + // we should have precommitted + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + rs = cs1.GetRoundState() + + assert.True(t, rs.ValidBlock == nil) + assert.True(t, rs.ValidBlockParts == nil) + assert.True(t, rs.ValidRound == -1) + + // vs2 send (delayed) prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs4) + + ensureNewValidBlock(validBlockCh, height, round) + + rs = cs1.GetRoundState() + + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ValidRound == round) +} + +// What we want: +// P0 miss to lock B as Proposal Block is missing, but set valid block to B after +// receiving delayed Block Proposal. 
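TestSetValidBlockOnDelayedPrevote above asserts that a late prevote which completes a polka still records the block as valid (ValidBlock/ValidRound), even though the validator already precommitted nil. A simplified stand-alone sketch of that bookkeeping; the update condition below is an assumption for illustration, not the actual RoundState logic.

package main

import "fmt"

type roundState struct {
	ValidBlockHash []byte
	ValidRound     int // -1 until some block gathers 2/3+ prevotes
}

// onPolka records a block as valid once it has 2/3+ prevotes in a round,
// even when the validator did not lock on it.
func onPolka(rs *roundState, blockHash []byte, round int, haveBlock bool) {
	if haveBlock && round > rs.ValidRound {
		rs.ValidBlockHash = blockHash
		rs.ValidRound = round
	}
}

func main() {
	rs := roundState{ValidRound: -1}
	onPolka(&rs, []byte{0xab}, 0, true)
	fmt.Println(rs.ValidRound) // 0
}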
+func TestSetValidBlockOnDelayedProposal(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + partSize := types.BlockPartSizeBytes + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + + round = round + 1 // move to round in which P0 is not proposer + incrementRound(vs2, vs3, vs4) + + startTestRound(cs1, cs1.Height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) + + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // vs2, vs3 and vs4 send prevote for propBlock + signAddVotes(cs1, types.PrevoteType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensureNewValidBlock(validBlockCh, height, round) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrevote.Nanoseconds()) + + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) } - select { - case <-unlockCh: - panic("validator unlocked using an old polka") - case <-voteCh: - // prevote our locked block + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() + + assert.True(t, bytes.Equal(rs.ValidBlock.Hash(), propBlockHash)) + assert.True(t, rs.ValidBlockParts.Header().Equals(propBlockParts.Header())) + assert.True(t, rs.ValidRound == round) +} + +// 4 vals, 3 Nil Precommits at P0 +// What we want: +// P0 waits for timeoutPrecommit before starting next round +func TestWaitingTimeoutOnNilPolka(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + + // start round + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + ensureNewRound(newRoundCh, height, round+1) +} + +// 4 vals, 3 Prevotes for nil from the higher round. +// What we want: +// P0 waits for timeoutPropose in the next round before entering prevote +func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensurePrevote(voteCh, height, round) + + incrementRound(vss[1:]...) 
+ signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepPropose) // P0 does not prevote before timeoutPropose expires + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) +} + +// 4 vals, 3 Precommits for nil from the higher round. +// What we want: +// P0 jumps to the higher round, precommits and starts precommit wait +func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, cs1.Round + + timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensurePrevote(voteCh, height, round) + + incrementRound(vss[1:]...) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) + + ensurePrecommit(voteCh, height, round) + validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) + + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + ensureNewRound(newRoundCh, height, round) +} + +// 4 vals, 3 Prevotes for nil in the current round. +// What we want: +// P0 waits for timeoutPropose to expire before sending its prevote. +func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) + + // start round in which P0 is not the proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + incrementRound(vss[1:]...)
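incrementRound keeps the validator stubs' notion of the round in step with the test's own `round = round + 1` bookkeeping; a variadic signature is what allows both incrementRound(vs2, vs3, vs4) and incrementRound(vss[1:]...). A stand-alone sketch under the assumption that the stub simply tracks a round counter.

package main

import "fmt"

// validatorStub is an assumed stand-in for the test harness's stub type.
type validatorStub struct {
	Height int64
	Round  int
}

// incrementRound advances every stub's round by one.
func incrementRound(vss ...*validatorStub) {
	for _, vs := range vss {
		vs.Round++
	}
}

func main() {
	vs2, vs3 := &validatorStub{Height: 1}, &validatorStub{Height: 1}
	incrementRound(vs2, vs3)
	fmt.Println(vs2.Round, vs3.Round) // 1 1
}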
+ signAddVotes(cs1, types.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + + ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.TimeoutPropose.Nanoseconds()) + + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], nil) +} + +// What we want: +// P0 emit NewValidBlock event upon receiving 2/3+ Precommit for B but hasn't received block B yet +func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + incrementRound(vs2, vs3, vs4) + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + + _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // start round in which PO is not proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + // vs2, vs3 and vs4 send precommit for propBlock + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + ensureNewValidBlock(validBlockCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepCommit) + assert.True(t, rs.ProposalBlock == nil) + assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + +} + +// What we want: +// P0 receives 2/3+ Precommit for B for round 0, while being in round 1. It emits NewValidBlock event. +// After receiving block, it executes block and moves to the next height. +func TestCommitFromPreviousRound(t *testing.T) { + cs1, vss := randConsensusState(4) + vs2, vs3, vs4 := vss[1], vss[2], vss[3] + height, round := cs1.Height, 1 + + partSize := types.BlockPartSizeBytes + + newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + + prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) + propBlockHash := propBlock.Hash() + propBlockParts := propBlock.MakePartSet(partSize) + + // start round in which PO is not proposer + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + // vs2, vs3 and vs4 send precommit for propBlock for the previous round + signAddVotes(cs1, types.PrecommitType, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) + + ensureNewValidBlock(validBlockCh, height, round) + + rs := cs1.GetRoundState() + assert.True(t, rs.Step == cstypes.RoundStepCommit) + assert.True(t, rs.CommitRound == vs2.Round) + assert.True(t, rs.ProposalBlock == nil) + assert.True(t, rs.ProposalBlockParts.Header().Equals(propBlockParts.Header())) + + if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { + t.Fatal(err) } - validatePrevote(t, cs1, 2, vss[0], propBlockHash1) + ensureNewProposal(proposalCh, height, round) + ensureNewRound(newRoundCh, height+1, 0) } //------------------------------------------------------------------------------------------ @@ -959,7 +1294,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, hash, 
rs.ProposalBlockParts.Header(), vs2) <-timeoutWaitCh @@ -967,7 +1302,7 @@ func TestStateSlashingPrevotes(t *testing.T) { // away and ignore more prevotes (and thus fail to slash!) // add the conflicting vote - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -989,7 +1324,7 @@ func TestStateSlashingPrecommits(t *testing.T) { <-voteCh // prevote // add prevote from vs2 - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrevoteType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) <-voteCh // precommit @@ -997,13 +1332,13 @@ func TestStateSlashingPrecommits(t *testing.T) { // add one for a different block should cause us to go into prevote wait hash := rs.ProposalBlock.Hash() hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlockParts.Header(), vs2) // NOTE: we have to send the vote for different block first so we don't just go into precommit round right // away and ignore more prevotes (and thus fail to slash!) // add precommit from vs2 - signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) + signAddVotes(cs1, types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) // XXX: Check for existence of Dupeout info } @@ -1020,8 +1355,8 @@ func TestStateSlashingPrecommits(t *testing.T) { func TestStateHalt1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes + height, round := cs1.Height, cs1.Round + partSize := types.BlockPartSizeBytes proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) @@ -1030,33 +1365,37 @@ func TestStateHalt1(t *testing.T) { voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + startTestRound(cs1, height, round) + ensureNewRound(newRoundCh, height, round) + + ensureNewProposal(proposalCh, height, round) + rs := cs1.GetRoundState() propBlock := rs.ProposalBlock propBlockParts := propBlock.MakePartSet(partSize) - <-voteCh // prevote + ensurePrevote(voteCh, height, round) - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4) - <-voteCh // precommit + signAddVotes(cs1, types.PrevoteType, propBlock.Hash(), propBlockParts.Header(), vs2, vs3, vs4) + ensurePrecommit(voteCh, height, round) // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash()) + validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3) + signAddVotes(cs1, types.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal + signAddVotes(cs1, 
types.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) // we receive this later, but vs3 might receive it earlier and with ours will go to commit! - precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header()) + precommit4 := signVote(vs4, types.PrecommitType, propBlock.Hash(), propBlockParts.Header()) incrementRound(vs2, vs3, vs4) // timeout to new round - <-timeoutWaitCh - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.TimeoutPrecommit.Nanoseconds()) + + round = round + 1 // moving to the next round + + ensureNewRound(newRoundCh, height, round) + rs = cs1.GetRoundState() t.Log("### ONTO ROUND 1") /*Round2 @@ -1065,20 +1404,90 @@ func TestStateHalt1(t *testing.T) { */ // go to prevote, prevote for locked block - <-voteCh // prevote - validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash()) + ensurePrevote(voteCh, height, round) + validatePrevote(t, cs1, round, vss[0], rs.LockedBlock.Hash()) // now we receive the precommit from the previous round addVotes(cs1, precommit4) // receiving that precommit should take us straight to commit - <-newBlockCh - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) + ensureNewBlock(newBlockCh, height) + + ensureNewRound(newRoundCh, height+1, 0) +} - if rs.Height != 2 { - panic("expected height to increment") +func TestStateOutputsBlockPartsStats(t *testing.T) { + // create dummy peer + cs, _ := randConsensusState(1) + peer := p2pdummy.NewPeer() + + // 1) new block part + parts := types.NewPartSetFromData(cmn.RandBytes(100), 10) + msg := &BlockPartMessage{ + Height: 1, + Round: 0, + Part: parts.GetPart(0), + } + + cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) + cs.handleMsg(msgInfo{msg, peer.ID()}) + + statsMessage := <-cs.statsMsgQueue + require.Equal(t, msg, statsMessage.Msg, "") + require.Equal(t, peer.ID(), statsMessage.PeerID, "") + + // sending the same part from different peer + cs.handleMsg(msgInfo{msg, "peer2"}) + + // sending the part with the same height, but different round + msg.Round = 1 + cs.handleMsg(msgInfo{msg, peer.ID()}) + + // sending the part from the smaller height + msg.Height = 0 + cs.handleMsg(msgInfo{msg, peer.ID()}) + + // sending the part from the bigger height + msg.Height = 3 + cs.handleMsg(msgInfo{msg, peer.ID()}) + + select { + case <-cs.statsMsgQueue: + t.Errorf("Should not output stats message after receiving the known block part!") + case <-time.After(50 * time.Millisecond): + } + +} + +func TestStateOutputVoteStats(t *testing.T) { + cs, vss := randConsensusState(2) + // create dummy peer + peer := p2pdummy.NewPeer() + + vote := signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) + + voteMessage := &VoteMessage{vote} + cs.handleMsg(msgInfo{voteMessage, peer.ID()}) + + statsMessage := <-cs.statsMsgQueue + require.Equal(t, voteMessage, statsMessage.Msg, "") + require.Equal(t, peer.ID(), statsMessage.PeerID, "") + + // sending the same part from different peer + cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) + + // sending the vote for the bigger height + incrementHeight(vss[1]) + vote = signVote(vss[1], types.PrecommitType, []byte("test"), types.PartSetHeader{}) + + cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) + + select { + case <-cs.statsMsgQueue: + t.Errorf("Should not output stats message after receiving the known vote or vote from bigger height") + case <-time.After(50 * 
time.Millisecond): } + } // subscribe subscribes test client to the given query and returns a channel with cap = 1. @@ -1090,10 +1499,3 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} { } return out } - -// discardFromChan reads n values from the channel. -func discardFromChan(ch <-chan interface{}, n int) { - for i := 0; i < n; i++ { - <-ch - } -} diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 70a38668ff9..eee013eeab8 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -6,9 +6,9 @@ import ( "strings" "sync" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) type RoundVoteSet struct { @@ -99,8 +99,8 @@ func (hvs *HeightVoteSet) addRound(round int) { cmn.PanicSanity("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, @@ -134,13 +134,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrevote) + return hvs.getVoteSet(round, types.PrevoteType) } func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrecommit) + return hvs.getVoteSet(round, types.PrecommitType) } // Last round and blockID that has +2/3 prevotes for a particular block or nil. @@ -149,7 +149,7 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { hvs.mtx.Lock() defer hvs.mtx.Unlock() for r := hvs.round; r >= 0; r-- { - rvs := hvs.getVoteSet(r, types.VoteTypePrevote) + rvs := hvs.getVoteSet(r, types.PrevoteType) polBlockID, ok := rvs.TwoThirdsMajority() if ok { return r, polBlockID @@ -158,18 +158,18 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { return -1, types.BlockID{} } -func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { +func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *types.VoteSet { rvs, ok := hvs.roundVoteSets[round] if !ok { return nil } switch type_ { - case types.VoteTypePrevote: + case types.PrevoteType: return rvs.Prevotes - case types.VoteTypePrecommit: + case types.PrecommitType: return rvs.Precommits default: - cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_)) + cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_)) return nil } } @@ -178,7 +178,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { // NOTE: if there are too many peers, or too much peer churn, // this can cause memory issues. 
// TODO: implement ability to remove peers too -func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error { +func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error { hvs.mtx.Lock() defer hvs.mtx.Unlock() if !types.IsVoteTypeValid(type_) { @@ -219,7 +219,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { voteSetString = roundVoteSet.Precommits.StringShort() vsStrings = append(vsStrings, voteSetString) } - return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v + return fmt.Sprintf(`HeightVoteSet{H:%v R:0~%v %s %v %s}`, hvs.height, hvs.round, diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 0de656000cf..e2298cef941 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -1,12 +1,12 @@ package types import ( + "fmt" "testing" - "time" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) var config *cfg.Config // NOTE: must be reset for each _test.go file @@ -55,14 +55,14 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali ValidatorIndex: valIndex, Height: height, Round: round, - Timestamp: time.Now().UTC(), - Type: types.VoteTypePrecommit, + Timestamp: tmtime.Now(), + Type: types.PrecommitType, BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, } chainID := config.ChainID() err := privVal.SignVote(chainID, vote) if err != nil { - panic(cmn.Fmt("Error signing vote: %v", err)) + panic(fmt.Sprintf("Error signing vote: %v", err)) return nil } return vote diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go index 7a5d69b8eb9..e42395bc3aa 100644 --- a/consensus/types/peer_round_state.go +++ b/consensus/types/peer_round_state.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index cca560ccf83..ef4236118bd 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- @@ -26,8 +26,15 @@ const ( RoundStepPrecommitWait = RoundStepType(0x07) // Did receive any +2/3 precommits, start timeout RoundStepCommit = RoundStepType(0x08) // Entered commit state machine // NOTE: RoundStepNewHeight acts as RoundStepCommitWait. + + // NOTE: Update IsValid method if you change this! ) +// IsValid returns true if the step is valid, false if unknown/undefined. +func (rs RoundStepType) IsValid() bool { + return uint8(rs) >= 0x01 && uint8(rs) <= 0x08 +} + // String returns a string func (rs RoundStepType) String() string { switch rs { @@ -107,8 +114,8 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // RoundStateEvent returns the H/R/S of the RoundState as an event. 
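A recurring change in this diff is retyping vote kinds from a bare byte to types.SignedMsgType, so signatures like getVoteSet and SetPeerMaj23 state what they accept. A minimal sketch of the pattern; the constant values are illustrative assumptions, not copied from the codebase.

package main

import "fmt"

// SignedMsgType is a typed tag for votes, replacing an untyped byte.
type SignedMsgType byte

const (
	PrevoteType   SignedMsgType = 0x01
	PrecommitType SignedMsgType = 0x02
)

// IsVoteTypeValid mirrors the validation SetPeerMaj23 relies on.
func IsVoteTypeValid(t SignedMsgType) bool {
	switch t {
	case PrevoteType, PrecommitType:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(IsVoteTypeValid(PrevoteType), IsVoteTypeValid(0x20)) // true false
}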
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { - // XXX: copy the RoundState - // if we want to avoid this, we may need synchronous events after all + // copy the RoundState. + // TODO: if we want to avoid this, we may need synchronous events after all rsCopy := *rs edrs := types.EventDataRoundState{ Height: rs.Height, diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index 0257ea2ff98..6a1c4533a36 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -2,12 +2,12 @@ package types import ( "testing" - "time" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) func BenchmarkRoundStateDeepCopy(b *testing.B) { @@ -23,11 +23,11 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { Hash: cmn.RandBytes(20), }, } - sig := make([]byte, ed25519.SignatureEd25519Size) + sig := make([]byte, ed25519.SignatureSize) for i := 0; i < nval; i++ { precommits[i] = &types.Vote{ ValidatorAddress: types.Address(cmn.RandBytes(20)), - Timestamp: time.Now(), + Timestamp: tmtime.Now(), BlockID: blockID, Signature: sig, } @@ -40,7 +40,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { block := &types.Block{ Header: types.Header{ ChainID: cmn.RandStr(12), - Time: time.Now(), + Time: tmtime.Now(), LastBlockID: blockID, LastCommitHash: cmn.RandBytes(20), DataHash: cmn.RandBytes(20), @@ -62,19 +62,16 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { parts := block.MakePartSet(4096) // Random Proposal proposal := &types.Proposal{ - Timestamp: time.Now(), - BlockPartsHeader: types.PartSetHeader{ - Hash: cmn.RandBytes(20), - }, - POLBlockID: blockID, - Signature: sig, + Timestamp: tmtime.Now(), + BlockID: blockID, + Signature: sig, } // Random HeightVoteSet // TODO: hvs := rs := &RoundState{ - StartTime: time.Now(), - CommitTime: time.Now(), + StartTime: tmtime.Now(), + CommitTime: tmtime.Now(), Validators: vset, Proposal: proposal, ProposalBlock: block, diff --git a/consensus/types/wire.go b/consensus/types/wire.go index db674816d34..e8a05b3554b 100644 --- a/consensus/types/wire.go +++ b/consensus/types/wire.go @@ -1,7 +1,7 @@ package types import ( - "github.com/tendermint/go-amino" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/types" ) diff --git a/consensus/version.go b/consensus/version.go deleted file mode 100644 index 5c74a16db37..00000000000 --- a/consensus/version.go +++ /dev/null @@ -1,13 +0,0 @@ -package consensus - -import ( - cmn "github.com/tendermint/tendermint/libs/common" -) - -// kind of arbitrary -var Spec = "1" // async -var Major = "0" // -var Minor = "2" // replay refactor -var Revision = "2" // validation -> commit - -var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision) diff --git a/consensus/wal.go b/consensus/wal.go index 8c4c10bc7b0..bbc9908fbc3 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -11,13 +11,15 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tendermint/libs/autofile" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" + tmtime 
"github.com/tendermint/tendermint/types/time" ) const ( - // must be greater than params.BlockGossip.BlockPartSizeBytes + a few bytes + // must be greater than types.BlockPartSizeBytes + a few bytes maxMsgSizeBytes = 1024 * 1024 // 1MB ) @@ -72,13 +74,13 @@ type baseWAL struct { enc *WALEncoder } -func NewWAL(walFile string) (*baseWAL, error) { +func NewWAL(walFile string, groupOptions ...func(*auto.Group)) (*baseWAL, error) { err := cmn.EnsureDir(filepath.Dir(walFile), 0700) if err != nil { return nil, errors.Wrap(err, "failed to ensure WAL directory is in place") } - group, err := auto.OpenGroup(walFile) + group, err := auto.OpenGroup(walFile, groupOptions...) if err != nil { return nil, err } @@ -94,6 +96,11 @@ func (wal *baseWAL) Group() *auto.Group { return wal.group } +func (wal *baseWAL) SetLogger(l log.Logger) { + wal.BaseService.Logger = l + wal.group.SetLogger(l) +} + func (wal *baseWAL) OnStart() error { size, err := wal.group.Head.Size() if err != nil { @@ -119,8 +126,8 @@ func (wal *baseWAL) Write(msg WALMessage) { } // Write the wal message - if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil { - panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) + if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil { + panic(fmt.Sprintf("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) } } @@ -134,7 +141,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) { wal.Write(msg) if err := wal.group.Flush(); err != nil { - panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err)) + panic(fmt.Sprintf("Error flushing consensus wal buf to file. Error: %v \n", err)) } } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index f3a365809ce..5ff597a52e2 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "fmt" + "io" "os" "path/filepath" "strings" @@ -13,22 +14,21 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tendermint/libs/autofile" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" ) -// WALWithNBlocks generates a consensus WAL. It does this by spining up a +// WALGenerateNBlocks generates a consensus WAL. It does this by spining up a // stripped down version of node (proxy app, event bus, consensus state) with a // persistent kvstore application and special consensus wal instance -// (byteBufferWAL) and waits until numBlocks are created. Then it returns a WAL -// content. If the node fails to produce given numBlocks, it returns an error. -func WALWithNBlocks(numBlocks int) (data []byte, err error) { +// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error. 
+func WALGenerateNBlocks(wr io.Writer, numBlocks int) (err error) { config := getConfig() app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) @@ -38,31 +38,33 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { ///////////////////////////////////////////////////////////////////////////// // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS - // NOTE: we can't import node package because of circular dependency + // NOTE: we can't import node package because of circular dependency. + // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. privValidatorFile := config.PrivValidatorFile() privValidator := privval.LoadOrGenFilePV(privValidatorFile) genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) if err != nil { - return nil, errors.Wrap(err, "failed to read genesis file") + return errors.Wrap(err, "failed to read genesis file") } stateDB := db.NewMemDB() blockStoreDB := db.NewMemDB() state, err := sm.MakeGenesisState(genDoc) if err != nil { - return nil, errors.Wrap(err, "failed to make genesis state") + return errors.Wrap(err, "failed to make genesis state") } + state.Version.Consensus.App = kvstore.ProtocolVersion blockStore := bc.NewBlockStore(blockStoreDB) - handshaker := NewHandshaker(stateDB, state, blockStore, genDoc) - proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker) + proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start proxy app connections") + return errors.Wrap(err, "failed to start proxy app connections") } defer proxyApp.Stop() + eventBus := types.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start event bus") + return errors.Wrap(err, "failed to start event bus") } defer eventBus.Stop() mempool := sm.MockMempool{} @@ -78,8 +80,6 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { ///////////////////////////////////////////////////////////////////////////// // set consensus wal to buffered WAL, which will write all incoming msgs to buffer - var b bytes.Buffer - wr := bufio.NewWriter(&b) numBlocksWritten := make(chan struct{}) wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) // see wal.go#103 @@ -87,18 +87,30 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { consensusState.wal = wal if err := consensusState.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start consensus state") + return errors.Wrap(err, "failed to start consensus state") } select { case <-numBlocksWritten: consensusState.Stop() - wr.Flush() - return b.Bytes(), nil + return nil case <-time.After(1 * time.Minute): consensusState.Stop() - return []byte{}, fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) + return fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) + } +} + +//WALWithNBlocks returns a WAL content with numBlocks. 
+func WALWithNBlocks(numBlocks int) (data []byte, err error) { + var b bytes.Buffer + wr := bufio.NewWriter(&b) + + if err := WALGenerateNBlocks(wr, numBlocks); err != nil { + return []byte{}, err } + + wr.Flush() + return b.Bytes(), nil } // f**ing long, but unique for each test diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 3ecb4fe8fb2..c45f6acee2a 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -3,20 +3,71 @@ package consensus import ( "bytes" "crypto/rand" + "fmt" + "io/ioutil" + "os" + "path/filepath" + // "sync" "testing" "time" "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/libs/autofile" tmtypes "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +func TestWALTruncate(t *testing.T) { + walDir, err := ioutil.TempDir("", "wal") + if err != nil { + panic(fmt.Errorf("failed to create temp WAL file: %v", err)) + } + defer os.RemoveAll(walDir) + + walFile := filepath.Join(walDir, "wal") + + //the magic number 4K lets RotateFile truncate content; the default headSizeLimit (10M) is hard to simulate. //the magic number 1 * time.Millisecond makes RotateFile check frequently; the default groupCheckDuration (5s) is hard to simulate. + wal, err := NewWAL(walFile, autofile.GroupHeadSizeLimit(4096), autofile.GroupCheckDuration(1*time.Millisecond)) + if err != nil { + t.Fatal(err) + } + + wal.Start() + defer wal.Stop() + + //60 blocks take roughly 70K, more than the group's headBuf size (4096 * 10); once headBuf is full, its content is flushed to the file. //at that point RotateFile is called, so truncated content exists in each file.
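Splitting generation (WALGenerateNBlocks, which only needs an io.Writer) from buffering (WALWithNBlocks) is what lets TestWALTruncate stream blocks straight into the live autofile group on the next line. A toy illustration of the same decoupling; `generate` here is a stand-in, not the real generator.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"os"
)

// generate stands in for WALGenerateNBlocks: callers decide where bytes land.
func generate(w io.Writer, n int) error {
	for i := 0; i < n; i++ {
		if _, err := fmt.Fprintf(w, "block %d\n", i); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	// In-memory, as WALWithNBlocks does: buffer + bufio, then Flush.
	var b bytes.Buffer
	wr := bufio.NewWriter(&b)
	_ = generate(wr, 2)
	wr.Flush() // buffered bytes only reach b after Flush
	fmt.Print(b.String())

	// Straight to a file-like sink, as TestWALTruncate does with wal.Group().
	_ = generate(os.Stdout, 1)
}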
+ err = WALGenerateNBlocks(wal.Group(), 60) + if err != nil { + t.Fatal(err) + } + + time.Sleep(1 * time.Millisecond) //wait groupCheckDuration, make sure RotateFile run + + wal.Group().Flush() + + h := int64(50) + gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) + assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h)) + assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h)) + assert.NotNil(t, gr, "expected group not to be nil") + defer gr.Close() + + dec := NewWALDecoder(gr) + msg, err := dec.Decode() + assert.NoError(t, err, "expected to decode a message") + rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + assert.True(t, ok, "expected message of type EventDataRoundState") + assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height")) +} + func TestWALEncoderDecoder(t *testing.T) { - now := time.Now() + now := tmtime.Now() msgs := []TimedWALMessage{ TimedWALMessage{Time: now, Msg: EndHeightMessage{0}}, TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, @@ -54,8 +105,8 @@ func TestWALSearchForEndHeight(t *testing.T) { h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) - assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) + assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h)) + assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h)) assert.NotNil(t, gr, "expected group not to be nil") defer gr.Close() @@ -64,7 +115,7 @@ func TestWALSearchForEndHeight(t *testing.T) { assert.NoError(t, err, "expected to decode a message") rs, ok := msg.Msg.(tmtypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") - assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) + assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height")) } /* @@ -93,7 +144,7 @@ func benchmarkWalDecode(b *testing.B, n int) { enc := NewWALEncoder(buf) data := nBytes(n) - enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)}) + enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}) encoded := buf.Bytes() diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go index c15d070e67e..e3b29a971d2 100644 --- a/crypto/armor/armor.go +++ b/crypto/armor/armor.go @@ -5,7 +5,7 @@ import ( "fmt" "io/ioutil" - "golang.org/x/crypto/openpgp/armor" + "golang.org/x/crypto/openpgp/armor" // forked to github.com/tendermint/crypto ) func EncodeArmor(blockType string, headers map[string]string, data []byte) string { diff --git a/crypto/crypto.go b/crypto/crypto.go index 09c12ff7660..b3526f88128 100644 --- a/crypto/crypto.go +++ b/crypto/crypto.go @@ -1,21 +1,24 @@ package crypto import ( + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) -type PrivKey interface { - Bytes() []byte - Sign(msg []byte) ([]byte, error) - PubKey() PubKey - Equals(PrivKey) bool -} +const ( + // AddressSize is the size of a pubkey address. + AddressSize = tmhash.TruncatedSize +) // An address is a []byte, but hex-encoded even in JSON. // []byte leaves us the option to change the address length. // Use an alias so Unmarshal methods (with ptr receivers) are available too. 
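crypto.AddressSize above reuses tmhash.TruncatedSize, and the AddressHash helper added just below wraps tmhash.SumTruncated, i.e. SHA-256 cut to its first 20 bytes (the "SHA256-20" scheme the PubKeyEd25519.Address comment names). A dependency-free sketch of that derivation, assuming the 20-byte truncation.

package main

import (
	"crypto/sha256"
	"fmt"
)

const addressSize = 20 // mirrors the assumed tmhash.TruncatedSize

// addressHash models crypto.AddressHash: SHA-256, truncated to 20 bytes.
func addressHash(bz []byte) []byte {
	sum := sha256.Sum256(bz)
	return sum[:addressSize]
}

func main() {
	addr := addressHash([]byte("pubkey bytes"))
	fmt.Printf("%d %X\n", len(addr), addr) // 20 <hex address>
}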
type Address = cmn.HexBytes +func AddressHash(bz []byte) Address { + return Address(tmhash.SumTruncated(bz)) +} + type PubKey interface { Address() Address Bytes() []byte @@ -23,6 +26,13 @@ type PubKey interface { Equals(PubKey) bool } +type PrivKey interface { + Bytes() []byte + Sign(msg []byte) ([]byte, error) + PubKey() PubKey + Equals(PrivKey) bool +} + type Symmetric interface { Keygen() []byte Encrypt(plaintext []byte, secret []byte) (ciphertext []byte) diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index fa7526f3fcf..e077cbda486 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -6,9 +6,9 @@ import ( "fmt" "io" - "github.com/tendermint/ed25519" - "github.com/tendermint/ed25519/extra25519" amino "github.com/tendermint/go-amino" + "golang.org/x/crypto/ed25519" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -18,11 +18,11 @@ import ( var _ crypto.PrivKey = PrivKeyEd25519{} const ( - Ed25519PrivKeyAminoRoute = "tendermint/PrivKeyEd25519" - Ed25519PubKeyAminoRoute = "tendermint/PubKeyEd25519" + PrivKeyAminoRoute = "tendermint/PrivKeyEd25519" + PubKeyAminoRoute = "tendermint/PubKeyEd25519" // Size of an Edwards25519 signature. Namely the size of a compressed // Edwards25519 point, and a field element. Both of which are 32 bytes. - SignatureEd25519Size = 64 + SignatureSize = 64 ) var cdc = amino.NewCodec() @@ -30,11 +30,11 @@ var cdc = amino.NewCodec() func init() { cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(PubKeyEd25519{}, - Ed25519PubKeyAminoRoute, nil) + PubKeyAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(PrivKeyEd25519{}, - Ed25519PrivKeyAminoRoute, nil) + PrivKeyAminoRoute, nil) } // PrivKeyEd25519 implements crypto.PrivKey. @@ -46,9 +46,14 @@ func (privKey PrivKeyEd25519) Bytes() []byte { } // Sign produces a signature on the provided message. +// This assumes the privkey is wellformed in the golang format. +// The first 32 bytes should be random, +// corresponding to the normal ed25519 private key. +// The latter 32 bytes should be the compressed public key. +// If these conditions aren't met, Sign will panic or produce an +// incorrect signature. func (privKey PrivKeyEd25519) Sign(msg []byte) ([]byte, error) { - privKeyBytes := [64]byte(privKey) - signatureBytes := ed25519.Sign(&privKeyBytes, msg) + signatureBytes := ed25519.Sign(privKey[:], msg) return signatureBytes[:], nil } @@ -65,14 +70,14 @@ func (privKey PrivKeyEd25519) PubKey() crypto.PubKey { break } } - if initialized { - var pubkeyBytes [PubKeyEd25519Size]byte - copy(pubkeyBytes[:], privKeyBytes[32:]) - return PubKeyEd25519(pubkeyBytes) + + if !initialized { + panic("Expected PrivKeyEd25519 to include concatenated pubkey bytes") } - pubBytes := *ed25519.MakePublicKey(&privKeyBytes) - return PubKeyEd25519(pubBytes) + var pubkeyBytes [PubKeyEd25519Size]byte + copy(pubkeyBytes[:], privKeyBytes[32:]) + return PubKeyEd25519(pubkeyBytes) } // Equals - you probably don't need to use this. @@ -85,17 +90,6 @@ func (privKey PrivKeyEd25519) Equals(other crypto.PrivKey) bool { } } -// ToCurve25519 takes a private key and returns its representation on -// Curve25519. Curve25519 is birationally equivalent to Edwards25519, -// which Ed25519 uses internally. This method is intended for use in -// an X25519 Diffie Hellman key exchange. 
-func (privKey PrivKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { - keyCurve25519 := new([32]byte) - privKeyBytes := [64]byte(privKey) - extra25519.PrivateKeyToCurve25519(keyCurve25519, &privKeyBytes) - return keyCurve25519 -} - // GenPrivKey generates a new ed25519 private key. // It uses OS randomness in conjunction with the current global random seed // in tendermint/libs/common to generate the private key. @@ -105,16 +99,16 @@ func GenPrivKey() PrivKeyEd25519 { // genPrivKey generates a new ed25519 private key using the provided reader. func genPrivKey(rand io.Reader) PrivKeyEd25519 { - privKey := new([64]byte) - _, err := io.ReadFull(rand, privKey[:32]) + seed := make([]byte, 32) + _, err := io.ReadFull(rand, seed[:]) if err != nil { panic(err) } - // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. - // It places the pubkey in the last 32 bytes of privKey, and returns the - // public key. - ed25519.MakePublicKey(privKey) - return PrivKeyEd25519(*privKey) + + privKey := ed25519.NewKeyFromSeed(seed) + var privKeyEd PrivKeyEd25519 + copy(privKeyEd[:], privKey) + return privKeyEd } // GenPrivKeyFromSecret hashes the secret with SHA2, and uses @@ -122,14 +116,12 @@ func genPrivKey(rand io.Reader) PrivKeyEd25519 { // NOTE: secret should be the output of a KDF like bcrypt, // if it's derived from user input. func GenPrivKeyFromSecret(secret []byte) PrivKeyEd25519 { - privKey32 := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. - privKey := new([64]byte) - copy(privKey[:32], privKey32) - // ed25519.MakePublicKey(privKey) alters the last 32 bytes of privKey. - // It places the pubkey in the last 32 bytes of privKey, and returns the - // public key. - ed25519.MakePublicKey(privKey) - return PrivKeyEd25519(*privKey) + seed := crypto.Sha256(secret) // Not Ripemd160 because we want 32 bytes. + + privKey := ed25519.NewKeyFromSeed(seed) + var privKeyEd PrivKeyEd25519 + copy(privKeyEd[:], privKey) + return privKeyEd } //------------------------------------- @@ -144,7 +136,7 @@ type PubKeyEd25519 [PubKeyEd25519Size]byte // Address is the SHA256-20 of the raw pubkey bytes. func (pubKey PubKeyEd25519) Address() crypto.Address { - return crypto.Address(tmhash.Sum(pubKey[:])) + return crypto.Address(tmhash.SumTruncated(pubKey[:])) } // Bytes marshals the PubKey using amino encoding. @@ -156,30 +148,12 @@ func (pubKey PubKeyEd25519) Bytes() []byte { return bz } -func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ []byte) bool { +func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig []byte) bool { // make sure we use the same algorithm to sign - if len(sig_) != SignatureEd25519Size { + if len(sig) != SignatureSize { return false } - sig := new([SignatureEd25519Size]byte) - copy(sig[:], sig_) - pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) - return ed25519.Verify(&pubKeyBytes, msg, sig) -} - -// ToCurve25519 takes a public key and returns its representation on -// Curve25519. Curve25519 is birationally equivalent to Edwards25519, -// which Ed25519 uses internally. This method is intended for use in -// an X25519 Diffie Hellman key exchange. -// -// If there is an error, then this function returns nil. 
-func (pubKey PubKeyEd25519) ToCurve25519() *[PubKeyEd25519Size]byte { - keyCurve25519, pubKeyBytes := new([PubKeyEd25519Size]byte), [PubKeyEd25519Size]byte(pubKey) - ok := extra25519.PublicKeyToCurve25519(keyCurve25519, &pubKeyBytes) - if !ok { - return nil - } - return keyCurve25519 + return ed25519.Verify(pubKey[:], msg, sig) } func (pubKey PubKeyEd25519) String() string { diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index fd9a08442e8..d66ecd9b188 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -1,14 +1,23 @@ package cryptoAmino import ( + "reflect" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/multisig" "github.com/tendermint/tendermint/crypto/secp256k1" ) var cdc = amino.NewCodec() +// routeTable is used to map public key concrete types back +// to their amino routes. This should eventually be handled +// by amino. Example usage: +// routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute +var routeTable = make(map[reflect.Type]string, 3) + func init() { // NOTE: It's important that there be no conflicts here, // as that would change the canonical representations, @@ -17,6 +26,20 @@ func init() { // https://github.com/tendermint/go-amino/issues/9 // is resolved RegisterAmino(cdc) + + // TODO: Have amino provide a way to go from concrete struct to route directly. + // Its currently a private API + routeTable[reflect.TypeOf(ed25519.PubKeyEd25519{})] = ed25519.PubKeyAminoRoute + routeTable[reflect.TypeOf(secp256k1.PubKeySecp256k1{})] = secp256k1.PubKeyAminoRoute + routeTable[reflect.TypeOf(&multisig.PubKeyMultisigThreshold{})] = multisig.PubKeyMultisigThresholdAminoRoute +} + +// PubkeyAminoRoute returns the amino route of a pubkey +// cdc is currently passed in, as eventually this will not be using +// a package level codec. +func PubkeyAminoRoute(cdc *amino.Codec, key crypto.PubKey) (string, bool) { + route, found := routeTable[reflect.TypeOf(key)] + return route, found } // RegisterAmino registers all crypto related types in the given (amino) codec. 
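Since the route lookup above is keyed on the concrete reflect.Type, value types and pointer types are distinct entries; note that the multisig route is registered for the pointer type. A minimal usage sketch, assuming the package is imported as cryptoAmino and reusing its package-level codec (the surrounding variable names are illustrative, not part of this change):

    // Illustrative sketch: look up the amino route for a freshly generated key.
    priv := ed25519.GenPrivKey()
    route, ok := cryptoAmino.PubkeyAminoRoute(cdc, priv.PubKey())
    if ok {
    	fmt.Println(route) // prints "tendermint/PubKeyEd25519"
    }
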
@@ -24,15 +47,17 @@ func RegisterAmino(cdc *amino.Codec) { // These are all written here instead of cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, - "tendermint/PubKeyEd25519", nil) + ed25519.PubKeyAminoRoute, nil) cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, - "tendermint/PubKeySecp256k1", nil) + secp256k1.PubKeyAminoRoute, nil) + cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{}, + multisig.PubKeyMultisigThresholdAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(ed25519.PrivKeyEd25519{}, - "tendermint/PrivKeyEd25519", nil) + ed25519.PrivKeyAminoRoute, nil) cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{}, - "tendermint/PrivKeySecp256k1", nil) + secp256k1.PrivKeyAminoRoute, nil) } func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) { diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 0581ba6438f..056dbec44f8 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/multisig" "github.com/tendermint/tendermint/crypto/secp256k1" ) @@ -53,24 +54,27 @@ func ExamplePrintRegisteredTypes() { //| ---- | ---- | ------ | ----- | ------ | //| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | //| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | + //| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | | //| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | //| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | } func TestKeyEncodings(t *testing.T) { cases := []struct { - privKey crypto.PrivKey - privSize, pubSize int // binary sizes + privKey crypto.PrivKey + privSize, pubSize, sigSize int // binary sizes }{ { privKey: ed25519.GenPrivKey(), privSize: 69, pubSize: 37, + sigSize: 65, }, { privKey: secp256k1.GenPrivKey(), privSize: 37, pubSize: 38, + sigSize: 65, }, } @@ -87,7 +91,7 @@ func TestKeyEncodings(t *testing.T) { var sig1, sig2 []byte sig1, err := tc.privKey.Sign([]byte("something")) assert.NoError(t, err, "tc #%d", tcIndex) - checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways. + checkAminoBinary(t, sig1, &sig2, tc.sigSize) assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex) // Check (de/en)codings of PubKeys. 
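The hard-coded sigSize above replaces the old "-1, skip the check" behaviour. The expected binary sizes follow from amino's encoding: a registered concrete type carries a 4-byte prefix plus a 1-byte length, while a bare signature is just a length-prefixed byte slice. A breakdown consistent with the registered-types table above (the per-field arithmetic is our reading, not stated in the change):

    // ed25519:   privSize = 4 (prefix) + 1 (length 0x40) + 64 = 69
    //            pubSize  = 4 (prefix) + 1 (length 0x20) + 32 = 37
    // secp256k1: privSize = 4 (prefix) + 1 (length 0x20) + 32 = 37
    //            pubSize  = 4 (prefix) + 1 (length 0x21) + 33 = 38
    // signature: sigSize  = 1 (uvarint length 64)       + 64 = 65 for both curves
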
@@ -116,11 +120,29 @@ func TestNilEncodings(t *testing.T) { var e, f crypto.PrivKey checkAminoJSON(t, &e, &f, true) assert.EqualValues(t, e, f) - } func TestPubKeyInvalidDataProperReturnsEmpty(t *testing.T) { pk, err := PubKeyFromBytes([]byte("foo")) - require.NotNil(t, err, "expecting a non-nil error") - require.Nil(t, pk, "expecting an empty public key on error") + require.NotNil(t, err) + require.Nil(t, pk) +} + +func TestPubkeyAminoRoute(t *testing.T) { + tests := []struct { + key crypto.PubKey + want string + found bool + }{ + {ed25519.PubKeyEd25519{}, ed25519.PubKeyAminoRoute, true}, + {secp256k1.PubKeySecp256k1{}, secp256k1.PubKeyAminoRoute, true}, + {&multisig.PubKeyMultisigThreshold{}, multisig.PubKeyMultisigThresholdAminoRoute, true}, + } + for i, tc := range tests { + got, found := PubkeyAminoRoute(cdc, tc.key) + require.Equal(t, tc.found, found, "not equal on tc %d", i) + if tc.found { + require.Equal(t, tc.want, got, "not equal on tc %d", i) + } + } } diff --git a/crypto/hash.go b/crypto/hash.go index c1fb41f7a5d..a384bbb5508 100644 --- a/crypto/hash.go +++ b/crypto/hash.go @@ -3,7 +3,7 @@ package crypto import ( "crypto/sha256" - "golang.org/x/crypto/ripemd160" + "golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto ) func Sha256(bytes []byte) []byte { diff --git a/crypto/merkle/compile.sh b/crypto/merkle/compile.sh new file mode 100644 index 00000000000..8e4c739f4ed --- /dev/null +++ b/crypto/merkle/compile.sh @@ -0,0 +1,6 @@ +#! /bin/bash + +protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto +echo "--> adding nolint declarations to protobuf generated files" +awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new +mv merkle.pb.go.new merkle.pb.go diff --git a/crypto/merkle/merkle.pb.go b/crypto/merkle/merkle.pb.go new file mode 100644 index 00000000000..75e1b08c313 --- /dev/null +++ b/crypto/merkle/merkle.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: crypto/merkle/merkle.proto + +package merkle + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing nessecary data +// for example neighbouring node hash +type ProofOp struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProofOp) Reset() { *m = ProofOp{} } +func (m *ProofOp) String() string { return proto.CompactTextString(m) } +func (*ProofOp) ProtoMessage() {} +func (*ProofOp) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{0} +} +func (m *ProofOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ProofOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOp.Merge(dst, src) +} +func (m *ProofOp) XXX_Size() int { + return m.Size() +} +func (m *ProofOp) XXX_DiscardUnknown() { + xxx_messageInfo_ProofOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofOp proto.InternalMessageInfo + +func (m *ProofOp) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ProofOp) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ProofOp) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Proof is Merkle proof defined by the list of ProofOps +type Proof struct { + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proof) Reset() { *m = Proof{} } +func (m *Proof) String() string { return proto.CompactTextString(m) } +func (*Proof) ProtoMessage() {} +func (*Proof) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{1} +} +func (m *Proof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Proof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proof.Merge(dst, src) +} +func (m *Proof) XXX_Size() int { + return m.Size() +} +func (m *Proof) XXX_DiscardUnknown() { + xxx_messageInfo_Proof.DiscardUnknown(m) +} + +var xxx_messageInfo_Proof proto.InternalMessageInfo + +func (m *Proof) GetOps() []ProofOp { + if m != nil { + return m.Ops + } + return nil +} + +func init() { + proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp") + proto.RegisterType((*Proof)(nil), "merkle.Proof") +} +func (this *ProofOp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProofOp) + if !ok { + that2, ok := that.(ProofOp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + 
return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Proof) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Proof) + if !ok { + that2, ok := that.(Proof) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Ops) != len(that1.Ops) { + return false + } + for i := range this.Ops { + if !this.Ops[i].Equal(&that1.Ops[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (m *ProofOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Proof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proof) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ops) > 0 { + for _, msg := range m.Ops { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp { + this := &ProofOp{} + this.Type = string(randStringMerkle(r)) + v1 := r.Intn(100) + this.Key = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v2 := r.Intn(100) + this.Data = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 4) + } + return this +} + +func NewPopulatedProof(r randyMerkle, easy bool) *Proof { + this := &Proof{} + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Ops = make([]ProofOp, v3) + for i := 0; i < v3; i++ { + v4 := NewPopulatedProofOp(r, easy) + this.Ops[i] = *v4 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 2) + } + return this +} + +type randyMerkle interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneMerkle(r randyMerkle) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 
61) +} +func randStringMerkle(r randyMerkle) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneMerkle(r) + } + return string(tmps) +} +func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *ProofOp) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Proof) Size() (n int) { + var l int + _ = l + if len(m.Ops) > 0 { + for _, e := range m.Ops { + l = e.Size() + n += 1 + l + sovMerkle(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMerkle(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMerkle(x uint64) (n int) { + return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProofOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthMerkle + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ops = append(m.Ops, ProofOp{}) + if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMerkle(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMerkle + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMerkle(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) } + +var fileDescriptor_merkle_5d3f6051907285da = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, + 0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, + 0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, + 0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21, + 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48, + 0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9, + 0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43, + 0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5, + 0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8, + 0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00, +} diff --git a/crypto/merkle/merkle.proto b/crypto/merkle/merkle.proto new file mode 100644 
index 00000000000..8a6c467d423
--- /dev/null
+++ b/crypto/merkle/merkle.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+package merkle;
+
+// For more information on gogo.proto, see:
+// https://github.com/gogo/protobuf/blob/master/extensions.md
+import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+
+option (gogoproto.populate_all) = true;
+option (gogoproto.equal_all) = true;
+
+//----------------------------------------
+// Message types
+
+// ProofOp defines an operation used for calculating the Merkle root.
+// The data can be in an arbitrary format, providing the necessary data,
+// for example a neighbouring node's hash.
+message ProofOp {
+  string type = 1;
+  bytes key = 2;
+  bytes data = 3;
+}
+
+// Proof is a Merkle proof defined by a list of ProofOps
+message Proof {
+  repeated ProofOp ops = 1 [(gogoproto.nullable)=false];
+}
diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go
new file mode 100644
index 00000000000..8f8b460c988
--- /dev/null
+++ b/crypto/merkle/proof.go
@@ -0,0 +1,138 @@
+package merkle
+
+import (
+	"bytes"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
+)
+
+//----------------------------------------
+// ProofOp gets converted to an instance of ProofOperator:
+
+// ProofOperator is a layer for calculating intermediate Merkle roots
+// when a series of Merkle trees are chained together.
+// Run() takes leaf values from a tree and returns the Merkle
+// root for the corresponding tree. It takes and returns a list of bytes
+// to allow multiple leaves to be part of a single proof, for instance in a range proof.
+// ProofOp() encodes the ProofOperator in a generic way so it can later be
+// decoded with OpDecoder.
+type ProofOperator interface {
+	Run([][]byte) ([][]byte, error)
+	GetKey() []byte
+	ProofOp() ProofOp
+}
+
+//----------------------------------------
+// Operations on a list of ProofOperators
+
+// ProofOperators is a slice of ProofOperator(s).
+// Each operator is applied to the input value in sequence, and the
+// final Merkle root is verified against already-known data.
+type ProofOperators []ProofOperator
+
+func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
+	return poz.Verify(root, keypath, [][]byte{value})
+}
+
+func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
+	keys, err := KeyPathToKeys(keypath)
+	if err != nil {
+		return
+	}
+
+	for i, op := range poz {
+		key := op.GetKey()
+		if len(key) != 0 {
+			if len(keys) == 0 {
+				return cmn.NewError("Key path has insufficient # of parts: expected no more keys but got %+v", string(key))
+			}
+			lastKey := keys[len(keys)-1]
+			if !bytes.Equal(lastKey, key) {
+				return cmn.NewError("Key mismatch on operation #%d: expected %+v but got %+v", i, string(lastKey), string(key))
+			}
+			keys = keys[:len(keys)-1]
+		}
+		args, err = op.Run(args)
+		if err != nil {
+			return
+		}
+	}
+	if !bytes.Equal(root, args[0]) {
+		return cmn.NewError("Calculated root hash is invalid: expected %+v but got %+v", root, args[0])
+	}
+	if len(keys) != 0 {
+		return cmn.NewError("Key path was not fully consumed")
+	}
+	return nil
+}
+
+//----------------------------------------
+// ProofRuntime - main entrypoint
+
+type OpDecoder func(ProofOp) (ProofOperator, error)
+
+type ProofRuntime struct {
+	decoders map[string]OpDecoder
+}
+
+func NewProofRuntime() *ProofRuntime {
+	return &ProofRuntime{
+		decoders: make(map[string]OpDecoder),
+	}
+}
+
+func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
+	_, ok := prt.decoders[typ]
+	if ok {
+		panic("already registered for type " + typ)
+	}
+	prt.decoders[typ] = dec
+}
+
+func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
+	decoder := prt.decoders[pop.Type]
+	if decoder == nil {
+		return nil, cmn.NewError("unrecognized proof type %v", pop.Type)
+	}
+	return decoder(pop)
+}
+
+func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
+	var poz ProofOperators
+	for _, pop := range proof.Ops {
+		operator, err := prt.Decode(pop)
+		if err != nil {
+			return nil, cmn.ErrorWrap(err, "decoding a proof operator")
+		}
+		poz = append(poz, operator)
+	}
+	return poz, nil
+}
+
+func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
+	return prt.Verify(proof, root, keypath, [][]byte{value})
+}
+
+// TODO: In the long run we'll need a method of classification for ops:
+// existence, absence, or perhaps a third kind?
+func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
+	return prt.Verify(proof, root, keypath, nil)
+}
+
+func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
+	poz, err := prt.DecodeProof(proof)
+	if err != nil {
+		return cmn.ErrorWrap(err, "decoding proof")
+	}
+	return poz.Verify(root, keypath, args)
+}
+
+// DefaultProofRuntime only knows about Simple value
+// proofs.
+// To use e.g. IAVL proofs, register op-decoders as
+// defined in the IAVL package.
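+//
+// A usage sketch (the keypath and arguments are illustrative only):
+//
+//	prt := DefaultProofRuntime()
+//	err := prt.VerifyValue(proof, rootHash, "/mykey", value)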
+func DefaultProofRuntime() (prt *ProofRuntime) { + prt = NewProofRuntime() + prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder) + return +} diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go new file mode 100644 index 00000000000..aec93e8267d --- /dev/null +++ b/crypto/merkle/proof_key_path.go @@ -0,0 +1,111 @@ +package merkle + +import ( + "encoding/hex" + "fmt" + "net/url" + "strings" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* + + For generalized Merkle proofs, each layer of the proof may require an + optional key. The key may be encoded either by URL-encoding or + (upper-case) hex-encoding. + TODO: In the future, more encodings may be supported, like base32 (e.g. + /32:) + + For example, for a Cosmos-SDK application where the first two proof layers + are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys + might look like: + + 0: []byte("App") + 1: []byte("IBC") + 2: []byte{0x01, 0x02, 0x03} + + Assuming that we know that the first two layers are always ASCII texts, we + probably want to use URLEncoding for those, whereas the third layer will + require HEX encoding for efficient representation. + + kp := new(KeyPath) + kp.AppendKey([]byte("App"), KeyEncodingURL) + kp.AppendKey([]byte("IBC"), KeyEncodingURL) + kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) + kp.String() // Should return "/App/IBC/x:010203" + + NOTE: Key paths must begin with a `/`. + + NOTE: All encodings *MUST* work compatibly, such that you can choose to use + whatever encoding, and the decoded keys will always be the same. In other + words, it's just as good to encode all three keys using URL encoding or HEX + encoding... it just wouldn't be optimal in terms of readability or space + efficiency. + + NOTE: Punycode will never be supported here, because not all values can be + decoded. For example, no string decodes to the string "xn--blah" in + Punycode. + +*/ + +type keyEncoding int + +const ( + KeyEncodingURL keyEncoding = iota + KeyEncodingHex + KeyEncodingMax // Number of known encodings. Used for testing +) + +type Key struct { + name []byte + enc keyEncoding +} + +type KeyPath []Key + +func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath { + return append(pth, Key{key, enc}) +} + +func (pth KeyPath) String() string { + res := "" + for _, key := range pth { + switch key.enc { + case KeyEncodingURL: + res += "/" + url.PathEscape(string(key.name)) + case KeyEncodingHex: + res += "/x:" + fmt.Sprintf("%X", key.name) + default: + panic("unexpected key encoding type") + } + } + return res +} + +// Decode a path to a list of keys. Path must begin with `/`. +// Each key must use a known encoding. +func KeyPathToKeys(path string) (keys [][]byte, err error) { + if path == "" || path[0] != '/' { + return nil, cmn.NewError("key path string must start with a forward slash '/'") + } + parts := strings.Split(path[1:], "/") + keys = make([][]byte, len(parts)) + for i, part := range parts { + if strings.HasPrefix(part, "x:") { + hexPart := part[2:] + key, err := hex.DecodeString(hexPart) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) + } + keys[i] = key + } else { + key, err := url.PathUnescape(part) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) + } + keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... 
+ } + } + return keys, nil +} diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go new file mode 100644 index 00000000000..48fda3032af --- /dev/null +++ b/crypto/merkle/proof_key_path_test.go @@ -0,0 +1,41 @@ +package merkle + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKeyPath(t *testing.T) { + var path KeyPath + keys := make([][]byte, 10) + alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + for d := 0; d < 1e4; d++ { + path = nil + + for i := range keys { + enc := keyEncoding(rand.Intn(int(KeyEncodingMax))) + keys[i] = make([]byte, rand.Uint32()%20) + switch enc { + case KeyEncodingURL: + for j := range keys[i] { + keys[i][j] = alphanum[rand.Intn(len(alphanum))] + } + case KeyEncodingHex: + rand.Read(keys[i]) + default: + panic("Unexpected encoding") + } + path = path.AppendKey(keys[i], enc) + } + + res, err := KeyPathToKeys(path.String()) + require.Nil(t, err) + + for i, key := range keys { + require.Equal(t, key, res[i]) + } + } +} diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go new file mode 100644 index 00000000000..904b6e5ec27 --- /dev/null +++ b/crypto/merkle/proof_simple_value.go @@ -0,0 +1,91 @@ +package merkle + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ProofOpSimpleValue = "simple:v" + +// SimpleValueOp takes a key and a single value as argument and +// produces the root hash. The corresponding tree structure is +// the SimpleMap tree. SimpleMap takes a Hasher, and currently +// Tendermint uses aminoHasher. SimpleValueOp should support +// the hash function as used in aminoHasher. TODO support +// additional hash functions here as options/args to this +// operator. +// +// If the produced root hash matches the expected hash, the +// proof is good. +type SimpleValueOp struct { + // Encoded in ProofOp.Key. + key []byte + + // To encode in ProofOp.Data + Proof *SimpleProof `json:"simple_proof"` +} + +var _ ProofOperator = SimpleValueOp{} + +func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { + return SimpleValueOp{ + key: key, + Proof: proof, + } +} + +func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpSimpleValue { + return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) + } + var op SimpleValueOp // a bit strange as we'll discard this, but it works. + err := cdc.UnmarshalBinaryLengthPrefixed(pop.Data, &op) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + } + return NewSimpleValueOp(pop.Key, op.Proof), nil +} + +func (op SimpleValueOp) ProofOp() ProofOp { + bz := cdc.MustMarshalBinaryLengthPrefixed(op) + return ProofOp{ + Type: ProofOpSimpleValue, + Key: op.key, + Data: bz, + } +} + +func (op SimpleValueOp) String() string { + return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey()) +} + +func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 1 { + return nil, cmn.NewError("expected 1 arg, got %v", len(args)) + } + value := args[0] + hasher := tmhash.New() + hasher.Write(value) // does not error + vhash := hasher.Sum(nil) + + // Wrap to hash the KVPair. 
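+	// (So the leaf hash is tmhash(encode(key) || encode(tmhash(value))), where
+	// encode is the uvarint length-prefixed byte-slice encoding; this mirrors
+	// simpleMap.Set / KVPair.Bytes and must stay in sync with them.)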
+ hasher = tmhash.New() + encodeByteSlice(hasher, []byte(op.key)) // does not error + encodeByteSlice(hasher, []byte(vhash)) // does not error + kvhash := hasher.Sum(nil) + + if !bytes.Equal(kvhash, op.Proof.LeafHash) { + return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + } + + return [][]byte{ + op.Proof.ComputeRootHash(), + }, nil +} + +func (op SimpleValueOp) GetKey() []byte { + return op.key +} diff --git a/crypto/merkle/proof_test.go b/crypto/merkle/proof_test.go new file mode 100644 index 00000000000..320b9188a18 --- /dev/null +++ b/crypto/merkle/proof_test.go @@ -0,0 +1,140 @@ +package merkle + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ProofOpDomino = "test:domino" + +// Expects given input, produces given output. +// Like the game dominos. +type DominoOp struct { + key string // unexported, may be empty + Input string + Output string +} + +func NewDominoOp(key, input, output string) DominoOp { + return DominoOp{ + key: key, + Input: input, + Output: output, + } +} + +func DominoOpDecoder(pop ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpDomino { + panic("unexpected proof op type") + } + var op DominoOp // a bit strange as we'll discard this, but it works. + err := amino.UnmarshalBinaryLengthPrefixed(pop.Data, &op) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + } + return NewDominoOp(string(pop.Key), op.Input, op.Output), nil +} + +func (dop DominoOp) ProofOp() ProofOp { + bz := amino.MustMarshalBinaryLengthPrefixed(dop) + return ProofOp{ + Type: ProofOpDomino, + Key: []byte(dop.key), + Data: bz, + } +} + +func (dop DominoOp) Run(input [][]byte) (output [][]byte, err error) { + if len(input) != 1 { + return nil, cmn.NewError("Expected input of length 1") + } + if string(input[0]) != dop.Input { + return nil, cmn.NewError("Expected input %v, got %v", + dop.Input, string(input[0])) + } + return [][]byte{[]byte(dop.Output)}, nil +} + +func (dop DominoOp) GetKey() []byte { + return []byte(dop.key) +} + +//---------------------------------------- + +func TestProofOperators(t *testing.T) { + var err error + + // ProofRuntime setup + // TODO test this somehow. 
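+	// (e.g. register DominoOpDecoder on prt and run prt.Verify over an encoded
+	// Proof, mirroring the direct ProofOperators checks below.)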
+ // prt := NewProofRuntime() + // prt.RegisterOpDecoder(ProofOpDomino, DominoOpDecoder) + + // ProofOperators setup + op1 := NewDominoOp("KEY1", "INPUT1", "INPUT2") + op2 := NewDominoOp("KEY2", "INPUT2", "INPUT3") + op3 := NewDominoOp("", "INPUT3", "INPUT4") + op4 := NewDominoOp("KEY4", "INPUT4", "OUTPUT4") + + // Good + popz := ProofOperators([]ProofOperator{op1, op2, op3, op4}) + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.Nil(t, err) + err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1")) + assert.Nil(t, err) + + // BAD INPUT + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")}) + assert.NotNil(t, err) + err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG")) + assert.NotNil(t, err) + + // BAD KEY 1 + err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 2 + err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 3 + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 4 + err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD KEY 5 + err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD OUTPUT 1 + err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD OUTPUT 2 + err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 1 + popz = []ProofOperator{op1, op2, op4} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 2 + popz = []ProofOperator{op4, op3, op2, op1} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) + + // BAD POPZ 3 + popz = []ProofOperator{} + err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")}) + assert.NotNil(t, err) +} + +func bz(s string) []byte { + return []byte(s) +} diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index ba4b9309af0..bde442203c5 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -1,6 +1,9 @@ package merkle import ( + "bytes" + + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -20,14 +23,15 @@ func newSimpleMap() *simpleMap { } } -// Set hashes the key and value and appends it to the kv pairs. -func (sm *simpleMap) Set(key string, value Hasher) { +// Set creates a kv pair of the key and the hash of the value, +// and then appends it to simpleMap's kv pairs. +func (sm *simpleMap) Set(key string, value []byte) { sm.sorted = false // The value is hashed, so you can // check for equality with a cached value (say) // and make a determination to fetch or not. - vhash := value.Hash() + vhash := tmhash.Sum(value) sm.kvs = append(sm.kvs, cmn.KVPair{ Key: []byte(key), @@ -66,23 +70,25 @@ func (sm *simpleMap) KVPairs() cmn.KVPairs { // then hashed. type KVPair cmn.KVPair -func (kv KVPair) Hash() []byte { - hasher := tmhash.New() - err := encodeByteSlice(hasher, kv.Key) +// Bytes returns key || value, with both the +// key and value length prefixed. 
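+// For example (illustrative values): key "k" and value 0xBEEF encode as
+// 0x01 'k' 0x02 0xBE 0xEF, with a uvarint length before each field.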
+func (kv KVPair) Bytes() []byte { + var b bytes.Buffer + err := amino.EncodeByteSlice(&b, kv.Key) if err != nil { panic(err) } - err = encodeByteSlice(hasher, kv.Value) + err = amino.EncodeByteSlice(&b, kv.Value) if err != nil { panic(err) } - return hasher.Sum(nil) + return b.Bytes() } func hashKVPairs(kvs cmn.KVPairs) []byte { - kvsH := make([]Hasher, len(kvs)) + kvsH := make([][]byte, len(kvs)) for i, kvp := range kvs { - kvsH[i] = KVPair(kvp) + kvsH[i] = KVPair(kvp).Bytes() } - return SimpleHashFromHashers(kvsH) + return SimpleHashFromByteSlices(kvsH) } diff --git a/crypto/merkle/simple_map_test.go b/crypto/merkle/simple_map_test.go index 34febcf162e..7abde119de6 100644 --- a/crypto/merkle/simple_map_test.go +++ b/crypto/merkle/simple_map_test.go @@ -5,50 +5,29 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/crypto/tmhash" ) -type strHasher string - -func (str strHasher) Hash() []byte { - return tmhash.Sum([]byte(str)) -} - func TestSimpleMap(t *testing.T) { - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - assert.Equal(t, "fa9bc106ffd932d919bee935ceb6cf2b3dd72d8f", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value2")) - assert.Equal(t, "e00e7dcfe54e9fafef5111e813a587f01ba9c3e8", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - assert.Equal(t, "eff12d1c703a1022ab509287c0f196130123d786", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") - } - { - db := newSimpleMap() - db.Set("key1", strHasher("value1")) - db.Set("key2", strHasher("value2")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + tests := []struct { + keys []string + values []string // each string gets converted to []byte in test + want string + }{ + {[]string{"key1"}, []string{"value1"}, "321d150de16dceb51c72981b432b115045383259b1a550adf8dc80f927508967"}, + {[]string{"key1"}, []string{"value2"}, "2a9e4baf321eac99f6eecc3406603c14bc5e85bb7b80483cbfc75b3382d24a2f"}, + // swap order with 2 keys + {[]string{"key1", "key2"}, []string{"value1", "value2"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"}, + {[]string{"key2", "key1"}, []string{"value2", "value1"}, "c4d8913ab543ba26aa970646d4c99a150fd641298e3367cf68ca45fb45a49881"}, + // swap order with 3 keys + {[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"}, + {[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "b23cef00eda5af4548a213a43793f2752d8d9013b3f2b64bc0523a4791196268"}, } - { + for i, tc := range tests { db := newSimpleMap() - db.Set("key2", strHasher("value2")) // NOTE: out of order - db.Set("key1", strHasher("value1")) - db.Set("key3", strHasher("value3")) - assert.Equal(t, "b2c62a277c08dbd2ad73ca53cd1d6bfdf5830d26", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + for i := 0; i < len(tc.keys); i++ { + db.Set(tc.keys[i], []byte(tc.values[i])) + } + got := db.Hash() + assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i) } 
} diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 2541b6d3843..fd6d07b88c9 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -2,23 +2,39 @@ package merkle import ( "bytes" + "errors" "fmt" + + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" ) -// SimpleProof represents a simple merkle proof. +// SimpleProof represents a simple Merkle proof. +// NOTE: The convention for proofs is to include leaf hashes but to +// exclude the root hash. +// This convention is implemented across IAVL range proofs as well. +// Keep this consistent unless there's a very good reason to change +// everything. This also affects the generalized proof system as +// well. type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int `json:"total"` // Total number of items. + Index int `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } -// SimpleProofsFromHashers computes inclusion proof for given items. +// SimpleProofsFromByteSlices computes inclusion proof for given items. // proofs[0] is the proof for items[0]. -func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashers(items) +func SimpleProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromByteSlices(items) rootHash = rootSPN.Hash proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), + Total: len(items), + Index: i, + LeafHash: trail.Hash, + Aunts: trail.FlattenAunts(), } } return @@ -27,19 +43,19 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP // SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values // in the underlying key-value pairs. // The keys are sorted before the proofs are computed. -func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { +func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) { sm := newSimpleMap() for k, v := range m { sm.Set(k, v) } sm.Sort() kvs := sm.kvs - kvsH := make([]Hasher, 0, len(kvs)) - for _, kvp := range kvs { - kvsH = append(kvsH, KVPair(kvp)) + kvsBytes := make([][]byte, len(kvs)) + for i, kvp := range kvs { + kvsBytes[i] = KVPair(kvp).Bytes() } - rootHash, proofList := SimpleProofsFromHashers(kvsH) + rootHash, proofList := SimpleProofsFromByteSlices(kvsBytes) proofs = make(map[string]*SimpleProof) keys = make([]string, len(proofList)) for i, kvp := range kvs { @@ -49,11 +65,33 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[strin return } -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. -func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - return computedHash != nil && bytes.Equal(computedHash, rootHash) +// Verify that the SimpleProof proves the root hash. 
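+// It checks the supplied leafHash against sp.LeafHash, recomputes the root
+// from sp.Index, sp.Total, sp.LeafHash, and sp.Aunts, and compares it to rootHash.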
+// Check sp.Index/sp.Total manually if needed +func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error { + if sp.Total < 0 { + return errors.New("Proof total must be positive") + } + if sp.Index < 0 { + return errors.New("Proof index cannot be negative") + } + if !bytes.Equal(sp.LeafHash, leafHash) { + return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) + } + computedHash := sp.ComputeRootHash() + if !bytes.Equal(computedHash, rootHash) { + return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash) + } + return nil +} + +// Compute the root hash given a leaf hash. Does not verify the result. +func (sp *SimpleProof) ComputeRootHash() []byte { + return computeHashFromAunts( + sp.Index, + sp.Total, + sp.LeafHash, + sp.Aunts, + ) } // String implements the stringer interface for SimpleProof. @@ -96,13 +134,13 @@ func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][ if leftHash == nil { return nil } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + return simpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) } rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) if rightHash == nil { return nil } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + return simpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) } } @@ -138,18 +176,18 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte { // trails[0].Hash is the leaf hash for items[0]. // trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { +func trailsFromByteSlices(items [][]byte) (trails []*SimpleProofNode, root *SimpleProofNode) { // Recursive impl. switch len(items) { case 0: return nil, nil case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + trail := &SimpleProofNode{tmhash.Sum(items[0]), nil, nil, nil} return []*SimpleProofNode{trail}, trail default: - lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + lefts, leftRoot := trailsFromByteSlices(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromByteSlices(items[(len(items)+1)/2:]) + rootHash := simpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) root := &SimpleProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root leftRoot.Right = rightRoot diff --git a/crypto/merkle/simple_tree.go b/crypto/merkle/simple_tree.go index 46a07590991..7aacb0889fb 100644 --- a/crypto/merkle/simple_tree.go +++ b/crypto/merkle/simple_tree.go @@ -4,8 +4,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" ) -// SimpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right). -func SimpleHashFromTwoHashes(left, right []byte) []byte { +// simpleHashFromTwoHashes is the basic operation of the Merkle tree: Hash(left | right). +func simpleHashFromTwoHashes(left, right []byte) []byte { var hasher = tmhash.New() err := encodeByteSlice(hasher, left) if err != nil { @@ -18,41 +18,29 @@ func SimpleHashFromTwoHashes(left, right []byte) []byte { return hasher.Sum(nil) } -// SimpleHashFromHashers computes a Merkle tree from items that can be hashed. 
-func SimpleHashFromHashers(items []Hasher) []byte { - hashes := make([][]byte, len(items)) - for i, item := range items { - hash := item.Hash() - hashes[i] = hash +// SimpleHashFromByteSlices computes a Merkle tree where the leaves are the byte slices, +// in the provided order. +func SimpleHashFromByteSlices(items [][]byte) []byte { + switch len(items) { + case 0: + return nil + case 1: + return tmhash.Sum(items[0]) + default: + left := SimpleHashFromByteSlices(items[:(len(items)+1)/2]) + right := SimpleHashFromByteSlices(items[(len(items)+1)/2:]) + return simpleHashFromTwoHashes(left, right) } - return simpleHashFromHashes(hashes) } // SimpleHashFromMap computes a Merkle tree from a sorted map. // Like calling SimpleHashFromByteSlices with // `item = []byte(Hash(key) | Hash(value))`, // sorted by `item`. -func SimpleHashFromMap(m map[string]Hasher) []byte { +func SimpleHashFromMap(m map[string][]byte) []byte { sm := newSimpleMap() for k, v := range m { sm.Set(k, v) } return sm.Hash() } - -//---------------------------------------------------------------- - -// Expects hashes! -func simpleHashFromHashes(hashes [][]byte) []byte { - // Recursive impl. - switch len(hashes) { - case 0: - return nil - case 1: - return hashes[0] - default: - left := simpleHashFromHashes(hashes[:(len(hashes)+1)/2]) - right := simpleHashFromHashes(hashes[(len(hashes)+1)/2:]) - return SimpleHashFromTwoHashes(left, right) - } -} diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index 488e0c90767..32edc652e8a 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -1,13 +1,14 @@ package merkle import ( - "bytes" + "testing" + + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" . "github.com/tendermint/tendermint/libs/test" "github.com/tendermint/tendermint/crypto/tmhash" - "testing" ) type testItem []byte @@ -20,69 +21,52 @@ func TestSimpleProof(t *testing.T) { total := 100 - items := make([]Hasher, total) + items := make([][]byte, total) for i := 0; i < total; i++ { items[i] = testItem(cmn.RandBytes(tmhash.Size)) } - rootHash := SimpleHashFromHashers(items) + rootHash := SimpleHashFromByteSlices(items) - rootHash2, proofs := SimpleProofsFromHashers(items) + rootHash2, proofs := SimpleProofsFromByteSlices(items) - if !bytes.Equal(rootHash, rootHash2) { - t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) - } + require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) // For each item, check the trail.
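The test above exercises the reworked byte-slice API; outside the test harness the flow looks like the minimal sketch below (only functions introduced in this diff are used, and the item values are made up). Because Total, Index, and LeafHash now live inside the proof itself, Verify needs only the root hash and an independently computed leaf hash.

    package main

    import (
        "fmt"

        "github.com/tendermint/tendermint/crypto/merkle"
        "github.com/tendermint/tendermint/crypto/tmhash"
    )

    func main() {
        items := [][]byte{[]byte("alice"), []byte("bob"), []byte("carol")}

        // Leaves are raw byte slices; hashing happens internally.
        rootHash, proofs := merkle.SimpleProofsFromByteSlices(items)

        // Recompute the leaf hash independently and verify item 1.
        leafHash := tmhash.Sum(items[1])
        if err := proofs[1].Verify(rootHash, leafHash); err != nil {
            panic(err)
        }
        fmt.Printf("item 1 proven under root %X\n", rootHash)
    }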
for i, item := range items { - itemHash := item.Hash() + itemHash := tmhash.Sum(item) proof := proofs[i] + // Check total/index + require.Equal(t, proof.Index, i, "Unmatched indices: %d vs %d", proof.Index, i) + + require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) + // Verify success - ok := proof.Verify(i, total, itemHash, rootHash) - if !ok { - t.Errorf("Verification failed for index %v.", i) - } - - // Wrong item index should make it fail - { - ok = proof.Verify((i+1)%total, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong index %v.", i) - } - } + err := proof.Verify(rootHash, itemHash) + require.NoError(t, err, "Verification failed: %v.", err) // Trail too long should make it fail origAunts := proof.Aunts proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Trail too short should make it fail proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1] - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Mutating the itemHash should make it fail. - ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) - if ok { - t.Errorf("Expected verification to fail for mutated leaf hash") - } + err = proof.Verify(rootHash, MutateByteSlice(itemHash)) + require.Error(t, err, "Expected verification to fail for mutated leaf hash") // Mutating the rootHash should make it fail. - ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) - if ok { - t.Errorf("Expected verification to fail for mutated root hash") - } + err = proof.Verify(MutateByteSlice(rootHash), itemHash) + require.Error(t, err, "Expected verification to fail for mutated root hash") } } diff --git a/crypto/merkle/types.go b/crypto/merkle/types.go index 2fcb3f39d8b..97a47879b38 100644 --- a/crypto/merkle/types.go +++ b/crypto/merkle/types.go @@ -25,11 +25,6 @@ type Tree interface { IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) } -// Hasher represents a hashable piece of data which can be hashed in the Tree. -type Hasher interface { - Hash() []byte -} - //----------------------------------------------------------------------- // Uvarint length prefixed byteslice diff --git a/crypto/merkle/wire.go b/crypto/merkle/wire.go new file mode 100644 index 00000000000..c20ec9aa472 --- /dev/null +++ b/crypto/merkle/wire.go @@ -0,0 +1,12 @@ +package merkle + +import ( + "github.com/tendermint/go-amino" +) + +var cdc *amino.Codec + +func init() { + cdc = amino.NewCodec() + cdc.Seal() +} diff --git a/crypto/multisig/bitarray/compact_bit_array.go b/crypto/multisig/bitarray/compact_bit_array.go new file mode 100644 index 00000000000..0152db72469 --- /dev/null +++ b/crypto/multisig/bitarray/compact_bit_array.go @@ -0,0 +1,233 @@ +package bitarray + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "regexp" + "strings" +) + +// CompactBitArray is an implementation of a space efficient bit array.
+// This is used to ensure that the encoded data takes up a minimal amount of +// space after amino encoding. +// This is not thread safe, and is not intended for concurrent usage. +type CompactBitArray struct { + ExtraBitsStored byte `json:"extra_bits"` // The number of extra bits in elems. + Elems []byte `json:"bits"` +} + +// NewCompactBitArray returns a new compact bit array. +// It returns nil if the number of bits is zero or negative. +func NewCompactBitArray(bits int) *CompactBitArray { + if bits <= 0 { + return nil + } + return &CompactBitArray{ + ExtraBitsStored: byte(bits % 8), + Elems: make([]byte, (bits+7)/8), + } +} + +// Size returns the number of bits in the bitarray. +func (bA *CompactBitArray) Size() int { + if bA == nil { + return 0 + } else if bA.ExtraBitsStored == byte(0) { + return len(bA.Elems) * 8 + } + // num_bits = 8*num_full_bytes + overflow_in_last_byte + // num_full_bytes = (len(bA.Elems)-1) + return (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored) +} + +// GetIndex returns the bit at index i within the bit array. +// The behavior is undefined if i >= bA.Size() +func (bA *CompactBitArray) GetIndex(i int) bool { + if bA == nil { + return false + } + if i >= bA.Size() { + return false + } + return bA.Elems[i>>3]&(uint8(1)<<uint8(7-(i%8))) > 0 +} + +// SetIndex sets the bit at index i within the bit array. +// The behavior is undefined if i >= bA.Size() +func (bA *CompactBitArray) SetIndex(i int, v bool) bool { + if bA == nil { + return false + } + if i >= bA.Size() { + return false + } + if v { + bA.Elems[i>>3] |= (uint8(1) << uint8(7-(i%8))) + } else { + bA.Elems[i>>3] &= ^(uint8(1) << uint8(7-(i%8))) + } + return true +} + +// NumTrueBitsBefore returns the number of bits set to true before the +// given index. e.g. if bA = _XX__XX, NumTrueBitsBefore(4) = 2, since +// there are two bits set to true before index 4. +func (bA *CompactBitArray) NumTrueBitsBefore(index int) int { + numTrueValues := 0 + for i := 0; i < index; i++ { + if bA.GetIndex(i) { + numTrueValues++ + } + } + return numTrueValues +} + +// Copy returns a copy of the provided bit array. +func (bA *CompactBitArray) Copy() *CompactBitArray { + if bA == nil { + return nil + } + c := make([]byte, len(bA.Elems)) + copy(c, bA.Elems) + return &CompactBitArray{ + ExtraBitsStored: bA.ExtraBitsStored, + Elems: c, + } +} + +// String returns a string representation of CompactBitArray: BA{<bit-string>}, +// where <bit-string> is a sequence of 'x' (1) and '_' (0). +// The <bit-string> includes spaces and newlines to help people. +// For a simple sequence of 'x' and '_' characters with no spaces or newlines, +// see the MarshalJSON() method. +// Example: "BA{_x_}" or "nil-BitArray" for nil. +func (bA *CompactBitArray) String() string { + return bA.StringIndented("") +} + +// StringIndented returns the same thing as String(), but applies the indent +// at every 10th bit, and twice at every 50th bit. +func (bA *CompactBitArray) StringIndented(indent string) string { + if bA == nil { + return "nil-BitArray" + } + lines := []string{} + bits := "" + size := bA.Size() + for i := 0; i < size; i++ { + if bA.GetIndex(i) { + bits += "x" + } else { + bits += "_" + } + if i%100 == 99 { + lines = append(lines, bits) + bits = "" + } + if i%10 == 9 { + bits += indent + } + if i%50 == 49 { + bits += indent + } + } + if len(bits) > 0 { + lines = append(lines, bits) + } + return fmt.Sprintf("BA{%v:%v}", size, strings.Join(lines, indent)) +} + +// MarshalJSON implements json.Marshaler interface by marshaling bit array +// using a custom format: a string of '_' or 'x' where 'x' denotes the 1 bit.
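Concretely, bit i lives in Elems[i>>3] under the mask uint8(1) << uint8(7-(i%8)), i.e. most-significant bit first within each byte. A small sketch of the resulting behavior (values are illustrative); the MarshalJSON implementation that produces the quoted bit string follows after it:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/tendermint/tendermint/crypto/multisig/bitarray"
    )

    func main() {
        // 10 bits -> Elems is 2 bytes, ExtraBitsStored = 10 % 8 = 2.
        bA := bitarray.NewCompactBitArray(10)
        bA.SetIndex(0, true) // high bit of Elems[0]
        bA.SetIndex(9, true) // second-highest bit of Elems[1]

        fmt.Println(bA.Size())               // 10 = (2-1)*8 + 2
        fmt.Println(bA.NumTrueBitsBefore(9)) // 1 (only index 0 is set before 9)

        bz, _ := json.Marshal(bA)
        fmt.Println(string(bz)) // "x________x"
    }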
+func (bA *CompactBitArray) MarshalJSON() ([]byte, error) { + if bA == nil { + return []byte("null"), nil + } + + bits := `"` + size := bA.Size() + for i := 0; i < size; i++ { + if bA.GetIndex(i) { + bits += `x` + } else { + bits += `_` + } + } + bits += `"` + return []byte(bits), nil +} + +var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`) + +// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom +// JSON description. +func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error { + b := string(bz) + if b == "null" { + // This is required e.g. for encoding/json when decoding + // into a pointer with pre-allocated BitArray. + bA.ExtraBitsStored = 0 + bA.Elems = nil + return nil + } + + // Validate 'b'. + match := bitArrayJSONRegexp.FindStringSubmatch(b) + if match == nil { + return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b) + } + bits := match[1] + + // Construct new CompactBitArray and copy over. + numBits := len(bits) + bA2 := NewCompactBitArray(numBits) + for i := 0; i < numBits; i++ { + if bits[i] == 'x' { + bA2.SetIndex(i, true) + } + } + *bA = *bA2 + return nil +} + +// CompactMarshal is a space efficient encoding for CompactBitArray. +// It is not amino compatible. +func (bA *CompactBitArray) CompactMarshal() []byte { + size := bA.Size() + if size <= 0 { + return []byte("null") + } + bz := make([]byte, 0, size/8) + // length prefix number of bits, not number of bytes. This difference + // takes 3-4 bits in encoding, as opposed to instead encoding the number of + // bytes (saving 3-4 bits) and including the offset as a full byte. + bz = appendUvarint(bz, uint64(size)) + bz = append(bz, bA.Elems...) + return bz +} + +// CompactUnmarshal is a space efficient decoding for CompactBitArray. +// It is not amino compatible. +func CompactUnmarshal(bz []byte) (*CompactBitArray, error) { + if len(bz) < 2 { + return nil, errors.New("compact bit array: invalid compact unmarshal size") + } else if bytes.Equal(bz, []byte("null")) { + return NewCompactBitArray(0), nil + } + size, n := binary.Uvarint(bz) + bz = bz[n:] + if len(bz) != int(size+7)/8 { + return nil, errors.New("compact bit array: invalid compact unmarshal size") + } + + bA := &CompactBitArray{byte(int(size % 8)), bz} + return bA, nil +} + +func appendUvarint(b []byte, x uint64) []byte { + var a [binary.MaxVarintLen64]byte + n := binary.PutUvarint(a[:], x) + return append(b, a[:n]...) 
+} diff --git a/crypto/multisig/bitarray/compact_bit_array_test.go b/crypto/multisig/bitarray/compact_bit_array_test.go new file mode 100644 index 00000000000..4612ae25af6 --- /dev/null +++ b/crypto/multisig/bitarray/compact_bit_array_test.go @@ -0,0 +1,196 @@ +package bitarray + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func randCompactBitArray(bits int) (*CompactBitArray, []byte) { + numBytes := (bits + 7) / 8 + src := cmn.RandBytes((bits + 7) / 8) + bA := NewCompactBitArray(bits) + + for i := 0; i < numBytes-1; i++ { + for j := uint8(0); j < 8; j++ { + bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0) + } + } + // Set remaining bits + for i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ { + bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0) + } + return bA, src +} + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1 << 31} + for _, bits := range bitList { + bA := NewCompactBitArray(bits) + require.Nil(t, bA) + } +} + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewCompactBitArray(0) + bA2 := NewCompactBitArray(1) + + bA3 := NewCompactBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewCompactBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + bA5 := NewCompactBitArray(9) + bA5.SetIndex(0, true) + bA5.SetIndex(1, true) + bA5.SetIndex(8, true) + + bA6 := NewCompactBitArray(16) + bA6.SetIndex(0, true) + bA6.SetIndex(1, true) + bA6.SetIndex(8, false) + bA6.SetIndex(15, true) + + testCases := []struct { + bA *CompactBitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + {bA5, `"xx______x"`}, + {bA6, `"xx_____________x"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *CompactBitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} + +func TestCompactMarshalUnmarshal(t *testing.T) { + bA1 := NewCompactBitArray(0) + bA2 := NewCompactBitArray(1) + + bA3 := NewCompactBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewCompactBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + bA5 := NewCompactBitArray(9) + bA5.SetIndex(0, true) + bA5.SetIndex(1, true) + bA5.SetIndex(8, true) + + bA6 := NewCompactBitArray(16) + bA6.SetIndex(0, true) + bA6.SetIndex(1, true) + bA6.SetIndex(8, false) + bA6.SetIndex(15, true) + + testCases := []struct { + bA *CompactBitArray + marshalledBA []byte + }{ + {nil, []byte("null")}, + {bA1, []byte("null")}, + {bA2, []byte{byte(1), byte(0)}}, + {bA3, []byte{byte(1), byte(128)}}, + {bA4, []byte{byte(5), byte(192)}}, + {bA5, []byte{byte(9), byte(192), byte(128)}}, + {bA6, []byte{byte(16), byte(192), byte(1)}}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz := tc.bA.CompactMarshal() + + assert.Equal(t, tc.marshalledBA, bz) + + unmarshalledBA, err := CompactUnmarshal(bz) + require.NoError(t, err) + if tc.bA == nil { 
+ require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} + +func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) { + testCases := []struct { + marshalledBA string + bAIndex []int + trueValueIndex []int + }{ + {`"_____"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}}, + {`"x"`, []int{0}, []int{0}}, + {`"_x"`, []int{1}, []int{0}}, + {`"x___xxxx"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}}, + {`"__x_xx_x__x_x___"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}}, + {`"______________xx"`, []int{14, 15}, []int{0, 1}}, + } + for tcIndex, tc := range testCases { + t.Run(tc.marshalledBA, func(t *testing.T) { + var bA *CompactBitArray + err := json.Unmarshal([]byte(tc.marshalledBA), &bA) + require.NoError(t, err) + + for i := 0; i < len(tc.bAIndex); i++ { + require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i) + } + }) + } +} + +func TestCompactBitArrayGetSetIndex(t *testing.T) { + r := rand.New(rand.NewSource(100)) + numTests := 10 + numBitsPerArr := 100 + for i := 0; i < numTests; i++ { + bits := r.Intn(1000) + bA, _ := randCompactBitArray(bits) + + for j := 0; j < numBitsPerArr; j++ { + copy := bA.Copy() + index := r.Intn(bits) + val := (r.Int63() % 2) == 0 + bA.SetIndex(index, val) + require.Equal(t, val, bA.GetIndex(index), "bA.SetIndex(%d, %v) failed on bit array: %s", index, val, copy) + } + } +} diff --git a/crypto/multisig/multisignature.go b/crypto/multisig/multisignature.go new file mode 100644 index 00000000000..0d1796890cb --- /dev/null +++ b/crypto/multisig/multisignature.go @@ -0,0 +1,70 @@ +package multisig + +import ( + "errors" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/multisig/bitarray" +) + +// Multisignature represents the signature object used in multisigs. +// Sigs is a list of signatures, sorted by corresponding index. +type Multisignature struct { + BitArray *bitarray.CompactBitArray + Sigs [][]byte +} + +// NewMultisig returns a new Multisignature of size n. +func NewMultisig(n int) *Multisignature { + // Default the signature list to have a capacity of two, since we can + // expect that most multisigs will require multiple signers. + return &Multisignature{bitarray.NewCompactBitArray(n), make([][]byte, 0, 2)} +} + +// getIndex returns the index of pk in keys. Returns -1 if not found. +func getIndex(pk crypto.PubKey, keys []crypto.PubKey) int { + for i := 0; i < len(keys); i++ { + if pk.Equals(keys[i]) { + return i + } + } + return -1 +} + +// AddSignature adds a signature to the multisig, at the corresponding index. +// If the signature already exists, replace it. +func (mSig *Multisignature) AddSignature(sig []byte, index int) { + newSigIndex := mSig.BitArray.NumTrueBitsBefore(index) + // Signature already exists, just replace the value there + if mSig.BitArray.GetIndex(index) { + mSig.Sigs[newSigIndex] = sig + return + } + mSig.BitArray.SetIndex(index, true) + // Optimization if the index is the greatest index + if newSigIndex == len(mSig.Sigs) { + mSig.Sigs = append(mSig.Sigs, sig) + return + } + // Expand slice by one with a dummy element, move all elements after +// newSigIndex over by one, then place the new signature in that gap.
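+ // For example: keys [A, B, C, D] with signatures already present for A and D + // (BitArray "x__x", Sigs = [sigA, sigD]): adding key C's signature at index 2 + // gives newSigIndex = NumTrueBitsBefore(2) = 1, so the splice below produces + // Sigs = [sigA, sigC, sigD] and BitArray "x_xx".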
+ mSig.Sigs = append(mSig.Sigs, make([]byte, 0)) + copy(mSig.Sigs[newSigIndex+1:], mSig.Sigs[newSigIndex:]) + mSig.Sigs[newSigIndex] = sig +} + +// AddSignatureFromPubKey adds a signature to the multisig, +// at the index in keys corresponding to the provided pubkey. +func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error { + index := getIndex(pubkey, keys) + if index == -1 { + return errors.New("provided key didn't exist in pubkeys") + } + mSig.AddSignature(sig, index) + return nil +} + +// Marshal the multisignature with amino +func (mSig *Multisignature) Marshal() []byte { + return cdc.MustMarshalBinaryBare(mSig) +} diff --git a/crypto/multisig/threshold_pubkey.go b/crypto/multisig/threshold_pubkey.go new file mode 100644 index 00000000000..ca8d4230304 --- /dev/null +++ b/crypto/multisig/threshold_pubkey.go @@ -0,0 +1,92 @@ +package multisig + +import ( + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" +) + +// PubKeyMultisigThreshold implements a K of N threshold multisig. +type PubKeyMultisigThreshold struct { + K uint `json:"threshold"` + PubKeys []crypto.PubKey `json:"pubkeys"` +} + +var _ crypto.PubKey = &PubKeyMultisigThreshold{} + +// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold. +// Panics if len(pubkeys) < k or k <= 0. +func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey { + if k <= 0 { + panic("threshold k of n multisignature: k <= 0") + } + if len(pubkeys) < k { + panic("threshold k of n multisignature: len(pubkeys) < k") + } + return &PubKeyMultisigThreshold{uint(k), pubkeys} +} + +// VerifyBytes expects sig to be an amino encoded version of a Multisignature. +// Returns true iff the multisignature contains k or more signatures +// for the correct corresponding keys, +// and all signatures are valid. (Not just k of the signatures) +// The multisig uses a bitarray, so multiple signatures for the same key are not +// a concern. +func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool { + var sig *Multisignature + err := cdc.UnmarshalBinaryBare(marshalledSig, &sig) + if err != nil { + return false + } + size := sig.BitArray.Size() + // ensure bit array is the correct size + if len(pk.PubKeys) != size { + return false + } + // ensure size of signature list + if len(sig.Sigs) < int(pk.K) || len(sig.Sigs) > size { + return false + } + // ensure at least k signatures are set + if sig.BitArray.NumTrueBitsBefore(size) < int(pk.K) { + return false + } + // index in the list of signatures which we are concerned with. + sigIndex := 0 + for i := 0; i < size; i++ { + if sig.BitArray.GetIndex(i) { + if !pk.PubKeys[i].VerifyBytes(msg, sig.Sigs[sigIndex]) { + return false + } + sigIndex++ + } + } + return true +} + +// Bytes returns the amino encoded version of the PubKeyMultisigThreshold +func (pk *PubKeyMultisigThreshold) Bytes() []byte { + return cdc.MustMarshalBinaryBare(pk) +} + +// Address returns tmhash(PubKeyMultisigThreshold.Bytes()) +func (pk *PubKeyMultisigThreshold) Address() crypto.Address { + return crypto.Address(tmhash.Sum(pk.Bytes())) +} + +// Equals returns true iff pk and other both have the same number of keys, and +// all constituent keys are the same, and in the same order.
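Before the Equals implementation below, a minimal end-to-end sketch of a 2-of-3 threshold key (the message and key choices are made up; only APIs from this diff are used):

    package main

    import (
        "fmt"

        "github.com/tendermint/tendermint/crypto"
        "github.com/tendermint/tendermint/crypto/ed25519"
        "github.com/tendermint/tendermint/crypto/multisig"
    )

    func main() {
        msg := []byte("transaction bytes")

        privs := []crypto.PrivKey{
            ed25519.GenPrivKey(), ed25519.GenPrivKey(), ed25519.GenPrivKey(),
        }
        pubs := make([]crypto.PubKey, len(privs))
        for i, p := range privs {
            pubs[i] = p.PubKey()
        }

        multisigKey := multisig.NewPubKeyMultisigThreshold(2, pubs) // 2-of-3
        msig := multisig.NewMultisig(len(pubs))

        for _, i := range []int{0, 2} { // sign with keys 0 and 2
            sig, err := privs[i].Sign(msg)
            if err != nil {
                panic(err)
            }
            if err := msig.AddSignatureFromPubKey(sig, pubs[i], pubs); err != nil {
                panic(err)
            }
        }

        // VerifyBytes decodes the amino-encoded Multisignature and checks each
        // set bit of the bit array against the matching public key.
        fmt.Println(multisigKey.VerifyBytes(msg, msig.Marshal())) // true
    }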
+func (pk *PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool { + otherKey, sameType := other.(*PubKeyMultisigThreshold) + if !sameType { + return false + } + if pk.K != otherKey.K || len(pk.PubKeys) != len(otherKey.PubKeys) { + return false + } + for i := 0; i < len(pk.PubKeys); i++ { + if !pk.PubKeys[i].Equals(otherKey.PubKeys[i]) { + return false + } + } + return true +} diff --git a/crypto/multisig/threshold_pubkey_test.go b/crypto/multisig/threshold_pubkey_test.go new file mode 100644 index 00000000000..bfc874ebedf --- /dev/null +++ b/crypto/multisig/threshold_pubkey_test.go @@ -0,0 +1,112 @@ +package multisig + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +// This tests multisig functionality, but it expects the first k signatures to be valid +// TODO: Adapt it to give more flexibility about first k signatures being valid +func TestThresholdMultisigValidCases(t *testing.T) { + pkSet1, sigSet1 := generatePubKeysAndSignatures(5, []byte{1, 2, 3, 4}) + cases := []struct { + msg []byte + k int + pubkeys []crypto.PubKey + signingIndices []int + // signatures should be the same size as signingIndices. + signatures [][]byte + passAfterKSignatures []bool + }{ + { + msg: []byte{1, 2, 3, 4}, + k: 2, + pubkeys: pkSet1, + signingIndices: []int{0, 3, 1}, + signatures: sigSet1, + passAfterKSignatures: []bool{false}, + }, + } + for tcIndex, tc := range cases { + multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys) + multisignature := NewMultisig(len(tc.pubkeys)) + for i := 0; i < tc.k-1; i++ { + signingIndex := tc.signingIndices[i] + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed when i < k, tc %d, i %d", tcIndex, i) + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, i+1, len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + } + require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed with k - 1 sigs, tc %d", tcIndex) + multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys) + require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig failed after k good signatures, tc %d", tcIndex) + for i := tc.k + 1; i < len(tc.signingIndices); i++ { + signingIndex := tc.signingIndices[i] + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, tc.passAfterKSignatures[i-tc.k-1], + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i) + + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, i+1, len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + } + } +} + +// TODO: Fully replace this test with table driven tests +func TestThresholdMultisigDuplicateSignatures(t *testing.T) { + msg := []byte{1, 2, 3, 4, 5} + pubkeys, sigs := generatePubKeysAndSignatures(5, msg) + multisigKey := 
NewPubKeyMultisigThreshold(2, pubkeys) + multisignature := NewMultisig(5) + require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) + multisignature.AddSignatureFromPubKey(sigs[0], pubkeys[0], pubkeys) + // Add second signature manually + multisignature.Sigs = append(multisignature.Sigs, sigs[0]) + require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) +} + +// TODO: Fully replace this test with table driven tests +func TestMultiSigPubKeyEquality(t *testing.T) { + msg := []byte{1, 2, 3, 4} + pubkeys, _ := generatePubKeysAndSignatures(5, msg) + multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) + var unmarshalledMultisig *PubKeyMultisigThreshold + cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig) + require.True(t, multisigKey.Equals(unmarshalledMultisig)) + + // Ensure that reordering pubkeys is treated as a different pubkey + pubkeysCpy := make([]crypto.PubKey, 5) + copy(pubkeysCpy, pubkeys) + pubkeysCpy[4] = pubkeys[3] + pubkeysCpy[3] = pubkeys[4] + multisigKey2 := NewPubKeyMultisigThreshold(2, pubkeysCpy) + require.False(t, multisigKey.Equals(multisigKey2)) +} + +func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) { + pubkeys = make([]crypto.PubKey, n) + signatures = make([][]byte, n) + for i := 0; i < n; i++ { + var privkey crypto.PrivKey + if rand.Int63()%2 == 0 { + privkey = ed25519.GenPrivKey() + } else { + privkey = secp256k1.GenPrivKey() + } + pubkeys[i] = privkey.PubKey() + signatures[i], _ = privkey.Sign(msg) + } + return +} diff --git a/crypto/multisig/wire.go b/crypto/multisig/wire.go new file mode 100644 index 00000000000..68b84fbfb7a --- /dev/null +++ b/crypto/multisig/wire.go @@ -0,0 +1,26 @@ +package multisig + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +// TODO: Figure out API for others to either add their own pubkey types, or +// to make verify / marshal accept a cdc. +const ( + PubKeyMultisigThresholdAminoRoute = "tendermint/PubKeyMultisigThreshold" +) + +var cdc = amino.NewCodec() + +func init() { + cdc.RegisterInterface((*crypto.PubKey)(nil), nil) + cdc.RegisterConcrete(PubKeyMultisigThreshold{}, + PubKeyMultisigThresholdAminoRoute, nil) + cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, + ed25519.PubKeyAminoRoute, nil) + cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, + secp256k1.PubKeyAminoRoute, nil) +} diff --git a/crypto/random.go b/crypto/random.go index 5c5057d301d..914c321b74e 100644 --- a/crypto/random.go +++ b/crypto/random.go @@ -1,7 +1,6 @@ package crypto import ( - "crypto/aes" "crypto/cipher" crand "crypto/rand" "crypto/sha256" @@ -9,16 +8,29 @@ import ( "io" "sync" - . "github.com/tendermint/tendermint/libs/common" + "golang.org/x/crypto/chacha20poly1305" ) +// NOTE: This is ignored for now until we have time +// to properly review the MixEntropy function - https://github.com/tendermint/tendermint/issues/2099. +// +// The randomness here is derived from xoring a chacha20 keystream with +// output from crypto/rand's OS Entropy Reader. (Due to fears of the OS' +// entropy being backdoored) +// +// For forward secrecy of produced randomness, the internal chacha key is hashed +// and thereby rotated after each call. 
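The construction sketched in that NOTE is the standard cipher.StreamReader pattern: reads pull from the OS entropy source and are XORed with a keystream, so the output is no weaker than the stronger of the two sources. A rough illustration of the same idea, using the unauthenticated golang.org/x/crypto/chacha20 package (which postdates this diff) rather than the forked AEAD-based variant in this file, and without the key rotation:

    package main

    import (
        "crypto/cipher"
        crand "crypto/rand"
        "fmt"
        "io"

        "golang.org/x/crypto/chacha20"
    )

    func main() {
        key := make([]byte, chacha20.KeySize)
        nonce := make([]byte, chacha20.NonceSize)
        if _, err := crand.Read(key); err != nil {
            panic(err)
        }

        // Output = OS entropy XOR chacha20 keystream.
        stream, err := chacha20.NewUnauthenticatedCipher(key, nonce)
        if err != nil {
            panic(err)
        }
        r := &cipher.StreamReader{S: stream, R: crand.Reader}

        out := make([]byte, 32)
        if _, err := io.ReadFull(r, out); err != nil {
            panic(err)
        }
        fmt.Printf("%X\n", out)
    }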
var gRandInfo *randInfo func init() { gRandInfo = &randInfo{} - gRandInfo.MixEntropy(randBytes(32)) // Init + + // TODO: uncomment after reviewing MixEntropy - + // https://github.com/tendermint/tendermint/issues/2099 + // gRandInfo.MixEntropy(randBytes(32)) // Init } +// WARNING: This function needs review - https://github.com/tendermint/tendermint/issues/2099. // Mix additional bytes of randomness, e.g. from hardware, user-input, etc. // It is OK to call it multiple times. It does not diminish security. func MixEntropy(seedBytes []byte) { @@ -30,20 +42,28 @@ func randBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) if err != nil { - PanicCrisis(err) + panic(err) } return b } +// This only uses the OS's randomness +func CRandBytes(numBytes int) []byte { + return randBytes(numBytes) +} + +/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 // This uses the OS and the Seed(s). func CRandBytes(numBytes int) []byte { - b := make([]byte, numBytes) - _, err := gRandInfo.Read(b) - if err != nil { - PanicCrisis(err) - } - return b + return randBytes(numBytes) + b := make([]byte, numBytes) + _, err := gRandInfo.Read(b) + if err != nil { + panic(err) + } + return b } +*/ // CRandHex returns a hex encoded string that's floor(numDigits/2) * 2 long. // @@ -53,23 +73,29 @@ func CRandHex(numDigits int) string { return hex.EncodeToString(CRandBytes(numDigits / 2)) } +// Returns a crand.Reader. +func CReader() io.Reader { + return crand.Reader +} + +/* TODO: uncomment after reviewing MixEntropy - https://github.com/tendermint/tendermint/issues/2099 // Returns a crand.Reader mixed with user-supplied entropy func CReader() io.Reader { return gRandInfo } +*/ //-------------------------------------------------------------------------------- type randInfo struct { - mtx sync.Mutex - seedBytes [32]byte - cipherAES256 cipher.Block - streamAES256 cipher.Stream - reader io.Reader + mtx sync.Mutex + seedBytes [chacha20poly1305.KeySize]byte + chacha cipher.AEAD + reader io.Reader } // You can call this as many times as you'd like. -// XXX TODO review +// XXX/TODO: review - https://github.com/tendermint/tendermint/issues/2099 func (ri *randInfo) MixEntropy(seedBytes []byte) { ri.mtx.Lock() defer ri.mtx.Unlock() @@ -79,30 +105,35 @@ func (ri *randInfo) MixEntropy(seedBytes []byte) { h.Write(seedBytes) h.Write(ri.seedBytes[:]) hashBytes := h.Sum(nil) - hashBytes32 := [32]byte{} - copy(hashBytes32[:], hashBytes) - ri.seedBytes = xorBytes32(ri.seedBytes, hashBytes32) - // Create new cipher.Block - var err error - ri.cipherAES256, err = aes.NewCipher(ri.seedBytes[:]) + copy(ri.seedBytes[:], hashBytes) + chacha, err := chacha20poly1305.New(ri.seedBytes[:]) if err != nil { - PanicSanity("Error creating AES256 cipher: " + err.Error()) + panic("Initializing chacha20 failed") } - // Create new stream - ri.streamAES256 = cipher.NewCTR(ri.cipherAES256, randBytes(aes.BlockSize)) + ri.chacha = chacha // Create new reader - ri.reader = &cipher.StreamReader{S: ri.streamAES256, R: crand.Reader} + ri.reader = &cipher.StreamReader{S: ri, R: crand.Reader} } -func (ri *randInfo) Read(b []byte) (n int, err error) { - ri.mtx.Lock() - defer ri.mtx.Unlock() - return ri.reader.Read(b) +func (ri *randInfo) XORKeyStream(dst, src []byte) { + // nonce being 0 is safe due to never re-using a key. 
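+ // (ChaCha20 requires that a (key, nonce) pair never be reused; the design + // pins the nonce to zero and instead derives a fresh key for each call via + // the SHA-256 rotation at the end of this function.)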
+ emptyNonce := make([]byte, 12) + tmpDst := ri.chacha.Seal([]byte{}, emptyNonce, src, []byte{0}) + // this removes the poly1305 tag as well, since chacha is a stream cipher + // and we truncate at input length. + copy(dst, tmpDst[:len(src)]) + // hash seedBytes for forward secrecy, and initialize new chacha instance + newSeed := sha256.Sum256(ri.seedBytes[:]) + chacha, err := chacha20poly1305.New(newSeed[:]) + if err != nil { + panic("Initializing chacha20 failed") + } + ri.chacha = chacha } -func xorBytes32(bytesA [32]byte, bytesB [32]byte) (res [32]byte) { - for i, b := range bytesA { - res[i] = b ^ bytesB[i] - } - return res +func (ri *randInfo) Read(b []byte) (n int, err error) { + ri.mtx.Lock() + n, err = ri.reader.Read(b) + ri.mtx.Unlock() + return } diff --git a/crypto/random_test.go b/crypto/random_test.go new file mode 100644 index 00000000000..34f7372fe28 --- /dev/null +++ b/crypto/random_test.go @@ -0,0 +1,23 @@ +package crypto_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" +) + +// the purpose of this test is primarily to ensure that the randomness +// generation won't error. +func TestRandomConsistency(t *testing.T) { + x1 := crypto.CRandBytes(256) + x2 := crypto.CRandBytes(256) + x3 := crypto.CRandBytes(256) + x4 := crypto.CRandBytes(256) + x5 := crypto.CRandBytes(256) + require.NotEqual(t, x1, x2) + require.NotEqual(t, x3, x4) + require.NotEqual(t, x4, x5) + require.NotEqual(t, x1, x5) +} diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index aee5dafe7a2..784409f3c72 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -7,16 +7,17 @@ import ( "fmt" "io" - secp256k1 "github.com/btcsuite/btcd/btcec" + secp256k1 "github.com/tendermint/btcd/btcec" amino "github.com/tendermint/go-amino" + "golang.org/x/crypto/ripemd160" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" - "golang.org/x/crypto/ripemd160" ) //------------------------------------- const ( - Secp256k1PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1" - Secp256k1PubKeyAminoRoute = "tendermint/PubKeySecp256k1" + PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1" + PubKeyAminoRoute = "tendermint/PubKeySecp256k1" ) var cdc = amino.NewCodec() @@ -24,11 +25,11 @@ var cdc = amino.NewCodec() func init() { cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(PubKeySecp256k1{}, - Secp256k1PubKeyAminoRoute, nil) + PubKeyAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(PrivKeySecp256k1{}, - Secp256k1PrivKeyAminoRoute, nil) + PrivKeyAminoRoute, nil) } //------------------------------------- @@ -141,10 +142,12 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool { if err != nil { return false } - parsedSig, err := secp256k1.ParseDERSignature(sig[:], secp256k1.S256()) + parsedSig, err := secp256k1.ParseSignature(sig[:], secp256k1.S256()) if err != nil { return false } + // Underlying library ensures that this signature is in canonical form, to + // prevent Secp256k1 malleability from altering the sign of the s term. 
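+ // (For any valid ECDSA signature (r, s), the pair (r, N-s) also verifies; + // insisting on the canonical low-s form rules out that second encoding.)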
return parsedSig.Verify(crypto.Sha256(msg), pub) } diff --git a/crypto/secp256k1/secpk256k1_test.go b/crypto/secp256k1/secpk256k1_test.go index 0f0b5adce9c..2fa483014db 100644 --- a/crypto/secp256k1/secpk256k1_test.go +++ b/crypto/secp256k1/secpk256k1_test.go @@ -11,7 +11,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/secp256k1" - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" + underlyingSecp256k1 "github.com/tendermint/btcd/btcec" ) type keyData struct { diff --git a/crypto/tmhash/hash.go b/crypto/tmhash/hash.go index 1b29d8680cf..f9b9582420d 100644 --- a/crypto/tmhash/hash.go +++ b/crypto/tmhash/hash.go @@ -6,10 +6,27 @@ import ( ) const ( - Size = 20 + Size = sha256.Size BlockSize = sha256.BlockSize ) +// New returns a new hash.Hash. +func New() hash.Hash { + return sha256.New() +} + +// Sum returns the SHA256 of bz. +func Sum(bz []byte) []byte { + h := sha256.Sum256(bz) + return h[:] +} + +//------------------------------------------------------------- + +const ( + TruncatedSize = 20 +) + type sha256trunc struct { sha256 hash.Hash } @@ -19,7 +36,7 @@ func (h sha256trunc) Write(p []byte) (n int, err error) { } func (h sha256trunc) Sum(b []byte) []byte { shasum := h.sha256.Sum(b) - return shasum[:Size] + return shasum[:TruncatedSize] } func (h sha256trunc) Reset() { @@ -27,22 +44,22 @@ func (h sha256trunc) Reset() { } func (h sha256trunc) Size() int { - return Size + return TruncatedSize } func (h sha256trunc) BlockSize() int { return h.sha256.BlockSize() } -// New returns a new hash.Hash. -func New() hash.Hash { +// NewTruncated returns a new hash.Hash. +func NewTruncated() hash.Hash { return sha256trunc{ sha256: sha256.New(), } } -// Sum returns the first 20 bytes of SHA256 of the bz. -func Sum(bz []byte) []byte { +// SumTruncated returns the first 20 bytes of the SHA256 of bz.
+func SumTruncated(bz []byte) []byte { hash := sha256.Sum256(bz) - return hash[:Size] + return hash[:TruncatedSize] } diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 27938039a1e..89a77980129 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -14,10 +14,29 @@ func TestHash(t *testing.T) { hasher.Write(testVector) bz := hasher.Sum(nil) + bz2 := tmhash.Sum(testVector) + + hasher = sha256.New() + hasher.Write(testVector) + bz3 := hasher.Sum(nil) + + assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) +} + +func TestHashTruncated(t *testing.T) { + testVector := []byte("abc") + hasher := tmhash.NewTruncated() + hasher.Write(testVector) + bz := hasher.Sum(nil) + + bz2 := tmhash.SumTruncated(testVector) + hasher = sha256.New() hasher.Write(testVector) - bz2 := hasher.Sum(nil) - bz2 = bz2[:20] + bz3 := hasher.Sum(nil) + bz3 = bz3[:tmhash.TruncatedSize] assert.Equal(t, bz, bz2) + assert.Equal(t, bz, bz3) } diff --git a/crypto/xchacha20poly1305/xchachapoly.go b/crypto/xchacha20poly1305/xchachapoly.go index c7a175b5f5c..115c9190f97 100644 --- a/crypto/xchacha20poly1305/xchachapoly.go +++ b/crypto/xchacha20poly1305/xchachapoly.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" - "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/crypto/chacha20poly1305" // forked to github.com/tendermint/crypto ) // Implements crypto.AEAD diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index d2369675d1f..c51e24590d8 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -2,10 +2,12 @@ package xsalsa20symmetric import ( "errors" + "fmt" + + "golang.org/x/crypto/nacl/secretbox" // forked to github.com/tendermint/crypto "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" - "golang.org/x/crypto/nacl/secretbox" ) // TODO, make this into a struct that implements crypto.Symmetric. @@ -18,7 +20,7 @@ const secretLen = 32 // NOTE: call crypto.MixEntropy() first. func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { - cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } nonce := crypto.CRandBytes(nonceLen) nonceArr := [nonceLen]byte{} @@ -35,7 +37,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. 
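A quick roundtrip sketch of this package's API (the passphrase is made up, and crypto.Sha256 is used only to derive the required 32-byte secret); the DecryptSymmetric implementation follows:

    package main

    import (
        "fmt"

        "github.com/tendermint/tendermint/crypto"
        "github.com/tendermint/tendermint/crypto/xsalsa20symmetric"
    )

    func main() {
        secret := crypto.Sha256([]byte("some passphrase")) // must be 32 bytes
        plaintext := []byte("attack at dawn")

        ct := xsalsa20symmetric.EncryptSymmetric(plaintext, secret)
        fmt.Println(len(ct) - len(plaintext)) // 40 = 24-byte nonce + 16-byte MAC

        pt, err := xsalsa20symmetric.DecryptSymmetric(ct, secret)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(pt)) // attack at dawn
    }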
func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { if len(secret) != secretLen { - cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } if len(ciphertext) <= secretbox.Overhead+nonceLen { return nil, errors.New("Ciphertext is too short") diff --git a/crypto/xsalsa20symmetric/symmetric_test.go b/crypto/xsalsa20symmetric/symmetric_test.go index d955307ea8a..e9adf728e6a 100644 --- a/crypto/xsalsa20symmetric/symmetric_test.go +++ b/crypto/xsalsa20symmetric/symmetric_test.go @@ -6,8 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/crypto/bcrypt" // forked to github.com/tendermint/crypto + "github.com/tendermint/tendermint/crypto" - "golang.org/x/crypto/bcrypt" ) func TestSimple(t *testing.T) { @@ -29,7 +30,9 @@ func TestSimpleWithKDF(t *testing.T) { plaintext := []byte("sometext") secretPass := []byte("somesecret") - secret, err := bcrypt.GenerateFromPassword(secretPass, 12) + salt := []byte("somesaltsomesalt") // len 16 + // NOTE: we use a fork of x/crypto so we can inject our own randomness for salt + secret, err := bcrypt.GenerateFromPassword(salt, secretPass, 12) if err != nil { t.Error(err) } diff --git a/docs/.textlintrc.json b/docs/.textlintrc.json new file mode 100644 index 00000000000..4103f89e89d --- /dev/null +++ b/docs/.textlintrc.json @@ -0,0 +1,9 @@ +{ + "rules": { + "stop-words": { + "severity": "warning", + "defaultWords": false, + "words": "stop-words.txt" + } + } +} diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js new file mode 100644 index 00000000000..9f8ddbc73c0 --- /dev/null +++ b/docs/.vuepress/config.js @@ -0,0 +1,118 @@ +module.exports = { + title: "Tendermint Documentation", + description: "Documentation for Tendermint Core.", + ga: "UA-51029217-1", + dest: "./dist/docs", + base: "/docs/", + markdown: { + lineNumbers: true + }, + themeConfig: { + lastUpdated: "Last Updated", + nav: [{ text: "Back to Tendermint", link: "https://tendermint.com" }], + sidebar: [ + { + title: "Introduction", + collapsable: false, + children: [ + "/introduction/", + "/introduction/quick-start", + "/introduction/install", + "/introduction/what-is-tendermint" + ] + }, + { + title: "Tendermint Core", + collapsable: false, + children: [ + "/tendermint-core/", + "/tendermint-core/using-tendermint", + "/tendermint-core/configuration", + "/tendermint-core/rpc", + "/tendermint-core/running-in-production", + "/tendermint-core/fast-sync", + "/tendermint-core/how-to-read-logs", + "/tendermint-core/block-structure", + "/tendermint-core/light-client-protocol", + "/tendermint-core/metrics", + "/tendermint-core/secure-p2p", + "/tendermint-core/validators" + ] + }, + { + title: "Tools", + collapsable: false, + children: [ + "/tools/", + "/tools/benchmarking", + "/tools/monitoring" + ] + }, + { + title: "Networks", + collapsable: false, + children: [ + "/networks/", + "/networks/docker-compose", + "/networks/terraform-and-ansible", + ] + }, + { + title: "Apps", + collapsable: false, + children: [ + "/app-dev/getting-started", + "/app-dev/abci-cli", + "/app-dev/app-architecture", + "/app-dev/app-development", + "/app-dev/subscribing-to-events-via-websocket", + "/app-dev/indexing-transactions", + "/app-dev/abci-spec", + "/app-dev/ecosystem" + ] + }, + { + title: "Tendermint Spec", + collapsable: true, + children: [ + "/spec/", + "/spec/blockchain/blockchain", + 
"/spec/blockchain/encoding", + "/spec/blockchain/state", + "/spec/software/abci", + "/spec/consensus/bft-time", + "/spec/consensus/consensus", + "/spec/consensus/light-client", + "/spec/software/wal", + "/spec/p2p/config", + "/spec/p2p/connection", + "/spec/p2p/node", + "/spec/p2p/peer", + "/spec/reactors/block_sync/reactor", + "/spec/reactors/block_sync/impl", + "/spec/reactors/consensus/consensus", + "/spec/reactors/consensus/consensus-reactor", + "/spec/reactors/consensus/proposer-selection", + "/spec/reactors/evidence/reactor", + "/spec/reactors/mempool/concurrency", + "/spec/reactors/mempool/config", + "/spec/reactors/mempool/functionality", + "/spec/reactors/mempool/messages", + "/spec/reactors/mempool/reactor", + "/spec/reactors/pex/pex", + "/spec/reactors/pex/reactor", + ] + }, + { + title: "ABCI Spec", + collapsable: false, + children: [ + "/spec/abci/", + "/spec/abci/abci", + "/spec/abci/apps", + "/spec/abci/client-server" + ] + } + ] + } +}; diff --git a/docs/.vuepress/dist/404.html b/docs/.vuepress/dist/404.html new file mode 100644 index 00000000000..6249309b0fa --- /dev/null +++ b/docs/.vuepress/dist/404.html @@ -0,0 +1,17 @@ + + + + + + VuePress + + + + + + + +

404
Looks like we've got some broken links.
Take me home.
+ + + diff --git a/docs/.vuepress/dist/assets/css/1.styles.c01b7ee3.css b/docs/.vuepress/dist/assets/css/1.styles.c01b7ee3.css new file mode 100644 index 00000000000..0df6f0be4d0 --- /dev/null +++ b/docs/.vuepress/dist/assets/css/1.styles.c01b7ee3.css @@ -0,0 +1 @@ +.icon.outbound{color:#aaa;display:inline-block}.badge{display:inline-block;vertical-align:top;font-size:14px;height:18px;line-height:18px;border-radius:9px;padding:0 5px;color:#fff;margin-right:5px}.badge.tip{background-color:#42b983}.badge.warn,.badge.warning{background-color:#e7c000}.home{padding:3.6rem 2rem 0;max-width:960px;margin:0 auto}.home .hero{text-align:center}.home .hero img{max-height:280px;display:block;margin:3rem auto 1.5rem}.home .hero h1{font-size:3rem}.home .hero .action,.home .hero .description,.home .hero h1{margin:1.8rem auto}.home .hero .description{max-width:35rem;font-size:1.6rem;line-height:1.3;color:#6a8bad}.home .hero .action-button{display:inline-block;font-size:1.2rem;color:#fff;background-color:#3eaf7c;padding:.8rem 1.6rem;border-radius:4px;transition:background-color .1s ease;box-sizing:border-box;border-bottom:1px solid #389d70}.home .hero .action-button:hover{background-color:#4abf8a}.home .features{border-top:1px solid #eaecef;padding:1.2rem 0;margin-top:2.5rem;display:flex;flex-wrap:wrap;align-items:flex-start;align-content:stretch;justify-content:space-between}.home .feature{flex-grow:1;flex-basis:30%;max-width:30%}.home .feature h2{font-size:1.4rem;font-weight:500;border-bottom:none;padding-bottom:0;color:#3a5169}.home .feature p{color:#4e6e8e}.home .footer{padding:2.5rem;border-top:1px solid #eaecef;text-align:center;color:#4e6e8e}@media (max-width:719px){.home .features{flex-direction:column}.home .feature{max-width:100%;padding:0 2.5rem}}@media (max-width:419px){.home{padding-left:1.5rem;padding-right:1.5rem}.home .hero img{max-height:210px;margin:2rem auto 1.2rem}.home .hero h1{font-size:2rem}.home .hero .action,.home .hero .description,.home .hero h1{margin:1.2rem auto}.home .hero .description{font-size:1.2rem}.home .hero .action-button{font-size:1rem;padding:.6rem 1.2rem}.home .feature h2{font-size:1.25rem}}.sidebar-button{display:none;width:1.25rem;height:1.25rem;position:absolute;padding:.6rem;top:.6rem;left:1rem}.sidebar-button .icon{display:block;width:1.25rem;height:1.25rem}@media (max-width:719px){.sidebar-button{display:block}}.search-box{display:inline-block;position:relative;margin-right:.5rem}.search-box input{cursor:text;width:10rem;color:#4e6e8e;display:inline-block;border:1px solid #cfd4db;border-radius:2rem;font-size:.9rem;line-height:2rem;padding:0 .5rem 0 2rem;outline:none;transition:all .2s ease;background:#fff url(/assets/img/search.83621669.svg) .6rem .5rem no-repeat;background-size:1rem}.search-box input:focus{cursor:auto;border-color:#3eaf7c}.search-box .suggestions{background:#fff;width:20rem;position:absolute;top:1.5rem;border:1px solid #cfd4db;border-radius:6px;padding:.4rem;list-style-type:none}.search-box .suggestions.align-right{right:0}.search-box .suggestion{line-height:1.4;padding:.4rem .6rem;border-radius:4px;cursor:pointer}.search-box .suggestion a{color:#5d82a6}.search-box .suggestion a .page-title{font-weight:600}.search-box .suggestion a .header{font-size:.9em;margin-left:.25em}.search-box .suggestion.focused{background-color:#f3f4f5}.search-box .suggestion.focused a{color:#3eaf7c}@media (max-width:959px){.search-box input{cursor:pointer;width:0;border-color:transparent;position:relative;left:1rem}.search-box 
input:focus{cursor:text;left:0;width:10rem}}@media (max-width:959px) and (min-width:719px){.search-box .suggestions{left:0}}@media (max-width:719px){.search-box{margin-right:0}.search-box .suggestions{right:0}}@media (max-width:419px){.search-box .suggestions{width:calc(100vw - 4rem)}.search-box input:focus{width:8rem}}.dropdown-enter,.dropdown-leave-to{height:0!important}.dropdown-wrapper .dropdown-title{display:block}.dropdown-wrapper .dropdown-title:hover{border-color:transparent}.dropdown-wrapper .dropdown-title .arrow{vertical-align:middle;margin-top:-1px;margin-left:.4rem}.dropdown-wrapper .nav-dropdown .dropdown-item{color:inherit;line-height:1.7rem}.dropdown-wrapper .nav-dropdown .dropdown-item h4{margin:.45rem 0 0;border-top:1px solid #eee;padding:.45rem 1.5rem 0 1.25rem}.dropdown-wrapper .nav-dropdown .dropdown-item .dropdown-subitem-wrapper{padding:0;list-style:none}.dropdown-wrapper .nav-dropdown .dropdown-item .dropdown-subitem-wrapper .dropdown-subitem{font-size:.9em}.dropdown-wrapper .nav-dropdown .dropdown-item a{display:block;line-height:1.7rem;position:relative;border-bottom:none;font-weight:400;margin-bottom:0;padding:0 1.5rem 0 1.25rem}.dropdown-wrapper .nav-dropdown .dropdown-item a.router-link-active,.dropdown-wrapper .nav-dropdown .dropdown-item a:hover{color:#3eaf7c}.dropdown-wrapper .nav-dropdown .dropdown-item a.router-link-active:after{content:"";width:0;height:0;border-left:5px solid #3eaf7c;border-top:3px solid transparent;border-bottom:3px solid transparent;position:absolute;top:calc(50% - 2px);left:9px}.dropdown-wrapper .nav-dropdown .dropdown-item:first-child h4{margin-top:0;padding-top:0;border-top:0}@media (max-width:719px){.dropdown-wrapper.open .dropdown-title{margin-bottom:.5rem}.dropdown-wrapper .nav-dropdown{transition:height .1s ease-out;overflow:hidden}.dropdown-wrapper .nav-dropdown .dropdown-item h4{border-top:0;margin-top:0;padding-top:0}.dropdown-wrapper .nav-dropdown .dropdown-item>a,.dropdown-wrapper .nav-dropdown .dropdown-item h4{font-size:15px;line-height:2rem}.dropdown-wrapper .nav-dropdown .dropdown-item .dropdown-subitem{font-size:14px;padding-left:1rem}}@media (min-width:719px){.dropdown-wrapper{height:1.8rem}.dropdown-wrapper:hover .nav-dropdown{display:block!important}.dropdown-wrapper .dropdown-title .arrow{border-left:4px solid transparent;border-right:4px solid transparent;border-top:6px solid #ccc;border-bottom:0}.dropdown-wrapper .nav-dropdown{display:none;height:auto!important;box-sizing:border-box;max-height:calc(100vh - 2.7rem);overflow-y:auto;position:absolute;top:100%;right:0;background-color:#fff;padding:.6rem 0;border:1px solid #ddd;border-bottom-color:#ccc;text-align:left;border-radius:.25rem;white-space:nowrap;margin:0}}.nav-links{display:inline-block}.nav-links a{line-height:1.4rem;color:inherit}.nav-links a.router-link-active,.nav-links a:hover{color:#3eaf7c}.nav-links .nav-item{cursor:pointer;position:relative;display:inline-block;margin-left:1.5rem;line-height:2rem}.nav-links .repo-link{margin-left:1.5rem}@media (max-width:719px){.nav-links .nav-item,.nav-links .repo-link{margin-left:0}}@media (min-width:719px){.nav-links a.router-link-active,.nav-links a:hover{color:#2c3e50}.nav-item>a:not(.external).router-link-active,.nav-item>a:not(.external):hover{margin-bottom:-2px;border-bottom:2px solid #46bd87}}.navbar{padding:.7rem 1.5rem;line-height:2.2rem;position:relative}.navbar a,.navbar img,.navbar span{display:inline-block}.navbar .logo{height:2.2rem;min-width:2.2rem;margin-right:.8rem;vertical-align:top}.navbar 
.site-name{font-size:1.3rem;font-weight:600;color:#2c3e50;position:relative}.navbar .links{font-size:.9rem;position:absolute;right:1.5rem;top:.7rem}@media (max-width:719px){.navbar{padding-left:4rem}.navbar .can-hide{display:none}}.page-edit,.page-nav{max-width:740px;margin:0 auto;padding:2rem 2.5rem}@media (max-width:959px){.page-edit,.page-nav{padding:2rem}}@media (max-width:419px){.page-edit,.page-nav{padding:1.5rem}}.page{padding-bottom:2rem}.page-edit{padding-top:1rem;padding-bottom:1rem;overflow:auto}.page-edit .edit-link{display:inline-block}.page-edit .edit-link a{color:#4e6e8e;margin-right:.25rem}.page-edit .last-updated{float:right;font-size:.9em}.page-edit .last-updated .prefix{font-weight:500;color:#4e6e8e}.page-edit .last-updated .time{font-weight:400;color:#aaa}.page-nav{padding-top:1rem;padding-bottom:0}.page-nav .inner{min-height:2rem;margin-top:0;border-top:1px solid #eaecef;padding-top:1rem;overflow:auto}.page-nav .next{float:right}@media (max-width:719px){.page-edit .edit-link{margin-bottom:.5rem}.page-edit .last-updated{font-size:.8em;float:none;text-align:left}}.sidebar .sidebar-sub-headers{padding-left:1rem;font-size:.95em}a.sidebar-link{font-weight:400;display:inline-block;color:#2c3e50;border-left:.25rem solid transparent;padding:.35rem 1rem .35rem 1.25rem;line-height:1.4;width:100%;box-sizing:border-box}a.sidebar-link:hover{color:#3eaf7c}a.sidebar-link.active{font-weight:600;color:#3eaf7c;border-left-color:#3eaf7c}.sidebar-group a.sidebar-link{padding-left:2rem}.sidebar-sub-headers a.sidebar-link{padding-top:.25rem;padding-bottom:.25rem;border-left:none}.sidebar-sub-headers a.sidebar-link.active{font-weight:500}.sidebar-group:not(.first){margin-top:1em}.sidebar-group .sidebar-group{padding-left:.5em}.sidebar-group:not(.collapsable) .sidebar-heading{cursor:auto;color:inherit}.sidebar-heading{color:#999;transition:color .15s ease;cursor:pointer;font-size:1.1em;font-weight:700;padding:0 1.5rem;margin-top:0;margin-bottom:.5rem}.sidebar-heading.open,.sidebar-heading:hover{color:inherit}.sidebar-heading .arrow{position:relative;top:-.12em;left:.5em}.sidebar-heading:.open .arrow{top:-.18em}.sidebar-group-items{transition:height .1s ease-out;overflow:hidden}.sidebar ul{padding:0;margin:0;list-style-type:none}.sidebar a{display:inline-block}.sidebar .nav-links{display:none;border-bottom:1px solid #eaecef;padding:.5rem 0 .75rem 0}.sidebar .nav-links a{font-weight:600}.sidebar .nav-links .nav-item,.sidebar .nav-links .repo-link{display:block;line-height:1.25rem;font-size:1.1em;padding:.5rem 0 .5rem 1.5rem}.sidebar .sidebar-links{padding:1.5rem 0}@media (max-width:719px){.sidebar .nav-links{display:block}.sidebar .nav-links .dropdown-wrapper .nav-dropdown .dropdown-item a.router-link-active:after{top:calc(1rem - 2px)}.sidebar .sidebar-links{padding:1rem 0}}code[class*=language-],pre[class*=language-]{color:#ccc;background:none;font-family:Consolas,Monaco,Andale Mono,Ubuntu Mono,monospace;text-align:left;white-space:pre;word-spacing:normal;word-break:normal;word-wrap:normal;line-height:1.5;-moz-tab-size:4;-o-tab-size:4;tab-size:4;-webkit-hyphens:none;-ms-hyphens:none;hyphens:none}pre[class*=language-]{padding:1em;margin:.5em 
0;overflow:auto}:not(pre)>code[class*=language-],pre[class*=language-]{background:#2d2d2d}:not(pre)>code[class*=language-]{padding:.1em;border-radius:.3em;white-space:normal}.token.block-comment,.token.cdata,.token.comment,.token.doctype,.token.prolog{color:#999}.token.punctuation{color:#ccc}.token.attr-name,.token.deleted,.token.namespace,.token.tag{color:#e2777a}.token.function-name{color:#6196cc}.token.boolean,.token.function,.token.number{color:#f08d49}.token.class-name,.token.constant,.token.property,.token.symbol{color:#f8c555}.token.atrule,.token.builtin,.token.important,.token.keyword,.token.selector{color:#cc99cd}.token.attr-value,.token.char,.token.regex,.token.string,.token.variable{color:#7ec699}.token.entity,.token.operator,.token.url{color:#67cdcc}.token.bold,.token.important{font-weight:700}.token.italic{font-style:italic}.token.entity{cursor:help}.token.inserted{color:green}#nprogress{pointer-events:none}#nprogress .bar{background:#3eaf7c;position:fixed;z-index:1031;top:0;left:0;width:100%;height:2px}#nprogress .peg{display:block;position:absolute;right:0;width:100px;height:100%;box-shadow:0 0 10px #3eaf7c,0 0 5px #3eaf7c;opacity:1;transform:rotate(3deg) translateY(-4px)}#nprogress .spinner{display:block;position:fixed;z-index:1031;top:15px;right:15px}#nprogress .spinner-icon{width:18px;height:18px;box-sizing:border-box;border:2px solid transparent;border-top-color:#3eaf7c;border-left-color:#3eaf7c;border-radius:50%;animation:nprogress-spinner .4s linear infinite}.nprogress-custom-parent{overflow:hidden;position:relative}.nprogress-custom-parent #nprogress .bar,.nprogress-custom-parent #nprogress .spinner{position:absolute}@keyframes nprogress-spinner{0%{transform:rotate(0deg)}to{transform:rotate(1turn)}}.content code{color:#476582;padding:.25rem .5rem;margin:0;font-size:.85em;background-color:rgba(27,31,35,.05);border-radius:3px}.content pre,.content pre[class*=language-]{line-height:1.4;padding:1.25rem 1.5rem;margin:.85rem 0;background:transparent;overflow:auto}.content pre[class*=language-] code,.content pre code{color:#fff;padding:0;background-color:transparent;border-radius:0}div[class*=language-]{position:relative;background-color:#282c34;border-radius:6px}div[class*=language-] .highlight-lines{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;padding-top:1.3rem;position:absolute;top:0;left:0;width:100%;line-height:1.4}div[class*=language-] .highlight-lines .highlighted{background-color:rgba(0,0,0,.66)}div[class*=language-] pre{position:relative;z-index:1}div[class*=language-]:before{position:absolute;z-index:3;top:.8em;right:1em;font-size:.75rem;color:hsla(0,0%,100%,.4)}div[class*=language-]:not(.line-numbers-mode) .line-numbers-wrapper{display:none}div[class*=language-].line-numbers-mode .highlight-lines .highlighted{position:relative}div[class*=language-].line-numbers-mode .highlight-lines .highlighted:before{content:" ";position:absolute;z-index:3;left:0;top:0;display:block;width:3.5rem;height:100%;background-color:rgba(0,0,0,.66)}div[class*=language-].line-numbers-mode pre{padding-left:4.5rem;vertical-align:middle}div[class*=language-].line-numbers-mode .line-numbers-wrapper{position:absolute;top:0;width:3.5rem;text-align:center;color:hsla(0,0%,100%,.3);padding:1.25rem 0;line-height:1.4}div[class*=language-].line-numbers-mode .line-numbers-wrapper .line-number,div[class*=language-].line-numbers-mode .line-numbers-wrapper 
br{-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none}div[class*=language-].line-numbers-mode .line-numbers-wrapper .line-number{position:relative;z-index:4;font-size:.85em}div[class*=language-].line-numbers-mode:after{content:"";position:absolute;z-index:2;top:0;left:0;width:3.5rem;height:100%;border-radius:6px 0 0 6px;border-right:1px solid rgba(0,0,0,.66);background-color:#282c34}div[class~=language-js]:before{content:"js"}div[class~=language-ts]:before{content:"ts"}div[class~=language-html]:before{content:"html"}div[class~=language-md]:before{content:"md"}div[class~=language-vue]:before{content:"vue"}div[class~=language-css]:before{content:"css"}div[class~=language-sass]:before{content:"sass"}div[class~=language-scss]:before{content:"scss"}div[class~=language-less]:before{content:"less"}div[class~=language-stylus]:before{content:"stylus"}div[class~=language-go]:before{content:"go"}div[class~=language-java]:before{content:"java"}div[class~=language-c]:before{content:"c"}div[class~=language-sh]:before{content:"sh"}div[class~=language-yaml]:before{content:"yaml"}div[class~=language-javascript]:before{content:"js"}div[class~=language-typescript]:before{content:"ts"}div[class~=language-markup]:before{content:"html"}div[class~=language-markdown]:before{content:"md"}div[class~=language-json]:before{content:"json"}div[class~=language-ruby]:before{content:"rb"}div[class~=language-python]:before{content:"py"}div[class~=language-bash]:before{content:"sh"}.custom-block .custom-block-title{font-weight:600;margin-bottom:-.4rem}.custom-block.danger,.custom-block.tip,.custom-block.warning{padding:.1rem 1.5rem;border-left-width:.5rem;border-left-style:solid;margin:1rem 0}.custom-block.tip{background-color:#f3f5f7;border-color:#42b983}.custom-block.warning{background-color:rgba(255,229,100,.3);border-color:#e7c000;color:#6b5900}.custom-block.warning .custom-block-title{color:#b29400}.custom-block.warning a{color:#2c3e50}.custom-block.danger{background-color:#ffe6e6;border-color:#c00;color:#4d0000}.custom-block.danger .custom-block-title{color:#900}.custom-block.danger a{color:#2c3e50}.arrow{display:inline-block;width:0;height:0}.arrow.up{border-bottom:6px solid #ccc}.arrow.down,.arrow.up{border-left:4px solid transparent;border-right:4px solid transparent}.arrow.down{border-top:6px solid #ccc}.arrow.right{border-left:6px solid #ccc}.arrow.left,.arrow.right{border-top:4px solid transparent;border-bottom:4px solid transparent}.arrow.left{border-right:6px solid #ccc}.content:not(.custom){max-width:740px;margin:0 auto;padding:2rem 2.5rem}@media (max-width:959px){.content:not(.custom){padding:2rem}}@media (max-width:419px){.content:not(.custom){padding:1.5rem}}.table-of-contents .badge{vertical-align:middle}body,html{padding:0;margin:0}body{font-family:-apple-system,BlinkMacSystemFont,Segoe UI,Roboto,Oxygen,Ubuntu,Cantarell,Fira Sans,Droid Sans,Helvetica Neue,sans-serif;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale;font-size:16px;color:#2c3e50}.page{padding-left:20rem}.navbar{z-index:20;right:0;height:3.6rem;background-color:#fff;box-sizing:border-box;border-bottom:1px solid #eaecef}.navbar,.sidebar-mask{position:fixed;top:0;left:0}.sidebar-mask{z-index:9;width:100vw;height:100vh;display:none}.sidebar{font-size:15px;background-color:#fff;width:20rem;position:fixed;z-index:10;margin:0;top:3.6rem;left:0;bottom:0;box-sizing:border-box;border-right:1px solid #eaecef;overflow-y:auto}.content:not(.custom)>:first-child{margin-top:3.6rem}.content:not(.custom) 
a:hover{text-decoration:underline}.content:not(.custom) p.demo{padding:1rem 1.5rem;border:1px solid #ddd;border-radius:4px}.content:not(.custom) img{max-width:100%}.content.custom{padding:0;margin:0}.content.custom img{max-width:100%}a{font-weight:500;text-decoration:none}a,p a code{color:#3eaf7c}p a code{font-weight:400}kbd{background:#eee;border:.15rem solid #ddd;border-bottom:.25rem solid #ddd;border-radius:.15rem;padding:0 .15em}blockquote{font-size:1.2rem;color:#999;border-left:.25rem solid #dfe2e5;margin-left:0;padding-left:1rem}ol,ul{padding-left:1.2em}strong{font-weight:600}h1,h2,h3,h4,h5,h6{font-weight:600;line-height:1.25}.content:not(.custom)>h1,.content:not(.custom)>h2,.content:not(.custom)>h3,.content:not(.custom)>h4,.content:not(.custom)>h5,.content:not(.custom)>h6{margin-top:-3.1rem;padding-top:4.6rem;margin-bottom:0}.content:not(.custom)>h1:first-child,.content:not(.custom)>h2:first-child,.content:not(.custom)>h3:first-child,.content:not(.custom)>h4:first-child,.content:not(.custom)>h5:first-child,.content:not(.custom)>h6:first-child{margin-top:-1.5rem;margin-bottom:1rem}.content:not(.custom)>h1:first-child+.custom-block,.content:not(.custom)>h1:first-child+p,.content:not(.custom)>h1:first-child+pre,.content:not(.custom)>h2:first-child+.custom-block,.content:not(.custom)>h2:first-child+p,.content:not(.custom)>h2:first-child+pre,.content:not(.custom)>h3:first-child+.custom-block,.content:not(.custom)>h3:first-child+p,.content:not(.custom)>h3:first-child+pre,.content:not(.custom)>h4:first-child+.custom-block,.content:not(.custom)>h4:first-child+p,.content:not(.custom)>h4:first-child+pre,.content:not(.custom)>h5:first-child+.custom-block,.content:not(.custom)>h5:first-child+p,.content:not(.custom)>h5:first-child+pre,.content:not(.custom)>h6:first-child+.custom-block,.content:not(.custom)>h6:first-child+p,.content:not(.custom)>h6:first-child+pre{margin-top:2rem}h1:hover .header-anchor,h2:hover .header-anchor,h3:hover .header-anchor,h4:hover .header-anchor,h5:hover .header-anchor,h6:hover .header-anchor{opacity:1}h1{font-size:2.2rem}h2{font-size:1.65rem;padding-bottom:.3rem;border-bottom:1px solid #eaecef}h3{font-size:1.35rem}a.header-anchor{font-size:.85em;float:left;margin-left:-.87em;padding-right:.23em;margin-top:.125em;opacity:0}a.header-anchor:hover{text-decoration:none}.line-number,code,kbd{font-family:source-code-pro,Menlo,Monaco,Consolas,Courier New,monospace}ol,p,ul{line-height:1.7}hr{border:0;border-top:1px solid #eaecef}table{border-collapse:collapse;margin:1rem 0;display:block;overflow-x:auto}tr{border-top:1px solid #dfe2e5}tr:nth-child(2n){background-color:#f6f8fa}td,th{border:1px solid #dfe2e5;padding:.6em 1em}.custom-layout{padding-top:3.6rem}.theme-container.sidebar-open .sidebar-mask{display:block}.theme-container.no-navbar .content:not(.custom)>h1,.theme-container.no-navbar h2,.theme-container.no-navbar h3,.theme-container.no-navbar h4,.theme-container.no-navbar h5,.theme-container.no-navbar h6{margin-top:1.5rem;padding-top:0}.theme-container.no-navbar .sidebar{top:0}.theme-container.no-navbar .custom-layout{padding-top:0}@media (min-width:720px){.theme-container.no-sidebar .sidebar{display:none}.theme-container.no-sidebar .page{padding-left:0}}@media (max-width:959px){.sidebar{font-size:15px;width:16.4rem}.page{padding-left:16.4rem}}@media (max-width:719px){.sidebar{top:0;padding-top:3.6rem;transform:translateX(-100%);transition:transform .2s ease}.page{padding-left:0}.theme-container.sidebar-open .sidebar{transform:translateX(0)}.theme-container.no-navbar 
.sidebar{padding-top:0}}@media (max-width:419px){h1{font-size:1.9rem}.content div[class*=language-]{margin:.85rem -1.5rem;border-radius:0}} \ No newline at end of file diff --git a/docs/.vuepress/dist/assets/img/search.83621669.svg b/docs/.vuepress/dist/assets/img/search.83621669.svg new file mode 100644 index 00000000000..03d83913e82 --- /dev/null +++ b/docs/.vuepress/dist/assets/img/search.83621669.svg @@ -0,0 +1 @@ + diff --git a/docs/.vuepress/dist/assets/js/0.7c2695bf.js b/docs/.vuepress/dist/assets/js/0.7c2695bf.js new file mode 100644 index 00000000000..2e3485dd075 --- /dev/null +++ b/docs/.vuepress/dist/assets/js/0.7c2695bf.js @@ -0,0 +1 @@ +(window.webpackJsonp=window.webpackJsonp||[]).push([[0],{136:function(e,t,s){"use strict";s.r(t);var n=s(0),r=Object(n.a)({},function(){this.$createElement;this._self._c;return this._m(0)},[function(){var e=this.$createElement,t=this._self._c||e;return t("div",{staticClass:"content"},[t("h1",{attrs:{id:"hello-vuepress"}},[t("a",{staticClass:"header-anchor",attrs:{href:"#hello-vuepress","aria-hidden":"true"}},[this._v("#")]),this._v(" Hello VuePress")])])}],!1,null,null,null);t.default=r.exports}}]); \ No newline at end of file diff --git a/docs/.vuepress/dist/assets/js/app.48f1ff5f.js b/docs/.vuepress/dist/assets/js/app.48f1ff5f.js new file mode 100644 index 00000000000..30bce60ff66 --- /dev/null +++ b/docs/.vuepress/dist/assets/js/app.48f1ff5f.js @@ -0,0 +1,8 @@ +(window.webpackJsonp=window.webpackJsonp||[]).push([[1],[]]);!function(t){function e(e){for(var r,a,s=e[0],c=e[1],u=e[2],f=0,p=[];f=t.length?(this._t=void 0,o(1)):o(0,"keys"==e?n:"values"==e?t[n]:[n,t[n]])},"values"),i.Arguments=i.Array,r("keys"),r("values"),r("entries")},function(t,e){t.exports={}},function(t,e,n){var r=n(2),o=n(12),i=n(8),a=n(11),s=n(17),c=function(t,e,n){var u,l,f,p,h=t&c.F,d=t&c.G,v=t&c.S,m=t&c.P,g=t&c.B,y=d?r:v?r[e]||(r[e]={}):(r[e]||{}).prototype,b=d?o:o[e]||(o[e]={}),_=b.prototype||(b.prototype={});for(u in d&&(n=e),n)f=((l=!h&&y&&void 0!==y[u])?y:n)[u],p=g&&l?s(f,r):m&&"function"==typeof f?s(Function.call,f):f,y&&a(y,u,f,t&c.U),b[u]!=f&&i(b,u,p),m&&_[u]!=f&&(_[u]=f)};r.core=o,c.F=1,c.G=2,c.S=4,c.P=8,c.B=16,c.W=32,c.U=64,c.R=128,t.exports=c},function(t,e){var n={}.toString;t.exports=function(t){return n.call(t).slice(8,-1)}},function(t,e,n){var r=n(25);t.exports=function(t,e,n){if(r(t),void 0===e)return t;switch(n){case 1:return function(n){return t.call(e,n)};case 2:return function(n,r){return t.call(e,n,r)};case 3:return function(n,r,o){return t.call(e,n,r,o)}}return function(){return t.apply(e,arguments)}}},function(t,e,n){n(42)("replace",2,function(t,e,n){return[function(r,o){"use strict";var i=t(this),a=void 0==r?void 0:r[e];return void 0!==a?a.call(r,i,o):n.call(String(i),r,o)},n]})},function(t,e,n){var r=n(43),o=n(22);n(52)("keys",function(){return function(t){return o(r(t))}})},function(t,e){t.exports=function(t){if(void 0==t)throw TypeError("Can't call method on "+t);return t}},function(t,e,n){var r=n(54),o=n(20);t.exports=function(t){return r(o(t))}},function(t,e,n){var r=n(55),o=n(44);t.exports=Object.keys||function(t){return r(t,o)}},function(t,e,n){var r=n(15);r(r.S+r.F,"Object",{assign:n(121)})},function(t,e){var n=0,r=Math.random();t.exports=function(t){return"Symbol(".concat(void 0===t?"":t,")_",(++n+r).toString(36))}},function(t,e){t.exports=function(t){if("function"!=typeof t)throw TypeError(t+" is not a function!");return 
t}},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){},function(t,e,n){var r,o; +/* NProgress, (c) 2013, 2014 Rico Sta. Cruz - http://ricostacruz.com/nprogress + * @license MIT */void 0===(o="function"==typeof(r=function(){var t,e,n={version:"0.2.0"},r=n.settings={minimum:.08,easing:"ease",positionUsing:"",speed:200,trickle:!0,trickleRate:.02,trickleSpeed:800,showSpinner:!0,barSelector:'[role="bar"]',spinnerSelector:'[role="spinner"]',parent:"body",template:'
<div class="bar" role="bar"><div class="peg"></div></div><div class="spinner" role="spinner"><div class="spinner-icon"></div></div>'};function o(t,e,n){return t<e?e:t>n?n:t}function i(t){return 100*(-1+t)}n.configure=function(t){var e,n;for(e in t)void 0!==(n=t[e])&&t.hasOwnProperty(e)&&(r[e]=n);return this},n.status=null,n.set=function(t){var e=n.isStarted();t=o(t,r.minimum,1),n.status=1===t?null:t;var c=n.render(!e),u=c.querySelector(r.barSelector),l=r.speed,f=r.easing;return c.offsetWidth,a(function(e){""===r.positionUsing&&(r.positionUsing=n.getPositioningCSS()),s(u,function(t,e,n){var o;return(o="translate3d"===r.positionUsing?{transform:"translate3d("+i(t)+"%,0,0)"}:"translate"===r.positionUsing?{transform:"translate("+i(t)+"%,0)"}:{"margin-left":i(t)+"%"}).transition="all "+e+"ms "+n,o}(t,l,f)),1===t?(s(c,{transition:"none",opacity:1}),c.offsetWidth,setTimeout(function(){s(c,{transition:"all "+l+"ms linear",opacity:0}),setTimeout(function(){n.remove(),e()},l)},l)):setTimeout(e,l)}),this},n.isStarted=function(){return"number"==typeof n.status},n.start=function(){n.status||n.set(0);var t=function(){setTimeout(function(){n.status&&(n.trickle(),t())},r.trickleSpeed)};return r.trickle&&t(),this},n.done=function(t){return t||n.status?n.inc(.3+.5*Math.random()).set(1):this},n.inc=function(t){var e=n.status;return e?("number"!=typeof t&&(t=(1-e)*o(Math.random()*e,.1,.95)),e=o(e+t,0,.994),n.set(e)):n.start()},n.trickle=function(){return n.inc(Math.random()*r.trickleRate)},t=0,e=0,n.promise=function(r){return r&&"resolved"!==r.state()?(0===e&&n.start(),t++,e++,r.always(function(){0==--e?(t=0,n.done()):n.set((t-e)/t)}),this):this},n.render=function(t){if(n.isRendered())return document.getElementById("nprogress");u(document.documentElement,"nprogress-busy");var e=document.createElement("div");e.id="nprogress",e.innerHTML=r.template;var o,a=e.querySelector(r.barSelector),c=t?"-100":i(n.status||0),l=document.querySelector(r.parent);return s(a,{transition:"all 0 linear",transform:"translate3d("+c+"%,0,0)"}),r.showSpinner||(o=e.querySelector(r.spinnerSelector))&&p(o),l!=document.body&&u(l,"nprogress-custom-parent"),l.appendChild(e),e},n.remove=function(){l(document.documentElement,"nprogress-busy"),l(document.querySelector(r.parent),"nprogress-custom-parent");var t=document.getElementById("nprogress");t&&p(t)},n.isRendered=function(){return!!document.getElementById("nprogress")},n.getPositioningCSS=function(){var t=document.body.style,e="WebkitTransform"in t?"Webkit":"MozTransform"in t?"Moz":"msTransform"in t?"ms":"OTransform"in t?"O":"";return e+"Perspective"in t?"translate3d":e+"Transform"in t?"translate":"margin"};var a=function(){var t=[];function e(){var n=t.shift();n&&n(e)}return function(n){t.push(n),1==t.length&&e()}}(),s=function(){var t=["Webkit","O","Moz","ms"],e={};function n(n){return n=n.replace(/^-ms-/,"ms-").replace(/-([\da-z])/gi,function(t,e){return e.toUpperCase()}),e[n]||(e[n]=function(e){var n=document.body.style;if(e in n)return e;for(var r,o=t.length,i=e.charAt(0).toUpperCase()+e.slice(1);o--;)if((r=t[o]+i)in n)return r;return e}(n))}function r(t,e,r){e=n(e),t.style[e]=r}return function(t,e){var n,o,i=arguments;if(2==i.length)for(n in e)void 0!==(o=e[n])&&e.hasOwnProperty(n)&&r(t,n,o);else r(t,i[1],i[2])}}();function c(t,e){var n="string"==typeof t?t:f(t);return n.indexOf(" "+e+" ")>=0}function u(t,e){var n=f(t),r=n+e;c(n,e)||(t.className=r.substring(1))}function l(t,e){var n,r=f(t);c(t,e)&&(n=r.replace(" "+e+" "," "),t.className=n.substring(1,n.length-1))}function f(t){return(" "+(t.className||"")+" ").replace(/\s+/gi," ")}function p(t){t&&t.parentNode&&t.parentNode.removeChild(t)}return 
n})?r.call(e,n,e,t):r)||(t.exports=o)},function(t,e,n){"use strict";var r=n(8),o=n(11),i=n(5),a=n(20),s=n(1);t.exports=function(t,e,n){var c=s(t),u=n(a,c,""[t]),l=u[0],f=u[1];i(function(){var e={};return e[c]=function(){return 7},7!=""[t](e)})&&(o(String.prototype,t,l),r(RegExp.prototype,c,2==e?function(t,e){return f.call(t,this,e)}:function(t){return f.call(t,this)}))}},function(t,e,n){var r=n(20);t.exports=function(t){return Object(r(t))}},function(t,e){t.exports="constructor,hasOwnProperty,isPrototypeOf,propertyIsEnumerable,toLocaleString,toString,valueOf".split(",")},function(t,e,n){var r=n(64)("keys"),o=n(24);t.exports=function(t){return r[t]||(r[t]=o(t))}},function(t,e,n){var r=n(7).f,o=n(10),i=n(1)("toStringTag");t.exports=function(t,e,n){t&&!o(t=n?t:t.prototype,i)&&r(t,i,{configurable:!0,value:e})}},function(t,e){t.exports=function(t,e){return{enumerable:!(1&t),configurable:!(2&t),writable:!(4&t),value:e}}},function(t,e,n){var r=n(3),o=n(2).document,i=r(o)&&r(o.createElement);t.exports=function(t){return i?o.createElement(t):{}}},function(t,e){t.exports=!1},function(t,e,n){n(42)("match",1,function(t,e,n){return[function(n){"use strict";var r=t(this),o=void 0==n?void 0:n[e];return void 0!==o?o.call(n,r):new RegExp(n)[e](String(r))},n]})},function(t,e,n){var r=n(3),o=n(16),i=n(1)("match");t.exports=function(t){var e;return r(t)&&(void 0!==(e=t[i])?!!e:"RegExp"==o(t))}},function(t,e,n){var r=n(15),o=n(12),i=n(5);t.exports=function(t,e){var n=(o.Object||{})[t]||Object[t],a={};a[t]=e(n),r(r.S+r.F*i(function(){n(1)}),"Object",a)}},function(t,e){e.f={}.propertyIsEnumerable},function(t,e,n){var r=n(16);t.exports=Object("z").propertyIsEnumerable(0)?Object:function(t){return"String"==r(t)?t.split(""):Object(t)}},function(t,e,n){var r=n(10),o=n(21),i=n(120)(!1),a=n(45)("IE_PROTO");t.exports=function(t,e){var n,s=o(t),c=0,u=[];for(n in s)n!=a&&r(s,n)&&u.push(n);for(;e.length>c;)r(s,n=e[c++])&&(~i(u,n)||u.push(n));return u}},function(t,e,n){"use strict";var r=n(2),o=n(7),i=n(6),a=n(1)("species");t.exports=function(t){var e=r[t];i&&e&&!e[a]&&o.f(e,a,{configurable:!0,get:function(){return this}})}},function(t,e,n){"use strict";var r=n(25);t.exports.f=function(t){return new function(t){var e,n;this.promise=new t(function(t,r){if(void 0!==e||void 0!==n)throw TypeError("Bad Promise constructor");e=t,n=r}),this.resolve=r(e),this.reject=r(n)}(t)}},function(t,e,n){var r=n(2).document;t.exports=r&&r.documentElement},function(t,e,n){var r,o,i,a=n(17),s=n(128),c=n(58),u=n(48),l=n(2),f=l.process,p=l.setImmediate,h=l.clearImmediate,d=l.MessageChannel,v=l.Dispatch,m=0,g={},y=function(){var t=+this;if(g.hasOwnProperty(t)){var e=g[t];delete g[t],e()}},b=function(t){y.call(t.data)};p&&h||(p=function(t){for(var e=[],n=1;arguments.length>n;)e.push(arguments[n++]);return g[++m]=function(){s("function"==typeof t?t:Function(t),e)},r(m),m},h=function(t){delete g[t]},"process"==n(16)(f)?r=function(t){f.nextTick(a(y,t,1))}:v&&v.now?r=function(t){v.now(a(y,t,1))}:d?(i=(o=new d).port2,o.port1.onmessage=b,r=a(i.postMessage,i,1)):l.addEventListener&&"function"==typeof postMessage&&!l.importScripts?(r=function(t){l.postMessage(t+"","*")},l.addEventListener("message",b,!1)):r="onreadystatechange"in u("script")?function(t){c.appendChild(u("script")).onreadystatechange=function(){c.removeChild(this),y.call(t)}}:function(t){setTimeout(a(y,t,1),0)}),t.exports={set:p,clear:h}},function(t,e){var n=Math.ceil,r=Math.floor;t.exports=function(t){return isNaN(t=+t)?0:(t>0?r:n)(t)}},function(t,e,n){var 
r=n(60),o=Math.min;t.exports=function(t){return t>0?o(r(t),9007199254740991):0}},function(t,e,n){var r=n(3);t.exports=function(t,e){if(!r(t))return t;var n,o;if(e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;if("function"==typeof(n=t.valueOf)&&!r(o=n.call(t)))return o;if(!e&&"function"==typeof(n=t.toString)&&!r(o=n.call(t)))return o;throw TypeError("Can't convert object to primitive value")}},function(t,e,n){t.exports=!n(6)&&!n(5)(function(){return 7!=Object.defineProperty(n(48)("div"),"a",{get:function(){return 7}}).a})},function(t,e,n){var r=n(12),o=n(2),i=o["__core-js_shared__"]||(o["__core-js_shared__"]={});(t.exports=function(t,e){return i[t]||(i[t]=void 0!==e?e:{})})("versions",[]).push({version:r.version,mode:n(49)?"pure":"global",copyright:"© 2018 Denis Pushkarev (zloirock.ru)"})},function(t,e,n){var r=n(16),o=n(1)("toStringTag"),i="Arguments"==r(function(){return arguments}());t.exports=function(t){var e,n,a;return void 0===t?"Undefined":null===t?"Null":"string"==typeof(n=function(t,e){try{return t[e]}catch(t){}}(e=Object(t),o))?n:i?r(e):"Object"==(a=r(e))&&"function"==typeof e.callee?"Arguments":a}},function(t,e,n){"use strict";var r,o,i,a,s=n(49),c=n(2),u=n(17),l=n(65),f=n(15),p=n(3),h=n(25),d=n(134),v=n(133),m=n(129),g=n(59).set,y=n(127)(),b=n(57),_=n(126),x=n(125),w=n(124),C=c.TypeError,k=c.process,$=k&&k.versions,O=$&&$.v8||"",S=c.Promise,A="process"==l(k),E=function(){},j=o=b.f,T=!!function(){try{var t=S.resolve(1),e=(t.constructor={})[n(1)("species")]=function(t){t(E,E)};return(A||"function"==typeof PromiseRejectionEvent)&&t.then(E)instanceof e&&0!==O.indexOf("6.6")&&-1===x.indexOf("Chrome/66")}catch(t){}}(),L=function(t){var e;return!(!p(t)||"function"!=typeof(e=t.then))&&e},P=function(t,e){if(!t._n){t._n=!0;var n=t._c;y(function(){for(var r=t._v,o=1==t._s,i=0,a=function(e){var n,i,a,s=o?e.ok:e.fail,c=e.resolve,u=e.reject,l=e.domain;try{s?(o||(2==t._h&&R(t),t._h=1),!0===s?n=r:(l&&l.enter(),n=s(r),l&&(l.exit(),a=!0)),n===e.promise?u(C("Promise-chain cycle")):(i=L(n))?i.call(n,c,u):c(n)):u(r)}catch(t){l&&!a&&l.exit(),u(t)}};n.length>i;)a(n[i++]);t._c=[],t._n=!1,e&&!t._h&&I(t)})}},I=function(t){g.call(c,function(){var e,n,r,o=t._v,i=M(t);if(i&&(e=_(function(){A?k.emit("unhandledRejection",o,t):(n=c.onunhandledrejection)?n({promise:t,reason:o}):(r=c.console)&&r.error&&r.error("Unhandled promise rejection",o)}),t._h=A||M(t)?2:1),t._a=void 0,i&&e.e)throw e.v})},M=function(t){return 1!==t._h&&0===(t._a||t._c).length},R=function(t){g.call(c,function(){var e;A?k.emit("rejectionHandled",t):(e=c.onrejectionhandled)&&e({promise:t,reason:t._v})})},N=function(t){var e=this;e._d||(e._d=!0,(e=e._w||e)._v=t,e._s=2,e._a||(e._a=e._c.slice()),P(e,!0))},D=function(t){var e,n=this;if(!n._d){n._d=!0,n=n._w||n;try{if(n===t)throw C("Promise can't be resolved itself");(e=L(t))?y(function(){var r={_w:n,_d:!1};try{e.call(t,u(D,r,1),u(N,r,1))}catch(t){N.call(r,t)}}):(n._v=t,n._s=1,P(n,!1))}catch(t){N.call({_w:n,_d:!1},t)}}};T||(S=function(t){d(this,S,"Promise","_h"),h(t),r.call(this);try{t(u(D,this,1),u(N,this,1))}catch(t){N.call(this,t)}},(r=function(t){this._c=[],this._a=void 0,this._s=0,this._d=!1,this._v=void 0,this._h=0,this._n=!1}).prototype=n(123)(S.prototype,{then:function(t,e){var n=j(m(this,S));return n.ok="function"!=typeof t||t,n.fail="function"==typeof e&&e,n.domain=A?k.domain:void 0,this._c.push(n),this._a&&this._a.push(n),this._s&&P(this,!1),n.promise},catch:function(t){return this.then(void 0,t)}}),i=function(){var t=new 
r;this.promise=t,this.resolve=u(D,t,1),this.reject=u(N,t,1)},b.f=j=function(t){return t===S||t===a?new i(t):o(t)}),f(f.G+f.W+f.F*!T,{Promise:S}),n(46)(S,"Promise"),n(56)("Promise"),a=n(12).Promise,f(f.S+f.F*!T,"Promise",{reject:function(t){var e=j(this);return(0,e.reject)(t),e.promise}}),f(f.S+f.F*(s||!T),"Promise",{resolve:function(t){return w(s&&this===a?S:this,t)}}),f(f.S+f.F*!(T&&n(122)(function(t){S.all(t).catch(E)})),"Promise",{all:function(t){var e=this,n=j(e),r=n.resolve,o=n.reject,i=_(function(){var n=[],i=0,a=1;v(t,!1,function(t){var s=i++,c=!1;n.push(void 0),a++,e.resolve(t).then(function(t){c||(c=!0,n[s]=t,--a||r(n))},o)}),--a||r(n)});return i.e&&o(i.v),n.promise},race:function(t){var e=this,n=j(e),r=n.reject,o=_(function(){v(t,!1,function(t){e.resolve(t).then(n.resolve,r)})});return o.e&&r(o.v),n.promise}})},function(t,e){var n="Expected a function",r=NaN,o="[object Symbol]",i=/^\s+|\s+$/g,a=/^[-+]0x[0-9a-f]+$/i,s=/^0b[01]+$/i,c=/^0o[0-7]+$/i,u=parseInt,l="object"==typeof global&&global&&global.Object===Object&&global,f="object"==typeof self&&self&&self.Object===Object&&self,p=l||f||Function("return this")(),h=Object.prototype.toString,d=Math.max,v=Math.min,m=function(){return p.Date.now()};function g(t,e,r){var o,i,a,s,c,u,l=0,f=!1,p=!1,h=!0;if("function"!=typeof t)throw new TypeError(n);function g(e){var n=o,r=i;return o=i=void 0,l=e,s=t.apply(r,n)}function _(t){var n=t-u;return void 0===u||n>=e||n<0||p&&t-l>=a}function x(){var t=m();if(_(t))return w(t);c=setTimeout(x,function(t){var n=e-(t-u);return p?v(n,a-(t-l)):n}(t))}function w(t){return c=void 0,h&&o?g(t):(o=i=void 0,s)}function C(){var t=m(),n=_(t);if(o=arguments,i=this,u=t,n){if(void 0===c)return function(t){return l=t,c=setTimeout(x,e),f?g(t):s}(u);if(p)return c=setTimeout(x,e),g(u)}return void 0===c&&(c=setTimeout(x,e)),s}return e=b(e)||0,y(r)&&(f=!!r.leading,a=(p="maxWait"in r)?d(b(r.maxWait)||0,e):a,h="trailing"in r?!!r.trailing:h),C.cancel=function(){void 0!==c&&clearTimeout(c),l=0,o=u=i=c=void 0},C.flush=function(){return void 0===c?s:w(m())},C}function y(t){var e=typeof t;return!!t&&("object"==e||"function"==e)}function b(t){if("number"==typeof t)return t;if(function(t){return"symbol"==typeof t||function(t){return!!t&&"object"==typeof t}(t)&&h.call(t)==o}(t))return r;if(y(t)){var e="function"==typeof t.valueOf?t.valueOf():t;t=y(e)?e+"":e}if("string"!=typeof t)return 0===t?t:+t;t=t.replace(i,"");var n=s.test(t);return n||c.test(t)?u(t.slice(2),n?2:8):a.test(t)?r:+t}t.exports=function(t,e,r){var o=!0,i=!0;if("function"!=typeof t)throw new TypeError(n);return y(r)&&(o="leading"in r?!!r.leading:o,i="trailing"in r?!!r.trailing:i),g(t,e,{leading:o,maxWait:e,trailing:i})}},function(t,e,n){"use strict";n.r(e);n(66),n(23);var r=Object.freeze({});function o(t){return void 0===t||null===t}function i(t){return void 0!==t&&null!==t}function a(t){return!0===t}function s(t){return"string"==typeof t||"number"==typeof t||"symbol"==typeof t||"boolean"==typeof t}function c(t){return null!==t&&"object"==typeof t}var u=Object.prototype.toString;function l(t){return"[object Object]"===u.call(t)}function f(t){return"[object RegExp]"===u.call(t)}function p(t){var e=parseFloat(String(t));return e>=0&&Math.floor(e)===e&&isFinite(t)}function h(t){return null==t?"":"object"==typeof t?JSON.stringify(t,null,2):String(t)}function d(t){var e=parseFloat(t);return isNaN(e)?t:e}function v(t,e){for(var n=Object.create(null),r=t.split(","),o=0;o-1)return t.splice(n,1)}}var y=Object.prototype.hasOwnProperty;function b(t,e){return 
y.call(t,e)}function _(t){var e=Object.create(null);return function(n){return e[n]||(e[n]=t(n))}}var x=/-(\w)/g,w=_(function(t){return t.replace(x,function(t,e){return e?e.toUpperCase():""})}),C=_(function(t){return t.charAt(0).toUpperCase()+t.slice(1)}),k=/\B([A-Z])/g,$=_(function(t){return t.replace(k,"-$1").toLowerCase()});var O=Function.prototype.bind?function(t,e){return t.bind(e)}:function(t,e){function n(n){var r=arguments.length;return r?r>1?t.apply(e,arguments):t.call(e,n):t.call(e)}return n._length=t.length,n};function S(t,e){e=e||0;for(var n=t.length-e,r=new Array(n);n--;)r[n]=t[n+e];return r}function A(t,e){for(var n in e)t[n]=e[n];return t}function E(t){for(var e={},n=0;n0,Y=G&&G.indexOf("edge/")>0,J=(G&&G.indexOf("android"),G&&/iphone|ipad|ipod|ios/.test(G)||"ios"===W),Q=(G&&/chrome\/\d+/.test(G),{}.watch),Z=!1;if(V)try{var tt={};Object.defineProperty(tt,"passive",{get:function(){Z=!0}}),window.addEventListener("test-passive",null,tt)}catch(t){}var et=function(){return void 0===H&&(H=!V&&!z&&"undefined"!=typeof global&&"server"===global.process.env.VUE_ENV),H},nt=V&&window.__VUE_DEVTOOLS_GLOBAL_HOOK__;function rt(t){return"function"==typeof t&&/native code/.test(t.toString())}var ot,it="undefined"!=typeof Symbol&&rt(Symbol)&&"undefined"!=typeof Reflect&&rt(Reflect.ownKeys);ot="undefined"!=typeof Set&&rt(Set)?Set:function(){function t(){this.set=Object.create(null)}return t.prototype.has=function(t){return!0===this.set[t]},t.prototype.add=function(t){this.set[t]=!0},t.prototype.clear=function(){this.set=Object.create(null)},t}();var at=j,st=0,ct=function(){this.id=st++,this.subs=[]};ct.prototype.addSub=function(t){this.subs.push(t)},ct.prototype.removeSub=function(t){g(this.subs,t)},ct.prototype.depend=function(){ct.target&&ct.target.addDep(this)},ct.prototype.notify=function(){for(var t=this.subs.slice(),e=0,n=t.length;e-1)if(i&&!b(o,"default"))a=!1;else if(""===a||a===$(t)){var c=Ft(String,o.type);(c<0||s0&&(ue((u=t(u,(n||"")+"_"+c))[0])&&ue(f)&&(r[l]=vt(f.text+u[0].text),u.shift()),r.push.apply(r,u)):s(u)?ue(f)?r[l]=vt(f.text+u):""!==u&&r.push(vt(u)):ue(u)&&ue(f)?r[l]=vt(f.text+u.text):(a(e._isVList)&&i(u.tag)&&o(u.key)&&i(n)&&(u.key="__vlist"+n+"_"+c+"__"),r.push(u)));return r}(t):void 0}function ue(t){return i(t)&&i(t.text)&&!1===t.isComment}function le(t,e){return(t.__esModule||it&&"Module"===t[Symbol.toStringTag])&&(t=t.default),c(t)?e.extend(t):t}function fe(t){return t.isComment&&t.asyncFactory}function pe(t){if(Array.isArray(t))for(var e=0;eAe&&Ce[n].id>t.id;)n--;Ce.splice(n+1,0,t)}else Ce.push(t);Oe||(Oe=!0,Zt(Ee))}}(this)},Te.prototype.run=function(){if(this.active){var t=this.get();if(t!==this.value||c(t)||this.deep){var e=this.value;if(this.value=t,this.user)try{this.cb.call(this.vm,t,e)}catch(t){Bt(t,this.vm,'callback for watcher "'+this.expression+'"')}else this.cb.call(this.vm,t,e)}}},Te.prototype.evaluate=function(){this.value=this.get(),this.dirty=!1},Te.prototype.depend=function(){for(var t=this.deps.length;t--;)this.deps[t].depend()},Te.prototype.teardown=function(){if(this.active){this.vm._isBeingDestroyed||g(this.vm._watchers,this);for(var t=this.deps.length;t--;)this.deps[t].removeSub(this);this.active=!1}};var Le={enumerable:!0,configurable:!0,get:j,set:j};function Pe(t,e,n){Le.get=function(){return this[e][n]},Le.set=function(t){this[e][n]=t},Object.defineProperty(t,n,Le)}function Ie(t){t._watchers=[];var e=t.$options;e.props&&function(t,e){var n=t.$options.propsData||{},r=t._props={},o=t.$options._propKeys=[];t.$parent&&xt(!1);var 
i=function(i){o.push(i);var a=Nt(i,e,n,t);Ot(r,i,a),i in t||Pe(t,"_props",i)};for(var a in e)i(a);xt(!0)}(t,e.props),e.methods&&function(t,e){t.$options.props;for(var n in e)t[n]=null==e[n]?j:O(e[n],t)}(t,e.methods),e.data?function(t){var e=t.$options.data;l(e=t._data="function"==typeof e?function(t,e){lt();try{return t.call(e,e)}catch(t){return Bt(t,e,"data()"),{}}finally{ft()}}(e,t):e||{})||(e={});var n=Object.keys(e),r=t.$options.props,o=(t.$options.methods,n.length);for(;o--;){var i=n[o];0,r&&b(r,i)||(void 0,36!==(a=(i+"").charCodeAt(0))&&95!==a&&Pe(t,"_data",i))}var a;$t(e,!0)}(t):$t(t._data={},!0),e.computed&&function(t,e){var n=t._computedWatchers=Object.create(null),r=et();for(var o in e){var i=e[o],a="function"==typeof i?i:i.get;0,r||(n[o]=new Te(t,a||j,j,Me)),o in t||Re(t,o,i)}}(t,e.computed),e.watch&&e.watch!==Q&&function(t,e){for(var n in e){var r=e[n];if(Array.isArray(r))for(var o=0;o=0||n.indexOf(t[o])<0)&&r.push(t[o]);return r}return t}function fn(t){this._init(t)}function pn(t){t.cid=0;var e=1;t.extend=function(t){t=t||{};var n=this,r=n.cid,o=t._Ctor||(t._Ctor={});if(o[r])return o[r];var i=t.name||n.options.name;var a=function(t){this._init(t)};return(a.prototype=Object.create(n.prototype)).constructor=a,a.cid=e++,a.options=Mt(n.options,t),a.super=n,a.options.props&&function(t){var e=t.options.props;for(var n in e)Pe(t.prototype,"_props",n)}(a),a.options.computed&&function(t){var e=t.options.computed;for(var n in e)Re(t.prototype,n,e[n])}(a),a.extend=n.extend,a.mixin=n.mixin,a.use=n.use,N.forEach(function(t){a[t]=n[t]}),i&&(a.options.components[i]=a),a.superOptions=n.options,a.extendOptions=t,a.sealedOptions=A({},a.options),o[r]=a,a}}function hn(t){return t&&(t.Ctor.options.name||t.tag)}function dn(t,e){return Array.isArray(t)?t.indexOf(e)>-1:"string"==typeof t?t.split(",").indexOf(e)>-1:!!f(t)&&t.test(e)}function vn(t,e){var n=t.cache,r=t.keys,o=t._vnode;for(var i in n){var a=n[i];if(a){var s=hn(a.componentOptions);s&&!e(s)&&mn(n,i,r,o)}}}function mn(t,e,n,r){var o=t[e];!o||r&&o.tag===r.tag||o.componentInstance.$destroy(),t[e]=null,g(n,e)}!function(t){t.prototype._init=function(t){var e=this;e._uid=cn++,e._isVue=!0,t&&t._isComponent?function(t,e){var n=t.$options=Object.create(t.constructor.options),r=e._parentVnode;n.parent=e.parent,n._parentVnode=r,n._parentElm=e._parentElm,n._refElm=e._refElm;var o=r.componentOptions;n.propsData=o.propsData,n._parentListeners=o.listeners,n._renderChildren=o.children,n._componentTag=o.tag,e.render&&(n.render=e.render,n.staticRenderFns=e.staticRenderFns)}(e,t):e.$options=Mt(un(e.constructor),t||{},e),e._renderProxy=e,e._self=e,function(t){var e=t.$options,n=e.parent;if(n&&!e.abstract){for(;n.$options.abstract&&n.$parent;)n=n.$parent;n.$children.push(t)}t.$parent=n,t.$root=n?n.$root:t,t.$children=[],t.$refs={},t._watcher=null,t._inactive=null,t._directInactive=!1,t._isMounted=!1,t._isDestroyed=!1,t._isBeingDestroyed=!1}(e),function(t){t._events=Object.create(null),t._hasHookEvent=!1;var e=t.$options._parentListeners;e&&ve(t,e)}(e),function(t){t._vnode=null,t._staticTrees=null;var e=t.$options,n=t.$vnode=e._parentVnode,o=n&&n.context;t.$slots=me(e._renderChildren,o),t.$scopedSlots=r,t._c=function(e,n,r,o){return sn(t,e,n,r,o,!1)},t.$createElement=function(e,n,r,o){return sn(t,e,n,r,o,!0)};var i=n&&n.data;Ot(t,"$attrs",i&&i.attrs||r,null,!0),Ot(t,"$listeners",e._parentListeners||r,null,!0)}(e),we(e,"beforeCreate"),function(t){var 
e=Ue(t.$options.inject,t);e&&(xt(!1),Object.keys(e).forEach(function(n){Ot(t,n,e[n])}),xt(!0))}(e),Ie(e),function(t){var e=t.$options.provide;e&&(t._provided="function"==typeof e?e.call(t):e)}(e),we(e,"created"),e.$options.el&&e.$mount(e.$options.el)}}(fn),function(t){var e={get:function(){return this._data}},n={get:function(){return this._props}};Object.defineProperty(t.prototype,"$data",e),Object.defineProperty(t.prototype,"$props",n),t.prototype.$set=St,t.prototype.$delete=At,t.prototype.$watch=function(t,e,n){if(l(e))return De(this,t,e,n);(n=n||{}).user=!0;var r=new Te(this,t,e,n);return n.immediate&&e.call(this,r.value),function(){r.teardown()}}}(fn),function(t){var e=/^hook:/;t.prototype.$on=function(t,n){if(Array.isArray(t))for(var r=0,o=t.length;r1?S(e):e;for(var n=S(arguments,1),r=0,o=e.length;rparseInt(this.max)&&mn(a,s[0],s,this._vnode)),e.data.keepAlive=!0}return e||t&&t[0]}}};!function(t){var e={get:function(){return U}};Object.defineProperty(t,"config",e),t.util={warn:at,extend:A,mergeOptions:Mt,defineReactive:Ot},t.set=St,t.delete=At,t.nextTick=Zt,t.options=Object.create(null),N.forEach(function(e){t.options[e+"s"]=Object.create(null)}),t.options._base=t,A(t.options.components,yn),function(t){t.use=function(t){var e=this._installedPlugins||(this._installedPlugins=[]);if(e.indexOf(t)>-1)return this;var n=S(arguments,1);return n.unshift(this),"function"==typeof t.install?t.install.apply(t,n):"function"==typeof t&&t.apply(null,n),e.push(t),this}}(t),function(t){t.mixin=function(t){return this.options=Mt(this.options,t),this}}(t),pn(t),function(t){N.forEach(function(e){t[e]=function(t,n){return n?("component"===e&&l(n)&&(n.name=n.name||t,n=this.options._base.extend(n)),"directive"===e&&"function"==typeof n&&(n={bind:n,update:n}),this.options[e+"s"][t]=n,n):this.options[e+"s"][t]}})}(t)}(fn),Object.defineProperty(fn.prototype,"$isServer",{get:et}),Object.defineProperty(fn.prototype,"$ssrContext",{get:function(){return this.$vnode&&this.$vnode.ssrContext}}),Object.defineProperty(fn,"FunctionalRenderContext",{value:Qe}),fn.version="2.5.16";var bn=v("style,class"),_n=v("input,textarea,option,select,progress"),xn=v("contenteditable,draggable,spellcheck"),wn=v("allowfullscreen,async,autofocus,autoplay,checked,compact,controls,declare,default,defaultchecked,defaultmuted,defaultselected,defer,disabled,enabled,formnovalidate,hidden,indeterminate,inert,ismap,itemscope,loop,multiple,muted,nohref,noresize,noshade,novalidate,nowrap,open,pauseonexit,readonly,required,reversed,scoped,seamless,selected,sortable,translate,truespeed,typemustmatch,visible"),Cn="http://www.w3.org/1999/xlink",kn=function(t){return":"===t.charAt(5)&&"xlink"===t.slice(0,5)},$n=function(t){return kn(t)?t.slice(6,t.length):""},On=function(t){return null==t||!1===t};function Sn(t){for(var e=t.data,n=t,r=t;i(r.componentInstance);)(r=r.componentInstance._vnode)&&r.data&&(e=An(r.data,e));for(;i(n=n.parent);)n&&n.data&&(e=An(e,n.data));return function(t,e){if(i(t)||i(e))return En(t,jn(e));return""}(e.staticClass,e.class)}function An(t,e){return{staticClass:En(t.staticClass,e.staticClass),class:i(t.class)?[t.class,e.class]:e.class}}function En(t,e){return t?e?t+" "+e:t:e||""}function jn(t){return Array.isArray(t)?function(t){for(var e,n="",r=0,o=t.length;r-1?Zn(t,e,n):wn(e)?On(n)?t.removeAttribute(e):(n="allowfullscreen"===e&&"EMBED"===t.tagName?"true":e,t.setAttribute(e,n)):xn(e)?t.setAttribute(e,On(n)||"false"===n?"false":"true"):kn(e)?On(n)?t.removeAttributeNS(Cn,$n(e)):t.setAttributeNS(Cn,e,n):Zn(t,e,n)}function 
Zn(t,e,n){if(On(n))t.removeAttribute(e);else{if(K&&!X&&"TEXTAREA"===t.tagName&&"placeholder"===e&&!t.__ieph){var r=function(e){e.stopImmediatePropagation(),t.removeEventListener("input",r)};t.addEventListener("input",r),t.__ieph=!0}t.setAttribute(e,n)}}var tr={create:Jn,update:Jn};function er(t,e){var n=e.elm,r=e.data,a=t.data;if(!(o(r.staticClass)&&o(r.class)&&(o(a)||o(a.staticClass)&&o(a.class)))){var s=Sn(e),c=n._transitionClasses;i(c)&&(s=En(s,jn(c))),s!==n._prevClass&&(n.setAttribute("class",s),n._prevClass=s)}}var nr,rr={create:er,update:er},or="__r",ir="__c";function ar(t,e,n,r,o){var i;e=(i=e)._withTask||(i._withTask=function(){Xt=!0;var t=i.apply(null,arguments);return Xt=!1,t}),n&&(e=function(t,e,n){var r=nr;return function o(){null!==t.apply(null,arguments)&&sr(e,o,n,r)}}(e,t,r)),nr.addEventListener(t,e,Z?{capture:r,passive:o}:r)}function sr(t,e,n,r){(r||nr).removeEventListener(t,e._withTask||e,n)}function cr(t,e){if(!o(t.data.on)||!o(e.data.on)){var n=e.data.on||{},r=t.data.on||{};nr=e.elm,function(t){if(i(t[or])){var e=K?"change":"input";t[e]=[].concat(t[or],t[e]||[]),delete t[or]}i(t[ir])&&(t.change=[].concat(t[ir],t.change||[]),delete t[ir])}(n),ie(n,r,ar,sr,e.context),nr=void 0}}var ur={create:cr,update:cr};function lr(t,e){if(!o(t.data.domProps)||!o(e.data.domProps)){var n,r,a=e.elm,s=t.data.domProps||{},c=e.data.domProps||{};for(n in i(c.__ob__)&&(c=e.data.domProps=A({},c)),s)o(c[n])&&(a[n]="");for(n in c){if(r=c[n],"textContent"===n||"innerHTML"===n){if(e.children&&(e.children.length=0),r===s[n])continue;1===a.childNodes.length&&a.removeChild(a.childNodes[0])}if("value"===n){a._value=r;var u=o(r)?"":String(r);fr(a,u)&&(a.value=u)}else a[n]=r}}}function fr(t,e){return!t.composing&&("OPTION"===t.tagName||function(t,e){var n=!0;try{n=document.activeElement!==t}catch(t){}return n&&t.value!==e}(t,e)||function(t,e){var n=t.value,r=t._vModifiers;if(i(r)){if(r.lazy)return!1;if(r.number)return d(n)!==d(e);if(r.trim)return n.trim()!==e.trim()}return n!==e}(t,e))}var pr={create:lr,update:lr},hr=_(function(t){var e={},n=/:(.+)/;return t.split(/;(?![^(]*\))/g).forEach(function(t){if(t){var r=t.split(n);r.length>1&&(e[r[0].trim()]=r[1].trim())}}),e});function dr(t){var e=vr(t.style);return t.staticStyle?A(t.staticStyle,e):e}function vr(t){return Array.isArray(t)?E(t):"string"==typeof t?hr(t):t}var mr,gr=/^--/,yr=/\s*!important$/,br=function(t,e,n){if(gr.test(e))t.style.setProperty(e,n);else if(yr.test(n))t.style.setProperty(e,n.replace(yr,""),"important");else{var r=xr(e);if(Array.isArray(n))for(var o=0,i=n.length;o-1?e.split(/\s+/).forEach(function(e){return t.classList.add(e)}):t.classList.add(e);else{var n=" "+(t.getAttribute("class")||"")+" ";n.indexOf(" "+e+" ")<0&&t.setAttribute("class",(n+e).trim())}}function $r(t,e){if(e&&(e=e.trim()))if(t.classList)e.indexOf(" ")>-1?e.split(/\s+/).forEach(function(e){return t.classList.remove(e)}):t.classList.remove(e),t.classList.length||t.removeAttribute("class");else{for(var n=" "+(t.getAttribute("class")||"")+" ",r=" "+e+" ";n.indexOf(r)>=0;)n=n.replace(r," ");(n=n.trim())?t.setAttribute("class",n):t.removeAttribute("class")}}function Or(t){if(t){if("object"==typeof t){var e={};return!1!==t.css&&A(e,Sr(t.name||"v")),A(e,t),e}return"string"==typeof t?Sr(t):void 0}}var 
Sr=_(function(t){return{enterClass:t+"-enter",enterToClass:t+"-enter-to",enterActiveClass:t+"-enter-active",leaveClass:t+"-leave",leaveToClass:t+"-leave-to",leaveActiveClass:t+"-leave-active"}}),Ar=V&&!X,Er="transition",jr="animation",Tr="transition",Lr="transitionend",Pr="animation",Ir="animationend";Ar&&(void 0===window.ontransitionend&&void 0!==window.onwebkittransitionend&&(Tr="WebkitTransition",Lr="webkitTransitionEnd"),void 0===window.onanimationend&&void 0!==window.onwebkitanimationend&&(Pr="WebkitAnimation",Ir="webkitAnimationEnd"));var Mr=V?window.requestAnimationFrame?window.requestAnimationFrame.bind(window):setTimeout:function(t){return t()};function Rr(t){Mr(function(){Mr(t)})}function Nr(t,e){var n=t._transitionClasses||(t._transitionClasses=[]);n.indexOf(e)<0&&(n.push(e),kr(t,e))}function Dr(t,e){t._transitionClasses&&g(t._transitionClasses,e),$r(t,e)}function Ur(t,e,n){var r=Br(t,e),o=r.type,i=r.timeout,a=r.propCount;if(!o)return n();var s=o===Er?Lr:Ir,c=0,u=function(){t.removeEventListener(s,l),n()},l=function(e){e.target===t&&++c>=a&&u()};setTimeout(function(){c0&&(n=Er,l=a,f=i.length):e===jr?u>0&&(n=jr,l=u,f=c.length):f=(n=(l=Math.max(a,u))>0?a>u?Er:jr:null)?n===Er?i.length:c.length:0,{type:n,timeout:l,propCount:f,hasTransform:n===Er&&Fr.test(r[Tr+"Property"])}}function Hr(t,e){for(;t.length1}function Kr(t,e){!0!==e.data.show&&Vr(e)}var Xr=function(t){var e,n,r={},c=t.modules,u=t.nodeOps;for(e=0;ed?b(t,o(n[g+1])?null:n[g+1].elm,n,h,g,r):h>g&&x(0,e,p,d)}(c,h,d,n,s):i(d)?(i(t.text)&&u.setTextContent(c,""),b(c,null,d,0,d.length-1,n)):i(h)?x(0,h,0,h.length-1):i(t.text)&&u.setTextContent(c,""):t.text!==e.text&&u.setTextContent(c,e.text),i(p)&&i(l=p.hook)&&i(l=l.postpatch)&&l(t,e)}}}function $(t,e,n){if(a(n)&&i(t.parent))t.parent.data.pendingInsert=e;else for(var r=0;r-1,a.selected!==i&&(a.selected=i);else if(P(to(a),r))return void(t.selectedIndex!==s&&(t.selectedIndex=s));o||(t.selectedIndex=-1)}}function Zr(t,e){return e.every(function(e){return!P(e,t)})}function to(t){return"_value"in t?t._value:t.value}function eo(t){t.target.composing=!0}function no(t){t.target.composing&&(t.target.composing=!1,ro(t.target,"input"))}function ro(t,e){var n=document.createEvent("HTMLEvents");n.initEvent(e,!0,!0),t.dispatchEvent(n)}function oo(t){return!t.componentInstance||t.data&&t.data.transition?t:oo(t.componentInstance._vnode)}var io={model:Yr,show:{bind:function(t,e,n){var r=e.value,o=(n=oo(n)).data&&n.data.transition,i=t.__vOriginalDisplay="none"===t.style.display?"":t.style.display;r&&o?(n.data.show=!0,Vr(n,function(){t.style.display=i})):t.style.display=r?i:"none"},update:function(t,e,n){var r=e.value;!r!=!e.oldValue&&((n=oo(n)).data&&n.data.transition?(n.data.show=!0,r?Vr(n,function(){t.style.display=t.__vOriginalDisplay}):zr(n,function(){t.style.display="none"})):t.style.display=r?t.__vOriginalDisplay:"none")},unbind:function(t,e,n,r,o){o||(t.style.display=t.__vOriginalDisplay)}}},ao={name:String,appear:Boolean,css:Boolean,mode:String,type:String,enterClass:String,leaveClass:String,enterToClass:String,leaveToClass:String,enterActiveClass:String,leaveActiveClass:String,appearClass:String,appearActiveClass:String,appearToClass:String,duration:[Number,String,Object]};function so(t){var e=t&&t.componentOptions;return e&&e.Ctor.options.abstract?so(pe(e.children)):t}function co(t){var e={},n=t.$options;for(var r in n.propsData)e[r]=t[r];var o=n._parentListeners;for(var i in o)e[w(i)]=o[i];return e}function uo(t,e){if(/\d-keep-alive$/.test(e.tag))return 
t("keep-alive",{props:e.componentOptions.propsData})}var lo={name:"transition",props:ao,abstract:!0,render:function(t){var e=this,n=this.$slots.default;if(n&&(n=n.filter(function(t){return t.tag||fe(t)})).length){0;var r=this.mode;0;var o=n[0];if(function(t){for(;t=t.parent;)if(t.data.transition)return!0}(this.$vnode))return o;var i=so(o);if(!i)return o;if(this._leaving)return uo(t,o);var a="__transition-"+this._uid+"-";i.key=null==i.key?i.isComment?a+"comment":a+i.tag:s(i.key)?0===String(i.key).indexOf(a)?i.key:a+i.key:i.key;var c=(i.data||(i.data={})).transition=co(this),u=this._vnode,l=so(u);if(i.data.directives&&i.data.directives.some(function(t){return"show"===t.name})&&(i.data.show=!0),l&&l.data&&!function(t,e){return e.key===t.key&&e.tag===t.tag}(i,l)&&!fe(l)&&(!l.componentInstance||!l.componentInstance._vnode.isComment)){var f=l.data.transition=A({},c);if("out-in"===r)return this._leaving=!0,ae(f,"afterLeave",function(){e._leaving=!1,e.$forceUpdate()}),uo(t,o);if("in-out"===r){if(fe(i))return u;var p,h=function(){p()};ae(c,"afterEnter",h),ae(c,"enterCancelled",h),ae(f,"delayLeave",function(t){p=t})}}return o}}},fo=A({tag:String,moveClass:String},ao);function po(t){t.elm._moveCb&&t.elm._moveCb(),t.elm._enterCb&&t.elm._enterCb()}function ho(t){t.data.newPos=t.elm.getBoundingClientRect()}function vo(t){var e=t.data.pos,n=t.data.newPos,r=e.left-n.left,o=e.top-n.top;if(r||o){t.data.moved=!0;var i=t.elm.style;i.transform=i.WebkitTransform="translate("+r+"px,"+o+"px)",i.transitionDuration="0s"}}delete fo.mode;var mo={Transition:lo,TransitionGroup:{props:fo,render:function(t){for(var e=this.tag||this.$vnode.data.tag||"span",n=Object.create(null),r=this.prevChildren=this.children,o=this.$slots.default||[],i=this.children=[],a=co(this),s=0;s-1?Mn[t]=e.constructor===window.HTMLUnknownElement||e.constructor===window.HTMLElement:Mn[t]=/HTMLUnknownElement/.test(e.toString())},A(fn.options.directives,io),A(fn.options.components,mo),fn.prototype.__patch__=V?Xr:j,fn.prototype.$mount=function(t,e){return function(t,e,n){return t.$el=e,t.$options.render||(t.$options.render=dt),we(t,"beforeMount"),new Te(t,function(){t._update(t._render(),n)},j,null,!0),n=!1,null==t.$vnode&&(t._isMounted=!0,we(t,"mounted")),t}(this,t=t&&V?function(t){if("string"==typeof t){var e=document.querySelector(t);return e||document.createElement("div")}return t}(t):void 0,e)},V&&setTimeout(function(){U.devtools&&nt&&nt.emit("init",fn)},0);var go=fn; +/** + * vue-router v3.0.1 + * (c) 2017 Evan You + * @license MIT + */function yo(t,e){0}function bo(t){return Object.prototype.toString.call(t).indexOf("Error")>-1}var _o={name:"router-view",functional:!0,props:{name:{type:String,default:"default"}},render:function(t,e){var n=e.props,r=e.children,o=e.parent,i=e.data;i.routerView=!0;for(var a=o.$createElement,s=n.name,c=o.$route,u=o._routerViewCache||(o._routerViewCache={}),l=0,f=!1;o&&o._routerRoot!==o;)o.$vnode&&o.$vnode.data.routerView&&l++,o._inactive&&(f=!0),o=o.$parent;if(i.routerViewDepth=l,f)return a(u[s],i,r);var p=c.matched[l];if(!p)return u[s]=null,a();var h=u[s]=p.components[s];i.registerRouteInstance=function(t,e){var n=p.instances[s];(e&&n!==t||!e&&n===t)&&(p.instances[s]=e)},(i.hook||(i.hook={})).prepatch=function(t,e){p.instances[s]=e.componentInstance};var d=i.props=function(t,e){switch(typeof e){case"undefined":return;case"object":return e;case"function":return e(t);case"boolean":return e?t.params:void 0;default:0}}(c,p.props&&p.props[s]);if(d){d=i.props=function(t,e){for(var n in e)t[n]=e[n];return t}({},d);var 
v=i.attrs=i.attrs||{};for(var m in d)h.props&&m in h.props||(v[m]=d[m],delete d[m])}return a(h,i,r)}};var xo=/[!'()*]/g,wo=function(t){return"%"+t.charCodeAt(0).toString(16)},Co=/%2C/g,ko=function(t){return encodeURIComponent(t).replace(xo,wo).replace(Co,",")},$o=decodeURIComponent;function Oo(t){var e={};return(t=t.trim().replace(/^(\?|#|&)/,""))?(t.split("&").forEach(function(t){var n=t.replace(/\+/g," ").split("="),r=$o(n.shift()),o=n.length>0?$o(n.join("=")):null;void 0===e[r]?e[r]=o:Array.isArray(e[r])?e[r].push(o):e[r]=[e[r],o]}),e):e}function So(t){var e=t?Object.keys(t).map(function(e){var n=t[e];if(void 0===n)return"";if(null===n)return ko(e);if(Array.isArray(n)){var r=[];return n.forEach(function(t){void 0!==t&&(null===t?r.push(ko(e)):r.push(ko(e)+"="+ko(t)))}),r.join("&")}return ko(e)+"="+ko(n)}).filter(function(t){return t.length>0}).join("&"):null;return e?"?"+e:""}var Ao=/\/?$/;function Eo(t,e,n,r){var o=r&&r.options.stringifyQuery,i=e.query||{};try{i=jo(i)}catch(t){}var a={name:e.name||t&&t.name,meta:t&&t.meta||{},path:e.path||"/",hash:e.hash||"",query:i,params:e.params||{},fullPath:Lo(e,o),matched:t?function(t){var e=[];for(;t;)e.unshift(t),t=t.parent;return e}(t):[]};return n&&(a.redirectedFrom=Lo(n,o)),Object.freeze(a)}function jo(t){if(Array.isArray(t))return t.map(jo);if(t&&"object"==typeof t){var e={};for(var n in t)e[n]=jo(t[n]);return e}return t}var To=Eo(null,{path:"/"});function Lo(t,e){var n=t.path,r=t.query;void 0===r&&(r={});var o=t.hash;return void 0===o&&(o=""),(n||"/")+(e||So)(r)+o}function Po(t,e){return e===To?t===e:!!e&&(t.path&&e.path?t.path.replace(Ao,"")===e.path.replace(Ao,"")&&t.hash===e.hash&&Io(t.query,e.query):!(!t.name||!e.name)&&(t.name===e.name&&t.hash===e.hash&&Io(t.query,e.query)&&Io(t.params,e.params)))}function Io(t,e){if(void 0===t&&(t={}),void 0===e&&(e={}),!t||!e)return t===e;var n=Object.keys(t),r=Object.keys(e);return n.length===r.length&&n.every(function(n){var r=t[n],o=e[n];return"object"==typeof r&&"object"==typeof o?Io(r,o):String(r)===String(o)})}var Mo,Ro=[String,Object],No=[String,Array],Do={name:"router-link",props:{to:{type:Ro,required:!0},tag:{type:String,default:"a"},exact:Boolean,append:Boolean,replace:Boolean,activeClass:String,exactActiveClass:String,event:{type:No,default:"click"}},render:function(t){var e=this,n=this.$router,r=this.$route,o=n.resolve(this.to,r,this.append),i=o.location,a=o.route,s=o.href,c={},u=n.options.linkActiveClass,l=n.options.linkExactActiveClass,f=null==u?"router-link-active":u,p=null==l?"router-link-exact-active":l,h=null==this.activeClass?f:this.activeClass,d=null==this.exactActiveClass?p:this.exactActiveClass,v=i.path?Eo(null,i,null,n):a;c[d]=Po(r,v),c[h]=this.exact?c[d]:function(t,e){return 0===t.path.replace(Ao,"/").indexOf(e.path.replace(Ao,"/"))&&(!e.hash||t.hash===e.hash)&&function(t,e){for(var n in e)if(!(n in t))return!1;return!0}(t.query,e.query)}(r,v);var m=function(t){Uo(t)&&(e.replace?n.replace(i):n.push(i))},g={click:Uo};Array.isArray(this.event)?this.event.forEach(function(t){g[t]=m}):g[this.event]=m;var y={class:c};if("a"===this.tag)y.on=g,y.attrs={href:s};else{var b=function t(e){if(e)for(var n,r=0;r=0&&(e=t.slice(r),t=t.slice(0,r));var o=t.indexOf("?");return o>=0&&(n=t.slice(o+1),t=t.slice(0,o)),{path:t,query:n,hash:e}}(o.path||""),c=e&&e.path||"/",u=s.path?Ho(s.path,c,n||o.append):c,l=function(t,e,n){void 0===e&&(e={});var r,o=n||Oo;try{r=o(t||"")}catch(t){r={}}for(var i in e)r[i]=e[i];return r}(s.query,o.query,r&&r.options.parseQuery),f=o.hash||s.hash;return 
f&&"#"!==f.charAt(0)&&(f="#"+f),{_normalized:!0,path:u,query:l,hash:f}}function li(t,e){for(var n in e)t[n]=e[n];return t}function fi(t,e){var n=ci(t),r=n.pathList,o=n.pathMap,i=n.nameMap;function a(t,n,a){var s=ui(t,n,!1,e),u=s.name;if(u){var l=i[u];if(!l)return c(null,s);var f=l.regex.keys.filter(function(t){return!t.optional}).map(function(t){return t.name});if("object"!=typeof s.params&&(s.params={}),n&&"object"==typeof n.params)for(var p in n.params)!(p in s.params)&&f.indexOf(p)>-1&&(s.params[p]=n.params[p]);if(l)return s.path=si(l.path,s.params),c(l,s,a)}else if(s.path){s.params={};for(var h=0;h=t.length?n():t[o]?e(t[o],function(){r(o+1)}):r(o+1)};r(0)}function ji(t){return function(e,n,r){var o=!1,i=0,a=null;Ti(t,function(t,e,n,s){if("function"==typeof t&&void 0===t.cid){o=!0,i++;var c,u=Ii(function(e){var o;((o=e).__esModule||Pi&&"Module"===o[Symbol.toStringTag])&&(e=e.default),t.resolved="function"==typeof e?e:Mo.extend(e),n.components[s]=e,--i<=0&&r()}),l=Ii(function(t){var e="Failed to resolve async component "+s+": "+t;a||(a=bo(t)?t:new Error(e),r(a))});try{c=t(u,l)}catch(t){l(t)}if(c)if("function"==typeof c.then)c.then(u,l);else{var f=c.component;f&&"function"==typeof f.then&&f.then(u,l)}}}),o||r()}}function Ti(t,e){return Li(t.map(function(t){return Object.keys(t.components).map(function(n){return e(t.components[n],t.instances[n],t,n)})}))}function Li(t){return Array.prototype.concat.apply([],t)}var Pi="function"==typeof Symbol&&"symbol"==typeof Symbol.toStringTag;function Ii(t){var e=!1;return function(){for(var n=[],r=arguments.length;r--;)n[r]=arguments[r];if(!e)return e=!0,t.apply(this,n)}}var Mi=function(t,e){this.router=t,this.base=function(t){if(!t)if(Bo){var e=document.querySelector("base");t=(t=e&&e.getAttribute("href")||"/").replace(/^https?:\/\/[^\/]+/,"")}else t="/";"/"!==t.charAt(0)&&(t="/"+t);return t.replace(/\/$/,"")}(e),this.current=To,this.pending=null,this.ready=!1,this.readyCbs=[],this.readyErrorCbs=[],this.errorCbs=[]};function Ri(t,e,n,r){var o=Ti(t,function(t,r,o,i){var a=function(t,e){"function"!=typeof t&&(t=Mo.extend(t));return t.options[e]}(t,e);if(a)return Array.isArray(a)?a.map(function(t){return n(t,r,o,i)}):n(a,r,o,i)});return Li(r?o.reverse():o)}function Ni(t,e){if(e)return function(){return t.apply(e,arguments)}}Mi.prototype.listen=function(t){this.cb=t},Mi.prototype.onReady=function(t,e){this.ready?t():(this.readyCbs.push(t),e&&this.readyErrorCbs.push(e))},Mi.prototype.onError=function(t){this.errorCbs.push(t)},Mi.prototype.transitionTo=function(t,e,n){var r=this,o=this.router.match(t,this.current);this.confirmTransition(o,function(){r.updateRoute(o),e&&e(o),r.ensureURL(),r.ready||(r.ready=!0,r.readyCbs.forEach(function(t){t(o)}))},function(t){n&&n(t),t&&!r.ready&&(r.ready=!0,r.readyErrorCbs.forEach(function(e){e(t)}))})},Mi.prototype.confirmTransition=function(t,e,n){var r=this,o=this.current,i=function(t){bo(t)&&(r.errorCbs.length?r.errorCbs.forEach(function(e){e(t)}):(yo(),console.error(t))),n&&n(t)};if(Po(t,o)&&t.matched.length===o.matched.length)return this.ensureURL(),i();var a=function(t,e){var n,r=Math.max(t.length,e.length);for(n=0;n=0?e.slice(0,n):e)+"#"+t}function Vi(t){wi?Si(qi(t)):window.location.hash=t}function zi(t){wi?Ai(qi(t)):window.location.replace(qi(t))}var Wi=function(t){function e(e,n){t.call(this,e,n),this.stack=[],this.index=-1}return t&&(e.__proto__=t),e.prototype=Object.create(t&&t.prototype),e.prototype.constructor=e,e.prototype.push=function(t,e,n){var 
[minified VuePress JavaScript bundle omitted - built front-end assets (vue-router, theme components, polyfills) with no reviewable documentation content]

diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md
index 016bac5e4ec..a7671c36093 100644
--- a/docs/DOCS_README.md
+++ b/docs/DOCS_README.md
@@ -1,21 +1,99 @@
-# Documentation Maintenance Overview
+# Docs Build Workflow
-The documentation found in this directory is hosted at:
+The documentation for Tendermint Core is hosted at:
-- https://tendermint.com/docs/
+- https://tendermint.com/docs/ and
+- https://tendermint-staging.interblock.io/docs/
-and built using [VuePress](https://vuepress.vuejs.org/) from the tendermint website repo:
+built from the files in this (`/docs`) directory for
+[master](https://github.com/tendermint/tendermint/tree/master/docs)
+and [develop](https://github.com/tendermint/tendermint/tree/develop/docs),
+respectively.
-- https://github.com/tendermint/tendermint.com
+## How It Works
-which has a [configuration file](https://github.com/tendermint/tendermint.com/blob/develop/docs/.vuepress/config.js) for displaying
-the Table of Contents that lists all the documentation.
+There is a Jenkins job listening for changes in the `/docs` directory, on both
+the `master` and `develop` branches. Any updates to files in this directory
+on those branches will automatically trigger a website deployment. Under the hood,
+a private website repository has make targets consumed by a standard Jenkins task.
-Under the hood, Jenkins listens for changes in ./docs then pushes a `docs-staging` branch to the tendermint.com repo with the latest documentation. That branch must be manually PR'd to `develop` then `master` for staging then production. This process should happen in synchrony with a release.
+## README
-The `README.md` in this directory is the landing page for
-website documentation and the following folders are intentionally
-ommitted:
+The [README.md](./README.md) is also the landing page for the documentation
+on the website. During the Jenkins build, the current commit is added to the bottom
+of the README.
-- `architecture/` ==> contains Architecture Design Records
-- `spec/` ==> contains the detailed specification
+## Config.js
+
+The [config.js](./.vuepress/config.js) generates the sidebar and Table of Contents
+on the website docs. Note the use of relative links and the omission of
+file extensions. Additional features are available to improve the look
+of the sidebar.
+
+## Links
+
+**NOTE:** Strongly consider the existing links - both within this directory
+and to the website docs - when moving or deleting files.
+
+Links to directories *MUST* end in a `/`.
+
+Relative links should be used nearly everywhere; we discovered and weighed the following trade-offs:
+
+### Relative
+
+Where is the other file, relative to the current one?
+
+- works both on GitHub and for the VuePress build
+- confusing / annoying to have things like: `../../../../myfile.md`
+- requires more updates when files are re-shuffled
+
+### Absolute
+
+Where is the other file, given the root of the repo?
+
+- works on GitHub, doesn't work for the VuePress build
+- this is much nicer: `/docs/hereitis/myfile.md`
+- if you move that file around, the links inside it are preserved (but not to it, of course)
+
+### Full
+
+The full GitHub URL to a file or directory. Used occasionally when it makes sense
+to send users to GitHub.
+
+## Building Locally
+
+To build and serve the documentation locally, run:
+
+```
+# from this directory
+npm install
+npm install -g vuepress
+```
+
+then change the following line in the `config.js`:
+
+```
+base: "/docs/",
+```
+
+to:
+
+```
+base: "/",
+```
+
+Finally, go up one directory to the root of the repo and run:
+
+```
+# from root of repo
+vuepress build docs
+cd dist/docs
+python -m SimpleHTTPServer 8080
+```
+
+then navigate to localhost:8080 in your browser.
+
+## Consistency
+
+Because the build processes are identical (as is the information contained herein), this file should be kept in sync as
+much as possible with its [counterpart in the Cosmos SDK repo](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/DOCS_README.md).
diff --git a/docs/Makefile b/docs/Makefile
deleted file mode 100644
index 442c9be651e..00000000000
--- a/docs/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS = -SPHINXBUILD = python -msphinx -SPHINXPROJ = Tendermint -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -install: - @pip install -r requirements.txt - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README.md b/docs/README.md index 16ea708ad0a..c329354771e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,27 +1,29 @@ # Tendermint -Welcome to the Tendermint Core documentation! The introduction below provides -an overview to help you navigate to your area of interest. +Welcome to the Tendermint Core documentation! -## Introduction +Tendermint Core is a blockchain application platform; it provides the equivalent +of a web-server, database, and supporting libraries for blockchain applications +written in any programming language. Like a web-server serving web applications, +Tendermint serves blockchain applications. -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine - written in any programming language - and securely -replicates it on many machines. In other words, a blockchain. +More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) +State Machine Replication (SMR) for arbitrary deterministic, finite state machines. +For more background, see [What is +Tendermint?](introduction/what-is-tendermint.md). -Tendermint requires an application running over the Application Blockchain -Interface (ABCI) - and comes packaged with an example application to do so. -Follow the [installation instructions](./introduction/install) to get up and running -quickly. For more details on [using tendermint](./tendermint-core/using-tendermint) see that -and the following sections. +To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -## Networks +To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/). -Testnets can be setup manually on one or more machines, or automatically on one -or more machine, using a variety of methods described in the [deploy testnets -section](./networks/deploy-testnets). +For more details on using Tendermint, see the respective documentation for +[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). -## Application Development +## Contribute -The first step to building application on Tendermint is to [install -ABCI-CLI](./app-dev/getting-started) and play with the example applications. +To contribute to the documentation, see [this file](./DOCS_README.md) for details of the build process and +considerations when making changes. 
+ +## Version + +This documentation is built from the following commit: diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 4f9019fdae4..263c2c5ee73 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -36,7 +36,7 @@ Available Commands: console Start an interactive abci console for multiple commands counter ABCI demo example deliver_tx Deliver a new tx to the application - kvstore ABCI demo example + kvstore ABCI demo example echo Have the application echo a message help Help about any command info Get some info about the application @@ -140,7 +140,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in Golang](https://github.com/tendermint/tendermint/tree/develop/abci/server). See the -[list of other ABCI implementations](./ecosystem.html) for servers in +[list of other ABCI implementations](./ecosystem.md) for servers in other languages. The handler is specific to the application, and may be arbitrary, so diff --git a/docs/app-dev/abci-spec.md b/docs/app-dev/abci-spec.md index 770740b86c8..6d0f712a2c0 100644 --- a/docs/app-dev/abci-spec.md +++ b/docs/app-dev/abci-spec.md @@ -1,5 +1,9 @@ # ABCI Specification +### XXX + +DEPRECATED: Moved [here](../spec/abci/abci.md) + ## Message Types ABCI requests/responses are defined as simple Protobuf messages in [this @@ -111,14 +115,21 @@ See below for more details on the message types and how they are used. - `Time (google.protobuf.Timestamp)`: Genesis time. - `ChainID (string)`: ID of the blockchain. - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. - - `Validators ([]Validator)`: Initial genesis validators. + - `Validators ([]ValidatorUpdate)`: Initial genesis validators. - `AppStateBytes ([]byte)`: Serialized initial application state. Amino-encoded JSON bytes. - **Response**: - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. - - `Validators ([]Validator)`: Initial validator set. + - `Validators ([]ValidatorUpdate)`: Initial validator set (if non empty). - **Usage**: - Called once upon genesis. + - If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + - If ResponseInitChain.Validators is not empty, the initial validator set will be the + ResponseInitChain.Validators (regardless of what is in RequestInitChain.Validators). + - This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). ### Query @@ -161,15 +172,17 @@ See below for more details on the message types and how they are used. - `Hash ([]byte)`: The block's hash. This can be derived from the block header. - `Header (struct{})`: The block header. - - `LastCommitInfo (LastCommitInfo)`: Info about the last commit. + - `LastCommitInfo (LastCommitInfo)`: Info about the last commit, including the + round, and the list of validators and which ones signed the last block. - `ByzantineValidators ([]Evidence)`: List of evidence of - validators that acted maliciously + validators that acted maliciously. - **Response**: - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing - **Usage**: - Signals the beginning of a new block. Called prior to any DeliverTxs. - - The header is expected to at least contain the Height. 
+ - The header contains the height, timestamp, and more - it exactly matches the + Tendermint block header. We may seek to generalize this in the future. - The `LastCommitInfo` and `ByzantineValidators` can be used to determine rewards and punishments for the validators. NOTE validators here do not include pubkeys. @@ -237,7 +250,7 @@ See below for more details on the message types and how they are used. - **Request**: - `Height (int64)`: Height of the block just executed. - **Response**: - - `ValidatorUpdates ([]Validator)`: Changes to validator set (set + - `ValidatorUpdates ([]ValidatorUpdate)`: Changes to validator set (set voting power to 0 to remove). - `ConsensusParamUpdates (ConsensusParams)`: Changes to consensus-critical time, size, and other parameters. @@ -245,8 +258,11 @@ See below for more details on the message types and how they are used. - **Usage**: - Signals the end of a block. - Called prior to each Commit, after all transactions. - - Validator set and consensus params are updated with the result. - - Validator pubkeys are expected to be go-wire encoded. + - Validator updates returned for block H: + - apply to the NextValidatorsHash of block H+1 + - apply to the ValidatorsHash (and thus the validator set) for block H+2 + - apply to the RequestBeginBlock.LastCommitInfo (ie. the last validator set) for block H+3 + - Consensus params returned for block H apply for block H+1 ### Commit @@ -271,12 +287,17 @@ See below for more details on the message types and how they are used. - `NumTxs (int32)`: Number of transactions in the block - `TotalTxs (int64)`: Total number of transactions in the blockchain until now - - `LastBlockHash ([]byte)`: Hash of the previous (parent) block + - `LastBlockID (BlockID)`: Hash of the previous (parent) block + - `LastCommitHash ([]byte)`: Hash of the previous block's commit - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `NextValidatorsHash ([]byte)`: Hash of the validator set for the next block + - `ConsensusHash ([]byte)`: Hash of the consensus parameters for this block - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the Merkle root of the application state after executing the previous block's transactions - - `Proposer (Validator)`: Original proposer for the block + - `LastResultsHash ([]byte)`: Hash of the ABCI results returned by the last block + - `EvidenceHash ([]byte)`: Hash of the evidence included in this block + - `ProposerAddress ([]byte)`: Original proposer for the block - **Usage**: - Provided in RequestBeginBlock - Provides important context about the current state of the blockchain - @@ -288,16 +309,27 @@ See below for more details on the message types and how they are used. 
- **Fields**:
  - `Address ([]byte)`: Address of the validator (hash of the public key)
+  - `Power (int64)`: Voting power of the validator
+- **Usage**:
+  - Validator identified by address
+  - Used in RequestBeginBlock as part of VoteInfo
+  - Does not include PubKey to avoid sending potentially large quantum pubkeys
+    over the ABCI
+
+### ValidatorUpdate
+
+- **Fields**:
  - `PubKey (PubKey)`: Public key of the validator
  - `Power (int64)`: Voting power of the validator
- **Usage**:
-  - Provides all identifying information about the validator
+  - Validator identified by PubKey
+  - Used to tell Tendermint to update the validator set

-### SigningValidator
+### VoteInfo

- **Fields**:
  - `Validator (Validator)`: A validator
-  - `SignedLastBlock (bool)`: Indicated whether or not the validator signed
+  - `SignedLastBlock (bool)`: Indicates whether or not the validator signed
    the last block
- **Usage**:
  - Indicates whether a validator signed the last block, allowing for rewards
@@ -330,6 +362,6 @@ See below for more details on the message types and how they are used.

### LastCommitInfo

- **Fields**:
-  - `CommitRound (int32)`: Commit round.
-  - `Validators ([]SigningValidator)`: List of validators in the current
-    validator set and whether or not they signed a vote.
+  - `Round (int32)`: Commit round.
+  - `Votes ([]VoteInfo)`: List of validator addresses in the last validator set
+    with their voting power and whether or not they signed a vote.
diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md
index 9ce0fae9f78..b141c0f3008 100644
--- a/docs/app-dev/app-architecture.md
+++ b/docs/app-dev/app-architecture.md
@@ -46,6 +46,5 @@ See the following for more extensive documentation:
- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028)
- [Tendermint RPC Docs](https://tendermint.github.io/slate/)
-- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618)
-- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html)
-- [ABCI spec](https://github.com/tendermint/tendermint/blob/develop/abci/docs/abci-spec.md)
+- [Tendermint in Production](../tendermint-core/running-in-production.md)
+- [ABCI spec](./abci-spec.md)
diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md
index d3f22362bf1..d157ce37834 100644
--- a/docs/app-dev/app-development.md
+++ b/docs/app-dev/app-development.md
@@ -1,5 +1,10 @@
# Application Development Guide

+## XXX
+
+This page is undergoing deprecation. All content is being moved to the new [home
+of the ABCI specification](../spec/abci/README.md).
+
## ABCI Design

The purpose of ABCI is to provide a clean interface between state
@@ -42,90 +47,6 @@ The mempool and consensus logic act as clients, and each maintains an
open ABCI connection with the application, which hosts an ABCI server.
Shown are the request and response types sent on each connection.

-## Message Protocol
-
-The message protocol consists of pairs of requests and responses. Some
-messages have no fields, while others may include byte-arrays, strings,
-or integers. See the `message Request` and `message Response`
-definitions in [the protobuf definition
-file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto),
-and the [protobuf
-documentation](https://developers.google.com/protocol-buffers/docs/overview)
-for more details.
- -For each request, a server should respond with the corresponding -response, where order of requests is preserved in the order of -responses. - -## Server - -To use ABCI in your programming language of choice, there must be a ABCI -server in that language. Tendermint supports two kinds of implementation -of the server: - -- Asynchronous, raw socket server (Tendermint Socket Protocol, also - known as TSP or Teaspoon) -- GRPC - -Both can be tested using the `abci-cli` by setting the `--abci` flag -appropriately (ie. to `socket` or `grpc`). - -See examples, in various stages of maintenance, in -[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server), -[JavaScript](https://github.com/tendermint/js-abci), -[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci), -[C++](https://github.com/mdyring/cpp-tmsp), and -[Java](https://github.com/jTendermint/jabci). - -### GRPC - -If GRPC is available in your language, this is the easiest approach, -though it will have significant performance overhead. - -To get started with GRPC, copy in the [protobuf -file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto) -and compile it using the GRPC plugin for your language. For instance, -for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. -See the [grpc documentation for more details](http://www.grpc.io/docs/). -`protoc` will autogenerate all the necessary code for ABCI client and -server in your language, including whatever interface your application -must satisfy to be used by the ABCI server for handling requests. - -### TSP - -If GRPC is not available in your language, or you require higher -performance, or otherwise enjoy programming, you may implement your own -ABCI server using the Tendermint Socket Protocol, known affectionately -as Teaspoon. The first step is still to auto-generate the relevant data -types and codec in your language using `protoc`. Messages coming over -the socket are proto3 encoded, but additionally length-prefixed to -facilitate use as a streaming protocol. proto3 doesn't have an -official length-prefix standard, so we use our own. The first byte in -the prefix represents the length of the Big Endian encoded length. The -remaining bytes in the prefix are the Big Endian encoded length. - -For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4 -bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3 -encoded ABCI message is 65535 bytes long, the length-prefixed message -would be like 0x02FFFF.... - -Note this prefixing does not apply for grpc. - -An ABCI server must also be able to support multiple connections, as -Tendermint uses three connections. - -## Client - -There are currently two use-cases for an ABCI client. One is a testing -tool, as in the `abci-cli`, which allows ABCI requests to be sent via -command line. The other is a consensus engine, such as Tendermint Core, -which makes requests to the application every time a new transaction is -received or a block is committed. - -It is unlikely that you will need to implement a client. For details of -our client, see -[here](https://github.com/tendermint/tendermint/tree/develop/abci/client). - Most of the examples below are from [kvstore application](https://github.com/tendermint/tendermint/blob/develop/abci/example/kvstore/kvstore.go), which is a part of the abci repo. [persistent_kvstore @@ -426,17 +347,30 @@ Note: these query formats are subject to change! 
In go: ``` -func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - if reqQuery.Prove { - value, proof, exists := app.state.Proof(reqQuery.Data) - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - resQuery.Proof = proof - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" + func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + if reqQuery.Prove { + value, proof, exists := app.state.GetWithProof(reqQuery.Data) + resQuery.Index = -1 // TODO make Proof return index + resQuery.Key = reqQuery.Data + resQuery.Value = value + resQuery.Proof = proof + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } else { + index, value, exists := app.state.Get(reqQuery.Data) + resQuery.Index = int64(index) + resQuery.Value = value + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } } return } else { @@ -456,22 +390,25 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type In Java: ``` -ResponseQuery requestQuery(RequestQuery req) { - final boolean isProveQuery = req.getProve(); - final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); - - if (isProveQuery) { - com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray()); - final byte[] proofAsByteArray = proofResult.getAsByteArray(); - - responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray)); - responseBuilder.setKey(req.getData()); - responseBuilder.setValue(ByteString.copyFrom(proofResult.getData())); - responseBuilder.setLog(result.getLogValue()); - } else { - byte[] queryData = req.getData().toByteArray(); - - final com.app.example.QueryResult result = generateQueryResult(queryData); + ResponseQuery requestQuery(RequestQuery req) { + final boolean isProveQuery = req.getProve(); + final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); + byte[] queryData = req.getData().toByteArray(); + + if (isProveQuery) { + com.app.example.QueryResultWithProof result = generateQueryResultWithProof(queryData); + responseBuilder.setIndex(result.getLeftIndex()); + responseBuilder.setKey(req.getData()); + responseBuilder.setValue(result.getValueOrNull(0)); + responseBuilder.setHeight(result.getHeight()); + responseBuilder.setProof(result.getProof()); + responseBuilder.setLog(result.getLogValue()); + } else { + com.app.example.QueryResult result = generateQueryResult(queryData); + responseBuilder.setIndex(result.getIndex()); + responseBuilder.setValue(result.getValue()); + responseBuilder.setLog(result.getLogValue()); + } responseBuilder.setIndex(result.getIndex()); responseBuilder.setValue(ByteString.copyFrom(result.getValue())); @@ -502,7 +439,7 @@ In go: ``` func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())} + return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size())} } ``` diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json index 363f1890287..1c2bc2b250c 100644 --- a/docs/app-dev/ecosystem.json +++ b/docs/app-dev/ecosystem.json @@ -5,24 +5,21 @@ "url": "https://github.com/cosmos/cosmos-sdk", "language": "Go", "author": "Cosmos", - "description": - "A prototypical account based crypto currency state machine supporting plugins" + 
"description": "A prototypical account based crypto currency state machine supporting plugins" }, { "name": "cb-ledger", "url": "https://github.com/block-finance/cpp-abci", "language": "C++", "author": "Block Finance", - "description": - "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow" + "description": "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow" }, { "name": "Clearchain", "url": "https://github.com/tendermint/clearchain", "language": "Go", "author": "FXCLR", - "description": - "Application to manage a distributed ledger for money transfers that support multi-currency accounts" + "description": "Application to manage a distributed ledger for money transfers that support multi-currency accounts" }, { "name": "Ethermint", @@ -43,8 +40,7 @@ "url": "https://github.com/hyperledger/burrow", "language": "Go", "author": "Monax Industries", - "description": - "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store" + "description": "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store" }, { "name": "Merkle AVL Tree", @@ -72,8 +68,7 @@ "url": "https://github.com/trusch/passchain", "language": "Go", "author": "trusch", - "description": - "Tool to securely store and share passwords, tokens and other short secrets" + "description": "Tool to securely store and share passwords, tokens and other short secrets" }, { "name": "Passwerk", @@ -87,8 +82,7 @@ "url": "https://github.com/davebryson/py-tendermint", "language": "Python", "author": "Dave Bryson", - "description": - "A Python microframework for building blockchain applications with Tendermint" + "description": "A Python microframework for building blockchain applications with Tendermint" }, { "name": "Stratumn SDK", @@ -102,16 +96,14 @@ "url": "https://github.com/keppel/lotion", "language": "Javascript", "author": "Judd Keppel", - "description": - "A Javascript microframework for building blockchain applications with Tendermint" + "description": "A Javascript microframework for building blockchain applications with Tendermint" }, { "name": "Tendermint Blockchain Chat App", "url": "https://github.com/SaifRehman/tendermint-chat-app/", "language": "Javascript", "author": "Saif Rehman", - "description": - "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." + "description": "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." 
}, { "name": "BigchainDB", @@ -131,7 +123,7 @@ "abciServers": [ { "name": "abci", - "url": "https://github.com/tendermint/abci", + "url": "https://github.com/tendermint/tendermint/tree/master/abci", "language": "Go", "author": "Tendermint" }, @@ -171,6 +163,12 @@ "language": "Python", "author": "Dave Bryson" }, + { + "name": "tm-abci", + "url": "https://github.com/SoftblocksCo/tm-abci", + "language": "Python", + "author": "Softblocks" + }, { "name": "Spearmint", "url": "https://github.com/dennismckinnon/spearmint", @@ -184,16 +182,14 @@ "url": "https://github.com/tendermint/tools", "technology": "Docker and Kubernetes", "author": "Tendermint", - "description": - "Deploy a Tendermint test network using Google's kubernetes" + "description": "Deploy a Tendermint test network using Google's kubernetes" }, { "name": "terraforce", "url": "https://github.com/tendermint/tools", "technology": "Terraform", "author": "Tendermint", - "description": - "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" + "description": "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" }, { "name": "ansible-tendermint", diff --git a/docs/app-dev/getting-started.md b/docs/app-dev/getting-started.md index 40820bea7ac..14aa0dc3066 100644 --- a/docs/app-dev/getting-started.md +++ b/docs/app-dev/getting-started.md @@ -7,8 +7,7 @@ application you want to run. So, to run a complete blockchain that does something useful, you must start two programs: one is Tendermint Core, the other is your application, which can be written in any programming language. Recall from [the intro to -ABCI](./introduction.md#ABCI-Overview) that Tendermint Core handles all -the p2p and consensus stuff, and just forwards transactions to the +ABCI](../introduction/what-is-tendermint.md#abci-overview) that Tendermint Core handles all the p2p and consensus stuff, and just forwards transactions to the application when they need to be validated, or when they're ready to be committed to a block. @@ -64,7 +63,7 @@ tendermint node If you have used Tendermint, you may want to reset the data for a new blockchain by running `tendermint unsafe_reset_all`. Then you can run `tendermint node` to start Tendermint, and connect to the app. For more -details, see [the guide on using Tendermint](./using-tendermint.md). +details, see [the guide on using Tendermint](../tendermint-core/using-tendermint.md). You should see Tendermint making blocks! We can get the status of our Tendermint node as follows: @@ -244,7 +243,7 @@ But if we send a `1`, it works again: ``` For more details on the `broadcast_tx` API, see [the guide on using -Tendermint](./using-tendermint.md). +Tendermint](../tendermint-core/using-tendermint.md). ## CounterJS - Example in Another Language diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 3bca109591f..61c959ca470 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -12,20 +12,25 @@ Let's take a look at the `[tx_index]` config section: # What indexer to use for transactions # # Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). +# 1) "null" +# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). 
indexer = "kv" -# Comma-separated list of tags to index (by default the only tag is tx hash) +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. # # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). index_all_tags = false ``` @@ -59,7 +64,6 @@ all tags, set `index_all_tags=true` Note, there are a few predefined tags: -- `tm.event` (event type) - `tx.hash` (transaction's hash) - `tx.height` (height of the block transaction was committed in) diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index 9e7c642a020..49954809409 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -20,9 +20,45 @@ method via Websocket. } ``` -Check out [API docs](https://tendermint.github.io/slate/#subscribe) for +Check out [API docs](https://tendermint.com/rpc/) for more information on query syntax and other options. You can also use tags, given you had included them into DeliverTx response, to query transaction results. See [Indexing transactions](./indexing-transactions.md) for details. + +### ValidatorSetUpdates + +When validator set changes, ValidatorSetUpdates event is published. The +event carries a list of pubkey/power pairs. The list is the same +Tendermint receives from ABCI application (see [EndBlock +section](../spec/abci/abci.md#endblock) in +the ABCI spec). + +Response: + +``` +{ + "jsonrpc": "2.0", + "id": "0#event", + "result": { + "query": "tm.event='ValidatorSetUpdates'", + "data": { + "type": "tendermint/event/ValidatorSetUpdates", + "value": { + "validator_updates": [ + { + "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" + }, + "voting_power": "10", + "accum": "0" + } + ] + } + } + } +} +``` diff --git a/docs/architecture/adr-001-logging.md b/docs/architecture/adr-001-logging.md index a11a49e14b3..77e5d39a808 100644 --- a/docs/architecture/adr-001-logging.md +++ b/docs/architecture/adr-001-logging.md @@ -52,13 +52,13 @@ On top of this interface, we will need to implement a stdout logger, which will Many people say that they like the current output, so let's stick with it. ``` -NOTE[04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 +NOTE[2017-04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 ``` Couple of minor changes: ``` -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 ``` Notice the level is encoded using only one char plus milliseconds. 
@@ -155,14 +155,14 @@ Important keyvals should go first. Example: ``` correct -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0 ``` not ``` wrong -I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1 +I[2017-04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1 ``` for that in most cases you'll need to add `instance` field to a logger upon creating, not when u log a particular message: diff --git a/docs/architecture/adr-002-event-subscription.md b/docs/architecture/adr-002-event-subscription.md index cc207c4af38..a73d584abc9 100644 --- a/docs/architecture/adr-002-event-subscription.md +++ b/docs/architecture/adr-002-event-subscription.md @@ -7,8 +7,7 @@ a subset of transactions** (rather than all of them) using `/subscribe?event=X`. example, I want to subscribe for all transactions associated with a particular account. Same for fetching. The user may want to **fetch transactions based on some filter** (rather than fetching all the blocks). For example, I want to get -all transactions for a particular account in the last two weeks (`tx's block -time >= '2017-06-05'`). +all transactions for a particular account in the last two weeks (`tx's block time >= '2017-06-05'`). Now you can't even subscribe to "all txs" in Tendermint. diff --git a/docs/architecture/adr-004-historical-validators.md b/docs/architecture/adr-004-historical-validators.md index be0de22c150..c98af702696 100644 --- a/docs/architecture/adr-004-historical-validators.md +++ b/docs/architecture/adr-004-historical-validators.md @@ -3,11 +3,11 @@ ## Context Right now, we can query the present validator set, but there is no history. -If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client and we agreed needs enhancement of the API. +If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client and we agreed needs enhancement of the API. ## Decision -For every block, store a new structure that contains either the latest validator set, +For every block, store a new structure that contains either the latest validator set, or the height of the last block for which the validator set changed. Note this is not the height of the block which returned the validator set change itself, but the next block, ie. the first block it comes into effect for. @@ -19,7 +19,7 @@ are updated frequently - for instance by only saving the diffs, rather than the An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree. While it might afford cheaper proofs that a validator set has not changed, it would be more complex, -and likely less efficient. +and likely less efficient. ## Status diff --git a/docs/architecture/adr-005-consensus-params.md b/docs/architecture/adr-005-consensus-params.md index 6656d35b2ab..ad132c9b983 100644 --- a/docs/architecture/adr-005-consensus-params.md +++ b/docs/architecture/adr-005-consensus-params.md @@ -7,7 +7,7 @@ Since they may be need to be different in different networks, and potentially to networks, we seek to initialize them in a genesis file, and expose them through the ABCI. 
While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future, -such as a period over which evidence is valid, or the frequency of checkpoints. +such as a period over which evidence is valid, or the frequency of checkpoints. ## Decision @@ -45,7 +45,7 @@ type BlockGossip struct { The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules. -The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0. +The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0. The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks. ### ABCI @@ -53,14 +53,14 @@ The former because we need a part size, the latter so that we always have at lea #### InitChain InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams. -There is some case to be made for it to take the entire Genesis, except there may be things in the genesis, +There is some case to be made for it to take the entire Genesis, except there may be things in the genesis, like the BlockPartSize, that the app shouldn't really know about. #### EndBlock The EndBlock response includes a `ConsensusParams`, which includes BlockSize and TxSize, but not BlockGossip. Other param struct can be added to `ConsensusParams` in the future. -The `0` value is used to denote no change. +The `0` value is used to denote no change. Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block. Tendermint should have hard-coded upper limits as sanity checks. @@ -83,4 +83,3 @@ Proposed. ### Neutral - The TxSize, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes - diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md index ec8a0cce76f..6fa77a609b2 100644 --- a/docs/architecture/adr-006-trust-metric.md +++ b/docs/architecture/adr-006-trust-metric.md @@ -8,13 +8,13 @@ The proposed trust metric will allow Tendermint to maintain local trust rankings The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, peers behavior can be monitored with appropriate metrics and be removed from the network once Tendermint Core is certain the peer is a threat. For example, when the PEXReactor makes a request for peers network addresses from a already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn’t cause a peer to be dropped, while excessive amounts of this behavior does qualify the peer being dropped. -Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in-between malicious activities. 
We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events. +Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is _X_ hours, then it could wait _X_ hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events. Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved in-between executions of the node. ### References -S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th international conference on World Wide Web, pp. 422-431*, May 2005. +S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in _Proceedings of the 14th international conference on World Wide Web, pp. 422-431_, May 2005. ## Decision @@ -26,25 +26,23 @@ The three subsections below will cover the process being considered for calculat The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation. -The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval *i* (over the past *maxH* number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components. +The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. 
We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval _i_ (over the past _maxH_ number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components. ```math (1) Proportional Value = a * R[i] ``` -where *R*[*i*] denotes the raw trust value at time interval *i* (where *i* == 0 being current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*: - +where _R_[*i*] denotes the raw trust value at time interval _i_ (where _i_ == 0 being current time) and _a_ is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last _maxH_ intervals to calculate the history value for time _i_: -`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula") +`H[i] =` ![formula1](img/formula1.png "Weighted Sum Formula") - -The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value: +The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as _Wk_ = 0.8^_k_, for time interval _k_. With the history value available, we can now finish calculating the integral value: ```math (2) Integral Value = b * H[i] ``` -Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows: +Where _H_[*i*] denotes the history value at time interval _i_ and _b_ is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows: ```math D[i] = R[i] – H[i] @@ -52,25 +50,25 @@ D[i] = R[i] – H[i] (3) Derivative Value = c(D[i]) * D[i] ``` -Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows: +Where the value of _c_ is selected based on the _D_[*i*] value relative to zero. The default selection process makes _c_ equal to 0 unless _D_[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. 
If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows:

```math
TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i]
```

-As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of *m*, while allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below:
+As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of _m_, while allowing us to represent 2^_m_ - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to _maxH_ (which can be 2^_m_ - 1), we will map those requests down to _m_ values using equation 4 below:

```math
(4) j = index, where index > 0
```

-Where *j* is one of *(0, 1, 2, … , m – 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations:
+Where _j_ is one of the _(0, 1, 2, … , m – 1)_ indices used to access history interval data. Now we can access the raw intervals using the following calculations:

```math
R[0] = raw data for current time interval
```

-`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula")
+`R[j] =` ![formula2](img/formula2.png "Fading Memories Formula")

### Trust Metric Store

@@ -84,9 +82,7 @@ When the node is shutting down, the trust metric store will save history data fo

Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below:

-
```go
-
// TrustMetric - keeps track of peer reliability
type TrustMetric struct {
	// Private elements.
}
@@ -123,13 +119,11 @@ tm.BadEvents(1)
score := tm.TrustScore()

tm.Stop()
-
```

Some of the trust metric parameters can be configured. The weight values should probably be left alone in most cases, yet the time durations for the tracking window and individual time interval should be considered.

```go
-
// TrustMetricConfig - Configures the weight functions and time intervals for the metric
type TrustMetricConfig struct {
	// Determines the percentage given to current behavior
@@ -165,23 +159,21 @@ config := TrustMetricConfig{

tm := NewMetricWithConfig(config)

tm.BadEvents(10)
-tm.Pause()
+tm.Pause()
tm.GoodEvents(1) // becomes active again
-
```

-A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.
+A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.
When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and an entry is made within the store. In addition to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer.

```go
-
// TrustMetricStore - Manages all trust metrics for peers
type TrustMetricStore struct {
	cmn.BaseService
-
+
	// Private elements
}
@@ -214,7 +206,6 @@ tm := tms.GetPeerTrustMetric(key)

tm.BadEvents(1)

tms.PeerDisconnected(key)
-
```

## Status

diff --git a/docs/architecture/adr-007-trust-metric-usage.md b/docs/architecture/adr-007-trust-metric-usage.md
index 4d833a69ff8..de3a088cb04 100644
--- a/docs/architecture/adr-007-trust-metric-usage.md
+++ b/docs/architecture/adr-007-trust-metric-usage.md
@@ -17,11 +17,13 @@ For example, when the PEXReactor makes a request for peers network addresses fro

The trust metric implementation allows a developer to obtain a peer's trust metric from a trust metric store, and track good and bad events relevant to a peer's behavior, and at any time, the peer's metric can be queried for a current trust value. The current trust value is calculated with a formula that utilizes current behavior, previous behavior, and change between the two. Current behavior is calculated as the percentage of good behavior within a time interval. The time interval is short; probably set between 30 seconds and 5 minutes. On the other hand, the historic data can estimate a peer's behavior over days' worth of tracking. At the end of a time interval, the current behavior becomes part of the historic data, and a new time interval begins with the good and bad counters reset to zero.

These are some important things to keep in mind regarding how the trust metrics handle time intervals and scoring:
+
- Each new time interval begins with a perfect score
- Bad events quickly bring the score down and good events cause the score to slowly rise
- When the time interval is over, the percentage of good events becomes historic data.

Some useful information about the inner workings of the trust metric:
+
- When a trust metric is first instantiated, a timer (ticker) periodically fires in order to handle transitions between trust metric time intervals
- If a peer is disconnected from a node, the timer should be paused, since the node is no longer connected to that peer
- The ability to pause the metric is supported with the store **PeerDisconnected** method and the metric **Pause** method

@@ -76,6 +78,7 @@ Peer quality is tracked in the connection and across the reactors by storing the thread safe Data store.
Peer behaviour is then defined as one of the following:
+
- Fatal - something outright malicious that causes us to disconnect the peer and ban it from the address book for some amount of time
- Bad - Any kind of timeout, messages that don't unmarshal, fail other validity checks, or messages we didn't ask for or aren't expecting (usually worth one bad event)
- Neutral - Unknown channels/message types/version upgrades (no good or bad events recorded)

diff --git a/docs/architecture/adr-008-priv-validator.md b/docs/architecture/adr-008-priv-validator.md
index 4c1d87bed6d..94e882af418 100644
--- a/docs/architecture/adr-008-priv-validator.md
+++ b/docs/architecture/adr-008-priv-validator.md
@@ -11,18 +11,18 @@ implementations:

The SocketPV address can be provided via flags at the command line - doing so
will cause Tendermint to ignore any "priv_validator.json" file and to listen on the given address
for incoming connections from an external priv_validator
-process. It will halt any operation until at least one external process
+process. It will halt any operation until at least one external process
successfully connected.

The external priv_validator process will dial the address to connect to Tendermint,
and then Tendermint will send requests on the ensuing connection to
-sign votes and proposals. Thus the external process initiates the connection,
-but the Tendermint process makes all requests. In a later stage we're going to
+sign votes and proposals. Thus the external process initiates the connection,
+but the Tendermint process makes all requests. In a later stage we're going to
support multiple validators for fault tolerance. To prevent double signing they
need to be synced, which is deferred to an external solution (see #1185).

In addition, Tendermint will provide implementations that can be run in that
-external process. These include:
+external process. These include:

- FilePV will encrypt the private key, and the user must enter a password to
  decrypt the key when the process is started.

diff --git a/docs/architecture/adr-009-ABCI-design.md b/docs/architecture/adr-009-ABCI-design.md
index 8b85679b883..fab28853b80 100644
--- a/docs/architecture/adr-009-ABCI-design.md
+++ b/docs/architecture/adr-009-ABCI-design.md
@@ -8,7 +8,7 @@

## Context

-The ABCI was first introduced in late 2015. It's purpose is to be:
+The ABCI was first introduced in late 2015. Its purpose is to be:

- a generic interface between state machines and their replication engines
- agnostic to the language the state machine is written in

@@ -66,8 +66,8 @@ possible.

### Validators

To change the validator set, applications can return a list of validator updates
-with ResponseEndBlock. In these updates, the public key *must* be included,
-because Tendermint requires the public key to verify validator signatures. This
+with ResponseEndBlock. In these updates, the public key _must_ be included,
+because Tendermint requires the public key to verify validator signatures. This
means ABCI developers have to work with PubKeys. That said, it would also be
convenient to work with address information, and for it to be simple to do so.

@@ -80,7 +80,7 @@ in commits.

### InitChain

-Tendermint passes in a list of validators here, and nothing else. It would
+Tendermint passes in a list of validators here, and nothing else. It would
benefit the application to be able to control the initial validator set.
For instance, the genesis file could include application-based information about the
initial validator set that the application could process to determine the

@@ -120,7 +120,6 @@ v1 will:

That said, an Amino v2 will be worked on to improve the performance of the
format and its usability in cryptographic applications.

-
### PubKey

Encoding schemes infect software. As a generic middleware, ABCI aims to have

@@ -143,7 +142,6 @@ where `type` can be:

- "ed25519", with `data = `
- "secp256k1", with `data = <33-byte OpenSSL compressed pubkey>`

-
As we want to retain flexibility here, and since ideally, PubKey would be an
interface type, we do not use `enum` or `oneof`.

diff --git a/docs/architecture/adr-010-crypto-changes.md b/docs/architecture/adr-010-crypto-changes.md
index cfe6184210d..0bc07d69cae 100644
--- a/docs/architecture/adr-010-crypto-changes.md
+++ b/docs/architecture/adr-010-crypto-changes.md
@@ -66,13 +66,10 @@ Make the following changes:

- More modern and standard cryptographic functions with wider adoption and hardware acceleration

-
### Negative

- Exact authenticated encryption construction isn't already provided in a well-used library

-
### Neutral

## References

-
diff --git a/docs/architecture/adr-011-monitoring.md b/docs/architecture/adr-011-monitoring.md
index ca16a9a1c05..8f2d009dd62 100644
--- a/docs/architecture/adr-011-monitoring.md
+++ b/docs/architecture/adr-011-monitoring.md
@@ -15,11 +15,11 @@ https://github.com/tendermint/tendermint/issues/986.

A few solutions were considered:

1. [Prometheus](https://prometheus.io)
-  a) Prometheus API
-  b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
-  c) [telegraf](https://github.com/influxdata/telegraf)
-  d) new service, which will listen to events emitted by pubsub and report metrics
-5. [OpenCensus](https://opencensus.io/go/index.html)
+   a) Prometheus API
+   b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
+   c) [telegraf](https://github.com/influxdata/telegraf)
+   d) new service, which will listen to events emitted by pubsub and report metrics
+2. [OpenCensus](https://opencensus.io/go/index.html)

### 1. Prometheus

@@ -70,30 +70,30 @@ will need to write interfaces ourselves.
### List of metrics

-| | Name | Type | Description |
-| - | --------------------------------------- | ------- | ----------------------------------------------------------------------------- |
-| A | consensus_height | Gauge | |
-| A | consensus_validators | Gauge | Number of validators who signed |
-| A | consensus_validators_power | Gauge | Total voting power of all validators |
-| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
-| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
-| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
-| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
-| A | consensus_block_interval | Timing | Time between this and last block (Block.Header.Time) |
-| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
-| | consensus_time_between_blocks | Timing | Time between committing last block and (receiving proposal creating proposal) |
-| A | consensus_rounds | Gauge | Number of rounds |
-| | consensus_prevotes | Gauge | |
-| | consensus_precommits | Gauge | |
-| | consensus_prevotes_total_power | Gauge | |
-| | consensus_precommits_total_power | Gauge | |
-| A | consensus_num_txs | Gauge | |
-| A | mempool_size | Gauge | |
-| A | consensus_total_txs | Gauge | |
-| A | consensus_block_size | Gauge | In bytes |
-| A | p2p_peers | Gauge | Number of peers node's connected to |
-
-`A` - will be implemented in the fist place.
+| | Name | Type | Description |
+| --- | ------------------------------------ | ------ | ----------------------------------------------------------------------------- |
+| A | consensus_height | Gauge | |
+| A | consensus_validators | Gauge | Number of validators who signed |
+| A | consensus_validators_power | Gauge | Total voting power of all validators |
+| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
+| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
+| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
+| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
+| A | consensus_block_interval | Timing | Time between this and last block (Block.Header.Time) |
+| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
+| | consensus_time_between_blocks | Timing | Time between committing last block and receiving or creating a proposal |
+| A | consensus_rounds | Gauge | Number of rounds |
+| | consensus_prevotes | Gauge | |
+| | consensus_precommits | Gauge | |
+| | consensus_prevotes_total_power | Gauge | |
+| | consensus_precommits_total_power | Gauge | |
+| A | consensus_num_txs | Gauge | |
+| A | mempool_size | Gauge | |
+| A | consensus_total_txs | Gauge | |
+| A | consensus_block_size | Gauge | In bytes |
+| A | p2p_peers | Gauge | Number of peers the node is connected to |
+
+`A` - will be implemented in the first place.

**Proposed solution**

diff --git a/docs/architecture/adr-012-peer-transport.md b/docs/architecture/adr-012-peer-transport.md
index 01a79c8a9bd..1cf4fb80b81 100644
--- a/docs/architecture/adr-012-peer-transport.md
+++ b/docs/architecture/adr-012-peer-transport.md
@@ -9,8 +9,9 @@ handling.
An artifact is the dependency of the Switch on
[`config.P2PConfig`](https://github.com/tendermint/tendermint/blob/05a76fb517f50da27b4bfcdc7b4cf185fc61eff6/config/config.go#L272-L339).

Addresses:
-* [#2046](https://github.com/tendermint/tendermint/issues/2046)
-* [#2047](https://github.com/tendermint/tendermint/issues/2047)
+
+- [#2046](https://github.com/tendermint/tendermint/issues/2046)
+- [#2047](https://github.com/tendermint/tendermint/issues/2047)

First iteration in [#2067](https://github.com/tendermint/tendermint/issues/2067)

@@ -29,15 +30,14 @@ transport implementation is responsible to filter establishing peers specific

to its domain, for the default multiplexed implementation the following will
apply:

-* connections from our own node
-* handshake fails
-* upgrade to secret connection fails
-* prevent duplicate ip
-* prevent duplicate id
-* nodeinfo incompatibility
-
+- connections from our own node
+- handshake fails
+- upgrade to secret connection fails
+- prevent duplicate ip
+- prevent duplicate id
+- nodeinfo incompatibility

-``` go
+```go
// PeerTransport proxies incoming and outgoing peer connections.
type PeerTransport interface {
	// Accept returns a newly connected Peer.
@@ -75,7 +75,7 @@ func NewMTransport(
	nodeAddr NetAddress,
	nodeInfo NodeInfo,
	nodeKey NodeKey,
-) *multiplexTransport
+) *multiplexTransport
```

### Switch

@@ -84,7 +84,7 @@ From now the Switch will depend on a fully setup `PeerTransport` to retrieve

/reach out to its peers. As the more low-level concerns are pushed to the
transport, we can omit passing the `config.P2PConfig` to the Switch.

-``` go
+```go
func NewSwitch(transport PeerTransport, opts ...SwitchOption) *Switch
```

@@ -96,17 +96,17 @@ In Review.

### Positive

-* free Switch from transport concerns - simpler implementation
-* pluggable transport implementation - simpler test setup
-* remove Switch dependency on P2PConfig - easier to test
+- free Switch from transport concerns - simpler implementation
+- pluggable transport implementation - simpler test setup
+- remove Switch dependency on P2PConfig - easier to test

### Negative

-* more setup for tests which depend on Switches
+- more setup for tests which depend on Switches

### Neutral

-* multiplexed will be the default implementation
+- multiplexed will be the default implementation

[0] These guards could be potentially extended to be pluggable much like
middlewares to express different concerns required by differently configured

diff --git a/docs/architecture/adr-013-symmetric-crypto.md b/docs/architecture/adr-013-symmetric-crypto.md
index 00442ab0d27..69bfc2f2901 100644
--- a/docs/architecture/adr-013-symmetric-crypto.md
+++ b/docs/architecture/adr-013-symmetric-crypto.md
@@ -14,22 +14,23 @@ to easily swap these out.

### How do we encrypt with AEAD's

-AEAD's typically require a nonce in addition to the key.
+AEAD's typically require a nonce in addition to the key.
For the purposes we require symmetric cryptography for,
we need encryption to be stateless.
-Because of this we use random nonces.
+Because of this we use random nonces.
(Thus the AEAD must support random nonces)

-We currently construct a random nonce, and encrypt the data with it.
+We currently construct a random nonce, and encrypt the data with it.
The returned value is `nonce || encrypted data`.
The limitation of this is that it does not provide a way to identify
which algorithm was used in encryption.
-Consequently decryption with multiple algoritms is sub-optimal.
+Consequently decryption with multiple algorithms is sub-optimal.
(You have to try them all)

## Decision

-We should create the following two methods in a new `crypto/encoding/symmetric` package:
+We should create the following two methods in a new `crypto/encoding/symmetric` package:
+
```golang
func Encrypt(aead cipher.AEAD, plaintext []byte) (ciphertext []byte, err error)
func Decrypt(key []byte, ciphertext []byte) (plaintext []byte, err error)
@@ -37,18 +38,19 @@ func Register(aead cipher.AEAD, algo_name string, NewAead func(key []byte) (ciph
```

This allows you to specify the algorithm in encryption, but not have to specify
-it in decryption.
+it in decryption.
This is intended for ease of use in downstream applications, in addition to
people looking at the file directly.
One downside is that for the encrypt function you must have already initialized an AEAD,
-but I don't really see this as an issue.
+but I don't really see this as an issue.

-If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
+If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
`algo_name` should be length prefixed, using standard varuint encoding.
This will be binary data, but that's not a problem considering the nonce and ciphertext are also binary.

-This solution requires a mapping from aead type to name.
-We can achieve this via reflection.
+This solution requires a mapping from aead type to name.
+We can achieve this via reflection.
+
```golang
func getType(myvar interface{}) string {
	if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr {
@@ -58,7 +60,8 @@
	}
}
```
-Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.
+
+Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.
In decryption, we read the `algo_name`, and then instantiate a new AEAD with the key.
Then we call the AEAD's decrypt method on the provided nonce/ciphertext.

@@ -81,13 +84,16 @@ Proposed.

## Consequences

### Positive
-* Allows us to support new AEAD's, in a way that makes decryption easier
-* Allows downstream users to add their own AEAD
+
+- Allows us to support new AEAD's, in a way that makes decryption easier
+- Allows downstream users to add their own AEAD

### Negative
-* We will have to break all private keys stored on disk.
-They can be recovered using seed words, and upgrade scripts are simple.
+
+- We will have to break all private keys stored on disk.
+  They can be recovered using seed words, and upgrade scripts are simple.

### Neutral
-* Caller has to instantiate the AEAD with the private key.
-However it forces them to be aware of what signing algorithm they are using, which is a positive.
\ No newline at end of file
+
+- Caller has to instantiate the AEAD with the private key.
+  However it forces them to be aware of what signing algorithm they are using, which is a positive.

diff --git a/docs/architecture/adr-014-secp-malleability.md b/docs/architecture/adr-014-secp-malleability.md
index 3351056c61b..e6014c09b4d 100644
--- a/docs/architecture/adr-014-secp-malleability.md
+++ b/docs/architecture/adr-014-secp-malleability.md
@@ -22,21 +22,21 @@ Removing this second layer of signature malleability concerns could ease downstr

### ECDSA context

Secp256k1 is ECDSA over a particular curve.
-The signature is of the form `(r, s)`, where `s` is a field element.
+The signature is of the form `(r, s)`, where `s` is a field element.
(The particular field is the `Z_n`, where the elliptic curve has order `n`)
However `(r, -s)` is also a valid solution.
Note that anyone can negate a group element, and therefore can get this second signature.

## Decision

-We can just distinguish a canonical form for the ECDSA signatures.
+We can just distinguish a canonical form for the ECDSA signatures.
Then we require that all ECDSA signatures be in the form which we defined as canonical.
We reject signatures in non-canonical form.

-A canonical form is rather easy to define and check.
+A canonical form is rather easy to define and check.
It would just be the smaller of the two values for `s`, defined lexicographically.
This is a simple check: instead of checking if `s < n`, check `s <= (n - 1)/2`.

-An example of another cryptosystem using this
+An example of another cryptosystem using this
is the parity definition here
https://github.com/zkcrypto/pairing/pull/30#issuecomment-372910663.

This is the same solution Ethereum has chosen for solving secp malleability.

@@ -47,15 +47,17 @@ Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](ht

## Status

-Proposed.
+Implemented

## Consequences

### Positive
-* Lets us maintain the ability to expect a tx hash to appear in the blockchain.
+
+- Lets us maintain the ability to expect a tx hash to appear in the blockchain.

### Negative
-* More work in all future implementations (Though this is a very simple check)
-* Requires us to maintain another fork
+
+- More work in all future implementations (Though this is a very simple check)
+- Requires us to maintain another fork

### Neutral

diff --git a/docs/architecture/adr-015-crypto-encoding.md b/docs/architecture/adr-015-crypto-encoding.md
index 67cce95f933..665129f12f3 100644
--- a/docs/architecture/adr-015-crypto-encoding.md
+++ b/docs/architecture/adr-015-crypto-encoding.md
@@ -1,8 +1,8 @@
-# ADR 015: Crypto encoding
+# ADR 015: Crypto encoding

## Context

-We must standardize our method for encoding public keys and signatures on chain.
+We must standardize our method for encoding public keys and signatures on chain.
Currently we amino encode the public keys and signatures.
The reason we are using amino here is primarily due to ease of support in
parsing for other languages.

@@ -54,9 +54,11 @@ When placed in state, signatures will still be amino encoded, but it will be the

primitive type `[]byte` getting encoded.

#### Ed25519
+
Use the canonical representation for signatures.

#### Secp256k1
+
There isn't a clear canonical representation here.
Signatures have two elements `r,s`.
These bytes are encoded as `r || s`, where `r` and `s` are both exactly
@@ -65,16 +67,18 @@ This is basically Ethereum's encoding, but without the leading recovery bit.

## Status

-Proposed. The signature section seems to be agreed upon for the most part.
-Needs decision on Enum types.
+Implemented

## Consequences

### Positive
-* More space efficient signatures
+
+- More space efficient signatures

### Negative
-* We have an amino dependency for cryptography.
+
+- We have an amino dependency for cryptography.
### Neutral

-* No change to public keys
\ No newline at end of file
+
+- No change to public keys

diff --git a/docs/architecture/adr-016-protocol-versions.md b/docs/architecture/adr-016-protocol-versions.md
new file mode 100644
index 00000000000..3a2351a563f
--- /dev/null
+++ b/docs/architecture/adr-016-protocol-versions.md
@@ -0,0 +1,308 @@
+# ADR 016: Protocol Versions
+
+## TODO
+
+- How to / should we version the authenticated encryption handshake itself (ie.
+  upfront protocol negotiation for the P2PVersion)
+- How to / should we version ABCI itself? Should it just be absorbed by the
+  BlockVersion?
+
+## Changelog
+
+- 18-09-2018: Updates after working a bit on implementation
+  - ABCI Handshake needs to happen independently of starting the app
+    conns so we can see the result
+  - Add question about ABCI protocol version
+- 16-08-2018: Updates after discussion with SDK team
+  - Remove signalling for next version from Header/ABCI
+- 03-08-2018: Updates from discussion with Jae:
+  - ProtocolVersion contains Block/AppVersion, not Current/Next
+  - signal upgrades to Tendermint using EndBlock fields
+  - don't restrict peer compatibility by version to simplify syncing old nodes
+- 28-07-2018: Updates from review
+  - split into two ADRs - one for protocol, one for chains
+  - include signalling for upgrades in header
+- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
+  versions
+
+## Context
+
+Here we focus on software-agnostic protocol versions.
+
+The Software Version is covered by SemVer and described elsewhere.
+It is not relevant to the protocol description, suffice it to say that if any protocol version
+changes, the software version changes, but not necessarily vice versa.
+
+Software version should be included in NodeInfo for convenience/diagnostics.
+
+We are also interested in versioning across different blockchains in a
+meaningful way, for instance to differentiate branches of a contentious
+hard-fork. We leave that for a later ADR.
+
+## Requirements
+
+We need to version components of the blockchain that may be independently upgraded.
+We need to do it in a way that is scalable and maintainable - we can't just litter
+the code with conditionals.
+
+We can consider the complete version of the protocol to contain the following sub-versions:
+BlockVersion, P2PVersion, AppVersion. These versions reflect the major sub-components
+of the software that are likely to evolve together, at different rates, and in different ways,
+as described below.
+
+The BlockVersion defines the core of the blockchain data structures and
+should change infrequently.
+
+The P2PVersion defines how peers connect and communicate with each other - it's
+not part of the blockchain data structures, but defines the protocols used to build the
+blockchain. It may change gradually.
+
+The AppVersion determines how we compute app specific information, like the
+AppHash and the Results.
+
+All of these versions may change over the life of a blockchain, and we need to
+be able to help new nodes sync up across version changes. This means we must be willing
+to connect to peers with older versions.
+
+### BlockVersion
+
+- All tendermint hashed data-structures (headers, votes, txs, responses, etc.).
+  - Note the semantic meaning of a transaction may change according to the AppVersion, but the way txs are merklized into the header is part of the BlockVersion
+- It should be the least frequent/likely to change.
+  - Tendermint should be stabilizing - it's just Atomic Broadcast.
+  - We can start considering for Tendermint v2.0 in a year
+- It's easy to determine the version of a block from its serialized form
+
+### P2PVersion
+
+- All p2p and reactor messaging (messages, detectable behaviour)
+- Will change gradually as reactors evolve to improve performance and support new features - eg proposed new message types BatchTx in the mempool and HasBlockPart in the consensus
+- It's easy to determine the version of a peer from its first serialized message(s)
+- New versions must be compatible with at least one old version to allow gradual upgrades
+
+### AppVersion
+
+- The ABCI state machine (txs, begin/endblock behaviour, commit hashing)
+- Behaviour and message types will change abruptly in the course of the life of a chain
+- Need to minimize complexity of the code for supporting different AppVersions at different heights
+- Ideally, each version of the software supports only a _single_ AppVersion at one time
+  - this means we check out different versions of the software at different heights instead of littering the code
+    with conditionals
+  - minimize the number of data migrations required across AppVersion (ie. most AppVersion should be able to read the same state from disk as previous AppVersion).
+
+## Ideal
+
+Each component of the software is independently versioned in a modular way and it's easy to mix and match and upgrade.
+
+## Proposal
+
+Each of BlockVersion, AppVersion, and P2PVersion is a monotonically increasing uint64.
+
+To use these versions, we need to update the block Header, the p2p NodeInfo, and the ABCI.
+
+### Header
+
+Block Header should include a `Version` struct as its first field like:
+
+```
+type Version struct {
+    Block uint64
+    App uint64
+}
+```
+
+Here, `Version.Block` defines the rules for the current block, while
+`Version.App` defines the app version that processed the last block and computed
+the `AppHash` in the current block. Together they provide a complete description
+of the consensus-critical protocol.
+
+Since we have settled on a proto3 header, the ability to read the BlockVersion out of the serialized header is unambiguous.
+
+Using a Version struct gives us more flexibility to add fields without breaking
+the header.
+
+The ProtocolVersion struct includes both the Block and App versions - it should
+serve as a complete description of the consensus-critical protocol.
+
+### NodeInfo
+
+NodeInfo should include a Version struct as its first field like:
+
+```
+type Version struct {
+    P2P uint64
+    Block uint64
+    App uint64
+
+    Other []string
+}
+```
+
+Note this effectively makes `Version.P2P` the first field in the NodeInfo, so it
+should be easy to read this out of the serialized header if need be to facilitate an upgrade.
+
+The `Version.Other` here should include additional information like the name of the software client and
+its SemVer version - this is for convenience only. Eg.
+`tendermint-core/v0.22.8`. It's a `[]string` so it can include information about
+the version of Tendermint, of the app, of Tendermint libraries, etc.
+
+### ABCI
+
+Since the ABCI is responsible for keeping Tendermint and the App in sync, we
+need to communicate version information through it.
+
+On startup, we use Info to perform a basic handshake. It should include all the
+version information.
+
+We also need to be able to update versions in the life of a blockchain. The
+natural place to do this is EndBlock.
+
+Note that currently the result of the Handshake isn't exposed anywhere, as the
+handshaking happens inside the `proxy.AppConns` abstraction. We will need to
+remove the handshaking from the `proxy` package so we can call it independently
+and get the result, which should contain the application version.
+
+#### Info
+
+RequestInfo should add support for protocol versions like:
+
+```
+message RequestInfo {
+  string version
+  uint64 block_version
+  uint64 p2p_version
+}
+```
+
+Similarly, ResponseInfo should return the versions:
+
+```
+message ResponseInfo {
+  string data
+
+  string version
+  uint64 app_version
+
+  int64 last_block_height
+  bytes last_block_app_hash
+}
+```
+
+The existing `version` fields should be called `software_version` but we leave
+them for now to reduce the number of breaking changes.
+
+#### EndBlock
+
+Updating the version could be done either with new fields or by using the
+existing `tags`. Since we're trying to communicate information that will be
+included in Tendermint block Headers, it should be native to the ABCI, and not
+something embedded through some scheme in the tags. Thus, version updates should
+be communicated through EndBlock.
+
+EndBlock already contains `ConsensusParams`. We can add version information to
+the ConsensusParams as well:
+
+```
+message ConsensusParams {
+
+  BlockSize block_size
+  EvidenceParams evidence_params
+  VersionParams version
+}
+
+message VersionParams {
+  uint64 block_version
+  uint64 app_version
+}
+```
+
+For now, the `block_version` will be ignored, as we do not allow block version
+to be updated live. If the `app_version` is set, it signals that the app's
+protocol version has changed, and the new `app_version` will be included in the
+`Block.Header.Version.App` for the next block.
+
+### BlockVersion
+
+BlockVersion is included in both the Header and the NodeInfo.
+
+Changing BlockVersion should happen quite infrequently and ideally only for
+critical upgrades. For now, it is not encoded in ABCI, though it's always
+possible to use tags to signal an external process to co-ordinate an upgrade.
+
+Note Ethereum has not had to make an upgrade like this (everything has been at state machine level, AFAIK).
+
+### P2PVersion
+
+P2PVersion is not included in the block Header, just the NodeInfo.
+
+P2PVersion is the first field in the NodeInfo. NodeInfo is also proto3 so this is easy to read out.
+
+Note we need the peer/reactor protocols to take the versions of peers into account when sending messages:
+
+- don't send messages they don't understand
+- don't send messages they don't expect
+
+Doing this will be specific to the upgrades being made.
+
+Note we also include the list of reactor channels in the NodeInfo and already don't send messages for channels the peer doesn't understand.
+If upgrades always use new channels, this simplifies the development cost of backwards compatibility.
+
+Note NodeInfo is only exchanged after the authenticated encryption handshake to ensure that it's private.
+Doing any version exchange before encrypting could be considered information leakage, though I'm not sure
+how much that matters compared to being able to upgrade the protocol.
+
+XXX: if needed, can we change the meaning of the first byte of the first message to encode a handshake version?
+this is the first byte of a 32-byte ed25519 pubkey.
+
+### AppVersion
+
+AppVersion is also included in the block Header and the NodeInfo.
+
+AppVersion essentially defines how the AppHash and LastResults are computed.
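To make the intended checks concrete, the following is a minimal sketch of how a node could validate a proposed header's `Version` fields against the versions recorded in its state. The type and helper names here are assumptions for illustration, not the actual Tendermint API:

```go
package version

import "fmt"

// Version mirrors the Header Version struct proposed above.
type Version struct {
	Block uint64
	App   uint64
}

// Header is a trimmed-down stand-in for the real block header.
type Header struct {
	Version Version
}

// validateHeaderVersion is a hypothetical helper showing how a proposed
// header's versions could be checked during block validation.
func validateHeaderVersion(stateBlock, stateApp uint64, h Header) error {
	if h.Version.Block != stateBlock {
		return fmt.Errorf("wrong Version.Block: got %d, want %d", h.Version.Block, stateBlock)
	}
	// Version.App must match the app version recorded in state, i.e. the
	// version that computed the AppHash carried by this header.
	if h.Version.App != stateApp {
		return fmt.Errorf("wrong Version.App: got %d, want %d", h.Version.App, stateApp)
	}
	return nil
}
```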
+
+### Peer Compatibility
+
+Restricting peer compatibility based on version is complicated by the need to
+help old peers, possibly on older versions, sync the blockchain.
+
+We might be tempted to say that we only connect to peers with the same
+AppVersion and BlockVersion (since these define the consensus critical
+computations), and a select list of P2PVersions (ie. those compatible with
+ours), but then we'd need to make accommodations for connecting to peers with the
+right Block/AppVersion for the height they're on.
+
+For now, we will connect to peers with any version and restrict compatibility
+solely based on the ChainID. We leave more restrictive rules on peer
+compatibility to a future proposal.
+
+### Future Changes
+
+It may be valuable to support an `/unsafe_stop?height=_` endpoint to tell Tendermint to shut down at a given height.
+This could be used by an external manager process that oversees upgrades by
+checking out and installing new software versions and restarting the process. It
+would subscribe to the relevant upgrade event (needs to be implemented) and call `/unsafe_stop` at
+the correct height (of course only after getting approval from its user!).
+
+## Consequences
+
+### Positive
+
+- Make tendermint and application versions native to the ABCI to more clearly
+  communicate about them
+- Distinguish clearly between protocol versions and software version to
+  facilitate implementations in other languages
+- Versions included in key data structures in an easy-to-discern way
+- Allows proposers to signal for upgrades and apps to decide when to actually change the
+  version (and start signalling for a new version)
+
+### Neutral
+
+- Unclear how to version the initial P2P handshake itself
+- Versions aren't being used (yet) to restrict peer compatibility
+- Signalling for a new version happens through the proposer and must be
+  tallied/tracked in the app.
+
+### Negative
+
+- Adds more fields to the ABCI
+- Implies that a single codebase must be able to handle multiple versions

diff --git a/docs/architecture/adr-017-chain-versions.md b/docs/architecture/adr-017-chain-versions.md
new file mode 100644
index 00000000000..7113dbaee76
--- /dev/null
+++ b/docs/architecture/adr-017-chain-versions.md
@@ -0,0 +1,99 @@
+# ADR 017: Chain Versions
+
+## TODO
+
+- clarify how to handle slashing when ChainID changes
+
+## Changelog
+
+- 28-07-2018: Updates from review
+  - split into two ADRs - one for protocol, one for chains
+- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
+  versions
+
+## Context
+
+Software and Protocol versions are covered in a separate ADR.
+
+Here we focus on chain versions.
+
+## Requirements
+
+We need to version blockchains across protocols, networks, forks, etc.
+We need chain identifiers and descriptions so we can talk about a multitude of chains,
+and especially the differences between them, in a meaningful way.
+
+### Networks
+
+We need to support many independent networks running the same version of the software,
+even possibly starting from the same initial state.
+They must have distinct identifiers so that peers know which one they are joining and so
+validators and users can prevent replay attacks.
+
+Call this the `NetworkName` (note we currently call this `ChainID` in the software. In this
+ADR, ChainID has a different meaning).
+It represents both the application being run and the community or intention
+of running it.
+
+Peers only connect to other peers with the same NetworkName.
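As an illustration, the handshake-time check for this rule could look like the following sketch; the trimmed-down `NodeInfo` shape and the function name are assumptions for this example, not the real p2p API:

```go
package p2p

import "fmt"

// NodeInfo is a trimmed-down stand-in for the real p2p node info; only the
// field needed for this sketch is shown.
type NodeInfo struct {
	Network string // the NetworkName described above
}

// checkNetwork sketches the rule stated above: refuse to peer with any node
// that reports a different NetworkName.
func checkNetwork(ours, theirs NodeInfo) error {
	if ours.Network != theirs.Network {
		return fmt.Errorf("peer is on network %q, we are on %q", theirs.Network, ours.Network)
	}
	return nil
}
```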
+
+### Forks
+
+We need to support existing networks upgrading and forking, wherein they may do any of:
+
+  - revert back to some height, continue with the same versions but new blocks
+  - arbitrarily mutate state at some height, continue with the same versions (eg. Dao Fork)
+  - change the AppVersion at some height
+
+Note because of Tendermint's voting power threshold rules, a chain can only be extended under the "original" rules and under the new rules
+if 1/3 or more is double signing, which is expressly prohibited, and is supposed to result in their punishment on both chains. Since they can censor
+the punishment, the chain is expected to be hardforked to remove the validators. Thus, if both branches are to continue after a fork,
+they will each require a new identifier, and the old chain identifier will be retired (ie. only useful for syncing history, not for new blocks).
+
+TODO: explain how to handle slashing when the chain id changes!
+
+We need a consistent way to describe forks.
+
+## Proposal
+
+### ChainDescription
+
+ChainDescription is a complete immutable description of a blockchain. It takes the following form:
+
+```
+ChainDescription = <NetworkName>/<BlockVersion>/<AppVersion>/<StateHash>/<ValHash>/<ConsensusParamsHash>
+```
+
+Here, StateHash is the merkle root of the initial state, ValHash is the merkle root of the initial Tendermint validator set,
+and ConsensusParamsHash is the merkle root of the initial Tendermint consensus parameters.
+
+The `genesis.json` file must contain enough information to compute this value. It need not contain the StateHash or ValHash itself,
+but must contain the state from which they can be computed with the given protocol versions.
+
+NOTE: consider splitting NetworkName into NetworkName and AppName - this allows
+folks to independently use the same application for different networks (ie we
+could imagine multiple communities of validators wanting to put up a Hub using
+the same app but having a distinct network name. Arguably not needed if
+differences will come via different initial state / validators).
+
+#### ChainID
+
+Define `ChainID = TMHASH(ChainDescriptor)`. It's the unique ID of a blockchain.
+
+It should be Bech32 encoded when handled by users, eg. with `cosmoschain` prefix.
+
+#### Forks and Upgrades
+
+When a chain forks or upgrades but continues the same history, it takes a new ChainDescription as follows:
+
+```
+ChainDescription = <ChainID>/x/<Height>/<ForkDescription>
+```
+
+Where
+
+- ChainID is the ChainID from the previous ChainDescription (ie. its hash)
+- `x` denotes that a change occurred
+- `Height` is the height the change occurred
+- ForkDescription has the same form as ChainDescription but for the fork
+- this allows forks to specify new versions for tendermint or the app, as well as arbitrary changes to the state or validator set

diff --git a/docs/architecture/adr-018-ABCI-Validators.md b/docs/architecture/adr-018-ABCI-Validators.md
new file mode 100644
index 00000000000..b632da855c4
--- /dev/null
+++ b/docs/architecture/adr-018-ABCI-Validators.md
@@ -0,0 +1,103 @@
+# ADR 018: ABCI Validator Improvements
+
+## Changelog
+
+16-08-2018: Follow up from review:
+  - Revert changes to commit round
+  - Remind about justification for removing pubkey
+  - Update pros/cons
+05-08-2018: Initial draft
+
+## Context
+
+ADR 009 introduced major improvements to the ABCI around validators and the use
+of Amino. Here we follow up with some additional changes to improve the naming
+and expected use of Validator messages.
+
+## Decision
+
+### Validator
+
+Currently a Validator contains `address` and `pub_key`, and one or the other is
+optional/not-sent depending on the use case. Instead, we should have a
+`Validator` (with just the address, used for RequestBeginBlock)
+and a `ValidatorUpdate` (with the pubkey, used for ResponseEndBlock):
+
+```
+message Validator {
+    bytes address
+    int64 power
+}
+
+message ValidatorUpdate {
+    PubKey pub_key
+    int64 power
+}
+```
+
+As noted in [ADR-009](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-009-ABCI-design.md),
+the `Validator` does not contain a pubkey because quantum public keys are
+quite large and it would be wasteful to send them all over ABCI with every block.
+Thus, applications that want to take advantage of the information in BeginBlock
+are *required* to store pubkeys in state (or use much less efficient lazy means
+of verifying BeginBlock data).
+
+### RequestBeginBlock
+
+LastCommitInfo currently has an array of `SigningValidator` that contains
+information for each validator in the entire validator set.
+Instead, this should be called `VoteInfo`, since it is information about the
+validator votes.
+
+Note that all votes in a commit must be from the same round.
+
+```
+message LastCommitInfo {
+  int64 round
+  repeated VoteInfo commit_votes
+}
+
+message VoteInfo {
+  Validator validator
+  bool signed_last_block
+}
+```
+
+### ResponseEndBlock
+
+Use ValidatorUpdates instead of Validators. Then it's clear we don't need an
+address, and we do need a pubkey.
+
+We could require the address here as well as a sanity check, but it doesn't seem
+necessary.
+
+### InitChain
+
+Use ValidatorUpdates for both Request and Response. InitChain
+is about setting/updating the initial validator set, unlike BeginBlock
+which is just informational.
+
+## Status
+
+Proposal.
+
+## Consequences
+
+### Positive
+
+- Clarifies the distinction between the different uses of validator information
+
+### Negative
+
+- Apps must still store the public keys in state to utilize the RequestBeginBlock info
+
+### Neutral
+
+- ResponseEndBlock does not require an address
+
+## References
+
+- [Latest ABCI Spec](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/app-dev/abci-spec.md)
+- [ADR-009](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/architecture/adr-009-ABCI-design.md)
+- [Issue #1712 - Don't send PubKey in
+  RequestBeginBlock](https://github.com/tendermint/tendermint/issues/1712)

diff --git a/docs/architecture/adr-019-multisigs.md b/docs/architecture/adr-019-multisigs.md
new file mode 100644
index 00000000000..3d1c5ba871f
--- /dev/null
+++ b/docs/architecture/adr-019-multisigs.md
@@ -0,0 +1,160 @@
+# ADR 019: Encoding standard for Multisignatures
+
+## Changelog
+
+06-08-2018: Minor updates
+
+27-07-2018: Update draft to use amino encoding
+
+11-07-2018: Initial Draft
+
+## Context
+
+Multisignatures, or technically _Accountable Subgroup Multisignatures_ (ASM),
+are signature schemes which enable any subgroup of a set of signers to sign any message,
+and reveal to the verifier exactly who the signers were.
+This allows for complex conditionals for when to validate a signature.
+
+Suppose the set of signers is of size _n_.
+If we validate a signature if any subgroup of size _k_ signs a message,
+this becomes what is commonly referred to as a _k of n multisig_ in Bitcoin.
+
+This ADR specifies the encoding standard for general accountable subgroup multisignatures,
+k of n accountable subgroup multisignatures, and their weighted variants.
+
+In the future, we can also allow for more complex conditionals on the accountable subgroup.
+
+## Proposed Solution
+
+### New structs
+
+Every ASM will then have its own struct, implementing the crypto.Pubkey interface.
+
+This ADR assumes that [replacing crypto.Signature with []bytes](https://github.com/tendermint/tendermint/issues/1957) has been accepted.
+
+#### K of N threshold signature
+
+The pubkey is the following struct:
+
+```golang
+type ThresholdMultiSignaturePubKey struct { // K of N threshold multisig
+	K       uint            `json:"threshold"`
+	Pubkeys []crypto.Pubkey `json:"pubkeys"`
+}
+```
+
+We will derive N from the length of pubkeys. (For spatial efficiency in encoding)
+
+`Verify` will expect an `[]byte` encoded version of the Multisignature.
+(Multisignature is described in the next section)
+The multisignature will be rejected if the bitmap has fewer than k indices,
+or if any signature at any of the k indices is not a valid signature from
+the corresponding public key on the message.
+(If more than k signatures are included, all must be valid)
+
+`Bytes` will be the amino encoded version of the pubkey.
+
+Address will be `Hash(amino_encoded_pubkey)`
+
+The reason this doesn't use `log_8(n)` bytes per signer is because that heavily optimizes for the case where a very small number of signers are required.
+e.g. for `n` of size `24`, that would only be more space efficient for `k < 3`.
+This seems less likely, so it should not be the case we optimize for.
+
+#### Weighted threshold signature
+
+The pubkey is the following struct:
+
+```golang
+type WeightedThresholdMultiSignaturePubKey struct {
+	Weights   []uint          `json:"weights"`
+	Threshold uint            `json:"threshold"`
+	Pubkeys   []crypto.Pubkey `json:"pubkeys"`
+}
+```
+
+Weights and Pubkeys must be of the same length.
+Everything else proceeds identically to the K of N multisig,
+except the multisig fails if the sum of the weights is less than the threshold.
+
+#### Multisignature
+
+The intermediate phase of the signatures (as it accrues more signatures) will be the following struct:
+
+```golang
+type Multisignature struct {
+	BitArray CryptoBitArray // Documented later
+	Sigs     [][]byte
+}
+```
+
+It is important to recall that each private key will output a signature on the provided message itself.
+So no signing algorithm ever outputs the multisignature.
+The UI will take a signature, cast it into a multisignature, and then keep adding
+new signatures into it, and when done marshal into `[]byte`.
+This will require the following helper methods:
+
+```golang
+func SigToMultisig(sig []byte, n int)
+func GetIndex(pk crypto.Pubkey, []crypto.Pubkey)
+func AddSignature(sig Signature, index int, multiSig *Multisignature)
+```
+
+The multisignature will be converted to an `[]byte` using amino.MarshalBinaryBare. \*
+
+#### Bit Array
+
+We would be using a new implementation of a bitarray. The struct it would be encoded/decoded from is
+
+```golang
+type CryptoBitArray struct {
+	ExtraBitsStored byte   `json:"extra_bits"` // The number of extra bits in elems.
+	Elems           []byte `json:"elems"`
+}
+```
+
+The reason for not using the BitArray currently implemented in `libs/common/bit_array.go`
+is that it is less space efficient, due to a space / time trade-off.
+Evidence for this is outlined in [this issue](https://github.com/tendermint/tendermint/issues/2077).
+In the multisig, we will not be performing arithmetic operations,
+so there is no performance increase with the current implementation,
+and just loss of spatial efficiency.
+Implementing this new bit array with `[]byte` _should_ be simple, as no
+arithmetic operations between bit arrays are required, and save a couple of bytes.
+(Explained in that same issue)
+
+When this bit array is encoded, the number of elements is encoded due to amino.
+However we may be encoding a full byte for what we actually only need 1-7 bits for.
+We store that difference in ExtraBitsStored.
+This allows for us to have an unbounded number of signers, and is more space efficient than what is currently used in `libs/common`.
+Again the implementation of this space-saving feature is straightforward.
+
+### Encoding the structs
+
+We will use straightforward amino encoding. This is chosen for ease of compatibility in other languages.
+
+### Future points of discussion
+
+If desired, we can use ed25519 batch verification for all ed25519 keys.
+This is a future point of discussion, but would be backwards compatible as this information won't need to be marshalled.
+(There may even be cofactor concerns without ristretto)
+Aggregation of pubkeys / sigs in Schnorr sigs / BLS sigs is not backwards compatible, and would need to be a new ASM type.
+
+## Status
+
+Proposed.
+
+## Consequences
+
+### Positive
+
+- Supports multisignatures, in a way that won't require any special cases in our downstream verification code.
+- Easy to serialize / deserialize
+- Unbounded number of signers
+
+### Negative
+
+- Larger codebase, however this should reside in a subfolder of tendermint/crypto, as it provides no new interfaces. (Ref https://github.com/tendermint/go-crypto/issues/136)
+- Space inefficient due to utilization of amino encoding
+- Suggested implementation requires a new struct for every ASM.
+
+### Neutral

diff --git a/docs/architecture/adr-020-block-size.md b/docs/architecture/adr-020-block-size.md
new file mode 100644
index 00000000000..aebf3069c3b
--- /dev/null
+++ b/docs/architecture/adr-020-block-size.md
@@ -0,0 +1,77 @@
+# ADR 020: Limiting txs size inside a block
+
+## Changelog
+
+13-08-2018: Initial Draft
+15-08-2018: Second version after Dev's comments
+28-08-2018: Third version after Ethan's comments
+30-08-2018: AminoOverheadForBlock => MaxAminoOverheadForBlock
+31-08-2018: Bounding evidence and chain ID
+
+## Context
+
+We currently use MaxTxs to reap txs from the mempool when proposing a block,
+but enforce MaxBytes when unmarshalling a block, so we could easily propose a
+block that's too large to be valid.
+
+We should just remove MaxTxs altogether and stick with MaxBytes, and have a
+`mempool.ReapMaxBytes`.
+
+But we can't just reap BlockSize.MaxBytes, since MaxBytes is for the entire block,
+not for the txs inside the block. There's extra amino overhead + the actual
+headers on top of the actual transactions + evidence + last commit.
+
+## Proposed solution
+
+Therefore, we should:
+
+1) Get rid of MaxTxs.
+2) Rename MaxTxsBytes to MaxBytes.
+
+When we need to ReapMaxBytes from the mempool, we calculate the upper bound as follows:
+
+```
+ExactLastCommitBytes = {number of validators currently enabled} * {MaxVoteBytes}
+MaxEvidenceBytesPerBlock = MaxBytes / 10
+ExactEvidenceBytes = cs.evpool.PendingEvidence(MaxEvidenceBytesPerBlock) * MaxEvidenceBytes
+
+mempool.ReapMaxBytes(MaxBytes - MaxAminoOverheadForBlock - ExactLastCommitBytes - ExactEvidenceBytes - MaxHeaderBytes)
+```
+
+where MaxVoteBytes, MaxEvidenceBytes, MaxHeaderBytes and MaxAminoOverheadForBlock
+are constants defined inside the `types` package:
+
+- MaxVoteBytes - 170 bytes
+- MaxEvidenceBytes - 364 bytes
+- MaxHeaderBytes - 476 bytes (~276 bytes hashes + 200 bytes - 50 UTF-8 encoded
+  symbols of chain ID 4 bytes each in the worst case + amino overhead)
+- MaxAminoOverheadForBlock - 8 bytes (assuming MaxHeaderBytes includes amino
+  overhead for encoding header, MaxVoteBytes - for encoding vote, etc.)
+
+ChainID needs to be bounded to 50 symbols max.
+
+When reaping evidence, we use MaxBytes to calculate the upper bound (e.g. 1/10)
+to save some space for transactions.
+
+NOTE while reaping the `max int` bytes in mempool, we should account for the fact that every
+transaction will take `len(tx)+aminoOverhead`, where aminoOverhead=1-4 bytes.
+
+We should write a test that fails if the underlying structs got changed, but
+MaxXXX stayed the same.
+
+## Status
+
+Proposed.
+
+## Consequences
+
+### Positive
+
+* one way to limit the size of a block
+* fewer variables to configure
+
+### Negative
+
+* constants that need to be adjusted if the underlying structs got changed
+
+### Neutral

diff --git a/docs/architecture/adr-021-abci-events.md b/docs/architecture/adr-021-abci-events.md
new file mode 100644
index 00000000000..45a73df1d64
--- /dev/null
+++ b/docs/architecture/adr-021-abci-events.md
@@ -0,0 +1,52 @@
+# ADR 021: ABCI Events
+
+## Changelog
+
+- *2018-09-02* Remove ABCI errors component. Update description for events
+- *2018-07-12* Initial version
+
+## Context
+
+ABCI tags were first described in [ADR 002](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-002-event-subscription.md).
+They are key-value pairs that can be used to index transactions.
+
+Currently, ABCI messages return a list of tags to describe an
+"event" that took place during the Check/DeliverTx/Begin/EndBlock,
+where each tag refers to a different property of the event, like the sending and receiving account addresses.
+
+Since there is only one list of tags, recording data for multiple such events in
+a single Check/DeliverTx/Begin/EndBlock must be done using prefixes in the key
+space.
+
+Alternatively, groups of tags that constitute an event can be separated by a
+special tag that denotes a break between the events. This would allow
+straightforward encoding of multiple events into a single list of tags without
+prefixing, at the cost of these "special" tags to separate the different events.
+
+TODO: brief description of how the indexing works
+
+## Decision
+
+Instead of returning a list of tags, return a list of events, where
+each event is a list of tags. This way we naturally capture the concept of
+multiple events happening during a single ABCI message.
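A minimal Go sketch of the proposed shape; the type names below are stand-ins for the eventual protobuf definitions, not the real ABCI types:

```go
package events

// KVPair and Event are illustrative stand-ins for the eventual ABCI types;
// the real definitions would live in the protobuf files.
type KVPair struct {
	Key   []byte
	Value []byte
}

// Event groups the tags that describe one logical occurrence.
type Event struct {
	Tags []KVPair
}

// TwoTransfers shows how a single DeliverTx performing two transfers could
// report two events, with no key prefixing needed to keep them apart.
var TwoTransfers = []Event{
	{Tags: []KVPair{
		{Key: []byte("sender"), Value: []byte("addrA")},
		{Key: []byte("recipient"), Value: []byte("addrB")},
	}},
	{Tags: []KVPair{
		{Key: []byte("sender"), Value: []byte("addrC")},
		{Key: []byte("recipient"), Value: []byte("addrD")},
	}},
}
```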
+
+TODO: describe impact on indexing and querying
+
+## Status
+
+Proposed
+
+## Consequences
+
+### Positive
+
+- Ability to track distinct events separate from ABCI calls (DeliverTx/BeginBlock/EndBlock)
+- More powerful query abilities
+
+### Negative
+
+- More complex query syntax
+- More complex search implementation
+
+### Neutral

diff --git a/docs/architecture/adr-022-abci-errors.md b/docs/architecture/adr-022-abci-errors.md
new file mode 100644
index 00000000000..23e917f478b
--- /dev/null
+++ b/docs/architecture/adr-022-abci-errors.md
@@ -0,0 +1,64 @@
+# ADR 022: ABCI Codespaces
+
+## Changelog
+
+- *2018-09-01* Initial version
+
+## Context
+
+ABCI errors should provide an abstraction between application details
+and the client interface responsible for formatting & displaying errors to the user.
+
+Currently, this abstraction consists of a single integer (the `code`), where any
+`code > 0` is considered an error (ie. invalid transaction) and all type
+information about the error is contained in the code. This integer is
+expected to be decoded by the client into a known error string, where any
+more specific data is contained in the `data`.
+
+In a [previous conversation](https://github.com/tendermint/abci/issues/165#issuecomment-353704015),
+it was suggested that not all non-zero codes need to be errors, hence why it's called `code` and not `error code`.
+It is unclear exactly how the semantics of the `code` field will evolve, though
+better lite-client proofs (like discussed for tags
+[here](https://github.com/tendermint/tendermint/issues/1007#issuecomment-413917763))
+may play a role.
+
+Note that having all type information in a single integer
+precludes an easy coordination method between "module implementers" and "client
+implementers", especially for apps with many "modules". With an unbounded error domain (such as a string), module
+implementers can pick a globally unique prefix & error code set, so client
+implementers could easily implement support for "module A" regardless of which
+particular blockchain network it was running in and which other modules were running with it. With
+only error codes, globally unique codes are difficult/impossible, as the space
+is finite and collisions are likely without an easy way to coordinate.
+
+For instance, while trying to build an ecosystem of modules that can be composed into a single
+ABCI application, the Cosmos-SDK had to hack a higher level "codespace" into the
+single integer so that each module could have its own space to express its
+errors.
+
+## Decision
+
+Include a `string code_space` in all ABCI messages that have a `code`.
+This allows applications to namespace the codes so they can experiment with
+their own code schemes.
+
+It is the responsibility of applications to limit the size of the `code_space`
+string.
+
+How the codespace is hashed into block headers (ie. so it can be queried
+efficiently by lite clients) is left for a separate ADR.
+
+## Consequences
+
+### Positive
+
+- No need for complex codespacing on a single integer
+- More expressive type system for errors
+
+### Negative
+
+- Another field in the response needs to be accounted for
+- Some redundancy with `code` field
+- May encourage more error/code type info to move to the `codespace` string, which
+  could impact lite clients.
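To illustrate why a codespace string eases coordination between module and client implementers, a client-side lookup might work as in the following sketch; the codespaces, codes, and messages here are invented for illustration only:

```go
package codespace

import "fmt"

// registry maps codespace -> code -> human-readable message. Each module
// team can publish its own table under a globally unique codespace string,
// with no risk of colliding with other modules' codes.
var registry = map[string]map[uint32]string{
	"bank":    {1: "insufficient funds", 2: "unknown account"},
	"staking": {1: "validator not found"},
}

// Describe resolves a (codespace, code) pair to a message, falling back to
// a generic string for unknown pairs.
func Describe(codespace string, code uint32) string {
	if msgs, ok := registry[codespace]; ok {
		if msg, ok := msgs[code]; ok {
			return msg
		}
	}
	return fmt.Sprintf("unrecognized error: codespace=%q code=%d", codespace, code)
}
```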
+
diff --git a/docs/architecture/adr-023-ABCI-propose-tx.md b/docs/architecture/adr-023-ABCI-propose-tx.md
new file mode 100644
index 00000000000..497ccd18407
--- /dev/null
+++ b/docs/architecture/adr-023-ABCI-propose-tx.md
@@ -0,0 +1,183 @@
+# ADR 023: ABCI `ProposeTx` Method
+
+## Changelog
+
+25-06-2018: Initial draft based on [#1776](https://github.com/tendermint/tendermint/issues/1776)
+
+## Context
+
+[#1776](https://github.com/tendermint/tendermint/issues/1776) was
+opened in relation to implementation of a Plasma child chain using Tendermint
+Core as consensus/replication engine.
+
+Due to the requirements of [Minimal Viable Plasma (MVP)](https://ethresear.ch/t/minimal-viable-plasma/426) and [Plasma Cash](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298), it is necessary for ABCI apps to have a mechanism to handle the following cases (more may emerge in the near future):
+
+1. `deposit` transactions on the Root Chain, which must consist of a block
+   with a single transaction, where there are no inputs and only one output
+   made in favour of the depositor. In this case, a `block` consists of
+   a transaction with the following shape:
+
+   ```
+   [0, 0, 0, 0,        #input1 - zeroed out
+    0, 0, 0, 0,        #input2 - zeroed out
+    <owner>, <amount>, #output1 - in favour of depositor
+    0, 0,              #output2 - zeroed out
+    <fee>,
+   ]
+   ```
+
+   `exit` transactions may also be treated in a similar manner, wherein the
+   input is the UTXO being exited on the Root Chain, and the output belongs to
+   a reserved "burn" address, e.g., `0x0`. In such cases, it is favourable for
+   the containing block to only hold a single transaction that may receive
+   special treatment.
+
+2. Other "internal" transactions on the child chain, which may be initiated
+   unilaterally. The most basic example is a coinbase transaction
+   implementing validator node incentives, but such transactions may also be
+   app-specific. In these cases, it may be favourable for such transactions to
+   be ordered in a specific manner, e.g., coinbase transactions will always be
+   at index 0. In general, such strategies increase the determinism and
+   predictability of blockchain applications.
+
+While it is possible to deal with the cases enumerated above using the
+existing ABCI, the currently available options result in suboptimal
+workarounds. Two are explained in greater detail below.
+
+### Solution 1: App state-based Plasma chain
+
+In this workaround, the app maintains a `PlasmaStore` with a corresponding
+`Keeper`. The PlasmaStore is responsible for maintaining a second, separate
+blockchain that complies with the MVP specification, including `deposit`
+blocks and other "internal" transactions. These "virtual" blocks are then broadcast
+to the Root Chain.
+
+This naive approach is, however, fundamentally flawed, as it by definition
+diverges from the canonical chain maintained by Tendermint. This is further
+exacerbated if the business logic for generating such transactions is
+potentially non-deterministic; such logic should not even run in
+`Begin/EndBlock`, which may, as a result, break consensus guarantees.
+
+Additionally, this has serious implications for "watchers" - independent third parties,
+or even an auxiliary blockchain, responsible for ensuring that blocks recorded
+on the Root Chain are consistent with the Plasma chain's. Since, in this case,
+the Plasma chain is inconsistent with the canonical one maintained by Tendermint
+Core, it seems that there exists no compact means of verifying the legitimacy of
+the Plasma chain without replaying every state transition from genesis (!).
+
+### Solution 2: Broadcast to Tendermint Core from ABCI app
+
+This approach is inspired by `ethermint`, in which Ethereum transactions are
+relayed to Tendermint Core. It requires the app to maintain a client connection
+to the consensus engine.
+
+Whenever an "internal" transaction needs to be created, the proposer of the
+current block broadcasts the transaction or transactions to Tendermint as
+needed in order to ensure that the Tendermint chain and Plasma chain are
+completely consistent.
+
+This allows "internal" transactions to pass through the full consensus
+process, and to be validated in methods like `CheckTx`, i.e., checked that
+they are signed by the proposer, are semantically correct, etc. Note that this
+involves informing the ABCI app of the block proposer, which was temporarily
+hacked in as a means of conducting this experiment, although this should not
+be necessary when the current proposer is passed to `BeginBlock`.
+
+It is much easier to relay these transactions directly to the Root
+Chain smart contract and/or maintain a "compressed" auxiliary chain comprised
+of Plasma-friendly blocks that 100% reflect the canonical (Tendermint)
+blockchain. Unfortunately, this approach is not idiomatic (i.e., it utilises the
+Tendermint consensus engine in unintended ways). Additionally, it does not
+allow the application developer to:
+
+- Control the _ordering_ of transactions in the proposed block (e.g., index 0,
+  or 0 to `n` for coinbase transactions)
+- Control the _number_ of transactions in the block (e.g., when a `deposit`
+  block is required)
+
+Since determinism is of utmost importance in blockchain engineering, this approach,
+while more viable, should also not be considered fit for production.
+
+## Decision
+
+### `ProposeTx`
+
+In order to address the difficulties described above, the ABCI interface must
+expose an additional method, tentatively named `ProposeTx`.
+
+It should have the following signature:
+
+```
+ProposeTx(RequestProposeTx) ResponseProposeTx
+```
+
+Where `RequestProposeTx` and `ResponseProposeTx` are `message`s with the
+following shapes:
+
+```
+message RequestProposeTx {
+  int64 next_block_height = 1; // height of the block the proposed tx would be part of
+  Validator proposer = 2; // the proposer details
+}
+
+message ResponseProposeTx {
+  int64 num_tx = 1; // the number of tx to include in proposed block
+  repeated bytes txs = 2; // ordered transaction data to include in block
+  bool exclusive = 3; // whether the block should exclude other transactions (from `mempool`)
+}
+```
+
+`ProposeTx` would be called before `mempool.Reap` at this
+[line](https://github.com/tendermint/tendermint/blob/master/consensus/state.go#L906).
+Depending on whether `exclusive` is `true` or `false`, the proposed
+transactions are then pushed on top of the transactions received from
+`mempool.Reap`.
+
+### `DeliverTx`
+
+Since the transactions received from `ProposeTx` are _not_ passed through `CheckTx`,
+it is probably a good idea to provide a means of differentiating "internal" transactions
+from user-generated ones, in case the app developer needs/wants to take extra measures to
+ensure validity of the proposed transactions.
+
+Therefore, the `RequestDeliverTx` message should be changed to provide an additional flag, like so:
+
+```
+message RequestDeliverTx {
+  bytes tx = 1;
+  bool internal = 2;
+}
+```
+
+Alternatively, an additional method `DeliverProposeTx` may be added as an accompaniment to
+`ProposeTx`. However, it is not clear at this stage if this additional overhead is necessary
+to preserve consensus guarantees given that a simple flag may suffice for now.
+
+## Status
+
+Pending
+
+## Consequences
+
+### Positive
+
+- Tendermint ABCI apps will be able to function as minimally viable Plasma chains.
+- It will thereby become possible to add an extension to `cosmos-sdk` to enable
+  ABCI apps to support both IBC and Plasma, maximising interop.
+- ABCI apps will have greater control and flexibility in managing blockchain state,
+  without having to resort to non-deterministic hacks and/or unsafe workarounds.
+
+### Negative
+
+- Maintenance overhead of exposing an additional ABCI method
+- Potential security issues that may have been overlooked and must now be tested extensively
+
+### Neutral
+
+- ABCI developers must deal with increased (albeit nominal) API surface area.
+
+## References
+
+- [#1776 Plasma and "Internal" Transactions in ABCI Apps](https://github.com/tendermint/tendermint/issues/1776)
+- [Minimal Viable Plasma](https://ethresear.ch/t/minimal-viable-plasma/426)
+- [Plasma Cash: Plasma with much less per-user data checking](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298)
diff --git a/docs/architecture/adr-024-sign-bytes.md b/docs/architecture/adr-024-sign-bytes.md
new file mode 100644
index 00000000000..34bf6e51e66
--- /dev/null
+++ b/docs/architecture/adr-024-sign-bytes.md
@@ -0,0 +1,234 @@
+# ADR 024: SignBytes and validator types in privval
+
+## Context
+
+Currently, the messages exchanged between tendermint and a (potentially remote) signer/validator,
+namely votes, proposals, and heartbeats, are encoded as a JSON string
+(e.g., via `Vote.SignBytes(...)`) and then
+signed. JSON encoding is sub-optimal for both hardware wallets
+and for usage in Ethereum smart contracts. Both points are laid out in detail in [issue#1622].
+
+Also, there is currently no distinction between sign requests and replies, and there is
+no way for a remote signer to include an error code or message in case something went wrong.
+The messages exchanged between tendermint and a remote signer currently live in
+[privval/socket.go] and encapsulate the corresponding types in [types].
+
+
+[privval/socket.go]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/privval/socket.go#L496-L502
+[issue#1622]: https://github.com/tendermint/tendermint/issues/1622
+[types]: https://github.com/tendermint/tendermint/tree/master/types
+
+
+## Decision
+
+- restructure vote, proposal, and heartbeat such that their encoding is easily parseable by
+hardware devices and smart contracts using a binary encoding format ([amino] in this case)
+- split up the messages exchanged between tendermint and remote signers into requests and
+responses (see details below)
+- include an error type in responses
+
+### Overview
+```
++--------------+                       +----------------+
+|              |      SignXRequest     |                |
+|Remote signer |<----------------------+   tendermint   |
+| (e.g. KMS)   |                       |                |
+|              +---------------------->|                |
++--------------+      SignedXReply     +----------------+
+
+
+SignXRequest {
+    x: X
+}
+
+SignedXReply {
+    x: X
+    sig: Signature // []byte
+    err: Error{
+        code: int
+        desc: string
+    }
+}
+```
+
+TODO: Alternatively, the type `X` might directly include the signature. A lot of places expect a vote with a
+signature and do not necessarily deal with "Replies".
+Still exploring what would work best here.
+This would look like (exemplified using X = Vote):
+```
+Vote {
+    // all fields besides signature
+}
+
+SignedVote {
+    Vote      Vote
+    Signature []byte
+}
+
+SignVoteRequest {
+    Vote Vote
+}
+
+SignedVoteReply {
+    Vote SignedVote
+    Err  Error
+}
+```
+
+**Note:** There was a related discussion around including a fingerprint of, or the whole,
+public key in each sign-request to tell the signer which corresponding private key to
+use to sign the message. This is particularly relevant in the context of the KMS
+but is currently not considered in this ADR.
+
+
+[amino]: https://github.com/tendermint/go-amino/
+
+### Vote
+
+As explained in [issue#1622] `Vote` will be changed to contain the following fields
+(notation in protobuf-like syntax for easy readability):
+
+```proto
+// vanilla protobuf / amino encoded
+message Vote {
+    Version   fixed32
+    Height    sfixed64
+    Round     sfixed32
+    VoteType  fixed32
+    Timestamp Timestamp // << using protobuf definition
+    BlockID   BlockID   // << as already defined
+    ChainID   string    // at the end because length could vary a lot
+}
+
+// this is an amino registered type; like currently privval.SignVoteMsg:
+// registered with "tendermint/socketpv/SignVoteRequest"
+message SignVoteRequest {
+   Vote vote
+}
+
+// amino registered type
+// registered with "tendermint/socketpv/SignedVoteReply"
+message SignedVoteReply {
+   Vote      Vote
+   Signature Signature
+   Err       Error
+}
+
+// we will use this type everywhere below
+message Error {
+  Type        uint   // error code
+  Description string // optional description
+}
+
+```
+
+The `ChainID` gets moved into the vote message directly. Previously, it was injected
+using the [Signable] interface method `SignBytes(chainID string) []byte`. Also, the
+signature won't be included directly, only in the corresponding `SignedVoteReply` message.
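+
+For illustration, a remote signer's handling of a `SignVoteRequest` might then
+look roughly like the following Go sketch (the handler itself, the `cdc` codec
+handle, and the key type are assumptions for illustration, not part of this
+ADR):
+
+```go
+// Sketch only: amino-encode the vote (which now includes the ChainID)
+// and reply with either a signature or an error.
+func handleSignVoteRequest(req SignVoteRequest, key crypto.PrivKey) SignedVoteReply {
+    bz, err := cdc.MarshalBinary(req.Vote)
+    if err != nil {
+        return SignedVoteReply{Err: Error{Type: 1, Description: err.Error()}}
+    }
+    sig, err := key.Sign(bz)
+    if err != nil {
+        return SignedVoteReply{Err: Error{Type: 2, Description: err.Error()}}
+    }
+    return SignedVoteReply{Vote: req.Vote, Signature: sig}
+}
+```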
+
+[Signable]: https://github.com/tendermint/tendermint/blob/d419fffe18531317c28c29a292ad7d253f6cafdf/types/signable.go#L9-L11
+
+### Proposal
+
+```proto
+// vanilla protobuf / amino encoded
+message Proposal {
+    Height           sfixed64
+    Round            sfixed32
+    Timestamp        Timestamp     // << using protobuf definition
+    BlockPartsHeader PartSetHeader // as already defined
+    POLRound         sfixed32
+    POLBlockID       BlockID       // << as already defined
+}
+
+// amino registered with "tendermint/socketpv/SignProposalRequest"
+message SignProposalRequest {
+   Proposal proposal
+}
+
+// amino registered with "tendermint/socketpv/SignProposalReply"
+message SignProposalReply {
+   Prop Proposal
+   Sig  Signature
+   Err  Error // as defined above
+}
+```
+
+### Heartbeat
+
+**TODO**: clarify if heartbeat also needs a fixed offset and update the fields accordingly:
+
+```proto
+message Heartbeat {
+	ValidatorAddress Address
+	ValidatorIndex   int
+	Height           int64
+	Round            int
+	Sequence         int
+}
+// amino registered with "tendermint/socketpv/SignHeartbeatRequest"
+message SignHeartbeatRequest {
+   Hb Heartbeat
+}
+
+// amino registered with "tendermint/socketpv/SignHeartbeatReply"
+message SignHeartbeatReply {
+   Hb  Heartbeat
+   Sig Signature
+   Err Error // as defined above
+}
+
+```
+
+## PubKey
+
+TBA - this needs further thought: e.g., what to do in the case of the KMS, which holds
+several keys? How does it know which key to reply with?
+
+## SignBytes
+`SignBytes` will not require a `ChainID` parameter:
+
+```golang
+type Signable interface {
+	SignBytes() []byte
+}
+
+```
+And the implementation for vote, heartbeat, proposal will look like:
+```golang
+// type T is one of vote, heartbeat, proposal
+func (tp *T) SignBytes() []byte {
+	bz, err := cdc.MarshalBinary(tp)
+	if err != nil {
+		panic(err)
+	}
+	return bz
+}
+```
+
+## Status
+
+DRAFT
+
+## Consequences
+
+### Positive
+
+The most relevant positive effect is that the signing bytes can easily be parsed by a
+hardware module and a smart contract. Besides that:
+
+- clearer separation between requests and responses
+- added error messages enable better error handling
+
+
+### Negative
+
+- relatively large change / refactoring touching quite a lot of code
+- lots of places assume a `Vote` with a signature included -> they will need to be adapted
+- need to modify some interfaces
+
+### Neutral
+
+not even the Swiss are neutral
diff --git a/docs/architecture/adr-025-commit.md b/docs/architecture/adr-025-commit.md
new file mode 100644
index 00000000000..3f2527951e2
--- /dev/null
+++ b/docs/architecture/adr-025-commit.md
@@ -0,0 +1,75 @@
+# ADR 025: Commit
+
+## Context
+Currently the `Commit` structure contains a lot of potentially redundant or unnecessary data.
+In particular it contains an array of every precommit from the validators, which includes many copies of the same data, such as `Height`, `Round`, `Type`, and `BlockID`. Also, the `ValidatorIndex` could be derived from the vote's position in the array, and the `ValidatorAddress` could potentially be derived from runtime context. The only truly necessary data are the `Signature` and `Timestamp` associated with each `Vote`.
+
+```
+type Commit struct {
+    BlockID    BlockID `json:"block_id"`
+    Precommits []*Vote `json:"precommits"`
+}
+type Vote struct {
+    ValidatorAddress Address   `json:"validator_address"`
+    ValidatorIndex   int       `json:"validator_index"`
+    Height           int64     `json:"height"`
+    Round            int       `json:"round"`
+    Timestamp        time.Time `json:"timestamp"`
+    Type             byte      `json:"type"`
+    BlockID          BlockID   `json:"block_id"`
+    Signature        []byte    `json:"signature"`
+}
+```
+References:
+[#1648](https://github.com/tendermint/tendermint/issues/1648)
+[#2179](https://github.com/tendermint/tendermint/issues/2179)
+[#2226](https://github.com/tendermint/tendermint/issues/2226)
+
+## Proposed Solution
+We can improve efficiency by replacing the usage of the `Vote` struct with a subset of each vote, and by storing the constant values (`Height`, `Round`, `BlockID`) in the Commit itself.
+```
+type Commit struct {
+    Height     int64
+    Round      int
+    BlockID    BlockID      `json:"block_id"`
+    Precommits []*CommitSig `json:"precommits"`
+}
+type CommitSig struct {
+    ValidatorAddress Address
+    Signature        []byte
+    Timestamp        time.Time
+}
+```
+Continuing to store the `ValidatorAddress` in the `CommitSig` takes up extra space, but simplifies the process and allows for easier debugging.
+
+## Status
+Proposed
+
+## Consequences
+
+### Positive
+The size of a `Commit` transmitted over the network goes from:
+
+|BlockID| + n * (|Address| + |ValidatorIndex| + |Height| + |Round| + |Timestamp| + |Type| + |BlockID| + |Signature|)
+
+to:
+
+|BlockID| + |Height| + |Round| + n * (|Address| + |Signature| + |Timestamp|)
+
+This saves:
+
+n * (|BlockID| + |ValidatorIndex| + |Type|) + (n-1) * (|Height| + |Round|)
+
+In the current context, this would concretely be:
+(assuming all ints are int64, and hashes are 32 bytes)
+
+n * (72 + 8 + 1 + 8 + 8) - 16 = n * 97 - 16
+
+With 100 validators this is a savings of almost 10KB on every block.
+
+### Negative
+This would add some complexity to the processing and verification of blocks and commits, as votes would have to be reconstructed to be verified and gossiped. The reconstruction could be relatively straightforward, only requiring the copying of data from the `Commit` itself into the newly created `Vote`.
+
+### Neutral
+This design leaves the `ValidatorAddress` in the `CommitSig` and in the `Vote`. These could be removed at some point for additional savings, but that would introduce more complexity, and make printing of `Commit` and `VoteSet` objects less informative, which could harm debugging efficiency and UI/UX.
\ No newline at end of file
diff --git a/docs/architecture/adr-026-general-merkle-proof.md b/docs/architecture/adr-026-general-merkle-proof.md
new file mode 100644
index 00000000000..af81947cb22
--- /dev/null
+++ b/docs/architecture/adr-026-general-merkle-proof.md
@@ -0,0 +1,47 @@
+# ADR 026: General Merkle Proof
+
+## Context
+
+We are using raw `[]byte` for Merkle proofs in `abci.ResponseQuery`. This makes it hard to handle multilayer Merkle proofs and other general cases. Here, a new interface, `ProofOperator`, is defined. Users can define their own Merkle proof formats and layer them easily.
+
+Goals:
+- Layer Merkle proofs without decoding/reencoding
+- Provide a general way to chain proofs
+- Make the proof format extensible, allowing third-party proof types
+
+## Decision
+
+### ProofOperator
+
+`type ProofOperator` is an interface for Merkle proofs. The definition is:
+
+```go
+type ProofOperator interface {
+	Run([][]byte) ([][]byte, error)
+	GetKey() []byte
+	ProofOp() ProofOp
+}
+```
+
+Since a proof can treat various data types, `Run()` takes `[][]byte` as the argument, not `[]byte`. For example, a range proof's `Run()` can take multiple key-values as its argument. It will then return the root of the tree for further processing, calculated from the input values.
+
+`ProofOperator` does not have to be a Merkle proof - it can be a function that transforms the argument as an intermediate step, e.g., prepending the length to the `[]byte`.
+
+### ProofOp
+
+`type ProofOp` is a protobuf message which is a triple of `Type string`, `Key []byte`, and `Data []byte`. `ProofOperator` and `ProofOp` are interconvertible, using `ProofOperator.ProofOp()` and `OpDecoder()`, where `OpDecoder` is a function that each proof type can register for their own encoding scheme. For example, we can add a byte for the encoding scheme before the serialized proof, supporting JSON decoding.
+
+## Status
+
+## Consequences
+
+### Positive
+
+- Layering becomes easier (no encoding/decoding at each step)
+- Third-party proof formats are available
+
+### Negative
+
+- Larger size for abci.ResponseQuery
+- Unintuitive proof chaining (it is not clear what `Run()` is doing)
+- Additional code for registering `OpDecoder`s
diff --git a/docs/architecture/adr-029-check-tx-consensus.md b/docs/architecture/adr-029-check-tx-consensus.md
new file mode 100644
index 00000000000..c1b882c61c0
--- /dev/null
+++ b/docs/architecture/adr-029-check-tx-consensus.md
@@ -0,0 +1,128 @@
+# ADR 029: Check block txs before prevote
+
+## Changelog
+
+04-10-2018: Update with link to issue
+[#2384](https://github.com/tendermint/tendermint/issues/2384) and reason for rejection
+19-09-2018: Initial Draft
+
+## Context
+
+We currently check a tx's validity in 2 ways.
+
+1. Through checkTx in the mempool connection.
+2. Through deliverTx in the consensus connection.
+
+The 1st is called when an external tx comes in, so the node should be a proposer this time. The 2nd is called when an external block comes in and reaches the commit phase; the node doesn't need to be the proposer of the block, but it should check the txs in that block.
+
+In the 2nd situation, if there are many invalid txs in the block, it would be too late for all nodes to discover that most txs in the block are invalid, and it would be better not to record invalid txs in the blockchain at all.
+
+## Proposed solution
+
+Therefore, we should find a way to check the txs' validity before sending out a prevote. Currently we have cs.isProposalComplete() to judge whether a block is complete. We can have
+
+```
+func (blockExec *BlockExecutor) CheckBlock(block *types.Block) error {
+	// check txs of block.
+	for _, tx := range block.Txs {
+		reqRes := blockExec.proxyApp.CheckTxAsync(tx)
+		reqRes.Wait()
+		if reqRes.Response == nil || reqRes.Response.GetCheckTx() == nil || reqRes.Response.GetCheckTx().Code != abci.CodeTypeOK {
+			return errors.Errorf("tx %v check failed. response: %v", tx, reqRes.Response)
+		}
+	}
+	return nil
+}
+```
+
+such a method in BlockExecutor to check all txs' validity in that block.
+
+However, this method should not be implemented like that, because checkTx shares the same state the mempool uses in the app. So we should define a new interface method checkBlock in Application to tell the app to use the same state as deliverTx.
+
+```
+type Application interface {
+	// Info/Query Connection
+	Info(RequestInfo) ResponseInfo                // Return application info
+	SetOption(RequestSetOption) ResponseSetOption // Set application option
+	Query(RequestQuery) ResponseQuery             // Query for state
+
+	// Mempool Connection
+	CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool
+
+	// Consensus Connection
+	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
+	CheckBlock(RequestCheckBlock) ResponseCheckBlock
+	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
+	DeliverTx(tx []byte) ResponseDeliverTx           // Deliver a tx for full processing
+	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
+	Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash
+}
+```
+
+All apps should implement that method. For example, the counter app:
+
+```
+func (app *CounterApplication) CheckBlock(block types.Request_CheckBlock) types.ResponseCheckBlock {
+	if app.serial {
+		app.originalTxCount = app.txCount // backup the txCount state
+		for _, tx := range block.CheckBlock.Block.Txs {
+			if len(tx) > 8 {
+				return types.ResponseCheckBlock{
+					Code: code.CodeTypeEncodingError,
+					Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
+			}
+			tx8 := make([]byte, 8)
+			copy(tx8[len(tx8)-len(tx):], tx)
+			txValue := binary.BigEndian.Uint64(tx8)
+			if txValue < uint64(app.txCount) {
+				return types.ResponseCheckBlock{
+					Code: code.CodeTypeBadNonce,
+					Log:  fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
+			}
+			app.txCount++
+		}
+	}
+	return types.ResponseCheckBlock{Code: code.CodeTypeOK}
+}
+```
+
+In the deliver phase, the app should restore the state to the original state backed up before checking the block:
+
+```
+func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+	if app.serial {
+		app.txCount = app.originalTxCount // restore the txCount state
+	}
+	app.txCount++
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
+}
+```
+
+The txCount is like the nonce in ethermint: it should be restored when entering the deliverTx phase, while some operations, like checking the tx signature, need not be done again. So deliverTx can focus on how a tx is applied, ignoring the checking of the tx, because all the checking has already been done in the checkBlock phase before.
+
+An optional optimization is to alter deliverTx into deliverBlock. Since the block has already been checked by checkBlock, all the txs in it are valid, so the app can cache the block, and in the deliverBlock phase it just needs to apply the cached block. This optimization can save network traffic in deliverTx.
+
+
+
+## Status
+
+Rejected
+
+## Decision
+
+Performance impact is considered too great. See [#2384](https://github.com/tendermint/tendermint/issues/2384)
+
+## Consequences
+
+### Positive
+
+- more robust to defend against an adversary proposing a block full of invalid txs
+
+### Negative
+
+- adds a new interface method; app logic needs to be adjusted to accommodate it
+- sending all the tx data over the ABCI twice
+- potentially redundant validations (e.g. signature checks in both CheckBlock and
+  DeliverTx)
+
+### Neutral
diff --git a/docs/architecture/adr-030-consensus-refactor.md b/docs/architecture/adr-030-consensus-refactor.md
new file mode 100644
index 00000000000..d48cfe103a3
--- /dev/null
+++ b/docs/architecture/adr-030-consensus-refactor.md
@@ -0,0 +1,152 @@
+# ADR 030: Consensus Refactor
+
+## Context
+
+One of the biggest challenges this project faces is to prove that the
+implementations of the specifications are correct. Much like we strive to
+formally verify our algorithms and protocols, we should work towards high
+confidence about the correctness of our program code. One of those is the core
+of Tendermint - Consensus - which currently resides in the `consensus` package.
+Over time there has been high friction making changes to the package due to the
+algorithm being scattered in a side-effectful container (the current
+`ConsensusState`). In order to test the algorithm a large object-graph needs to
+be set up, and even then the non-deterministic parts of the container prevent
+high certainty. Ideally, we would have a 1-to-1 representation of the
+[spec](https://github.com/tendermint/spec), ready and easy to test for domain
+experts.
+
+Addresses:
+
+- [#1495](https://github.com/tendermint/tendermint/issues/1495)
+- [#1692](https://github.com/tendermint/tendermint/issues/1692)
+
+## Decision
+
+To remedy these issues we plan a gradual, non-invasive refactoring of the
+`consensus` package. We start off by isolating the consensus algorithm into
+a pure function and a finite state machine to address the most pressing issue:
+lack of confidence. We do so while leaving the rest of the package intact,
+with follow-up optional changes to improve the separation of concerns.
+
+### Implementation changes
+
+The core of Consensus can be modelled as a function with clearly defined inputs:
+
+* `State` - data container for current round, height, etc.
+* `Event` - significant events in the network
+
+producing clear outputs:
+
+* `State` - updated input
+* `Message` - signal what actions to perform
+
+```go
+type Event int
+
+const (
+	EventUnknown Event = iota
+	EventProposal
+	Majority23PrevotesBlock
+	Majority23PrecommitBlock
+	Majority23PrevotesAny
+	Majority23PrecommitAny
+	TimeoutNewRound
+	TimeoutPropose
+	TimeoutPrevotes
+	TimeoutPrecommit
+)
+
+type Message int
+
+const (
+	MessageUnknown Message = iota
+	MessageProposal
+	MessageVotes
+	MessageDecision
+)
+
+type State struct {
+	height      uint64
+	round       uint64
+	step        uint64
+	lockedValue interface{} // TODO: Define proper type.
+	lockedRound interface{} // TODO: Define proper type.
+	validValue  interface{} // TODO: Define proper type.
+	validRound  interface{} // TODO: Define proper type.
+	// From the original notes: valid(v)
+	valid interface{} // TODO: Define proper type.
+	// From the original notes: proposer(h, r)
+	proposer interface{} // TODO: Define proper type.
+}
+
+func Consensus(Event, State) (State, Message) {
+	// Consolidate implementation.
+}
+```
+
+Tracking of relevant information to feed `Event` into the function and act on
+the output is left to the `ConsensusExecutor` (formerly `ConsensusState`).
+
+The benefits for testing surface nicely, as testing a sequence of events
+against the algorithm could be as simple as the following example:
+
+``` go
+func TestConsensusXXX(t *testing.T) {
+	type expected struct {
+		message Message
+		state   State
+	}
+
+	// Setup order of events, initial state and expectation.
+	var (
+		events = []struct {
+			event Event
+			want  expected
+		}{
+			// ...
+		}
+		state = State{
+			// ...
+		}
+	)
+
+	for _, e := range events {
+		var msg Message
+		state, msg = Consensus(e.event, state)
+
+		// Test message expectation.
+		if msg != e.want.message {
+			t.Fatalf("have %v, want %v", msg, e.want.message)
+		}
+
+		// Test state expectation.
+		if !reflect.DeepEqual(state, e.want.state) {
+			t.Fatalf("have %v, want %v", state, e.want.state)
+		}
+	}
+}
+```
+
+### Implementation roadmap
+
+* implement the proposed implementation
+* replace currently scattered calls in `ConsensusState` with calls to the new
+  `Consensus` function
+* rename `ConsensusState` to `ConsensusExecutor` to avoid confusion
+* propose design for improved separation and clear information flow between
+  `ConsensusExecutor` and `ConsensusReactor`
+
+## Status
+
+Draft.
+
+## Consequences
+
+### Positive
+
+- isolated implementation of the algorithm
+- improved testability - simpler to prove correctness
+- clearer separation of concerns - easier to reason about
+
+### Negative
+
+### Neutral
diff --git a/docs/architecture/adr-033-pubsub.md b/docs/architecture/adr-033-pubsub.md
new file mode 100644
index 00000000000..0ef0cae6289
--- /dev/null
+++ b/docs/architecture/adr-033-pubsub.md
@@ -0,0 +1,122 @@
+# ADR 033: pubsub 2.0
+
+Author: Anton Kaliaev (@melekes)
+
+## Changelog
+
+02-10-2018: Initial draft
+
+## Context
+
+Since the initial version of the pubsub, there have been a number of issues
+raised: #951, #1879, #1880. Some of them are high-level issues questioning the
+core design choices made. Others are minor and mostly about the interface of
+`Subscribe()` / `Publish()` functions.
+
+### Sync vs Async
+
+Now, when publishing a message to subscribers, we can do it in a goroutine:
+
+_using channels for data transmission_
+```go
+for each subscriber {
+    out := subscriber.outc
+    go func() {
+        out <- msg
+    }()
+}
+```
+
+_by invoking callback functions_
+```go
+for each subscriber {
+    go subscriber.callbackFn()
+}
+```
+
+This gives us greater performance and allows us to avoid the "slow client
+problem" (when other subscribers have to wait for a slow subscriber). A pool of
+goroutines can be used to avoid uncontrolled memory growth.
+
+In certain cases, this is what you want. But in our case, because we need
+strict ordering of events (if event A was published before B, the guaranteed
+delivery order will be A -> B), we can't use goroutines.
+
+There is also a question of whether we should have a non-blocking send:
+
+```go
+for each subscriber {
+    out := subscriber.outc
+    select {
+    case out <- msg:
+    default:
+        log("subscriber %v buffer is full, skipping...")
+    }
+}
+```
+
+This fixes the "slow client problem", but there is no way for a slow client to
+know if it had missed a message. On the other hand, if we're going to stick
+with blocking send, **devs must always ensure subscriber's handling code does not
+block**. As you can see, there is an implicit choice between ordering guarantees
+and using goroutines.
+
+The interim option is to run a pool of goroutines for a single message and wait
+for all of them to finish. This would solve the "slow client problem", but we'd
+still have to wait `max(goroutine_X_time)` before we can publish the next message.
+My opinion: not worth doing.
+
+### Channels vs Callbacks
+
+Yet another question is whether we should use channels for message transmission or
+call subscriber-defined callback functions. Callback functions give subscribers
+more flexibility - you can use mutexes in there, channels, spawn goroutines,
+anything you really want. But they also carry local scope, which can result in
+memory leaks and/or memory usage increase.
+
+Go channels are the de-facto standard for carrying data between goroutines.
+
+**Question: Is it worth switching to callback functions?**
+
+### Why does `Subscribe()` accept an `out` channel?
+
+Because in our tests, we create buffered channels (cap: 1). Alternatively, we
+can make capacity an argument.
+
+## Decision
+
+Change the `Subscribe()` function to return an out channel:
+
+```go
+// outCap can be used to set capacity of out channel (unbuffered by default).
+Subscribe(ctx context.Context, clientID string, query Query, outCap ...int) (out <-chan interface{}, err error)
+```
+
+It's more idiomatic since we're closing it during Unsubscribe/UnsubscribeAll calls.
+
+Also, we should make tags available to subscribers:
+
+```go
+type MsgAndTags struct {
+    Msg  interface{}
+    Tags TagMap
+}
+
+// outCap can be used to set capacity of out channel (unbuffered by default).
+Subscribe(ctx context.Context, clientID string, query Query, outCap ...int) (out <-chan MsgAndTags, err error)
+```
+
+## Status
+
+In review
+
+## Consequences
+
+### Positive
+
+- more idiomatic interface
+- subscribers know what tags a msg was published with
+
+### Negative
+
+### Neutral
diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md
index 2303490ad29..4879afc408a 100644
--- a/docs/architecture/adr-template.md
+++ b/docs/architecture/adr-template.md
@@ -1,12 +1,15 @@
 # ADR 000: Template for an ADR
 
+Author:
+
+## Changelog
+
 ## Context
 
 ## Decision
 
 ## Status
 
-
 ## Consequences
 
 ### Positive
diff --git a/docs/conf.py b/docs/conf.py
deleted file mode 100644
index 0cfc05cdfa0..00000000000
--- a/docs/conf.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Tendermint documentation build configuration file, created by
-# sphinx-quickstart on Mon Aug 7 04:55:09 2017.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-import urllib
-
-import sphinx_rtd_theme
-
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = []
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-#
-
-from recommonmark.parser import CommonMarkParser
-
-source_parsers = {
-    '.md': CommonMarkParser,
-}
-
-source_suffix = ['.rst', '.md']
-#source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'Tendermint' -copyright = u'2018, The Authors' -author = u'Tendermint' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'' -# The full version, including alpha/beta/rc tags. -release = u'' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' -# html_theme = 'alabaster' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - '**': [ - 'about.html', - 'navigation.html', - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', - 'donate.html', - ] -} - - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Tendermintdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'Tendermint.tex', u'Tendermint Documentation', - u'The Authors', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'Tendermint', u'Tendermint Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'Tendermint', u'Tendermint Documentation', - author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.', - 'Database'), -] - -# ---------------- customizations ---------------------- - -# for Docker README, below -from shutil import copyfile - -# tm-bench and tm-monitor -tools_repo = "https://raw.githubusercontent.com/tendermint/tools/" -tools_branch = "master" - -tools_dir = "./tools" - - -if os.path.isdir(tools_dir) != True: - os.mkdir(tools_dir) - -copyfile('../DOCKER/README.md', tools_dir+'/docker.md') - -urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.md', filename=tools_dir+'/benchmarking.md') -urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.md', filename=tools_dir+'/monitoring.md') diff --git a/docs/assets/a_plus_t.png b/docs/imgs/a_plus_t.png similarity index 100% rename from docs/assets/a_plus_t.png rename to docs/imgs/a_plus_t.png diff --git a/docs/assets/abci.png b/docs/imgs/abci.png similarity index 100% rename from docs/assets/abci.png rename to docs/imgs/abci.png diff --git a/docs/assets/consensus_logic.png b/docs/imgs/consensus_logic.png similarity index 100% rename from docs/assets/consensus_logic.png rename to docs/imgs/consensus_logic.png diff --git a/docs/assets/tm-application-example.png b/docs/imgs/tm-application-example.png similarity index 100% rename from docs/assets/tm-application-example.png rename to docs/imgs/tm-application-example.png diff --git a/docs/assets/tm-transaction-flow.png b/docs/imgs/tm-transaction-flow.png similarity index 100% rename from docs/assets/tm-transaction-flow.png rename to docs/imgs/tm-transaction-flow.png diff --git a/docs/assets/tmint-logo-blue.png b/docs/imgs/tmint-logo-blue.png similarity index 100% rename from docs/assets/tmint-logo-blue.png rename to docs/imgs/tmint-logo-blue.png diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index bafbec3543e..00000000000 --- a/docs/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. Tendermint documentation master file, created by - sphinx-quickstart on Mon Aug 7 04:55:09 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Tendermint! -====================== - -This location for our documentation has been deprecated, please see: - -- https://tendermint.com/docs/ - -The last version built by Read The Docs will still be available at: - -- https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/ diff --git a/docs/introduction/README.md b/docs/introduction/README.md new file mode 100644 index 00000000000..7f3f97a279b --- /dev/null +++ b/docs/introduction/README.md @@ -0,0 +1,15 @@ +# Overview + +## Quick Start + +Get Tendermint up-and-running quickly with the [quick-start guide](./quick-start.md)! + +## Install + +Detailed [installation instructions](./install.md). + +## What is Tendermint? + +Dive into [what Tendermint is and why](./what-is-tendermint.md)! + + diff --git a/docs/introduction/install.md b/docs/introduction/install.md index d026911025a..f3498514c12 100644 --- a/docs/introduction/install.md +++ b/docs/introduction/install.md @@ -5,7 +5,7 @@ is to run [this script](https://github.com/tendermint/tendermint/blob/develop/sc a fresh Ubuntu instance, or [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh) on a fresh FreeBSD instance. 
Read the comments / instructions carefully (i.e., reset your terminal after running the script, -make sure your okay with the network connections being made). +make sure you are okay with the network connections being made). ## From Binary @@ -48,6 +48,15 @@ to put the binary in `./build`. The latest `tendermint version` is now installed. +## Run + +To start a one-node blockchain with a simple in-process application: + +``` +tendermint init +tendermint node --proxy_app=kvstore +``` + ## Reinstall If you already have Tendermint installed, and you make updates, simply @@ -66,11 +75,42 @@ make get_vendor_deps make install ``` -## Run +## Compile with CLevelDB support -To start a one-node blockchain with a simple in-process application: +Install [LevelDB](https://github.com/google/leveldb) (minimum version is 1.7). + +Build Tendermint with C libraries: `make build_c`. + +### Ubuntu + +Install LevelDB with snappy: ``` -tendermint init -tendermint node --proxy_app=kvstore +sudo apt-get update +sudo apt install build-essential + +sudo apt-get install libsnappy-dev + +wget https://github.com/google/leveldb/archive/v1.20.tar.gz && \ + tar -zxvf v1.20.tar.gz && \ + cd leveldb-1.20/ && \ + make && \ + cp -r out-static/lib* out-shared/lib* /usr/local/lib/ && \ + cd include/ && \ + cp -r leveldb /usr/local/include/ && \ + sudo ldconfig && \ + rm -f v1.20.tar.gz +``` + +Set database backend to cleveldb: + +``` +# config/config.toml +db_backend = "cleveldb" +``` + +To build Tendermint, run + +``` +CGO_LDFLAGS="-lsnappy" go build -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" -tags "tendermint gcc" -o build/tendermint ./cmd/tendermint/ ``` diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md index d43fa9b2d00..f80a159cafa 100644 --- a/docs/introduction/introduction.md +++ b/docs/introduction/introduction.md @@ -1,5 +1,7 @@ # What is Tendermint? +DEPRECATED! See [What is Tendermint?](what-is-tendermint.md). + Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, @@ -90,7 +92,7 @@ it can be used as a plug-and-play replacement for the consensus engines of other blockchain software. So one can take the current Ethereum code base, whether in Rust, or Go, or Haskell, and run it as a ABCI application using Tendermint consensus. Indeed, [we did that with -Ethereum](https://github.com/tendermint/ethermint). And we plan to do +Ethereum](https://github.com/cosmos/ethermint). And we plan to do the same for Bitcoin, ZCash, and various other deterministic applications as well. @@ -227,7 +229,7 @@ design their message handlers to create a blockchain that does anything useful but this architecture provides a place to start. The diagram below illustrates the flow of messages via ABCI. -![](assets/abci.png) +![](../imgs/abci.png) ## A Note on Determinism @@ -263,7 +265,7 @@ Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus protocol. The protocol follows a simple state machine that looks like this: -![](assets/consensus_logic.png) +![](../imgs/consensus_logic.png) Participants in the protocol are called **validators**; they take turns proposing blocks of transactions and voting on them. Blocks are @@ -321,7 +323,7 @@ consensus protocol. 
This adds an economic element to the security of the protocol, allowing one to quantify the cost of violating the assumption that less than one-third of voting power is Byzantine. -The [Cosmos Network](http://cosmos.network) is designed to use this +The [Cosmos Network](https://cosmos.network) is designed to use this Proof-of-Stake mechanism across an array of cryptocurrencies implemented as ABCI applications. @@ -329,4 +331,4 @@ The following diagram is Tendermint in a (technical) nutshell. [See here for high resolution version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). -![](assets/tm-transaction-flow.png) +![](../imgs/tm-transaction-flow.png) diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 8e4908784c3..05facadf4f4 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -1,4 +1,4 @@ -# Tendermint +# Quick Start ## Overview @@ -9,42 +9,21 @@ works and want to get started right away, continue. ### Quick Install -On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so: +To quickly get Tendermint installed on a fresh +Ubuntu 16.04 machine, use [this script](https://git.io/fFfOR). + +WARNING: do not run this on your local machine. ``` curl -L https://git.io/fFfOR | bash source ~/.profile ``` -WARNING: do not run the above on your local machine. - The script is also used to facilitate cluster deployment below. ### Manual Install -Requires: - -- `go` minimum version 1.10 -- `$GOPATH` environment variable must be set -- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH) - -To install Tendermint, run: - -``` -go get github.com/tendermint/tendermint -cd $GOPATH/src/github.com/tendermint/tendermint -make get_tools && make get_vendor_deps -make install -``` - -Note that `go get` may return an error but it can be ignored. - -Confirm installation: - -``` -$ tendermint version -0.23.0-dev -``` +For manual installation, see the [install instructions](install.md) ## Initialization @@ -142,8 +121,6 @@ tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_pe Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. -Seeds can also be specified in the `config.toml`. See [this -PR](https://github.com/tendermint/tendermint/pull/792) for more information -about configuration options. +Seeds can also be specified in the `config.toml`. See [here](../tendermint-core/configuration.md) for more information about configuration options. Transactions can then be sent as covered in the single, local node example above. diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md new file mode 100644 index 00000000000..389bf96584c --- /dev/null +++ b/docs/introduction/what-is-tendermint.md @@ -0,0 +1,332 @@ +# What is Tendermint? + +Tendermint is software for securely and consistently replicating an +application on many machines. By securely, we mean that Tendermint works +even if up to 1/3 of machines fail in arbitrary ways. By consistently, +we mean that every non-faulty machine sees the same transaction log and +computes the same state. 
Secure and consistent replication is a
+fundamental problem in distributed systems; it plays a critical role in
+the fault tolerance of a broad range of applications, from currencies,
+to elections, to infrastructure orchestration, and beyond.
+
+The ability to tolerate machines failing in arbitrary ways, including
+becoming malicious, is known as Byzantine fault tolerance (BFT). The
+theory of BFT is decades old, but software implementations have only
+become popular recently, due largely to the success of "blockchain
+technology" like Bitcoin and Ethereum. Blockchain technology is just a
+reformalization of BFT in a more modern setting, with emphasis on
+peer-to-peer networking and cryptographic authentication. The name
+derives from the way transactions are batched in blocks, where each
+block contains a cryptographic hash of the previous one, forming a
+chain. In practice, the blockchain data structure actually optimizes BFT
+design.
+
+Tendermint consists of two chief technical components: a blockchain
+consensus engine and a generic application interface. The consensus
+engine, called Tendermint Core, ensures that the same transactions are
+recorded on every machine in the same order. The application interface,
+called the Application BlockChain Interface (ABCI), enables the
+transactions to be processed in any programming language. Unlike other
+blockchain and consensus solutions, which come pre-packaged with
+built-in state machines (like a fancy key-value store, or a quirky
+scripting language), developers can use Tendermint for BFT state machine
+replication of applications written in whatever programming language and
+development environment is right for them.
+
+Tendermint is designed to be easy-to-use, simple-to-understand, highly
+performant, and useful for a wide variety of distributed applications.
+
+## Tendermint vs. X
+
+Tendermint is broadly similar to two classes of software. The first
+class consists of distributed key-value stores, like Zookeeper, etcd,
+and consul, which use non-BFT consensus. The second class is known as
+"blockchain technology", and consists of both cryptocurrencies like
+Bitcoin and Ethereum, and alternative distributed ledger designs like
+Hyperledger's Burrow.
+
+### Zookeeper, etcd, consul
+
+Zookeeper, etcd, and consul are all implementations of a key-value store
+atop a classical, non-BFT consensus algorithm. Zookeeper uses a version
+of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use
+the Raft consensus algorithm, which is much younger and simpler. A
+typical cluster contains 3-5 machines, and can tolerate crash failures
+in up to 1/2 of the machines, but even a single Byzantine fault can
+destroy the system.
+
+Each offering provides a slightly different implementation of a
+featureful key-value store, but all are generally focused around
+providing basic services to distributed systems, such as dynamic
+configuration, service discovery, locking, leader-election, and so on.
+
+Tendermint is in essence similar software, but with two key differences:
+
+- It is Byzantine Fault Tolerant, meaning it can only tolerate up to 1/3
+  of machines failing, but those failures can include arbitrary
+  behaviour - including hacking and malicious attacks.
+- It does not specify a particular application, like a fancy key-value
+  store. Instead, it focuses on arbitrary state machine replication, so
+  developers can build the application logic that's right for them, from
+  key-value store to cryptocurrency to e-voting platform and beyond.
+
+The layout of this Tendermint website content is also ripped directly
+and without shame from [consul.io](https://www.consul.io/) and the other
+[Hashicorp sites](https://www.hashicorp.com/#tools).
+
+### Bitcoin, Ethereum, etc.
+
+Tendermint emerged in the tradition of cryptocurrencies like Bitcoin,
+Ethereum, etc. with the goal of providing a more efficient and secure
+consensus algorithm than Bitcoin's Proof of Work. In the early days,
+Tendermint had a simple currency built in, and to participate in
+consensus, users had to "bond" units of the currency into a security
+deposit which could be revoked if they misbehaved - this is what made
+Tendermint a Proof-of-Stake algorithm.
+
+Since then, Tendermint has evolved to be a general purpose blockchain
+consensus engine that can host arbitrary application states. That means
+it can be used as a plug-and-play replacement for the consensus engines
+of other blockchain software. So one can take the current Ethereum code
+base, whether in Rust, or Go, or Haskell, and run it as an ABCI
+application using Tendermint consensus. Indeed, [we did that with
+Ethereum](https://github.com/cosmos/ethermint). And we plan to do
+the same for Bitcoin, ZCash, and various other deterministic
+applications as well.
+
+Another example of a cryptocurrency application built on Tendermint is
+[the Cosmos network](http://cosmos.network).
+
+### Other Blockchain Projects
+
+[Fabric](https://github.com/hyperledger/fabric) takes a similar approach
+to Tendermint, but is more opinionated about how the state is managed,
+and requires that all application behaviour runs in potentially many
+docker containers, modules it calls "chaincode". It uses an
+implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
+from a team at IBM that is [augmented to handle potentially
+non-deterministic
+chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is
+possible to implement this docker-based behaviour as an ABCI app in
+Tendermint, though extending Tendermint to handle non-determinism
+remains for future work.
+
+[Burrow](https://github.com/hyperledger/burrow) is an implementation of
+the Ethereum Virtual Machine and Ethereum transaction mechanics, with
+additional features for a name-registry, permissions, and native
+contracts, and an alternative blockchain API. It uses Tendermint as its
+consensus engine, and provides a particular application state.
+
+## ABCI Overview
+
+The [Application BlockChain Interface
+(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
+allows for Byzantine Fault Tolerant replication of applications
+written in any programming language.
+
+### Motivation
+
+Thus far, all blockchain "stacks" (such as
+[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic
+design. That is, each blockchain stack is a single program that handles
+all the concerns of a decentralized ledger; this includes P2P
+connectivity, the "mempool" broadcasting of transactions, consensus on
+the most recent block, account balances, Turing-complete contracts,
+user-level permissions, etc.
+
+Using a monolithic architecture is typically bad practice in computer
+science. It makes it difficult to reuse components of the code, and
+attempts to do so result in complex maintenance procedures for forks of
+the codebase. This is especially true when the codebase is not modular
+in design and suffers from "spaghetti code".
+
+Another problem with monolithic design is that it limits you to the
+language of the blockchain stack (or vice versa). In the case of
+Ethereum, which supports a Turing-complete bytecode virtual machine, it
+limits you to languages that compile down to that bytecode; today, those
+are Serpent and Solidity.
+
+In contrast, our approach is to decouple the consensus engine and P2P
+layers from the details of the application state of the particular
+blockchain application. We do this by abstracting away the details of
+the application to an interface, which is implemented as a socket
+protocol.
+
+Thus we have an interface, the Application BlockChain Interface (ABCI),
+and its primary implementation, the Tendermint Socket Protocol (TSP, or
+Teaspoon).
+
+### Intro to ABCI
+
+[Tendermint Core](https://github.com/tendermint/tendermint) (the
+"consensus engine") communicates with the application via a socket
+protocol that satisfies the ABCI.
+
+To draw an analogy, let's talk about a well-known cryptocurrency,
+Bitcoin. Bitcoin is a cryptocurrency blockchain where each node
+maintains a fully audited Unspent Transaction Output (UTXO) database. If
+one wanted to create a Bitcoin-like system on top of ABCI, Tendermint
+Core would be responsible for
+
+- Sharing blocks and transactions between nodes
+- Establishing a canonical/immutable order of transactions
+  (the blockchain)
+
+The application would be responsible for
+
+- Maintaining the UTXO database
+- Validating cryptographic signatures of transactions
+- Preventing transactions from spending non-existent transaction outputs
+- Allowing clients to query the UTXO database.
+
+Tendermint is able to decompose the blockchain design by offering a very
+simple API (i.e. the ABCI) between the application process and the
+consensus process.
+
+The ABCI consists of 3 primary message types that get delivered from the
+core to the application. The application replies with corresponding
+response messages.
+
+The messages are specified here: [ABCI Message
+Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
+
+The **DeliverTx** message is the workhorse of the application. Each
+transaction in the blockchain is delivered with this message. The
+application needs to validate each transaction received with the
+**DeliverTx** message against the current state, application protocol,
+and the cryptographic credentials of the transaction. A validated
+transaction then needs to update the application state, for instance by
+binding a value into a key-value store, or by updating the UTXO
+database.
+
+The **CheckTx** message is similar to **DeliverTx**, but it's only for
+validating transactions. Tendermint Core's mempool first checks the
+validity of a transaction with **CheckTx**, and only relays valid
+transactions to its peers. For instance, an application may check an
+incrementing sequence number in the transaction and return an error upon
+**CheckTx** if the sequence number is old. Alternatively, it might use a
+capability-based system that requires capabilities to be renewed with
+every transaction.
+
+The **Commit** message is used to compute a cryptographic commitment to
+the current application state, to be placed into the next block header.
+This has some handy properties. Inconsistencies in updating that state
+will now appear as blockchain forks, which catches a whole class of
+programming errors. This also simplifies the development of secure
+lightweight clients, as Merkle-hash proofs can be verified by checking
+against the block hash, and the block hash is signed by a quorum.
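+
+To make these message types concrete, the following is a minimal sketch
+of an ABCI-style application in Go. It deliberately avoids the real
+`abci` package: the `Application` interface, the `kvStore` type, and the
+toy `key=value` transaction format are assumptions made for
+illustration, not Tendermint's actual API.
+
+```
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"sort"
+)
+
+// Illustrative stand-in for the three ABCI messages discussed above;
+// the real interface has more methods and richer response types.
+type Application interface {
+	CheckTx(tx []byte) error   // validate only (mempool admission)
+	DeliverTx(tx []byte) error // validate and apply to the state
+	Commit() []byte            // hash committing to the current state
+}
+
+// kvStore applies transactions of the assumed form "key=value".
+type kvStore struct {
+	state map[string]string
+}
+
+func (app *kvStore) CheckTx(tx []byte) error {
+	if !bytes.Contains(tx, []byte("=")) {
+		return fmt.Errorf("invalid tx %q: want key=value", tx)
+	}
+	return nil
+}
+
+func (app *kvStore) DeliverTx(tx []byte) error {
+	if err := app.CheckTx(tx); err != nil {
+		return err
+	}
+	kv := bytes.SplitN(tx, []byte("="), 2)
+	app.state[string(kv[0])] = string(kv[1])
+	return nil
+}
+
+// Commit hashes the state over sorted keys so that every replica
+// computes the same app hash for the next block header.
+func (app *kvStore) Commit() []byte {
+	keys := make([]string, 0, len(app.state))
+	for k := range app.state {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	h := sha256.New()
+	for _, k := range keys {
+		h.Write([]byte(k + "=" + app.state[k] + ";"))
+	}
+	return h.Sum(nil)
+}
+
+func main() {
+	var app Application = &kvStore{state: map[string]string{}}
+	if err := app.DeliverTx([]byte("name=satoshi")); err != nil {
+		panic(err)
+	}
+	fmt.Printf("app hash: %x\n", app.Commit())
+}
+```
+
+Note how `Commit` iterates in sorted key order: deterministic hashing is
+what lets inconsistent state updates surface as forks, as described
+above.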
+
+There can be multiple ABCI socket connections to an application.
+Tendermint Core creates three ABCI connections to the application: one
+for the validation of transactions when broadcasting in the mempool, one
+for the consensus engine to run block proposals, and one more for
+querying the application state.
+
+It's probably evident that application designers need to design their
+message handlers very carefully to create a blockchain that does
+anything useful, but this architecture provides a place to start. The
+diagram below illustrates the flow of messages via ABCI.
+
+![](../imgs/abci.png)
+
+## A Note on Determinism
+
+The logic for blockchain transaction processing must be deterministic.
+If the application logic weren't deterministic, consensus would not be
+reached among the Tendermint Core replica nodes.
+
+Solidity on Ethereum is a great choice of language for blockchain
+applications because, among other reasons, it is a completely
+deterministic programming language. However, it's also possible to
+create deterministic applications using existing popular languages like
+Java, C++, Python, or Go. Game programmers and blockchain developers are
+already familiar with creating deterministic programs by avoiding
+sources of non-determinism such as:
+
+- random number generators (without deterministic seeding)
+- race conditions on threads (or avoiding threads altogether)
+- the system clock
+- uninitialized memory (in unsafe programming languages like C
+  or C++)
+- [floating point
+  arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/)
+- language features that are random (e.g. map iteration in Go)
+
+While programmers can avoid non-determinism by being careful, it is also
+possible to create a special linter or static analyzer for each language
+to check for determinism. In the future we may work with partners to
+create such tools.
+
+## Consensus Overview
+
+Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus
+protocol. The protocol follows a simple state machine that looks like
+this:
+
+![](../imgs/consensus_logic.png)
+
+Participants in the protocol are called **validators**; they take turns
+proposing blocks of transactions and voting on them. Blocks are
+committed in a chain, with one block at each **height**. A block may
+fail to be committed, in which case the protocol moves to the next
+**round**, and a new validator gets to propose a block for that height.
+Two stages of voting are required to successfully commit a block; we
+call them **pre-vote** and **pre-commit**. A block is committed when
+more than 2/3 of validators pre-commit for the same block in the same
+round.
+
+There is a picture of a couple doing the polka because validators are
+doing something like a polka dance. When more than two-thirds of the
+validators pre-vote for the same block, we call that a **polka**. Every
+pre-commit must be justified by a polka in the same round.
+
+Validators may fail to commit a block for a number of reasons; the
+current proposer may be offline, or the network may be slow. Tendermint
+allows the remaining validators to establish that the current proposer
+should be skipped. Validators wait a small amount of time to receive a
+complete proposal block from the proposer before voting to move to the
+next round. This reliance on a timeout is what makes Tendermint a weakly
+synchronous protocol, rather than an asynchronous one. However, the rest
+of the protocol is asynchronous, and validators only make progress after
+hearing from more than two-thirds of the validator set. A simplifying
+element of Tendermint is that it uses the same mechanism to commit a
+block as it does to skip to the next round.
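+
+The 2/3 thresholds above are easy to check in code. The following
+illustrative Go snippet (the `Vote` type and the voting-power map are
+assumptions made for the example, not Tendermint's internal types)
+reports whether the votes for a single block exceed 2/3 of the total
+voting power; weighting by voting power anticipates the Stake section
+below.
+
+```
+package main
+
+import "fmt"
+
+// Vote records which validator voted for which block hash in a round;
+// an empty hash is a vote for "no block". At most one vote per
+// validator is assumed.
+type Vote struct {
+	Validator string
+	BlockHash string
+}
+
+// hasQuorum reports whether some block gathered strictly more than 2/3
+// of the total voting power: the polka condition for pre-votes, and
+// the commit condition for pre-commits.
+func hasQuorum(votes []Vote, power map[string]int64) (string, bool) {
+	var total int64
+	for _, p := range power {
+		total += p
+	}
+	tally := make(map[string]int64)
+	for _, v := range votes {
+		tally[v.BlockHash] += power[v.Validator]
+	}
+	for hash, sum := range tally {
+		if hash != "" && 3*sum > 2*total {
+			return hash, true
+		}
+	}
+	return "", false
+}
+
+func main() {
+	power := map[string]int64{"a": 10, "b": 10, "c": 10, "d": 10}
+	votes := []Vote{{"a", "H1"}, {"b", "H1"}, {"c", "H1"}, {"d", ""}}
+	if h, ok := hasQuorum(votes, power); ok {
+		fmt.Println("polka for block", h) // 30 of 40 > 2/3
+	}
+}
+```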
+
+Assuming less than one-third of the validators are Byzantine, Tendermint
+guarantees that safety will never be violated; that is, validators will
+never commit conflicting blocks at the same height. To do this, it
+introduces a few **locking** rules that constrain which paths can be
+followed in the flow diagram. Once a validator precommits a block, it is
+locked on that block. Then,
+
+1. it must prevote for the block it is locked on
+2. it can only unlock, and precommit for a new block, if there is a
+   polka for that block in a later round
+
+## Stake
+
+In many systems, not all validators will have the same "weight" in the
+consensus protocol. Thus, we are not so much interested in one-third or
+two-thirds of the validators, but in those proportions of the total
+voting power, which may not be uniformly distributed across individual
+validators.
+
+Since Tendermint can replicate arbitrary applications, it is possible to
+define a currency, and denominate the voting power in that currency.
+When voting power is denominated in a native currency, the system is
+often referred to as Proof-of-Stake. Validators can be forced, by logic
+in the application, to "bond" their currency holdings in a security
+deposit that can be destroyed if they're found to misbehave in the
+consensus protocol. This adds an economic element to the security of the
+protocol, allowing one to quantify the cost of violating the assumption
+that less than one-third of voting power is Byzantine.
+
+The [Cosmos Network](https://cosmos.network) is designed to use this
+Proof-of-Stake mechanism across an array of cryptocurrencies implemented
+as ABCI applications.
+
+The following diagram is Tendermint in a (technical) nutshell. [See here
+for a high-resolution
+version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf).
+
+![](../imgs/tm-transaction-flow.png)
diff --git a/docs/networks/README.md b/docs/networks/README.md
new file mode 100644
index 00000000000..aa53afb08b4
--- /dev/null
+++ b/docs/networks/README.md
@@ -0,0 +1,9 @@
+# Overview
+
+Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your
+local machine.
+
+Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
+testnets to the cloud.
+
+See the `tendermint testnet --help` command for more help initializing testnets.
diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md
index 88e5c6f7250..21c346103c4 100644
--- a/docs/networks/deploy-testnets.md
+++ b/docs/networks/deploy-testnets.md
@@ -1,8 +1,8 @@
 # Deploy a Testnet
 
-Now that we've seen how ABCI works, and even played with a few
-applications on a single validator node, it's time to deploy a test
-network to four validator nodes.
+DEPRECATED DOCS!
+
+See [Networks](../networks).
 
 ## Manual Deployments
 
@@ -21,22 +21,21 @@ Here are the steps to setting up a testnet manually:
 3. Generate a private key and a node key for each validator using
    `tendermint init`
 4. Compile a list of public keys for each validator into a
-   `genesis.json` file and replace the existing file with it.
-5. Run
-   `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma separated
-   list of the ID@IP:PORT combination for each node. The default port for
-   Tendermint is `26656`. The ID of a node can be obtained by running
-   `tendermint show_node_id` command. Thus, if the IP addresses of your nodes
-   were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command
-   would look like:
+   new `genesis.json` file and replace the existing file with it.
+5. Get the node IDs of any peers you want other peers to connect to by
+   running `tendermint show_node_id` on the relevant machine.
+6. Set the `p2p.persistent_peers` in the config for all nodes to the
+   comma-separated list of `ID@IP:PORT` for all nodes. Default port is 26656.
+
+Then start the node:
 
 ```
-tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656
+tendermint node --proxy_app=kvstore
 ```
 
 After a few seconds, all the nodes should connect to each other and start
 making blocks! For more information, see the Tendermint Networks
-section of [the guide to using Tendermint](./using-tendermint.md).
+section of [the guide to using Tendermint](../tendermint-core/using-tendermint.md).
 
 But wait! Steps 3, 4 and 5 are quite manual. Instead, use the `tendermint
 testnet` command. By default, running `tendermint testnet` will create all the
 required files, but it won't populate the list of persistent peers. It will do
@@ -67,8 +66,18 @@ make localnet-start
 ```
 
 from the root of the tendermint repository. This will spin up a 4-node
-local testnet. Review the target in the Makefile to debug any problems.
+local testnet. Note that this command expects a Linux binary in the build
+directory. If you built the binary using a non-Linux OS, you may see
+the error `Binary needs to be OS linux, ARCH amd64`, in which case you can
+run:
+
+```
+make build-linux
+make localnet-start
+```
+
+Review the target in the Makefile to debug any problems.
 
 ### Cloud
 
-See the [next section](./terraform-and-ansible.html) for details.
+See the [next section](./terraform-and-ansible.md) for details.
diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md
new file mode 100644
index 00000000000..a1924eb9d9a
--- /dev/null
+++ b/docs/networks/docker-compose.md
@@ -0,0 +1,85 @@
+# Docker Compose
+
+With Docker Compose, we can spin up local testnets with a single command:
+
+```
+make localnet-start
+```
+
+## Requirements
+
+- [Install tendermint](/docs/install.md)
+- [Install docker](https://docs.docker.com/engine/installation/)
+- [Install docker-compose](https://docs.docker.com/compose/install/)
+
+## Build
+
+Build the `tendermint` binary and the `tendermint/localnode` docker image.
+
+Note that the binary will be mounted into the container, so it can be updated
+without rebuilding the image.
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+
+# Build the linux binary in ./build
+make build-linux
+
+# Build tendermint/localnode image
+make build-docker-localnode
+```
+
+## Run a testnet
+
+To start a 4-node testnet, run:
+
+```
+make localnet-start
+```
+
+The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host.
+The underlying docker-compose file creates a 4-node network using the localnode image.
+The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively.
+
+To update the binary, just rebuild it and restart the nodes:
+
+```
+make build-linux
+make localnet-stop
+make localnet-start
+```
+
+## Configuration
+
+The `make localnet-start` command creates files for a 4-node testnet in `./build` by calling the `tendermint testnet` command.
+
+The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container.
+
+For instance, to create a single-node testnet:
+
+```
+cd $GOPATH/src/github.com/tendermint/tendermint
+
+# Clear the build folder
+rm -rf ./build
+
+# Build binary
+make build-linux
+
+# Create configuration
+docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
+
+# Run the node
+docker run -v `pwd`/build:/tendermint tendermint/localnode
+```
+
+## Logging
+
+The log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
+
+## Special binaries
+
+If you have multiple binaries with different names, you can specify which one to run with the `BINARY` environment variable. The path of the binary is relative to the attached volume.
diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md
index 5a4b9c53b44..c08ade17acd 100644
--- a/docs/networks/terraform-and-ansible.md
+++ b/docs/networks/terraform-and-ansible.md
@@ -29,7 +29,7 @@ export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
 
 These will be used by both `terraform` and `ansible`.
 
-### Terraform
+## Terraform
 
 This step will create four Digital Ocean droplets. First, go to the
 correct directory:
@@ -49,7 +49,7 @@ and you will get a list of IP addresses that belong to your
 droplets.
 
 With the droplets created and running, let's setup Ansible.
 
-### Ansible
+## Ansible
 
 The playbooks in [the ansible directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible)
@@ -144,7 +144,7 @@ Peek at the logs with the status role:
 
 ```
 ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml
 ```
 
-### Logging
+## Logging
 
 The crudest way is the status role described above.
You can also ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) @@ -160,7 +160,7 @@ go get github.com/mheese/journalbeat ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 ``` -### Cleanup +## Cleanup To remove your droplets, run: diff --git a/docs/package-lock.json b/docs/package-lock.json new file mode 100644 index 00000000000..3449eda1a21 --- /dev/null +++ b/docs/package-lock.json @@ -0,0 +1,4670 @@ +{ + "name": "tendermint", + "version": "0.0.1", + "lockfileVersion": 1, + "requires": true, + "dependencies": { + "@azu/format-text": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@azu/format-text/-/format-text-1.0.1.tgz", + "integrity": "sha1-aWc1CpRkD2sChVFpvYl85U1s6+I=" + }, + "@azu/style-format": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/@azu/style-format/-/style-format-1.0.0.tgz", + "integrity": "sha1-5wGH+Khi4ZGxvObAJo8TrNOlayA=", + "requires": { + "@azu/format-text": "^1.0.1" + } + }, + "@sindresorhus/is": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.7.0.tgz", + "integrity": "sha512-ONhaKPIufzzrlNbqtWFFd+jlnemX6lJAgq9ZeiZtS7I1PIf/la7CW4m83rTXRnVnsMbW2k56pGYu7AUFJD9Pow==" + }, + "@textlint/ast-node-types": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/@textlint/ast-node-types/-/ast-node-types-4.0.3.tgz", + "integrity": "sha512-mkkqbuxZkCESmMCrVN5QEgmFqBJAcoAGIaZaQfziqKAyCQBLLgKVJzeFuup9mDm9mvCTKekhLk9yIaEFc8EFxA==" + }, + "@textlint/ast-traverse": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@textlint/ast-traverse/-/ast-traverse-2.0.9.tgz", + "integrity": "sha512-E2neVj65wyadt3hr9R+DHW01dG4dNOMmFRab7Bph/rkDDeK85w/6RNJgIt9vBCPtt7a4bndTj1oZrK6wDZAEtQ==", + "requires": { + "@textlint/ast-node-types": "^4.0.3" + } + }, + "@textlint/feature-flag": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@textlint/feature-flag/-/feature-flag-3.0.5.tgz", + "integrity": "sha512-hXTDGvltgiUtJs7QhALSILNE+g0cdY4CyqHR2r5+EmiYbS3NuqWVLn3GZYUPWXl9rVDky/IpR+6DF0uLJF8m8Q==", + "requires": { + "map-like": "^2.0.0" + } + }, + "@textlint/fixer-formatter": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@textlint/fixer-formatter/-/fixer-formatter-3.0.8.tgz", + "integrity": "sha512-LTHcCLTyESdz90NGYzrYC0juSqLzGBc5VMMRO8Xvz3fapBya/Sn5ncgvsHqnKY0OIbV/IdOT54G2F46D8R6P9Q==", + "requires": { + "@textlint/kernel": "^3.0.0", + "chalk": "^1.1.3", + "debug": "^2.1.0", + "diff": "^2.2.2", + "interop-require": "^1.0.0", + "is-file": "^1.0.0", + "string-width": "^1.0.1", + "text-table": "^0.2.0", + "try-resolve": "^1.0.1" + }, + "dependencies": { + "@textlint/kernel": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@textlint/kernel/-/kernel-3.0.0.tgz", + "integrity": "sha512-SxHWr6VAD/SdqTCy1uB03bFLbGYbhZeQTeUuIJE6s1pD7wtQ1+Y1n8nx9I9m7nqGZi5eYuVA6WnpvCq10USz+w==", + "requires": { + "@textlint/ast-node-types": "^4.0.3", + "@textlint/ast-traverse": "^2.0.9", + "@textlint/feature-flag": "^3.0.5", + "@types/bluebird": "^3.5.18", + "bluebird": "^3.5.1", + "debug": "^2.6.6", + "deep-equal": "^1.0.1", + "map-like": "^2.0.0", + "object-assign": "^4.1.1", + "structured-source": "^3.0.2" + } + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=" + }, + "chalk": { + "version": "1.1.3", + "resolved": 
"http://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=" + } + } + }, + "@textlint/kernel": { + "version": "2.0.9", + "resolved": "https://registry.npmjs.org/@textlint/kernel/-/kernel-2.0.9.tgz", + "integrity": "sha512-0237/9yDIlSVaH0pcVAxm0rV1xF96UpjXUXoBRdciWnf2+O0tWQEeBC9B2/B2jLw9Ha0zGlK+q+bLREpXB97Cw==", + "requires": { + "@textlint/ast-node-types": "^4.0.2", + "@textlint/ast-traverse": "^2.0.8", + "@textlint/feature-flag": "^3.0.4", + "@types/bluebird": "^3.5.18", + "bluebird": "^3.5.1", + "debug": "^2.6.6", + "deep-equal": "^1.0.1", + "object-assign": "^4.1.1", + "structured-source": "^3.0.2" + } + }, + "@textlint/linter-formatter": { + "version": "3.0.8", + "resolved": "https://registry.npmjs.org/@textlint/linter-formatter/-/linter-formatter-3.0.8.tgz", + "integrity": "sha512-hayZi4ybj01Km9Soi34cT8EkmEcqGgQKHu1tvPQVd8S2zaE3m/8nmf6qhwAo/HAwMzbIj0XxdV8nVuiUfz8ADQ==", + "requires": { + "@azu/format-text": "^1.0.1", + "@azu/style-format": "^1.0.0", + "@textlint/kernel": "^3.0.0", + "chalk": "^1.0.0", + "concat-stream": "^1.5.1", + "js-yaml": "^3.2.4", + "optionator": "^0.8.1", + "pluralize": "^2.0.0", + "string-width": "^1.0.1", + "string.prototype.padstart": "^3.0.0", + "strip-ansi": "^3.0.1", + "table": "^3.7.8", + "text-table": "^0.2.0", + "try-resolve": "^1.0.1", + "xml-escape": "^1.0.0" + }, + "dependencies": { + "@textlint/kernel": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@textlint/kernel/-/kernel-3.0.0.tgz", + "integrity": "sha512-SxHWr6VAD/SdqTCy1uB03bFLbGYbhZeQTeUuIJE6s1pD7wtQ1+Y1n8nx9I9m7nqGZi5eYuVA6WnpvCq10USz+w==", + "requires": { + "@textlint/ast-node-types": "^4.0.3", + "@textlint/ast-traverse": "^2.0.9", + "@textlint/feature-flag": "^3.0.5", + "@types/bluebird": "^3.5.18", + "bluebird": "^3.5.1", + "debug": "^2.6.6", + "deep-equal": "^1.0.1", + "map-like": "^2.0.0", + "object-assign": "^4.1.1", + "structured-source": "^3.0.2" + } + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=" + }, + "chalk": { + "version": "1.1.3", + "resolved": "http://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=" + } + } + }, + "@textlint/markdown-to-ast": { + "version": "6.0.9", + "resolved": "https://registry.npmjs.org/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.9.tgz", + "integrity": "sha512-hfAWBvTeUGh5t5kTn2U3uP3qOSM1BSrxzl1jF3nn0ywfZXpRBZr5yRjXnl4DzIYawCtZOshmRi/tI3/x4TE1jQ==", + "requires": { + "@textlint/ast-node-types": "^4.0.3", + "debug": "^2.1.3", + "remark-frontmatter": "^1.2.0", + "remark-parse": "^5.0.0", + "structured-source": "^3.0.2", + "traverse": "^0.6.6", + "unified": "^6.1.6" + } + }, + "@textlint/text-to-ast": { + "version": "3.0.9", + 
"resolved": "https://registry.npmjs.org/@textlint/text-to-ast/-/text-to-ast-3.0.9.tgz", + "integrity": "sha512-0Vycl2XtGv3pUtUNkBn9M/e3jBAtmlh7STUa3GuiyATXg49PsqqX7c8NxGPrNqMvDYCJ3ZubBx8GSEyra6ZWFw==", + "requires": { + "@textlint/ast-node-types": "^4.0.3" + } + }, + "@textlint/textlint-plugin-markdown": { + "version": "4.0.10", + "resolved": "https://registry.npmjs.org/@textlint/textlint-plugin-markdown/-/textlint-plugin-markdown-4.0.10.tgz", + "integrity": "sha512-HIV2UAhjnt9/tJQbuXkrD3CRiEFRtNpYoQEZCNCwd1nBMWUypAFthL9jT1KJ8tagOF7wEiGMB19QfDxiNQ+6mw==", + "requires": { + "@textlint/markdown-to-ast": "^6.0.8" + } + }, + "@textlint/textlint-plugin-text": { + "version": "3.0.10", + "resolved": "https://registry.npmjs.org/@textlint/textlint-plugin-text/-/textlint-plugin-text-3.0.10.tgz", + "integrity": "sha512-GSw9vsuKt7E85jDSFEXT0VYZo4C3e8XFFrSWYqXlwPKl/oQ/WHQfMg7GM288uGoEaMzbKEfBtpdwdZqTjGHOQA==", + "requires": { + "@textlint/text-to-ast": "^3.0.8" + } + }, + "@types/bluebird": { + "version": "3.5.24", + "resolved": "https://registry.npmjs.org/@types/bluebird/-/bluebird-3.5.24.tgz", + "integrity": "sha512-YeQoDpq4Lm8ppSBqAnAeF/xy1cYp/dMTif2JFcvmAbETMRlvKHT2iLcWu+WyYiJO3b3Ivokwo7EQca/xfLVJmg==" + }, + "adverb-where": { + "version": "0.0.9", + "resolved": "https://registry.npmjs.org/adverb-where/-/adverb-where-0.0.9.tgz", + "integrity": "sha1-CcXN3Y1QO5/l924LjcXHCo8ZPjQ=" + }, + "aggregate-error": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-1.0.0.tgz", + "integrity": "sha1-iINE2tAiCnLjr1CQYRf0h3GSX6w=", + "requires": { + "clean-stack": "^1.0.0", + "indent-string": "^3.0.0" + } + }, + "ajv": { + "version": "5.5.2", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz", + "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=", + "requires": { + "co": "^4.6.0", + "fast-deep-equal": "^1.0.0", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.3.0" + } + }, + "ajv-keywords": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-1.5.1.tgz", + "integrity": "sha1-MU3QpLM2j609/NxU7eYXG4htrzw=" + }, + "ansi-regex": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", + "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=" + }, + "ansi-styles": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", + "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", + "requires": { + "color-convert": "^1.9.0" + } + }, + "anymatch": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-1.3.2.tgz", + "integrity": "sha512-0XNayC8lTHQ2OI8aljNCN3sSx6hsr/1+rlcDAotXJR7C1oZZHCNsfpbKwMjRA3Uqb5tF1Rae2oloTr4xpq+WjA==", + "requires": { + "micromatch": "^2.1.5", + "normalize-path": "^2.0.0" + } + }, + "argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "requires": { + "sprintf-js": "~1.0.2" + } + }, + "arr-diff": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-2.0.0.tgz", + "integrity": "sha1-jzuCf5Vai9ZpaX5KQlasPOrjVs8=", + "requires": { + "arr-flatten": "^1.0.1" + } + }, + "arr-flatten": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz", + "integrity": 
"sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==" + }, + "arr-union": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz", + "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=" + }, + "array-union": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz", + "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=", + "requires": { + "array-uniq": "^1.0.1" + } + }, + "array-uniq": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz", + "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=" + }, + "array-unique": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.2.1.tgz", + "integrity": "sha1-odl8yvy8JiXMcPrc6zalDFiwGlM=" + }, + "arrify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz", + "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=" + }, + "asn1": { + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", + "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", + "requires": { + "safer-buffer": "~2.1.0" + } + }, + "assert-plus": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", + "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=" + }, + "assign-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz", + "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=" + }, + "async-each": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/async-each/-/async-each-1.0.1.tgz", + "integrity": "sha1-GdOGodntxufByF04iu28xW0zYC0=" + }, + "asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=" + }, + "atob": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz", + "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==" + }, + "aws-sign2": { + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", + "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=" + }, + "aws4": { + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz", + "integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==" + }, + "bail": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.3.tgz", + "integrity": "sha512-1X8CnjFVQ+a+KW36uBNMTU5s8+v5FzeqrP7hTG5aTb4aPreSbZJlhwPon9VKMuEVgV++JM+SQrALY3kr7eswdg==" + }, + "balanced-match": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", + "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=" + }, + "base": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz", + "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==", + "requires": { + "cache-base": "^1.0.1", + "class-utils": "^0.3.5", + "component-emitter": "^1.2.1", + "define-property": "^1.0.0", + "isobject": "^3.0.1", + "mixin-deep": "^1.2.0", + "pascalcase": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + } + } + }, + "bcrypt-pbkdf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", + "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", + "optional": true, + "requires": { + "tweetnacl": "^0.14.3" + } + }, + "binary-extensions": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-1.12.0.tgz", + "integrity": "sha512-DYWGk01lDcxeS/K9IHPGWfT8PsJmbXRtRd2Sx72Tnb8pcYZQFF1oSDb8hJtS1vhp212q1Rzi5dUf9+nq0o9UIg==" + }, + "bluebird": { + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.5.2.tgz", + "integrity": "sha512-dhHTWMI7kMx5whMQntl7Vr9C6BvV10lFXDAasnqnrMYhXVCzzk6IO9Fo2L75jXHT07WrOngL1WDXOp+yYS91Yg==" + }, + "boundary": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/boundary/-/boundary-1.0.1.tgz", + "integrity": "sha1-TWfcJgLAzBbdm85+v4fpSCkPWBI=" + }, + "brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "braces": { + "version": "1.8.5", + "resolved": "https://registry.npmjs.org/braces/-/braces-1.8.5.tgz", + "integrity": "sha1-uneWLhLf+WnWt2cR6RS3N4V79qc=", + "requires": { + "expand-range": "^1.8.1", + "preserve": "^0.2.0", + "repeat-element": "^1.1.2" + } + }, + "buffer-from": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", + "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==" + }, + "builtin-modules": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz", + "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=" + }, + "cache-base": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz", + "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==", + "requires": { + "collection-visit": "^1.0.0", + "component-emitter": "^1.2.1", + "get-value": "^2.0.6", + "has-value": "^1.0.0", + "isobject": "^3.0.1", + "set-value": "^2.0.0", + "to-object-path": "^0.3.0", + "union-value": "^1.0.0", + "unset-value": "^1.0.0" + }, + "dependencies": { + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "cacheable-request": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-2.1.4.tgz", + "integrity": "sha1-DYCIAbY0KtM8kd+dC0TcCbkeXD0=", + "requires": { + "clone-response": "1.0.2", + "get-stream": "3.0.0", + "http-cache-semantics": "3.8.1", + "keyv": "3.0.0", + "lowercase-keys": "1.0.0", + "normalize-url": "2.0.1", + "responselike": "1.0.2" + }, + "dependencies": { + "lowercase-keys": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.0.tgz", + "integrity": "sha1-TjNms55/VFfjXxMkvfb4jQv8cwY=" + } + } + }, + "camelcase": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-4.1.0.tgz", + "integrity": "sha1-1UVjW+HjPFQmScaRc+Xeas+uNN0=" + }, + "capture-stack-trace": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/capture-stack-trace/-/capture-stack-trace-1.0.1.tgz", + "integrity": "sha512-mYQLZnx5Qt1JgB1WEiMCf2647plpGeQ2NMR/5L0HNZzGQo4fuSPnK+wjfPnKZV0aiJDgzmWqqkV/g7JD+DW0qw==" + }, + "caseless": { + "version": "0.12.0", + "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", + "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=" + }, + "ccount": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.0.3.tgz", + "integrity": "sha512-Jt9tIBkRc9POUof7QA/VwWd+58fKkEEfI+/t1/eOlxKM7ZhrczNzMFefge7Ai+39y1pR/pP6cI19guHy3FSLmw==" + }, + "chalk": { + "version": "2.4.1", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz", + "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==", + "requires": { + "ansi-styles": "^3.2.1", + "escape-string-regexp": "^1.0.5", + "supports-color": "^5.3.0" + } + }, + "character-entities": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.2.tgz", + "integrity": "sha512-sMoHX6/nBiy3KKfC78dnEalnpn0Az0oSNvqUWYTtYrhRI5iUIYsROU48G+E+kMFQzqXaJ8kHJZ85n7y6/PHgwQ==" + }, + "character-entities-html4": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/character-entities-html4/-/character-entities-html4-1.1.2.tgz", + "integrity": "sha512-sIrXwyna2+5b0eB9W149izTPJk/KkJTg6mEzDGibwBUkyH1SbDa+nf515Ppdi3MaH35lW0JFJDWeq9Luzes1Iw==" + }, + "character-entities-legacy": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.2.tgz", + "integrity": "sha512-9NB2VbXtXYWdXzqrvAHykE/f0QJxzaKIpZ5QzNZrrgQ7Iyxr2vnfS8fCBNVW9nUEZE0lo57nxKRqnzY/dKrwlA==" + }, + "character-reference-invalid": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.2.tgz", + "integrity": "sha512-7I/xceXfKyUJmSAn/jw8ve/9DyOP7XxufNYLI9Px7CmsKgEUaZLUTax6nZxGQtaoiZCjpu6cHPj20xC/vqRReQ==" + }, + "charenc": { + "version": 
"0.0.2", + "resolved": "https://registry.npmjs.org/charenc/-/charenc-0.0.2.tgz", + "integrity": "sha1-wKHS86cJLgN3S/qD8UwPxXkKhmc=" + }, + "chokidar": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-1.7.0.tgz", + "integrity": "sha1-eY5ol3gVHIB2tLNg5e3SjNortGg=", + "requires": { + "anymatch": "^1.3.0", + "async-each": "^1.0.0", + "fsevents": "^1.0.0", + "glob-parent": "^2.0.0", + "inherits": "^2.0.1", + "is-binary-path": "^1.0.0", + "is-glob": "^2.0.0", + "path-is-absolute": "^1.0.0", + "readdirp": "^2.0.0" + } + }, + "circular-json": { + "version": "0.3.3", + "resolved": "https://registry.npmjs.org/circular-json/-/circular-json-0.3.3.tgz", + "integrity": "sha512-UZK3NBx2Mca+b5LsG7bY183pHWt5Y1xts4P3Pz7ENTwGVnJOUWbRb3ocjvX7hx9tq/yTAdclXm9sZ38gNuem4A==" + }, + "class-utils": { + "version": "0.3.6", + "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz", + "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==", + "requires": { + "arr-union": "^3.1.0", + "define-property": "^0.2.5", + "isobject": "^3.0.0", + "static-extend": "^0.1.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "clean-stack": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-1.3.0.tgz", + "integrity": "sha1-noIVAa6XmYbEax1m0tQy2y/UrjE=" + }, + "clone-response": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.2.tgz", + "integrity": "sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws=", + "requires": { + "mimic-response": "^1.0.0" + } + }, + "co": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz", + "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=" + }, + "code-point-at": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", + "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=" + }, + "collapse-white-space": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.4.tgz", + "integrity": "sha512-YfQ1tAUZm561vpYD+5eyWN8+UsceQbSrqqlc/6zDY2gtAE+uZLSdkkovhnGpmCThsvKBFakq4EdY/FF93E8XIw==" + }, + "collection-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz", + "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=", + "requires": { + "map-visit": "^1.0.0", + "object-visit": "^1.0.0" + } + }, + "color-convert": { + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", + "requires": { + "color-name": "1.1.3" + } + }, + "color-name": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=" + }, + "combined-stream": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.7.tgz", + "integrity": 
"sha512-brWl9y6vOB1xYPZcpZde3N9zDByXTosAeMDo4p1wzo6UMOX4vumB+TP1RZ76sfE6Md68Q0NJSrE/gbezd4Ul+w==", + "requires": { + "delayed-stream": "~1.0.0" + } + }, + "component-emitter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz", + "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=" + }, + "concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=" + }, + "concat-stream": { + "version": "1.6.2", + "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", + "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", + "requires": { + "buffer-from": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^2.2.2", + "typedarray": "^0.0.6" + } + }, + "copy-descriptor": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz", + "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=" + }, + "core-util-is": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", + "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=" + }, + "create-error-class": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/create-error-class/-/create-error-class-3.0.2.tgz", + "integrity": "sha1-Br56vvlHo/FKMP1hBnHUAbyot7Y=", + "requires": { + "capture-stack-trace": "^1.0.0" + } + }, + "crypt": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/crypt/-/crypt-0.0.2.tgz", + "integrity": "sha1-iNf/fsDfuG9xPch7u0LQRNPmxBs=" + }, + "dashdash": { + "version": "1.14.1", + "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", + "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + }, + "decode-uri-component": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz", + "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=" + }, + "decompress-response": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz", + "integrity": "sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M=", + "requires": { + "mimic-response": "^1.0.0" + } + }, + "deep-equal": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz", + "integrity": "sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=" + }, + "deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==" + }, + "deep-is": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz", + "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=" + }, + "define-properties": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", + "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", + "requires": { + "object-keys": "^1.0.12" + } + }, + "define-property": { + "version": "2.0.2", + "resolved": 
"https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz", + "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==", + "requires": { + "is-descriptor": "^1.0.2", + "isobject": "^3.0.1" + }, + "dependencies": { + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + } + } + }, + "del": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/del/-/del-2.2.2.tgz", + "integrity": "sha1-wSyYHQZ4RshLyvhiz/kw2Qf/0ag=", + "requires": { + "globby": "^5.0.0", + "is-path-cwd": "^1.0.0", + "is-path-in-cwd": "^1.0.0", + "object-assign": "^4.0.1", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0", + "rimraf": "^2.2.8" + }, + "dependencies": { + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=" + } + } + }, + "delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=" + }, + "diff": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/diff/-/diff-2.2.3.tgz", + "integrity": "sha1-YOr9DSjukG5Oj/ClLBIpUhAzv5k=" + }, + "dns-packet": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-1.3.1.tgz", + "integrity": "sha512-0UxfQkMhYAUaZI+xrNZOz/as5KgDU0M/fQ9b6SpkyLbk3GEswDi6PADJVaYJradtRVsRIlF1zLyOodbcTCDzUg==", + "requires": { + "ip": "^1.1.0", + "safe-buffer": "^5.0.1" + } + }, + "dns-socket": { + "version": "1.6.3", + "resolved": "https://registry.npmjs.org/dns-socket/-/dns-socket-1.6.3.tgz", + "integrity": "sha512-/mUy3VGqIP69dAZjh2xxHXcpK9wk2Len1Dxz8mWAdrIgFC8tnR/aQAyU4a+UTXzOcTvEvGBdp1zFiwnpWKaXng==", + "requires": { + "dns-packet": "^1.1.0" + } + }, + "duplexer3": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.4.tgz", + "integrity": "sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI=" + }, + "e-prime": { + "version": "0.10.2", + "resolved": "https://registry.npmjs.org/e-prime/-/e-prime-0.10.2.tgz", + "integrity": "sha1-6pN165hWNt6IATx6n7EprZ4V7/g=" + }, + "ecc-jsbn": { + "version": "0.1.2", + "resolved": 
"https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", + "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", + "optional": true, + "requires": { + "jsbn": "~0.1.0", + "safer-buffer": "^2.1.0" + } + }, + "error-ex": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "requires": { + "is-arrayish": "^0.2.1" + } + }, + "es-abstract": { + "version": "1.12.0", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.12.0.tgz", + "integrity": "sha512-C8Fx/0jFmV5IPoMOFPA9P9G5NtqW+4cOPit3MIuvR2t7Ag2K15EJTpxnHAYTzL+aYQJIESYeXZmDBfOBE1HcpA==", + "requires": { + "es-to-primitive": "^1.1.1", + "function-bind": "^1.1.1", + "has": "^1.0.1", + "is-callable": "^1.1.3", + "is-regex": "^1.0.4" + } + }, + "es-to-primitive": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-to-primitive/-/es-to-primitive-1.1.1.tgz", + "integrity": "sha1-RTVSSKiJeQNLZ5Lhm7gfK3l13Q0=", + "requires": { + "is-callable": "^1.1.1", + "is-date-object": "^1.0.1", + "is-symbol": "^1.0.1" + } + }, + "escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=" + }, + "esprima": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==" + }, + "expand-brackets": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-0.1.5.tgz", + "integrity": "sha1-3wcoTjQqgHzXM6xa9yQR5YHRF3s=", + "requires": { + "is-posix-bracket": "^0.1.0" + } + }, + "expand-range": { + "version": "1.8.2", + "resolved": "https://registry.npmjs.org/expand-range/-/expand-range-1.8.2.tgz", + "integrity": "sha1-opnv/TNf4nIeuujiV+x5ZE/IUzc=", + "requires": { + "fill-range": "^2.1.0" + } + }, + "extend": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", + "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==" + }, + "extend-shallow": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz", + "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=", + "requires": { + "assign-symbols": "^1.0.0", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "requires": { + "is-plain-object": "^2.0.4" + } + } + } + }, + "extglob": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-0.3.2.tgz", + "integrity": "sha1-Lhj/PS9JqydlzskCPwEdqo2DSaE=", + "requires": { + "is-extglob": "^1.0.0" + } + }, + "extsprintf": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", + "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" + }, + "fast-deep-equal": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz", + "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=" + }, + "fast-json-stable-stringify": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz", + "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=" + }, + "fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=" + }, + "fault": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.2.tgz", + "integrity": "sha512-o2eo/X2syzzERAtN5LcGbiVQ0WwZSlN3qLtadwAz3X8Bu+XWD16dja/KMsjZLiQr+BLGPDnHGkc4yUJf1Xpkpw==", + "requires": { + "format": "^0.2.2" + } + }, + "file-entry-cache": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-2.0.0.tgz", + "integrity": "sha1-w5KZDD5oR4PYOLjISkXYoEhFg2E=", + "requires": { + "flat-cache": "^1.2.1", + "object-assign": "^4.0.1" + } + }, + "filename-regex": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/filename-regex/-/filename-regex-2.0.1.tgz", + "integrity": "sha1-wcS5vuPglyXdsQa3XB4wH+LxiyY=" + }, + "fill-range": { + "version": "2.2.4", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-2.2.4.tgz", + "integrity": "sha512-cnrcCbj01+j2gTG921VZPnHbjmdAf8oQV/iGeV2kZxGSyfYjjTyY79ErsK1WJWMpw6DaApEX72binqJE+/d+5Q==", + "requires": { + "is-number": "^2.1.0", + "isobject": "^2.0.0", + "randomatic": "^3.0.0", + "repeat-element": "^1.1.2", + "repeat-string": "^1.5.2" + } + }, + "find-up": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-2.1.0.tgz", + "integrity": "sha1-RdG35QbHF93UgndaK3eSCjwMV6c=", + "requires": { + "locate-path": "^2.0.0" + } + }, + "flat-cache": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-1.3.0.tgz", + "integrity": "sha1-0wMLMrOBVPTjt+nHCfSQ9++XxIE=", + "requires": { + "circular-json": "^0.3.1", + "del": "^2.0.2", + "graceful-fs": "^4.1.2", + "write": "^0.2.1" + } + }, + "fn-name": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/fn-name/-/fn-name-2.0.1.tgz", + "integrity": "sha1-UhTXU3pNBqSjAcDMJi/rhBiAAuc=" + }, + "for-in": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz", + "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=" + }, + "for-own": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/for-own/-/for-own-0.1.5.tgz", + "integrity": "sha1-UmXGgaTylNq78XyVCbZ2OqhFEM4=", + "requires": { + "for-in": "^1.0.1" + } + }, + "forever-agent": { + "version": "0.6.1", + "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", + "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=" + }, + "form-data": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.2.tgz", + "integrity": "sha1-SXBJi+YEwgwAXU9cI67NIda0kJk=", + "requires": { + "asynckit": "^0.4.0", + "combined-stream": "1.0.6", + "mime-types": "^2.1.12" + }, + "dependencies": { + "combined-stream": { + "version": "1.0.6", + "resolved": "http://registry.npmjs.org/combined-stream/-/combined-stream-1.0.6.tgz", + "integrity": "sha1-cj599ugBrFYTETp+RFqbactjKBg=", + "requires": { + "delayed-stream": "~1.0.0" + } + } + } + }, + "format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha1-1hcBB+nv3E7TDJ3DkBbflCtctYs=" + }, + "fragment-cache": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz", + "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=", + 
"requires": { + "map-cache": "^0.2.2" + } + }, + "from2": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz", + "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=", + "requires": { + "inherits": "^2.0.1", + "readable-stream": "^2.0.0" + } + }, + "fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" + }, + "fsevents": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.4.tgz", + "integrity": "sha512-z8H8/diyk76B7q5wg+Ud0+CqzcAF3mBBI/bA5ne5zrRUUIvNkJY//D3BqyH571KuAC4Nr7Rw7CjWX4r0y9DvNg==", + "optional": true, + "requires": { + "nan": "^2.9.2", + "node-pre-gyp": "^0.10.0" + }, + "dependencies": { + "abbrev": { + "version": "1.1.1", + "bundled": true, + "optional": true + }, + "ansi-regex": { + "version": "2.1.1", + "bundled": true + }, + "aproba": { + "version": "1.2.0", + "bundled": true, + "optional": true + }, + "are-we-there-yet": { + "version": "1.1.4", + "bundled": true, + "optional": true, + "requires": { + "delegates": "^1.0.0", + "readable-stream": "^2.0.6" + } + }, + "balanced-match": { + "version": "1.0.0", + "bundled": true + }, + "brace-expansion": { + "version": "1.1.11", + "bundled": true, + "requires": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "chownr": { + "version": "1.0.1", + "bundled": true, + "optional": true + }, + "code-point-at": { + "version": "1.1.0", + "bundled": true + }, + "concat-map": { + "version": "0.0.1", + "bundled": true + }, + "console-control-strings": { + "version": "1.1.0", + "bundled": true + }, + "core-util-is": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "debug": { + "version": "2.6.9", + "bundled": true, + "optional": true, + "requires": { + "ms": "2.0.0" + } + }, + "deep-extend": { + "version": "0.5.1", + "bundled": true, + "optional": true + }, + "delegates": { + "version": "1.0.0", + "bundled": true, + "optional": true + }, + "detect-libc": { + "version": "1.0.3", + "bundled": true, + "optional": true + }, + "fs-minipass": { + "version": "1.2.5", + "bundled": true, + "optional": true, + "requires": { + "minipass": "^2.2.1" + } + }, + "fs.realpath": { + "version": "1.0.0", + "bundled": true, + "optional": true + }, + "gauge": { + "version": "2.7.4", + "bundled": true, + "optional": true, + "requires": { + "aproba": "^1.0.3", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.0", + "object-assign": "^4.1.0", + "signal-exit": "^3.0.0", + "string-width": "^1.0.1", + "strip-ansi": "^3.0.1", + "wide-align": "^1.1.0" + } + }, + "glob": { + "version": "7.1.2", + "bundled": true, + "optional": true, + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "has-unicode": { + "version": "2.0.1", + "bundled": true, + "optional": true + }, + "iconv-lite": { + "version": "0.4.21", + "bundled": true, + "optional": true, + "requires": { + "safer-buffer": "^2.1.0" + } + }, + "ignore-walk": { + "version": "3.0.1", + "bundled": true, + "optional": true, + "requires": { + "minimatch": "^3.0.4" + } + }, + "inflight": { + "version": "1.0.6", + "bundled": true, + "optional": true, + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.3", + "bundled": true + }, + "ini": { + "version": "1.3.5", + "bundled": true, + "optional": true + }, + "is-fullwidth-code-point": { + 
"version": "1.0.0", + "bundled": true, + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "isarray": { + "version": "1.0.0", + "bundled": true, + "optional": true + }, + "minimatch": { + "version": "3.0.4", + "bundled": true, + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "0.0.8", + "bundled": true + }, + "minipass": { + "version": "2.2.4", + "bundled": true, + "requires": { + "safe-buffer": "^5.1.1", + "yallist": "^3.0.0" + } + }, + "minizlib": { + "version": "1.1.0", + "bundled": true, + "optional": true, + "requires": { + "minipass": "^2.2.1" + } + }, + "mkdirp": { + "version": "0.5.1", + "bundled": true, + "requires": { + "minimist": "0.0.8" + } + }, + "ms": { + "version": "2.0.0", + "bundled": true, + "optional": true + }, + "needle": { + "version": "2.2.0", + "bundled": true, + "optional": true, + "requires": { + "debug": "^2.1.2", + "iconv-lite": "^0.4.4", + "sax": "^1.2.4" + } + }, + "node-pre-gyp": { + "version": "0.10.0", + "bundled": true, + "optional": true, + "requires": { + "detect-libc": "^1.0.2", + "mkdirp": "^0.5.1", + "needle": "^2.2.0", + "nopt": "^4.0.1", + "npm-packlist": "^1.1.6", + "npmlog": "^4.0.2", + "rc": "^1.1.7", + "rimraf": "^2.6.1", + "semver": "^5.3.0", + "tar": "^4" + } + }, + "nopt": { + "version": "4.0.1", + "bundled": true, + "optional": true, + "requires": { + "abbrev": "1", + "osenv": "^0.1.4" + } + }, + "npm-bundled": { + "version": "1.0.3", + "bundled": true, + "optional": true + }, + "npm-packlist": { + "version": "1.1.10", + "bundled": true, + "optional": true, + "requires": { + "ignore-walk": "^3.0.1", + "npm-bundled": "^1.0.1" + } + }, + "npmlog": { + "version": "4.1.2", + "bundled": true, + "optional": true, + "requires": { + "are-we-there-yet": "~1.1.2", + "console-control-strings": "~1.1.0", + "gauge": "~2.7.3", + "set-blocking": "~2.0.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "bundled": true + }, + "object-assign": { + "version": "4.1.1", + "bundled": true, + "optional": true + }, + "once": { + "version": "1.4.0", + "bundled": true, + "requires": { + "wrappy": "1" + } + }, + "os-homedir": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "os-tmpdir": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "osenv": { + "version": "0.1.5", + "bundled": true, + "optional": true, + "requires": { + "os-homedir": "^1.0.0", + "os-tmpdir": "^1.0.0" + } + }, + "path-is-absolute": { + "version": "1.0.1", + "bundled": true, + "optional": true + }, + "process-nextick-args": { + "version": "2.0.0", + "bundled": true, + "optional": true + }, + "rc": { + "version": "1.2.7", + "bundled": true, + "optional": true, + "requires": { + "deep-extend": "^0.5.1", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "dependencies": { + "minimist": { + "version": "1.2.0", + "bundled": true, + "optional": true + } + } + }, + "readable-stream": { + "version": "2.3.6", + "bundled": true, + "optional": true, + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "rimraf": { + "version": "2.6.2", + "bundled": true, + "optional": true, + "requires": { + "glob": "^7.0.5" + } + }, + "safe-buffer": { + "version": "5.1.1", + "bundled": true + }, + "safer-buffer": { + "version": "2.1.2", + "bundled": true, + "optional": true + }, + "sax": { + "version": "1.2.4", + "bundled": true, + "optional": 
true + }, + "semver": { + "version": "5.5.0", + "bundled": true, + "optional": true + }, + "set-blocking": { + "version": "2.0.0", + "bundled": true, + "optional": true + }, + "signal-exit": { + "version": "3.0.2", + "bundled": true, + "optional": true + }, + "string-width": { + "version": "1.0.2", + "bundled": true, + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string_decoder": { + "version": "1.1.1", + "bundled": true, + "optional": true, + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "bundled": true, + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "bundled": true, + "optional": true + }, + "tar": { + "version": "4.4.1", + "bundled": true, + "optional": true, + "requires": { + "chownr": "^1.0.1", + "fs-minipass": "^1.2.5", + "minipass": "^2.2.4", + "minizlib": "^1.1.0", + "mkdirp": "^0.5.0", + "safe-buffer": "^5.1.1", + "yallist": "^3.0.2" + } + }, + "util-deprecate": { + "version": "1.0.2", + "bundled": true, + "optional": true + }, + "wide-align": { + "version": "1.1.2", + "bundled": true, + "optional": true, + "requires": { + "string-width": "^1.0.2" + } + }, + "wrappy": { + "version": "1.0.2", + "bundled": true + }, + "yallist": { + "version": "3.0.2", + "bundled": true + } + } + }, + "function-bind": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", + "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" + }, + "get-stdin": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/get-stdin/-/get-stdin-5.0.1.tgz", + "integrity": "sha1-Ei4WFZHiH/TFJTAwVpPyDmOTo5g=" + }, + "get-stream": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-3.0.0.tgz", + "integrity": "sha1-jpQ9E1jcN1VQVOy+LtsFqhdO3hQ=" + }, + "get-value": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz", + "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=" + }, + "getpass": { + "version": "0.1.7", + "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", + "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", + "requires": { + "assert-plus": "^1.0.0" + } + }, + "glob": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz", + "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==", + "requires": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.0.4", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + } + }, + "glob-base": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/glob-base/-/glob-base-0.3.0.tgz", + "integrity": "sha1-27Fk9iIbHAscz4Kuoyi0l98Oo8Q=", + "requires": { + "glob-parent": "^2.0.0", + "is-glob": "^2.0.0" + } + }, + "glob-parent": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-2.0.0.tgz", + "integrity": "sha1-gTg9ctsFT8zPUzbaqQLxgvbtuyg=", + "requires": { + "is-glob": "^2.0.0" + } + }, + "globby": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-5.0.0.tgz", + "integrity": "sha1-69hGZ8oNuzMLmbz8aOrCvFQ3Dg0=", + "requires": { + "array-union": "^1.0.1", + "arrify": "^1.0.0", + "glob": "^7.0.3", + "object-assign": "^4.0.1", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0" + }, + "dependencies": { + 
"pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=" + } + } + }, + "got": { + "version": "6.7.1", + "resolved": "http://registry.npmjs.org/got/-/got-6.7.1.tgz", + "integrity": "sha1-JAzQV4WpoY5WHcG0S0HHY+8ejbA=", + "requires": { + "create-error-class": "^3.0.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "is-redirect": "^1.0.0", + "is-retry-allowed": "^1.0.0", + "is-stream": "^1.0.0", + "lowercase-keys": "^1.0.0", + "safe-buffer": "^5.0.1", + "timed-out": "^4.0.0", + "unzip-response": "^2.0.1", + "url-parse-lax": "^1.0.0" + } + }, + "graceful-fs": { + "version": "4.1.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz", + "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=" + }, + "har-schema": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", + "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=" + }, + "har-validator": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.0.tgz", + "integrity": "sha512-+qnmNjI4OfH2ipQ9VQOw23bBd/ibtfbVdK2fYbY4acTDqKTW/YDp9McimZdDbG8iV9fZizUqQMD5xvriB146TA==", + "requires": { + "ajv": "^5.3.0", + "har-schema": "^2.0.0" + } + }, + "has": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz", + "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==", + "requires": { + "function-bind": "^1.1.1" + } + }, + "has-ansi": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", + "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "has-flag": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", + "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=" + }, + "has-symbol-support-x": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz", + "integrity": "sha512-3ToOva++HaW+eCpgqZrCfN51IPB+7bJNVT6CUATzueB5Heb8o6Nam0V3HG5dlDvZU1Gn5QLcbahiKw/XVk5JJw==" + }, + "has-symbols": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.0.tgz", + "integrity": "sha1-uhqPGvKg/DllD1yFA2dwQSIGO0Q=" + }, + "has-to-string-tag-x": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz", + "integrity": "sha512-vdbKfmw+3LoOYVr+mtxHaX5a96+0f3DljYd8JOqvOLsf5mw2Otda2qCDT9qRqLAhrjyQ0h7ual5nOiASpsGNFw==", + "requires": { + "has-symbol-support-x": "^1.4.1" + } + }, + "has-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz", + "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=", + "requires": { + "get-value": "^2.0.6", + "has-values": "^1.0.0", + "isobject": "^3.0.0" + }, + "dependencies": { + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "has-values": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz", + "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=", + "requires": { + "is-number": "^3.0.0", + "kind-of": "^4.0.0" + }, + "dependencies": { + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + 
"integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "kind-of": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz", + "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=", + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "hosted-git-info": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-2.7.1.tgz", + "integrity": "sha512-7T/BxH19zbcCTa8XkMlbK5lTo1WtgkFi3GvdWEyNuc4Vex7/9Dqbnpsf4JMydcfj9HCg4zUWFTL3Za6lapg5/w==" + }, + "http-cache-semantics": { + "version": "3.8.1", + "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-3.8.1.tgz", + "integrity": "sha512-5ai2iksyV8ZXmnZhHH4rWPoxxistEexSi5936zIQ1bnNTW5VnA85B6P/VpXiRM017IgRvb2kKo1a//y+0wSp3w==" + }, + "http-signature": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", + "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", + "requires": { + "assert-plus": "^1.0.0", + "jsprim": "^1.2.2", + "sshpk": "^1.7.0" + } + }, + "ignore": { + "version": "3.3.10", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz", + "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" + }, + "indent-string": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", + "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=" + }, + "inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", + "requires": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "inherits": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=" + }, + "ini": { + "version": "1.3.5", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.5.tgz", + "integrity": "sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw==" + }, + "interop-require": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/interop-require/-/interop-require-1.0.0.tgz", + "integrity": "sha1-5TEDZ5lEyI1+YQW2Kp9EdceDlx4=" + }, + "into-stream": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz", + "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=", + "requires": { + "from2": "^2.1.1", + "p-is-promise": "^1.1.0" + } + }, + "ip": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/ip/-/ip-1.1.5.tgz", + "integrity": "sha1-vd7XARQpCCjAoDnnLvJfWq7ENUo=" + }, + "ip-regex": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/ip-regex/-/ip-regex-2.1.0.tgz", + "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=" + }, + "is-absolute-url": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-absolute-url/-/is-absolute-url-2.1.0.tgz", + "integrity": "sha1-UFMN+4T8yap9vnhS6Do3uTufKqY=" + }, + "is-accessor-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "requires": { + "kind-of": "^3.0.2" + } + }, 
+ "is-alphabetical": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.2.tgz", + "integrity": "sha512-V0xN4BYezDHcBSKb1QHUFMlR4as/XEuCZBzMJUU4n7+Cbt33SmUnSol+pnXFvLxSHNq2CemUXNdaXV6Flg7+xg==" + }, + "is-alphanumeric": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-alphanumeric/-/is-alphanumeric-1.0.0.tgz", + "integrity": "sha1-Spzvcdr0wAHB2B1j0UDPU/1oifQ=" + }, + "is-alphanumerical": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.2.tgz", + "integrity": "sha512-pyfU/0kHdISIgslFfZN9nfY1Gk3MquQgUm1mJTjdkEPpkAKNWuBTSqFwewOpR7N351VkErCiyV71zX7mlQQqsg==", + "requires": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + } + }, + "is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0=" + }, + "is-binary-path": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-1.0.1.tgz", + "integrity": "sha1-dfFmQrSA8YenEcgUFh/TpKdlWJg=", + "requires": { + "binary-extensions": "^1.0.0" + } + }, + "is-buffer": { + "version": "1.1.6", + "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz", + "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==" + }, + "is-builtin-module": { + "version": "1.0.0", + "resolved": "http://registry.npmjs.org/is-builtin-module/-/is-builtin-module-1.0.0.tgz", + "integrity": "sha1-VAVy0096wxGfj3bDDLwbHgN6/74=", + "requires": { + "builtin-modules": "^1.0.0" + } + }, + "is-callable": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/is-callable/-/is-callable-1.1.4.tgz", + "integrity": "sha512-r5p9sxJjYnArLjObpjA4xu5EKI3CuKHkJXMhT7kwbpUyIFD1n5PMAsoPvWnvtZiNz7LjkYDRZhd7FlI0eMijEA==" + }, + "is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "requires": { + "kind-of": "^3.0.2" + } + }, + "is-date-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-date-object/-/is-date-object-1.0.1.tgz", + "integrity": "sha1-mqIOtq7rv/d/vTPnTKAbM1gdOhY=" + }, + "is-decimal": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.2.tgz", + "integrity": "sha512-TRzl7mOCchnhchN+f3ICUCzYvL9ul7R+TYOsZ8xia++knyZAJfv/uA1FvQXsAnYIl1T3B2X5E/J7Wb1QXiIBXg==" + }, + "is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "requires": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + }, + "dependencies": { + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + } + } + }, + "is-dotfile": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/is-dotfile/-/is-dotfile-1.0.3.tgz", + "integrity": "sha1-pqLzL/0t+wT1yiXs0Pa4PPeYoeE=" + }, + "is-empty": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-empty/-/is-empty-1.2.0.tgz", + "integrity": "sha1-3pu1snhzigWgsJpX4ftNSjQan2s=" + }, + "is-equal-shallow": { + 
"version": "0.1.3", + "resolved": "https://registry.npmjs.org/is-equal-shallow/-/is-equal-shallow-0.1.3.tgz", + "integrity": "sha1-IjgJj8Ih3gvPpdnqxMRdY4qhxTQ=", + "requires": { + "is-primitive": "^2.0.0" + } + }, + "is-extendable": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz", + "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=" + }, + "is-extglob": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-1.0.0.tgz", + "integrity": "sha1-rEaBd8SUNAWgkvyPKXYMb/xiBsA=" + }, + "is-file": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-file/-/is-file-1.0.0.tgz", + "integrity": "sha1-KKRM+9nT2xkwRfIrZfzo7fliBZY=" + }, + "is-fullwidth-code-point": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", + "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", + "requires": { + "number-is-nan": "^1.0.0" + } + }, + "is-glob": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-2.0.1.tgz", + "integrity": "sha1-0Jb5JqPe1WAPP9/ZEZjLCIjC2GM=", + "requires": { + "is-extglob": "^1.0.0" + } + }, + "is-hexadecimal": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.2.tgz", + "integrity": "sha512-but/G3sapV3MNyqiDBLrOi4x8uCIw0RY3o/Vb5GT0sMFHrVV7731wFSVy41T5FO1og7G0gXLJh0MkgPRouko/A==" + }, + "is-hidden": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/is-hidden/-/is-hidden-1.1.1.tgz", + "integrity": "sha512-175UKecS8+U4hh2PSY0j4xnm2GKYzvSKnbh+naC93JjuBA7LgIo6YxlbcsSo6seFBdQO3RuIcH980yvqqD/2cA==" + }, + "is-ip": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-ip/-/is-ip-2.0.0.tgz", + "integrity": "sha1-aO6gfooKCpTC0IDdZ0xzGrKkYas=", + "requires": { + "ip-regex": "^2.0.0" + } + }, + "is-number": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-2.1.0.tgz", + "integrity": "sha1-Afy7s5NGOlSPL0ZszhbezknbkI8=", + "requires": { + "kind-of": "^3.0.2" + } + }, + "is-object": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-object/-/is-object-1.0.1.tgz", + "integrity": "sha1-iVJojF7C/9awPsyF52ngKQMINHA=" + }, + "is-online": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-online/-/is-online-7.0.0.tgz", + "integrity": "sha1-fiQIwK4efje6jVC9sjcmDTK/2W4=", + "requires": { + "got": "^6.7.1", + "p-any": "^1.0.0", + "p-timeout": "^1.0.0", + "public-ip": "^2.3.0" + } + }, + "is-path-cwd": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-1.0.0.tgz", + "integrity": "sha1-0iXsIxMuie3Tj9p2dHLmLmXxEG0=" + }, + "is-path-in-cwd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-path-in-cwd/-/is-path-in-cwd-1.0.1.tgz", + "integrity": "sha512-FjV1RTW48E7CWM7eE/J2NJvAEEVektecDBVBE5Hh3nM1Jd0kvhHtX68Pr3xsDf857xt3Y4AkwVULK1Vku62aaQ==", + "requires": { + "is-path-inside": "^1.0.0" + } + }, + "is-path-inside": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-1.0.1.tgz", + "integrity": "sha1-jvW33lBDej/cprToZe96pVy0gDY=", + "requires": { + "path-is-inside": "^1.0.1" + } + }, + "is-plain-obj": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-1.1.0.tgz", + "integrity": "sha1-caUMhCnfync8kqOQpKA7OfzVHT4=" + }, + "is-plain-object": { + "version": "2.0.4", + "resolved": 
"https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "requires": { + "isobject": "^3.0.1" + }, + "dependencies": { + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "is-posix-bracket": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/is-posix-bracket/-/is-posix-bracket-0.1.1.tgz", + "integrity": "sha1-MzTceXdDaOkvAW5vvAqI9c1ua8Q=" + }, + "is-primitive": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-primitive/-/is-primitive-2.0.0.tgz", + "integrity": "sha1-IHurkWOEmcB7Kt8kCkGochADRXU=" + }, + "is-redirect": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-redirect/-/is-redirect-1.0.0.tgz", + "integrity": "sha1-HQPd7VO9jbDzDCbk+V02/HyH3CQ=" + }, + "is-regex": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-regex/-/is-regex-1.0.4.tgz", + "integrity": "sha1-VRdIm1RwkbCTDglWVM7SXul+lJE=", + "requires": { + "has": "^1.0.1" + } + }, + "is-relative-url": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-relative-url/-/is-relative-url-2.0.0.tgz", + "integrity": "sha1-cpAtf+BLPUeS59sV+duEtyBMnO8=", + "requires": { + "is-absolute-url": "^2.0.0" + } + }, + "is-retry-allowed": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-retry-allowed/-/is-retry-allowed-1.1.0.tgz", + "integrity": "sha1-EaBgVotnM5REAz0BJaYaINVk+zQ=" + }, + "is-stream": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", + "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=" + }, + "is-symbol": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-symbol/-/is-symbol-1.0.1.tgz", + "integrity": "sha1-PMWfAAJRlLarLjjbrmaJJWtmBXI=" + }, + "is-typedarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", + "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=" + }, + "is-utf8": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha1-Sw2hRCEE0bM2NA6AeX6GXPOffXI=" + }, + "is-whitespace-character": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.2.tgz", + "integrity": "sha512-SzM+T5GKUCtLhlHFKt2SDAX2RFzfS6joT91F2/WSi9LxgFdsnhfPK/UIA+JhRR2xuyLdrCys2PiFDrtn1fU5hQ==" + }, + "is-windows": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz", + "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==" + }, + "is-word-character": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.2.tgz", + "integrity": "sha512-T3FlsX8rCHAH8e7RE7PfOPZVFQlcV3XRF9eOOBQ1uf70OxO7CjjSOjeImMPCADBdYWcStAbVbYvJ1m2D3tb+EA==" + }, + "isarray": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=" + }, + "isemail": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/isemail/-/isemail-3.1.3.tgz", + "integrity": "sha512-5xbsG5wYADIcB+mfLsd+nst1V/D+I7EU7LEZPo2GOIMu4JzfcRs5yQoypP4avA7QtUqgxYLKBYNv4IdzBmbhdw==", + "requires": { + "punycode": "2.x.x" + } + }, + "isobject": { + "version": "2.1.0", + 
"resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "requires": { + "isarray": "1.0.0" + } + }, + "isstream": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", + "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=" + }, + "isurl": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/isurl/-/isurl-1.0.0.tgz", + "integrity": "sha512-1P/yWsxPlDtn7QeRD+ULKQPaIaN6yF368GZ2vDfv0AL0NwpStafjWCDDdn0k8wgFMWpVAqG7oJhxHnlud42i9w==", + "requires": { + "has-to-string-tag-x": "^1.2.0", + "is-object": "^1.0.1" + } + }, + "js-yaml": { + "version": "3.12.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.0.tgz", + "integrity": "sha512-PIt2cnwmPfL4hKNwqeiuz4bKfnzHTBv6HyVgjahA6mPLwPDzjDWrplJBMjHUFxku/N3FlmrbyPclad+I+4mJ3A==", + "requires": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + } + }, + "jsbn": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", + "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", + "optional": true + }, + "json-buffer": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz", + "integrity": "sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg=" + }, + "json-parse-better-errors": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz", + "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==" + }, + "json-schema": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", + "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=" + }, + "json-schema-traverse": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz", + "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=" + }, + "json-stable-stringify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.1.tgz", + "integrity": "sha1-mnWdOcXy/1A/1TAGRu1EX4jE+a8=", + "requires": { + "jsonify": "~0.0.0" + } + }, + "json-stringify-safe": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", + "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=" + }, + "json5": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", + "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=" + }, + "jsonify": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.0.tgz", + "integrity": "sha1-LHS27kHZPKUbe1qu6PUDYx0lKnM=" + }, + "jsprim": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", + "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", + "requires": { + "assert-plus": "1.0.0", + "extsprintf": "1.3.0", + "json-schema": "0.2.3", + "verror": "1.10.0" + } + }, + "keyv": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.0.0.tgz", + "integrity": "sha512-eguHnq22OE3uVoSYG0LVWNP+4ppamWr9+zWBe1bsNcovIMy6huUJFPgy4mGwCd/rnl3vOLGW1MTlu4c57CT1xA==", + "requires": { + "json-buffer": "3.0.0" + } + }, + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "requires": { + "is-buffer": "^1.1.5" + } + }, + "levn": { + "version": "0.3.0", + "resolved": 
"https://registry.npmjs.org/levn/-/levn-0.3.0.tgz", + "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=", + "requires": { + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2" + } + }, + "link-check": { + "version": "4.4.4", + "resolved": "https://registry.npmjs.org/link-check/-/link-check-4.4.4.tgz", + "integrity": "sha512-yvowNBZEMOFH9nGLiJ5/YV68PBMVTo4opC2SzcACO8g4gSPTB9Rwa5GIziOX9Z5Er3Yf01DHoOyVV2LeApIw8w==", + "requires": { + "is-relative-url": "^2.0.0", + "isemail": "^3.1.2", + "ms": "^2.1.1", + "request": "^2.87.0" + }, + "dependencies": { + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + } + } + }, + "load-json-file": { + "version": "1.1.0", + "resolved": "http://registry.npmjs.org/load-json-file/-/load-json-file-1.1.0.tgz", + "integrity": "sha1-lWkFcI1YtLq0wiYbBPWfMcmTdMA=", + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^2.2.0", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0", + "strip-bom": "^2.0.0" + }, + "dependencies": { + "parse-json": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-2.2.0.tgz", + "integrity": "sha1-9ID0BDTvgHQfhGkJn43qGPVaTck=", + "requires": { + "error-ex": "^1.2.0" + } + }, + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=" + } + } + }, + "load-plugin": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/load-plugin/-/load-plugin-2.2.2.tgz", + "integrity": "sha512-FYzamtURIJefQykZGtiClYuZkJBUKzmx8Tc74y8JGAulDzbzVm/C+w/MbAljHRr+REL0cRzy3WgnHE+T8gce5g==", + "requires": { + "npm-prefix": "^1.2.0", + "resolve-from": "^4.0.0" + } + }, + "locate-path": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-2.0.0.tgz", + "integrity": "sha1-K1aLJl7slExtnA3pw9u7ygNUzY4=", + "requires": { + "p-locate": "^2.0.0", + "path-exists": "^3.0.0" + } + }, + "lodash": { + "version": "4.17.11", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz", + "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg==" + }, + "log-symbols": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-1.0.2.tgz", + "integrity": "sha1-N2/3tY6jCGoPCfrMdGF+ylAeGhg=", + "requires": { + "chalk": "^1.0.0" + }, + "dependencies": { + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=" + }, + "chalk": { + "version": "1.1.3", + "resolved": "http://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=" + } + } + }, + "longest-streak": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/longest-streak/-/longest-streak-2.0.2.tgz", + "integrity": "sha512-TmYTeEYxiAmSVdpbnQDXGtvYOIRsCMg89CVZzwzc2o7GFL1CjoiRPjH5ec0NFAVlAx3fVof9dX/t6KKRAo2OWA==" + }, + "lowercase-keys": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz", + "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==" + }, + "map-cache": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz", + "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=" + }, + "map-like": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/map-like/-/map-like-2.0.0.tgz", + "integrity": "sha1-lEltSa0zPA3DI0snrbvR6FNZU7Q=" + }, + "map-visit": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz", + "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=", + "requires": { + "object-visit": "^1.0.0" + } + }, + "markdown-escapes": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.2.tgz", + "integrity": "sha512-lbRZ2mE3Q9RtLjxZBZ9+IMl68DKIXaVAhwvwn9pmjnPLS0h/6kyBMgNhqi1xFJ/2yv6cSyv0jbiZavZv93JkkA==" + }, + "markdown-extensions": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/markdown-extensions/-/markdown-extensions-1.1.1.tgz", + "integrity": "sha512-WWC0ZuMzCyDHYCasEGs4IPvLyTGftYwh6wIEOULOF0HXcqZlhwRzrK0w2VUlxWA98xnvb/jszw4ZSkJ6ADpM6Q==" + }, + "markdown-table": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/markdown-table/-/markdown-table-1.1.2.tgz", + "integrity": "sha512-NcWuJFHDA8V3wkDgR/j4+gZx+YQwstPgfQDV8ndUeWWzta3dnDTBxpVzqS9lkmJAuV5YX35lmyojl6HO5JXAgw==" + }, + "math-random": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/math-random/-/math-random-1.0.1.tgz", + "integrity": "sha1-izqsWIuKZuSXXjzepn97sylgH6w=" + }, + "md5": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/md5/-/md5-2.2.1.tgz", + "integrity": "sha1-U6s41f48iJG6RlMp6iP6wFQBJvk=", + "requires": { + "charenc": "~0.0.1", + "crypt": "~0.0.1", + "is-buffer": "~1.1.1" + } + }, + "mdast-util-compact": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/mdast-util-compact/-/mdast-util-compact-1.0.2.tgz", + "integrity": "sha512-d2WS98JSDVbpSsBfVvD9TaDMlqPRz7ohM/11G0rp5jOBb5q96RJ6YLszQ/09AAixyzh23FeIpCGqfaamEADtWg==", + "requires": { + "unist-util-visit": "^1.1.0" + } + }, + "micromatch": { + "version": "2.3.11", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-2.3.11.tgz", + "integrity": "sha1-hmd8l9FyCzY0MdBNDRUpO9OMFWU=", + "requires": { + "arr-diff": "^2.0.0", + "array-unique": "^0.2.1", + "braces": "^1.8.2", + "expand-brackets": "^0.1.4", + "extglob": "^0.3.1", + "filename-regex": "^2.0.0", + "is-extglob": "^1.0.0", + "is-glob": "^2.0.1", + "kind-of": "^3.0.2", + "normalize-path": "^2.0.1", + "object.omit": "^2.0.0", + "parse-glob": "^3.0.4", + "regex-cache": "^0.4.2" + } + }, + "mime-db": { + "version": "1.36.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.36.0.tgz", + "integrity": "sha512-L+xvyD9MkoYMXb1jAmzI/lWYAxAMCPvIBSWur0PZ5nOf5euahRLVqH//FKW9mWp2lkqUgYiXPgkzfMUFi4zVDw==" + }, + "mime-types": { + "version": "2.1.20", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.20.tgz", + "integrity": "sha512-HrkrPaP9vGuWbLK1B1FfgAkbqNjIuy4eHlIYnFi7kamZyLLrGlo2mpcx0bBmNpKqBtYtAfGbodDddIgddSJC2A==", + "requires": { + "mime-db": "~1.36.0" + } + }, + "mimic-response": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz", + "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==" + }, + "minimatch": 
{ + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", + "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", + "requires": { + "brace-expansion": "^1.1.7" + } + }, + "minimist": { + "version": "1.2.0", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", + "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + }, + "mixin-deep": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz", + "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==", + "requires": { + "for-in": "^1.0.2", + "is-extendable": "^1.0.1" + }, + "dependencies": { + "is-extendable": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz", + "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==", + "requires": { + "is-plain-object": "^2.0.4" + } + } + } + }, + "mkdirp": { + "version": "0.5.1", + "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", + "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "requires": { + "minimist": "0.0.8" + }, + "dependencies": { + "minimist": { + "version": "0.0.8", + "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", + "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" + } + } + }, + "ms": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", + "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=" + }, + "nan": { + "version": "2.11.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.11.0.tgz", + "integrity": "sha512-F4miItu2rGnV2ySkXOQoA8FKz/SR2Q2sWP0sbTxNxz/tuokeC8WxOhPMcwi0qIyGtVn/rrSeLbvVkznqCdwYnw==", + "optional": true + }, + "nanomatch": { + "version": "1.2.13", + "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz", + "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==", + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "fragment-cache": "^0.2.1", + "is-windows": "^1.0.2", + "kind-of": "^6.0.2", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" + }, + "array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + } + } + }, + "nlcst-to-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/nlcst-to-string/-/nlcst-to-string-2.0.2.tgz", + "integrity": "sha512-DV7wVvMcAsmZ5qEwvX1JUNF4lKkAAKbChwNlIH7NLsPR7LWWoeIt53YlZ5CQH5KDXEXQ9Xa3mw0PbPewymrtew==" + }, + "no-cliches": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/no-cliches/-/no-cliches-0.1.0.tgz", + "integrity": "sha1-9OuBpVH+zegT+MYR415kpRGNw4w=" + }, + "normalize-package-data": { + "version": "2.4.0", + "resolved": 
"https://registry.npmjs.org/normalize-package-data/-/normalize-package-data-2.4.0.tgz", + "integrity": "sha512-9jjUFbTPfEy3R/ad/2oNbKtW9Hgovl5O1FvFWKkKblNXoN/Oou6+9+KKohPK13Yc3/TyunyWhJp6gvRNR/PPAw==", + "requires": { + "hosted-git-info": "^2.1.4", + "is-builtin-module": "^1.0.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "normalize-path": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-2.1.1.tgz", + "integrity": "sha1-GrKLVW4Zg2Oowab35vogE3/mrtk=", + "requires": { + "remove-trailing-separator": "^1.0.1" + } + }, + "normalize-url": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-2.0.1.tgz", + "integrity": "sha512-D6MUW4K/VzoJ4rJ01JFKxDrtY1v9wrgzCX5f2qj/lzH1m/lW6MhUZFKerVsnyjOhOsYzI9Kqqak+10l4LvLpMw==", + "requires": { + "prepend-http": "^2.0.0", + "query-string": "^5.0.1", + "sort-keys": "^2.0.0" + }, + "dependencies": { + "prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" + } + } + }, + "npm-prefix": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/npm-prefix/-/npm-prefix-1.2.0.tgz", + "integrity": "sha1-5hlFX3B0ulTMZtbQ033Z8b5ry8A=", + "requires": { + "rc": "^1.1.0", + "shellsubstitute": "^1.1.0", + "untildify": "^2.1.0" + } + }, + "number-is-nan": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", + "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=" + }, + "oauth-sign": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", + "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==" + }, + "object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=" + }, + "object-copy": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz", + "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=", + "requires": { + "copy-descriptor": "^0.1.0", + "define-property": "^0.2.5", + "kind-of": "^3.0.3" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "object-keys": { + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.0.12.tgz", + "integrity": "sha512-FTMyFUm2wBcGHnH2eXmz7tC6IwlqQZ6mVZ+6dm6vZ4IQIHjs6FdNsQBuKGPuUUUY6NfJw2PshC08Tn6LzLDOag==" + }, + "object-visit": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz", + "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=", + "requires": { + "isobject": "^3.0.0" + }, + "dependencies": { + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "object.assign": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", + "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", + "requires": { + "define-properties": "^1.1.2", + "function-bind": "^1.1.1", + 
"has-symbols": "^1.0.0", + "object-keys": "^1.0.11" + } + }, + "object.omit": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/object.omit/-/object.omit-2.0.1.tgz", + "integrity": "sha1-Gpx0SCnznbuFjHbKNXmuKlTr0fo=", + "requires": { + "for-own": "^0.1.4", + "is-extendable": "^0.1.1" + } + }, + "object.pick": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz", + "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=", + "requires": { + "isobject": "^3.0.1" + }, + "dependencies": { + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", + "requires": { + "wrappy": "1" + } + }, + "optionator": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz", + "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=", + "requires": { + "deep-is": "~0.1.3", + "fast-levenshtein": "~2.0.4", + "levn": "~0.3.0", + "prelude-ls": "~1.1.2", + "type-check": "~0.3.2", + "wordwrap": "~1.0.0" + } + }, + "os-homedir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-homedir/-/os-homedir-1.0.2.tgz", + "integrity": "sha1-/7xJiDNuDoM94MFox+8VISGqf7M=" + }, + "p-any": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-any/-/p-any-1.1.0.tgz", + "integrity": "sha512-Ef0tVa4CZ5pTAmKn+Cg3w8ABBXh+hHO1aV8281dKOoUHfX+3tjG2EaFcC+aZyagg9b4EYGsHEjz21DnEE8Og2g==", + "requires": { + "p-some": "^2.0.0" + } + }, + "p-cancelable": { + "version": "0.4.1", + "resolved": "http://registry.npmjs.org/p-cancelable/-/p-cancelable-0.4.1.tgz", + "integrity": "sha512-HNa1A8LvB1kie7cERyy21VNeHb2CWJJYqyyC2o3klWFfMGlFmWv2Z7sFgZH8ZiaYL95ydToKTFVXgMV/Os0bBQ==" + }, + "p-finally": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha1-P7z7FbiZpEEjs0ttzBi3JDNqLK4=" + }, + "p-is-promise": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz", + "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=" + }, + "p-limit": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-1.3.0.tgz", + "integrity": "sha512-vvcXsLAJ9Dr5rQOPk7toZQZJApBl2K4J6dANSsEuh6QI41JYcsS/qhTGa9ErIUUgK3WNQoJYvylxvjqmiqEA9Q==", + "requires": { + "p-try": "^1.0.0" + } + }, + "p-locate": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-2.0.0.tgz", + "integrity": "sha1-IKAQOyIqcMj9OcwuWAaA893l7EM=", + "requires": { + "p-limit": "^1.1.0" + } + }, + "p-some": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/p-some/-/p-some-2.0.1.tgz", + "integrity": "sha1-Zdh8ixVO289SIdFnd4ttLhUPbwY=", + "requires": { + "aggregate-error": "^1.0.0" + } + }, + "p-timeout": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-1.2.1.tgz", + "integrity": "sha1-XrOzU7f86Z8QGhA4iAuwVOu+o4Y=", + "requires": { + "p-finally": "^1.0.0" + } + }, + "p-try": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/p-try/-/p-try-1.0.0.tgz", + "integrity": "sha1-y8ec26+P1CKOE/Yh8rGiN8GyB7M=" + }, + "parse-entities": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-1.1.2.tgz", + "integrity": 
"sha512-5N9lmQ7tmxfXf+hO3X6KRG6w7uYO/HL9fHalSySTdyn63C3WNvTM/1R8tn1u1larNcEbo3Slcy2bsVDQqvEpUg==", + "requires": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "parse-glob": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/parse-glob/-/parse-glob-3.0.4.tgz", + "integrity": "sha1-ssN2z7EfNVE7rdFz7wu246OIORw=", + "requires": { + "glob-base": "^0.3.0", + "is-dotfile": "^1.0.0", + "is-extglob": "^1.0.0", + "is-glob": "^2.0.0" + } + }, + "parse-json": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-4.0.0.tgz", + "integrity": "sha1-vjX1Qlvh9/bHRxhPmKeIy5lHfuA=", + "requires": { + "error-ex": "^1.3.1", + "json-parse-better-errors": "^1.0.1" + } + }, + "pascalcase": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz", + "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=" + }, + "passive-voice": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/passive-voice/-/passive-voice-0.1.0.tgz", + "integrity": "sha1-Fv+RrkC6DpLEPmcXY/3IQqcCcLE=" + }, + "path-exists": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha1-zg6+ql94yxiSXqfYENe1mwEP1RU=" + }, + "path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=" + }, + "path-is-inside": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz", + "integrity": "sha1-NlQX3t5EQw0cEa9hAn+s8HS9/FM=" + }, + "path-to-glob-pattern": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/path-to-glob-pattern/-/path-to-glob-pattern-1.0.2.tgz", + "integrity": "sha1-Rz5qOikqnRP7rj7czuctO6uoxhk=" + }, + "path-type": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-1.1.0.tgz", + "integrity": "sha1-WcRPfuSR2nBNpBXaWkBwuk+P5EE=", + "requires": { + "graceful-fs": "^4.1.2", + "pify": "^2.0.0", + "pinkie-promise": "^2.0.0" + }, + "dependencies": { + "pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=" + } + } + }, + "performance-now": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", + "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=" + }, + "pify": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", + "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" + }, + "pinkie": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/pinkie/-/pinkie-2.0.4.tgz", + "integrity": "sha1-clVrgM+g1IqXToDnckjoDtT3+HA=" + }, + "pinkie-promise": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pinkie-promise/-/pinkie-promise-2.0.1.tgz", + "integrity": "sha1-ITXW36ejWMBprJsXh3YogihFD/o=", + "requires": { + "pinkie": "^2.0.0" + } + }, + "pluralize": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/pluralize/-/pluralize-2.0.0.tgz", + "integrity": "sha1-crcmqm+sHt7uQiVsfY3CVrM1Z38=" + }, + "posix-character-classes": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz", + "integrity": 
"sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" + }, + "prelude-ls": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz", + "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=" + }, + "prepend-http": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-1.0.4.tgz", + "integrity": "sha1-1PRWKwzjaW5BrFLQ4ALlemNdxtw=" + }, + "preserve": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/preserve/-/preserve-0.2.0.tgz", + "integrity": "sha1-gV7R9uvGWSb4ZbMQwHE7yzMVzks=" + }, + "prettier": { + "version": "1.14.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.14.3.tgz", + "integrity": "sha512-qZDVnCrnpsRJJq5nSsiHCE3BYMED2OtsI+cmzIzF1QIfqm5ALf8tEJcO27zV1gKNKRPdhjO0dNWnrzssDQ1tFg==" + }, + "process-nextick-args": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz", + "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==" + }, + "psl": { + "version": "1.1.29", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.1.29.tgz", + "integrity": "sha512-AeUmQ0oLN02flVHXWh9sSJF7mcdFq0ppid/JkErufc3hGIV/AMa8Fo9VgDo/cT2jFdOWoFvHp90qqBH54W+gjQ==" + }, + "public-ip": { + "version": "2.4.0", + "resolved": "https://registry.npmjs.org/public-ip/-/public-ip-2.4.0.tgz", + "integrity": "sha512-74cIy+T2cDmt+Z71AfVipH2q6qqZITPyNGszKV86OGDYIRvti1m8zg4GOaiTPCLgEIWnToKYXbhEnMiZWHPEUA==", + "requires": { + "dns-socket": "^1.6.2", + "got": "^8.0.0", + "is-ip": "^2.0.0", + "pify": "^3.0.0" + }, + "dependencies": { + "got": { + "version": "8.3.2", + "resolved": "https://registry.npmjs.org/got/-/got-8.3.2.tgz", + "integrity": "sha512-qjUJ5U/hawxosMryILofZCkm3C84PLJS/0grRIpjAwu+Lkxxj5cxeCU25BG0/3mDSpXKTyZr8oh8wIgLaH0QCw==", + "requires": { + "@sindresorhus/is": "^0.7.0", + "cacheable-request": "^2.1.1", + "decompress-response": "^3.3.0", + "duplexer3": "^0.1.4", + "get-stream": "^3.0.0", + "into-stream": "^3.1.0", + "is-retry-allowed": "^1.1.0", + "isurl": "^1.0.0-alpha5", + "lowercase-keys": "^1.0.0", + "mimic-response": "^1.0.0", + "p-cancelable": "^0.4.0", + "p-timeout": "^2.0.1", + "pify": "^3.0.0", + "safe-buffer": "^5.1.1", + "timed-out": "^4.0.1", + "url-parse-lax": "^3.0.0", + "url-to-options": "^1.0.1" + } + }, + "p-timeout": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/p-timeout/-/p-timeout-2.0.1.tgz", + "integrity": "sha512-88em58dDVB/KzPEx1X0N3LwFfYZPyDc4B6eF38M1rk9VTZMbxXXgjugz8mmwpS9Ox4BDZ+t6t3QP5+/gazweIA==", + "requires": { + "p-finally": "^1.0.0" + } + }, + "prepend-http": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz", + "integrity": "sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc=" + }, + "url-parse-lax": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz", + "integrity": "sha1-FrXK/Afb42dsGxmZF3gj1lA6yww=", + "requires": { + "prepend-http": "^2.0.0" + } + } + } + }, + "punycode": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", + "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" + }, + "qs": { + "version": "6.5.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", + "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==" + }, + "query-string": { + "version": "5.1.1", + "resolved": 
"https://registry.npmjs.org/query-string/-/query-string-5.1.1.tgz", + "integrity": "sha512-gjWOsm2SoGlgLEdAGt7a6slVOk9mGiXmPFMqrEhLQ68rhQuBnpfs3+EmlvqKyxnCo9/PPlF+9MtY02S1aFg+Jw==", + "requires": { + "decode-uri-component": "^0.2.0", + "object-assign": "^4.1.0", + "strict-uri-encode": "^1.0.0" + } + }, + "randomatic": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/randomatic/-/randomatic-3.1.0.tgz", + "integrity": "sha512-KnGPVE0lo2WoXxIZ7cPR8YBpiol4gsSuOwDSg410oHh80ZMp5EiypNqL2K4Z77vJn6lB5rap7IkAmcUlalcnBQ==", + "requires": { + "is-number": "^4.0.0", + "kind-of": "^6.0.0", + "math-random": "^1.0.1" + }, + "dependencies": { + "is-number": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-4.0.0.tgz", + "integrity": "sha512-rSklcAIlf1OmFdyAqbnWTLVelsQ58uvZ66S/ZyawjWqIviTWCjg2PzVGw8WUA+nNuPTqb4wgA+NszrJ+08LlgQ==" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + } + } + }, + "rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "requires": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + } + }, + "rc-config-loader": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/rc-config-loader/-/rc-config-loader-2.0.2.tgz", + "integrity": "sha512-Nx9SNM47eNRqe0TdntOY600qWb8NDh+xU9sv5WnTscEtzfTB0ukihlqwuCLPteyJksvZ0sEVPoySNE01TKrmTQ==", + "requires": { + "debug": "^3.1.0", + "js-yaml": "^3.12.0", + "json5": "^1.0.1", + "object-assign": "^4.1.0", + "object-keys": "^1.0.12", + "path-exists": "^3.0.0", + "require-from-string": "^2.0.2" + }, + "dependencies": { + "debug": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.5.tgz", + "integrity": "sha512-D61LaDQPQkxJ5AUM2mbSJRbPkNs/TmdmOeLAi1hgDkpDfIfetSrjmWhccwtuResSwMbACjx/xXQofvM9CE/aeg==", + "requires": { + "ms": "^2.1.1" + } + }, + "json5": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json5/-/json5-1.0.1.tgz", + "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==", + "requires": { + "minimist": "^1.2.0" + } + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + } + } + }, + "read-pkg": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-1.1.0.tgz", + "integrity": "sha1-9f+qXs0pyzHAR0vKfXVra7KePyg=", + "requires": { + "load-json-file": "^1.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^1.0.0" + } + }, + "read-pkg-up": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg-up/-/read-pkg-up-3.0.0.tgz", + "integrity": "sha1-PtSWaF26D4/hGNBpHcUfSh/5bwc=", + "requires": { + "find-up": "^2.0.0", + "read-pkg": "^3.0.0" + }, + "dependencies": { + "load-json-file": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/load-json-file/-/load-json-file-4.0.0.tgz", + "integrity": "sha1-L19Fq5HjMhYjT9U62rZo607AmTs=", + "requires": { + "graceful-fs": "^4.1.2", + "parse-json": "^4.0.0", + "pify": "^3.0.0", + "strip-bom": "^3.0.0" + } + }, + "path-type": { + "version": 
"3.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz", + "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==", + "requires": { + "pify": "^3.0.0" + } + }, + "read-pkg": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/read-pkg/-/read-pkg-3.0.0.tgz", + "integrity": "sha1-nLxoaXj+5l0WwA4rGcI3/Pbjg4k=", + "requires": { + "load-json-file": "^4.0.0", + "normalize-package-data": "^2.3.2", + "path-type": "^3.0.0" + } + }, + "strip-bom": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=" + } + } + }, + "readable-stream": { + "version": "2.3.6", + "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz", + "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==", + "requires": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "readdirp": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-2.2.1.tgz", + "integrity": "sha512-1JU/8q+VgFZyxwrJ+SVIOsh+KywWGpds3NTqikiKpDMZWScmAYyKIgqkO+ARvNWJfXeXR1zxz7aHF4u4CyH6vQ==", + "requires": { + "graceful-fs": "^4.1.11", + "micromatch": "^3.1.10", + "readable-stream": "^2.0.2" + }, + "dependencies": { + "arr-diff": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz", + "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=" + }, + "array-unique": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz", + "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=" + }, + "braces": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz", + "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==", + "requires": { + "arr-flatten": "^1.1.0", + "array-unique": "^0.3.2", + "extend-shallow": "^2.0.1", + "fill-range": "^4.0.0", + "isobject": "^3.0.1", + "repeat-element": "^1.1.2", + "snapdragon": "^0.8.1", + "snapdragon-node": "^2.0.1", + "split-string": "^3.0.2", + "to-regex": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "expand-brackets": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz", + "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=", + "requires": { + "debug": "^2.3.3", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "posix-character-classes": "^0.1.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + }, + 
"is-accessor-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz", + "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=", + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-data-descriptor": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz", + "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=", + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "is-descriptor": { + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz", + "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==", + "requires": { + "is-accessor-descriptor": "^0.1.6", + "is-data-descriptor": "^0.1.4", + "kind-of": "^5.0.0" + } + }, + "kind-of": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz", + "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==" + } + } + }, + "extglob": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz", + "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==", + "requires": { + "array-unique": "^0.3.2", + "define-property": "^1.0.0", + "expand-brackets": "^2.1.4", + "extend-shallow": "^2.0.1", + "fragment-cache": "^0.2.1", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "fill-range": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz", + "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=", + "requires": { + "extend-shallow": "^2.0.1", + "is-number": "^3.0.0", + "repeat-string": "^1.6.1", + "to-regex-range": "^2.1.0" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + 
"integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + }, + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "requires": { + "kind-of": "^3.0.2" + }, + "dependencies": { + "kind-of": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz", + "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=", + "requires": { + "is-buffer": "^1.1.5" + } + } + } + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + }, + "micromatch": { + "version": "3.1.10", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz", + "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==", + "requires": { + "arr-diff": "^4.0.0", + "array-unique": "^0.3.2", + "braces": "^2.3.1", + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "extglob": "^2.0.4", + "fragment-cache": "^0.2.1", + "kind-of": "^6.0.2", + "nanomatch": "^1.2.9", + "object.pick": "^1.3.0", + "regex-not": "^1.0.0", + "snapdragon": "^0.8.1", + "to-regex": "^3.0.2" + } + } + } + }, + "regex-cache": { + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/regex-cache/-/regex-cache-0.4.4.tgz", + "integrity": "sha512-nVIZwtCjkC9YgvWkpM55B5rBhBYRZhAaJbgcFYXXsHnbZ9UZI9nnVWYZpBlCqv9ho2eZryPnWrZGsOdPwVWXWQ==", + "requires": { + "is-equal-shallow": "^0.1.3" + } + }, + "regex-not": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz", + "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==", + "requires": { + "extend-shallow": "^3.0.2", + "safe-regex": "^1.1.0" + } + }, + "remark": { + "version": "9.0.0", + "resolved": "https://registry.npmjs.org/remark/-/remark-9.0.0.tgz", + "integrity": "sha512-amw8rGdD5lHbMEakiEsllmkdBP+/KpjW/PRK6NSGPZKCQowh0BT4IWXDAkRMyG3SB9dKPXWMviFjNusXzXNn3A==", + "requires": { + "remark-parse": "^5.0.0", + "remark-stringify": "^5.0.0", + "unified": "^6.0.0" + } + }, + "remark-cli": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-cli/-/remark-cli-5.0.0.tgz", + "integrity": "sha512-+j0tza5XZ/XHfity3mg5GJFezRt5hS+ybC7/LDItmOAA8u8gRgB51B+/m5U3yT6RLlhefdqkMGKZnZMcamnvsQ==", + "requires": { + "markdown-extensions": "^1.1.0", + "remark": "^9.0.0", + "unified-args": "^5.0.0" + } + }, + "remark-frontmatter": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/remark-frontmatter/-/remark-frontmatter-1.2.1.tgz", + "integrity": "sha512-PEXZFO3jrB+E0G6ZIsV8GOED1gPHQF5hgedJQJ8SbsLRQv4KKrFj3A+huaeu0qtzTScdxPeDTacQ9gkV4vIarA==", + "requires": { + "fault": "^1.0.1", + "xtend": "^4.0.1" + } + 
}, + "remark-lint-no-dead-urls": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/remark-lint-no-dead-urls/-/remark-lint-no-dead-urls-0.3.0.tgz", + "integrity": "sha512-eG+vVrNui7zeBmU6fsjIi8rwXriuyNhNcmJDQ7M5oaxCluWbH5bt6Yi/JNsabYE39dFdlVbw9JM3cLjaJv2hQw==", + "requires": { + "is-online": "^7.0.0", + "is-relative-url": "^2.0.0", + "link-check": "^4.1.0", + "unified-lint-rule": "^1.0.1", + "unist-util-visit": "^1.1.3" + } + }, + "remark-lint-write-good": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/remark-lint-write-good/-/remark-lint-write-good-1.0.3.tgz", + "integrity": "sha512-d4D4VrAklAx2ONhpXoQnt0YrJFpJBE5XEeCyDGjPhm4DkIoLOmHWZEjxl1HvdrpGXLb/KfYU4lJPeyxlKiDhVA==", + "requires": { + "nlcst-to-string": "^2.0.0", + "unified-lint-rule": "^1.0.1", + "unist-util-visit": "^1.1.1", + "write-good": "^0.11.1" + } + }, + "remark-parse": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-5.0.0.tgz", + "integrity": "sha512-b3iXszZLH1TLoyUzrATcTQUZrwNl1rE70rVdSruJFlDaJ9z5aMkhrG43Pp68OgfHndL/ADz6V69Zow8cTQu+JA==", + "requires": { + "collapse-white-space": "^1.0.2", + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-whitespace-character": "^1.0.0", + "is-word-character": "^1.0.0", + "markdown-escapes": "^1.0.0", + "parse-entities": "^1.1.0", + "repeat-string": "^1.5.4", + "state-toggle": "^1.0.0", + "trim": "0.0.1", + "trim-trailing-lines": "^1.0.0", + "unherit": "^1.0.4", + "unist-util-remove-position": "^1.0.0", + "vfile-location": "^2.0.0", + "xtend": "^4.0.1" + } + }, + "remark-stringify": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/remark-stringify/-/remark-stringify-5.0.0.tgz", + "integrity": "sha512-Ws5MdA69ftqQ/yhRF9XhVV29mhxbfGhbz0Rx5bQH+oJcNhhSM6nCu1EpLod+DjrFGrU0BMPs+czVmJZU7xiS7w==", + "requires": { + "ccount": "^1.0.0", + "is-alphanumeric": "^1.0.0", + "is-decimal": "^1.0.0", + "is-whitespace-character": "^1.0.0", + "longest-streak": "^2.0.1", + "markdown-escapes": "^1.0.0", + "markdown-table": "^1.1.0", + "mdast-util-compact": "^1.0.0", + "parse-entities": "^1.0.2", + "repeat-string": "^1.5.4", + "state-toggle": "^1.0.0", + "stringify-entities": "^1.0.1", + "unherit": "^1.0.4", + "xtend": "^4.0.1" + } + }, + "remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": "sha1-wkvOKig62tW8P1jg1IJJuSN52O8=" + }, + "repeat-element": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz", + "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==" + }, + "repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=" + }, + "replace-ext": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/replace-ext/-/replace-ext-1.0.0.tgz", + "integrity": "sha1-3mMSg3P8v3w8z6TeWkgMRaZ5WOs=" + }, + "request": { + "version": "2.88.0", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz", + "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", + "requires": { + "aws-sign2": "~0.7.0", + "aws4": "^1.8.0", + "caseless": "~0.12.0", + "combined-stream": "~1.0.6", + "extend": "~3.0.2", + "forever-agent": "~0.6.1", + "form-data": "~2.3.2", + "har-validator": "~5.1.0", + 
"http-signature": "~1.2.0", + "is-typedarray": "~1.0.0", + "isstream": "~0.1.2", + "json-stringify-safe": "~5.0.1", + "mime-types": "~2.1.19", + "oauth-sign": "~0.9.0", + "performance-now": "^2.1.0", + "qs": "~6.5.2", + "safe-buffer": "^5.1.2", + "tough-cookie": "~2.4.3", + "tunnel-agent": "^0.6.0", + "uuid": "^3.3.2" + } + }, + "require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==" + }, + "resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==" + }, + "resolve-url": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz", + "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=" + }, + "responselike": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz", + "integrity": "sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec=", + "requires": { + "lowercase-keys": "^1.0.0" + } + }, + "ret": { + "version": "0.1.15", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz", + "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==" + }, + "rimraf": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.2.tgz", + "integrity": "sha512-lreewLK/BlghmxtfH36YYVg1i8IAce4TI7oao75I1g245+6BctqTVQiBP3YUJ9C6DQOXJmkYR9X9fCLtCOJc5w==", + "requires": { + "glob": "^7.0.5" + } + }, + "safe-buffer": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==" + }, + "safe-regex": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz", + "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=", + "requires": { + "ret": "~0.1.10" + } + }, + "safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==" + }, + "semver": { + "version": "5.5.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.1.tgz", + "integrity": "sha512-PqpAxfrEhlSUWge8dwIp4tZnQ25DIOthpiaHNIthsjEFQD6EvqUKUDM7L8O2rShkFccYo1VjJR0coWfNkCubRw==" + }, + "set-value": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz", + "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==", + "requires": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.3", + "split-string": "^3.0.1" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "shellsubstitute": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/shellsubstitute/-/shellsubstitute-1.2.0.tgz", + "integrity": "sha1-5PcCpQxRiw9v6YRRiQ1wWvKba3A=" + }, + "slice-ansi": { + "version": "0.0.4", + "resolved": 
"https://registry.npmjs.org/slice-ansi/-/slice-ansi-0.0.4.tgz", + "integrity": "sha1-7b+JA/ZvfOL46v1s7tZeJkyDGzU=" + }, + "sliced": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/sliced/-/sliced-1.0.1.tgz", + "integrity": "sha1-CzpmK10Ewxd7GSa+qCsD+Dei70E=" + }, + "snapdragon": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz", + "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==", + "requires": { + "base": "^0.11.1", + "debug": "^2.2.0", + "define-property": "^0.2.5", + "extend-shallow": "^2.0.1", + "map-cache": "^0.2.2", + "source-map": "^0.5.6", + "source-map-resolve": "^0.5.0", + "use": "^3.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "requires": { + "is-descriptor": "^0.1.0" + } + }, + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + } + } + }, + "snapdragon-node": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz", + "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==", + "requires": { + "define-property": "^1.0.0", + "isobject": "^3.0.0", + "snapdragon-util": "^3.0.1" + }, + "dependencies": { + "define-property": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz", + "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=", + "requires": { + "is-descriptor": "^1.0.0" + } + }, + "is-accessor-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz", + "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-data-descriptor": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz", + "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==", + "requires": { + "kind-of": "^6.0.0" + } + }, + "is-descriptor": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz", + "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==", + "requires": { + "is-accessor-descriptor": "^1.0.0", + "is-data-descriptor": "^1.0.0", + "kind-of": "^6.0.2" + } + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + }, + "kind-of": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz", + "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==" + } + } + }, + "snapdragon-util": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz", + "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==", + "requires": { + "kind-of": "^3.2.0" + } + }, + "sort-keys": { + 
"version": "2.0.0", + "resolved": "https://registry.npmjs.org/sort-keys/-/sort-keys-2.0.0.tgz", + "integrity": "sha1-ZYU1WEhh7JfXMNbPQYIuH1ZoQSg=", + "requires": { + "is-plain-obj": "^1.0.0" + } + }, + "source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w=" + }, + "source-map-resolve": { + "version": "0.5.2", + "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz", + "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==", + "requires": { + "atob": "^2.1.1", + "decode-uri-component": "^0.2.0", + "resolve-url": "^0.2.1", + "source-map-url": "^0.4.0", + "urix": "^0.1.0" + } + }, + "source-map-url": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz", + "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=" + }, + "spdx-correct": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-correct/-/spdx-correct-3.0.0.tgz", + "integrity": "sha512-N19o9z5cEyc8yQQPukRCZ9EUmb4HUpnrmaL/fxS2pBo2jbfcFRVuFZ/oFC+vZz0MNNk0h80iMn5/S6qGZOL5+g==", + "requires": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-exceptions": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/spdx-exceptions/-/spdx-exceptions-2.1.0.tgz", + "integrity": "sha512-4K1NsmrlCU1JJgUrtgEeTVyfx8VaYea9J9LvARxhbHtVtohPs/gFGG5yy49beySjlIMhhXZ4QqujIZEfS4l6Cg==" + }, + "spdx-expression-parse": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/spdx-expression-parse/-/spdx-expression-parse-3.0.0.tgz", + "integrity": "sha512-Yg6D3XpRD4kkOmTpdgbUiEJFKghJH03fiC1OPll5h/0sO6neh2jqRDVHOQ4o/LMea0tgCkbMgea5ip/e+MkWyg==", + "requires": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "spdx-license-ids": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/spdx-license-ids/-/spdx-license-ids-3.0.1.tgz", + "integrity": "sha512-TfOfPcYGBB5sDuPn3deByxPhmfegAhpDYKSOXZQN81Oyrrif8ZCodOLzK3AesELnCx03kikhyDwh0pfvvQvF8w==" + }, + "split-lines": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/split-lines/-/split-lines-2.0.0.tgz", + "integrity": "sha512-gaIdhbqxkB5/VflPXsJwZvEzh/kdwiRPF9iqpkxX4us+lzB8INedFwjCyo6vwuz5x2Ddlnav2zh270CEjCG8mA==" + }, + "split-string": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz", + "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==", + "requires": { + "extend-shallow": "^3.0.0" + } + }, + "sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=" + }, + "sshpk": { + "version": "1.14.2", + "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.14.2.tgz", + "integrity": "sha1-xvxhZIo9nE52T9P8306hBeSSupg=", + "requires": { + "asn1": "~0.2.3", + "assert-plus": "^1.0.0", + "bcrypt-pbkdf": "^1.0.0", + "dashdash": "^1.12.0", + "ecc-jsbn": "~0.1.1", + "getpass": "^0.1.1", + "jsbn": "~0.1.0", + "safer-buffer": "^2.0.2", + "tweetnacl": "~0.14.0" + } + }, + "state-toggle": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.1.tgz", + "integrity": "sha512-Qe8QntFrrpWTnHwvwj2FZTgv+PKIsp0B9VxLzLLbSpPXWOgRgc5LVj/aTiSfK1RqIeF9jeC1UeOH8Q8y60A7og==" + }, + "static-extend": { + "version": "0.1.2", + 
"resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz", + "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=", + "requires": { + "define-property": "^0.2.5", + "object-copy": "^0.1.0" + }, + "dependencies": { + "define-property": { + "version": "0.2.5", + "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz", + "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=", + "requires": { + "is-descriptor": "^0.1.0" + } + } + } + }, + "strict-uri-encode": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-1.1.0.tgz", + "integrity": "sha1-J5siXfHVgrH1TmWt3UNS4Y+qBxM=" + }, + "string-width": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", + "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", + "requires": { + "code-point-at": "^1.0.0", + "is-fullwidth-code-point": "^1.0.0", + "strip-ansi": "^3.0.0" + } + }, + "string.prototype.padstart": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.padstart/-/string.prototype.padstart-3.0.0.tgz", + "integrity": "sha1-W8+tOfRkm7LQMSkuGbzwtRDUskI=", + "requires": { + "define-properties": "^1.1.2", + "es-abstract": "^1.4.3", + "function-bind": "^1.0.2" + } + }, + "string_decoder": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "requires": { + "safe-buffer": "~5.1.0" + } + }, + "stringify-entities": { + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/stringify-entities/-/stringify-entities-1.3.2.tgz", + "integrity": "sha512-nrBAQClJAPN2p+uGCVJRPIPakKeKWZ9GtBCmormE7pWOSlHat7+x5A8gx85M7HM5Dt0BP3pP5RhVW77WdbJJ3A==", + "requires": { + "character-entities-html4": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-hexadecimal": "^1.0.0" + } + }, + "strip-ansi": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", + "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", + "requires": { + "ansi-regex": "^2.0.0" + } + }, + "strip-bom": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": "sha1-YhmoVhZSBJHzV4i9vxRHqZx+aw4=", + "requires": { + "is-utf8": "^0.2.0" + } + }, + "strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha1-PFMZQukIwml8DsNEhYwobHygpgo=" + }, + "structured-source": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/structured-source/-/structured-source-3.0.2.tgz", + "integrity": "sha1-3YAkJeD1PcSm56yjdSkBoczaevU=", + "requires": { + "boundary": "^1.0.1" + } + }, + "supports-color": { + "version": "5.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", + "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", + "requires": { + "has-flag": "^3.0.0" + } + }, + "table": { + "version": "3.8.3", + "resolved": "http://registry.npmjs.org/table/-/table-3.8.3.tgz", + "integrity": "sha1-K7xULw/amGGnVdOUf+/Ys/UThV8=", + "requires": { + "ajv": "^4.7.0", + "ajv-keywords": "^1.0.0", + "chalk": "^1.1.1", + "lodash": "^4.0.0", + "slice-ansi": "0.0.4", + "string-width": "^2.0.0" + }, + "dependencies": { + "ajv": { + "version": "4.11.8", + 
"resolved": "https://registry.npmjs.org/ajv/-/ajv-4.11.8.tgz", + "integrity": "sha1-gv+wKynmYq5TvcIK8VlHcGc5xTY=", + "requires": { + "co": "^4.6.0", + "json-stable-stringify": "^1.0.1" + } + }, + "ansi-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", + "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=" + }, + "ansi-styles": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", + "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=" + }, + "chalk": { + "version": "1.1.3", + "resolved": "http://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", + "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", + "requires": { + "ansi-styles": "^2.2.1", + "escape-string-regexp": "^1.0.2", + "has-ansi": "^2.0.0", + "strip-ansi": "^3.0.0", + "supports-color": "^2.0.0" + } + }, + "is-fullwidth-code-point": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", + "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=" + }, + "string-width": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", + "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", + "requires": { + "is-fullwidth-code-point": "^2.0.0", + "strip-ansi": "^4.0.0" + }, + "dependencies": { + "strip-ansi": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", + "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", + "requires": { + "ansi-regex": "^3.0.0" + } + } + } + }, + "supports-color": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", + "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=" + } + } + }, + "text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=" + }, + "textlint": { + "version": "10.2.1", + "resolved": "https://registry.npmjs.org/textlint/-/textlint-10.2.1.tgz", + "integrity": "sha512-tjSvxRZ7iewPmw0ShIA5IIZNJM9m157K1hGXE9wGxALcSb+xOZ0oLPv1HN7z0UzqOuMNqYyeN7mi4N0IplLkYA==", + "requires": { + "@textlint/ast-node-types": "^4.0.2", + "@textlint/ast-traverse": "^2.0.8", + "@textlint/feature-flag": "^3.0.4", + "@textlint/fixer-formatter": "^3.0.7", + "@textlint/kernel": "^2.0.9", + "@textlint/linter-formatter": "^3.0.7", + "@textlint/textlint-plugin-markdown": "^4.0.10", + "@textlint/textlint-plugin-text": "^3.0.10", + "@types/bluebird": "^3.5.18", + "bluebird": "^3.0.5", + "debug": "^2.1.0", + "deep-equal": "^1.0.1", + "file-entry-cache": "^2.0.0", + "get-stdin": "^5.0.1", + "glob": "^7.1.1", + "interop-require": "^1.0.0", + "is-file": "^1.0.0", + "log-symbols": "^1.0.2", + "map-like": "^2.0.0", + "md5": "^2.2.1", + "mkdirp": "^0.5.0", + "object-assign": "^4.0.1", + "optionator": "^0.8.0", + "path-to-glob-pattern": "^1.0.2", + "rc-config-loader": "^2.0.1", + "read-pkg": "^1.1.0", + "read-pkg-up": "^3.0.0", + "structured-source": "^3.0.2", + "try-resolve": "^1.0.1", + "unique-concat": "^0.2.2" + } + }, + "textlint-rule-helper": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/textlint-rule-helper/-/textlint-rule-helper-2.0.0.tgz", + "integrity": "sha1-lctGlslcQljS4zienmS4SflyE4I=", + "requires": { + "unist-util-visit": "^1.1.0" + } + }, + "textlint-rule-stop-words": { + "version": "1.0.5", + "resolved": 
"https://registry.npmjs.org/textlint-rule-stop-words/-/textlint-rule-stop-words-1.0.5.tgz", + "integrity": "sha512-sttfqpFX3ji4AD4eF3gpiCH+csqsaztO0V2koWVYhrHyPjUL4cPlB1I/H4Fa7G3Ik35dBA0q5Tf+88A0vO9erQ==", + "requires": { + "lodash": "^4.17.10", + "split-lines": "^2.0.0", + "textlint-rule-helper": "^2.0.0" + } + }, + "timed-out": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/timed-out/-/timed-out-4.0.1.tgz", + "integrity": "sha1-8y6srFoXW+ol1/q1Zas+2HQe9W8=" + }, + "to-object-path": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz", + "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=", + "requires": { + "kind-of": "^3.0.2" + } + }, + "to-regex": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz", + "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==", + "requires": { + "define-property": "^2.0.2", + "extend-shallow": "^3.0.2", + "regex-not": "^1.0.2", + "safe-regex": "^1.1.0" + } + }, + "to-regex-range": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz", + "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=", + "requires": { + "is-number": "^3.0.0", + "repeat-string": "^1.6.1" + }, + "dependencies": { + "is-number": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz", + "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=", + "requires": { + "kind-of": "^3.0.2" + } + } + } + }, + "to-vfile": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/to-vfile/-/to-vfile-2.2.0.tgz", + "integrity": "sha512-saGC8/lWdGrEoBMLUtgzhRHWAkQMP8gdldA3MOAUhBwTGEb1RSMVcflHGSx4ZJsdEZ9o1qDBCPp47LCPrbZWow==", + "requires": { + "is-buffer": "^1.1.4", + "vfile": "^2.0.0", + "x-is-function": "^1.0.4" + } + }, + "too-wordy": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/too-wordy/-/too-wordy-0.1.4.tgz", + "integrity": "sha1-jnsgp7ek2Pw3WfTgDEkpmT0bEvA=" + }, + "tough-cookie": { + "version": "2.4.3", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", + "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", + "requires": { + "psl": "^1.1.24", + "punycode": "^1.4.1" + }, + "dependencies": { + "punycode": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", + "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" + } + } + }, + "traverse": { + "version": "0.6.6", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.6.tgz", + "integrity": "sha1-y99WD9e5r2MlAv7UD5GMFX6pcTc=" + }, + "trim": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz", + "integrity": "sha1-WFhUf2spB1fulczMZm+1AITEYN0=" + }, + "trim-trailing-lines": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.1.tgz", + "integrity": "sha512-bWLv9BbWbbd7mlqqs2oQYnLD/U/ZqeJeJwbO0FG2zA1aTq+HTvxfHNKFa/HGCVyJpDiioUYaBhfiT6rgk+l4mg==" + }, + "trough": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.3.tgz", + "integrity": "sha512-fwkLWH+DimvA4YCy+/nvJd61nWQQ2liO/nF/RjkTpiOGi+zxZzVkhb1mvbHIIW4b/8nDsYI8uTmAlc0nNkRMOw==" + }, + "try-resolve": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/try-resolve/-/try-resolve-1.0.1.tgz", + "integrity": "sha1-z95vq9ctY+V5fPqrhzq76OcA6RI=" 
+ }, + "tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", + "requires": { + "safe-buffer": "^5.0.1" + } + }, + "tweetnacl": { + "version": "0.14.5", + "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", + "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", + "optional": true + }, + "type-check": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz", + "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=", + "requires": { + "prelude-ls": "~1.1.2" + } + }, + "typedarray": { + "version": "0.0.6", + "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", + "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=" + }, + "unherit": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.1.tgz", + "integrity": "sha512-+XZuV691Cn4zHsK0vkKYwBEwB74T3IZIcxrgn2E4rKwTfFyI1zCh7X7grwh9Re08fdPlarIdyWgI8aVB3F5A5g==", + "requires": { + "inherits": "^2.0.1", + "xtend": "^4.0.1" + } + }, + "unified": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/unified/-/unified-6.2.0.tgz", + "integrity": "sha512-1k+KPhlVtqmG99RaTbAv/usu85fcSRu3wY8X+vnsEhIxNP5VbVIDiXnLqyKIG+UMdyTg0ZX9EI6k2AfjJkHPtA==", + "requires": { + "bail": "^1.0.0", + "extend": "^3.0.0", + "is-plain-obj": "^1.1.0", + "trough": "^1.0.0", + "vfile": "^2.0.0", + "x-is-string": "^0.1.0" + } + }, + "unified-args": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unified-args/-/unified-args-5.1.0.tgz", + "integrity": "sha512-IR8bS/qrfOMuIYrLlaXt+3L6cvDHv5YbBfYNVGBLbShUjE9vpbnUiPFMc/XKtH6oAGrD/m8lvVwCHDsFGBBzJA==", + "requires": { + "camelcase": "^4.0.0", + "chalk": "^2.0.0", + "chokidar": "^1.5.1", + "json5": "^0.5.1", + "minimist": "^1.2.0", + "text-table": "^0.2.0", + "unified-engine": "^5.1.0" + } + }, + "unified-engine": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/unified-engine/-/unified-engine-5.1.0.tgz", + "integrity": "sha512-N7b7HG6doQUtkWr+kH35tfUhfc9QiYeiZGG6TcZlexSURf4xRUpYKBbc2f67qJF5oPmn6mMkImkdhr31Q6saoA==", + "requires": { + "concat-stream": "^1.5.1", + "debug": "^3.1.0", + "fault": "^1.0.0", + "fn-name": "^2.0.1", + "glob": "^7.0.3", + "ignore": "^3.2.0", + "is-empty": "^1.0.0", + "is-hidden": "^1.0.1", + "is-object": "^1.0.1", + "js-yaml": "^3.6.1", + "load-plugin": "^2.0.0", + "parse-json": "^4.0.0", + "to-vfile": "^2.0.0", + "trough": "^1.0.0", + "unist-util-inspect": "^4.1.2", + "vfile-reporter": "^4.0.0", + "vfile-statistics": "^1.1.0", + "x-is-function": "^1.0.4", + "x-is-string": "^0.1.0", + "xtend": "^4.0.1" + }, + "dependencies": { + "debug": { + "version": "3.2.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.5.tgz", + "integrity": "sha512-D61LaDQPQkxJ5AUM2mbSJRbPkNs/TmdmOeLAi1hgDkpDfIfetSrjmWhccwtuResSwMbACjx/xXQofvM9CE/aeg==", + "requires": { + "ms": "^2.1.1" + } + }, + "ms": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz", + "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==" + } + } + }, + "unified-lint-rule": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/unified-lint-rule/-/unified-lint-rule-1.0.3.tgz", + "integrity": "sha512-6z+HH3mtlFdj/w3MaQpObrZAd9KRiro370GxBFh13qkV8LYR21lLozA4iQiZPhe7KuX/lHewoGOEgQ4AWrAR3Q==", + "requires": { + "wrapped": "^1.0.1" + } + }, + "union-value": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz", + "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=", + "requires": { + "arr-union": "^3.1.0", + "get-value": "^2.0.6", + "is-extendable": "^0.1.1", + "set-value": "^0.4.3" + }, + "dependencies": { + "extend-shallow": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz", + "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=", + "requires": { + "is-extendable": "^0.1.0" + } + }, + "set-value": { + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz", + "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=", + "requires": { + "extend-shallow": "^2.0.1", + "is-extendable": "^0.1.1", + "is-plain-object": "^2.0.1", + "to-object-path": "^0.3.0" + } + } + } + }, + "unique-concat": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/unique-concat/-/unique-concat-0.2.2.tgz", + "integrity": "sha1-khD5vcqsxeHjkpSQ18AZ35bxhxI=" + }, + "unist-util-inspect": { + "version": "4.1.3", + "resolved": "https://registry.npmjs.org/unist-util-inspect/-/unist-util-inspect-4.1.3.tgz", + "integrity": "sha512-Fv9R88ZBbDp7mHN+wsbxS1r8VW3unyhZh/F18dcJRQsg0+g3DxNQnMS+AEG/uotB8Md+HMK/TfzSU5lUDWxkZg==", + "requires": { + "is-empty": "^1.0.0" + } + }, + "unist-util-is": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-2.1.2.tgz", + "integrity": "sha512-YkXBK/H9raAmG7KXck+UUpnKiNmUdB+aBGrknfQ4EreE1banuzrKABx3jP6Z5Z3fMSPMQQmeXBlKpCbMwBkxVw==" + }, + "unist-util-remove-position": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-1.1.2.tgz", + "integrity": "sha512-XxoNOBvq1WXRKXxgnSYbtCF76TJrRoe5++pD4cCBsssSiWSnPEktyFrFLE8LTk3JW5mt9hB0Sk5zn4x/JeWY7Q==", + "requires": { + "unist-util-visit": "^1.1.0" + } + }, + "unist-util-stringify-position": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz", + "integrity": "sha512-pNCVrk64LZv1kElr0N1wPiHEUoXNVFERp+mlTg/s9R5Lwg87f9bM/3sQB99w+N9D/qnM9ar3+AKDBwo/gm/iQQ==" + }, + "unist-util-visit": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-1.4.0.tgz", + "integrity": "sha512-FiGu34ziNsZA3ZUteZxSFaczIjGmksfSgdKqBfOejrrfzyUy5b7YrlzT1Bcvi+djkYDituJDy2XB7tGTeBieKw==", + "requires": { + "unist-util-visit-parents": "^2.0.0" + } + }, + "unist-util-visit-parents": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-2.0.1.tgz", + "integrity": "sha512-6B0UTiMfdWql4cQ03gDTCSns+64Zkfo2OCbK31Ov0uMizEz+CJeAp0cgZVb5Fhmcd7Bct2iRNywejT0orpbqUA==", + "requires": { + "unist-util-is": "^2.1.2" + } + }, + "unset-value": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz", + "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=", + "requires": { + "has-value": "^0.3.1", + "isobject": "^3.0.0" + }, + "dependencies": { + "has-value": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz", + "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=", + "requires": { + "get-value": "^2.0.3", + "has-values": "^0.1.4", + "isobject": "^2.0.0" + }, + "dependencies": { + "isobject": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz", + "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=", + "requires": { + "isarray": 
"1.0.0" + } + } + } + }, + "has-values": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz", + "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=" + }, + "isobject": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=" + } + } + }, + "untildify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/untildify/-/untildify-2.1.0.tgz", + "integrity": "sha1-F+soB5h/dpUunASF/DEdBqgmouA=", + "requires": { + "os-homedir": "^1.0.0" + } + }, + "unzip-response": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/unzip-response/-/unzip-response-2.0.1.tgz", + "integrity": "sha1-0vD3N9FrBhXnKmk17QQhRXLVb5c=" + }, + "urix": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz", + "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=" + }, + "url-parse-lax": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-1.0.0.tgz", + "integrity": "sha1-evjzA2Rem9eaJy56FKxovAYJ2nM=", + "requires": { + "prepend-http": "^1.0.1" + } + }, + "url-to-options": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/url-to-options/-/url-to-options-1.0.1.tgz", + "integrity": "sha1-FQWgOiiaSMvXpDTvuu7FBV9WM6k=" + }, + "use": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz", + "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==" + }, + "util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=" + }, + "uuid": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz", + "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA==" + }, + "validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "requires": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "verror": { + "version": "1.10.0", + "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", + "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", + "requires": { + "assert-plus": "^1.0.0", + "core-util-is": "1.0.2", + "extsprintf": "^1.2.0" + } + }, + "vfile": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/vfile/-/vfile-2.3.0.tgz", + "integrity": "sha512-ASt4mBUHcTpMKD/l5Q+WJXNtshlWxOogYyGYYrg4lt/vuRjC1EFQtlAofL5VmtVNIZJzWYFJjzGWZ0Gw8pzW1w==", + "requires": { + "is-buffer": "^1.1.4", + "replace-ext": "1.0.0", + "unist-util-stringify-position": "^1.0.0", + "vfile-message": "^1.0.0" + } + }, + "vfile-location": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-2.0.3.tgz", + "integrity": "sha512-zM5/l4lfw1CBoPx3Jimxoc5RNDAHHpk6AM6LM0pTIkm5SUSsx8ZekZ0PVdf0WEZ7kjlhSt7ZlqbRL6Cd6dBs6A==" + }, + "vfile-message": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-1.0.1.tgz", + "integrity": "sha512-vSGCkhNvJzO6VcWC6AlJW4NtYOVtS+RgCaqFIYUjoGIlHnFL+i0LbtYvonDWOMcB97uTPT4PRsyYY7REWC9vug==", + "requires": { + "unist-util-stringify-position": "^1.1.1" + } + }, + "vfile-reporter": { 
+ "version": "4.0.0", + "resolved": "https://registry.npmjs.org/vfile-reporter/-/vfile-reporter-4.0.0.tgz", + "integrity": "sha1-6m8K4TQvSEFXOYXgX5QXNvJ96do=", + "requires": { + "repeat-string": "^1.5.0", + "string-width": "^1.0.0", + "supports-color": "^4.1.0", + "unist-util-stringify-position": "^1.0.0", + "vfile-statistics": "^1.1.0" + }, + "dependencies": { + "has-flag": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-2.0.0.tgz", + "integrity": "sha1-6CB68cx7MNRGzHC3NLXovhj4jVE=" + }, + "supports-color": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-4.5.0.tgz", + "integrity": "sha1-vnoN5ITexcXN34s9WRJQRJEvY1s=", + "requires": { + "has-flag": "^2.0.0" + } + } + } + }, + "vfile-statistics": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/vfile-statistics/-/vfile-statistics-1.1.1.tgz", + "integrity": "sha512-dxUM6IYvGChHuwMT3dseyU5BHprNRXzAV0OHx1A769lVGsTiT50kU7BbpRFV+IE6oWmU+PwHdsTKfXhnDIRIgQ==" + }, + "weasel-words": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/weasel-words/-/weasel-words-0.1.1.tgz", + "integrity": "sha1-cTeUZYXHP+RIggE4U70ADF1oek4=" + }, + "wordwrap": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz", + "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=" + }, + "wrapped": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/wrapped/-/wrapped-1.0.1.tgz", + "integrity": "sha1-x4PZ2Aeyc+mwHoUWgKk4yHyQckI=", + "requires": { + "co": "3.1.0", + "sliced": "^1.0.1" + }, + "dependencies": { + "co": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/co/-/co-3.1.0.tgz", + "integrity": "sha1-TqVOpaCJOBUxheFSEMaNkJK8G3g=" + } + } + }, + "wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" + }, + "write": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/write/-/write-0.2.1.tgz", + "integrity": "sha1-X8A4KOJkzqP+kUVUdvejxWbLB1c=", + "requires": { + "mkdirp": "^0.5.1" + } + }, + "write-good": { + "version": "0.11.3", + "resolved": "https://registry.npmjs.org/write-good/-/write-good-0.11.3.tgz", + "integrity": "sha512-fDKIHO5wCzTLCOGNJl1rzzJrZlTIzfZl8msOoJQZzRhYo0X/tFTm4+2B1zTibFYK01Nnd1kLZBjj4xjcFLePNQ==", + "requires": { + "adverb-where": "0.0.9", + "e-prime": "^0.10.2", + "no-cliches": "^0.1.0", + "object.assign": "^4.0.4", + "passive-voice": "^0.1.0", + "too-wordy": "^0.1.4", + "weasel-words": "^0.1.1" + } + }, + "x-is-function": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/x-is-function/-/x-is-function-1.0.4.tgz", + "integrity": "sha1-XSlNw9Joy90GJYDgxd93o5HR+h4=" + }, + "x-is-string": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/x-is-string/-/x-is-string-0.1.0.tgz", + "integrity": "sha1-R0tQhlrzpJqcRlfwWs0UVFj3fYI=" + }, + "xml-escape": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/xml-escape/-/xml-escape-1.1.0.tgz", + "integrity": "sha1-OQTBQ/qOs6ADDsZG0pAqLxtwbEQ=" + }, + "xtend": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.1.tgz", + "integrity": "sha1-pcbVMr5lbiPbgg77lDofBJmNY68=" + } + } +} diff --git a/docs/package.json b/docs/package.json index c76bb37c43b..d45ba539b1e 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,7 +3,9 @@ "prettier": "^1.13.7", "remark-cli": "^5.0.0", "remark-lint-no-dead-urls": "^0.3.0", - "textlint": "^10.2.1" + 
"remark-lint-write-good": "^1.0.3", + "textlint": "^10.2.1", + "textlint-rule-stop-words": "^1.0.3" }, "name": "tendermint", "description": "Tendermint Core Documentation", @@ -31,7 +33,8 @@ "homepage": "https://tendermint.com/docs/", "remarkConfig": { "plugins": [ - "remark-lint-no-dead-urls" + "remark-lint-no-dead-urls", + "remark-lint-write-good" ] } } diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 85e42ba8354..00000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -sphinx -sphinx-autobuild -recommonmark -sphinx_rtd_theme diff --git a/docs/research/determinism.md b/docs/research/determinism.md index 2e23476c61e..b1c20566a2f 100644 --- a/docs/research/determinism.md +++ b/docs/research/determinism.md @@ -1,3 +1,3 @@ # On Determinism -Arguably, the most difficult part of blockchain programming is determinism - that is, ensuring that sources of indeterminism do not creep into the design of such systems. +See [Determinism](../spec/abci/abci.md#determinism). diff --git a/docs/research/transactional-semantics.md b/docs/research/transactional-semantics.md index bab1864e833..c4b54332869 100644 --- a/docs/research/transactional-semantics.md +++ b/docs/research/transactional-semantics.md @@ -1,25 +1,5 @@ # Transactional Semantics -In [Using Tendermint](./using-tendermint.md#broadcast-api) we -discussed different API endpoints for sending transactions and -differences between them. +See details of the [broadcast API](../tendermint-core/using-tendermint.md#broadcast-api) +and the [mempool WAL](../tendermint-core/running-in-production.md#mempool-wal). -What we have not yet covered is transactional semantics. - -When you send a transaction using one of the available methods, it first -goes to the mempool. Currently, it does not provide strong guarantees -like "if the transaction were accepted, it would be eventually included -in a block (given CheckTx passes)." - -For instance a tx could enter the mempool, but before it can be sent to -peers the node crashes. - -We are planning to provide such guarantees by using a WAL and replaying -transactions (See -[this issue](https://github.com/tendermint/tendermint/issues/248)), but -it's non-trivial to do this all efficiently. - -The temporary solution is for clients to monitor the node and resubmit -transaction(s) and/or send them to more nodes at once, so the -probability of all of them crashing at the same time and losing the msg -decreases substantially. diff --git a/docs/spec/README.md b/docs/spec/README.md index ab689d9d6cc..3e2c2bcd4b5 100644 --- a/docs/spec/README.md +++ b/docs/spec/README.md @@ -1,4 +1,4 @@ -# Tendermint Specification +# Overview This is a markdown specification of the Tendermint blockchain. It defines the base data structures, how they are validated, @@ -21,6 +21,7 @@ please submit them to our [bug bounty](https://tendermint.com/security)! ### Consensus Protocol - [Consensus Algorithm](/docs/spec/consensus/consensus.md) +- [Creating a proposal](/docs/spec/consensus/creating-proposal.md) - [Time](/docs/spec/consensus/bft-time.md) - [Light-Client](/docs/spec/consensus/light-client.md) @@ -31,7 +32,7 @@ please submit them to our [bug bounty](https://tendermint.com/security)! 
- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly - [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed - [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks -- Evidence: TODO +- Evidence: Forthcoming, see [this issue](https://github.com/tendermint/tendermint/issues/2329). ### Software @@ -57,7 +58,7 @@ is malicious or faulty. A commit in Tendermint is a set of signed messages from more than 2/3 of the total weight of the current Validator set. Validators take turns proposing blocks and voting on them. Once enough votes are received, the block is considered -committed. These votes are included in the *next* block as proof that the previous block +committed. These votes are included in the _next_ block as proof that the previous block was committed - they cannot be included in the current block, as that block has already been created. @@ -71,8 +72,8 @@ of the latest state of the blockchain. To achieve this, it embeds cryptographic commitments to certain information in the block "header". This information includes the contents of the block (eg. the transactions), the validator set committing the block, as well as the various results returned by the application. -Note, however, that block execution only occurs *after* a block is committed. -Thus, application results can only be included in the *next* block. +Note, however, that block execution only occurs _after_ a block is committed. +Thus, application results can only be included in the _next_ block. Also note that information like the transaction results and the validator set are never directly included in the block - only their cryptographic digests (Merkle roots) are. diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md new file mode 100644 index 00000000000..bb1c38b6e32 --- /dev/null +++ b/docs/spec/abci/README.md @@ -0,0 +1,19 @@ +# Overview + +ABCI is the interface between Tendermint (a state-machine replication engine) +and your application (the actual state machine). It consists of a set of +*methods*, where each method has a corresponding `Request` and `Response` +message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*` +messages and receiving the `Response*` messages in return. + +All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +This allows Tendermint to run applications written in any programming language. + +This specification is split as follows: + +- [Methods and Types](./abci.md) - complete details on all ABCI methods and + message types +- [Applications](./apps.md) - how to manage ABCI application state and other + details about building ABCI applications +- [Client and Server](./client-server.md) - for those looking to implement their + own ABCI application servers diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md new file mode 100644 index 00000000000..f057002ef1a --- /dev/null +++ b/docs/spec/abci/abci.md @@ -0,0 +1,487 @@ +# Methods and Types + +## Overview + +The ABCI message types are defined in a [protobuf +file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). 
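+
+Concretely, an application implements one handler per method. The following is
+a rough Go sketch of the application-side interface, grouped by the
+*connection* that drives each method (described next); consult the `abci/types`
+package for the authoritative definition, as exact signatures vary between
+Tendermint versions:
+
+```go
+// Sketch of the application-side ABCI interface, grouped by connection.
+// Echo and Flush are handled by the ABCI server, not the application.
+type Application interface {
+	// Info Connection
+	Info(RequestInfo) ResponseInfo
+	SetOption(RequestSetOption) ResponseSetOption
+	Query(RequestQuery) ResponseQuery
+
+	// Mempool Connection
+	CheckTx(tx []byte) ResponseCheckTx
+
+	// Consensus Connection
+	InitChain(RequestInitChain) ResponseInitChain
+	BeginBlock(RequestBeginBlock) ResponseBeginBlock
+	DeliverTx(tx []byte) ResponseDeliverTx
+	EndBlock(RequestEndBlock) ResponseEndBlock
+	Commit() ResponseCommit
+}
+```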
+ +ABCI methods are split across 3 separate ABCI *connections*: + +- `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit` +- `Mempool Connection`: `CheckTx` +- `Info Connection`: `Info, SetOption, Query` + +The `Consensus Connection` is driven by a consensus protocol and is responsible +for block execution. +The `Mempool Connection` is for validating new transactions, before they're +shared or included in a block. +The `Info Connection` is for initialization and for queries from the user. + +Additionally, there is a `Flush` method that is called on every connection, +and an `Echo` method that is just for debugging. + +More details on managing state across connections can be found in the section on +[ABCI Applications](apps.md). + +## Errors + +Some methods (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`) +don't return errors because an error would indicate a critical failure +in the application and there's nothing Tendermint can do. The problem +should be addressed and both Tendermint and the application restarted. + +All other methods (`SetOption, Query, CheckTx, DeliverTx`) return an +application-specific response `Code uint32`, where only `0` is reserved +for `OK`. + +Finally, `Query`, `CheckTx`, and `DeliverTx` include a `Codespace string`, whose +intended use is to disambiguate `Code` values returned by different domains of the +application. The `Codespace` is a namespace for the `Code`. + +## Tags + +Some methods (`CheckTx, BeginBlock, DeliverTx, EndBlock`) +include a `Tags` field in their `Response*`. Each tag is a key-value pair denoting +something about what happened during the method's execution. + +Tags can be used to index transactions and blocks according to what happened +during their execution. + +Keys and values in tags must be UTF-8 encoded strings (e.g. +"account.owner": "Bob", "balance": "100.0", +"time": "2018-01-02T12:30:00Z"). + +## Determinism + +ABCI applications must implement deterministic finite-state machines to be +securely replicated by the Tendermint consensus. This means block execution +over the Consensus Connection must be strictly deterministic: given the same +ordered set of requests, all nodes will compute identical responses for all +BeginBlock, DeliverTx, EndBlock, and Commit. This is critical, because the +responses are included in the header of the next block, either via a Merkle root +or directly, so all nodes must agree on exactly what they are. + +For this reason, it is recommended that applications not be exposed to any +external user or process except via the ABCI connections to a consensus engine +like Tendermint Core. The application must only change its state based on input +from block execution (BeginBlock, DeliverTx, EndBlock, Commit), and not through +any other kind of request. This is the only way to ensure all nodes see the same +transactions and compute the same results. + +If there is some non-determinism in the state machine, consensus will eventually +fail as nodes disagree over the correct values for the block header. The +non-determinism must be fixed and the nodes restarted. + +Sources of non-determinism in applications may include: + +- Hardware failures + - Cosmic rays, overheating, etc. +- Node-dependent state + - Random numbers + - Time +- Underspecification + - Library version changes + - Race conditions + - Floating point numbers + - JSON serialization + - Iterating through hash-tables/maps/dictionaries +- External Sources + - Filesystem + - Network calls (eg. 
some external REST API service) + +See [#56](https://github.com/tendermint/abci/issues/56) for the original discussion. + +Note that some methods (`SetOption, Query, CheckTx, DeliverTx`) return +explicitly non-deterministic data in the form of `Info` and `Log` fields. The `Log` is +intended for the literal output from the application's logger, while the +`Info` is any additional info that should be returned. These are the only fields +that are not included in block header computations, so we don't need agreement +on them. All other fields in the `Response*` must be strictly deterministic. + +## Block Execution + +The first time a new blockchain is started, Tendermint calls +`InitChain`. From then on, the following sequence of methods is executed for each +block: + +`BeginBlock, [DeliverTx], EndBlock, Commit` + +where one `DeliverTx` is called for each transaction in the block. +The result is an updated application state. +Cryptographic commitments to the results of DeliverTx, EndBlock, and +Commit are included in the header of the next block. + +## Messages + +### Echo + +- **Request**: + - `Message (string)`: A string to echo back +- **Response**: + - `Message (string)`: The input string +- **Usage**: + - Echo a string to test an ABCI client/server implementation. + +### Flush + +- **Usage**: + - Signals that messages queued on the client should be flushed to + the server. It is called periodically by the client + implementation to ensure asynchronous requests are actually + sent, and is called immediately to make a synchronous request, + which returns when the Flush response comes back. + +### Info + +- **Request**: + - `Version (string)`: The Tendermint software semantic version + - `BlockVersion (uint64)`: The Tendermint Block Protocol version + - `P2PVersion (uint64)`: The Tendermint P2P Protocol version +- **Response**: + - `Data (string)`: Some arbitrary information + - `Version (string)`: The application software semantic version + - `AppVersion (uint64)`: The application protocol version + - `LastBlockHeight (int64)`: Latest block for which the app has + called Commit + - `LastBlockAppHash ([]byte)`: Latest result of Commit +- **Usage**: + - Return information about the application state. + - Used to sync Tendermint with the application during a handshake + that happens on startup. + - The returned `AppVersion` will be included in the Header of every block. + - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to + be updated during `Commit`, ensuring that `Commit` is never + called twice for the same block height. + +### SetOption + +- **Request**: + - `Key (string)`: Key to set + - `Value (string)`: Value to set for key +- **Response**: + - `Code (uint32)`: Response code + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. +- **Usage**: + - Set non-consensus-critical, application-specific options. + - e.g. Key="min-fee", Value="100fermion" could set the minimum fee + required for CheckTx (but not DeliverTx - that would be + consensus critical). + +### InitChain + +- **Request**: + - `Time (google.protobuf.Timestamp)`: Genesis time. + - `ChainID (string)`: ID of the blockchain. + - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. + - `Validators ([]ValidatorUpdate)`: Initial genesis validators. + - `AppStateBytes ([]byte)`: Serialized initial application state. Amino-encoded JSON bytes. 
+- **Response**:
+    - `ConsensusParams (ConsensusParams)`: Initial
+      consensus-critical parameters.
+    - `Validators ([]ValidatorUpdate)`: Initial validator set (if non-empty).
+- **Usage**:
+    - Called once upon genesis.
+    - If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators
+    - If ResponseInitChain.Validators is not empty, the initial validator set will be the
+      ResponseInitChain.Validators (regardless of what is in RequestInitChain.Validators).
+    - This allows the app to decide if it wants to accept the initial validator
+      set proposed by Tendermint (ie. in the genesis file), or if it wants to use
+      a different one (perhaps computed based on some application specific
+      information in the genesis file).
+
+### Query
+
+- **Request**:
+    - `Data ([]byte)`: Raw query bytes. Can be used with or in lieu
+      of Path.
+    - `Path (string)`: Path of request, like an HTTP GET path. Can be
+      used with or in lieu of Data.
+    - Apps MUST interpret '/store' as a query by key on the
+      underlying store. The key SHOULD be specified in the Data field.
+    - Apps SHOULD allow queries over specific types like
+      '/accounts/...' or '/votes/...'
+    - `Height (int64)`: The block height for which you want the query
+      (default=0 returns data for the latest committed block). Note
+      that this is the height of the block containing the
+      application's Merkle root hash, which represents the state as it
+      was after committing the block at Height-1
+    - `Prove (bool)`: Return Merkle proof with response if possible
+- **Response**:
+    - `Code (uint32)`: Response code.
+    - `Log (string)`: The output of the application's logger. May
+      be non-deterministic.
+    - `Info (string)`: Additional information. May
+      be non-deterministic.
+    - `Index (int64)`: The index of the key in the tree.
+    - `Key ([]byte)`: The key of the matching data.
+    - `Value ([]byte)`: The value of the matching data.
+    - `Proof (Proof)`: Serialized proof for the value data, if requested, to be
+      verified against the `AppHash` for the given Height.
+    - `Height (int64)`: The block height from which data was derived.
+      Note that this is the height of the block containing the
+      application's Merkle root hash, which represents the state as it
+      was after committing the block at Height-1
+    - `Codespace (string)`: Namespace for the `Code`.
+- **Usage**:
+    - Query for data from the application at current or past height.
+    - Optionally return Merkle proof.
+    - Merkle proof includes self-describing `type` field to support many types
+      of Merkle trees and encoding formats.
+
+### BeginBlock
+
+- **Request**:
+    - `Hash ([]byte)`: The block's hash. This can be derived from the
+      block header.
+    - `Header (struct{})`: The block header.
+    - `LastCommitInfo (LastCommitInfo)`: Info about the last commit, including the
+      round, and the list of validators and which ones signed the last block.
+    - `ByzantineValidators ([]Evidence)`: List of evidence of
+      validators that acted maliciously.
+- **Response**:
+    - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+- **Usage**:
+    - Signals the beginning of a new block. Called prior to
+      any DeliverTxs.
+    - The header contains the height, timestamp, and more - it exactly matches the
+      Tendermint block header. We may seek to generalize this in the future.
+    - The `LastCommitInfo` and `ByzantineValidators` can be used to determine
+      rewards and punishments for the validators (see the sketch below). NOTE validators here do not
+      include pubkeys.
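+
+As a hedged illustration of that last point, a minimal Go sketch of a
+`BeginBlock` handler that tallies which validators signed the previous block.
+The `App` type, its `signCounts` field, and the import alias are assumptions
+of this sketch, not part of the spec:
+
+```go
+import abciTypes "github.com/tendermint/tendermint/abci/types"
+
+// BeginBlock records, per validator address, how often that validator signed
+// the last block; the app can later use the counts for reward distribution.
+func (app *App) BeginBlock(req abciTypes.RequestBeginBlock) abciTypes.ResponseBeginBlock {
+    for _, vote := range req.LastCommitInfo.Votes {
+        if vote.SignedLastBlock {
+            app.signCounts[string(vote.Validator.Address)]++
+        }
+    }
+    return abciTypes.ResponseBeginBlock{}
+}
+```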
+
+### CheckTx
+
+- **Request**:
+    - `Tx ([]byte)`: The request transaction bytes
+- **Response**:
+    - `Code (uint32)`: Response code
+    - `Data ([]byte)`: Result bytes, if any.
+    - `Log (string)`: The output of the application's logger. May
+      be non-deterministic.
+    - `Info (string)`: Additional information. May
+      be non-deterministic.
+    - `GasWanted (int64)`: Amount of gas requested for transaction.
+    - `GasUsed (int64)`: Amount of gas consumed by transaction.
+    - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+      transactions (eg. by account).
+    - `Codespace (string)`: Namespace for the `Code`.
+- **Usage**:
+    - Technically optional - not involved in processing blocks.
+    - Guardian of the mempool: every node runs CheckTx before letting a
+      transaction into its local mempool.
+    - The transaction may come from an external user or another node.
+    - CheckTx need not execute the transaction in full; a lightweight yet
+      stateful validation, like checking signatures and account balances,
+      is enough - there is no need to run code in a virtual machine.
+    - Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to
+      other nodes or included in a proposal block.
+    - Tendermint attributes no other value to the response code.
+
+### DeliverTx
+
+- **Request**:
+    - `Tx ([]byte)`: The request transaction bytes.
+- **Response**:
+    - `Code (uint32)`: Response code.
+    - `Data ([]byte)`: Result bytes, if any.
+    - `Log (string)`: The output of the application's logger. May
+      be non-deterministic.
+    - `Info (string)`: Additional information. May
+      be non-deterministic.
+    - `GasWanted (int64)`: Amount of gas requested for transaction.
+    - `GasUsed (int64)`: Amount of gas consumed by transaction.
+    - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+      transactions (eg. by account).
+    - `Codespace (string)`: Namespace for the `Code`.
+- **Usage**:
+    - The workhorse of the application - non-optional.
+    - Execute the transaction in full.
+    - `ResponseDeliverTx.Code == 0` only if the transaction is fully valid.
+
+### EndBlock
+
+- **Request**:
+    - `Height (int64)`: Height of the block just executed.
+- **Response**:
+    - `ValidatorUpdates ([]ValidatorUpdate)`: Changes to validator set (set
+      voting power to 0 to remove).
+    - `ConsensusParamUpdates (ConsensusParams)`: Changes to
+      consensus-critical time, size, and other parameters.
+    - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing
+- **Usage**:
+    - Signals the end of a block.
+    - Called after all transactions, prior to each Commit.
+    - Validator updates returned by block `H` impact blocks `H+1`, `H+2`, and
+      `H+3`, but the validator set itself only changes at `H+2`:
+        - `H+1`: NextValidatorsHash
+        - `H+2`: ValidatorsHash (and thus the validator set)
+        - `H+3`: LastCommitInfo (ie. the last validator set)
+    - Consensus params returned for block `H` apply for block `H+1`
+
+### Commit
+
+- **Response**:
+    - `Data ([]byte)`: The Merkle root hash of the application state
+- **Usage**:
+    - Persist the application state.
+ - Return an (optional) Merkle root hash of the application state + - `ResponseCommit.Data` is included as the `Header.AppHash` in the next block + - it may be empty + - Later calls to `Query` can return proofs about the application state anchored + in this Merkle root hash + - Note developers can return whatever they want here (could be nothing, or a + constant string, etc.), so long as it is deterministic - it must not be a + function of anything that did not come from the + BeginBlock/DeliverTx/EndBlock methods. + +## Data Types + +### Header + +- **Fields**: + - `Version (Version)`: Version of the blockchain and the application + - `ChainID (string)`: ID of the blockchain + - `Height (int64)`: Height of the block in the chain + - `Time (google.protobuf.Timestamp)`: Time of the block. It is the proposer's + local time when block was created. + - `NumTxs (int32)`: Number of transactions in the block + - `TotalTxs (int64)`: Total number of transactions in the blockchain until + now + - `LastBlockID (BlockID)`: Hash of the previous (parent) block + - `LastCommitHash ([]byte)`: Hash of the previous block's commit + - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `NextValidatorsHash ([]byte)`: Hash of the validator set for the next block + - `ConsensusHash ([]byte)`: Hash of the consensus parameters for this block + - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the + Merkle root of the application state after executing the previous block's + transactions + - `LastResultsHash ([]byte)`: Hash of the ABCI results returned by the last block + - `EvidenceHash ([]byte)`: Hash of the evidence included in this block + - `ProposerAddress ([]byte)`: Original proposer for the block +- **Usage**: + - Provided in RequestBeginBlock + - Provides important context about the current state of the blockchain - + especially height and time. + - Provides the proposer of the current block, for use in proposer-based + reward mechanisms. + +### Version + +- **Fields**: + - `Block (uint64)`: Protocol version of the blockchain data structures. + - `App (uint64)`: Protocol version of the application. +- **Usage**: + - Block version should be static in the life of a blockchain. + - App version may be updated over time by the application. + +### Validator + +- **Fields**: + - `Address ([]byte)`: Address of the validator (hash of the public key) + - `Power (int64)`: Voting power of the validator +- **Usage**: + - Validator identified by address + - Used in RequestBeginBlock as part of VoteInfo + - Does not include PubKey to avoid sending potentially large quantum pubkeys + over the ABCI + +### ValidatorUpdate + +- **Fields**: + - `PubKey (PubKey)`: Public key of the validator + - `Power (int64)`: Voting power of the validator +- **Usage**: + - Validator identified by PubKey + - Used to tell Tendermint to update the validator set + +### VoteInfo + +- **Fields**: + - `Validator (Validator)`: A validator + - `SignedLastBlock (bool)`: Indicates whether or not the validator signed + the last block +- **Usage**: + - Indicates whether a validator signed the last block, allowing for rewards + based on validator availability + +### PubKey + +- **Fields**: + - `Type (string)`: Type of the public key. A simple string like `"ed25519"`. + In the future, may indicate a serialization algorithm to parse the `Data`, + for instance `"amino"`. + - `Data ([]byte)`: Public key data. For a simple public key, it's just the + raw bytes. 
If the `Type` indicates an encoding algorithm, this is the
+      encoded public key.
+- **Usage**:
+    - A generic and extensible typed public key
+
+### Evidence
+
+- **Fields**:
+    - `Type (string)`: Type of the evidence. A hierarchical path like
+      "duplicate/vote".
+    - `Validator (Validator)`: The offending validator
+    - `Height (int64)`: Height when the offense was committed
+    - `Time (google.protobuf.Timestamp)`: Time of the block at height `Height`.
+      It is the proposer's local time when block was created.
+    - `TotalVotingPower (int64)`: Total voting power of the validator set at
+      height `Height`
+
+### LastCommitInfo
+
+- **Fields**:
+    - `Round (int32)`: Commit round.
+    - `Votes ([]VoteInfo)`: List of validator addresses in the last validator set
+      with their voting power and whether or not they signed a vote.
+
+### ConsensusParams
+
+- **Fields**:
+    - `BlockSize (BlockSizeParams)`: Parameters limiting the size of a block.
+    - `Evidence (EvidenceParams)`: Parameters limiting the validity of
+      evidence of byzantine behaviour.
+    - `Validator (ValidatorParams)`: Parameters limiting the types of pubkeys validators can use.
+
+### BlockSizeParams
+
+- **Fields**:
+    - `MaxBytes (int64)`: Max size of a block, in bytes.
+    - `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block.
+        - NOTE: blocks that violate this may be committed if there are Byzantine proposers.
+          It's the application's responsibility to handle this when processing a
+          block!
+
+### EvidenceParams
+
+- **Fields**:
+    - `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this
+      is considered stale and ignored.
+        - This should correspond with an app's "unbonding period" or other
+          similar mechanism for handling Nothing-At-Stake attacks.
+        - NOTE: this should change to time (instead of blocks)!
+
+### ValidatorParams
+
+- **Fields**:
+    - `PubKeyTypes ([]string)`: List of accepted pubkey types. Uses same
+      naming as `PubKey.Type`.
+
+### Proof
+
+- **Fields**:
+    - `Ops ([]ProofOp)`: List of chained Merkle proofs, of possibly different types
+        - The Merkle root of one op is the value being proven in the next op.
+        - The Merkle root of the final op should equal the ultimate root hash being
+          verified against.
+
+### ProofOp
+
+- **Fields**:
+    - `Type (string)`: Type of Merkle proof and how it's encoded.
+    - `Key ([]byte)`: Key in the Merkle tree that this proof is for.
+    - `Data ([]byte)`: Encoded Merkle proof for the key.
+
diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md
new file mode 100644
index 00000000000..acf2c4e66b7
--- /dev/null
+++ b/docs/spec/abci/apps.md
@@ -0,0 +1,427 @@
+# Applications
+
+Please ensure you've first read the spec for [ABCI Methods and Types](abci.md).
+
+Here we cover the following components of ABCI applications:
+
+- [Connection State](#state) - the interplay between ABCI connections and application state
+  and the differences between `CheckTx` and `DeliverTx`.
+- [Transaction Results](#transaction-results) - rules around transaction
+  results and validity
+- [Validator Set Updates](#validator-updates) - how validator sets are
+  changed during `InitChain` and `EndBlock`
+- [Query](#query) - standards for using the `Query` method and proofs about the
+  application state
+- [Crash Recovery](#crash-recovery) - handshake protocol to synchronize
+  Tendermint and the application on startup.
+ +## State + +Since Tendermint maintains three concurrent ABCI connections, it is typical +for an application to maintain a distinct state for each, and for the states to +be synchronized during `Commit`. + +### Commit + +Application state should only be persisted to disk during `Commit`. + +Before `Commit` is called, Tendermint locks and flushes the mempool so that no new messages will +be received on the mempool connection. This provides an opportunity to safely update all three +states to the latest committed state at once. + +When `Commit` completes, it unlocks the mempool. + +Note that it is not possible to send transactions to Tendermint during `Commit` - if your app +tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock. + +### Consensus Connection + +The Consensus Connection should maintain a `DeliverTxState` - +the working state for block execution. It should be updated by the calls to +`BeginBlock`, `DeliverTx`, and `EndBlock` during block execution and committed to +disk as the "latest committed state" during `Commit`. + +Updates made to the DeliverTxState by each method call must be readable by each subsequent method - +ie. the updates are linearizable. + +### Mempool Connection + +The Mempool Connection should maintain a `CheckTxState` +to sequentially process pending transactions in the mempool that have +not yet been committed. It should be initialized to the latest committed state +at the end of every `Commit`. + +The CheckTxState may be updated concurrently with the DeliverTxState, as +messages may be sent concurrently on the Consensus and Mempool connections. However, +before calling `Commit`, Tendermint will lock and flush the mempool connection, +ensuring that all existing CheckTx are responded to and no new ones can +begin. + +After `Commit`, CheckTx is run again on all transactions that remain in the +node's local mempool after filtering those included in the block. To prevent the +mempool from rechecking all transactions every time a block is committed, set +the configuration option `mempool.recheck=false`. + +Finally, the mempool will unlock and new transactions can be processed through CheckTx again. + +Note that CheckTx doesn't have to check everything that affects transaction validity; the +expensive things can be skipped. In fact, CheckTx doesn't have to check +anything; it might say that any transaction is a valid transaction. +Unlike DeliverTx, CheckTx is just there as +a sort of weak filter to keep invalid transactions out of the blockchain. It's +weak, because a Byzantine node doesn't care about CheckTx; it can propose a +block full of invalid transactions if it wants. + +### Info Connection + +The Info Connection should maintain a `QueryState` for answering queries from the user, +and for initialization when Tendermint first starts up (both described further +below). +It should always contain the latest committed state associated with the +latest committed block. + +QueryState should be set to the latest `DeliverTxState` at the end of every `Commit`, +ie. after the full block has been processed and the state committed to disk. +Otherwise it should never be modified. + +## Transaction Results + +`ResponseCheckTx` and `ResponseDeliverTx` contain the same fields. + +The `Info` and `Log` fields are non-deterministic values for debugging/convenience purposes +that are otherwise ignored. + +The `Data` field must be strictly deterministic, but can be arbitrary data. 
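+
+As a hedged illustration of these rules (not normative - the field values are
+made up), a `DeliverTx` response in Go might keep `Data` deterministic while
+using `Log` freely:
+
+```go
+import (
+    "fmt"
+    "time"
+
+    abciTypes "github.com/tendermint/tendermint/abci/types"
+)
+
+func makeDeliverTxResponse(result []byte) abciTypes.ResponseDeliverTx {
+    return abciTypes.ResponseDeliverTx{
+        Code: 0,      // deterministic: part of consensus
+        Data: result, // deterministic: hashed into LastResultsHash
+        // Log and Info are excluded from block header computations, so
+        // node-local values like timestamps are acceptable here.
+        Log: fmt.Sprintf("tx applied at %s", time.Now().Format(time.RFC3339)),
+    }
+}
+```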
+
+### Gas
+
+Ethereum introduced the notion of `gas` as an abstract representation of the
+cost of resources used by nodes when processing transactions. Every operation in the
+Ethereum Virtual Machine uses some amount of gas, and gas can be accepted at a market-variable price.
+Users propose a maximum amount of gas for their transaction; if the tx uses less, they get
+the difference credited back. Tendermint adopts a similar abstraction,
+though uses it only optionally and weakly, allowing applications to define
+their own sense of the cost of execution.
+
+In Tendermint, the `ConsensusParams.BlockSize.MaxGas` limits the amount of `gas` that can be used in a block.
+The default value is `-1`, meaning no limit, or that the concept of gas is
+meaningless.
+
+Responses contain a `GasWanted` and `GasUsed` field. The former is the maximum
+amount of gas the sender of a tx is willing to use, and the latter is how much it actually
+used. Applications should enforce that `GasUsed <= GasWanted` - ie. tx execution
+should halt before it can use more resources than it requested.
+
+When `MaxGas > -1`, Tendermint enforces the following rules:
+
+- `GasWanted <= MaxGas` for all txs in the mempool
+- `(sum of GasWanted in a block) <= MaxGas` when proposing a block
+
+If `MaxGas == -1`, no rules about gas are enforced.
+
+Note that Tendermint does not currently enforce anything about Gas in the consensus, only the mempool.
+This means it does not guarantee that committed blocks satisfy these rules!
+It is the application's responsibility to return non-zero response codes when gas limits are exceeded.
+
+The `GasUsed` field is ignored completely by Tendermint. That said, applications should enforce:
+
+- `GasUsed <= GasWanted` for any given transaction
+- `(sum of GasUsed in a block) <= MaxGas` for every block
+
+In the future, we intend to add a `Priority` field to the responses that can be
+used to explicitly prioritize txs in the mempool for inclusion in a block
+proposal. See [#1861](https://github.com/tendermint/tendermint/issues/1861).
+
+### CheckTx
+
+If `Code != 0`, it will be rejected from the mempool and hence
+not broadcast to other peers and not included in a proposal block.
+
+`Data` contains the result of the CheckTx transaction execution, if any. It is
+semantically meaningless to Tendermint.
+
+`Tags` include any tags for the execution, though since the transaction has not
+been committed yet, they are effectively ignored by Tendermint.
+
+### DeliverTx
+
+If DeliverTx returns `Code != 0`, the transaction will be considered invalid,
+though it is still included in the block.
+
+`Data` contains the result of the DeliverTx transaction execution, if any. It is
+semantically meaningless to Tendermint.
+
+Both the `Code` and `Data` are included in a structure that is hashed into the
+`LastResultsHash` of the next block header.
+
+`Tags` include any tags for the execution, which Tendermint will use to index
+the transaction. This allows transactions to be queried according to what
+events took place during their execution.
+
+See issue [#1007](https://github.com/tendermint/tendermint/issues/1007) for how
+the tags will be hashed into the next block header.
+
+## Validator Updates
+
+The application may set the validator set during InitChain, and update it during
+EndBlock.
+
+### InitChain
+
+ResponseInitChain can return a list of validators.
+If the list is empty, Tendermint will use the validators loaded in the genesis
+file.
+If the list is not empty, Tendermint will use it for the validator set.
+This way the application can determine the initial validator set for the
+blockchain.
+
+### EndBlock
+
+Updates to the Tendermint validator set can be made by returning
+`ValidatorUpdate` objects in the `ResponseEndBlock`:
+
+```
+message ValidatorUpdate {
+  PubKey pub_key
+  int64 power
+}
+
+message PubKey {
+  string type
+  bytes data
+}
+```
+
+The `pub_key` currently supports only one type:
+
+- `type = "ed25519"` and `data = <raw 32-byte ed25519 public key>`
+
+The `power` is the new voting power for the validator, with the
+following rules:
+
+- power must be non-negative
+- if power is 0, the validator must already exist, and will be removed from the
+  validator set
+- if power is non-0:
+    - if the validator does not already exist, it will be added to the validator
+      set with the given power
+    - if the validator does already exist, its power will be adjusted to the given power
+
+Note the updates returned in block `H` will only take effect at block `H+2`.
+
+## Consensus Parameters
+
+ConsensusParams enforce certain limits in the blockchain, like the maximum size
+of blocks, amount of gas used in a block, and the maximum acceptable age of
+evidence. They can be set in InitChain and updated in EndBlock.
+
+### BlockSize.MaxBytes
+
+The maximum size of a complete Amino encoded block.
+This is enforced by Tendermint consensus.
+
+This implies a maximum tx size that is this MaxBytes, less the expected size of
+the header, the validator set, and any included evidence in the block.
+
+Must have `0 < MaxBytes < 100 MB`.
+
+### BlockSize.MaxGas
+
+The maximum of the sum of `GasWanted` in a proposed block.
+This is *not* enforced by Tendermint consensus.
+It is left to the app to enforce (ie. if txs are included past the
+limit, they should return non-zero codes). It is used by Tendermint to limit the
+txs included in a proposed block.
+
+Must have `MaxGas >= -1`.
+If `MaxGas == -1`, no limit is enforced.
+
+### EvidenceParams.MaxAge
+
+This is the maximum age of evidence.
+This is enforced by Tendermint consensus.
+If a block includes evidence older than this, the block will be rejected
+(validators won't vote for it).
+
+Must have `0 < MaxAge`.
+
+### Updates
+
+The application may set the ConsensusParams during InitChain, and update them during
+EndBlock. If the ConsensusParams is empty, it will be ignored. Each field
+that is not empty will be applied in full. For instance, if updating the
+BlockSize.MaxBytes, applications must also set the other BlockSize fields (like
+BlockSize.MaxGas), even if they are unchanged, as the omitted fields will
+otherwise be set to 0.
+
+#### InitChain
+
+ResponseInitChain includes a ConsensusParams.
+If it's nil, Tendermint will use the params loaded in the genesis
+file. If it's not nil, Tendermint will use it.
+This way the application can determine the initial consensus params for the
+blockchain.
+
+#### EndBlock
+
+ResponseEndBlock includes a ConsensusParams.
+If it's nil, Tendermint will do nothing.
+If it's not nil, Tendermint will use it.
+This way the application can update the consensus params over time.
+
+Note the updates returned in block `H` will take effect right away for block
+`H+1`.
+
+## Query
+
+Query is a generic method with lots of flexibility to enable diverse sets
+of queries on application state. Tendermint makes use of Query to filter new peers
+based on ID and IP, and exposes Query to the user over RPC.
+
+Note that calls to Query are not replicated across nodes, but rather query the
+local node's state - hence they may return stale reads. For reads that require
+consensus, use a transaction.
+
+The most important use of Query is to return Merkle proofs of the application state at some height
+that can be used for efficient application-specific lite-clients.
+
+Note that Tendermint technically places no requirements on the Query
+message for normal operation - that is, the ABCI app developer need not implement
+Query functionality if they do not wish to.
+
+### Query Proofs
+
+The Tendermint block header includes a number of hashes, each providing an
+anchor for some type of proof about the blockchain. The `ValidatorsHash` enables
+quick verification of the validator set, the `DataHash` gives quick
+verification of the transactions included in the block, etc.
+
+The `AppHash` is unique in that it is application specific, and allows for
+application-specific Merkle proofs about the state of the application.
+While some applications keep all relevant state in the transactions themselves
+(like Bitcoin and its UTXOs), others maintain a separate state that is
+computed deterministically *from* transactions, but is not contained directly in
+the transactions themselves (like Ethereum contracts and accounts).
+For such applications, the `AppHash` provides a much more efficient way to verify lite-client proofs.
+
+ABCI applications can take advantage of more efficient lite-client proofs for
+their state as follows:
+
+- return the Merkle root of the deterministic application state in
+`ResponseCommit.Data`.
+- it will be included as the `AppHash` in the next block.
+- return efficient Merkle proofs about that application state in `ResponseQuery.Proof`
+  that can be verified using the `AppHash` of the corresponding block.
+
+For instance, this allows an application's lite-client to verify proofs of
+absence in the application state, something which is much less efficient to do using the block hash.
+
+Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees,
+where the leaves of one tree are the root hashes of others. To support this, and
+the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure:
+
+```
+message Proof {
+  repeated ProofOp ops
+}
+
+message ProofOp {
+  string type = 1;
+  bytes key = 2;
+  bytes data = 3;
+}
+```
+
+Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`.
+This allows ABCI to support many different kinds of Merkle trees, encoding
+formats, and proofs (eg. of presence and absence) just by varying the `type`.
+The `data` contains the actual encoded proof, encoded according to the `type`.
+When verifying the full proof, the root hash for one ProofOp is the value being
+verified for the next ProofOp in the list. The root hash of the final ProofOp in
+the list should match the `AppHash` being verified against.
+
+### Peer Filtering
+
+When Tendermint connects to a peer, it sends two queries to the ABCI application
+using the following paths, with no additional data:
+
+- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denotes the IP address and
+  the port of the connection
+- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
+  pubkey.Address() for the peer's PubKey)
+
+If either of these queries returns a non-zero ABCI code, Tendermint will refuse
+to connect to the peer.
+
+### Paths
+
+Queries are directed at paths, and may optionally include additional data; a
+sketch of path handling follows.
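+
+A hedged Go sketch of path-based routing inside a `Query` handler (the `App`
+type, its `store` field, and the store's `Get` method are assumptions of this
+sketch):
+
+```go
+import (
+    "strings"
+
+    abciTypes "github.com/tendermint/tendermint/abci/types"
+)
+
+func (app *App) Query(req abciTypes.RequestQuery) abciTypes.ResponseQuery {
+    switch {
+    case strings.HasPrefix(req.Path, "/store"):
+        // "/store" must be interpreted as a query by key on the underlying
+        // store, with the key given in req.Data.
+        value := app.store.Get(req.Data)
+        return abciTypes.ResponseQuery{Code: 0, Key: req.Data, Value: value}
+    case strings.HasPrefix(req.Path, "/p2p/filter/"):
+        return abciTypes.ResponseQuery{Code: 0} // zero code: accept the peer
+    default:
+        return abciTypes.ResponseQuery{Code: 1, Log: "unknown query path"}
+    }
+}
+```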
+
+The expectation is for there to be some number of high level paths
+differentiating concerns, like `/p2p`, `/store`, and `/app`. Currently,
+Tendermint only uses `/p2p`, for filtering peers. For more advanced use, see the
+implementation of
+[Query in the Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/v0.23.1/baseapp/baseapp.go#L333).
+
+## Crash Recovery
+
+On startup, Tendermint calls the `Info` method on the Info Connection to get the latest
+committed state of the app. The app MUST return information consistent with the
+last block it successfully completed Commit for.
+
+If the app successfully committed block H but not H+1, then `last_block_height = H` and
+`last_block_app_hash = <hash returned by Commit for block H>`. If the app
+failed during the Commit of block H, then `last_block_height = H-1` and
+`last_block_app_hash = <hash returned by Commit for block H-1>`.
+
+We now distinguish three heights, and describe how Tendermint syncs itself with
+the app.
+
+```
+storeBlockHeight = height of the last block Tendermint saw a commit for
+stateBlockHeight = height of the last block for which Tendermint completed all
+    block processing and saved all ABCI results to disk
+appBlockHeight = height of the last block for which ABCI app successfully
+    completed Commit
+```
+
+Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
+Note also we never call Commit on an ABCI app twice for the same height.
+
+The procedure is as follows.
+
+First, some simple start conditions:
+
+If `appBlockHeight == 0`, then call InitChain.
+
+If `storeBlockHeight == 0`, we're done.
+
+Now, some sanity checks:
+
+If `storeBlockHeight < appBlockHeight`, error
+If `storeBlockHeight < stateBlockHeight`, panic
+If `storeBlockHeight > stateBlockHeight+1`, panic
+
+Now, the meat:
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
+replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
+This happens if we completed processing the block, but the app forgot its height.
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
+This happens if we crashed at an opportune spot.
+
+If `storeBlockHeight == stateBlockHeight+1`,
+we started processing the block but didn't finish:
+
+    If `appBlockHeight < stateBlockHeight`,
+    replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
+    and replay the block at `storeBlockHeight` using the WAL.
+    This happens if the app forgot the last block it committed.
+
+    If `appBlockHeight == stateBlockHeight`,
+    replay the last block (storeBlockHeight) in full.
+    This happens if we crashed before the app finished Commit.
+
+    If `appBlockHeight == storeBlockHeight`,
+    update the state using the saved ABCI responses but don't run the block against the real app.
+    This happens if we crashed after the app finished Commit but before Tendermint saved the state.
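+
+As a hedged sketch of the app's side of this handshake, an `Info` handler might
+simply report the state persisted by its last completed Commit (the `App` type
+and its `state` fields are assumptions of this sketch):
+
+```go
+import abciTypes "github.com/tendermint/tendermint/abci/types"
+
+// Info reports the height and app hash of the last block for which Commit
+// completed, letting Tendermint decide which blocks to replay on startup.
+func (app *App) Info(req abciTypes.RequestInfo) abciTypes.ResponseInfo {
+    return abciTypes.ResponseInfo{
+        LastBlockHeight:  app.state.Height,  // last successfully committed height
+        LastBlockAppHash: app.state.AppHash, // hash returned by that Commit
+    }
+}
+```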
diff --git a/docs/spec/abci/client-server.md b/docs/spec/abci/client-server.md
new file mode 100644
index 00000000000..5ac7b3eb481
--- /dev/null
+++ b/docs/spec/abci/client-server.md
@@ -0,0 +1,110 @@
+# Client and Server
+
+This section is for those looking to implement their own ABCI Server, perhaps in
+a new programming language.
+
+You are expected to have read [ABCI Methods and Types](./abci.md) and [ABCI
+Applications](./apps.md).
+
+## Message Protocol
+
+The message protocol consists of pairs of requests and responses defined in the
+[protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+
+Some messages have no fields, while others may include byte-arrays, strings, integers,
+or custom protobuf types.
+
+For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview).
+
+For each request, a server should respond with the corresponding
+response, where the order of requests is preserved in the order of
+responses.
+
+## Server Implementations
+
+To use ABCI in your programming language of choice, there must be an ABCI
+server in that language. Tendermint supports three implementations of the ABCI, written in Go:
+
+- In-process (Golang only)
+- ABCI-socket
+- GRPC
+
+The latter two can be tested using the `abci-cli` by setting the `--abci` flag
+appropriately (ie. to `socket` or `grpc`).
+
+See examples, in various stages of maintenance, in
+[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server),
+[JavaScript](https://github.com/tendermint/js-abci),
+[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci),
+[C++](https://github.com/mdyring/cpp-tmsp), and
+[Java](https://github.com/jTendermint/jabci).
+
+### In Process
+
+The simplest implementation uses function calls within Golang.
+This means ABCI applications written in Golang can be compiled with Tendermint Core and run as a single binary.
+
+
+### GRPC
+
+If GRPC is available in your language, this is the easiest approach,
+though it will have significant performance overhead.
+
+To get started with GRPC, copy in the [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto)
+and compile it using the GRPC plugin for your language. For instance,
+for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`.
+See the [grpc documentation for more details](http://www.grpc.io/docs/).
+`protoc` will autogenerate all the necessary code for ABCI client and
+server in your language, including whatever interface your application
+must satisfy to be used by the ABCI server for handling requests.
+
+Note the length-prefixing used in the socket implementation (TSP) does not apply for GRPC.
+
+### TSP
+
+The Tendermint Socket Protocol is an asynchronous, raw socket server which provides ordered message passing over unix or tcp.
+Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).
+
+If GRPC is not available in your language, or you require higher
+performance, or otherwise enjoy programming, you may implement your own
+ABCI server using the Tendermint Socket Protocol. The first step is still to auto-generate the relevant data
+types and codec in your language using `protoc`. In addition to being proto3 encoded, messages coming over
+the socket are length-prefixed to facilitate use as a streaming protocol. proto3 doesn't have an
+official length-prefix standard, so we use our own: each message is prefixed
+with the signed varint encoding of its length, as noted above.
+
+For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4
+bytes), the length-prefixed message is 0x08DEADBEEF, since 0x08 is the
+signed varint encoding of 4. If the proto3 encoded ABCI message is 65535
+bytes long, the length-prefixed message would be like 0xFEFF07....
+
+The benefit of using this `varint` encoding over the old version (where integers
+were encoded as `<len of len><big endian len>`) is that
+it is the standard way to encode integers in Protobuf. It is also generally shorter.
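+
+A minimal, hedged Go sketch of this framing (an illustration, not the
+reference implementation):
+
+```go
+package abciserver
+
+import "encoding/binary"
+
+// encodeLengthPrefixed frames a proto3-encoded message for the socket
+// protocol: a signed (zigzag) varint of the length, then the raw bytes.
+func encodeLengthPrefixed(msg []byte) []byte {
+    var buf [binary.MaxVarintLen64]byte
+    n := binary.PutVarint(buf[:], int64(len(msg))) // signed varint, per the spec
+    return append(buf[:n], msg...)
+}
+```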
+
+As noted above, this prefixing does not apply for GRPC.
+
+An ABCI server must also be able to support multiple connections, as
+Tendermint uses three connections.
+
+### Async vs Sync
+
+The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages.
+This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward
+transactions to the app before it's finished processing previous ones.
+
+Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other
+messages are sent synchronously.
+
+## Client
+
+There are currently two use-cases for an ABCI client. One is a testing
+tool, as in the `abci-cli`, which allows ABCI requests to be sent via
+command line. The other is a consensus engine, such as Tendermint Core,
+which makes requests to the application every time a new transaction is
+received or a block is committed.
+
+It is unlikely that you will need to implement a client. For details of
+our client, see
+[here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md
index e3171818a56..d96a3c7b8f8 100644
--- a/docs/spec/blockchain/blockchain.md
+++ b/docs/spec/blockchain/blockchain.md
@@ -1,4 +1,4 @@
-# Tendermint Blockchain
+# Blockchain
 
 Here we describe the data structures in the Tendermint blockchain and the
 rules for validating them.
@@ -8,25 +8,29 @@ The Tendermint blockchains consists of a short list of basic data types:
 
 - `Block`
 - `Header`
-- `Vote`
+- `Version`
 - `BlockID`
-- `Signature`
-- `Evidence`
+- `Time`
+- `Data` (for transactions)
+- `Commit` and `Vote`
+- `EvidenceData` and `Evidence`
 
 ## Block
 
-A block consists of a header, a list of transactions, a list of votes (the commit),
+A block consists of a header, transactions, votes (the commit),
 and a list of evidence of malfeasance (ie. signing conflicting votes).
 
 ```go
 type Block struct {
     Header     Header
-    Txs        [][]byte
-    LastCommit []Vote
-    Evidence   []Evidence
+    Txs        Data
+    Evidence   EvidenceData
+    LastCommit Commit
 }
 ```
+
+Note the `LastCommit` is the set of votes that committed the last block.
+
 ## Header
 
 A block header contains metadata about the block and about the consensus, as well as commitments to
@@ -34,36 +38,48 @@ the data in the current block, the previous block, and the results returned by t
 
 ```go
 type Header struct {
-    // block metadata
-    Version             string  // Version string
-    ChainID             string  // ID of the chain
-    Height              int64   // Current block height
-    Time                int64   // UNIX time, in millisconds
-
-    // current block
-    NumTxs              int64   // Number of txs in this block
-    TxHash              []byte  // SimpleMerkle of the block.Txs
-    LastCommitHash      []byte  // SimpleMerkle of the block.LastCommit
-
-    // previous block
-    TotalTxs            int64   // prevBlock.TotalTxs + block.NumTxs
-    LastBlockID         BlockID // BlockID of prevBlock
-
-    // application
-    ResultsHash         []byte  // SimpleMerkle of []abci.Result from prevBlock
-    AppHash             []byte  // Arbitrary state digest
-    ValidatorsHash      []byte  // SimpleMerkle of the current ValidatorSet
-    NextValidatorsHash  []byte  // SimpleMerkle of the next ValidatorSet
-    ConsensusParamsHash []byte  // SimpleMerkle of the ConsensusParams
-
-    // consensus
-    Proposer            []byte  // Address of the block proposer
-    EvidenceHash        []byte  // SimpleMerkle of []Evidence
-}
+    // basic block info
+    Version  Version
+    ChainID  string
+    Height   int64
+    Time     Time
+    NumTxs   int64
+    TotalTxs int64
+
+    // prev block info
+    LastBlockID BlockID
+
+    // hashes of block data
+    LastCommitHash []byte // commit from validators from the last block
+    DataHash       []byte // Merkle root of transactions
+
+    // hashes from the app output from the prev block
+    ValidatorsHash     []byte // validators for the current block
+    NextValidatorsHash []byte // validators for the next block
+    ConsensusHash      []byte // consensus params for current block
+    AppHash            []byte // state after txs from the previous block
+    LastResultsHash    []byte // root hash of all results from the txs from the previous block
+
+    // consensus info
+    EvidenceHash    []byte // evidence included in the block
+    ProposerAddress []byte // original proposer of the block
+}
 ```
 
 Further details on each of these fields is described below.
 
+## Version
+
+The `Version` contains the protocol version for the blockchain and the
+application as two `uint64` values:
+
+```go
+type Version struct {
+    Block uint64
+    App   uint64
+}
+```
+
+
 ## BlockID
 
 The `BlockID` contains two distinct Merkle roots of the block.
@@ -85,6 +101,44 @@ type PartsHeader struct {
 }
 ```
 
+TODO: link to details of merkle sums.
+
+## Time
+
+Tendermint uses the
+[Google.Protobuf.WellKnownTypes.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp)
+format, which uses two integers, one for Seconds and one for Nanoseconds.
+
+NOTE: there is currently a small divergence between Tendermint and the
+Google.Protobuf.WellKnownTypes.Timestamp that should be resolved. See [this
+issue](https://github.com/tendermint/go-amino/issues/223) for details.
+
+## Data
+
+Data is just a wrapper for a list of transactions, where transactions are
+arbitrary byte arrays:
+
+```
+type Data struct {
+    Txs [][]byte
+}
+```
+
+## Commit
+
+Commit is a simple wrapper for a list of votes, with one vote for each
+validator. It also contains the relevant BlockID:
+
+```
+type Commit struct {
+    BlockID    BlockID
+    Precommits []Vote
+}
+```
+
+NOTE: this will likely change to reduce the commit size by eliminating redundant
+information - see [issue #1648](https://github.com/tendermint/tendermint/issues/1648).
+
 ## Vote
 
 A vote is a signed message from a validator for a particular block.
@@ -92,63 +146,52 @@ The vote includes information about the validator signing it. ```go type Vote struct { - Timestamp int64 - Address []byte - Index int - Height int64 - Round int - Type int8 - BlockID BlockID - Signature Signature + Type SignedMsgType // byte + Height int64 + Round int + Timestamp time.Time + BlockID BlockID + ValidatorAddress Address + ValidatorIndex int + Signature []byte } ``` There are two types of votes: -a *prevote* has `vote.Type == 1` and -a *precommit* has `vote.Type == 2`. +a _prevote_ has `vote.Type == 1` and +a _precommit_ has `vote.Type == 2`. ## Signature -Tendermint allows for multiple signature schemes to be used by prepending a single type-byte -to the signature bytes. Different signatures may also come with fixed or variable lengths. -Currently, Tendermint supports Ed25519 and Secp256k1. +Signatures in Tendermint are raw bytes representing the underlying signature. +The only signature scheme currently supported for Tendermint validators is +ED25519. The signature is the raw 64-byte ED25519 signature. -### ED25519 +## EvidenceData -An ED25519 signature has `Type == 0x1`. It looks like: +EvidenceData is a simple wrapper for a list of evidence: -```go -// Implements Signature -type Ed25519Signature struct { - Type int8 = 0x1 - Signature [64]byte +``` +type EvidenceData struct { + Evidence []Evidence } ``` -where `Signature` is the 64 byte signature. - -### Secp256k1 +## Evidence -A `Secp256k1` signature has `Type == 0x2`. It looks like: +Evidence in Tendermint is implemented as an interface. +This means any evidence is encoded using its Amino prefix. +There is currently only a single type, the `DuplicateVoteEvidence`. -```go -// Implements Signature -type Secp256k1Signature struct { - Type int8 = 0x2 - Signature []byte -} ``` - -where `Signature` is the DER encoded signature, ie: - -```hex -0x30 <0x02> 0x2 . +// amino name: "tendermint/DuplicateVoteEvidence" +type DuplicateVoteEvidence struct { + PubKey PubKey + VoteA Vote + VoteB Vote +} ``` -## Evidence - -TODO - ## Validation Here we describe the validation rules for every element in a block. @@ -162,10 +205,10 @@ We refer to certain globally available objects: `prevBlock` is the `block` at the previous height, and `state` keeps track of the validator set, the consensus parameters and other results from the application. At the point when `block` is the block under consideration, -the current version of the `state` corresponds to the state -after executing transactions from the `prevBlock`. +the current version of the `state` corresponds to the state +after executing transactions from the `prevBlock`. Elements of an object are accessed as expected, -ie. `block.Header`. +ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`. ### Header @@ -174,11 +217,20 @@ A Header is valid if its corresponding fields are valid. ### Version -Arbitrary string. +``` +block.Version.Block == state.Version.Block +block.Version.App == state.Version.App +``` + +The block version must match the state version. ### ChainID -Arbitrary constant string. +``` +len(block.ChainID) < 50 +``` + +ChainID must be maximum 50 UTF-8 symbols. ### Height @@ -191,38 +243,35 @@ The height is an incrementing integer. The first block has `block.Header.Height ### Time -The median of the timestamps of the valid votes in the block.LastCommit. -Corresponds to the number of nanoseconds, with millisecond resolution, since January 1, 1970. 
+```
+block.Header.Timestamp >= prevBlock.Header.Timestamp + 1 ms
+block.Header.Timestamp == MedianTime(block.LastCommit, state.LastValidators)
+```
+
+The block timestamp must be monotonic.
+It must equal the weighted median of the timestamps of the valid votes in the block.LastCommit.
 
 Note: the timestamp of a vote must be greater by at least one millisecond than that of the
 block being voted on.
 
-### NumTxs
+The timestamp of the first block must be equal to the genesis time (since
+there are no votes to compute the median).
 
-```go
-block.Header.NumTxs == len(block.Txs)
+```
-
-Number of transactions included in the block.
-
-### TxHash
-
-```go
-block.Header.TxHash == SimpleMerkleRoot(block.Txs)
+if block.Header.Height == 1 {
+    block.Header.Timestamp == genesisTime
+}
 ```
 
-Simple Merkle root of the transactions in the block.
+See the section on [BFT time](../consensus/bft-time.md) for more details.
 
-### LastCommitHash
+### NumTxs
 
 ```go
-block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit)
+block.Header.NumTxs == len(block.Txs.Txs)
 ```
 
-Simple Merkle root of the votes included in the block.
-These are the votes that committed the previous block.
-
-The first block has `block.Header.LastCommitHash == []byte{}`
+Number of transactions included in the block.
 
 ### TotalTxs
 
@@ -254,25 +303,24 @@ which are held in the `state` and may be updated by the application.
 
 The first block has `block.Header.LastBlockID == BlockID{}`.
 
-### ResultsHash
+### LastCommitHash
 
 ```go
-block.ResultsHash == SimpleMerkleRoot(state.LastResults)
+block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit)
 ```
 
-Simple Merkle root of the results of the transactions in the previous block.
+Simple Merkle root of the votes included in the block.
+These are the votes that committed the previous block.
 
-The first block has `block.Header.ResultsHash == []byte{}`.
+The first block has `block.Header.LastCommitHash == []byte{}`
 
-### AppHash
+### DataHash
 
 ```go
-block.AppHash == state.AppHash
+block.Header.DataHash == SimpleMerkleRoot(block.Txs.Txs)
 ```
 
-Arbitrary byte array returned by the application after executing and commiting the previous block.
-
-The first block has `block.Header.AppHash == []byte{}`.
+Simple Merkle root of the transactions included in the block.
 
 ### ValidatorsHash
 
@@ -288,27 +336,38 @@ This can be used to validate the `LastCommit` included in the next block.
 ```go
 block.NextValidatorsHash == SimpleMerkleRoot(state.NextValidators)
 ```
+
 Simple Merkle root of the next validator set that will be the validator set
 that commits the next block.
-Modifications to the validator set are defined by the application.
+This is included so that the current validator set gets a chance to sign the
+next validator set's Merkle root.
 
 ### ConsensusParamsHash
 
 ```go
-block.ConsensusParamsHash == SimpleMerkleRoot(state.ConsensusParams)
+block.ConsensusParamsHash == TMHASH(amino(state.ConsensusParams))
 ```
 
-Simple Merkle root of the consensus parameters.
-May be updated by the application.
+Hash of the amino-encoded consensus parameters.
 
-### Proposer
+### AppHash
 
 ```go
-block.Header.Proposer in state.Validators
+block.AppHash == state.AppHash
 ```
 
-Original proposer of the block. Must be a current validator.
+Arbitrary byte array returned by the application after executing and committing the previous block. It serves as the basis for validating any Merkle proofs that come from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself.
-NOTE: we also need to track the round. +The first block has `block.Header.AppHash == []byte{}`. + +### LastResultsHash + +```go +block.ResultsHash == SimpleMerkleRoot(state.LastResults) +``` + +Simple Merkle root of the results of the transactions in the previous block. + +The first block has `block.Header.ResultsHash == []byte{}`. ## EvidenceHash @@ -318,6 +377,14 @@ block.EvidenceHash == SimpleMerkleRoot(block.Evidence) Simple Merkle root of the evidence of Byzantine behaviour included in this block. +### ProposerAddress + +```go +block.Header.ProposerAddress in state.Validators +``` + +Address of the original proposer of the block. Must be a current validator. + ## Txs Arbitrary length array of arbitrary length byte-arrays. @@ -366,15 +433,24 @@ must be greater than 2/3 of the total voting power of the complete validator set ### Vote A vote is a signed message broadcast in the consensus for a particular block at a particular height and round. -When stored in the blockchain or propagated over the network, votes are encoded in TMBIN. -For signing, votes are encoded in JSON, and the ChainID is included, in the form of the `CanonicalSignBytes`. +When stored in the blockchain or propagated over the network, votes are encoded in Amino. +For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via +`Vote.SignBytes` which includes the `ChainID`, and uses a different ordering of +the fields. -We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the CanonicalSignBytes +We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` using the given ChainID: ```go -func (v Vote) Verify(chainID string, pubKey PubKey) bool { - return pubKey.Verify(v.Signature, CanonicalSignBytes(chainID, v)) +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + + if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil } ``` @@ -383,16 +459,7 @@ against the given signature and message bytes. ## Evidence -There is currently only one kind of evidence: - -``` -// amino: "tendermint/DuplicateVoteEvidence" -type DuplicateVoteEvidence struct { - PubKey crypto.PubKey - VoteA *Vote - VoteB *Vote -} -``` +There is currently only one kind of evidence, `DuplicateVoteEvidence`. DuplicateVoteEvidence `ev` is valid if @@ -427,11 +494,8 @@ Execute(s State, app ABCIApp, block Block) State { AppHash: AppHash, LastValidators: state.Validators, Validators: state.NextValidators, - NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges), + NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges), ConsensusParams: UpdateConsensusParams(state.ConsensusParams, ConsensusParamChanges), } } - ``` - - diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 49c88475bd3..f5120cdd411 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -1,4 +1,4 @@ -# Tendermint Encoding +# Encoding ## Amino @@ -48,33 +48,33 @@ spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambigua In what follows, we provide the type names and prefix bytes directly. Notice that when encoding byte-arrays, the length of the byte-array is appended -to the PrefixBytes. Thus the encoding of a byte array becomes ` - `. 
In other words, to encode any type listed below you do not need to be +to the PrefixBytes. Thus the encoding of a byte array becomes ` `. In other words, to encode any type listed below you do not need to be familiar with amino encoding. You can simply use below table and concatenate Prefix || Length (of raw bytes) || raw bytes ( while || stands for byte concatenation here). -| Type | Name | Prefix | Length | Notes | -| ---- | ---- | ------ | ----- | ------ | -| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | -| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | -| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | -| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | -| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | | +| Type | Name | Prefix | Length | Notes | +| ------------------ | ----------------------------- | ---------- | -------- | ----- | +| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | +| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | +| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | +| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | +| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | | | SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable | + | ### Examples 1. For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey -`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` -would be encoded as -`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` + `020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` + would be encoded as + `EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` 2. For example, the variable size Secp256k1 signature (in this particular example 70 or 0x46 bytes) -`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` -would be encoded as -`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` + `304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` + would be encoded as + `16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` ### Addresses @@ -152,38 +152,36 @@ func MakeParts(obj interface{}, partSize int) []Part For an overview of Merkle trees, see [wikipedia](https://en.wikipedia.org/wiki/Merkle_tree) - A Simple Tree is a simple compact binary tree for a static list of items. Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure. In a Simple Tree, the transactions and validation signatures of a block are hashed using this simple merkle tree logic. If the number of items is not a power of two, the tree will not be full and some leaf nodes will be at different levels. 
Simple Tree tries to keep both sides of the tree the same size, but the left side may be one -greater, for example: +greater, for example: ``` - Simple Tree with 6 items Simple Tree with 7 items - - * * - / \ / \ - / \ / \ - / \ / \ - / \ / \ - * * * * - / \ / \ / \ / \ - / \ / \ / \ / \ - / \ / \ / \ / \ + Simple Tree with 6 items Simple Tree with 7 items + + * * + / \ / \ + / \ / \ + / \ / \ + / \ / \ + * * * * + / \ / \ / \ / \ + / \ / \ / \ / \ + / \ / \ / \ / \ * h2 * h5 * * * h6 - / \ / \ / \ / \ / \ + / \ / \ / \ / \ / \ h0 h1 h3 h4 h0 h1 h2 h3 h4 h5 ``` -Tendermint always uses the `TMHASH` hash function, which is the first 20-bytes -of the SHA256: +Tendermint always uses the `TMHASH` hash function, which is equivalent to +SHA256: ``` func TMHASH(bz []byte) []byte { - shasum := SHA256(bz) - return shasum[:20] + return SHA256(bz) } ``` @@ -217,14 +215,13 @@ prefix) before being concatenated together and hashed. Note: we will abuse notion and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`. For `struct` arguments, we compute a `[][]byte` containing the hash of each -field in the struct sorted by the hash of the field name. +field in the struct, in the same order the fields appear in the struct. For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements. ### Simple Merkle Proof Proof that a leaf is in a Merkle tree consists of a simple structure: - ``` type SimpleProof struct { Aunts [][]byte @@ -265,25 +262,23 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt The Simple Tree is used to merkelize a list of items, so to merkelize a (short) dictionary of key-value pairs, encode the dictionary as an -ordered list of ``KVPair`` structs. The block hash is such a hash -derived from all the fields of the block ``Header``. The state hash is +ordered list of `KVPair` structs. The block hash is such a hash +derived from all the fields of the block `Header`. The state hash is similarly derived. ### IAVL+ Tree -Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/core/multistore.md) +Because Tendermint only uses a Simple Merkle Tree, application developers are expect to use their own Merkle tree in their applications. For example, the IAVL+ Tree - an immutable self-balancing binary tree for persisting application state is used by the [Cosmos SDK](https://github.com/cosmos/cosmos-sdk/blob/develop/docs/sdk/core/multistore.md) ## JSON ### Amino -TODO: improve this - Amino also supports JSON encoding - registered types are simply encoded as: ``` { - "type": "", + "type": "", "value": } ``` @@ -298,20 +293,28 @@ For instance, an ED25519 PubKey would look like: ``` Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the -`"type"` is the full disfix bytes for Ed25519 pubkeys. - +`"type"` is the amino name for Ed25519 pubkeys. ### Signed Messages -Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format. +Signed messages (eg. votes, proposals) in the consensus are encoded using Amino. -When signing, the elements of a message are sorted by key and the sorted message is embedded in an -outer JSON that includes a `chain_id` field. 
-We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look
-like:
+When signing, the elements of a message are re-ordered so the fixed-length fields
+are first, making it easy to quickly check the type, height, and round.
+The `ChainID` is also appended to the end.
+We call this encoding the SignBytes. For instance, SignBytes for a vote is the Amino encoding of the following struct:

-```json
-{"chain_id":"my-chain-id","vote":{"block_id":{"hash":DEADBEEF,"parts":{"hash":BEEFDEAD,"total":3}},"height":3,"round":2,"timestamp":1234567890, "type":2}
+```go
+type CanonicalVote struct {
+	Type      byte
+	Height    int64 `binary:"fixed64"`
+	Round     int64 `binary:"fixed64"`
+	Timestamp time.Time
+	BlockID   CanonicalBlockID
+	ChainID   string
+}
 ```

-Note how the fields within each level are sorted.
+The field ordering and the fixed-size encoding of the first three fields are optimized to ease parsing of SignBytes
+in HSMs. They create fixed offsets for relevant fields that need to be read in this context.
+See [#1622](https://github.com/tendermint/tendermint/issues/1622) for more details.
diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md
index df86cd45021..502f9d69620 100644
--- a/docs/spec/blockchain/state.md
+++ b/docs/spec/blockchain/state.md
@@ -1,4 +1,4 @@
-# Tendermint State
+# State

 ## State

@@ -8,13 +8,14 @@ transactions are never included in blocks, but their Merkle roots are - the stat
 Note that the `State` object itself is an implementation detail, since it is
 never included in a block or gossipped over the network, and we never compute
-its hash. However, the types it contains are part of the specification, since
-their Merkle roots are included in blocks.
-
-For details on an implementation of `State` with persistence, see TODO
+its hash. Thus we do not include here details of how the `State` object is
+persisted or queried. That said, the types it contains are part of the specification, since
+their Merkle roots are included in blocks and their values are used in
+validation.

 ```go
 type State struct {
+    Version Version
     LastResults []Result
     AppHash []byte

@@ -32,25 +33,20 @@ type State struct {
 type Result struct {
     Code uint32
     Data []byte
-    Tags []KVPair
-}
-
-type KVPair struct {
-    Key   []byte
-    Value []byte
 }
 ```

 `Result` is the result of executing a transaction against the application.
-It returns a result code, an arbitrary byte array (ie. a return value),
-and a list of key-value pairs ordered by key. The key-value pairs, or tags,
-can be used to index transactions according to their "effects", which are
-represented in the tags.
+It returns a result code and an arbitrary byte array (ie. a return value).
+
+NOTE: the Result needs to be updated to include more fields returned from
+processing transactions, like gas variables and tags - see
+[issue 1007](https://github.com/tendermint/tendermint/issues/1007).

 ### Validator

 A validator is an active participant in the consensus with a public key and a voting power.
-Validator's also contain an address which is derived from the PubKey:
+Validators also contain an address field, which is a hash digest of the PubKey.

 ```go
 type Validator struct {
@@ -60,7 +56,10 @@ type Validator struct {
 }
 ```

-The `state.Validators` and `state.LastValidators` must always by sorted by validator address,
+When hashing the Validator struct, the address is not included,
+because it is redundant with the pubkey.
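+
+To make this concrete, here is a minimal, hedged Go sketch of hashing a validator
+without its address. The real implementation hashes the amino encoding of the
+struct, so the ad-hoc `PubKey || VotingPower` concatenation below is only an
+illustrative assumption, not the canonical byte layout:
+
+```go
+package main
+
+import (
+	"crypto/sha256"
+	"encoding/binary"
+	"fmt"
+)
+
+// Validator mirrors the struct above; the Address is derived from the PubKey,
+// so it carries no extra information.
+type Validator struct {
+	Address     []byte
+	PubKey      []byte // raw public key bytes
+	VotingPower int64
+}
+
+// hashBytes returns the bytes fed to the hash for this validator. The Address
+// is deliberately omitted, since it is redundant with the PubKey.
+func (v Validator) hashBytes() []byte {
+	power := make([]byte, 8)
+	binary.BigEndian.PutUint64(power, uint64(v.VotingPower))
+	sum := sha256.Sum256(append(v.PubKey, power...))
+	return sum[:]
+}
+
+func main() {
+	v := Validator{PubKey: []byte("example-pubkey-bytes"), VotingPower: 10}
+	fmt.Printf("%X\n", v.hashBytes())
+}
+```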
+
+The `state.Validators`, `state.LastValidators`, and `state.NextValidators` must always be sorted by validator address,
 so that there is a canonical order for computing the SimpleMerkleRoot.

 We also define a `TotalVotingPower` function, to return the total voting power:

@@ -75,7 +74,61 @@ func TotalVotingPower(vals []Validators) int64{
 }
 ```

-
 ### ConsensusParams

-TODO
+ConsensusParams define various limits for blockchain data structures.
+Like validator sets, they are set during genesis and can be updated by the application through ABCI.
+
+```
+type ConsensusParams struct {
+	BlockSize
+	TxSize
+	BlockGossip
+	EvidenceParams
+}
+
+type BlockSize struct {
+	MaxBytes int
+	MaxGas   int64
+}
+
+type TxSize struct {
+	MaxBytes int
+	MaxGas   int64
+}
+
+type BlockGossip struct {
+	BlockPartSizeBytes int
+}
+
+type EvidenceParams struct {
+	MaxAge int64
+}
+```
+
+#### BlockSize
+
+The total size of a block is limited in bytes by the `ConsensusParams.BlockSize.MaxBytes`.
+Proposed blocks must be less than this size, and will be considered invalid
+otherwise.
+
+Blocks should additionally be limited by the amount of "gas" consumed by the
+transactions in the block, though this is not yet implemented.
+
+#### TxSize
+
+These parameters are not yet enforced and may disappear. See [issue
+#2347](https://github.com/tendermint/tendermint/issues/2347).
+
+#### BlockGossip
+
+When gossipping blocks in the consensus, they are first split into parts. The
+size of each part is `ConsensusParams.BlockGossip.BlockPartSizeBytes`.
+
+#### EvidenceParams
+
+For evidence in a block to be valid, it must satisfy:
+
+```
+block.Header.Height - evidence.Height < ConsensusParams.EvidenceParams.MaxAge
+```
diff --git a/docs/spec/consensus/bft-time.md b/docs/spec/consensus/bft-time.md
index a005e904051..8e02f6abea7 100644
--- a/docs/spec/consensus/bft-time.md
+++ b/docs/spec/consensus/bft-time.md
@@ -1,4 +1,4 @@
-# BFT time in Tendermint
+# BFT Time

 Tendermint provides a deterministic, Byzantine fault-tolerant source of time.
 Time in Tendermint is defined with the Time field of the block header.

@@ -16,10 +16,10 @@ In the context of Tendermint, time is of type int64 and denotes UNIX time in mil
 corresponds to the number of milliseconds since January 1, 1970.
 Before defining rules that need to be enforced by the Tendermint consensus protocol, so that the properties above hold, we introduce the following definition:

-- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages,
+- median of a Commit is equal to the median of `Vote.Time` fields of the `Vote` messages,
  where the value of `Vote.Time` is counted a number of times proportional to the process voting power. As in Tendermint
  the voting power is not uniform (one process one vote), a vote message is actually an aggregator of the same votes whose
-number is equal to the voting power of the process that has casted the corresponding votes message.
+  number is equal to the voting power of the process that has cast the corresponding vote message.

 Let's consider the following example:

- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10)

@@ -40,17 +40,15 @@ rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`.
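+
+Before the vote-time rules below, here is a small, hedged Go sketch of the
+weighted median defined above: each `Vote.Time` is counted a number of times
+proportional to the voting power of its sender. The `voteTime` type and the
+tie-breaking behaviour are illustrative assumptions, not the canonical
+implementation:
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+type voteTime struct {
+	Time  int64 // UNIX time in milliseconds
+	Power int64 // voting power of the sender
+}
+
+// weightedMedian returns the timestamp at which the accumulated voting power
+// of the time-sorted votes first exceeds half of the total voting power.
+func weightedMedian(votes []voteTime) int64 {
+	sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time })
+	var total int64
+	for _, v := range votes {
+		total += v.Power
+	}
+	var acc int64
+	for _, v := range votes {
+		acc += v.Power
+		if 2*acc > total {
+			return v.Time
+		}
+	}
+	return votes[len(votes)-1].Time
+}
+
+func main() {
+	// p1, p2 and p3 from the example above, with powers 23, 27 and 10.
+	votes := []voteTime{{Time: 100, Power: 23}, {Time: 105, Power: 27}, {Time: 98, Power: 10}}
+	fmt.Println(weightedMedian(votes)) // 100, since 10+23 = 33 > 60/2
+}
+```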
- Furthermore, when creating the `vote` message, the following rules for determining `vote.Time` field should hold:

-    - if `rs.Proposal` is defined then
-      `vote.Time = max(rs.Proposal.Timestamp + 1, time.Now())`, where `time.Now()`
-      denotes local Unix time in milliseconds.
+    - if `rs.LockedBlock` is defined then
+      `vote.Time = max(rs.LockedBlock.Timestamp + config.BlockTimeIota, time.Now())`, where `time.Now()`
+      denotes local Unix time in milliseconds, and `config.BlockTimeIota` is a configuration parameter that corresponds
+      to the minimum timestamp increment of the next block.
+
+    - else if `rs.Proposal` is defined then
+      `vote.Time = max(rs.Proposal.Timestamp + config.BlockTimeIota, time.Now())`,

-    - if `rs.Proposal` is not defined and `rs.Votes` contains +2/3 of the corresponding vote messages (votes for the
-      current height and round, and with the corresponding type (`Prevote` or `Precommit`)), then
-
-      `vote.Time = max(median(getVotes(rs.Votes, vote.Height, vote.Round, vote.Type)), time.Now())`,
-
-      where `getVotes` function returns the votes for particular `Height`, `Round` and `Type`.
-      The second rule is relevant for the case when a process jumps to a higher round upon receiving +2/3 votes for a higher
-      round, but the corresponding `Proposal` message for the higher round hasn't been received yet.
+    - otherwise, `vote.Time = time.Now()`. In this case the vote is for `<nil>` so it is not taken into account for
+      the timestamp of the next block.
diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
index d6804779c11..acd07397a9f 100644
--- a/docs/spec/consensus/consensus.md
+++ b/docs/spec/consensus/consensus.md
@@ -2,31 +2,31 @@

 ## Terms

-- The network is composed of optionally connected *nodes*. Nodes
-  directly connected to a particular node are called *peers*.
-- The consensus process in deciding the next block (at some *height*
-  `H`) is composed of one or many *rounds*.
-- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit`
-  represent state machine states of a round. (aka `RoundStep` or
-  just "step").
-- A node is said to be *at* a given height, round, and step, or at
-  `(H,R,S)`, or at `(H,R)` in short to omit the step.
-- To *prevote* or *precommit* something means to broadcast a [prevote
-  vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote)
-  or [first precommit
-  vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit)
-  for something.
-- A vote *at* `(H,R)` is a vote signed with the bytes for `H` and `R`
-  included in its [sign-bytes](block-structure.html#vote-sign-bytes).
-- *+2/3* is short for "more than 2/3"
-- *1/3+* is short for "1/3 or more"
-- A set of +2/3 of prevotes for a particular block or `<nil>` at
-  `(H,R)` is called a *proof-of-lock-change* or *PoLC* for short.
+- The network is composed of optionally connected _nodes_. Nodes
+  directly connected to a particular node are called _peers_.
+- The consensus process in deciding the next block (at some _height_
+  `H`) is composed of one or many _rounds_.
+- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit`
+  represent state machine states of a round. (aka `RoundStep` or
+  just "step").
+- A node is said to be _at_ a given height, round, and step, or at
+  `(H,R,S)`, or at `(H,R)` in short to omit the step.
+- To _prevote_ or _precommit_ something means to broadcast a [prevote + vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote) + or [first precommit + vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit) + for something. +- A vote _at_ `(H,R)` is a vote signed with the bytes for `H` and `R` + included in its [sign-bytes](../blockchain/blockchain.md#vote). +- _+2/3_ is short for "more than 2/3" +- _1/3+_ is short for "1/3 or more" +- A set of +2/3 of prevotes for a particular block or `` at + `(H,R)` is called a _proof-of-lock-change_ or _PoLC_ for short. ## State Machine Overview At each height of the blockchain a round-based protocol is run to -determine the next block. Each round is composed of three *steps* +determine the next block. Each round is composed of three _steps_ (`Propose`, `Prevote`, and `Precommit`), along with two special steps `Commit` and `NewHeight`. @@ -36,22 +36,22 @@ In the optimal scenario, the order of steps is: NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->... ``` -The sequence `(Propose -> Prevote -> Precommit)` is called a *round*. +The sequence `(Propose -> Prevote -> Precommit)` is called a _round_. There may be more than one round required to commit a block at a given height. Examples for why more rounds may be required include: -- The designated proposer was not online. -- The block proposed by the designated proposer was not valid. -- The block proposed by the designated proposer did not propagate - in time. -- The block proposed was valid, but +2/3 of prevotes for the proposed - block were not received in time for enough validator nodes by the - time they reached the `Precommit` step. Even though +2/3 of prevotes - are necessary to progress to the next step, at least one validator - may have voted `` or maliciously voted for something else. -- The block proposed was valid, and +2/3 of prevotes were received for - enough nodes, but +2/3 of precommits for the proposed block were not - received for enough validator nodes. +- The designated proposer was not online. +- The block proposed by the designated proposer was not valid. +- The block proposed by the designated proposer did not propagate + in time. +- The block proposed was valid, but +2/3 of prevotes for the proposed + block were not received in time for enough validator nodes by the + time they reached the `Precommit` step. Even though +2/3 of prevotes + are necessary to progress to the next step, at least one validator + may have voted `` or maliciously voted for something else. +- The block proposed was valid, and +2/3 of prevotes were received for + enough nodes, but +2/3 of precommits for the proposed block were not + received for enough validator nodes. Some of these problems are resolved by moving onto the next round & proposer. Others are resolved by increasing certain round timeout @@ -80,14 +80,13 @@ parameters over each successive round. +--------------------------------------------------------------------+ ``` -Background Gossip -================= +# Background Gossip A node may not have a corresponding validator private key, but it nevertheless plays an active role in the consensus process by relaying relevant meta-data, proposals, blocks, and votes to its peers. A node that has the private keys of an active validator and is engaged in -signing votes is called a *validator-node*. All nodes (not just +signing votes is called a _validator-node_. 
All nodes (not just validator-nodes) have an associated state (the current height, round, and step) and work to make progress. @@ -97,21 +96,21 @@ epidemic gossip protocol is implemented among some of these channels to bring peers up to speed on the most recent state of consensus. For example, -- Nodes gossip `PartSet` parts of the current round's proposer's - proposed block. A LibSwift inspired algorithm is used to quickly - broadcast blocks across the gossip network. -- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead - of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s - current (or future) round to enable it to progress forward. -- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change) - round if one is proposed. -- Nodes gossip to nodes lagging in blockchain height with block - [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit) - for older blocks. -- Nodes opportunistically gossip `HasVote` messages to hint peers what - votes it already has. -- Nodes broadcast their current state to all neighboring peers. (but - is not gossiped further) +- Nodes gossip `PartSet` parts of the current round's proposer's + proposed block. A LibSwift inspired algorithm is used to quickly + broadcast blocks across the gossip network. +- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead + of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s + current (or future) round to enable it to progress forward. +- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change) + round if one is proposed. +- Nodes gossip to nodes lagging in blockchain height with block + [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit) + for older blocks. +- Nodes opportunistically gossip `HasVote` messages to hint peers what + votes it already has. +- Nodes broadcast their current state to all neighboring peers. (but + is not gossiped further) There's more, but let's not get ahead of ourselves here. @@ -144,14 +143,14 @@ and all prevotes at `PoLC-Round`. --> goto `Prevote(H,R)` - After Upon entering `Prevote`, each validator broadcasts its prevote vote. -- First, if the validator is locked on a block since `LastLockRound` - but now has a PoLC for something else at round `PoLC-Round` where - `LastLockRound < PoLC-Round < R`, then it unlocks. -- If the validator is still locked on a block, it prevotes that. -- Else, if the proposed block from `Propose(H,R)` is good, it - prevotes that. -- Else, if the proposal is invalid or wasn't received on time, it - prevotes ``. +- First, if the validator is locked on a block since `LastLockRound` + but now has a PoLC for something else at round `PoLC-Round` where + `LastLockRound < PoLC-Round < R`, then it unlocks. +- If the validator is still locked on a block, it prevotes that. +- Else, if the proposed block from `Propose(H,R)` is good, it + prevotes that. +- Else, if the proposal is invalid or wasn't received on time, it + prevotes ``. The `Prevote` step ends: - After +2/3 prevotes for a particular block or ``. -->; goto `Precommit(H,R)` - After `timeoutPrevote` after @@ -161,11 +160,12 @@ receiving any +2/3 prevotes. --> goto `Precommit(H,R)` - After ### Precommit Step (height:H,round:R) Upon entering `Precommit`, each validator broadcasts its precommit vote. + - If the validator has a PoLC at `(H,R)` for a particular block `B`, it -(re)locks (or changes lock to) and precommits `B` and sets -`LastLockRound = R`. 
- Else, if the validator has a PoLC at `(H,R)` for
-`<nil>`, it unlocks and precommits `<nil>`. - Else, it keeps the lock
-unchanged and precommits `<nil>`.
+  (re)locks (or changes lock to) and precommits `B` and sets
+  `LastLockRound = R`.
+- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks and
+  precommits `<nil>`.
+- Else, it keeps the lock unchanged and precommits `<nil>`.

 A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I did
 get +2/3 prevotes and waited a bit".

@@ -177,24 +177,24 @@ conditions](#common-exit-conditions)

 ### Common exit conditions

-- After +2/3 precommits for a particular block. --> goto
-  `Commit(H)`
-- After any +2/3 prevotes received at `(H,R+x)`. --> goto
-  `Prevote(H,R+x)`
-- After any +2/3 precommits received at `(H,R+x)`. --> goto
-  `Precommit(H,R+x)`
+- After +2/3 precommits for a particular block. --> goto
+  `Commit(H)`
+- After any +2/3 prevotes received at `(H,R+x)`. --> goto
+  `Prevote(H,R+x)`
+- After any +2/3 precommits received at `(H,R+x)`. --> goto
+  `Precommit(H,R+x)`

 ### Commit Step (height:H)

-- Set `CommitTime = now()`
-- Wait until block is received. --> goto `NewHeight(H+1)`
+- Set `CommitTime = now()`
+- Wait until block is received. --> goto `NewHeight(H+1)`

 ### NewHeight Step (height:H)

-- Move `Precommits` to `LastCommit` and increment height.
-- Set `StartTime = CommitTime+timeoutCommit`
-- Wait until `StartTime` to receive straggler commits. --> goto
-  `Propose(H,0)`
+- Move `Precommits` to `LastCommit` and increment height.
+- Set `StartTime = CommitTime+timeoutCommit`
+- Wait until `StartTime` to receive straggler commits. --> goto
+  `Propose(H,0)`

 ## Proofs

@@ -236,20 +236,20 @@ Further, define the JSet at height `H` of a set of validators `VSet` to be
 the union of the JSets for each validator in `VSet`. For a given commit by honest
 validators at round `R` for block `B` we can construct a JSet to justify the
 commit for `B` at `R`. We say that a JSet
-*justifies* a commit at `(H,R)` if all the committers (validators in the
+_justifies_ a commit at `(H,R)` if all the committers (validators in the
 commit-set) are each justified in the JSet with no duplicitous vote signatures
 (by the committers).

-- **Lemma**: When a fork is detected by the existence of two
-  conflicting [commits](./validators.html#commiting-a-block), the
-  union of the JSets for both commits (if they can be compiled) must
-  include double-signing by at least 1/3+ of the validator set.
-  **Proof**: The commit cannot be at the same round, because that
-  would immediately imply double-signing by 1/3+. Take the union of
-  the JSets of both commits. If there is no double-signing by at least
-  1/3+ of the validator set in the union, then no honest validator
-  could have precommitted any different block after the first commit.
-  Yet, +2/3 did. Reductio ad absurdum.
+- **Lemma**: When a fork is detected by the existence of two
+  conflicting [commits](../blockchain/blockchain.md#commit), the
+  union of the JSets for both commits (if they can be compiled) must
+  include double-signing by at least 1/3+ of the validator set.
+  **Proof**: The commit cannot be at the same round, because that
+  would immediately imply double-signing by 1/3+. Take the union of
+  the JSets of both commits. If there is no double-signing by at least
+  1/3+ of the validator set in the union, then no honest validator
+  could have precommitted any different block after the first commit.
+  Yet, +2/3 did. Reductio ad absurdum.
As a corollary, when there is a fork, an external process can determine the blame
by requiring each validator to justify all of its round votes.

@@ -282,7 +282,7 @@ may make JSet verification/gossip logic easier to implement.

 ### Censorship Attacks

 Due to the definition of a block
-[commit](../../tendermint-core/validator.md#commiting-a-block), any 1/3+ coalition of
+[commit](../../tendermint-core/validators.md#commit-a-block), any 1/3+ coalition of
 validators can halt the blockchain by not broadcasting their votes. Such a
 coalition can also censor particular transactions by rejecting blocks that include
 these transactions, though this would result in a
diff --git a/docs/spec/consensus/creating-proposal.md b/docs/spec/consensus/creating-proposal.md
new file mode 100644
index 00000000000..03f5866dcc5
--- /dev/null
+++ b/docs/spec/consensus/creating-proposal.md
@@ -0,0 +1,42 @@
+# Creating a proposal
+
+A block consists of a header, transactions, votes (the commit),
+and a list of evidence of malfeasance (ie. signing conflicting votes).
+
+We include no more than 1/10th of the maximum block size
+(`ConsensusParams.BlockSize.MaxBytes`) of evidence with each block.
+
+## Reaping transactions from the mempool
+
+When we reap transactions from the mempool, we calculate the maximum data
+size by subtracting the maximum header size (`MaxHeaderBytes`), the maximum
+amino overhead for a block (`MaxAminoOverheadForBlock`), the size of
+the last commit (if present) and evidence (if present). While reaping
+we account for amino overhead for each transaction.
+
+```go
+func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 {
+	return maxBytes -
+		MaxAminoOverheadForBlock -
+		MaxHeaderBytes -
+		int64(valsCount)*MaxVoteBytes -
+		int64(evidenceCount)*MaxEvidenceBytes
+}
+```
+
+## Validating transactions in the mempool
+
+Before we accept a transaction in the mempool, we check that its size is no more
+than {MaxDataSize}. {MaxDataSize} is calculated using the same formula as
+above, except because the evidence size is unknown at the moment, we subtract
+the maximum evidence size (1/10th of the maximum block size).
+
+```go
+func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 {
+	return maxBytes -
+		MaxAminoOverheadForBlock -
+		MaxHeaderBytes -
+		int64(valsCount)*MaxVoteBytes -
+		MaxEvidenceBytesPerBlock(maxBytes)
+}
+```
diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md
index 0ed9d36d406..4b683b9a632 100644
--- a/docs/spec/consensus/light-client.md
+++ b/docs/spec/consensus/light-client.md
@@ -1,14 +1,14 @@
-# Light client
+# Light Client

-A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
-about the blockchain application. In this document we describe mechanisms that ensures that the Tendermint light client
-has the same level of security as Full Node processes (without being itself a Full Node).
+A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
+about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client
+has the same level of security as Full Node processes (without being itself a Full Node).

-To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
-Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the
-voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
-core functionality of the light client is updating the current validator set, that is then used to verify the
-blockchain header, and further the corresponding Merkle proofs.
+To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
+Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
+voting power of the corresponding validator set. As the validator set is dynamic (it changes over time), one of the
+core functionalities of the light client is updating the current validator set, which is then used to verify the
+blockchain header, and further the corresponding Merkle proofs.

 For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
-Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the -voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the -core functionality of the light client is updating the current validator set, that is then used to verify the -blockchain header, and further the corresponding Merkle proofs. +To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash. +Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the +voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the +core functionality of the light client is updating the current validator set, that is then used to verify the +blockchain header, and further the corresponding Merkle proofs. For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over Tendermint RPC: @@ -19,51 +19,50 @@ Validators(height int64) (ResultValidators, error) // returns validator set for LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number type SignedHeader struct { - Header Header + Header Header Commit Commit - ValSetNumber int64 + ValSetNumber int64 } type ResultValidators struct { - BlockHeight int64 - Validators []Validator - // time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight - ValSetTime int64 + BlockHeight int64 + Validators []Validator + // time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight + ValSetTime int64 } ``` -We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is -being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers +We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is +being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time as validator set init time. Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely, -given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next -validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator -set), and then starting from the block `H+2`, it will be signed by the next validator set. - -Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function -names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more -clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to -`valSetNumber+1`. 
+given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next +validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator +set), and then starting from the block `H+2`, it will be signed by the next validator set. +Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function +names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more +clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to +`valSetNumber+1`. Locally, light client manages the following state: ```golang -valSet []Validator // current validator set (last known and verified validator set) -valSetNumber int64 // sequence number of the current validator set +valSet []Validator // current validator set (last known and verified validator set) +valSetNumber int64 // sequence number of the current validator set valSetHash []byte // hash of the current validator set -valSetTime int64 // time when the current validator set is initialised +valSetTime int64 // time when the current validator set is initialised ``` The light client is initialised with the trusted validator set, for example based on the known validator set hash, validator set sequence number and the validator set init time. The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid, -and 2) update the validator set (when the given header is valid and it is more recent than the seen headers). +and 2) update the validator set (when the given header is valid and it is more recent than the seen headers). ```golang VerifyAndUpdate(signedHeader SignedHeader): - assertThat signedHeader.valSetNumber >= valSetNumber + assertThat signedHeader.valSetNumber >= valSetNumber if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then setValidatorSet(signedHeader) return true @@ -76,7 +75,7 @@ isValid(signedHeader SignedHeader): assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash assertThat signedHeader is passing basic validation if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true - else + else return false setValidatorSet(signedHeader SignedHeader): @@ -85,7 +84,7 @@ setValidatorSet(signedHeader SignedHeader): valSet = nextValSet.Validators valSetHash = signedHeader.Header.ValidatorsHash valSetNumber = signedHeader.ValSetNumber - valSetTime = nextValSet.ValSetTime + valSetTime = nextValSet.ValSetTime votingPower(commit Commit): votingPower = 0 @@ -96,9 +95,9 @@ votingPower(commit Commit): votingPower(validatorSet []Validator): for each validator in validatorSet do: - votingPower += validator.VotingPower + votingPower += validator.VotingPower return votingPower - + updateValidatorSet(valSetNumberOfTheHeader): while valSetNumber != valSetNumberOfTheHeader do signedHeader = LastHeader(valSetNumber) @@ -110,5 +109,5 @@ updateValidatorSet(valSetNumberOfTheHeader): Note that in the logic above we assume that the light client will always go upward with respect to header verifications, i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older -headers (go backward) the same mechanisms and similar logic can be used. 
In case a call to the FullNode or subsequent -checks fail, a light client need to implement some recovery strategy, for example connecting to other FullNode. +headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent +checks fail, a light client need to implement some recovery strategy, for example connecting to other FullNode. diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md index 9b5e496752f..47366a549ca 100644 --- a/docs/spec/p2p/connection.md +++ b/docs/spec/p2p/connection.md @@ -4,10 +4,10 @@ `MConnection` is a multiplex connection that supports multiple independent streams with distinct quality of service guarantees atop a single TCP connection. -Each stream is known as a `Channel` and each `Channel` has a globally unique *byte id*. +Each stream is known as a `Channel` and each `Channel` has a globally unique _byte id_. Each `Channel` also has a relative priority that determines the quality of service of the `Channel` compared to other `Channel`s. -The *byte id* and the relative priorities of each `Channel` are configured upon +The _byte id_ and the relative priorities of each `Channel` are configured upon initialization of the connection. The `MConnection` supports three packet types: @@ -38,7 +38,7 @@ type msgPacket struct { } ``` -The `msgPacket` is serialized using [go-wire](https://github.com/tendermint/go-wire) and prefixed with 0x3. +The `msgPacket` is serialized using [go-amino](https://github.com/tendermint/go-amino) and prefixed with 0x3. The received `Bytes` of a sequential set of packets are appended together until a packet with `EOF=1` is received, then the complete serialized message is returned for processing by the `onReceive` function of the corresponding channel. @@ -53,13 +53,14 @@ Messages are chosen for a batch one at a time from the channel with the lowest r ## Sending Messages There are two methods for sending messages: + ```go func (m MConnection) Send(chID byte, msg interface{}) bool {} func (m MConnection) TrySend(chID byte, msg interface{}) bool {} ``` `Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued -for the channel with the given id byte `chID`. The message `msg` is serialized +for the channel with the given id byte `chID`. The message `msg` is serialized using the `tendermint/wire` submodule's `WriteBinary()` reflection routine. `TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel @@ -76,8 +77,8 @@ and other higher level thread-safe data used by the reactors. ## Switch/Reactor The `Switch` handles peer connections and exposes an API to receive incoming messages -on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -or more `Channels`. So while sending outgoing messages is typically performed on the peer, +on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one +or more `Channels`. So while sending outgoing messages is typically performed on the peer, incoming messages are received on the reactor. ```go diff --git a/docs/spec/p2p/node.md b/docs/spec/p2p/node.md index 366b27dd2df..6d37eeb78c6 100644 --- a/docs/spec/p2p/node.md +++ b/docs/spec/p2p/node.md @@ -1,4 +1,4 @@ -# Tendermint Peer Discovery +# Peer Discovery A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to one another. This document describes what kind of nodes Tendermint should enable and how they should work. 
@@ -17,8 +17,9 @@ See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/maste ## New Full Node A new node needs a few things to connect to the network: + - a list of seeds, which can be provided to Tendermint via config file or flags, -or hardcoded into the software by in-process apps + or hardcoded into the software by in-process apps - a `ChainID`, also called `Network` at the p2p layer - a recent block height, H, and hash, HASH for the blockchain. diff --git a/docs/spec/p2p/peer.md b/docs/spec/p2p/peer.md index dadb4a3a599..f5c2e7bf293 100644 --- a/docs/spec/p2p/peer.md +++ b/docs/spec/p2p/peer.md @@ -1,4 +1,4 @@ -# Tendermint Peers +# Peers This document explains how Tendermint Peers are identified and how they connect to one another. @@ -29,26 +29,26 @@ Both handshakes have configurable timeouts (they should complete quickly). Tendermint implements the Station-to-Station protocol using X25519 keys for Diffie-Helman key-exchange and chacha20poly1305 for encryption. It goes as follows: + - generate an ephemeral X25519 keypair - send the ephemeral public key to the peer - wait to receive the peer's ephemeral public key - compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key - generate two keys to use for encryption (sending and receiving) and a challenge for authentication as follows: - - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as - `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN` - - get 96 bytes of output from hkdf-sha256 - - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite - - use the last 32 bytes of output for the challenge -- use a seperate nonce for receiving and sending. Both nonces start at 0, and should support the full 96 bit nonce range + - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as + `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN` + - get 96 bytes of output from hkdf-sha256 + - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite + - use the last 32 bytes of output for the challenge +- use a separate nonce for receiving and sending. Both nonces start at 0, and should support the full 96 bit nonce range - all communications from now on are encrypted in 1024 byte frames, -using the respective secret and nonce. Each nonce is incremented by one after each use. + using the respective secret and nonce. Each nonce is incremented by one after each use. - we now have an encrypted channel, but still need to authenticate - sign the common challenge obtained from the hkdf with our persistent private key - send the amino encoded persistent pubkey and signature to the peer - wait to receive the persistent public key and signature from the peer - verify the signature on the challenge using the peer's persistent public key - If this is an outgoing connection (we dialed the peer) and we used a peer ID, then finally verify that the peer's persistent public key corresponds to the peer ID we dialed, ie. `peer.PubKey.Address() == `. @@ -69,35 +69,45 @@ an optional whitelist which can be managed through the ABCI app - if the whitelist is enabled and the peer does not qualify, the connection is terminated. 
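+
+The key-and-challenge derivation described above can be sketched in Go with
+`golang.org/x/crypto/hkdf`. This is a hedged illustration of the 96-byte
+expansion and the smaller-pubkey rule; byte-for-byte compatibility with the
+production SecretConnection is not claimed:
+
+```go
+package main
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/hkdf"
+)
+
+// deriveSecrets expands the Diffie-Hellman shared secret into a receive key,
+// a send key, and an authentication challenge (32 bytes each).
+func deriveSecrets(dhSecret, locEphPub, remEphPub []byte) (recvKey, sendKey, challenge []byte, err error) {
+	info := []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN")
+	r := hkdf.New(sha256.New, dhSecret, nil, info)
+	out := make([]byte, 96)
+	if _, err = io.ReadFull(r, out); err != nil {
+		return nil, nil, nil, err
+	}
+	// The side with the smaller ephemeral pubkey receives with the first
+	// 32 bytes and sends with the second 32; the peer does the opposite.
+	if bytes.Compare(locEphPub, remEphPub) < 0 {
+		recvKey, sendKey = out[:32], out[32:64]
+	} else {
+		recvKey, sendKey = out[32:64], out[:32]
+	}
+	// The last 32 bytes are the challenge both sides sign to authenticate.
+	challenge = out[64:96]
+	return recvKey, sendKey, challenge, nil
+}
+
+func main() {
+	recv, send, chal, _ := deriveSecrets([]byte("dh-shared-secret"), []byte{1}, []byte{2})
+	fmt.Printf("%X %X %X\n", recv[:4], send[:4], chal[:4])
+}
+```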
- ### Tendermint Version Handshake The Tendermint Version Handshake allows the peers to exchange their NodeInfo: ```golang type NodeInfo struct { + Version p2p.Version ID p2p.ID ListenAddr string Network string - Version string + SoftwareVersion string Channels []int8 Moniker string - Other []string + Other NodeInfoOther +} + +type Version struct { + P2P uint64 + Block uint64 + App uint64 +} + +type NodeInfoOther struct { + TxIndex string + RPCAddress string } ``` The connection is disconnected if: + - `peer.NodeInfo.ID` is not equal `peerConn.ID` -- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision -- `peer.NodeInfo.Version` Major is not the same as ours +- `peer.NodeInfo.Version.Block` does not match ours - `peer.NodeInfo.Network` is not the same as ours - `peer.Channels` does not intersect with our known Channels. - `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be resolved - At this point, if we have not disconnected, the peer is valid. It is added to the switch and hence all reactors via the `AddPeer` method. Note that each reactor may handle multiple channels. diff --git a/docs/spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md index a96f83b3211..195f9b86279 100644 --- a/docs/spec/reactors/block_sync/impl.md +++ b/docs/spec/reactors/block_sync/impl.md @@ -1,46 +1,41 @@ ## Blockchain Reactor -* coordinates the pool for syncing -* coordinates the store for persistence -* coordinates the playing of blocks towards the app using a sm.BlockExecutor -* handles switching between fastsync and consensus -* it is a p2p.BaseReactor -* starts the pool.Start() and its poolRoutine() -* registers all the concrete types and interfaces for serialisation +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates the playing of blocks towards the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation ### poolRoutine -* listens to these channels: - * pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends a &bcBlockRequestMessage for a specific height - * pool signals timeout of a specific peer by posting to timeoutsCh - * switchToConsensusTicker to periodically try and switch to consensus - * trySyncTicker to periodically check if we have fallen behind and then catch-up sync - * if there aren't any new blocks available on the pool it skips syncing -* tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores them on disk -* implements Receive which is called by the switch/peer - * calls AddBlock on the pool when it receives a new block from a peer +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer ## Block Pool -* responsible for 
downloading blocks from peers
-* makeRequestersRoutine()
-  * removes timeout peers
-  * starts new requesters by calling makeNextRequester()
-* requestRoutine():
-  * picks a peer and sends the request, then blocks until:
-    * pool is stopped by listening to pool.Quit
-    * requester is stopped by listening to Quit
-    * request is redone
-    * we receive a block
-    * gotBlockCh is strange
+- responsible for downloading blocks from peers
+- makeRequestersRoutine()
+  - removes timeout peers
+  - starts new requesters by calling makeNextRequester()
+- requestRoutine():
+  - picks a peer and sends the request, then blocks until:
+    - pool is stopped by listening to pool.Quit
+    - requester is stopped by listening to Quit
+    - request is redone
+    - we receive a block
+    - gotBlockCh is strange

 ## Block Store

-* persists blocks to disk
-
-# TODO
-
-* How does the switch from bcR to conR happen? Does conR persist blocks to disk too?
-* What is the interaction between the consensus and blockchain reactors?
+- persists blocks to disk
diff --git a/docs/spec/reactors/block_sync/reactor.md b/docs/spec/reactors/block_sync/reactor.md
index 97104eeeb89..045bbd40039 100644
--- a/docs/spec/reactors/block_sync/reactor.md
+++ b/docs/spec/reactors/block_sync/reactor.md
@@ -46,11 +46,11 @@ type bcStatusResponseMessage struct {

 ## Architecture and algorithm

-The Blockchain reactor is organised as a set of concurrent tasks:
-  - Receive routine of Blockchain Reactor
-  - Task for creating Requesters
-  - Set of Requesters tasks and
-  - Controller task.
+The Blockchain reactor is organised as a set of concurrent tasks:
+
+- Receive routine of Blockchain Reactor
+- Task for creating Requesters
+- Set of Requesters tasks, and
+- Controller task.

 ![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)

@@ -58,41 +58,39 @@ The Blockchain reactor is organised as a set of concurrent tasks:

 These are the core data structures necessary to provide the Blockchain Reactor logic.

-Requester data structure is used to track assignment of request for `block` at position `height` to a
-peer with id equals to `peerID`.
+Requester data structure is used to track the assignment of a request for `block` at position `height` to a peer with id equal to `peerID`.

 ```go
 type Requester {
-	mtx Mutex
+	mtx Mutex
 	block Block
-	height int64
-
 peerID p2p.ID + height int64 + 
peerID    p2p.ID
 	redoChannel chan struct{}
 }
 ```

-Pool is core data structure that stores last executed block (`height`), assignment of requests to peers (`requesters`),
-current height for each peer and number of pending requests for each peer (`peers`), maximum peer height, etc.
+
+Pool is the core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.

 ```go
 type Pool {
-	mtx Mutex
+	mtx Mutex
 	requesters map[int64]*Requester
-
height int64 + height int64 peers map[p2p.ID]*Peer - 
maxPeerHeight int64 

 - 
numPending int32 + maxPeerHeight int64 + numPending int32 store BlockStore - 
requestsChannel chan<- BlockRequest - 
errorsChannel chan<- peerError + requestsChannel chan<- BlockRequest + errorsChannel chan<- peerError } ``` -Peer data structure stores for each peer current `height` and number of pending requests sent to -the peer (`numPending`), etc. +Peer data structure stores for each peer current `height` and number of pending requests sent to the peer (`numPending`), etc. ```go type Peer struct { - id p2p.ID + id p2p.ID height int64 numPending int32 timeout *time.Timer @@ -100,202 +98,202 @@ type Peer struct { } ``` -BlockRequest is internal data structure used to denote current mapping of request for a block at some `height` to -a peer (`PeerID`). - +BlockRequest is internal data structure used to denote current mapping of request for a block at some `height` to a peer (`PeerID`). + ```go type BlockRequest { Height int64 - PeerID p2p.ID + PeerID p2p.ID } ``` ### Receive routine of Blockchain Reactor -It is executed upon message reception on the BlockchainChannel inside p2p receive routine. There is a separate p2p -receive routine (and therefore receive routine of the Blockchain Reactor) executed for each peer. Note that -try to send will not block (returns immediately) if outgoing buffer is full. +It is executed upon message reception on the BlockchainChannel inside p2p receive routine. There is a separate p2p receive routine (and therefore receive routine of the Blockchain Reactor) executed for each peer. Note that try to send will not block (returns immediately) if outgoing buffer is full. ```go handleMsg(pool, m): upon receiving bcBlockRequestMessage m from peer p: - block = load block for height m.Height from pool.store - if block != nil then - try to send BlockResponseMessage(block) to p - else - try to send bcNoBlockResponseMessage(m.Height) to p - - upon receiving bcBlockResponseMessage m from peer p: - pool.mtx.Lock() - requester = pool.requesters[m.Height] - if requester == nil then - error("peer sent us a block we didn't expect") - continue - - if requester.block == nil and requester.peerID == p then + block = load block for height m.Height from pool.store + if block != nil then + try to send BlockResponseMessage(block) to p + else + try to send bcNoBlockResponseMessage(m.Height) to p + + upon receiving bcBlockResponseMessage m from peer p: + pool.mtx.Lock() + requester = pool.requesters[m.Height] + if requester == nil then + error("peer sent us a block we didn't expect") + continue + + if requester.block == nil and requester.peerID == p then requester.block = m - pool.numPending -= 1 // atomic decrement - peer = pool.peers[p] - if peer != nil then - peer.numPending-- - if peer.numPending == 0 then - peer.timeout.Stop() - // NOTE: we don't send Quit signal to the corresponding requester task! - else - trigger peer timeout to expire after peerTimeout - pool.mtx.Unlock() - - + pool.numPending -= 1 // atomic decrement + peer = pool.peers[p] + if peer != nil then + peer.numPending-- + if peer.numPending == 0 then + peer.timeout.Stop() + // NOTE: we don't send Quit signal to the corresponding requester task! 
+            else
+                trigger peer timeout to expire after peerTimeout
+    pool.mtx.Unlock()
+
+    upon receiving bcStatusRequestMessage m from peer p:
-        try to send bcStatusResponseMessage(pool.store.Height)
+        try to send bcStatusResponseMessage(pool.store.Height)

     upon receiving bcStatusResponseMessage m from peer p:
-        pool.mtx.Lock()
-        peer = pool.peers[p]
-        if peer != nil then
-            peer.height = m.height
-        else
-            peer = create new Peer data structure with id = p and height = m.Height
-            pool.peers[p] = peer
-
-        if m.Height > pool.maxPeerHeight then
-            pool.maxPeerHeight = m.Height
-        pool.mtx.Unlock()
-
+        pool.mtx.Lock()
+        peer = pool.peers[p]
+        if peer != nil then
+            peer.height = m.height
+        else
+            peer = create new Peer data structure with id = p and height = m.Height
+            pool.peers[p] = peer
+
+        if m.Height > pool.maxPeerHeight then
+            pool.maxPeerHeight = m.Height
+        pool.mtx.Unlock()
+
     onTimeout(p):
-        send error message to pool error channel
-        peer = pool.peers[p]
-        peer.didTimeout = true
+        send error message to pool error channel
+        peer = pool.peers[p]
+        peer.didTimeout = true
 ```

 ### Requester tasks

-Requester task is responsible for fetching a single block at position `height`.
+A requester task is responsible for fetching a single block at position `height`.

 ```go
 fetchBlock(height, pool):
-    while true do
-        peerID = nil
         block = nil
-        peer = pickAvailablePeer(height)
-        peerId = peer.id
+    while true do
+        peerID = nil
+        block = nil
+        peer = pickAvailablePeer(height)
+        peerID = peer.id

         enqueue BlockRequest(height, peerID) to pool.requestsChannel
-        redo = false
-        while !redo do
-            select {
+        redo = false
+        while !redo do
+            select {
                 upon receiving Quit message do
-                    return
-                upon receiving message on redoChannel do
-                    mtx.Lock()
+                    return
+                upon receiving message on redoChannel do
+                    mtx.Lock()
                     pool.numPending++
-                    redo = true
-                    mtx.UnLock()
-            }
+                    redo = true
+                    mtx.Unlock()
+            }

 pickAvailablePeer(height):
-    selectedPeer = nil
-    while selectedPeer = nil do
-        pool.mtx.Lock()
-        for each peer in pool.peers do
-            if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
-                peer.numPending++
-                selectedPeer = peer
-                break
-        pool.mtx.Unlock()
-
-        if selectedPeer = nil then
-            sleep requestIntervalMS
-
-    return selectedPeer
+    selectedPeer = nil
+    while selectedPeer == nil do
+        pool.mtx.Lock()
+        for each peer in pool.peers do
+            if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
+                peer.numPending++
+                selectedPeer = peer
+                break
+        pool.mtx.Unlock()
+
+        if selectedPeer == nil then
+            sleep requestIntervalMS
+
+    return selectedPeer
 ```

 ### Task for creating Requesters

 This task is responsible for continuously creating and starting Requester tasks.
+
+```go
+createRequesters(pool):
-    while true do
-        if !pool.isRunning then break
-        if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
             pool.mtx.Lock()
             nextHeight = pool.height + size(pool.requesters)
-            requester = create new requester for height nextHeight
-            pool.requesters[nextHeight] = requester
-            pool.numPending += 1 // atomic increment
-            start requester task
-            pool.mtx.Unlock()
-        else
             sleep requestIntervalMS
-            pool.mtx.Lock()
-            for each peer in pool.peers do
-                if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
-                    send error on pool error channel
                     peer.didTimeout = true
-                if peer.didTimeout then
-                    for each requester in pool.requesters do
-                        if requester.getPeerID() == peer then
                             enqueue msg on requester's redoChannel
-                    delete(pool.peers, peerID)
-            pool.mtx.Unlock()
 ```

-
-### Main blockchain reactor controller task
+### Main blockchain reactor controller task
+
 ```go
 main(pool):
-    create trySyncTicker with interval trySyncIntervalMS
-    create statusUpdateTicker with interval statusUpdateIntervalSeconds
-    create switchToConsensusTicker with interbal switchToConsensusIntervalSeconds
-
-    while true do
-        select {
+    create trySyncTicker with interval trySyncIntervalMS
+    create statusUpdateTicker with interval statusUpdateIntervalSeconds
+    create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
+
+    while true do
+        select {

         upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
-            try to send bcBlockRequestMessage(Height) to Peer
+            try to send bcBlockRequestMessage(Height) to Peer

         upon receiving error(peer) on errorsChannel:
-            stop peer for error
+            stop peer for error

         upon receiving message on statusUpdateTickerChannel:
-            broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
+            broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine

         upon receiving message on switchToConsensusTickerChannel:
-            pool.mtx.Lock()
-            receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
-            ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
-            haveSomePeers = size of pool.peers > 0
+            pool.mtx.Lock()
+            receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
+            ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+            haveSomePeers = size of pool.peers > 0
             pool.mtx.Unlock()
             if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
-                switch to consensus mode
+                switch to consensus mode

         upon receiving message on trySyncTickerChannel:
-            for i = 0; i < 10; i++ do
-                pool.mtx.Lock()
+            for i = 0; i < 10; i++ do
+                pool.mtx.Lock()
                 firstBlock = pool.requesters[pool.height].block
                 secondBlock = pool.requesters[pool.height+1].block
                 if firstBlock == nil or secondBlock == nil then
                    pool.mtx.Unlock()
                     continue
                 pool.mtx.Unlock()
-                verify firstBlock using LastCommit from secondBlock
-                if verification failed
-                    pool.mtx.Lock()
                     peerID = pool.requesters[pool.height].peerID
                     redoRequestsForPeer(peerID)
                     delete(pool.peers, peerID)
-                    stop peer peerID for error
-                    pool.mtx.Unlock()
-                else
+                verify firstBlock using LastCommit from secondBlock
+                if verification failed
+                    pool.mtx.Lock()
                     peerID = pool.requesters[pool.height].peerID
                     redoRequestsForPeer(peerID)
                     delete(pool.peers, peerID)
+                    stop peer peerID for error
+                    pool.mtx.Unlock()
+                else
                     delete(pool.requesters, pool.height)
                     save firstBlock to store
-                    pool.height++
-                    execute firstBlock
+                    pool.height++
+                    execute firstBlock
     }

-
 redoRequestsForPeer(pool, peerId):
-    for each requester in pool.requesters do
-        if requester.getPeerID() == peerID
-            enqueue msg on redoChannel for requester
+    for each requester in pool.requesters do
+        if requester.getPeerID() == peerId
+            enqueue msg on redoChannel for requester
 ```

 ## Channels

 Defines `maxMsgSize` for the maximum size of incoming messages,
diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md
index 0f03b44b787..23275b1222a 100644
--- a/docs/spec/reactors/consensus/consensus-reactor.md
+++ b/docs/spec/reactors/consensus/consensus-reactor.md
@@ -1,49 +1,48 @@
 # Consensus Reactor

-Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
-manages the state of the Tendermint consensus internal state machine.
-When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service.
-Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
-(that is used extensively in gossip routines) and starts the following three routines for the peer p:
-Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
+Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
 for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
-The processing normally consists of updating the known peer state and for some messages
-(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding message to ConsensusState module
-for further processing. In the following text we specify the core functionality of those separate unit of executions
-that are part of the Consensus Reactor.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.

 ## ConsensusState service

-Consensus State handles execution of the Tendermint BFT consensus algorithm.
 ## Channels

 Defines `maxMsgSize` for the maximum size of incoming messages,
diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md
index 0f03b44b787..23275b1222a 100644
--- a/docs/spec/reactors/consensus/consensus-reactor.md
+++ b/docs/spec/reactors/consensus/consensus-reactor.md
@@ -1,49 +1,48 @@
 # Consensus Reactor

-Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
-manages the state of the Tendermint consensus internal state machine.
-When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service.
-Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
-(that is used extensively in gossip routines) and starts the following three routines for the peer p:
-Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
+Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
 for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
-The processing normally consists of updating the known peer state and for some messages
-(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding message to ConsensusState module
-for further processing. In the following text we specify the core functionality of those separate unit of executions
-that are part of the Consensus Reactor.
+The processing normally consists of updating the known peer state and, for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.

 ## ConsensusState service

-Consensus State handles execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+Consensus State handles execution of the Tendermint BFT consensus algorithm.
 It processes votes and proposals, and upon reaching agreement, commits blocks to the chain and executes them
 against the application. The internal state machine receives input from peers, the internal validator and from a timer.

 Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
-Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
-by the Receive Routine.
-
+Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
+by the Receive Routine.

 ### Receive Routine of the ConsensusState service

 Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
-It is the only routine that updates RoundState that contains internal consensus state.
-Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
-It receives messages from peers, internal validators and from Timeout Ticker
-and invokes the corresponding handlers, potentially updating the RoundState.
-The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
+It is the only routine that updates the RoundState, which contains the internal consensus state.
+Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
+It receives messages from peers, internal validators and from Timeout Ticker
+and invokes the corresponding handlers, potentially updating the RoundState.
+The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
 discussed in a separate document. For the purposes of this document
-it is sufficient to understand that the Receive Routine manages and updates RoundState data structure that is
+it is sufficient to know that the Receive Routine manages and updates the RoundState data structure, which is
 then extensively used by the gossip routines to determine what information should be sent to peer processes.

 ## Round State

 RoundState defines the internal consensus state. It contains height, round, round step, a current validator set,
-a proposal and proposal block for the current round, locked round and block (if some block is being locked), set of
-received votes and last commit and last validators set.
+a proposal and proposal block for the current round, the locked round and block (if any block is locked), the set of
+received votes, the last commit, and the last validator set.

 ```golang
 type RoundState struct {
-  Height            int64
+  Height            int64
   Round             int
   Step              RoundStepType
   Validators        ValidatorSet
@@ -54,10 +53,10 @@ type RoundState struct {
   LockedBlock       Block
   LockedBlockParts  PartSet
   Votes             HeightVoteSet
-  LastCommit        VoteSet
+  LastCommit        VoteSet
   LastValidators    ValidatorSet
-}
-```
+}
+```

 Internally, consensus will run as a state machine with the following states:

@@ -82,8 +81,8 @@ type PeerRoundState struct {
   Round                    int           // Round peer is at, -1 if unknown.
   Step                     RoundStepType // Step peer is at
   Proposal                 bool          // True if peer has proposal for this round
-  ProposalBlockPartsHeader PartSetHeader
-  ProposalBlockParts       BitArray
+  ProposalBlockPartsHeader PartSetHeader
+  ProposalBlockParts       BitArray
   ProposalPOLRound         int      // Proposal's POL round. -1 if none.
   ProposalPOL              BitArray // nil until ProposalPOLMessage received.
   Prevotes                 BitArray // All votes peer has for this round
@@ -93,19 +92,19 @@ type PeerRoundState struct {
   CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
   CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
 }
-```
+```

 ## Receive method of Consensus reactor

-The entry point of the Consensus reactor is a receive method. When a message is received from a peer p,
-normally the peer round state is updated correspondingly, and some messages
+The entry point of the Consensus reactor is a receive method. When a message is received from a peer p,
+normally the peer round state is updated correspondingly, and some messages
 are passed for further processing, for example to the ConsensusState service. We now specify the processing of messages
 in the receive method of Consensus reactor for each message type. In the following message handler, `rs` and `prs` denote
 `RoundState` and `PeerRoundState`, respectively.

-### NewRoundStepMessage handler
+### NewRoundStepMessage handler

-```
+```
 handleMessage(msg):
   if msg is from smaller height/round/step then return
   // Just remember these values.
@@ -116,10 +115,10 @@ handleMessage(msg):
   Update prs with values from msg
   if prs.Height or prs.Round has been updated then
-    reset Proposal related fields of the peer state
+    reset Proposal related fields of the peer state
   if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
     prs.Precommits = prsCatchupCommit
-  if prs.Height has been updated then
+  if prs.Height has been updated then
     if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
       prs.LastCommitRound = msg.LastCommitRound
       prs.LastCommit = prs.Precommits
@@ -128,111 +127,114 @@ handleMessage(msg):
       prs.LastCommit = nil
   }
   Reset prs.CatchupCommitRound and prs.CatchupCommit
-```
+```

-### CommitStepMessage handler
+### NewValidBlockMessage handler

-```
+```
 handleMessage(msg):
-  if prs.Height == msg.Height then
-    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
-    prs.ProposalBlockParts = msg.BlockParts
-```
+  if prs.Height != msg.Height then return
+
+  if prs.Round != msg.Round && !msg.IsCommit then return
+
+  prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
+  prs.ProposalBlockParts = msg.BlockParts
+```

 ### HasVoteMessage handler

-```
+```
 handleMessage(msg):
-  if prs.Height == msg.Height then
+  if prs.Height == msg.Height then
     prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
-```
+```

 ### VoteSetMaj23Message handler

-```
+```
 handleMessage(msg):
   if prs.Height == msg.Height then
     Record in rs that the peer claims to have a ⅔ majority for msg.BlockID
-    Send VoteSetBitsMessage showing votes node has for that BlockId
-```
+    Send VoteSetBitsMessage showing the votes the node has for that BlockID
+```

 ### ProposalMessage handler

 ```
 handleMessage(msg):
-  if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+  if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
   prs.Proposal = true
-  prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
-  prs.ProposalBlockParts = empty set
+  if prs.ProposalBlockParts == empty set then // otherwise it is set in NewValidBlockMessage handler
+    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
   prs.ProposalPOLRound = msg.POLRound
-  prs.ProposalPOL = nil
+  prs.ProposalPOL = nil
   Send msg through internal peerMsgQueue to ConsensusState service
-```
+```
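+A hedged Go rendering of the ProposalMessage guard above, with the structs trimmed to only the fields the handler touches (these are not the real wire types):
+
+```go
+package main
+
+import "fmt"
+
+type ProposalMessage struct {
+    Height   int64
+    Round    int
+    POLRound int
+}
+
+type PeerRoundState struct {
+    Height           int64
+    Round            int
+    Proposal         bool
+    ProposalPOLRound int
+}
+
+func handleProposal(prs *PeerRoundState, msg *ProposalMessage, peerMsgQueue chan<- *ProposalMessage) {
+    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal {
+        return // wrong height/round, or we already recorded a proposal for this peer
+    }
+    prs.Proposal = true
+    prs.ProposalPOLRound = msg.POLRound
+    peerMsgQueue <- msg // forward to the ConsensusState service
+}
+
+func main() {
+    q := make(chan *ProposalMessage, 1)
+    prs := &PeerRoundState{Height: 5, Round: 0}
+    handleProposal(prs, &ProposalMessage{Height: 5, Round: 0, POLRound: -1}, q)
+    fmt.Println(prs.Proposal, len(q)) // true 1
+}
+```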
 ### ProposalPOLMessage handler

-```
+```
 handleMessage(msg):
   if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
   prs.ProposalPOL = msg.ProposalPOL
-```
+```

 ### BlockPartMessage handler

-```
+```
 handleMessage(msg):
   if prs.Height != msg.Height || prs.Round != msg.Round then
     return
-  Record in prs that peer has block part msg.Part.Index
+  Record in prs that the peer has block part msg.Part.Index
   Send msg through internal peerMsgQueue to ConsensusState service
-```
+```

 ### VoteMessage handler

-```
+```
 handleMessage(msg):
   Record in prs that the peer knows the vote with index msg.vote.ValidatorIndex for the given height and round
   Send msg through internal peerMsgQueue to ConsensusState service
-```
+```

 ### VoteSetBitsMessage handler

-```
+```
 handleMessage(msg):
   Update prs for the bit-array of votes the peer claims to have for the msg.BlockID
-```
+```
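+All of these handlers hang off a single entry point that switches on the concrete message type. A minimal sketch of that dispatch, with abbreviated stand-in message structs:
+
+```go
+package main
+
+import "fmt"
+
+type (
+    HasVoteMessage     struct{ Height int64 }
+    VoteSetBitsMessage struct{ Height int64 }
+)
+
+// receive mirrors the entry point described above: decode, then dispatch on
+// the concrete message type to the matching handler.
+func receive(msg interface{}) string {
+    switch msg.(type) {
+    case *HasVoteMessage:
+        return "update prs: peer has this vote"
+    case *VoteSetBitsMessage:
+        return "update prs: bit-array of votes the peer claims to have"
+    default:
+        return "unknown message: ignore (or stop the peer)"
+    }
+}
+
+func main() {
+    fmt.Println(receive(&HasVoteMessage{Height: 1}))
+    fmt.Println(receive("garbage"))
+}
+```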
 ## Gossip Data Routine

-It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
-`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
+It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
+`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
 and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:

 ```
 1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
-      Part = pick a random proposal block part the peer does not have
-      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
       if send returns true, record that the peer knows the corresponding block Part
-      Continue
-
+      Continue
+
 1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
       help peer catch up using gossipDataForCatchup function
       Continue

-1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
       Sleep PeerGossipSleepDuration
-      Continue
+      Continue

 // at this point rs.Height == prs.Height and rs.Round == prs.Round
-1d) if (rs.Proposal != nil and !prs.Proposal) then
+1d) if (rs.Proposal != nil and !prs.Proposal) then
       Send ProposalMessage(rs.Proposal) to the peer
       if send returns true, record that the peer knows Proposal
       if 0 <= rs.Proposal.POLRound then
-        polRound = rs.Proposal.POLRound
-        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+        polRound = rs.Proposal.POLRound
+        prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
         Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
-      Continue
+      Continue

-2) Sleep PeerGossipSleepDuration
+2) Sleep PeerGossipSleepDuration
 ```

 ### Gossip Data For Catchup
@@ -240,65 +242,65 @@ and the known PeerRoundState (`prs`). The routine repeats forever the logic show
 This function is responsible for helping the peer catch up if it is at a smaller height (prs.Height < rs.Height).
 The function executes the following logic:

-    if peer does not have all block parts for prs.ProposalBlockPart then
+    if peer does not have all block parts for prs.ProposalBlockParts then
       blockMeta = Load Block Metadata for height prs.Height from blockStore
       if (blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader) then
         Sleep PeerGossipSleepDuration
         return
-      Part = pick a random proposal block part the peer does not have
-      Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+      Part = pick a random proposal block part the peer does not have
+      Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
       if send returns true, record that the peer knows the corresponding block Part
       return
-    else Sleep PeerGossipSleepDuration
-
+    else Sleep PeerGossipSleepDuration
+
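+"Pick a random proposal block part the peer does not have" is a random choice from the difference of two part bit-arrays. A sketch with the bit-arrays simplified to `[]bool` slices of equal length:
+
+```go
+package main
+
+import (
+    "fmt"
+    "math/rand"
+)
+
+// pickRandomMissing returns the index of a random part we have and the peer
+// lacks, or -1 if the peer already has everything we have.
+func pickRandomMissing(ourParts, peerParts []bool) int {
+    var missing []int
+    for i, have := range ourParts {
+        if have && !peerParts[i] {
+            missing = append(missing, i)
+        }
+    }
+    if len(missing) == 0 {
+        return -1
+    }
+    return missing[rand.Intn(len(missing))]
+}
+
+func main() {
+    ours := []bool{true, true, true, true}
+    theirs := []bool{true, false, true, false}
+    fmt.Println(pickRandomMissing(ours, theirs)) // prints 1 or 3
+}
+```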
 ## Gossip Votes Routine

 It is used to send the following message: `VoteMessage` on the VoteChannel.
-The gossip votes routine is based on the local RoundState (`rs`)
+The gossip votes routine is based on the local RoundState (`rs`)
 and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:

 ```
 1a) if rs.Height == prs.Height then
-      if prs.Step == RoundStepNewHeight then
-        vote = random vote from rs.LastCommit the peer does not have
-        Send VoteMessage(vote) to the peer
+      if prs.Step == RoundStepNewHeight then
+        vote = random vote from rs.LastCommit the peer does not have
+        Send VoteMessage(vote) to the peer
         if send returns true, continue
-
-      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
+
+      if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
         Prevotes = rs.Votes.Prevotes(prs.Round)
-        vote = random vote from Prevotes the peer does not have
-        Send VoteMessage(vote) to the peer
+        vote = random vote from Prevotes the peer does not have
+        Send VoteMessage(vote) to the peer
         if send returns true, continue

-      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
-        Precommits = rs.Votes.Precommits(prs.Round)
-        vote = random vote from Precommits the peer does not have
-        Send VoteMessage(vote) to the peer
+      if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
+        Precommits = rs.Votes.Precommits(prs.Round)
+        vote = random vote from Precommits the peer does not have
+        Send VoteMessage(vote) to the peer
         if send returns true, continue
-
-      if prs.ProposalPOLRound != -1 then
+
+      if prs.ProposalPOLRound != -1 then
        PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
-        vote = random vote from PolPrevotes the peer does not have
-        Send VoteMessage(vote) to the peer
-        if send returns true, continue
+        vote = random vote from PolPrevotes the peer does not have
+        Send VoteMessage(vote) to the peer
+        if send returns true, continue

 1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
-      vote = random vote from rs.LastCommit peer does not have
-      Send VoteMessage(vote) to the peer
+      vote = random vote from rs.LastCommit the peer does not have
+      Send VoteMessage(vote) to the peer
       if send returns true, continue
-
+
 1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
-      Commit = get commit from BlockStore for prs.Height
-      vote = random vote from Commit the peer does not have
-      Send VoteMessage(vote) to the peer
+      Commit = get commit from BlockStore for prs.Height
+      vote = random vote from Commit the peer does not have
+      Send VoteMessage(vote) to the peer
       if send returns true, continue

-2) Sleep PeerGossipSleepDuration
+2) Sleep PeerGossipSleepDuration
 ```

 ## QueryMaj23Routine

-It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
+It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
 BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState (`prs`).
 The routine repeats forever the logic shown below.

@@ -324,8 +326,8 @@ BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`
       Send m to peer
       Sleep PeerQueryMaj23SleepDuration

-1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
-    prs.Height <= blockStore.Height() then
+1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
+    prs.Height <= blockStore.Height() then
       Commit = LoadCommit(prs.Height)
       m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.blockId)
       Send m to peer
@@ -339,14 +341,14 @@ BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`
 The Broadcast routine subscribes to an internal event bus to receive new round steps, vote messages and proposal
 heartbeat messages, and broadcasts messages to peers upon receiving those events.
 It broadcasts `NewRoundStepMessage` or `NewValidBlockMessage` upon new round state event. Note that
-broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel.
-Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel.
+broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
+Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
 `ProposalHeartbeatMessage` is sent the same way on the StateChannel.

 ## Channels

 Defines 4 channels: state, data, vote and vote_set_bits. Each channel
-has `SendQueueCapacity` and `RecvBufferCapacity` and
+has `SendQueueCapacity` and `RecvBufferCapacity` and
 `RecvMessageCapacity` set to `maxMsgSize`.

 Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md
index 4ea619b5150..e5d1f4cc3ef 100644
--- a/docs/spec/reactors/consensus/consensus.md
+++ b/docs/spec/reactors/consensus/consensus.md
@@ -23,10 +23,10 @@ processes using `BlockPartMessage`.

 Validators in Tendermint communicate by a peer-to-peer gossiping protocol. Each validator is connected
 only to a subset of processes called peers. By the gossiping protocol, a validator sends to its peers
-all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can
+all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can
 reach agreement on some block, and also obtain the content of the chosen block (block parts). As
 part of the gossiping protocol, processes also send auxiliary messages that inform peers about the
-executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and
+executed steps of the core consensus algorithm (`NewRoundStepMessage` and `NewValidBlockMessage`), and
 also messages that inform peers what votes the process has seen (`HasVoteMessage`,
 `VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping protocol
 to determine what messages a process should send to its peers.
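+As described for the consensus reactor above, this gossiping runs concurrently: one goroutine per routine is started for every peer. A minimal sketch of that shape — the names and the sleep interval are illustrative, not the real implementation:
+
+```go
+package main
+
+import "time"
+
+type peer struct{ quit chan struct{} }
+
+// startGossipRoutines launches the three per-peer routines; each loops until
+// the peer is removed, sleeping between iterations as in the pseudocode.
+func startGossipRoutines(p *peer) {
+    loop := func(step func()) {
+        for {
+            select {
+            case <-p.quit:
+                return
+            case <-time.After(100 * time.Millisecond): // stand-in for PeerGossipSleepDuration
+                step()
+            }
+        }
+    }
+    go loop(func() { /* gossip data: BlockPartMessage, ProposalMessage, ProposalPOLMessage */ })
+    go loop(func() { /* gossip votes: VoteMessage */ })
+    go loop(func() { /* query +2/3 majorities: VoteSetMaj23Message */ })
+}
+
+func main() {
+    p := &peer{quit: make(chan struct{})}
+    startGossipRoutines(p)
+    time.Sleep(250 * time.Millisecond)
+    close(p.quit) // peer removed: all three routines return
+    time.Sleep(50 * time.Millisecond)
+}
+```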
@@ -47,25 +47,21 @@ type ProposalMessage struct {

 ### Proposal

 Proposal contains height and round for which this proposal is made, BlockID as a unique identifier
-of proposed block, timestamp, and two fields (POLRound and POLBlockID) that are needed for
-termination of the consensus. The message is signed by the validator private key.
+of the proposed block, a timestamp, and POLRound (a so-called Proof-of-Lock (POL) round) that is needed for
+termination of the consensus. If POLRound >= 0, then BlockID corresponds to the block that
+is locked in POLRound. The message is signed by the validator private key.

 ```go
 type Proposal struct {
   Height     int64
   Round      int
-  Timestamp  Time
-  BlockID    BlockID
   POLRound   int
-  POLBlockID BlockID
+  BlockID    BlockID
+  Timestamp  Time
   Signature  Signature
 }
 ```

-NOTE: In the current version of the Tendermint, the consensus value in proposal is represented with
-PartSetHeader, and with BlockID in vote message. It should be aligned as suggested in this spec as
-BlockID contains PartSetHeader.
-
 ## VoteMessage

 VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
@@ -136,23 +132,26 @@ type NewRoundStepMessage struct {
 }
 ```

-## CommitStepMessage
+## NewValidBlockMessage

-CommitStepMessage is sent when an agreement on some block is reached. It contains height for which
-agreement is reached, block parts header that describes the decided block and is used to obtain all
+NewValidBlockMessage is sent when a validator observes a valid block B in some round r,
+i.e., there is a Proposal for block B and 2/3+ prevotes for block B in round r.
+It contains the height and round in which the valid block is observed, the block parts header that describes
+the valid block and is used to obtain all
 block parts, and a bit array of the block parts a process currently has, so its peers can know what
 parts it is missing so they can send them.
+If the block is also committed, then the IsCommit flag is set to true.

 ```go
-type CommitStepMessage struct {
+type NewValidBlockMessage struct {
   Height           int64
-  BlockID          BlockID
+  Round            int
+  BlockPartsHeader PartSetHeader
   BlockParts       BitArray
+  IsCommit         bool
 }
 ```

-TODO: We use BlockID instead of BlockPartsHeader (in current implementation) for symmetry.
-
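+The "2/3+ prevotes" condition here (like the +2/3 thresholds elsewhere in this spec) is measured in validator voting power, not in number of validators. A hedged sketch of the integer-safe check:
+
+```go
+package main
+
+import "fmt"
+
+// hasTwoThirdsMajority reports whether votePower is strictly greater than
+// two thirds of totalPower. Integer math avoids rounding:
+// power > total*2/3  <=>  3*power > 2*total.
+func hasTwoThirdsMajority(votePower, totalPower int64) bool {
+    return 3*votePower > 2*totalPower
+}
+
+func main() {
+    fmt.Println(hasTwoThirdsMajority(67, 100)) // true
+    fmt.Println(hasTwoThirdsMajority(66, 100)) // false: not more than 2/3
+}
+```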
 ## ProposalPOLMessage

 ProposalPOLMessage is sent when a previous block is re-proposed.
diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md
index 649d3dd21d7..b5e0b35afbc 100644
--- a/docs/spec/reactors/consensus/proposer-selection.md
+++ b/docs/spec/reactors/consensus/proposer-selection.md
@@ -1,6 +1,6 @@
 # Proposer selection procedure in Tendermint

-This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
+This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
 As Tendermint is a "leader-based protocol", the proposer selection is critical for its correct functioning.
 Let us denote by `proposer_p(h,r)` the process returned by the Proposer Selection Procedure at the process p, at height h
 and round r. Then the Proposer Selection procedure should fulfill the following properties:

@@ -9,13 +9,13 @@ and round r. Then the Proposer Selection procedure should fulfill the following
 p and q, for each height h, and each round r, proposer_p(h,r) = proposer_q(h,r)

-`Liveness`: In every consecutive sequence of rounds of size K (K is system parameter), at least a
-single round has an honest proposer.
+`Liveness`: In every consecutive sequence of rounds of size K (K is a system parameter), at least a
+single round has an honest proposer.

-`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
-voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
-with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
-equal to its voting power.
+`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
+voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
+with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
+equal to its voting power.

 We now look at a few particular cases to better understand how fairness should be implemented.
 If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,

@@ -27,20 +27,20 @@ Let consider now the following scenario where a total voting power of faulty pro
 p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1).
 In this case the sequence of proposer selections looks like this:

-`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
+`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`

 In this case, we see that the number of rounds coordinated by a faulty process is proportional to its voting power.

-We consider also the case where we have voting power uniformly distributed among processes, i.e., we have 10 processes
-each with voting power of 1. And let consider that there are 3 faulty processes with consecutive addresses,
+We also consider the case where voting power is uniformly distributed among processes, i.e., we have 10 processes
+each with voting power of 1, and assume that there are 3 faulty processes with consecutive addresses,
 for example the first 3 processes are faulty. Then the sequence looks like this:

 `p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`

-In this case, we have 3 consecutive rounds with a faulty proposer.
+In this case, we have 3 consecutive rounds with a faulty proposer.

 One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
 (p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selection looks like this:
 p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc

-This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough
+This basically means that almost all rounds have the same proposer. But in this case, the process p0 already has enough
 voting power to decide whatever it wants, so the fact that it coordinates almost all rounds seems correct.
diff --git a/docs/spec/reactors/mempool/concurrency.md b/docs/spec/reactors/mempool/concurrency.md
index 991113e6da0..a6870db9bb5 100644
--- a/docs/spec/reactors/mempool/concurrency.md
+++ b/docs/spec/reactors/mempool/concurrency.md
@@ -2,7 +2,7 @@

 Look at the concurrency model this uses...

-* Receiving CheckTx
-* Broadcasting new tx
-* Interfaces with consensus engine, reap/update while checking
-* Calling the ABCI app (ordering. callbacks.
how proxy works alongside the blockchain proxy which actually writes blocks) +- Receiving CheckTx +- Broadcasting new tx +- Interfaces with consensus engine, reap/update while checking +- Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks) diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md index 776149ba050..4fb756fa493 100644 --- a/docs/spec/reactors/mempool/config.md +++ b/docs/spec/reactors/mempool/config.md @@ -6,23 +6,21 @@ as command-line flags, but they can also be passed in as environmental variables or in the config.toml file. The following are all equivalent: -Flag: `--mempool.recheck_empty=false` +Flag: `--mempool.recheck=false` -Environment: `TM_MEMPOOL_RECHECK_EMPTY=false` +Environment: `TM_MEMPOOL_RECHECK=false` Config: + ``` [mempool] -recheck_empty = false +recheck = false ``` - ## Recheck `--mempool.recheck=false` (default: true) -`--mempool.recheck_empty=false` (default: true) - Recheck determines if the mempool rechecks all pending transactions after a block was committed. Once a block is committed, the mempool removes all valid transactions @@ -31,9 +29,6 @@ that were successfully included in the block. If `recheck` is true, then it will rerun CheckTx on all remaining transactions with the new block state. -If the block contained no transactions, it will skip the -recheck unless `recheck_empty` is true. - ## Broadcast `--mempool.broadcast=false` (default: true) diff --git a/docs/spec/reactors/mempool/functionality.md b/docs/spec/reactors/mempool/functionality.md index 85c3dc58d40..4064def0b63 100644 --- a/docs/spec/reactors/mempool/functionality.md +++ b/docs/spec/reactors/mempool/functionality.md @@ -6,26 +6,26 @@ consensus reactor when it is selected as the block proposer. There are two sides to the mempool state: -* External: get, check, and broadcast new transactions -* Internal: return valid transaction, update list after block commit - +- External: get, check, and broadcast new transactions +- Internal: return valid transaction, update list after block commit ## External functionality External functionality is exposed via network interfaces to potentially untrusted actors. -* CheckTx - triggered via RPC or P2P -* Broadcast - gossip messages after a successful check +- CheckTx - triggered via RPC or P2P +- Broadcast - gossip messages after a successful check ## Internal functionality Internal functionality is exposed via method calls to other code compiled into the tendermint binary. -* Reap - get tx to propose in next block -* Update - remove tx that were included in last block -* ABCI.CheckTx - call ABCI app to validate the tx +- ReapMaxBytesMaxGas - get txs to propose in the next block. Guarantees that the + size of the txs is less than MaxBytes, and gas is less than MaxGas +- Update - remove tx that were included in last block +- ABCI.CheckTx - call ABCI app to validate the tx What does it provide the consensus reactor? What guarantees does it need from the ABCI app? @@ -33,5 +33,11 @@ What guarantees does it need from the ABCI app? ## Optimizations -Talk about the LRU cache to make sure we don't process any -tx that we have seen before +The implementation within this library also implements a tx cache. +This is so that signatures don't have to be reverified if the tx has +already been seen before. +However, we only store valid txs in the cache, not invalid ones. +This is because invalid txs could become good later. 
+Txs that are included in a block aren't removed from the cache, +as they still may be getting received over the p2p network. +These txs are stored in the cache by their hash, to mitigate memory concerns. \ No newline at end of file diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md index 9a624dff1dd..117fc5f2f61 100644 --- a/docs/spec/reactors/mempool/messages.md +++ b/docs/spec/reactors/mempool/messages.md @@ -35,12 +35,12 @@ Request (`POST http://gaia.zone:26657/`): ```json { - "id": "", - "jsonrpc": "2.0", - "method": "broadcast_sync", - "params": { + "id": "", + "jsonrpc": "2.0", + "method": "broadcast_sync", + "params": { "tx": "F012A4BC68..." - } + } } ``` @@ -48,14 +48,14 @@ Response: ```json { - "error": "", - "result": { - "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", - "log": "", - "data": "", - "code": 0 - }, - "id": "", - "jsonrpc": "2.0" + "error": "", + "result": { + "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", + "log": "", + "data": "", + "code": 0 + }, + "id": "", + "jsonrpc": "2.0" } ``` diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md index 2bdbd895126..fa25eeb3eae 100644 --- a/docs/spec/reactors/mempool/reactor.md +++ b/docs/spec/reactors/mempool/reactor.md @@ -2,12 +2,12 @@ ## Channels -[#1503](https://github.com/tendermint/tendermint/issues/1503) +See [this issue](https://github.com/tendermint/tendermint/issues/1503) Mempool maintains a cache of the last 10000 transactions to prevent replaying old transactions (plus transactions coming from other validators, who are continually exchanging transactions). Read [Replay -Protection](https://tendermint.readthedocs.io/projects/tools/en/master/app-development.html?#replay-protection) +Protection](../../../../app-development.md#replay-protection) for details. Sending incorrectly encoded data or data exceeding `maxMsgSize` will result diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md index 0f13c0cb549..26f1fa8bb04 100644 --- a/docs/spec/reactors/pex/pex.md +++ b/docs/spec/reactors/pex/pex.md @@ -15,6 +15,9 @@ we will not put them in the address book or gossip them to others. All peers except private peers and peers coming from them are tracked using the address book. +The rest of our peers are only distinguished by being either +inbound (they dialed our public address) or outbound (we dialed them). + ## Discovery Peer discovery begins with a list of seeds. @@ -26,15 +29,15 @@ and will attempt to maintain persistent connections with them. If the connection we will redial every 5s for a few minutes, then switch to an exponential backoff schedule, and after about a day of trying, stop dialing the peer. -So long as we have less than `MinNumOutboundPeers`, we periodically request additional peers +So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers from each of our own. If sufficient time goes by and we still can't find enough peers, we try the seeds again. ## Listening Peers listen on a configurable ListenAddr that they self-report in their -NodeInfo during handshakes with other peers. Peers accept up to (MaxNumPeers - -MinNumOutboundPeers) incoming peers. +NodeInfo during handshakes with other peers. Peers accept up to +`MaxNumInboundPeers` incoming peers. ## Address Book @@ -73,10 +76,11 @@ a trust metric (see below), but it's best to start with something simple. 
## Select Peers to Dial -When we need more peers, we pick them randomly from the addrbook with some -configurable bias for unvetted peers. The bias should be lower when we have fewer peers -and can increase as we obtain more, ensuring that our first peers are more trustworthy, -but always giving us the chance to discover new good peers. +When we need more peers, we pick addresses randomly from the addrbook with some +configurable bias for unvetted peers. The bias should be lower when we have +fewer peers and can increase as we obtain more, ensuring that our first peers +are more trustworthy, but always giving us the chance to discover new good +peers. We track the last time we dialed a peer and the number of unsuccessful attempts we've made. If too many attempts are made, we mark the peer as bad. @@ -85,11 +89,13 @@ Connection attempts are made with exponential backoff (plus jitter). Because the selection process happens every `ensurePeersPeriod`, we might not end up dialing a peer for much longer than the backoff duration. -If we fail to connect to the peer after 16 tries (with exponential backoff), we remove from address book completely. +If we fail to connect to the peer after 16 tries (with exponential backoff), we +remove from address book completely. ## Select Peers to Exchange When we’re asked for peers, we select them as follows: + - select at most `maxGetSelection` peers - try to select at least `minGetSelection` peers - if we have less than that, select them all. - select a random, unbiased `getSelectionPercent` of the peers @@ -121,4 +127,3 @@ to use it in the PEX. See the [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md) and [trustmetric useage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md) architecture docs for more details. - diff --git a/docs/spec/software/abci.md b/docs/spec/software/abci.md index 613e181f0d9..6e17089f368 100644 --- a/docs/spec/software/abci.md +++ b/docs/spec/software/abci.md @@ -1,192 +1,3 @@ # Application Blockchain Interface (ABCI) -ABCI is the interface between Tendermint (a state-machine replication engine) -and an application (the actual state machine). - -The ABCI message types are defined in a [protobuf -file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). - -For full details on the ABCI message types and protocol, see the [ABCI -specification](https://github.com/tendermint/tendermint/blob/develop/docs/abci-spec.md). -Be sure to read the specification if you're trying to build an ABCI app! - -For additional details on server implementation, see the [ABCI -readme](https://github.com/tendermint/tendermint/blob/develop/abci/README.md). - -Here we provide some more details around the use of ABCI by Tendermint and -clarify common "gotchas". - -## ABCI connections - -Tendermint opens 3 ABCI connections to the app: one for Consensus, one for -Mempool, one for Queries. - -## Async vs Sync - -The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages. -This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward -transactions to the app before it's finished processing previous ones. - -Thus, DeliverTx and CheckTx messages are sent asycnhronously, while all other -messages are sent synchronously. - -## CheckTx and Commit - -It is typical to hold three distinct states in an ABCI app: CheckTxState, DeliverTxState, -QueryState. 
The QueryState contains the latest committed state for a block. -The CheckTxState and DeliverTxState may be updated concurrently with one another. -Before Commit is called, Tendermint locks and flushes the mempool so that no new changes will happen -to CheckTxState. When Commit completes, it unlocks the mempool. - -Thus, during Commit, it is safe to reset the QueryState and the CheckTxState to the latest DeliverTxState -(ie. the new state from executing all the txs in the block). - -Note, however, that it is not possible to send transactions to Tendermint during Commit - if your app -tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock. - - -## EndBlock Validator Updates - -Updates to the Tendermint validator set can be made by returning `Validator` -objects in the `ResponseBeginBlock`: - -``` -message Validator { - bytes address = 1; - PubKey pub_key = 2; - int64 power = 3; -} - -message PubKey { - string type = 1; - bytes data = 2; -} - -``` - -The `pub_key` currently supports two types: - - `type = "ed25519" and `data = ` - - `type = "secp256k1" and `data = <33-byte OpenSSL compressed public key>` - -If the address is provided, it must match the address of the pubkey, as -specified [here](/docs/spec/blockchain/encoding.md#Addresses) - -(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public -key](/docs/spec/blockchain/encoding.md#public-key-cryptography). -For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey -`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be -Amino encoded as -`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`) - -(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a -single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`) - -The `power` is the new voting power for the validator, with the -following rules: - -- power must be non-negative -- if power is 0, the validator must already exist, and will be removed from the - validator set -- if power is non-0: - - if the validator does not already exist, it will be added to the validator - set with the given power - - if the validator does already exist, its power will be adjusted to the given power - -## InitChain Validator Updates - -ResponseInitChain has the option to return a list of validators. -If the list is not empty, Tendermint will adopt it for the validator set. -This way the application can determine the initial validator set for the -blockchain. - -Note that if addressses are included in the returned validators, they must match -the address of the public key. - -ResponseInitChain also includes ConsensusParams, but these are presently -ignored. - -## Query - -Query is a generic message type with lots of flexibility to enable diverse sets -of queries from applications. Tendermint has no requirements from the Query -message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish too. -That said, Tendermint makes a number of queries to support some optional -features. These are: - -### Peer Filtering - -When Tendermint connects to a peer, it sends two queries to the ABCI application -using the following paths, with no additional data: - - - `/p2p/filter/addr/`, where `` denote the IP address and - the port of the connection - - `p2p/filter/id/`, where `` is the peer node ID (ie. 
the - pubkey.Address() for the peer's PubKey) - -If either of these queries return a non-zero ABCI code, Tendermint will refuse -to connect to the peer. - -## Info and the Handshake/Replay - -On startup, Tendermint calls Info on the Query connection to get the latest -committed state of the app. The app MUST return information consistent with the -last block it succesfully completed Commit for. - -If the app succesfully committed block H but not H+1, then `last_block_height = -H` and `last_block_app_hash = `. If the app -failed during the Commit of block H, then `last_block_height = H-1` and -`last_block_app_hash = `. - -We now distinguish three heights, and describe how Tendermint syncs itself with -the app. - -``` -storeBlockHeight = height of the last block Tendermint saw a commit for -stateBlockHeight = height of the last block for which Tendermint completed all - block processing and saved all ABCI results to disk -appBlockHeight = height of the last block for which ABCI app succesfully - completely Commit -``` - -Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight` -Note also we never call Commit on an ABCI app twice for the same height. - -The procedure is as follows. - -First, some simeple start conditions: - -If `appBlockHeight == 0`, then call InitChain. - -If `storeBlockHeight == 0`, we're done. - -Now, some sanity checks: - -If `storeBlockHeight < appBlockHeight`, error -If `storeBlockHeight < stateBlockHeight`, panic -If `storeBlockHeight > stateBlockHeight+1`, panic - -Now, the meat: - -If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`, - replay all blocks in full from `appBlockHeight` to `storeBlockHeight`. - This happens if we completed processing the block, but the app forgot its height. - -If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done - This happens if we crashed at an opportune spot. - -If `storeBlockHeight == stateBlockHeight+1` - This happens if we started processing the block but didn't finish. - - If `appBlockHeight < stateBlockHeight` - replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`, - and replay the block at `storeBlockHeight` using the WAL. - This happens if the app forgot the last block it committed. - - If `appBlockHeight == stateBlockHeight`, - replay the last block (storeBlockHeight) in full. - This happens if we crashed before the app finished Commit - - If appBlockHeight == storeBlockHeight { - update the state using the saved ABCI responses but dont run the block against the real app. - This happens if we crashed after the app finished Commit but before Tendermint saved the state. +This page has [moved](../abci/apps.md). diff --git a/docs/spec/software/wal.md b/docs/spec/software/wal.md index a2e03137d39..1f5d712c5ed 100644 --- a/docs/spec/software/wal.md +++ b/docs/spec/software/wal.md @@ -28,6 +28,5 @@ WAL. Then it will go to precommit, and that time it will work because the private validator contains the `LastSignBytes` and then we’ll replay the precommit from the WAL. -Make sure to read about [WAL -corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption) +Make sure to read about [WAL corruption](../../../tendermint-core/running-in-production.md#wal-corruption) and recovery strategies. 
diff --git a/docs/stop-words.txt b/docs/stop-words.txt new file mode 100644 index 00000000000..7f90eca320e --- /dev/null +++ b/docs/stop-words.txt @@ -0,0 +1,6 @@ +investor +invest +investing +token distribution +atom distribution +distribution of atoms diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md new file mode 100644 index 00000000000..88228a58165 --- /dev/null +++ b/docs/tendermint-core/README.md @@ -0,0 +1,4 @@ +# Overview + +See the side-bar for details on the various features of Tendermint Core. + diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md index 803805529fa..587db0ff44a 100644 --- a/docs/tendermint-core/block-structure.md +++ b/docs/tendermint-core/block-structure.md @@ -7,200 +7,6 @@ nodes. This blockchain is accessible via various rpc endpoints, mainly `/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what exactly is stored in these blocks? -## Block +The [specification](../spec/blockchain/blockchain.md) contains a detailed description of each component - that's the best place to get started. -A -[Block](https://godoc.org/github.com/tendermint/tendermint/types#Block) -contains: - -- a [Header](#header) contains merkle hashes for various chain states -- the - [Data](https://godoc.org/github.com/tendermint/tendermint/types#Data) - is all transactions which are to be processed -- the [LastCommit](#commit) > 2/3 signatures for the last block - -The signatures returned along with block `H` are those validating block -`H-1`. This can be a little confusing, but we must also consider that -the `Header` also contains the `LastCommitHash`. It would be impossible -for a Header to include the commits that sign it, as it would cause an -infinite loop here. But when we get block `H`, we find -`Header.LastCommitHash`, which must match the hash of `LastCommit`. - -## Header - -The -[Header](https://godoc.org/github.com/tendermint/tendermint/types#Header) -contains lots of information (follow link for up-to-date info). Notably, -it maintains the `Height`, the `LastBlockID` (to make it a chain), and -hashes of the data, the app state, and the validator set. This is -important as the only item that is signed by the validators is the -`Header`, and all other data must be validated against one of the merkle -hashes in the `Header`. - -The `DataHash` can provide a nice check on the -[Data](https://godoc.org/github.com/tendermint/tendermint/types#Data) -returned in this same block. If you are subscribed to new blocks, via -tendermint RPC, in order to display or process the new transactions you -should at least validate that the `DataHash` is valid. If it is -important to verify autheniticity, you must wait for the `LastCommit` -from the next block to make sure the block header (including `DataHash`) -was properly signed. - -The `ValidatorHash` contains a hash of the current -[Validators](https://godoc.org/github.com/tendermint/tendermint/types#Validator). -Tracking all changes in the validator set is complex, but a client can -quickly compare this hash with the [hash of the currently known -validators](https://godoc.org/github.com/tendermint/tendermint/types#ValidatorSet.Hash) -to see if there have been changes. - -The `AppHash` serves as the basis for validating any merkle proofs that -come from the ABCI application. It represents the state of the actual -application, rather that the state of the blockchain itself. 
This means -it's necessary in order to perform any business logic, such as verifying -an account balance. - -**Note** After the transactions are committed to a block, they still -need to be processed in a separate step, which happens between the -blocks. If you find a given transaction in the block at height `H`, the -effects of running that transaction will be first visible in the -`AppHash` from the block header at height `H+1`. - -Like the `LastCommit` issue, this is a requirement of the immutability -of the block chain, as the application only applies transactions *after* -they are commited to the chain. - -## Commit - -The -[Commit](https://godoc.org/github.com/tendermint/tendermint/types#Commit) -contains a set of -[Votes](https://godoc.org/github.com/tendermint/tendermint/types#Vote) -that were made by the validator set to reach consensus on this block. -This is the key to the security in any PoS system, and actually no data -that cannot be traced back to a block header with a valid set of Votes -can be trusted. Thus, getting the Commit data and verifying the votes is -extremely important. - -As mentioned above, in order to find the `precommit votes` for block -header `H`, we need to query block `H+1`. Then we need to check the -votes, make sure they really are for that block, and properly formatted. -Much of this code is implemented in Go in the -[light-client](https://github.com/tendermint/light-client) package. If -you look at the code, you will notice that we need to provide the -`chainID` of the blockchain in order to properly calculate the votes. -This is to protect anyone from swapping votes between chains to fake (or -frame) a validator. Also note that this `chainID` is in the -`genesis.json` from *Tendermint*, not the `genesis.json` from the -basecoin app ([that is a different -chainID...](https://github.com/cosmos/cosmos-sdk/issues/32)). - -Once we have those votes, and we calculated the proper [sign -bytes](https://godoc.org/github.com/tendermint/tendermint/types#Vote.WriteSignBytes) -using the chainID and a [nice helper -function](https://godoc.org/github.com/tendermint/tendermint/types#SignBytes), -we can verify them. The light client is responsible for maintaining a -set of validators that we trust. Each vote only stores the validators -`Address`, as well as the `Signature`. Assuming we have a local copy of -the trusted validator set, we can look up the `Public Key` of the -validator given its `Address`, then verify that the `Signature` matches -the `SignBytes` and `Public Key`. Then we sum up the total voting power -of all validators, whose votes fulfilled all these stringent -requirements. If the total number of voting power for a single block is -greater than 2/3 of all voting power, then we can finally trust the -block header, the AppHash, and the proof we got from the ABCI -application. - -### Vote Sign Bytes - -The `sign-bytes` of a vote is produced by taking a -[stable-json](https://github.com/substack/json-stable-stringify)-like -deterministic JSON [wire](./wire-protocol.html) encoding of the vote -(excluding the `Signature` field), and wrapping it with -`{"chain_id":"my_chain","vote":...}`. 
- -For example, a precommit vote might have the following `sign-bytes`: - -``` -{"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}} -``` - -## Block Hash - -The [block -hash](https://godoc.org/github.com/tendermint/tendermint/types#Block.Hash) -is the [Simple Tree hash](./merkle.html#simple-tree-with-dictionaries) -of the fields of the block `Header` encoded as a list of `KVPair`s. - -## Transaction - -A transaction is any sequence of bytes. It is up to your ABCI -application to accept or reject transactions. - -## BlockID - -Many of these data structures refer to the -[BlockID](https://godoc.org/github.com/tendermint/tendermint/types#BlockID), -which is the `BlockHash` (hash of the block header, also referred to by -the next block) along with the `PartSetHeader`. The `PartSetHeader` is -explained below and is used internally to orchestrate the p2p -propogation. For clients, it is basically opaque bytes, but they must -match for all votes. - -## PartSetHeader - -The -[PartSetHeader](https://godoc.org/github.com/tendermint/tendermint/types#PartSetHeader) -contains the total number of pieces in a -[PartSet](https://godoc.org/github.com/tendermint/tendermint/types#PartSet), -and the Merkle root hash of those pieces. - -## PartSet - -PartSet is used to split a byteslice of data into parts (pieces) for -transmission. By splitting data into smaller parts and computing a -Merkle root hash on the list, you can verify that a part is legitimately -part of the complete data, and the part can be forwarded to other peers -before all the parts are known. In short, it's a fast way to securely -propagate a large chunk of data (like a block) over a gossip network. - -PartSet was inspired by the LibSwift project. - -Usage: - -``` -data := RandBytes(2 << 20) // Something large - -partSet := NewPartSetFromData(data) -partSet.Total() // Total number of 4KB parts -partSet.Count() // Equal to the Total, since we already have all the parts -partSet.Hash() // The Merkle root hash -partSet.BitArray() // A BitArray of partSet.Total() 1's - -header := partSet.Header() // Send this to the peer -header.Total // Total number of parts -header.Hash // The merkle root hash - -// Now we'll reconstruct the data from the parts -partSet2 := NewPartSetFromHeader(header) -partSet2.Total() // Same total as partSet.Total() -partSet2.Count() // Zero, since this PartSet doesn't have any parts yet. -partSet2.Hash() // Same hash as in partSet.Hash() -partSet2.BitArray() // A BitArray of partSet.Total() 0's - -// In a gossip network the parts would arrive in arbitrary order, perhaps -// in response to explicit requests for parts, or optimistically in response -// to the receiving peer's partSet.BitArray(). -for !partSet2.IsComplete() { - part := receivePartFromGossipNetwork() - added, err := partSet2.AddPart(part) - if err != nil { - // A wrong part, - // the merkle trail does not hash to partSet2.Hash() - } else if !added { - // A duplicate part already received - } -} - -data2, _ := ioutil.ReadAll(partSet2.GetReader()) -bytes.Equal(data, data2) // true -``` +To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types). 
diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index ab2d7cc1729..8b3c3c22f40 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -30,11 +30,11 @@ moniker = "anonymous" # and verifying their commits fast_sync = true -# Database backend: leveldb | memdb +# Database backend: leveldb | memdb | cleveldb db_backend = "leveldb" # Database directory -db_path = "data" +db_dir = "data" # Output level for logging log_level = "state:info,*:error" @@ -77,6 +77,8 @@ grpc_laddr = "" # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 grpc_max_open_connections = 900 # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool @@ -87,7 +89,9 @@ unsafe = false # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. -max_open_connections = 450 +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 ##### peer to peer configuration options ##### [p2p] @@ -108,13 +112,17 @@ upnp = false addr_book_file = "addrbook.json" # Set true for strict address routability rules +# Set false for private or local networks addr_book_strict = true -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 +# Maximum number of inbound peers +max_num_inbound_peers = 40 -# Maximum number of peers to connect to -max_num_peers = 50 +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 + +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = 1024 @@ -137,11 +145,17 @@ seed_mode = false # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = true + +# Peer connection configuration. 
+handshake_timeout = "20s" +dial_timeout = "3s" + ##### mempool configuration options ##### [mempool] recheck = true -recheck_empty = true broadcast = true wal_dir = "data/mempool.wal" @@ -156,25 +170,24 @@ cache_size = 100000 wal_file = "data/cs.wal/wal" -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 +timeout_propose = "3000ms" +timeout_propose_delta = "500ms" +timeout_prevote = "1000ms" +timeout_prevote_delta = "500ms" +timeout_precommit = "1000ms" +timeout_precommit_delta = "500ms" +timeout_commit = "1000ms" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = false -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = true -create_empty_blocks_interval = 0 +create_empty_blocks_interval = "0s" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2000ms" ##### transactions indexer configuration options ##### [tx_index] @@ -186,16 +199,21 @@ peer_query_maj23_sleep_duration = 2000 # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). indexer = "kv" -# Comma-separated list of tags to index (by default the only tag is tx hash) +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. # # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). index_all_tags = false ##### instrumentation configuration options ##### @@ -214,4 +232,7 @@ prometheus_listen_addr = ":26660" # you increase your OS limits. # 0 - unlimited. max_open_connections = 3 + +# Instrumentation namespace +namespace = "tendermint" ``` diff --git a/docs/networks/fast-sync.md b/docs/tendermint-core/fast-sync.md similarity index 100% rename from docs/networks/fast-sync.md rename to docs/tendermint-core/fast-sync.md diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index 83dab38704a..54c2c8a325f 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ b/docs/tendermint-core/how-to-read-logs.md @@ -63,8 +63,8 @@ Next follows a standard block creation cycle, where we enter a new round, propose a block, receive more than 2/3 of prevotes, then precommits and finally have a chance to commit a block. For details, please refer to [Consensus -Overview](./introduction.md#consensus-overview) or [Byzantine Consensus -Algorithm](./spec/consensus). 
+Overview](../introduction/introduction.md#consensus-overview) or [Byzantine Consensus
+Algorithm](../spec/consensus/consensus.md).

```
I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus
@@ -112,7 +112,7 @@ I[10-04|13:54:30.410] Recheck txs module=mempoo

Here is the list of modules you may encounter in Tendermint's log and a
little overview of what they do.

-- `abci-client` As mentioned in [Application Development Guide](./app-development.md), Tendermint acts as an ABCI
+- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI
  client with respect to the application and maintains 3 connections:
  mempool, consensus and query. The code used by Tendermint Core can
  be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client).
@@ -127,15 +127,15 @@ little overview what they do.
  found [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). You can subscribe to them
  by calling the `subscribe` RPC method. Refer
-  to [RPC docs](./specification/rpc.md) for additional information.
+  to [RPC docs](./rpc.md) for additional information.
- `mempool` Mempool module handles all incoming transactions, whether
  they come from peers or the application.
- `p2p` Provides an abstraction around peer-to-peer communication. For
  more details, please check out the
  [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md).
-- `rpc` [Tendermint's RPC](./specification/rpc.md).
+- `rpc` [Tendermint's RPC](./rpc.md).
- `rpc-server` RPC server. For implementation details, please read the
-  [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md).
+  [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/lib/doc.go).
- `state` Represents the latest state and execution submodule, which
  executes blocks against the application.
- `types` A collection of the publicly exposed types and methods to
diff --git a/docs/tendermint-core/light-client-protocol.md b/docs/tendermint-core/light-client-protocol.md
index 6d905be32fa..f9ef4bd03cc 100644
--- a/docs/tendermint-core/light-client-protocol.md
+++ b/docs/tendermint-core/light-client-protocol.md
@@ -10,21 +10,21 @@ package](https://godoc.org/github.com/tendermint/tendermint/lite).

## Overview

The objective of the light client protocol is to get a
-[commit](./validators.md#committing-a-block) for a recent [block
-hash](../spec/consensus/consensus.md.md#block-hash) where the commit includes a
+commit for a recent block
+hash where the commit includes a
majority of signatures from the last known validator set. From there,
all the application state is verifiable with [merkle
-proofs](./merkle.md#iavl-tree).
+proofs](../spec/blockchain/encoding.md#iavl-tree).

## Properties

-- You get the full collateralized security benefits of Tendermint; No
-  need to wait for confirmations.
-- You get the full speed benefits of Tendermint; transactions
-  commit instantly.
-- You can get the most recent version of the application state
-  non-interactively (without committing anything to the blockchain).
-  For example, this means that you can get the most recent value of a
-  name from the name-registry without worrying about fork censorship
-  attacks, without posting a commit and waiting for confirmations.
-  It's fast, secure, and free!
+- You get the full collateralized security benefits of Tendermint; no
+  need to wait for confirmations.
+- You get the full speed benefits of Tendermint; transactions
+  commit instantly.
+- You can get the most recent version of the application state
+  non-interactively (without committing anything to the blockchain).
+  For example, this means that you can get the most recent value of a
+  name from the name-registry without worrying about fork censorship
+  attacks, without posting a commit and waiting for confirmations.
+  It's fast, secure, and free!
diff --git a/docs/tendermint-core/metrics.md b/docs/tendermint-core/metrics.md
index b469c6890ea..ad6d4c765cd 100644
--- a/docs/tendermint-core/metrics.md
+++ b/docs/tendermint-core/metrics.md
@@ -8,35 +8,45 @@ This functionality is disabled by default.

To enable the Prometheus metrics, set `instrumentation.prometheus=true` in
your config file. Metrics will be served under `/metrics` on port 26660
by default. Listen address can be changed in the config file (see
`instrumentation.prometheus_listen_addr`).

## List of available metrics

The following metrics are available:

-```
-| Name | Type | Since | Description |
-| --------------------------------------- | ------- | --------- | ----------------------------------------------------------------------------- |
-| consensus_height | Gauge | 0.21.0 | Height of the chain |
-| consensus_validators | Gauge | 0.21.0 | Number of validators |
-| consensus_validators_power | Gauge | 0.21.0 | Total voting power of all validators |
-| consensus_missing_validators | Gauge | 0.21.0 | Number of validators who did not sign |
-| consensus_missing_validators_power | Gauge | 0.21.0 | Total voting power of the missing validators |
-| consensus_byzantine_validators | Gauge | 0.21.0 | Number of validators who tried to double sign |
-| consensus_byzantine_validators_power | Gauge | 0.21.0 | Total voting power of the byzantine validators |
-| consensus_block_interval_seconds | Histogram | 0.21.0 | Time between this and last block (Block.Header.Time) in seconds |
-| consensus_rounds | Gauge | 0.21.0 | Number of rounds |
-| consensus_num_txs | Gauge | 0.21.0 | Number of transactions |
-| mempool_size | Gauge | 0.21.0 | Number of uncommitted transactions |
-| consensus_total_txs | Gauge | 0.21.0 | Total number of transactions committed |
-| consensus_block_size_bytes | Gauge | 0.21.0 | Block size in bytes |
-| p2p_peers | Gauge | 0.21.0 | Number of peers node's connected to |
-```
+| **Name** | **Type** | **Since** | **Tags** | **Description** |
+|-----------------------------------------|-----------|-----------|----------|-----------------------------------------------------------------|
+| consensus\_height | Gauge | 0.21.0 | | Height of the chain |
+| consensus\_validators | Gauge | 0.21.0 | | Number of validators |
+| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators |
+| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign |
+| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators |
+| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign |
+| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators |
+| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds |
+| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds |
+| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions |
+| consensus\_block\_parts | Counter | on dev | peer\_id | Number of block parts transmitted by each peer |
+| consensus\_latest\_block\_height | Gauge | on dev | | The latest block height, as seen in /status sync\_info |
+| consensus\_fast\_syncing | Gauge | on dev | | Either 0 (not fast syncing) or 1 (syncing) |
+| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed |
+| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes |
+| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to |
+| p2p\_peer\_receive\_bytes\_total | Counter | on dev | peer\_id | Number of bytes received from a given peer |
+| p2p\_peer\_send\_bytes\_total | Counter | on dev | peer\_id | Number of bytes sent to a given peer |
+| p2p\_peer\_pending\_send\_bytes | Gauge | on dev | peer\_id | Number of pending bytes to be sent to a given peer |
+| p2p\_num\_txs | Gauge | on dev | peer\_id | Number of transactions submitted by each peer |
+| p2p\_pending\_send\_bytes | Gauge | on dev | peer\_id | Amount of data pending to be sent to a given peer |
+| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions |
+| mempool\_tx\_size\_bytes | Histogram | on dev | | Transaction sizes in bytes |
+| mempool\_failed\_txs | Counter | on dev | | Number of failed transactions |
+| mempool\_recheck\_times | Counter | on dev | | Number of transactions rechecked in the mempool |
+| state\_block\_processing\_time | Histogram | on dev | | Time between BeginBlock and EndBlock in ms |

## Useful queries

Percentage of missing + byzantine validators:

```
((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100
```
diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md
index 2f3a72c747e..7ae59f0d315 100644
--- a/docs/tendermint-core/rpc.md
+++ b/docs/tendermint-core/rpc.md
@@ -1,3 +1,7 @@
# RPC

-The RPC documentation is hosted [here](https://tendermint.github.io/slate) and is generated by the CI from our [Slate repo](https://github.com/tendermint/slate). To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).
+The RPC documentation is hosted here:
+
+- https://tendermint.com/rpc/
+
+To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).
diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md
index 0947343207a..fb98626adcb 100644
--- a/docs/tendermint-core/running-in-production.md
+++ b/docs/tendermint-core/running-in-production.md
@@ -1,5 +1,41 @@
# Running in production

+## Database
+
+By default, Tendermint uses the `syndtr/goleveldb` package for its in-process
+key-value database. Unfortunately, this implementation of LevelDB seems to suffer under heavy load (see
+[#226](https://github.com/syndtr/goleveldb/issues/226)). It may be best to
+install the real C-implementation of LevelDB and compile Tendermint to use
+that using `make build_c`. See the [install instructions](../introduction/install.md) for details.
+
+Tendermint keeps multiple distinct LevelDB databases in `$TMROOT/data`:
+
+- `blockstore.db`: Keeps the entire blockchain - stores blocks,
+  block commits, and block metadata, each indexed by height. Used to sync new
+  peers.
+- `evidence.db`: Stores all verified evidence of misbehaviour.
+- `state.db`: Stores the current blockchain state (ie. height, validators,
+  consensus params). Only grows if consensus params or validators change. Also
+  used to temporarily store intermediate results during block processing.
+- `tx_index.db`: Indexes txs (and their results) by tx hash and by DeliverTx result tags.
+
+By default, Tendermint will only index txs by their hash, not by their DeliverTx
+result tags. See [indexing transactions](../app-dev/indexing-transactions.md) for
+details.
+
+There is no current strategy for pruning the databases. Consider reducing
+block production by [controlling empty blocks](../tendermint-core/using-tendermint.md#no-empty-blocks)
+or by increasing the `consensus.timeout_commit` param. Note both of these are
+local settings and not enforced by the consensus.
+
+We're working on [state
+syncing](https://github.com/tendermint/tendermint/issues/828),
+which will enable history to be thrown away
+and recent application state to be directly synced. We'll need to develop solutions
+for archival nodes that allow queries on historical transactions and states.
+The Cosmos project has had much success just dumping the latest state of a
+blockchain to disk and starting a new chain from that state.
+
## Logging

Default logging level (`main:info,state:info,*:`) should suffice for
@@ -11,6 +47,37 @@
you're trying to debug Tendermint or asked to provide logs with debug
logging level, you can do so by running tendermint with
`--log_level="*:debug"`.

+## Write Ahead Logs (WAL)
+
+Tendermint uses write-ahead logs for the consensus (`cs.wal`) and the mempool
+(`mempool.wal`). Both WALs have a max size of 1GB and are automatically rotated.
+
+### Consensus WAL
+
+The `consensus.wal` is used to ensure we can recover from a crash at any point
+in the consensus state machine.
+It writes all consensus messages (timeouts, proposals, block parts, and votes)
+to a single file, flushing to disk before processing messages from its own
+validator. Since Tendermint validators are expected to never sign a conflicting vote, the
+WAL ensures we can always recover deterministically to the latest state of the consensus without
+using the network or re-signing any consensus messages.
+
+If your `consensus.wal` is corrupted, see [below](#wal-corruption).
+
+### Mempool WAL
+
+The `mempool.wal` logs all incoming txs before running CheckTx, but is
+otherwise not used in any programmatic way. It's just a kind of manual
+safeguard. Note the mempool provides no durability guarantees - a tx sent to one or many nodes
+may never make it into the blockchain if those nodes crash before being able to
+propose it. Clients must monitor their txs by subscribing over websockets,
+polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be
+resent from the mempool WAL manually.
+
+For the above reasons, the `mempool.wal` is disabled by default. To enable, set
+`mempool.wal_dir` to where you want the WAL to be located (e.g.
+`data/mempool.wal`).
+
## DOS Exposure and Mitigation

Validators are supposed to set up [Sentry Node
@@ -28,7 +95,8 @@ send & receive rate per connection (`SendRate`, `RecvRate`).

### RPC

Endpoints returning multiple entries are limited by default to return 30
-elements (100 max).
+elements (100 max). See the [RPC Documentation](https://tendermint.com/rpc/)
+for more information.

Rate-limiting and authentication are other key aspects that help protect
against DOS attacks.
While in the future we may implement these
@@ -40,12 +108,12 @@ to achieve the same things.

## Debugging Tendermint

If you ever have to debug Tendermint, the first thing you should
-probably do is to check out the logs. See ["How to read
-logs"](./how-to-read-logs.md), where we explain what certain log
+probably do is to check out the logs. See [How to read
+logs](./how-to-read-logs.md), where we explain what certain log
statements mean.

If, after skimming through the logs, things are not clear still, the
-second TODO is to query the /status RPC endpoint. It provides the
+next thing to try is to query the /status RPC endpoint. It provides the
necessary info: whether the node is syncing or not, what height it is
on, etc.

@@ -80,7 +148,7 @@ Other useful endpoints include mentioned earlier `/status`, `/net_info` and

We have a small tool, called `tm-monitor`, which outputs information from
the endpoints above plus some statistics. The tool can be found
-[here](https://github.com/tendermint/tools/tree/master/tm-monitor).
+[here](https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor).

Tendermint also can report and serve Prometheus metrics. See
[Metrics](./metrics.md).

@@ -135,36 +203,37 @@ Tendermint, replay will fail with panic.

Recovering from data corruption can be hard and time-consuming. Here are two
approaches you can take:

-1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
-2) Try to repair the WAL file manually:

-  1. Create a backup of the corrupted WAL file:
+1. Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
+2. Try to repair the WAL file manually:

+1. Create a backup of the corrupted WAL file:

```
cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup
```

-  2. Use `./scripts/wal2json` to create a human-readable version
+2. Use `./scripts/wal2json` to create a human-readable version

```
./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
```

-  3. Search for a "CORRUPTED MESSAGE" line.
-  4. By looking at the previous message and the message after the corrupted one
-     and looking at the logs, try to rebuild the message. If the consequent
-     messages are marked as corrupted too (this may happen if length header
-     got corrupted or some writes did not make it to the WAL ~ truncation),
-     then remove all the lines starting from the corrupted one and restart
-     Tendermint.
+3. Search for a "CORRUPTED MESSAGE" line.
+4. By looking at the previous message and the message after the corrupted one
+   and looking at the logs, try to rebuild the message. If the subsequent
+   messages are marked as corrupted too (this may happen if the length header
+   got corrupted or some writes did not make it to the WAL ~ truncation),
+   then remove all the lines starting from the corrupted one and restart
+   Tendermint.

```
$EDITOR /tmp/corrupted_wal
```

-  5. After editing, convert this file back into binary form by running:
+
+5. After editing, convert this file back into binary form by running:

```
-./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal"
+./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal
```

## Hardware

@@ -206,14 +275,15 @@ operation systems (like Mac OS).

### Miscellaneous

NOTE: if you are going to use Tendermint in a public domain, make sure
-you read [hardware recommendations (see "4.
-Hardware")](https://cosmos.network/validators) for a validator in the
+you read [hardware recommendations](https://cosmos.network/validators) for a validator in the
Cosmos network.

## Configuration parameters

-- `p2p.flush_throttle_timeout` `p2p.max_packet_msg_payload_size`
-  `p2p.send_rate` `p2p.recv_rate`
+- `p2p.flush_throttle_timeout`
+- `p2p.max_packet_msg_payload_size`
+- `p2p.send_rate`
+- `p2p.recv_rate`

If you are going to use Tendermint in a private domain and you have a
private high-speed network among your peers, it makes sense to lower
@@ -268,9 +338,9 @@ saving it to the address book.

The address is considered routable if the IP is [valid and within allowed
ranges](https://github.com/tendermint/tendermint/blob/27bd1deabe4ba6a2d9b463b8f3e3f1e31b993e61/p2p/netaddress.go#L209).

-This may not be the case for private networks, where your IP range is usually
+This may not be the case for private or local networks, where your IP range is usually
strictly limited and private. In that case, you need to set `addr_book_strict`
-to `false` (turn off).
+to `false` (turn it off).

- `rpc.max_open_connections`

diff --git a/docs/tendermint-core/secure-p2p.md b/docs/tendermint-core/secure-p2p.md
index aad5eac4113..ee02f3f7c0c 100644
--- a/docs/tendermint-core/secure-p2p.md
+++ b/docs/tendermint-core/secure-p2p.md
@@ -3,50 +3,48 @@
The Tendermint p2p protocol uses an authenticated encryption scheme
based on the [Station-to-Station
Protocol](https://en.wikipedia.org/wiki/Station-to-Station_protocol).

-The implementation uses
-[golang's](https://godoc.org/golang.org/x/crypto/nacl/box) [nacl
-box](http://nacl.cr.yp.to/box.html) for the actual authenticated
-encryption algorithm.

Each peer generates an ED25519 key-pair to use as a persistent
(long-term) id.

When two peers establish a TCP connection, they first each generate an
-ephemeral ED25519 key-pair to use for this session, and send each other
+ephemeral X25519 key-pair to use for this session, and send each other
their respective ephemeral public keys. This happens in the clear.

-They then each compute the shared secret. The shared secret is the
-multiplication of the peer's ephemeral private key by the other peer's
-ephemeral public key. The result is the same for both peers by the magic
-of [elliptic
-curves](https://en.wikipedia.org/wiki/Elliptic_curve_cryptography). The
-shared secret is used as the symmetric key for the encryption algorithm.
-
-The two ephemeral public keys are sorted to establish a canonical order.
-Then a 24-byte nonce is generated by concatenating the public keys and
-hashing them with Ripemd160. Note Ripemd160 produces 20byte hashes, so
-the nonce ends with four 0s.
-
-The nonce is used to seed the encryption - it is critical that the same
-nonce never be used twice with the same private key. For convenience,
-the last bit of the nonce is flipped, giving us two nonces: one for
-encrypting our own messages, one for decrypting our peer's. Which ever
-peer has the higher public key uses the "bit-flipped" nonce for
-encryption.
-
-Now, a challenge is generated by concatenating the ephemeral public keys
-and taking the SHA256 hash.
-
-Each peer signs the challenge with their persistent private key, and
+They then each compute the shared secret, as done in a [Diffie-Hellman
+key exchange](https://en.wikipedia.org/wiki/Diffie%E2%80%93Hellman_key_exchange).
+The shared secret is used as the symmetric key for the encryption algorithm.
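+
+For illustration only, the key-agreement step on its own can be sketched in a
+few lines of Go with `golang.org/x/crypto/curve25519` (the real implementation
+lives in `p2p/conn/secret_connection.go` and continues with the key expansion
+and authentication described below):
+
+```go
+package main
+
+import (
+	"crypto/rand"
+	"fmt"
+
+	"golang.org/x/crypto/curve25519"
+)
+
+func main() {
+	// Each peer draws a fresh ephemeral private key for the session...
+	var alicePriv, bobPriv, alicePub, bobPub [32]byte
+	if _, err := rand.Read(alicePriv[:]); err != nil {
+		panic(err)
+	}
+	if _, err := rand.Read(bobPriv[:]); err != nil {
+		panic(err)
+	}
+
+	// ...derives the matching public key, and sends it in the clear.
+	curve25519.ScalarBaseMult(&alicePub, &alicePriv)
+	curve25519.ScalarBaseMult(&bobPub, &bobPriv)
+
+	// Both sides combine their own private key with the peer's public
+	// key and arrive at the same shared secret.
+	var aliceShared, bobShared [32]byte
+	curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
+	curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)
+
+	fmt.Println(aliceShared == bobShared) // true
+}
+```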
+
+We then run [hkdf-sha256](https://en.wikipedia.org/wiki/HKDF) to expand the
+shared secret into a symmetric key for sending data,
+a symmetric key for receiving data,
+and a challenge to authenticate the other party.
+One peer will send data with their sending key, and the other peer
+will decode it using their own receiving key.
+We must ensure that the two parties don't end up using the same key for
+sending (and likewise for receiving), as in that case nothing can be
+decoded.
+To ensure this, the peer with the canonically smaller ephemeral pubkey
+uses the first key as their receiving key, and the second key as their sending key.
+If the peer has the canonically larger ephemeral pubkey, they do the reverse.
+
+Each peer also keeps a received-message counter and a sent-message counter,
+both initialized to zero.
+All future communication is encrypted using chacha20poly1305.
+The key used to send the message is the sending key, and the key used to decode
+the message is the receiving key.
+The nonce for chacha20poly1305 is the relevant message counter.
+It is critical that the message counter is incremented every time you send a
+message and every time you receive a message that decodes correctly.
+
+Each peer now signs the challenge with their persistent private key, and
sends the other peer an AuthSigMsg, containing their persistent
public key and the signature. On receiving an AuthSigMsg, the peer
verifies the signature.

The peers are now authenticated.

-All future communications can now be encrypted using the shared secret
-and the generated nonces, where each nonce is incremented by one each
-time it is used. The communications maintain Perfect Forward Secrecy, as
+The communication maintains Perfect Forward Secrecy, as
the persistent key pair was not used for generating secrets - only for
authenticating.

@@ -63,11 +61,15 @@ are connected to at least one validator.

Authenticated encryption is enabled by default.

+## Specification
+
+The full p2p specification can be found [here](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p).
+
## Additional Reading

-- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47)
-- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
-  Michael J.
-  Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf)
-- [Further work on secret
-  handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf)
+- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47)
+- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
+  Michael J.
+  Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf)
+- [Further work on secret
+  handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf)
diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md
index 11949c79847..5ee18361f64 100644
--- a/docs/tendermint-core/using-tendermint.md
+++ b/docs/tendermint-core/using-tendermint.md
@@ -39,20 +39,22 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g

#### Fields

-- `genesis_time`: Official time of blockchain start.
-- `chain_id`: ID of the blockchain. This must be unique for
-  every blockchain. If your testnet blockchains do not have unique
-  chain IDs, you will have a bad time.
-- `validators`:
-- `pub_key`: The first element specifies the `pub_key` type. 1
-  == Ed25519. The second element are the pubkey bytes.
-- `power`: The validator's voting power.
-- `name`: Name of the validator (optional).
-- `app_hash`: The expected application hash (as returned by the
-  `ResponseInfo` ABCI message) upon genesis. If the app's hash does
-  not match, Tendermint will panic.
-- `app_state`: The application state (e.g. initial distribution
-  of tokens).
+- `genesis_time`: Official time of blockchain start.
+- `chain_id`: ID of the blockchain. This must be unique for
+  every blockchain. If your testnet blockchains do not have unique
+  chain IDs, you will have a bad time. The ChainID must be less than 50 characters.
+- `validators`: List of initial validators. Note this may be overridden entirely by the
+  application, and may be left empty to make explicit that the
+  application will initialize the validator set with ResponseInitChain.
+  - `pub_key`: The first element specifies the `pub_key` type. 1
+    == Ed25519. The second element contains the pubkey bytes.
+  - `power`: The validator's voting power.
+  - `name`: Name of the validator (optional).
+- `app_hash`: The expected application hash (as returned by the
+  `ResponseInfo` ABCI message) upon genesis. If the app's hash does
+  not match, Tendermint will panic.
+- `app_state`: The application state (e.g. initial distribution
+  of tokens).

#### Sample genesis.json

@@ -93,8 +95,7 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g
      "power": "1",
      "name": "node3"
    }
-  ],
-  "app_hash": ""
+  ]
}
```

@@ -151,10 +152,14 @@ and the `latest_app_hash` in particular:

```
curl http://localhost:26657/status | json_pp | grep latest_app_hash
```

-Visit http://localhost:26657> in your browser to see the list of other
+Visit http://localhost:26657 in your browser to see the list of other
endpoints. Some take no arguments (like `/status`), while others specify
the argument name and use `_` as a placeholder.

+::: tip
+Find the RPC Documentation [here](https://tendermint.com/rpc/)
+:::
+
### Formatting

The following nuances when sending/formatting transactions should be
@@ -208,23 +213,19 @@ Note that raw hex cannot be used in `POST` transactions.

**WARNING: UNSAFE** Only do this in development and only if you can
afford to lose all blockchain data!

-To reset a blockchain, stop the node, remove the `~/.tendermint/data`
-directory and run
+To reset a blockchain, stop the node and run:

```
-tendermint unsafe_reset_priv_validator
+tendermint unsafe_reset_all
```

-This final step is necessary to reset the `priv_validator.json`, which
-otherwise prevents you from making conflicting votes in the consensus
-(something that could get you in trouble if you do it on a real
-blockchain). If you don't reset the `priv_validator.json`, your fresh
-new blockchain will not make any blocks.
+This command will remove the data directory and reset the private validator and
+address book files.

## Configuration

Tendermint uses a `config.toml` for configuration. For details, see [the
-config specification](./specification/configuration.md).
+config specification](./configuration.md).

Notable options include the socket address of the application
(`proxy_app`), the listening address of the Tendermint peer
@@ -235,8 +236,7 @@ Some fields from the config file can be overwritten with flags.

## No Empty Blocks

-This much requested feature was implemented in version 0.10.3. While the
-default behaviour of `tendermint` is still to create blocks
+While the default behaviour of `tendermint` is still to create blocks
approximately once per second, it is possible to disable empty blocks
or set a block creation interval. In the former case, blocks will be
created when there are new transactions or when the AppHash changes.

@@ -305,6 +305,12 @@ can take on the order of a second. For a quick result, use
`broadcast_tx_sync`, but the transaction will not be committed until later,
and by that point its effect on the state may change.

+Note the mempool does not provide strong guarantees - just because a tx passed
+CheckTx (ie. was accepted into the mempool), doesn't mean it will be committed,
+as nodes with the tx in their mempool may crash before they get to propose.
+For more information, see the [mempool
+write-ahead log](../tendermint-core/running-in-production.md#mempool-wal).
+
## Tendermint Networks

When `tendermint init` is run, both a `genesis.json` and
@@ -365,10 +371,7 @@ case, the genesis file contains the public key of our
root directory will be able to make progress. Voting power uses an int64
but must be positive, thus the range is: 0 through 9223372036854775807.
Because of how the current proposer selection algorithm works, we do not
-recommend having voting powers greater than 10\^12 (ie. 1 trillion) (see
-[Proposals section of Byzantine Consensus
-Algorithm](./specification/byzantine-consensus-algorithm.md#proposals)
-for details).
+recommend having voting powers greater than 10\^12 (ie. 1 trillion).

If we want to add more nodes to the network, we have two choices: we can
add a new validator node, who will also participate in the consensus by
@@ -520,14 +523,14 @@ failing, you need at least four validator nodes (e.g., 2/3).

Updating validators in a live network is supported but must be
explicitly programmed by the application developer. See the [application
-developers guide](./app-development.md) for more details.
+developers guide](../app-dev/app-development.md) for more details.

### Local Network

To run a network locally, say on a single machine, you must change the
`_laddr` fields in the `config.toml` (or using the flags) so that the
listening addresses of the various sockets don't conflict. Additionally,
-you must set `addrbook_strict=false` in the `config.toml`, otherwise
+you must set `addr_book_strict=false` in the `config.toml`, otherwise
Tendermint's p2p library will deny making connections to peers with the
same IP address.

diff --git a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md
index 0c1d7d89a7f..307d267c659 100644
--- a/docs/tendermint-core/validators.md
+++ b/docs/tendermint-core/validators.md
@@ -2,7 +2,7 @@
Validators are responsible for committing new blocks in the blockchain.
These validators participate in the consensus protocol by broadcasting
-*votes* which contain cryptographic signatures signed by each
+_votes_ which contain cryptographic signatures signed by each
validator's private key.

Some Proof-of-Stake consensus algorithms aim to create a "completely"
@@ -22,18 +22,18 @@ Validators have a cryptographic key-pair and an associated amount of

There are two ways to become a validator.

-1. They can be pre-established in the [genesis state](../../tendermint-core/using-tendermint.md#genesis)
+1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis)
2. The ABCI app responds to the EndBlock message with changes to the existing validator set (as sketched below).
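+
+To illustrate the second path, here is a minimal, hypothetical ABCI application
+(not part of Tendermint itself) returning a validator change from EndBlock. The
+names follow the `abci/types` package and may differ slightly between versions;
+the pubkey is assumed to have been collected earlier, e.g. via a transaction:
+
+```go
+package demo
+
+import (
+	abci "github.com/tendermint/tendermint/abci/types"
+)
+
+// DemoApp promotes one pre-collected ed25519 public key to a validator
+// with power 10 at height 100. All other heights change nothing.
+type DemoApp struct {
+	abci.BaseApplication
+	newValPubKey []byte // raw ed25519 public key bytes, gathered via txs
+}
+
+func (app *DemoApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
+	if req.Height != 100 || app.newValPubKey == nil {
+		return abci.ResponseEndBlock{}
+	}
+	return abci.ResponseEndBlock{
+		ValidatorUpdates: []abci.ValidatorUpdate{{
+			PubKey: abci.PubKey{Type: "ed25519", Data: app.newValPubKey},
+			Power:  10, // a power of 0 would instead remove the validator
+		}},
+	}
+}
+```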
## Committing a Block -*+2/3 is short for "more than 2/3"* +_+2/3 is short for "more than 2/3"_ A block is committed when +2/3 of the validator set sign [precommit votes](../spec/blockchain/blockchain.md#vote) for that block at the same `round`. The +2/3 set of precommit votes is called a -[*commit*](../spec/blockchain/blockchain.md#commit). While any +2/3 set of +[_commit_](../spec/blockchain/blockchain.md#commit). While any +2/3 set of precommits for the same block at the same height&round can serve as validation, the canonical commit is included in the next block (see -[LastCommit](../spec/blockchain/blockchain.md#last-commit)). +[LastCommit](../spec/blockchain/blockchain.md#lastcommit)). diff --git a/docs/tools/README.md b/docs/tools/README.md new file mode 100644 index 00000000000..ef1ae7c22d8 --- /dev/null +++ b/docs/tools/README.md @@ -0,0 +1,4 @@ +# Overview + +Tendermint comes with some tools for [benchmarking](./benchmarking.md) +and [monitoring](./monitoring.md). diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 20c368e2954..e17c2856429 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -20,10 +20,10 @@ Blocks/sec 0.818 0.386 1 9 ## Quick Start -[Install Tendermint](../introduction/install) +[Install Tendermint](../introduction/install.md) This currently is setup to work on tendermint's develop branch. Please ensure you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use - the master branch.) +the master branch.) then run: @@ -32,7 +32,7 @@ tendermint init tendermint node --proxy_app=kvstore ``` -``` +``` tm-bench localhost:26657 ``` diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index 5cc2ad3b132..c0fa94c0936 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -3,7 +3,7 @@ Tendermint blockchain monitoring tool; watches over one or more nodes, collecting and providing various statistics to the user: -- https://github.com/tendermint/tools/tree/master/tm-monitor +- https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor ## Quick Start @@ -26,27 +26,28 @@ use `kvstore`: docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore ``` + ``` docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 ``` ### Using Binaries -[Install Tendermint](https://github.com/tendermint/tendermint#install) +[Install Tendermint](../introduction/install.md). -then run: +Start a Tendermint node: ``` tendermint init tendermint node --proxy_app=kvstore ``` +In another window, run the monitor: + ``` tm-monitor localhost:26657 ``` -with the last command being in a seperate window. 
- ## Usage ``` @@ -71,7 +72,7 @@ Flags: Run `tm-monitor` and visit http://localhost:26670 You should see the list of the available RPC endpoints: -``` +``` http://localhost:26670/status http://localhost:26670/status/network http://localhost:26670/monitor?endpoint=_ diff --git a/docs/yarn.lock b/docs/yarn.lock index 5591b8fa04b..4f453ed47d2 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -16,27 +16,27 @@ version "0.7.0" resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" -"@textlint/ast-node-types@^4.0.2": - version "4.0.2" - resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.2.tgz#5386a15187798efb48eb71fa1cbf6ca2770b206a" +"@textlint/ast-node-types@^4.0.2", "@textlint/ast-node-types@^4.0.3": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.3.tgz#b51c87bb86022323f764fbdc976b173f19261cc5" -"@textlint/ast-traverse@^2.0.8": - version "2.0.8" - resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.8.tgz#c180fe23dc3b8a6aa68539be70efb4ff17c38a3a" +"@textlint/ast-traverse@^2.0.8", "@textlint/ast-traverse@^2.0.9": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.9.tgz#4bf427cf01b7195013e75d27540a77ad68c363d9" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" -"@textlint/feature-flag@^3.0.4": - version "3.0.4" - resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.4.tgz#4290a4bb53da28c1f5f1d5ce0f4ae6630ab939ea" +"@textlint/feature-flag@^3.0.4", "@textlint/feature-flag@^3.0.5": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.5.tgz#3783e0f2661053d2a74fdad775993395a2d530b4" dependencies: map-like "^2.0.0" "@textlint/fixer-formatter@^3.0.7": - version "3.0.7" - resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.7.tgz#4ef15d5e606e2d32b89257afd382ed9dbb218846" + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.8.tgz#90ef804c60b9e694c8c048a06febbf1f331abd49" dependencies: - "@textlint/kernel" "^2.0.9" + "@textlint/kernel" "^3.0.0" chalk "^1.1.3" debug "^2.1.0" diff "^2.2.2" @@ -60,13 +60,28 @@ object-assign "^4.1.1" structured-source "^3.0.2" +"@textlint/kernel@^3.0.0": + version "3.0.0" + resolved "https://registry.yarnpkg.com/@textlint/kernel/-/kernel-3.0.0.tgz#ba10962acff64f17b9e5fce8089a40f1f8880dcd" + dependencies: + "@textlint/ast-node-types" "^4.0.3" + "@textlint/ast-traverse" "^2.0.9" + "@textlint/feature-flag" "^3.0.5" + "@types/bluebird" "^3.5.18" + bluebird "^3.5.1" + debug "^2.6.6" + deep-equal "^1.0.1" + map-like "^2.0.0" + object-assign "^4.1.1" + structured-source "^3.0.2" + "@textlint/linter-formatter@^3.0.7": - version "3.0.7" - resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.7.tgz#66716cac94c047d94627a7e6af427a0d199eda7c" + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.8.tgz#030aa03ff3d85dda94ca9fa9e6bf824f9c1cb7ef" dependencies: "@azu/format-text" "^1.0.1" "@azu/style-format" "^1.0.0" - "@textlint/kernel" "^2.0.9" + "@textlint/kernel" "^3.0.0" chalk "^1.0.0" concat-stream "^1.5.1" js-yaml "^3.2.4" @@ -81,10 +96,10 @@ xml-escape "^1.0.0" "@textlint/markdown-to-ast@^6.0.8": - version "6.0.8" - resolved 
"https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.8.tgz#baa509c42f842b4dba36ad91547a288c063396b8" + version "6.0.9" + resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.9.tgz#e7c89e5ad15d17dcd8e5a62758358936827658fa" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" debug "^2.1.3" remark-frontmatter "^1.2.0" remark-parse "^5.0.0" @@ -93,10 +108,10 @@ unified "^6.1.6" "@textlint/text-to-ast@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.8.tgz#6211977f369cec484447867f10dc155120f4c082" + version "3.0.9" + resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.9.tgz#dcb63f09cc79ea2096fc823c3b6cd07c79a060b5" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" "@textlint/textlint-plugin-markdown@^4.0.10": version "4.0.10" @@ -111,13 +126,17 @@ "@textlint/text-to-ast" "^3.0.8" "@types/bluebird@^3.5.18": - version "3.5.21" - resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.21.tgz#567615589cc913e84a28ecf9edb031732bdf2634" + version "3.5.23" + resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.23.tgz#e805da976b76892b2b2e50eec29e84914c730670" abbrev@1: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" +adverb-where@0.0.9: + version "0.0.9" + resolved "https://registry.yarnpkg.com/adverb-where/-/adverb-where-0.0.9.tgz#09c5cddd8d503b9fe5f76e0b8dc5c70a8f193e34" + aggregate-error@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" @@ -220,8 +239,10 @@ arrify@^1.0.0: resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" asn1@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + version "0.2.4" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" + dependencies: + safer-buffer "~2.1.0" assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" @@ -240,8 +261,8 @@ aws-sign2@~0.7.0: resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" aws4@^1.6.0: - version "1.7.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" + version "1.8.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" bail@^1.0.0: version "1.0.3" @@ -285,8 +306,8 @@ braces@^1.8.2: repeat-element "^1.1.2" buffer-from@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" builtin-modules@^1.0.0: version "1.1.1" @@ -460,7 +481,7 @@ dashdash@^1.12.0: dependencies: assert-plus "^1.0.0" -debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.2.0, debug@^2.6.6: +debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.6.6: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" dependencies: @@ -546,11 +567,16 @@ duplexer3@^0.1.4: version "0.1.4" resolved 
"https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" +e-prime@^0.10.2: + version "0.10.2" + resolved "https://registry.yarnpkg.com/e-prime/-/e-prime-0.10.2.tgz#ea9375eb985636de88013c7a9fb129ad9e15eff8" + ecc-jsbn@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" dependencies: jsbn "~0.1.0" + safer-buffer "^2.1.0" error-ex@^1.2.0, error-ex@^1.3.1: version "1.3.2" @@ -581,8 +607,8 @@ escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" esprima@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" expand-brackets@^0.1.4: version "0.1.5" @@ -597,8 +623,8 @@ expand-range@^1.8.1: fill-range "^2.1.0" extend@^3.0.0, extend@~3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" extglob@^0.3.1: version "0.3.2" @@ -863,6 +889,10 @@ has-symbol-support-x@^1.4.1: version "1.4.2" resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" +has-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" + has-to-string-tag-x@^1.2.0: version "1.4.1" resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" @@ -880,8 +910,8 @@ has@^1.0.1: function-bind "^1.1.1" hosted-git-info@^2.1.4: - version "2.6.1" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.1.tgz#6e4cee78b01bb849dcf93527708c69fdbee410df" + version "2.7.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.7.1.tgz#97f236977bd6e125408930ff6de3eec6281ec047" http-cache-semantics@3.8.1: version "3.8.1" @@ -1156,8 +1186,8 @@ isarray@1.0.0, isarray@~1.0.0: resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" isemail@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.2.tgz#937cf919002077999a73ea8b1951d590e84e01dd" + version "3.1.3" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.3.tgz#64f37fc113579ea12523165c3ebe3a71a56ce571" dependencies: punycode "2.x.x" @@ -1178,7 +1208,7 @@ isurl@^1.0.0-alpha5: has-to-string-tag-x "^1.2.0" is-object "^1.0.1" -js-yaml@^3.2.4, js-yaml@^3.6.1: +js-yaml@^3.12.0, js-yaml@^3.2.4, js-yaml@^3.6.1: version "3.12.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" dependencies: @@ -1215,10 +1245,16 @@ json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" -json5@^0.5.0, json5@^0.5.1: +json5@^0.5.1: version "0.5.1" resolved 
"https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + dependencies: + minimist "^1.2.0" + jsonify@~0.0.0: version "0.0.0" resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" @@ -1297,7 +1333,7 @@ locate-path@^2.0.0: p-locate "^2.0.0" path-exists "^3.0.0" -lodash@^4.0.0: +lodash@^4.0.0, lodash@^4.17.4: version "4.17.10" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" @@ -1372,19 +1408,19 @@ micromatch@^2.1.5: parse-glob "^3.0.4" regex-cache "^0.4.2" -mime-db@~1.33.0: - version "1.33.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" +mime-db@~1.35.0: + version "1.35.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.35.0.tgz#0569d657466491283709663ad379a99b90d9ab47" mime-types@^2.1.12, mime-types@~2.1.17: - version "2.1.18" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + version "2.1.19" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.19.tgz#71e464537a7ef81c15f2db9d97e913fc0ff606f0" dependencies: - mime-db "~1.33.0" + mime-db "~1.35.0" mimic-response@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.0.tgz#df3d3652a73fded6b9b0b24146e6fd052353458e" + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" minimatch@^3.0.2, minimatch@^3.0.4: version "3.0.4" @@ -1431,7 +1467,7 @@ nan@^2.9.2: version "2.10.0" resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" -needle@^2.2.0: +needle@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" dependencies: @@ -1439,13 +1475,21 @@ needle@^2.2.0: iconv-lite "^0.4.4" sax "^1.2.4" +nlcst-to-string@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/nlcst-to-string/-/nlcst-to-string-2.0.2.tgz#7125af4d4d369850c697192a658f01f36af9937b" + +no-cliches@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/no-cliches/-/no-cliches-0.1.0.tgz#f4eb81a551fecde813f8c611e35e64a5118dc38c" + node-pre-gyp@^0.10.0: - version "0.10.2" - resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.2.tgz#e8945c20ef6795a20aac2b44f036eb13cf5146e3" + version "0.10.3" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.3.tgz#3070040716afdc778747b61b6887bf78880b80fc" dependencies: detect-libc "^1.0.2" mkdirp "^0.5.1" - needle "^2.2.0" + needle "^2.2.1" nopt "^4.0.1" npm-packlist "^1.1.6" npmlog "^4.0.2" @@ -1489,8 +1533,8 @@ npm-bundled@^1.0.1: resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" npm-packlist@^1.1.6: - version "1.1.10" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.10.tgz#1039db9e985727e464df066f4cf0ab6ef85c398a" + version "1.1.11" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.11.tgz#84e8c683cbe7867d34b1d357d893ce29e28a02de" dependencies: ignore-walk "^3.0.1" npm-bundled "^1.0.1" @@ -1524,10 +1568,19 @@ object-assign@^4.0.1, object-assign@^4.1.0, 
object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" -object-keys@^1.0.8, object-keys@^1.0.9: +object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.0.8: version "1.0.12" resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" +object.assign@^4.0.4: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + object.omit@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" @@ -1652,11 +1705,9 @@ parse-json@^4.0.0: error-ex "^1.3.1" json-parse-better-errors "^1.0.1" -path-exists@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" - dependencies: - pinkie-promise "^2.0.0" +passive-voice@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/passive-voice/-/passive-voice-0.1.0.tgz#16ff91ae40ba0e92c43e671763fdc842a70270b1" path-exists@^3.0.0: version "3.0.0" @@ -1731,8 +1782,8 @@ preserve@^0.2.0: resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" prettier@^1.13.7: - version "1.13.7" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.13.7.tgz#850f3b8af784a49a6ea2d2eaa7ed1428a34b7281" + version "1.14.2" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.14.2.tgz#0ac1c6e1a90baa22a62925f41963c841983282f9" process-nextick-args@~2.0.0: version "2.0.0" @@ -1768,24 +1819,24 @@ query-string@^5.0.1: strict-uri-encode "^1.0.0" randomatic@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.0.0.tgz#d35490030eb4f7578de292ce6dfb04a91a128923" + version "3.1.0" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.1.0.tgz#36f2ca708e9e567f5ed2ec01949026d50aa10116" dependencies: is-number "^4.0.0" kind-of "^6.0.0" math-random "^1.0.1" rc-config-loader@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.1.tgz#8c8452f59bdd10d448a67762dccf7c1b247db860" + version "2.0.2" + resolved "https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.2.tgz#46eb2f98fb5b2aa7b1119d66c0554de5133f1bc1" dependencies: - debug "^2.2.0" - js-yaml "^3.6.1" - json5 "^0.5.0" + debug "^3.1.0" + js-yaml "^3.12.0" + json5 "^1.0.1" object-assign "^4.1.0" - object-keys "^1.0.9" - path-exists "^2.1.0" - require-from-string "^2.0.1" + object-keys "^1.0.12" + path-exists "^3.0.0" + require-from-string "^2.0.2" rc@^1.1.0, rc@^1.2.7: version "1.2.8" @@ -1871,6 +1922,15 @@ remark-lint-no-dead-urls@^0.3.0: unified-lint-rule "^1.0.1" unist-util-visit "^1.1.3" +remark-lint-write-good@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/remark-lint-write-good/-/remark-lint-write-good-1.0.3.tgz#daa4cf122212cfa06e437702ef7b43a12875bd5d" + dependencies: + nlcst-to-string "^2.0.0" + unified-lint-rule "^1.0.1" + unist-util-visit "^1.1.1" + write-good "^0.11.1" + remark-parse@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-5.0.0.tgz#4c077f9e499044d1d5c13f80d7a98cf7b9285d95" @@ -1959,7 +2019,7 @@ request@^2.87.0: tunnel-agent "^0.6.0" uuid "^3.1.0" 
-require-from-string@^2.0.1: +require-from-string@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" @@ -1983,7 +2043,7 @@ safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" -"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2: +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" @@ -2047,6 +2107,10 @@ spdx-license-ids@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" +split-lines@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/split-lines/-/split-lines-1.1.0.tgz#3abba8f598614142f9db8d27ab6ab875662a1e09" + sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -2172,8 +2236,8 @@ table@^3.7.8: string-width "^2.0.0" tar@^4: - version "4.4.4" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" + version "4.4.6" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.6.tgz#63110f09c00b4e60ac8bcfe1bf3c8660235fbc9b" dependencies: chownr "^1.0.1" fs-minipass "^1.2.5" @@ -2187,6 +2251,20 @@ text-table@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" +textlint-rule-helper@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/textlint-rule-helper/-/textlint-rule-helper-2.0.0.tgz#95cb4696c95c4258d2e3389e9e64b849f9721382" + dependencies: + unist-util-visit "^1.1.0" + +textlint-rule-stop-words@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/textlint-rule-stop-words/-/textlint-rule-stop-words-1.0.3.tgz#fe2f40cbe5837331b2a09fdec57cc71758093bf0" + dependencies: + lodash "^4.17.4" + split-lines "^1.1.0" + textlint-rule-helper "^2.0.0" + textlint@^10.2.1: version "10.2.1" resolved "https://registry.yarnpkg.com/textlint/-/textlint-10.2.1.tgz#ee22b7967d59cef7c74a04a5f4e8883134e5c79d" @@ -2234,6 +2312,10 @@ to-vfile@^2.0.0: vfile "^2.0.0" x-is-function "^1.0.4" +too-wordy@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/too-wordy/-/too-wordy-0.1.4.tgz#8e7b20a7b7a4d8fc3759f4e00c4929993d1b12f0" + tough-cookie@~2.3.3: version "2.3.4" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" @@ -2253,8 +2335,8 @@ trim@0.0.1: resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" trough@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.2.tgz#7f1663ec55c480139e2de5e486c6aef6cc24a535" + version "1.0.3" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.3.tgz#e29bd1614c6458d44869fc28b255ab7857ef7c24" try-resolve@^1.0.1: version "1.0.1" @@ -2351,7 +2433,7 @@ unist-util-inspect@^4.1.2: dependencies: is-empty "^1.0.0" -unist-util-is@^2.1.1: +unist-util-is@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-2.1.2.tgz#1193fa8f2bfbbb82150633f3a8d2eb9a1c1d55db" @@ -2371,11 +2453,17 @@ 
unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: version "1.1.2" resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" -unist-util-visit@^1.1.0, unist-util-visit@^1.1.3: - version "1.3.1" - resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.3.1.tgz#c019ac9337a62486be58531bc27e7499ae7d55c7" +unist-util-visit-parents@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-2.0.1.tgz#63fffc8929027bee04bfef7d2cce474f71cb6217" dependencies: - unist-util-is "^2.1.1" + unist-util-is "^2.1.2" + +unist-util-visit@^1.1.0, unist-util-visit@^1.1.1, unist-util-visit@^1.1.3: + version "1.4.0" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.4.0.tgz#1cb763647186dc26f5e1df5db6bd1e48b3cc2fb1" + dependencies: + unist-util-visit-parents "^2.0.0" untildify@^2.1.0: version "2.1.0" @@ -2412,8 +2500,8 @@ uuid@^3.1.0: resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" validate-npm-package-license@^3.0.1: - version "3.0.3" - resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + version "3.0.4" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" dependencies: spdx-correct "^3.0.0" spdx-expression-parse "^3.0.0" @@ -2459,6 +2547,10 @@ vfile@^2.0.0: unist-util-stringify-position "^1.0.0" vfile-message "^1.0.0" +weasel-words@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/weasel-words/-/weasel-words-0.1.1.tgz#7137946585c73fe44882013853bd000c5d687a4e" + wide-align@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" @@ -2480,6 +2572,18 @@ wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" +write-good@^0.11.1: + version "0.11.3" + resolved "https://registry.yarnpkg.com/write-good/-/write-good-0.11.3.tgz#8eeb5da9a8e155dafb1325d27eba33cb67d24d8c" + dependencies: + adverb-where "0.0.9" + e-prime "^0.10.2" + no-cliches "^0.1.0" + object.assign "^4.0.4" + passive-voice "^0.1.0" + too-wordy "^0.1.4" + weasel-words "^0.1.1" + write@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" diff --git a/evidence/pool.go b/evidence/pool.go index 247629b6be4..da00a348187 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -57,9 +57,10 @@ func (evpool *EvidencePool) PriorityEvidence() []types.Evidence { return evpool.evidenceStore.PriorityEvidence() } -// PendingEvidence returns all uncommitted evidence. -func (evpool *EvidencePool) PendingEvidence() []types.Evidence { - return evpool.evidenceStore.PendingEvidence() +// PendingEvidence returns uncommitted evidence up to maxBytes. +// If maxBytes is -1, all evidence is returned. +func (evpool *EvidencePool) PendingEvidence(maxBytes int64) []types.Evidence { + return evpool.evidenceStore.PendingEvidence(maxBytes) } // State returns the current state of the evpool. 
@@ -126,7 +127,7 @@ func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []typ } // remove committed evidence from the clist - maxAge := evpool.State().ConsensusParams.EvidenceParams.MaxAge + maxAge := evpool.State().ConsensusParams.Evidence.MaxAge evpool.removeEvidence(height, maxAge, blockEvidenceMap) } diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 915cba32753..4e69596bfc3 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -1,19 +1,27 @@ package evidence import ( + "os" "sync" "testing" - "time" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tendermint/libs/db" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tendermint/libs/db" + tmtime "github.com/tendermint/tendermint/types/time" ) var mockState = sm.State{} +func TestMain(m *testing.M) { + types.RegisterMockEvidences(cdc) + + code := m.Run() + os.Exit(code) +} + func initializeValidatorState(valAddr []byte, height int64) dbm.DB { stateDB := dbm.NewMemDB() @@ -25,11 +33,12 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { } state := sm.State{ LastBlockHeight: 0, - LastBlockTime: time.Now(), + LastBlockTime: tmtime.Now(), Validators: valSet, + NextValidators: valSet.CopyIncrementAccum(1), LastHeightValidatorsChanged: 1, ConsensusParams: types.ConsensusParams{ - EvidenceParams: types.EvidenceParams{ + Evidence: types.EvidenceParams{ MaxAge: 1000000, }, }, diff --git a/evidence/reactor.go b/evidence/reactor.go index cfe47364c03..32753b2b9b0 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -74,6 +74,13 @@ func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { evR.Switch.StopPeerForError(src, err) return } + + if err = msg.ValidateBasic(); err != nil { + evR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err) + evR.Switch.StopPeerForError(src, err) + return + } + evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) switch msg := msg.(type) { @@ -164,7 +171,7 @@ func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evid // NOTE: We only send evidence to peers where // peerHeight - maxAge < evidenceHeight < peerHeight - maxAge := evR.evpool.State().ConsensusParams.EvidenceParams.MaxAge + maxAge := evR.evpool.State().ConsensusParams.Evidence.MaxAge peerHeight := peerState.GetHeight() if peerHeight < evHeight { // peer is behind. sleep while he catches up @@ -191,7 +198,9 @@ type PeerState interface { // Messages // EvidenceMessage is a message sent or received by the EvidenceReactor. -type EvidenceMessage interface{} +type EvidenceMessage interface { + ValidateBasic() error +} func RegisterEvidenceMessages(cdc *amino.Codec) { cdc.RegisterInterface((*EvidenceMessage)(nil), nil) @@ -209,11 +218,21 @@ func decodeMsg(bz []byte) (msg EvidenceMessage, err error) { //------------------------------------- -// EvidenceMessage contains a list of evidence. +// EvidenceListMessage contains a list of evidence. type EvidenceListMessage struct { Evidence []types.Evidence } +// ValidateBasic performs basic validation. +func (m *EvidenceListMessage) ValidateBasic() error { + for i, ev := range m.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid evidence (#%d): %v", i, err) + } + } + return nil +} + // String returns a string representation of the EvidenceListMessage. 
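For readers following the reactor change above, the new receive-side contract is: decode first, then ValidateBasic, and stop the peer on either failure. A sketch of that contract as a hypothetical helper in this package (decodeMsg and EvidenceMessage are from the patch; the helper itself is not):

```go
// decodeAndValidate bundles the two checks Receive now performs before
// acting on a message; either failure is grounds for StopPeerForError.
func decodeAndValidate(bz []byte) (EvidenceMessage, error) {
	msg, err := decodeMsg(bz)
	if err != nil {
		return nil, err // malformed bytes
	}
	if err := msg.ValidateBasic(); err != nil {
		return nil, err // well-formed but semantically invalid
	}
	return msg, nil
}
```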
func (m *EvidenceListMessage) String() string { return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence) diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 1687f25a39c..69dcdec5779 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -6,14 +6,13 @@ import ( "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/go-kit/kit/log/term" + "github.com/stretchr/testify/assert" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto/secp256k1" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" - - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -79,11 +78,11 @@ func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceR func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) { evpool := reactors[reactorIdx].evpool - for len(evpool.PendingEvidence()) != len(evs) { + for len(evpool.PendingEvidence(-1)) != len(evs) { time.Sleep(time.Millisecond * 100) } - reapedEv := evpool.PendingEvidence() + reapedEv := evpool.PendingEvidence(-1) // put the reaped evidence in a map so we can quickly check we got everything evMap := make(map[string]types.Evidence) for _, e := range reapedEv { @@ -180,3 +179,30 @@ func TestReactorSelectiveBroadcast(t *testing.T) { peers := reactors[1].Switch.Peers().List() assert.Equal(t, 1, len(peers)) } +func TestEvidenceListMessageValidationBasic(t *testing.T) { + + testCases := []struct { + testName string + malleateEvListMsg func(*EvidenceListMessage) + expectErr bool + }{ + {"Good EvidenceListMessage", func(evList *EvidenceListMessage) {}, false}, + {"Invalid EvidenceListMessage", func(evList *EvidenceListMessage) { + evList.Evidence = append(evList.Evidence, + &types.DuplicateVoteEvidence{PubKey: secp256k1.GenPrivKey().PubKey()}) + }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + evListMsg := &EvidenceListMessage{} + n := 3 + valAddr := []byte("myval") + evListMsg.Evidence = make([]types.Evidence, n) + for i := 0; i < n; i++ { + evListMsg.Evidence[i] = types.NewMockGoodEvidence(int64(i+1), 0, valAddr) + } + tc.malleateEvListMsg(evListMsg) + assert.Equal(t, tc.expectErr, evListMsg.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/evidence/store.go b/evidence/store.go index 20b37bdb27a..ccfd2d4876b 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -3,8 +3,8 @@ package evidence import ( "fmt" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) /* @@ -78,26 +78,35 @@ func NewEvidenceStore(db dbm.DB) *EvidenceStore { // PriorityEvidence returns the evidence from the outqueue, sorted by highest priority. func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { // reverse the order so highest priority is first - l := store.ListEvidence(baseKeyOutqueue) - l2 := make([]types.Evidence, len(l)) - for i := range l { - l2[i] = l[len(l)-1-i] + l := store.listEvidence(baseKeyOutqueue, -1) + for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { + l[i], l[j] = l[j], l[i] } - return l2 + + return l } -// PendingEvidence returns all known uncommitted evidence. 
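The PriorityEvidence rewrite above swaps the old allocate-and-copy reverse for an in-place two-pointer swap. The same idiom works for any slice; a minimal sketch:

```go
// reverseInPlace mirrors the loop now used in PriorityEvidence: swap the
// ends and walk both indices toward the middle, no second slice needed.
func reverseInPlace(xs []int) {
	for i, j := 0, len(xs)-1; i < j; i, j = i+1, j-1 {
		xs[i], xs[j] = xs[j], xs[i]
	}
}
```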
-func (store *EvidenceStore) PendingEvidence() (evidence []types.Evidence) { - return store.ListEvidence(baseKeyPending) +// PendingEvidence returns known uncommitted evidence up to maxBytes. +// If maxBytes is -1, all evidence is returned. +func (store *EvidenceStore) PendingEvidence(maxBytes int64) (evidence []types.Evidence) { + return store.listEvidence(baseKeyPending, maxBytes) } -// ListEvidence lists the evidence for the given prefix key. +// listEvidence lists the evidence for the given prefix key up to maxBytes. // It is wrapped by PriorityEvidence and PendingEvidence for convenience. -func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evidence) { +// If maxBytes is -1, there's no cap on the size of returned evidence. +func (store *EvidenceStore) listEvidence(prefixKey string, maxBytes int64) (evidence []types.Evidence) { + var bytes int64 iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) + defer iter.Close() for ; iter.Valid(); iter.Next() { val := iter.Value() + if maxBytes > 0 && bytes+int64(len(val)) > maxBytes { + return evidence + } + bytes += int64(len(val)) + var ei EvidenceInfo err := cdc.UnmarshalBinaryBare(val, &ei) if err != nil { diff --git a/evidence/store_test.go b/evidence/store_test.go index 30dc1c4d5c2..35eb28d01f3 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) //------------------------------------------- @@ -35,7 +35,7 @@ func TestStoreMark(t *testing.T) { // before we do anything, priority/pending are empty priorityEv := store.PriorityEvidence() - pendingEv := store.PendingEvidence() + pendingEv := store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(0, len(pendingEv)) @@ -53,21 +53,21 @@ func TestStoreMark(t *testing.T) { // new evidence should be returns in priority/pending priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(1, len(priorityEv)) assert.Equal(1, len(pendingEv)) // priority is now empty store.MarkEvidenceAsBroadcasted(ev) priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(1, len(pendingEv)) // priority and pending are now empty store.MarkEvidenceAsCommitted(ev) priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(0, len(pendingEv)) diff --git a/libs/autofile/autofile.go b/libs/autofile/autofile.go index 2f1bb4fd527..a1e2f49e643 100644 --- a/libs/autofile/autofile.go +++ b/libs/autofile/autofile.go @@ -2,10 +2,13 @@ package autofile import ( "os" + "os/signal" "sync" + "syscall" "time" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/errors" ) /* AutoFile usage @@ -30,48 +33,75 @@ if err != nil { } */ -const autoFileOpenDuration = 1000 * time.Millisecond +const ( + autoFileClosePeriod = 1000 * time.Millisecond + autoFilePerms = os.FileMode(0600) +) -// Automatically closes and re-opens file for writing. +// AutoFile automatically closes and re-opens file for writing. The file is +// automatically setup to close itself every 1s and upon receiving SIGHUP. 
+// // This is useful for using a log file with the logrotate tool. type AutoFile struct { - ID string - Path string - ticker *time.Ticker - tickerStopped chan struct{} // closed when ticker is stopped - mtx sync.Mutex - file *os.File + ID string + Path string + + closeTicker *time.Ticker + closeTickerStopc chan struct{} // closed when closeTicker is stopped + hupc chan os.Signal + + mtx sync.Mutex + file *os.File } -func OpenAutoFile(path string) (af *AutoFile, err error) { - af = &AutoFile{ - ID: cmn.RandStr(12) + ":" + path, - Path: path, - ticker: time.NewTicker(autoFileOpenDuration), - tickerStopped: make(chan struct{}), +// OpenAutoFile creates an AutoFile in the path (with random ID). If there is +// an error, it will be of type *PathError or *ErrPermissionsChanged (if the +// file's permissions were changed from the expected 0600). +func OpenAutoFile(path string) (*AutoFile, error) { + af := &AutoFile{ + ID: cmn.RandStr(12) + ":" + path, + Path: path, + closeTicker: time.NewTicker(autoFileClosePeriod), + closeTickerStopc: make(chan struct{}), } - if err = af.openFile(); err != nil { - return + if err := af.openFile(); err != nil { + af.Close() + return nil, err } - go af.processTicks() - sighupWatchers.addAutoFile(af) - return + + // Close file on SIGHUP. + af.hupc = make(chan os.Signal, 1) + signal.Notify(af.hupc, syscall.SIGHUP) + go func() { + for range af.hupc { + af.closeFile() + } + }() + + go af.closeFileRoutine() + + return af, nil } +// Close shuts down the closing goroutine and the SIGHUP handler, and closes +// the AutoFile. func (af *AutoFile) Close() error { - af.ticker.Stop() - close(af.tickerStopped) - err := af.closeFile() - sighupWatchers.removeAutoFile(af) - return err + af.closeTicker.Stop() + close(af.closeTickerStopc) + if af.hupc != nil { + close(af.hupc) + } + return af.closeFile() } -func (af *AutoFile) processTicks() { - select { - case <-af.ticker.C: - af.closeFile() - case <-af.tickerStopped: - return +func (af *AutoFile) closeFileRoutine() { + for { + select { + case <-af.closeTicker.C: + af.closeFile() + case <-af.closeTickerStopc: + return + } } } @@ -83,10 +113,15 @@ func (af *AutoFile) closeFile() (err error) { if file == nil { return nil } + af.file = nil return file.Close() } +// Write writes len(b) bytes to the AutoFile. It returns the number of bytes +// written and an error, if any. Write returns a non-nil error when n != +// len(b). +// Opens AutoFile if needed. func (af *AutoFile) Write(b []byte) (n int, err error) { af.mtx.Lock() defer af.mtx.Unlock() @@ -101,6 +136,10 @@ func (af *AutoFile) Write(b []byte) (n int, err error) { return } +// Sync commits the current contents of the file to stable storage. Typically, +// this means flushing the file system's in-memory copy of recently written +// data to disk. +// Opens AutoFile if needed. func (af *AutoFile) Sync() error { af.mtx.Lock() defer af.mtx.Unlock() @@ -114,31 +153,37 @@ func (af *AutoFile) Sync() error { } func (af *AutoFile) openFile() error { - file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) + file, err := os.OpenFile(af.Path, os.O_RDWR|os.O_CREATE|os.O_APPEND, autoFilePerms) + if err != nil { + return err + } + fileInfo, err := file.Stat() if err != nil { return err } + if fileInfo.Mode() != autoFilePerms { + return errors.NewErrPermissionsChanged(file.Name(), fileInfo.Mode(), autoFilePerms) + } af.file = file return nil } +// Size returns the size of the AutoFile. It returns -1 and an error if it +// fails to get stats or open the file. +// Opens AutoFile if needed.
func (af *AutoFile) Size() (int64, error) { af.mtx.Lock() defer af.mtx.Unlock() if af.file == nil { - err := af.openFile() - if err != nil { - if err == os.ErrNotExist { - return 0, nil - } + if err := af.openFile(); err != nil { return -1, err } } + stat, err := af.file.Stat() if err != nil { return -1, err } return stat.Size(), nil - } diff --git a/libs/autofile/autofile_test.go b/libs/autofile/autofile_test.go index 67397380bad..0b3521c2a6b 100644 --- a/libs/autofile/autofile_test.go +++ b/libs/autofile/autofile_test.go @@ -3,68 +3,51 @@ package autofile import ( "io/ioutil" "os" - "sync/atomic" "syscall" "testing" "time" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/errors" ) func TestSIGHUP(t *testing.T) { - // First, create an AutoFile writing to a tempfile dir file, err := ioutil.TempFile("", "sighup_test") - if err != nil { - t.Fatalf("Error creating tempfile: %v", err) - } - if err := file.Close(); err != nil { - t.Fatalf("Error closing tempfile: %v", err) - } + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) name := file.Name() + // Here is the actual AutoFile af, err := OpenAutoFile(name) - if err != nil { - t.Fatalf("Error creating autofile: %v", err) - } + require.NoError(t, err) // Write to the file. _, err = af.Write([]byte("Line 1\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } + require.NoError(t, err) _, err = af.Write([]byte("Line 2\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } + require.NoError(t, err) // Move the file over err = os.Rename(name, name+"_old") - if err != nil { - t.Fatalf("Error moving autofile: %v", err) - } + require.NoError(t, err) // Send SIGHUP to self. - oldSighupCounter := atomic.LoadInt32(&sighupCounter) syscall.Kill(syscall.Getpid(), syscall.SIGHUP) // Wait a bit... signals are not handled synchronously. - for atomic.LoadInt32(&sighupCounter) == oldSighupCounter { - time.Sleep(time.Millisecond * 10) - } + time.Sleep(time.Millisecond * 10) // Write more to the file. _, err = af.Write([]byte("Line 3\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } + require.NoError(t, err) _, err = af.Write([]byte("Line 4\n")) - if err != nil { - t.Fatalf("Error writing to autofile: %v", err) - } - if err := af.Close(); err != nil { - t.Fatalf("Error closing autofile") - } + require.NoError(t, err) + err = af.Close() + require.NoError(t, err) // Both files should exist if body := cmn.MustReadFile(name + "_old"); string(body) != "Line 1\nLine 2\n" { @@ -74,3 +57,67 @@ func TestSIGHUP(t *testing.T) { t.Errorf("Unexpected body %s", body) } } + +// Manually modify file permissions, close, and reopen using autofile: +// We expect reopening to fail with an ErrPermissionsChanged error, because +// the permissions no longer match the expected 0600.
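Taken together, the autofile changes mean a Write can always trigger a lazy reopen after the 1s close tick or a SIGHUP, which is what logrotate's move-then-signal rotation relies on. A minimal usage sketch; the path and import alias are assumptions, not from the patch:

```go
package main

import autofile "github.com/tendermint/tendermint/libs/autofile"

func main() {
	af, err := autofile.OpenAutoFile("/tmp/app.log") // hypothetical path
	if err != nil {
		panic(err) // e.g. *ErrPermissionsChanged if perms drifted from 0600
	}
	defer af.Close()

	// Each Write reopens the file if a tick or SIGHUP closed it, so writes
	// land in the fresh file after logrotate moves the old one aside.
	if _, err := af.Write([]byte("hello\n")); err != nil {
		panic(err)
	}
}
```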
+func TestOpenAutoFilePerms(t *testing.T) { + file, err := ioutil.TempFile("", "permission_test") + require.NoError(t, err) + err = file.Close() + require.NoError(t, err) + name := file.Name() + + // open and change permissions + af, err := OpenAutoFile(name) + require.NoError(t, err) + err = af.file.Chmod(0755) + require.NoError(t, err) + err = af.Close() + require.NoError(t, err) + + // reopen and expect an ErrPermissionsChanged as Cause + af, err = OpenAutoFile(name) + require.Error(t, err) + if e, ok := err.(*errors.ErrPermissionsChanged); ok { + t.Logf("%v", e) + } else { + t.Errorf("unexpected error %v", e) + } +} + +func TestAutoFileSize(t *testing.T) { + // First, create an AutoFile writing to a tempfile dir + f, err := ioutil.TempFile("", "sighup_test") + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + // Here is the actual AutoFile. + af, err := OpenAutoFile(f.Name()) + require.NoError(t, err) + + // 1. Empty file + size, err := af.Size() + require.Zero(t, size) + require.NoError(t, err) + + // 2. Not empty file + data := []byte("Maniac\n") + _, err = af.Write(data) + require.NoError(t, err) + size, err = af.Size() + require.EqualValues(t, len(data), size) + require.NoError(t, err) + + // 3. Not existing file + err = af.Close() + require.NoError(t, err) + err = os.Remove(f.Name()) + require.NoError(t, err) + size, err = af.Size() + require.EqualValues(t, 0, size, "Expected a new file to be empty") + require.NoError(t, err) + + // Cleanup + _ = os.Remove(f.Name()) +} diff --git a/libs/autofile/cmd/logjack.go b/libs/autofile/cmd/logjack.go index 17b482bed3a..ead3f8305dc 100644 --- a/libs/autofile/cmd/logjack.go +++ b/libs/autofile/cmd/logjack.go @@ -39,13 +39,12 @@ func main() { } // Open Group - group, err := auto.OpenGroup(headPath) + group, err := auto.OpenGroup(headPath, auto.GroupHeadSizeLimit(chopSize), auto.GroupTotalSizeLimit(limitSize)) if err != nil { fmt.Printf("logjack couldn't create output file %v\n", headPath) os.Exit(1) } - group.SetHeadSizeLimit(chopSize) - group.SetTotalSizeLimit(limitSize) + err = group.Start() if err != nil { fmt.Printf("logjack couldn't start with file %v\n", headPath) diff --git a/libs/autofile/group.go b/libs/autofile/group.go index e747f04dd9e..1ec6b240da0 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "log" "os" "path" "path/filepath" @@ -19,10 +18,10 @@ import ( ) const ( - groupCheckDuration = 5000 * time.Millisecond - defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB - defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB - maxFilesToRemove = 4 // needs to be greater than 1 + defaultGroupCheckDuration = 5000 * time.Millisecond + defaultHeadSizeLimit = 10 * 1024 * 1024 // 10MB + defaultTotalSizeLimit = 1 * 1024 * 1024 * 1024 // 1GB + maxFilesToRemove = 4 // needs to be greater than 1 ) /* @@ -56,16 +55,17 @@ assuming that marker lines are written occasionally. 
type Group struct { cmn.BaseService - ID string - Head *AutoFile // The head AutoFile to write to - headBuf *bufio.Writer - Dir string // Directory that contains .Head - ticker *time.Ticker - mtx sync.Mutex - headSizeLimit int64 - totalSizeLimit int64 - minIndex int // Includes head - maxIndex int // Includes head, where Head will move to + ID string + Head *AutoFile // The head AutoFile to write to + headBuf *bufio.Writer + Dir string // Directory that contains .Head + ticker *time.Ticker + mtx sync.Mutex + headSizeLimit int64 + totalSizeLimit int64 + groupCheckDuration time.Duration + minIndex int // Includes head + maxIndex int // Includes head, where Head will move to // TODO: When we start deleting files, we need to start tracking GroupReaders // and their dependencies. @@ -73,7 +73,7 @@ type Group struct { // OpenGroup creates a new Group with head at headPath. It returns an error if // it fails to open head file. -func OpenGroup(headPath string) (g *Group, err error) { +func OpenGroup(headPath string, groupOptions ...func(*Group)) (g *Group, err error) { dir := path.Dir(headPath) head, err := OpenAutoFile(headPath) if err != nil { @@ -81,15 +81,21 @@ func OpenGroup(headPath string) (g *Group, err error) { } g = &Group{ - ID: "group:" + head.ID, - Head: head, - headBuf: bufio.NewWriterSize(head, 4096*10), - Dir: dir, - headSizeLimit: defaultHeadSizeLimit, - totalSizeLimit: defaultTotalSizeLimit, - minIndex: 0, - maxIndex: 0, + ID: "group:" + head.ID, + Head: head, + headBuf: bufio.NewWriterSize(head, 4096*10), + Dir: dir, + headSizeLimit: defaultHeadSizeLimit, + totalSizeLimit: defaultTotalSizeLimit, + groupCheckDuration: defaultGroupCheckDuration, + minIndex: 0, + maxIndex: 0, } + + for _, option := range groupOptions { + option(g) + } + g.BaseService = *cmn.NewBaseService(nil, "Group", g) gInfo := g.readGroupInfo() @@ -98,10 +104,31 @@ func OpenGroup(headPath string) (g *Group, err error) { return } +// GroupCheckDuration allows you to overwrite default groupCheckDuration. +func GroupCheckDuration(duration time.Duration) func(*Group) { + return func(g *Group) { + g.groupCheckDuration = duration + } +} + +// GroupHeadSizeLimit allows you to overwrite default head size limit - 10MB. +func GroupHeadSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.headSizeLimit = limit + } +} + +// GroupTotalSizeLimit allows you to overwrite default total size limit of the group - 1GB. +func GroupTotalSizeLimit(limit int64) func(*Group) { + return func(g *Group) { + g.totalSizeLimit = limit + } +} + // OnStart implements Service by starting the goroutine that checks file and // group limits. func (g *Group) OnStart() error { - g.ticker = time.NewTicker(groupCheckDuration) + g.ticker = time.NewTicker(g.groupCheckDuration) go g.processTicks() return nil } @@ -122,13 +149,6 @@ func (g *Group) Close() { g.mtx.Unlock() } -// SetHeadSizeLimit allows you to overwrite default head size limit - 10MB. -func (g *Group) SetHeadSizeLimit(limit int64) { - g.mtx.Lock() - g.headSizeLimit = limit - g.mtx.Unlock() -} - // HeadSizeLimit returns the current head size limit. func (g *Group) HeadSizeLimit() int64 { g.mtx.Lock() @@ -136,14 +156,6 @@ func (g *Group) HeadSizeLimit() int64 { return g.headSizeLimit } -// SetTotalSizeLimit allows you to overwrite default total size limit of the -// group - 1GB. -func (g *Group) SetTotalSizeLimit(limit int64) { - g.mtx.Lock() - g.totalSizeLimit = limit - g.mtx.Unlock() -} - // TotalSizeLimit returns total size limit of the group. 
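The Set* mutators above are removed in favor of functional options, so the limits are fixed before the Group's ticker goroutine ever runs. A construction sketch mirroring the logjack change earlier in this patch (the sizes and headPath are placeholders; all three options exist in the diff):

```go
g, err := auto.OpenGroup(headPath,
	auto.GroupHeadSizeLimit(1024*1024),            // rotate the head at ~1MB
	auto.GroupTotalSizeLimit(100*1024*1024),       // cap the whole group at ~100MB
	auto.GroupCheckDuration(100*time.Millisecond), // check limits 10x per second
)
if err != nil {
	return err
}
// Limits are already set here, before the processTicks goroutine starts.
if err = g.Start(); err != nil {
	return err
}
```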
func (g *Group) TotalSizeLimit() int64 { g.mtx.Lock() @@ -199,12 +211,14 @@ func (g *Group) Flush() error { } func (g *Group) processTicks() { - select { - case <-g.ticker.C: - g.checkHeadSizeLimit() - g.checkTotalSizeLimit() - case <-g.Quit(): - return + for { + select { + case <-g.ticker.C: + g.checkHeadSizeLimit() + g.checkTotalSizeLimit() + case <-g.Quit(): + return + } } } @@ -216,7 +230,8 @@ func (g *Group) checkHeadSizeLimit() { } size, err := g.Head.Size() if err != nil { - panic(err) + g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path, "err", err) + return } if size >= limit { g.RotateFile() @@ -238,21 +253,21 @@ func (g *Group) checkTotalSizeLimit() { } if index == gInfo.MaxIndex { // Special degenerate case, just do nothing. - log.Println("WARNING: Group's head " + g.Head.Path + "may grow without bound") + g.Logger.Error("Group's head may grow without bound", "head", g.Head.Path) return } pathToRemove := filePathForIndex(g.Head.Path, index, gInfo.MaxIndex) - fileInfo, err := os.Stat(pathToRemove) + fInfo, err := os.Stat(pathToRemove) if err != nil { - log.Println("WARNING: Failed to fetch info for file @" + pathToRemove) + g.Logger.Error("Failed to fetch info for file", "file", pathToRemove) continue } err = os.Remove(pathToRemove) if err != nil { - log.Println(err) + g.Logger.Error("Failed to remove path", "path", pathToRemove) return } - totalSize -= fileInfo.Size() + totalSize -= fInfo.Size() } } @@ -264,6 +279,14 @@ func (g *Group) RotateFile() { headPath := g.Head.Path + if err := g.headBuf.Flush(); err != nil { + panic(err) + } + + if err := g.Head.Sync(); err != nil { + panic(err) + } + if err := g.Head.closeFile(); err != nil { panic(err) } @@ -655,7 +678,6 @@ func (gr *GroupReader) ReadLine() (string, error) { // IF index > gr.Group.maxIndex, returns io.EOF // CONTRACT: caller should hold gr.mtx func (gr *GroupReader) openFile(index int) error { - // Lock on Group to ensure that head doesn't move in the meanwhile. 
gr.Group.mtx.Lock() defer gr.Group.mtx.Unlock() @@ -665,7 +687,7 @@ func (gr *GroupReader) openFile(index int) error { } curFilePath := filePathForIndex(gr.Head.Path, index, gr.Group.maxIndex) - curFile, err := os.Open(curFilePath) + curFile, err := os.OpenFile(curFilePath, os.O_RDONLY|os.O_CREATE, autoFilePerms) if err != nil { return err } @@ -724,11 +746,11 @@ func (gr *GroupReader) SetIndex(index int) error { func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { return func(line string) (int, error) { if !strings.HasPrefix(line, prefix) { - return -1, errors.New(cmn.Fmt("Marker line did not have prefix: %v", prefix)) + return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) } i, err := strconv.Atoi(line[len(prefix):]) if err != nil { - return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error())) + return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) } if target < i { return 1, nil diff --git a/libs/autofile/group_test.go b/libs/autofile/group_test.go index d87bdba829c..e173e4996de 100644 --- a/libs/autofile/group_test.go +++ b/libs/autofile/group_test.go @@ -23,12 +23,10 @@ func createTestGroupWithHeadSizeLimit(t *testing.T, headSizeLimit int64) *Group require.NoError(t, err, "Error creating dir") headPath := testDir + "/myfile" - g, err := OpenGroup(headPath) + g, err := OpenGroup(headPath, GroupHeadSizeLimit(headSizeLimit)) require.NoError(t, err, "Error opening Group") require.NotEqual(t, nil, g, "Failed to create Group") - g.SetHeadSizeLimit(headSizeLimit) - return g } diff --git a/libs/autofile/sighup_watcher.go b/libs/autofile/sighup_watcher.go deleted file mode 100644 index f72f12fcdd1..00000000000 --- a/libs/autofile/sighup_watcher.go +++ /dev/null @@ -1,69 +0,0 @@ -package autofile - -import ( - "os" - "os/signal" - "sync" - "sync/atomic" - "syscall" -) - -func init() { - initSighupWatcher() -} - -var sighupWatchers *SighupWatcher -var sighupCounter int32 // For testing - -func initSighupWatcher() { - sighupWatchers = newSighupWatcher() - - hup := make(chan os.Signal, 1) - signal.Notify(hup, syscall.SIGHUP) - - quit := make(chan os.Signal, 1) - signal.Notify(quit, os.Interrupt, syscall.SIGTERM) - - go func() { - select { - case <-hup: - sighupWatchers.closeAll() - atomic.AddInt32(&sighupCounter, 1) - case <-quit: - return - } - }() -} - -// Watchces for SIGHUP events and notifies registered AutoFiles -type SighupWatcher struct { - mtx sync.Mutex - autoFiles map[string]*AutoFile -} - -func newSighupWatcher() *SighupWatcher { - return &SighupWatcher{ - autoFiles: make(map[string]*AutoFile, 10), - } -} - -func (w *SighupWatcher) addAutoFile(af *AutoFile) { - w.mtx.Lock() - w.autoFiles[af.ID] = af - w.mtx.Unlock() -} - -// If AutoFile isn't registered or was already removed, does nothing. 
-func (w *SighupWatcher) removeAutoFile(af *AutoFile) { - w.mtx.Lock() - delete(w.autoFiles, af.ID) - w.mtx.Unlock() -} - -func (w *SighupWatcher) closeAll() { - w.mtx.Lock() - for _, af := range w.autoFiles { - af.closeFile() - } - w.mtx.Unlock() -} diff --git a/libs/clist/bench_test.go b/libs/clist/bench_test.go new file mode 100644 index 00000000000..95973cc7678 --- /dev/null +++ b/libs/clist/bench_test.go @@ -0,0 +1,46 @@ +package clist + +import "testing" + +func BenchmarkDetaching(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.removed = true + start.DetachNext() + start.DetachPrev() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +// This is used to benchmark the time of RMutex. +func BenchmarkRemoved(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.Removed() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +func BenchmarkPushBack(b *testing.B) { + lst := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lst.PushBack(i) + } +} diff --git a/libs/clist/clist.go b/libs/clist/clist.go index ccb1f5777a1..393bdf73f5e 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -12,9 +12,15 @@ to ensure garbage collection of removed elements. */ import ( + "fmt" "sync" ) +// MaxLength is the max allowed number of elements a linked list is +// allowed to contain. +// If more elements are pushed to the list it will panic. +const MaxLength = int(^uint(0) >> 1) + /* CElement is an element of a linked-list @@ -107,51 +113,50 @@ func (e *CElement) NextWaitChan() <-chan struct{} { // Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.next + val := e.next + e.mtx.RUnlock() + return val } // Nonblocking, may return nil if at the end. func (e *CElement) Prev() *CElement { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.prev + prev := e.prev + e.mtx.RUnlock() + return prev } func (e *CElement) Removed() bool { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.removed + isRemoved := e.removed + e.mtx.RUnlock() + return isRemoved } func (e *CElement) DetachNext() { - if !e.Removed() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() panic("DetachNext() must be called after Remove(e)") } - e.mtx.Lock() - defer e.mtx.Unlock() - e.next = nil + e.mtx.Unlock() } func (e *CElement) DetachPrev() { - if !e.Removed() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() panic("DetachPrev() must be called after Remove(e)") } - e.mtx.Lock() - defer e.mtx.Unlock() - e.prev = nil + e.mtx.Unlock() } // NOTE: This function needs to be safe for // concurrent goroutines waiting on nextWg. 
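A recurring pattern in the clist changes below: deferred unlocks are replaced by copying the field while holding the lock and unlocking explicitly, since defer carried measurable overhead on hot paths in the Go releases of this era (the new bench_test.go exists to measure exactly this). A before/after sketch with hypothetical method names:

```go
// Before: convenient, but the deferred call costs extra on a hot path.
func (e *CElement) nextWithDefer() *CElement {
	e.mtx.RLock()
	defer e.mtx.RUnlock()
	return e.next
}

// After: copy the field under the lock, unlock, then return the copy.
func (e *CElement) nextExplicit() *CElement {
	e.mtx.RLock()
	val := e.next
	e.mtx.RUnlock()
	return val
}
```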
func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() - defer e.mtx.Unlock() oldNext := e.next e.next = newNext @@ -168,13 +173,13 @@ func (e *CElement) SetNext(newNext *CElement) { e.nextWg.Done() close(e.nextWaitCh) } + e.mtx.Unlock() } // NOTE: This function needs to be safe for // concurrent goroutines waiting on prevWg func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() - defer e.mtx.Unlock() oldPrev := e.prev e.prev = newPrev @@ -186,11 +191,11 @@ func (e *CElement) SetPrev(newPrev *CElement) { e.prevWg.Done() close(e.prevWaitCh) } + e.mtx.Unlock() } func (e *CElement) SetRemoved() { e.mtx.Lock() - defer e.mtx.Unlock() e.removed = true @@ -203,6 +208,7 @@ func (e *CElement) SetRemoved() { e.nextWg.Done() close(e.nextWaitCh) } + e.mtx.Unlock() } //-------------------------------------------------------------------------------- @@ -210,6 +216,7 @@ func (e *CElement) SetRemoved() { // CList represents a linked list. // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. +// Panics if length grows beyond the max. type CList struct { mtx sync.RWMutex wg *sync.WaitGroup @@ -217,34 +224,44 @@ type CList struct { head *CElement // first element tail *CElement // last element len int // list length + maxLen int // max list length } func (l *CList) Init() *CList { l.mtx.Lock() - defer l.mtx.Unlock() l.wg = waitGroup1() l.waitCh = make(chan struct{}) l.head = nil l.tail = nil l.len = 0 + l.mtx.Unlock() return l } -func New() *CList { return new(CList).Init() } +// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. +func New() *CList { return newWithMax(MaxLength) } + +// Return CList with given maxLength. +// Will panic if list exceeds given maxLength. +func newWithMax(maxLength int) *CList { + l := new(CList) + l.maxLen = maxLength + return l.Init() +} func (l *CList) Len() int { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.len + len := l.len + l.mtx.RUnlock() + return len } func (l *CList) Front() *CElement { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.head + head := l.head + l.mtx.RUnlock() + return head } func (l *CList) FrontWait() *CElement { @@ -265,9 +282,9 @@ func (l *CList) FrontWait() *CElement { func (l *CList) Back() *CElement { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.tail + back := l.tail + l.mtx.RUnlock() + return back } func (l *CList) BackWait() *CElement { @@ -295,9 +312,9 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } +// Panics if list grows beyond its max length. func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - defer l.mtx.Unlock() // Construct a new element e := &CElement{ @@ -316,6 +333,9 @@ func (l *CList) PushBack(v interface{}) *CElement { l.wg.Done() close(l.waitCh) } + if l.len >= l.maxLen { + panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen)) + } l.len++ // Modify the tail @@ -327,7 +347,7 @@ func (l *CList) PushBack(v interface{}) *CElement { l.tail.SetNext(e) // This will make e accessible. l.tail = e // Update the list. } - + l.mtx.Unlock() return e } @@ -335,18 +355,20 @@ func (l *CList) PushBack(v interface{}) *CElement { // NOTE: As per the contract of CList, removed elements cannot be added back. 
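PushBack now enforces a hard bound on list length. A sketch of the failure mode, mirroring TestPanicOnMaxLength below (newWithMax is the package-private constructor added in this patch):

```go
func boundedPushBackDemo() { // hypothetical, for illustration only
	l := newWithMax(2)
	l.PushBack("a")
	l.PushBack("b")
	// A third PushBack would panic: "clist: maximum length list reached 2".
	// New() uses MaxLength (the max int, ~2^63-1 on 64-bit), so ordinary
	// callers are effectively unbounded but still fail loudly on runaway growth.
}
```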
func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() - defer l.mtx.Unlock() prev := e.Prev() next := e.Next() if l.head == nil || l.tail == nil { + l.mtx.Unlock() panic("Remove(e) on empty CList") } if prev == nil && l.head != e { + l.mtx.Unlock() panic("Remove(e) with false head") } if next == nil && l.tail != e { + l.mtx.Unlock() panic("Remove(e) with false tail") } @@ -374,6 +396,7 @@ func (l *CList) Remove(e *CElement) interface{} { // Set .Done() on e, otherwise waiters will wait forever. e.SetRemoved() + l.mtx.Unlock() return e.Value } diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index dbdf2f02860..4ded6177a82 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -7,9 +7,22 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" cmn "github.com/tendermint/tendermint/libs/common" ) +func TestPanicOnMaxLength(t *testing.T) { + maxLength := 1000 + + l := newWithMax(maxLength) + for i := 0; i < maxLength; i++ { + l.PushBack(1) + } + assert.Panics(t, func() { + l.PushBack(1) + }) +} + func TestSmall(t *testing.T) { l := New() el1 := l.PushBack(1) @@ -149,8 +162,8 @@ func _TestGCRandom(t *testing.T) { func TestScanRightDeleteRandom(t *testing.T) { - const numElements = 10000 - const numTimes = 1000 + const numElements = 1000 + const numTimes = 100 const numScanners = 10 l := New() @@ -209,7 +222,7 @@ func TestScanRightDeleteRandom(t *testing.T) { // Stop scanners close(stop) - time.Sleep(time.Second * 1) + // time.Sleep(time.Second * 1) // And remove all the elements. for el := l.Front(); el != nil; el = el.Next() { @@ -244,7 +257,7 @@ func TestWaitChan(t *testing.T) { for i := 1; i < 100; i++ { l.PushBack(i) pushed++ - time.Sleep(time.Duration(cmn.RandIntn(100)) * time.Millisecond) + time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond) } close(done) }() @@ -283,7 +296,7 @@ FOR_LOOP2: if prev == nil { t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") } - case <-time.After(5 * time.Second): + case <-time.After(3 * time.Second): break FOR_LOOP2 } } diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index abf6110d80d..ebd6cc4a086 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -119,14 +119,13 @@ func (bA *BitArray) Or(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) - for i := 0; i < len(c.Elems); i++ { + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { c.Elems[i] |= o.Elems[i] } + bA.mtx.Unlock() + o.mtx.Unlock() return c } @@ -173,8 +172,9 @@ func (bA *BitArray) not() *BitArray { } // Sub subtracts the two bit-arrays bitwise, without carrying the bits. -// This is essentially bA.And(o.Not()). -// If bA is longer than o, o is right padded with zeroes. +// Note that carryless subtraction of a - b is (a and not b). +// The output is the same as bA, regardless of o's size. +// If bA is longer than o, o is right padded with zeroes func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA == nil || o == nil { // TODO: Decide if we should do 1's complement here? 
@@ -182,24 +182,20 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() - if bA.Bits > o.Bits { - c := bA.copy() - for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^c.Elems[i] - } - i := len(o.Elems) - 1 - if i >= 0 { - for idx := i * 64; idx < o.Bits; idx++ { - c.setIndex(idx, c.getIndex(idx) && !o.getIndex(idx)) - } - } - return c - } - return bA.and(o.not()) // Note degenerate case where o == nil + // output is the same size as bA + c := bA.copyBits(bA.Bits) + // Only iterate to the minimum size between the two. + // If o is longer, those bits are ignored. + // If bA is longer, then skipping those iterations is equivalent + // to right padding with 0's + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { + // &^ is and not in golang + c.Elems[i] &^= o.Elems[i] + } + bA.mtx.Unlock() + o.mtx.Unlock() + return c } // IsEmpty returns true iff all bits in the bit array are 0 @@ -238,49 +234,53 @@ func (bA *BitArray) IsFull() bool { return (lastElem+1)&((uint64(1)< 0 { - randBitStart := RandIntn(64) - for j := 0; j < 64; j++ { - bitIdx := ((j + randBitStart) % 64) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } - } - PanicSanity("should not happen") - } - } else { - // Special case for last elem, to ignore straggler bits - elemBits := bA.Bits % 64 - if elemBits == 0 { - elemBits = 64 - } - randBitStart := RandIntn(elemBits) - for j := 0; j < elemBits; j++ { - bitIdx := ((j + randBitStart) % elemBits) - if (bA.Elems[elemIdx] & (uint64(1) << uint(bitIdx))) > 0 { - return 64*elemIdx + bitIdx, true - } + + return trueIndices[RandIntn(len(trueIndices))], true +} + +func (bA *BitArray) getTrueIndices() []int { + trueIndices := make([]int, 0, bA.Bits) + curBit := 0 + numElems := len(bA.Elems) + // set all true indices + for i := 0; i < numElems-1; i++ { + elem := bA.Elems[i] + if elem == 0 { + curBit += 64 + continue + } + for j := 0; j < 64; j++ { + if (elem & (uint64(1) << uint64(j))) > 0 { + trueIndices = append(trueIndices, curBit) } + curBit++ + } + } + // handle last element + lastElem := bA.Elems[numElems-1] + numFinalBits := bA.Bits - curBit + for i := 0; i < numFinalBits; i++ { + if (lastElem & (uint64(1) << uint64(i))) > 0 { + trueIndices = append(trueIndices, curBit) } + curBit++ } - return 0, false + return trueIndices } // String returns a string representation of BitArray: BA{}, diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index c697ba5de86..09ec8af2548 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -3,6 +3,7 @@ package common import ( "bytes" "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -74,73 +75,61 @@ func TestOr(t *testing.T) { } } -func TestSub1(t *testing.T) { - - bA1, _ := randBitArray(31) - bA2, _ := randBitArray(51) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } +func TestSub(t 
*testing.T) { + testCases := []struct { + initBA string + subtractingBA string + expectedBA string + }{ + {`null`, `null`, `null`}, + {`"x"`, `null`, `null`}, + {`null`, `"x"`, `null`}, + {`"x"`, `"x"`, `"_"`}, + {`"xxxxxx"`, `"x_x_x_"`, `"_x_x_x"`}, + {`"x_x_x_"`, `"xxxxxx"`, `"______"`}, + {`"xxxxxx"`, `"x_x_x_xxxx"`, `"_x_x_x"`}, + {`"x_x_x_xxxx"`, `"xxxxxx"`, `"______xxxx"`}, + {`"xxxxxxxxxx"`, `"x_x_x_"`, `"_x_x_xxxxx"`}, + {`"x_x_x_"`, `"xxxxxxxxxx"`, `"______"`}, } -} - -func TestSub2(t *testing.T) { - - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.Sub(bA2) + for _, tc := range testCases { + var bA *BitArray + err := json.Unmarshal([]byte(tc.initBA), &bA) + require.Nil(t, err) - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + var o *BitArray + err = json.Unmarshal([]byte(tc.subtractingBA), &o) + require.Nil(t, err) - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } + got, _ := json.Marshal(bA.Sub(o)) + require.Equal(t, tc.expectedBA, string(got), "%s minus %s doesn't equal %s", tc.initBA, tc.subtractingBA, tc.expectedBA) } } func TestPickRandom(t *testing.T) { - for idx := 0; idx < 123; idx++ { - bA1 := NewBitArray(123) - bA1.SetIndex(idx, true) - index, ok := bA1.PickRandom() - if !ok { - t.Fatal("Expected to pick element but got none") - } - if index != idx { - t.Fatalf("Expected to pick element at %v but got wrong index", idx) - } + empty16Bits := "________________" + empty64Bits := empty16Bits + empty16Bits + empty16Bits + empty16Bits + testCases := []struct { + bA string + ok bool + }{ + {`null`, false}, + {`"x"`, true}, + {`"` + empty16Bits + `"`, false}, + {`"x` + empty16Bits + `"`, true}, + {`"` + empty16Bits + `x"`, true}, + {`"x` + empty16Bits + `x"`, true}, + {`"` + empty64Bits + `"`, false}, + {`"x` + empty64Bits + `"`, true}, + {`"` + empty64Bits + `x"`, true}, + {`"x` + empty64Bits + `x"`, true}, + } + for _, tc := range testCases { + var bitArr *BitArray + err := json.Unmarshal([]byte(tc.bA), &bitArr) + require.NoError(t, err) + _, ok := bitArr.PickRandom() + require.Equal(t, tc.ok, ok, "PickRandom got an unexpected result on input %s", tc.bA) } } @@ -149,7 +138,7 @@ func TestBytes(t *testing.T) { bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { if !bytes.Equal(bA.Bytes(), bz) { - panic(Fmt("Expected %X but got %X", bz, bA.Bytes())) + panic(fmt.Sprintf("Expected %X but got %X", bz, bA.Bytes())) } } check(bA, []byte{0x01}) diff --git a/libs/common/byteslice.go b/libs/common/byteslice.go index 57b3a8a2bc6..af2d7949d37 100644 --- a/libs/common/byteslice.go +++ b/libs/common/byteslice.go @@ -1,9 +1,5 @@ package common -import ( - "bytes" -) - // Fingerprint returns the first 6 bytes of a byte slice. // If the slice is less than 6 bytes, the fingerprint // contains trailing zeroes. 
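Stepping back to the BitArray changes above: Sub is now a plain element-wise a &^ b (AND NOT), and PickRandom now enumerates all set bits and draws one with RandIntn, which makes the pick uniform across set bits. A worked Sub example in the tests' JSON notation, where x is a set bit and _ a clear one (a sketch; assumes encoding/json and fmt are imported):

```go
var a, b *BitArray
_ = json.Unmarshal([]byte(`"xxxxxx"`), &a) // bits 111111
_ = json.Unmarshal([]byte(`"x_x_x_"`), &b) // bits 101010
c := a.Sub(b) // per element: a.Elems[i] &^ b.Elems[i], clearing a's bits wherever b is set
out, _ := json.Marshal(c)
fmt.Println(string(out)) // `"_x_x_x"`; the result keeps a's length regardless of b's
```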
@@ -12,62 +8,3 @@ func Fingerprint(slice []byte) []byte { copy(fingerprint, slice) return fingerprint } - -func IsZeros(slice []byte) bool { - for _, byt := range slice { - if byt != byte(0) { - return false - } - } - return true -} - -func RightPadBytes(slice []byte, l int) []byte { - if l < len(slice) { - return slice - } - padded := make([]byte, l) - copy(padded[0:len(slice)], slice) - return padded -} - -func LeftPadBytes(slice []byte, l int) []byte { - if l < len(slice) { - return slice - } - padded := make([]byte, l) - copy(padded[l-len(slice):], slice) - return padded -} - -func TrimmedString(b []byte) string { - trimSet := string([]byte{0}) - return string(bytes.TrimLeft(b, trimSet)) - -} - -// PrefixEndBytes returns the end byteslice for a noninclusive range -// that would include all byte slices for which the input is the prefix -func PrefixEndBytes(prefix []byte) []byte { - if prefix == nil { - return nil - } - - end := make([]byte, len(prefix)) - copy(end, prefix) - finished := false - - for !finished { - if end[len(end)-1] != byte(255) { - end[len(end)-1]++ - finished = true - } else { - end = end[:len(end)-1] - if len(end) == 0 { - end = nil - finished = true - } - } - } - return end -} diff --git a/libs/common/byteslice_test.go b/libs/common/byteslice_test.go deleted file mode 100644 index 98085d12574..00000000000 --- a/libs/common/byteslice_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPrefixEndBytes(t *testing.T) { - assert := assert.New(t) - - var testCases = []struct { - prefix []byte - expected []byte - }{ - {[]byte{byte(55), byte(255), byte(255), byte(0)}, []byte{byte(55), byte(255), byte(255), byte(1)}}, - {[]byte{byte(55), byte(255), byte(255), byte(15)}, []byte{byte(55), byte(255), byte(255), byte(16)}}, - {[]byte{byte(55), byte(200), byte(255)}, []byte{byte(55), byte(201)}}, - {[]byte{byte(55), byte(255), byte(255)}, []byte{byte(56)}}, - {[]byte{byte(255), byte(255), byte(255)}, nil}, - {nil, nil}, - } - - for _, test := range testCases { - end := PrefixEndBytes(test.prefix) - assert.Equal(test.expected, end) - } -} diff --git a/libs/common/cmap.go b/libs/common/cmap.go index c65c27d4c42..2f7720d2e12 100644 --- a/libs/common/cmap.go +++ b/libs/common/cmap.go @@ -16,58 +16,60 @@ func NewCMap() *CMap { func (cm *CMap) Set(key string, value interface{}) { cm.l.Lock() - defer cm.l.Unlock() cm.m[key] = value + cm.l.Unlock() } func (cm *CMap) Get(key string) interface{} { cm.l.Lock() - defer cm.l.Unlock() - return cm.m[key] + val := cm.m[key] + cm.l.Unlock() + return val } func (cm *CMap) Has(key string) bool { cm.l.Lock() - defer cm.l.Unlock() _, ok := cm.m[key] + cm.l.Unlock() return ok } func (cm *CMap) Delete(key string) { cm.l.Lock() - defer cm.l.Unlock() delete(cm.m, key) + cm.l.Unlock() } func (cm *CMap) Size() int { cm.l.Lock() - defer cm.l.Unlock() - return len(cm.m) + size := len(cm.m) + cm.l.Unlock() + return size } func (cm *CMap) Clear() { cm.l.Lock() - defer cm.l.Unlock() cm.m = make(map[string]interface{}) + cm.l.Unlock() } func (cm *CMap) Keys() []string { cm.l.Lock() - defer cm.l.Unlock() keys := []string{} for k := range cm.m { keys = append(keys, k) } + cm.l.Unlock() return keys } func (cm *CMap) Values() []interface{} { cm.l.Lock() - defer cm.l.Unlock() items := []interface{}{} for _, v := range cm.m { items = append(items, v) } + cm.l.Unlock() return items } diff --git a/libs/common/cmap_test.go b/libs/common/cmap_test.go index c665a7f3ed6..33d9f047754 100644 
--- a/libs/common/cmap_test.go +++ b/libs/common/cmap_test.go @@ -51,3 +51,14 @@ func TestContains(t *testing.T) { assert.False(t, cmap.Has("key2")) assert.Nil(t, cmap.Get("key2")) } + +func BenchmarkCMapHas(b *testing.B) { + m := NewCMap() + for i := 0; i < 1000; i++ { + m.Set(string(i), i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Has(string(i)) + } +} diff --git a/libs/common/colors.go b/libs/common/colors.go index 049ce7a5056..4837f97b49a 100644 --- a/libs/common/colors.go +++ b/libs/common/colors.go @@ -88,7 +88,7 @@ func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string if 0x21 <= b && b < 0x7F { s += textColor(string(b)) } else { - s += bytesColor(Fmt("%02X", b)) + s += bytesColor(fmt.Sprintf("%02X", b)) } } return s diff --git a/libs/common/errors.go b/libs/common/errors.go index 5c31b8968dd..10e40ebd25a 100644 --- a/libs/common/errors.go +++ b/libs/common/errors.go @@ -10,13 +10,13 @@ import ( func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { if causeCmnError, ok := cause.(*cmnError); ok { - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return causeCmnError.Stacktrace().Trace(1, msg) } else if cause == nil { return newCmnError(FmtError{format, args}).Stacktrace() } else { // NOTE: causeCmnError is a typed nil here. - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return newCmnError(cause).Stacktrace().Trace(1, msg) } } @@ -98,7 +98,7 @@ func (err *cmnError) Stacktrace() Error { // Add tracing information with msg. // Set n=0 unless wrapped with some function, then n > 0. func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error { - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return err.doTrace(msg, offset) } @@ -146,7 +146,7 @@ func (err *cmnError) Format(s fmt.State, verb rune) { s.Write([]byte("--= /Error =--\n")) } else { // Write msg. - s.Write([]byte(fmt.Sprintf("Error{%v}", err.data))) // TODO tick-esc? + s.Write([]byte(fmt.Sprintf("%v", err.data))) } } } @@ -221,7 +221,7 @@ func (fe FmtError) Format() string { // and some guarantee is not satisfied. // XXX DEPRECATED func PanicSanity(v interface{}) { - panic(Fmt("Panicked on a Sanity Check: %v", v)) + panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v)) } // A panic here means something has gone horribly wrong, in the form of data corruption or @@ -229,18 +229,18 @@ func PanicSanity(v interface{}) { // If they do, it's indicative of a much more serious problem. // XXX DEPRECATED func PanicCrisis(v interface{}) { - panic(Fmt("Panicked on a Crisis: %v", v)) + panic(fmt.Sprintf("Panicked on a Crisis: %v", v)) } // Indicates a failure of consensus. Someone was malicious or something has // gone horribly wrong. 
These should really boot us into an "emergency-recover" mode // XXX DEPRECATED func PanicConsensus(v interface{}) { - panic(Fmt("Panicked on a Consensus Failure: %v", v)) + panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v)) } // For those times when we're not sure if we should panic // XXX DEPRECATED func PanicQ(v interface{}) { - panic(Fmt("Panicked questionably: %v", v)) + panic(fmt.Sprintf("Panicked questionably: %v", v)) } diff --git a/libs/common/errors_test.go b/libs/common/errors_test.go index 52c78a765b7..b85936dd5f4 100644 --- a/libs/common/errors_test.go +++ b/libs/common/errors_test.go @@ -24,7 +24,7 @@ func TestErrorPanic(t *testing.T) { var err = capturePanic() assert.Equal(t, pnk{"something"}, err.Data()) - assert.Equal(t, "Error{{something}}", fmt.Sprintf("%v", err)) + assert.Equal(t, "{something}", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "This is the message in ErrorWrap(r, message).") assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -34,7 +34,7 @@ func TestErrorWrapSomething(t *testing.T) { var err = ErrorWrap("something", "formatter%v%v", 0, 1) assert.Equal(t, "something", err.Data()) - assert.Equal(t, "Error{something}", fmt.Sprintf("%v", err)) + assert.Equal(t, "something", fmt.Sprintf("%v", err)) assert.Regexp(t, `formatter01\n`, fmt.Sprintf("%#v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -46,7 +46,7 @@ func TestErrorWrapNothing(t *testing.T) { assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data()) - assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -58,7 +58,7 @@ func TestErrorNewError(t *testing.T) { assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data()) - assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.NotContains(t, fmt.Sprintf("%#v", err), "Stack Trace") } @@ -70,7 +70,7 @@ func TestErrorNewErrorWithStacktrace(t *testing.T) { assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data()) - assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) assert.Contains(t, fmt.Sprintf("%#v", err), "Stack Trace:\n 0") } @@ -85,7 +85,7 @@ func TestErrorNewErrorWithTrace(t *testing.T) { assert.Equal(t, FmtError{"formatter%v%v", []interface{}{0, 1}}, err.Data()) - assert.Equal(t, "Error{formatter01}", fmt.Sprintf("%v", err)) + assert.Equal(t, "formatter01", fmt.Sprintf("%v", err)) assert.Contains(t, fmt.Sprintf("%#v", err), `Data: common.FmtError{format:"formatter%v%v", args:[]interface {}{0, 1}}`) dump := fmt.Sprintf("%#v", err) assert.NotContains(t, dump, "Stack Trace") diff --git a/libs/common/int.go b/libs/common/int.go index a8a5f1e00f1..845dc97f678 100644 --- a/libs/common/int.go +++ b/libs/common/int.go @@ -1,59 +1,5 @@ package common -import ( - "encoding/binary" - "sort" -) - -// Sort for []uint64 - -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) 
Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Uint64Slice) Sort() { sort.Sort(p) } - -func SearchUint64s(a []uint64, x uint64) int { - return sort.Search(len(a), func(i int) bool { return a[i] >= x }) -} - -func (p Uint64Slice) Search(x uint64) int { return SearchUint64s(p, x) } - -//-------------------------------------------------------------------------------- - -func PutUint64LE(dest []byte, i uint64) { - binary.LittleEndian.PutUint64(dest, i) -} - -func GetUint64LE(src []byte) uint64 { - return binary.LittleEndian.Uint64(src) -} - -func PutUint64BE(dest []byte, i uint64) { - binary.BigEndian.PutUint64(dest, i) -} - -func GetUint64BE(src []byte) uint64 { - return binary.BigEndian.Uint64(src) -} - -func PutInt64LE(dest []byte, i int64) { - binary.LittleEndian.PutUint64(dest, uint64(i)) -} - -func GetInt64LE(src []byte) int64 { - return int64(binary.LittleEndian.Uint64(src)) -} - -func PutInt64BE(dest []byte, i int64) { - binary.BigEndian.PutUint64(dest, uint64(i)) -} - -func GetInt64BE(src []byte) int64 { - return int64(binary.BigEndian.Uint64(src)) -} - // IntInSlice returns true if a is found in the list. func IntInSlice(a int, list []int) bool { for _, b := range list { diff --git a/libs/common/math.go b/libs/common/math.go index b037d1a71ef..ae91f2058df 100644 --- a/libs/common/math.go +++ b/libs/common/math.go @@ -1,47 +1,5 @@ package common -func MaxInt8(a, b int8) int8 { - if a > b { - return a - } - return b -} - -func MaxUint8(a, b uint8) uint8 { - if a > b { - return a - } - return b -} - -func MaxInt16(a, b int16) int16 { - if a > b { - return a - } - return b -} - -func MaxUint16(a, b uint16) uint16 { - if a > b { - return a - } - return b -} - -func MaxInt32(a, b int32) int32 { - if a > b { - return a - } - return b -} - -func MaxUint32(a, b uint32) uint32 { - if a > b { - return a - } - return b -} - func MaxInt64(a, b int64) int64 { if a > b { return a @@ -49,13 +7,6 @@ func MaxInt64(a, b int64) int64 { return b } -func MaxUint64(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - func MaxInt(a, b int) int { if a > b { return a @@ -63,57 +14,8 @@ func MaxInt(a, b int) int { return b } -func MaxUint(a, b uint) uint { - if a > b { - return a - } - return b -} - //----------------------------------------------------------------------------- -func MinInt8(a, b int8) int8 { - if a < b { - return a - } - return b -} - -func MinUint8(a, b uint8) uint8 { - if a < b { - return a - } - return b -} - -func MinInt16(a, b int16) int16 { - if a < b { - return a - } - return b -} - -func MinUint16(a, b uint16) uint16 { - if a < b { - return a - } - return b -} - -func MinInt32(a, b int32) int32 { - if a < b { - return a - } - return b -} - -func MinUint32(a, b uint32) uint32 { - if a < b { - return a - } - return b -} - func MinInt64(a, b int64) int64 { if a < b { return a @@ -121,37 +23,9 @@ func MinInt64(a, b int64) int64 { return b } -func MinUint64(a, b uint64) uint64 { - if a < b { - return a - } - return b -} - func MinInt(a, b int) int { if a < b { return a } return b } - -func MinUint(a, b uint) uint { - if a < b { - return a - } - return b -} - -//----------------------------------------------------------------------------- - -func ExpUint64(a, b uint64) uint64 { - accum := uint64(1) - for b > 0 { - if b&1 == 1 { - accum *= a - } - a *= a - b >>= 1 - } - return accum -} diff --git a/libs/common/os.go b/libs/common/os.go index b8419764e9f..501bb564008 100644 --- a/libs/common/os.go +++ 
b/libs/common/os.go @@ -106,7 +106,7 @@ func ReadFile(filePath string) ([]byte, error) { func MustReadFile(filePath string) []byte { fileBytes, err := ioutil.ReadFile(filePath) if err != nil { - Exit(Fmt("MustReadFile failed: %v", err)) + Exit(fmt.Sprintf("MustReadFile failed: %v", err)) return nil } return fileBytes @@ -119,7 +119,7 @@ func WriteFile(filePath string, contents []byte, mode os.FileMode) error { func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { err := WriteFile(filePath, contents, mode) if err != nil { - Exit(Fmt("MustWriteFile failed: %v", err)) + Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) } } diff --git a/libs/common/random.go b/libs/common/random.go index 4b0594d0e58..2de65945c58 100644 --- a/libs/common/random.go +++ b/libs/common/random.go @@ -295,7 +295,7 @@ func (r *Rand) Perm(n int) []int { // NOTE: This relies on the os's random number generator. // For real security, we should salt that with some seed. -// See github.com/tendermint/go-crypto for a more secure reader. +// See github.com/tendermint/tendermint/crypto for a more secure reader. func cRandBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) diff --git a/libs/common/service.go b/libs/common/service.go index b6f166e77cf..96a5e632af0 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -9,8 +9,15 @@ import ( ) var ( + // ErrAlreadyStarted is returned when somebody tries to start an already + // running service. ErrAlreadyStarted = errors.New("already started") + // ErrAlreadyStopped is returned when somebody tries to stop an already + // stopped service (without resetting it). ErrAlreadyStopped = errors.New("already stopped") + // ErrNotStarted is returned when somebody tries to stop a service that + // has not been started. + ErrNotStarted = errors.New("not started") ) // Service defines a service that can be started, stopped, and reset. @@ -123,10 +130,12 @@ func (bs *BaseService) SetLogger(l log.Logger) { func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { - bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + bs.Logger.Error(fmt.Sprintf("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + // revert flag + atomic.StoreUint32(&bs.started, 0) return ErrAlreadyStopped } - bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) + bs.Logger.Info(fmt.Sprintf("Starting %v", bs.name), "impl", bs.impl) err := bs.impl.OnStart() if err != nil { // revert flag @@ -135,7 +144,7 @@ func (bs *BaseService) Start() error { return nil } - bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Not starting %v -- already started", bs.name), "impl", bs.impl) return ErrAlreadyStarted } @@ -148,12 +157,18 @@ func (bs *BaseService) OnStart() error { return nil } // channel. An error will be returned if the service is already stopped.
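// It returns ErrNotStarted if the service was never started. A minimal
// sketch of the resulting lifecycle, assuming a hypothetical FooService
// embedding BaseService (NewBaseService and the sentinel errors are the ones
// defined in this file):
//
//	type FooService struct {
//		BaseService
//	}
//
//	func NewFooService(logger log.Logger) *FooService {
//		fs := &FooService{}
//		fs.BaseService = *NewBaseService(logger, "FooService", fs)
//		return fs
//	}
//
//	fs := NewFooService(log.NewNopLogger())
//	err := fs.Stop()  // ErrNotStarted: Stop before Start is rejected
//	err = fs.Start()  // nil: runs OnStart and marks the service started
//	err = fs.Start()  // ErrAlreadyStarted
//	err = fs.Stop()   // nil: runs OnStop and closes the quit channel
//	err = fs.Stop()   // ErrAlreadyStopped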
func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) + if atomic.LoadUint32(&bs.started) == 0 { + bs.Logger.Error(fmt.Sprintf("Not stopping %v -- has not been started yet", bs.name), "impl", bs.impl) + // revert flag + atomic.StoreUint32(&bs.stopped, 0) + return ErrNotStarted + } + bs.Logger.Info(fmt.Sprintf("Stopping %v", bs.name), "impl", bs.impl) bs.impl.OnStop() close(bs.quit) return nil } - bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) return ErrAlreadyStopped } @@ -166,7 +181,7 @@ func (bs *BaseService) OnStop() {} // will be returned if the service is running. func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) return fmt.Errorf("can't reset running %s", bs.name) } diff --git a/libs/common/string.go b/libs/common/string.go index fac1be6c959..e125043d160 100644 --- a/libs/common/string.go +++ b/libs/common/string.go @@ -1,36 +1,10 @@ package common import ( - "encoding/hex" "fmt" "strings" ) -// Like fmt.Sprintf, but skips formatting if args are empty. -var Fmt = func(format string, a ...interface{}) string { - if len(a) == 0 { - return format - } - return fmt.Sprintf(format, a...) -} - -// IsHex returns true for non-empty hex-string prefixed with "0x" -func IsHex(s string) bool { - if len(s) > 2 && strings.EqualFold(s[:2], "0x") { - _, err := hex.DecodeString(s[2:]) - return err == nil - } - return false -} - -// StripHex returns hex string without leading "0x" -func StripHex(s string) string { - if IsHex(s) { - return s[2:] - } - return s -} - // StringInSlice returns true if a is found the list.
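// A note on the Fmt removal above, since the rest of this diff rewrites its
// call sites to fmt.Sprintf: the two are not drop-in equivalents for
// zero-argument calls. Fmt returned the format string verbatim when no
// arguments were given, while fmt.Sprintf always interprets verbs. An
// illustrative (hypothetical) pair:
//
//	Fmt("progress: 100%")         // returned "progress: 100%" unchanged
//	fmt.Sprintf("progress: 100%") // returns "progress: 100%!(NOVERB)"
//
// A zero-argument call site containing a literal '%' must therefore escape
// it as "%%" when migrating; the calls rewritten in this change all pass
// real arguments, so they behave identically before and after.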
func StringInSlice(a string, list []string) bool { for _, b := range list { diff --git a/libs/common/string_test.go b/libs/common/string_test.go index 5d1b68febc3..5e7ae98ccdb 100644 --- a/libs/common/string_test.go +++ b/libs/common/string_test.go @@ -13,49 +13,12 @@ func TestStringInSlice(t *testing.T) { assert.False(t, StringInSlice("", []string{})) } -func TestIsHex(t *testing.T) { - notHex := []string{ - "", " ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a", - "0xf ", "0x f", "0xp", "0x-", - "0xf", "0XBED", "0xF", "0xbed", // Odd lengths - } - for _, v := range notHex { - assert.False(t, IsHex(v), "%q is not hex", v) - } - hex := []string{ - "0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED", - "0X12", "0X0A", - } - for _, v := range hex { - assert.True(t, IsHex(v), "%q is hex", v) - } -} - -func TestSplitAndTrim(t *testing.T) { - testCases := []struct { - s string - sep string - cutset string - expected []string - }{ - {"a,b,c", ",", " ", []string{"a", "b", "c"}}, - {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, - {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, - {" , ", ",", " ", []string{"", ""}}, - {" ", ",", " ", []string{""}}, - } - - for _, tc := range testCases { - assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s) - } -} - func TestIsASCIIText(t *testing.T) { notASCIIText := []string{ "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", } for _, v := range notASCIIText { - assert.False(t, IsHex(v), "%q is not ascii-text", v) + assert.False(t, IsASCIIText(v), "%q is not ascii-text", v) } asciiText := []string{ " ", ".", "x", "$", "_", "abcdefg;", "-", "0x00", "0", "123", diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go index 6442daeb49a..716d28a0651 100644 --- a/libs/common/types.pb.go +++ b/libs/common/types.pb.go @@ -1,16 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/common/types.proto -/* - Package common is a generated protocol buffer package. - - It is generated from these files: - libs/common/types.proto - - It has these top-level messages: - KVPair - KI64Pair -*/ //nolint package common @@ -36,16 +26,47 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// Define these here for compatibility but use tmlibs/common.KVPair. +// Define these here for compatibility but use libs/common.KVPair. 
type KVPair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *KVPair) Reset() { *m = KVPair{} } -func (m *KVPair) String() string { return proto.CompactTextString(m) } -func (*KVPair) ProtoMessage() {} -func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{0} +} +func (m *KVPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KVPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KVPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KVPair.Merge(dst, src) +} +func (m *KVPair) XXX_Size() int { + return m.Size() +} +func (m *KVPair) XXX_DiscardUnknown() { + xxx_messageInfo_KVPair.DiscardUnknown(m) +} + +var xxx_messageInfo_KVPair proto.InternalMessageInfo func (m *KVPair) GetKey() []byte { if m != nil { @@ -61,16 +82,47 @@ func (m *KVPair) GetValue() []byte { return nil } -// Define these here for compatibility but use tmlibs/common.KI64Pair. +// Define these here for compatibility but use libs/common.KI64Pair. 
type KI64Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{1} +} +func (m *KI64Pair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KI64Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KI64Pair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KI64Pair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KI64Pair.Merge(dst, src) +} +func (m *KI64Pair) XXX_Size() int { + return m.Size() +} +func (m *KI64Pair) XXX_DiscardUnknown() { + xxx_messageInfo_KI64Pair.DiscardUnknown(m) } -func (m *KI64Pair) Reset() { *m = KI64Pair{} } -func (m *KI64Pair) String() string { return proto.CompactTextString(m) } -func (*KI64Pair) ProtoMessage() {} -func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +var xxx_messageInfo_KI64Pair proto.InternalMessageInfo func (m *KI64Pair) GetKey() []byte { if m != nil { @@ -117,6 +169,9 @@ func (this *KVPair) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *KI64Pair) Equal(that interface{}) bool { @@ -144,6 +199,9 @@ func (this *KI64Pair) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (m *KVPair) Marshal() (dAtA []byte, err error) { @@ -173,6 +231,9 @@ func (m *KVPair) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -202,6 +263,9 @@ func (m *KI64Pair) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -227,6 +291,7 @@ func NewPopulatedKVPair(r randyTypes, easy bool) *KVPair { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -243,6 +308,7 @@ func NewPopulatedKI64Pair(r randyTypes, easy bool) *KI64Pair { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -330,6 +396,9 @@ func (m *KVPair) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -343,6 +412,9 @@ func (m *KI64Pair) Size() (n int) { if m.Value != 0 { n += 1 + sovTypes(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -462,6 +534,7 @@ func (m *KVPair) 
Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -562,6 +635,7 @@ func (m *KI64Pair) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -676,10 +750,12 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } -func init() { golang_proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) } +func init() { + golang_proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) +} -var fileDescriptorTypes = []byte{ +var fileDescriptor_types_611b4364a8604338 = []byte{ // 174 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xc9, 0x4c, 0x2a, 0xd6, 0x4f, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/libs/common/typespb_test.go b/libs/common/typespb_test.go index 583c9050247..439cc1273ab 100644 --- a/libs/common/typespb_test.go +++ b/libs/common/typespb_test.go @@ -1,23 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/common/types.proto -/* -Package common is a generated protocol buffer package. - -It is generated from these files: - libs/common/types.proto - -It has these top-level messages: - KVPair - KI64Pair -*/ package common import testing "testing" -import rand "math/rand" +import math_rand "math/rand" import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" import proto "github.com/gogo/protobuf/proto" -import jsonpb "github.com/gogo/protobuf/jsonpb" import golang_proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" @@ -31,14 +22,14 @@ var _ = math.Inf func TestKVPairProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -56,13 +47,13 @@ func TestKVPairProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestKVPairMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -74,7 +65,7 @@ func TestKVPairMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range 
dAtA { @@ -87,14 +78,14 @@ func TestKVPairMarshalTo(t *testing.T) { func TestKI64PairProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -112,13 +103,13 @@ func TestKI64PairProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestKI64PairMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -130,7 +121,7 @@ func TestKI64PairMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -143,15 +134,15 @@ func TestKI64PairMarshalTo(t *testing.T) { func TestKVPairJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -161,15 +152,15 @@ func TestKVPairJSON(t *testing.T) { } func TestKI64PairJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -179,11 +170,11 @@ func TestKI64PairJSON(t *testing.T) { } func TestKVPairProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &KVPair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -193,11 +184,11 @@ func TestKVPairProtoText(t *testing.T) { func TestKVPairProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := 
math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &KVPair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -207,11 +198,11 @@ func TestKVPairProtoCompactText(t *testing.T) { func TestKI64PairProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &KI64Pair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -221,11 +212,11 @@ func TestKI64PairProtoText(t *testing.T) { func TestKI64PairProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &KI64Pair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -235,10 +226,10 @@ func TestKI64PairProtoCompactText(t *testing.T) { func TestKVPairSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -249,7 +240,7 @@ func TestKVPairSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -257,10 +248,10 @@ func TestKVPairSize(t *testing.T) { func TestKI64PairSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -271,7 +262,7 @@ func TestKI64PairSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } diff --git a/libs/common/word.go b/libs/common/word.go deleted file mode 100644 index a5b841f553f..00000000000 --- a/libs/common/word.go +++ /dev/null @@ -1,90 +0,0 @@ -package common - -import ( - "bytes" - "sort" -) - -var ( - Zero256 = Word256{0} - One256 = Word256{1} -) - -type Word256 [32]byte - -func (w Word256) 
String() string { return string(w[:]) } -func (w Word256) TrimmedString() string { return TrimmedString(w.Bytes()) } -func (w Word256) Copy() Word256 { return w } -func (w Word256) Bytes() []byte { return w[:] } // copied. -func (w Word256) Prefix(n int) []byte { return w[:n] } -func (w Word256) Postfix(n int) []byte { return w[32-n:] } -func (w Word256) IsZero() bool { - accum := byte(0) - for _, byt := range w { - accum |= byt - } - return accum == 0 -} -func (w Word256) Compare(other Word256) int { - return bytes.Compare(w[:], other[:]) -} - -func Uint64ToWord256(i uint64) Word256 { - buf := [8]byte{} - PutUint64BE(buf[:], i) - return LeftPadWord256(buf[:]) -} - -func Int64ToWord256(i int64) Word256 { - buf := [8]byte{} - PutInt64BE(buf[:], i) - return LeftPadWord256(buf[:]) -} - -func RightPadWord256(bz []byte) (word Word256) { - copy(word[:], bz) - return -} - -func LeftPadWord256(bz []byte) (word Word256) { - copy(word[32-len(bz):], bz) - return -} - -func Uint64FromWord256(word Word256) uint64 { - buf := word.Postfix(8) - return GetUint64BE(buf) -} - -func Int64FromWord256(word Word256) int64 { - buf := word.Postfix(8) - return GetInt64BE(buf) -} - -//------------------------------------- - -type Tuple256 struct { - First Word256 - Second Word256 -} - -func (tuple Tuple256) Compare(other Tuple256) int { - firstCompare := tuple.First.Compare(other.First) - if firstCompare == 0 { - return tuple.Second.Compare(other.Second) - } - return firstCompare -} - -func Tuple256Split(t Tuple256) (Word256, Word256) { - return t.First, t.Second -} - -type Tuple256Slice []Tuple256 - -func (p Tuple256Slice) Len() int { return len(p) } -func (p Tuple256Slice) Less(i, j int) bool { - return p[i].Compare(p[j]) < 0 -} -func (p Tuple256Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Tuple256Slice) Sort() { sort.Sort(p) } diff --git a/libs/db/LICENSE.md b/libs/db/LICENSE.md deleted file mode 100644 index ab8da59d848..00000000000 --- a/libs/db/LICENSE.md +++ /dev/null @@ -1,3 +0,0 @@ -Tendermint Go-DB Copyright (C) 2015 All in Bits, Inc - -Released under the Apache2.0 license diff --git a/libs/db/README.md b/libs/db/README.md deleted file mode 100644 index ca5ab33f9a7..00000000000 --- a/libs/db/README.md +++ /dev/null @@ -1 +0,0 @@ -TODO: syndtr/goleveldb should be replaced with actual LevelDB instance diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go index b31b4d74ad0..2aebde1c6b2 100644 --- a/libs/db/backend_test.go +++ b/libs/db/backend_test.go @@ -13,7 +13,10 @@ import ( ) func cleanupDBDir(dir, name string) { - os.RemoveAll(filepath.Join(dir, name) + ".db") + err := os.RemoveAll(filepath.Join(dir, name) + ".db") + if err != nil { + panic(err) + } } func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { @@ -21,6 +24,7 @@ func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend)) require.Nil(t, err) db := NewDB("testdb", backend, dirname) + defer cleanupDBDir(dirname, "testdb") // A nonexistent key should return nil, even if the key is empty require.Nil(t, db.Get([]byte(""))) @@ -54,10 +58,11 @@ func TestBackendsGetSetDelete(t *testing.T) { } func withDB(t *testing.T, creator dbCreator, fn func(DB)) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db, err := creator(name, "") - defer cleanupDBDir("", name) - assert.Nil(t, err) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + dir := os.TempDir() + db, err := creator(name, dir) + require.Nil(t, err) + defer cleanupDBDir(dir, name) 
fn(db) db.Close() } @@ -143,7 +148,7 @@ func TestBackendsNilKeys(t *testing.T) { } func TestGoLevelDBBackend(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) db := NewDB(name, GoLevelDBBackend, "") defer cleanupDBDir("", name) @@ -160,9 +165,10 @@ func TestDBIterator(t *testing.T) { } func testDBIterator(t *testing.T, backend DBBackendType) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, backend, "") - defer cleanupDBDir("", name) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + dir := os.TempDir() + db := NewDB(name, backend, dir) + defer cleanupDBDir(dir, name) for i := 0; i < 10; i++ { if i != 6 { // but skip 6. diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go index 2d30500dd07..eab3dfc41e0 100644 --- a/libs/db/c_level_db_test.go +++ b/libs/db/c_level_db_test.go @@ -5,9 +5,11 @@ package db import ( "bytes" "fmt" + "os" "testing" "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -19,7 +21,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -32,7 +34,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { // Write something { idx := (int64(cmn.RandInt()) % numItems) - internal[idx] += 1 + internal[idx]++ val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := int642Bytes(int64(val)) @@ -87,9 +89,12 @@ func bytes2Int64(buf []byte) int64 { */ func TestCLevelDBBackend(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackend, "") - defer cleanupDBDir("", name) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + // Can't use "" (current directory) or "./" here because levigo.Open returns: + // "Error initializing DB: IO error: test_XXX.db: Invalid argument" + dir := os.TempDir() + db := NewDB(name, LevelDBBackend, dir) + defer cleanupDBDir(dir, name) _, ok := db.(*CLevelDB) assert.True(t, ok) diff --git a/libs/db/common_test.go b/libs/db/common_test.go index 68420cd2b98..1e27a7cac88 100644 --- a/libs/db/common_test.go +++ b/libs/db/common_test.go @@ -57,14 +57,13 @@ func checkKeyPanics(t *testing.T, itr Iterator) { } func checkValuePanics(t *testing.T, itr Iterator) { - assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") + assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend DBBackendType) (db DB) { +func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) { dirname, err := ioutil.TempDir("", "db_common_test") require.Nil(t, err) - db = NewDB("testdb", backend, dirname) - return db + return NewDB("testdb", backend, dirname), dirname } //---------------------------------------- diff --git a/libs/db/db.go b/libs/db/db.go index 86993766008..8a3975a849a 100644 --- a/libs/db/db.go +++ b/libs/db/db.go @@ -1,6 +1,9 @@ package db -import "fmt" +import ( + "fmt" + "strings" +) //---------------------------------------- // Main entry @@ -27,8 +30,23 @@ func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { backends[backend] = creator } +// NewDB creates a new database of type backend with the given name. 
+// NOTE: function panics if: +// - backend is unknown (not registered) +// - creator function, provided during registration, returns error func NewDB(name string, backend DBBackendType, dir string) DB { - db, err := backends[backend](name, dir) + dbCreator, ok := backends[backend] + if !ok { + keys := make([]string, len(backends)) + i := 0 + for k := range backends { + keys[i] = string(k) + i++ + } + panic(fmt.Sprintf("Unknown db_backend %s, expected either %s", backend, strings.Join(keys, " or "))) + } + + db, err := dbCreator(name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) } diff --git a/libs/db/db_test.go b/libs/db/db_test.go index a5690101673..ffa7bb6aadb 100644 --- a/libs/db/db_test.go +++ b/libs/db/db_test.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "os" "testing" "github.com/stretchr/testify/assert" @@ -10,7 +11,9 @@ import ( func TestDBIteratorSingleKey(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) + db.SetSync(bz("1"), bz("value_1")) itr := db.Iterator(nil, nil) @@ -28,7 +31,9 @@ func TestDBIteratorSingleKey(t *testing.T) { func TestDBIteratorTwoKeys(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) + db.SetSync(bz("1"), bz("value_1")) db.SetSync(bz("2"), bz("value_1")) @@ -54,7 +59,8 @@ func TestDBIteratorTwoKeys(t *testing.T) { func TestDBIteratorMany(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) keys := make([][]byte, 100) for i := 0; i < 100; i++ { @@ -78,7 +84,9 @@ func TestDBIteratorMany(t *testing.T) { func TestDBIteratorEmpty(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) + itr := db.Iterator(nil, nil) checkInvalid(t, itr) @@ -89,7 +97,9 @@ func TestDBIteratorEmpty(t *testing.T) { func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) + itr := db.Iterator(bz("1"), nil) checkInvalid(t, itr) @@ -100,7 +110,9 @@ func TestDBIteratorEmptyBeginAfter(t *testing.T) { func TestDBIteratorNonemptyBeginAfter(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) + db.SetSync(bz("1"), bz("value_1")) itr := db.Iterator(bz("2"), nil) diff --git a/libs/db/fsdb.go b/libs/db/fsdb.go index fc861deccba..92c059d42e5 100644 --- a/libs/db/fsdb.go +++ b/libs/db/fsdb.go @@ -10,7 +10,9 @@ import ( "sync" "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" + tmerrors "github.com/tendermint/tendermint/libs/errors" ) const ( @@ -205,6 +207,13 @@ func write(path string, d []byte) error { return err } defer f.Close() + fInfo, err := f.Stat() + if err != nil { + return err + } + if fInfo.Mode() != keyPerm { + return tmerrors.NewErrPermissionsChanged(f.Name(), keyPerm, fInfo.Mode()) + } _, err = 
f.Write(d) if err != nil { return err diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index 349e447b2bf..8a48879218c 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -28,8 +28,12 @@ type GoLevelDB struct { } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { + return NewGoLevelDBWithOpts(name, dir, nil) +} + +func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) { dbPath := filepath.Join(dir, name+".db") - db, err := leveldb.OpenFile(dbPath, nil) + db, err := leveldb.OpenFile(dbPath, o) if err != nil { return nil, err } diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go index 47be216a652..c24eec3c8bb 100644 --- a/libs/db/go_level_db_test.go +++ b/libs/db/go_level_db_test.go @@ -4,11 +4,34 @@ import ( "bytes" "encoding/binary" "fmt" + "os" "testing" + "github.com/syndtr/goleveldb/leveldb/opt" + + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" ) +func TestNewGoLevelDB(t *testing.T) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + // Test write locks + db, err := NewGoLevelDB(name, "") + require.Nil(t, err) + defer os.RemoveAll("./" + name + ".db") + _, err = NewGoLevelDB(name, "") + require.NotNil(t, err) + db.Close() // Close the db to release the lock + + // Open the db twice in a row to test read-only locks. + // Check the error before deferring Close: the handle is nil on failure. + ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) + require.Nil(t, err) + defer ro1.Close() + ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) + require.Nil(t, err) + defer ro2.Close() +} + func BenchmarkRandomReadsWrites(b *testing.B) { b.StopTimer() @@ -17,7 +40,7 @@ for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return diff --git a/libs/db/prefix_db.go b/libs/db/prefix_db.go index 5bb53ebd9d3..9dc4ee97d43 100644 --- a/libs/db/prefix_db.go +++ b/libs/db/prefix_db.go @@ -265,6 +265,8 @@ func (pb prefixBatch) WriteSync() { //---------------------------------------- // prefixIterator +var _ Iterator = (*prefixIterator)(nil) + // Strips prefix while iterating from Iterator.
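// The receiver change below (value -> pointer) matters because Next() sets
// itr.valid = false once the underlying iterator leaves the prefix range;
// with a value receiver that write landed on a copy, so an exhausted
// iterator could keep reporting Valid() == true. A minimal illustration of
// the general pitfall, using a hypothetical type rather than this package's
// API:
//
//	type iter struct{ valid bool }
//
//	func (i iter) stopByValue()    { i.valid = false } // mutates a copy only
//	func (i *iter) stopByPointer() { i.valid = false } // mutates the receiver
//
// The `var _ Iterator = (*prefixIterator)(nil)` guard above pins the pointer
// type to the Iterator interface at compile time.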
type prefixIterator struct { prefix []byte @@ -274,9 +276,9 @@ type prefixIterator struct { valid bool } -func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterator { +func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator { if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { - return prefixIterator{ + return &prefixIterator{ prefix: prefix, start: start, end: end, @@ -284,7 +286,7 @@ func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterato valid: false, } } else { - return prefixIterator{ + return &prefixIterator{ prefix: prefix, start: start, end: end, @@ -294,15 +296,15 @@ func newPrefixIterator(prefix, start, end []byte, source Iterator) prefixIterato } } -func (itr prefixIterator) Domain() (start []byte, end []byte) { +func (itr *prefixIterator) Domain() (start []byte, end []byte) { return itr.start, itr.end } -func (itr prefixIterator) Valid() bool { +func (itr *prefixIterator) Valid() bool { return itr.valid && itr.source.Valid() } -func (itr prefixIterator) Next() { +func (itr *prefixIterator) Next() { if !itr.valid { panic("prefixIterator invalid, cannot call Next()") } @@ -314,21 +316,21 @@ func (itr prefixIterator) Next() { } } -func (itr prefixIterator) Key() (key []byte) { +func (itr *prefixIterator) Key() (key []byte) { if !itr.valid { panic("prefixIterator invalid, cannot call Key()") } return stripPrefix(itr.source.Key(), itr.prefix) } -func (itr prefixIterator) Value() (value []byte) { +func (itr *prefixIterator) Value() (value []byte) { if !itr.valid { panic("prefixIterator invalid, cannot call Value()") } return itr.source.Value() } -func (itr prefixIterator) Close() { +func (itr *prefixIterator) Close() { itr.source.Close() } diff --git a/libs/db/util_test.go b/libs/db/util_test.go index 44f1f9f73cd..07f9dd23eec 100644 --- a/libs/db/util_test.go +++ b/libs/db/util_test.go @@ -2,6 +2,7 @@ package db import ( "fmt" + "os" "testing" ) @@ -9,7 +10,8 @@ import ( func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) itr := IteratePrefix(db, []byte("2")) checkInvalid(t, itr) @@ -21,7 +23,8 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { func TestPrefixIteratorNoMatch1(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) itr := IteratePrefix(db, []byte("2")) db.SetSync(bz("1"), bz("value_1")) @@ -34,7 +37,8 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { func TestPrefixIteratorNoMatch2(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) db.SetSync(bz("3"), bz("value_3")) itr := IteratePrefix(db, []byte("4")) @@ -47,7 +51,8 @@ func TestPrefixIteratorNoMatch2(t *testing.T) { func TestPrefixIteratorMatch1(t *testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) db.SetSync(bz("2"), bz("value_2")) itr := IteratePrefix(db, bz("2")) @@ -65,7 +70,8 @@ func TestPrefixIteratorMatch1(t *testing.T) { func TestPrefixIteratorMatches1N(t 
*testing.T) { for backend := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + db, dir := newTempDB(t, backend) + defer os.RemoveAll(dir) // prefixed db.SetSync(bz("a/1"), bz("value_1")) diff --git a/libs/errors/errors.go b/libs/errors/errors.go new file mode 100644 index 00000000000..ae5d9439270 --- /dev/null +++ b/libs/errors/errors.go @@ -0,0 +1,28 @@ +// Package errors contains errors that are thrown across packages. +package errors + +import ( + "fmt" + "os" +) + +// ErrPermissionsChanged occurs if the file permissions have changed since the file was created. +type ErrPermissionsChanged struct { + name string + got, want os.FileMode +} + +// NewErrPermissionsChanged returns an ErrPermissionsChanged; want is the +// expected file mode and got is the mode actually found on disk. +func NewErrPermissionsChanged(name string, want, got os.FileMode) *ErrPermissionsChanged { + return &ErrPermissionsChanged{name: name, got: got, want: want} +} + +func (e ErrPermissionsChanged) Error() string { + return fmt.Sprintf( + "file: [%v]\nexpected file permissions: %v, got: %v", + e.name, + e.want, + e.got, + ) +} diff --git a/libs/events/events.go b/libs/events/events.go index 9c7f0fd0525..fb90bbea673 100644 --- a/libs/events/events.go +++ b/libs/events/events.go @@ -1,36 +1,52 @@ -/* -Pub-Sub in go with event caching -*/ +// Package events - Pub-Sub in go with event caching package events import ( + "fmt" "sync" cmn "github.com/tendermint/tendermint/libs/common" ) -// Generic event data can be typed and registered with tendermint/go-amino -// via concrete implementation of this interface -type EventData interface { - //AssertIsEventData() +// ErrListenerWasRemoved is returned by AddEvent if the listener was removed. +type ErrListenerWasRemoved struct { + listenerID string } -// reactors and other modules should export -// this interface to become eventable +// Error implements the error interface. +func (e ErrListenerWasRemoved) Error() string { + return fmt.Sprintf("listener #%s was removed", e.listenerID) +} + +// EventData is generic event data that can be typed and registered with +// tendermint/go-amino via a concrete implementation of this interface. +type EventData interface{} + +// Eventable is the interface reactors and other modules must export to become +// eventable. type Eventable interface { SetEventSwitch(evsw EventSwitch) } -// an event switch or cache implements fireable +// Fireable is the interface that wraps the FireEvent method. +// +// FireEvent fires an event with the given name and data. type Fireable interface { FireEvent(event string, data EventData) } +// EventSwitch is the interface for synchronous pubsub, where listeners +// subscribe to certain events and, when an event is fired (see Fireable), +// are notified via a callback function. +// +// Listeners are added by calling the AddListenerForEvent function. +// They can be removed by calling either RemoveListenerForEvent or +// RemoveListener (for all events). type EventSwitch interface { cmn.Service Fireable - AddListenerForEvent(listenerID, event string, cb EventCallback) + AddListenerForEvent(listenerID, event string, cb EventCallback) error RemoveListenerForEvent(event string, listenerID string) RemoveListener(listenerID string) } @@ -58,8 +74,8 @@ func (evsw *eventSwitch) OnStart() error { func (evsw *eventSwitch) OnStop() {} -func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) { - // Get/Create eventCell and listener +func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) error { + // Get/Create eventCell and listener.
evsw.mtx.Lock() eventCell := evsw.eventCells[event] if eventCell == nil { @@ -73,13 +89,17 @@ func (evsw *eventSwitch) AddListenerForEvent(listenerID, event string, cb EventC } evsw.mtx.Unlock() - // Add event and listener + // Add event and listener. + if err := listener.AddEvent(event); err != nil { + return err + } eventCell.AddListener(listenerID, cb) - listener.AddEvent(event) + + return nil } func (evsw *eventSwitch) RemoveListener(listenerID string) { - // Get and remove listener + // Get and remove listener. evsw.mtx.RLock() listener := evsw.listeners[listenerID] evsw.mtx.RUnlock() @@ -168,10 +188,15 @@ func (cell *eventCell) RemoveListener(listenerID string) int { func (cell *eventCell) FireEvent(data EventData) { cell.mtx.RLock() - for _, listener := range cell.listeners { - listener(data) + var eventCallbacks []EventCallback + for _, cb := range cell.listeners { + eventCallbacks = append(eventCallbacks, cb) } cell.mtx.RUnlock() + + for _, cb := range eventCallbacks { + cb(data) + } } //----------------------------------------------------------------------------- @@ -194,27 +219,29 @@ func newEventListener(id string) *eventListener { } } -func (evl *eventListener) AddEvent(event string) { +func (evl *eventListener) AddEvent(event string) error { evl.mtx.Lock() - defer evl.mtx.Unlock() if evl.removed { - return + evl.mtx.Unlock() + return ErrListenerWasRemoved{listenerID: evl.id} } + evl.events = append(evl.events, event) + evl.mtx.Unlock() + return nil } func (evl *eventListener) GetEvents() []string { evl.mtx.RLock() - defer evl.mtx.RUnlock() - events := make([]string, len(evl.events)) copy(events, evl.events) + evl.mtx.RUnlock() return events } func (evl *eventListener) SetRemoved() { evl.mtx.Lock() - defer evl.mtx.Unlock() evl.removed = true + evl.mtx.Unlock() } diff --git a/libs/events/events_test.go b/libs/events/events_test.go index a01fbbb770a..8d87986c758 100644 --- a/libs/events/events_test.go +++ b/libs/events/events_test.go @@ -6,6 +6,8 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -14,12 +16,14 @@ import ( func TestAddListenerForEventFireOnce(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + messages := make(chan EventData) evsw.AddListenerForEvent("listener", "event", func(data EventData) { + // test there's no deadlock if we remove the listener inside a callback + evsw.RemoveListener("listener") messages <- data }) go evsw.FireEvent("event", "data") @@ -34,9 +38,9 @@ func TestAddListenerForEventFireOnce(t *testing.T) { func TestAddListenerForEventFireMany(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + doneSum := make(chan uint64) doneSending := make(chan uint64) numbers := make(chan uint64, 4) @@ -63,9 +67,9 @@ func TestAddListenerForEventFireMany(t *testing.T) { func TestAddListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + doneSum := make(chan uint64) doneSending1 := make(chan uint64) doneSending2 := make(chan uint64) @@ -108,9 +112,9 @@ func TestAddListenerForDifferentEvents(t *testing.T) { func 
TestAddDifferentListenerForDifferentEvents(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -162,15 +166,61 @@ func TestAddDifferentListenerForDifferentEvents(t *testing.T) { } } +func TestAddAndRemoveListenerConcurrency(t *testing.T) { + var ( + stopInputEvent = false + roundCount = 2000 + ) + + evsw := NewEventSwitch() + err := evsw.Start() + require.NoError(t, err) + defer evsw.Stop() + + done1 := make(chan struct{}) + done2 := make(chan struct{}) + + // Must be executed concurrently to uncover the data race. + // 1. RemoveListener + go func() { + for i := 0; i < roundCount; i++ { + evsw.RemoveListener("listener") + } + close(done1) + }() + + // 2. AddListenerForEvent + go func() { + for i := 0; i < roundCount; i++ { + index := i + evsw.AddListenerForEvent("listener", fmt.Sprintf("event%d", index), + func(data EventData) { + t.Errorf("should not run callback for %d.\n", index) + stopInputEvent = true + }) + } + close(done2) + }() + + <-done1 + <-done2 + + evsw.RemoveListener("listener") // remove the last listener + + for i := 0; i < roundCount && !stopInputEvent; i++ { + evsw.FireEvent(fmt.Sprintf("event%d", i), uint64(1001)) + } +} + // TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to // two events, fires a thousand integers for the first event, then unsubscribes // the listener and fires a thousand integers for the second event. func TestAddAndRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -213,9 +263,9 @@ func TestAddAndRemoveListener(t *testing.T) { func TestRemoveListener(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + count := 10 sum1, sum2 := 0, 0 // add some listeners and make sure they work @@ -266,9 +316,9 @@ func TestRemoveListener(t *testing.T) { func TestRemoveListenersAsync(t *testing.T) { evsw := NewEventSwitch() err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } + require.NoError(t, err) + defer evsw.Stop() + doneSum1 := make(chan uint64) doneSum2 := make(chan uint64) doneSending1 := make(chan uint64) @@ -351,7 +401,7 @@ func TestRemoveListenersAsync(t *testing.T) { // until the receiving channel `numbers` is closed; it then sends the sum // on `doneSum` and closes that channel. Expected to be run in a go-routine. func sumReceivedNumbers(numbers, doneSum chan uint64) { - var sum uint64 = 0 + var sum uint64 for { j, more := <-numbers sum += j @@ -370,7 +420,7 @@ func sumReceivedNumbers(numbers, doneSum chan uint64) { // the test to assert all events have also been received. 
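// The tests above exercise the error-returning AddListenerForEvent; a hedged
// sketch of the new call shape for production callers (listener and event
// names are placeholders):
//
//	evsw := NewEventSwitch()
//	if err := evsw.Start(); err != nil {
//		// handle startup failure
//	}
//	err := evsw.AddListenerForEvent("listener", "event", func(data EventData) {
//		// handle data
//	})
//	if err != nil {
//		// ErrListenerWasRemoved if "listener" was removed concurrently
//	}
//	evsw.FireEvent("event", "some data")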
func fireEvents(evsw EventSwitch, event string, doneChan chan uint64, offset uint64) { - var sentSum uint64 = 0 + var sentSum uint64 for i := offset; i <= offset+uint64(999); i++ { sentSum += i evsw.FireEvent(event, i) diff --git a/libs/fail/fail.go b/libs/fail/fail.go new file mode 100644 index 00000000000..edfca13e310 --- /dev/null +++ b/libs/fail/fail.go @@ -0,0 +1,78 @@ +package fail + +import ( + "fmt" + "math/rand" + "os" + "strconv" +) + +var callIndexToFail int + +func init() { + callIndexToFailS := os.Getenv("FAIL_TEST_INDEX") + + if callIndexToFailS == "" { + callIndexToFail = -1 + } else { + var err error + callIndexToFail, err = strconv.Atoi(callIndexToFailS) + if err != nil { + callIndexToFail = -1 + } + } +} + +// Fail when FAIL_TEST_INDEX == callIndex +var ( + callIndex int // indexes Fail calls + + callRandIndex int // indexes a run of FailRand calls + callRandIndexToFail = -1 // the callRandIndex to fail on in FailRand +) + +func Fail() { + if callIndexToFail < 0 { + return + } + + if callIndex == callIndexToFail { + Exit() + } + + callIndex += 1 +} + +// FailRand should be called n successive times. +// It will fail on a random one of those calls. +// n must be greater than 0. +func FailRand(n int) { + if callIndexToFail < 0 { + return + } + + if callRandIndexToFail < 0 { + // first call in the loop, pick a random index to fail at + callRandIndexToFail = rand.Intn(n) + callRandIndex = 0 + } + + if callIndex == callIndexToFail { + if callRandIndex == callRandIndexToFail { + Exit() + } + } + + callRandIndex += 1 + + if callRandIndex == n { + callIndex += 1 + } +} + +func Exit() { + fmt.Printf("*** fail-test %d ***\n", callIndex) + proc, _ := os.FindProcess(os.Getpid()) + proc.Signal(os.Interrupt) + // panic(fmt.Sprintf("*** fail-test %d ***", callIndex)) +} diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index d0397971858..247ce8fc0c8 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -84,13 +84,13 @@ func (l tmfmtLogger) Log(keyvals ...interface{}) error { // Form a custom Tendermint line // // Example: - // D[05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) + // D[2016-05-02|11:06:44.322] Stopping AddrBook (ignoring: already stopped) // // Description: // D - first character of the level, uppercase (ASCII only) - // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) + // [2016-05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) // Stopping ... - message - enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("2006-01-02|15:04:05.000"), msg)) if module != unknown { enc.buf.WriteString("module=" + module + " ") diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 4c0d97e2ffc..18f098d876a 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -9,6 +9,39 @@ // When some message is published, we match it with all queries. If there is a // match, this message will be pushed to all clients, subscribed to that query. // See query subpackage for our implementation. +// +// Due to the blocking send implementation, a single subscriber can freeze an +// entire server by not reading messages before it unsubscribes.
To avoid such a +// scenario, subscribers must either: +// +// a) make sure they continue to read from the out channel until +// Unsubscribe(All) is called +// +// s.Subscribe(ctx, sub, qry, out) +// go func() { +// for msg := range out { +// // handle msg +// // will exit automatically when out is closed by Unsubscribe(All) +// } +// }() +// s.UnsubscribeAll(ctx, sub) +// +// b) drain the out channel before calling Unsubscribe(All) +// +// s.Subscribe(ctx, sub, qry, out) +// defer func() { +// for range out { +// // drain out to make sure we don't block +// } +// s.UnsubscribeAll(ctx, sub) +// }() +// for msg := range out { +// // handle msg +// if err != nil { +// return err +// } +// } +// package pubsub import ( @@ -291,7 +324,8 @@ loop: } func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed + + // initialize clientToChannelMap per query if needed if _, ok := state.queries[q]; !ok { state.queries[q] = make(map[string]chan<- interface{}) } diff --git a/lite/base_verifier.go b/lite/base_verifier.go new file mode 100644 index 00000000000..fcde01c0e22 --- /dev/null +++ b/lite/base_verifier.go @@ -0,0 +1,72 @@ +package lite + +import ( + "bytes" + + cmn "github.com/tendermint/tendermint/libs/common" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +var _ Verifier = (*BaseVerifier)(nil) + +// BaseVerifier lets us check the validity of SignedHeaders at height or +// later, requiring sufficient votes (> 2/3) from the given valset. +// To verify blocks produced by a blockchain with mutable validator sets, +// use the DynamicVerifier. +// TODO: Handle unbonding time. +type BaseVerifier struct { + chainID string + height int64 + valset *types.ValidatorSet +} + +// NewBaseVerifier returns a new Verifier initialized with a validator set at +// some height. +func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { + if valset.IsNilOrEmpty() { + panic("NewBaseVerifier requires a valid valset") + } + return &BaseVerifier{ + chainID: chainID, + height: height, + valset: valset, + } +} + +// Implements Verifier. +func (bc *BaseVerifier) ChainID() string { + return bc.chainID +} + +// Implements Verifier. +func (bc *BaseVerifier) Verify(signedHeader types.SignedHeader) error { + + // We can't verify commits older than bc.height. + if signedHeader.Height < bc.height { + return cmn.NewError("BaseVerifier height is %v, cannot verify height %v", + bc.height, signedHeader.Height) + } + + // We can't verify with the wrong validator set. + if !bytes.Equal(signedHeader.ValidatorsHash, + bc.valset.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) + } + + // Do basic sanity checks. + err := signedHeader.ValidateBasic(bc.chainID) + if err != nil { + return cmn.ErrorWrap(err, "in verify") + } + + // Check commit signatures.
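+ // VerifyCommit succeeds only if validators holding more than 2/3 of the + // set's total voting power signed this header's BlockID at this height; + // a bare majority is not sufficient.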
+ err = bc.valset.VerifyCommit( + bc.chainID, signedHeader.Commit.BlockID, + signedHeader.Height, signedHeader.Commit) + if err != nil { + return cmn.ErrorWrap(err, "in verify") + } + + return nil +} diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go new file mode 100644 index 00000000000..2ef1203fbdb --- /dev/null +++ b/lite/base_verifier_test.go @@ -0,0 +1,56 @@ +package lite + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +func TestBaseCert(t *testing.T) { + assert := assert.New(t) + + keys := genPrivKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! + vals := keys.ToValidators(20, 10) + // and a Verifier based on our known set + chainID := "test-static" + cert := NewBaseVerifier(chainID, 2, vals) + + cases := []struct { + keys privKeys + vals *types.ValidatorSet + height int64 + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // height regression + {keys, vals, 1, 0, len(keys), false, false}, + // perfect, signed by everyone + {keys, vals, 2, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 3, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 4, 0, len(keys) - 1, false, false}, + // Changing the power a little bit breaks the static validator. + // The sigs are enough, but the validator hash is unknown. + {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, + } + + for _, tc := range cases { + sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, + []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) + err := cert.Verify(sh) + if tc.proper { + assert.Nil(err, "%+v", err) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err) + } + } + } +} diff --git a/lite/client/main_test.go b/lite/client/main_test.go deleted file mode 100644 index 49b19436685..00000000000 --- a/lite/client/main_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/tendermint/abci/example/kvstore" - - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and merkleeyes) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/lite/client/provider.go b/lite/client/provider.go index 5f3d724500a..e0c0a331b29 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,19 +1,19 @@ /* Package client defines a provider that uses a rpcclient to get information, which is used to get new headers -and validators directly from a node. +and validators directly from a Tendermint client. 
*/ package client import ( - "bytes" + "fmt" + log "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/lite" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" ) // SignStatusClient combines a SignClient and StatusClient. @@ -23,119 +23,112 @@ type SignStatusClient interface { } type provider struct { - node SignStatusClient - lastHeight int64 + logger log.Logger + chainID string + client SignStatusClient } -// NewProvider can wrap any rpcclient to expose it as -// a read-only provider. -func NewProvider(node SignStatusClient) lite.Provider { - return &provider{node: node} +// NewProvider implements Provider (but not PersistentProvider). +func NewProvider(chainID string, client SignStatusClient) lite.Provider { + return &provider{ + logger: log.NewNopLogger(), + chainID: chainID, + client: client, + } } // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. -func NewHTTPProvider(remote string) lite.Provider { - return &provider{ - node: rpcclient.NewHTTP(remote, "/websocket"), - } +func NewHTTPProvider(chainID, remote string) lite.Provider { + return NewProvider(chainID, rpcclient.NewHTTP(remote, "/websocket")) } -// StatusClient returns the internal node as a StatusClient -func (p *provider) StatusClient() rpcclient.StatusClient { - return p.node +// Implements Provider. +func (p *provider) SetLogger(logger log.Logger) { + logger = logger.With("module", "lite/client") + p.logger = logger } -// StoreCommit is a noop, as clients can only read from the chain... -func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } +// StatusClient returns the internal client as a StatusClient +func (p *provider) StatusClient() rpcclient.StatusClient { + return p.client +} -// GetHash gets the most recent validator and sees if it matches -// -// TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - var fc lite.FullCommit - vals, err := p.node.Validators(nil) - // if we get no validators, or a different height, return an error - if err != nil { - return fc, err +// LatestFullCommit implements Provider. +func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return } - p.updateHeight(vals.BlockHeight) - vhash := types.NewValidatorSet(vals.Validators).Hash() - if !bytes.Equal(hash, vhash) { - return fc, liteErr.ErrCommitNotFound() + if maxHeight != 0 && maxHeight < minHeight { + err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v", + minHeight, maxHeight) + return } - return p.seedFromVals(vals) -} - -// GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { - commit, err := p.node.Commit(&h) + commit, err := p.fetchLatestCommit(minHeight, maxHeight) if err != nil { - return fc, err + return } - return p.seedFromCommit(commit) + fc, err = p.fillFullCommit(commit.SignedHeader) + return } -// LatestCommit returns the newest commit stored. 
-func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - commit, err := p.GetLatestCommit() +// fetchLatestCommit fetches the latest commit from the client. +func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) { + status, err := p.client.Status() if err != nil { - return fc, err + return nil, err } - return p.seedFromCommit(commit) -} - -// GetLatestCommit should return the most recent commit there is, -// which handles queries for future heights as per the semantics -// of GetByHeight. -func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { - status, err := p.node.Status() - if err != nil { + if status.SyncInfo.LatestBlockHeight < minHeight { + err = fmt.Errorf("provider is at %v but require minHeight=%v", + status.SyncInfo.LatestBlockHeight, minHeight) return nil, err } - return p.node.Commit(&status.SyncInfo.LatestBlockHeight) + if maxHeight == 0 { + maxHeight = status.SyncInfo.LatestBlockHeight + } else if status.SyncInfo.LatestBlockHeight < maxHeight { + maxHeight = status.SyncInfo.LatestBlockHeight + } + return p.client.Commit(&maxHeight) } -// CommitFromResult ... -func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { - return (lite.Commit)(result.SignedHeader) +// Implements Provider. +func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return p.getValidatorSet(chainID, height) } -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { - // now get the commits and build a full commit - commit, err := p.node.Commit(&vals.BlockHeight) +func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return + } + if height < 1 { + err = fmt.Errorf("expected height >= 1, got height %v", height) + return + } + res, err := p.client.Validators(&height) if err != nil { - return lite.FullCommit{}, err + // TODO pass through other types of errors. + return nil, lerr.ErrUnknownValidators(chainID, height) } - fc := lite.NewFullCommit( - CommitFromResult(commit), - types.NewValidatorSet(vals.Validators), - ) - return fc, nil + valset = types.NewValidatorSet(res.Validators) + return } -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { - fc.Commit = CommitFromResult(commit) +// This does no validation. +func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { - // now get the proper validators - vals, err := p.node.Validators(&commit.Header.Height) + // Get the validators. + valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) if err != nil { - return fc, err + return lite.FullCommit{}, err } - // make sure they match the commit (as we cannot enforce height) - vset := types.NewValidatorSet(vals.Validators) - if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, liteErr.ErrValidatorsChanged() + // Get the next validators. 
+ nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + if err != nil { + return lite.FullCommit{}, err } - p.updateHeight(commit.Header.Height) - fc.Validators = vset - return fc, nil -} - -func (p *provider) updateHeight(h int64) { - if h > p.lastHeight { - p.lastHeight = h - } + return lite.NewFullCommit(signedHeader, valset, nextValset), nil } diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index 94d47da3f36..d8704a52ec5 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -1,63 +1,61 @@ package client import ( + "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +func TestMain(m *testing.M) { + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app) + + code := m.Run() + + node.Stop() + node.Wait() + os.Exit(code) +} + func TestProvider(t *testing.T) { assert, require := assert.New(t), require.New(t) cfg := rpctest.GetConfig() rpcAddr := cfg.RPC.ListenAddress - genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + panic(err) + } chainID := genDoc.ChainID - p := NewHTTPProvider(rpcAddr) + t.Log("chainID:", chainID) + p := NewHTTPProvider(chainID, rpcAddr) require.NotNil(t, p) // let it produce some blocks - err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) require.Nil(err) // let's get the highest block - seed, err := p.LatestCommit() + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.Nil(err, "%+v", err) - sh := seed.Height() - vhash := seed.Header.ValidatorsHash + sh := fc.Height() assert.True(sh < 5000) // let's check this is valid somehow - assert.Nil(seed.ValidateBasic(chainID)) - cert := lite.NewStaticCertifier(chainID, seed.Validators) + assert.Nil(fc.ValidateFull(chainID)) // historical queries now work :) lower := sh - 5 - seed, err = p.GetByHeight(lower) + fc, err = p.LatestFullCommit(chainID, lower, lower) assert.Nil(err, "%+v", err) - assert.Equal(lower, seed.Height()) + assert.Equal(lower, fc.Height()) - // also get by hash (given the match) - seed, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, seed.Header.ValidatorsHash) - err = cert.Certify(seed.Commit) - assert.Nil(err, "%+v", err) - - // get by hash fails without match - seed, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // storing the seed silently ignored - err = p.StoreCommit(seed) - assert.Nil(err, "%+v", err) } diff --git a/lite/commit.go b/lite/commit.go index 11ae6d7ffac..25efb8dc088 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -2,98 +2,86 @@ package lite import ( "bytes" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" ) -// Certifier checks the votes to make sure the block really is signed properly. -// Certifier must know the current set of validitors by some other means. 
-type Certifier interface { - Certify(check Commit) error - ChainID() string -} - -// Commit is basically the rpc /commit response, but extended -// -// This is the basepoint for proving anything on the blockchain. It contains -// a signed header. If the signatures are valid and > 2/3 of the known set, -// we can store this checkpoint and use it to prove any number of aspects of -// the system: such as txs, abci state, validator sets, etc... -type Commit types.SignedHeader - -// FullCommit is a commit and the actual validator set, -// the base info you need to update to a given point, -// assuming knowledge of some previous validator set +// FullCommit is a signed header (the block header and a commit that signs it), +// the validator set which signed the commit, and the next validator set. The +// next validator set (which is proven from the block header) allows us to +// revert to block-by-block updating of lite Verifier's latest validator set, +// even in the face of arbitrarily large power changes. type FullCommit struct { - Commit `json:"commit"` - Validators *types.ValidatorSet `json:"validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Validators *types.ValidatorSet `json:"validator_set"` + NextValidators *types.ValidatorSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { return FullCommit{ - Commit: commit, - Validators: vals, + SignedHeader: signedHeader, + Validators: valset, + NextValidators: nextValset, } } -// Height returns the height of the header. -func (c Commit) Height() int64 { - if c.Header == nil { - return 0 +// Validate the components and check for consistency. +// This also checks to make sure that Validators actually +// signed the SignedHeader.Commit. +// If > 2/3 did not sign the Commit from fc.Validators, it +// is not a valid commit! +func (fc FullCommit) ValidateFull(chainID string) error { + // Ensure that Validators exists and matches the header. + if fc.Validators.Size() == 0 { + return errors.New("need FullCommit.Validators") } - return c.Header.Height -} - -// ValidatorsHash returns the hash of the validator set. -func (c Commit) ValidatorsHash() []byte { - if c.Header == nil { - return nil + if !bytes.Equal( + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash()) { + return fmt.Errorf("header has vhash %X but valset hash is %X", + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash(), + ) } - return c.Header.ValidatorsHash -} - -// ValidateBasic does basic consistency checks and makes sure the headers -// and commits are all consistent and refer to our chain. -// -// Make sure to use a Verifier to validate the signatures actually provide -// a significantly strong proof for this header's validity. -func (c Commit) ValidateBasic(chainID string) error { - // make sure the header is reasonable - if c.Header == nil { - return errors.New("Commit missing header") + // Ensure that NextValidators exists and matches the header. 
+ if fc.NextValidators.Size() == 0 { + return errors.New("need FullCommit.NextValidators") } - if c.Header.ChainID != chainID { - return errors.Errorf("Header belongs to another chain '%s' not '%s'", - c.Header.ChainID, chainID) + if !bytes.Equal( + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash()) { + return fmt.Errorf("header has next vhash %X but next valset hash is %X", + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash(), + ) } - - if c.Commit == nil { - return errors.New("Commit missing signatures") + // Validate the header. + err := fc.SignedHeader.ValidateBasic(chainID) + if err != nil { + return err } + // Validate the signatures on the commit. + hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit + return fc.Validators.VerifyCommit( + hdr.ChainID, cmt.BlockID, + hdr.Height, cmt) +} - // make sure the header and commit match (height and hash) - if c.Commit.Height() != c.Header.Height { - return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) - } - hhash := c.Header.Hash() - chash := c.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return errors.Errorf("Commits sign block %X header is block %X", - chash, hhash) +// Height returns the height of the header. +func (fc FullCommit) Height() int64 { + if fc.SignedHeader.Header == nil { + panic("should not happen") } + return fc.SignedHeader.Height +} - // make sure the commit is reasonable - err := c.Commit.ValidateBasic() - if err != nil { - return errors.WithStack(err) +// ChainID returns the chainID of the header. +func (fc FullCommit) ChainID() string { + if fc.SignedHeader.Header == nil { + panic("should not happen") } - - // looks good, we just need to make sure the signatures are really from - // empowered validators - return nil + return fc.SignedHeader.ChainID } diff --git a/lite/dbprovider.go b/lite/dbprovider.go new file mode 100644 index 00000000000..e0c4e65b4a6 --- /dev/null +++ b/lite/dbprovider.go @@ -0,0 +1,268 @@ +package lite + +import ( + "fmt" + "regexp" + "strconv" + + amino "github.com/tendermint/go-amino" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +type DBProvider struct { + logger log.Logger + label string + db dbm.DB + cdc *amino.Codec + limit int +} + +func NewDBProvider(label string, db dbm.DB) *DBProvider { + + // NOTE: when debugging, this type of construction might be useful. + //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) + + cdc := amino.NewCodec() + cryptoAmino.RegisterAmino(cdc) + dbp := &DBProvider{ + logger: log.NewNopLogger(), + label: label, + db: db, + cdc: cdc, + } + return dbp +} + +func (dbp *DBProvider) SetLogger(logger log.Logger) { + dbp.logger = logger.With("label", dbp.label) +} + +func (dbp *DBProvider) SetLimit(limit int) *DBProvider { + dbp.limit = limit + return dbp +} + +// Implements PersistentProvider. +func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { + + dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) + batch := dbp.db.NewBatch() + + // Save the fc.validators. + // We might be overwriting what we already have, but + // it makes the logic easier for now. 
+ vsKey := validatorSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators) + if err != nil { + return err + } + batch.Set(vsKey, vsBz) + + // Save the fc.NextValidators. + nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators) + if err != nil { + return err + } + batch.Set(nvsKey, nvsBz) + + // Save the fc.SignedHeader + shKey := signedHeaderKey(fc.ChainID(), fc.Height()) + shBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.SignedHeader) + if err != nil { + return err + } + batch.Set(shKey, shBz) + + // And write sync. + batch.WriteSync() + + // Garbage collect. + // TODO: optimize later. + if dbp.limit > 0 { + dbp.deleteAfterN(fc.ChainID(), dbp.limit) + } + + return nil +} + +// Implements Provider. +func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( + FullCommit, error) { + + dbp.logger.Info("DBProvider.LatestFullCommit()...", + "chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight) + + if minHeight <= 0 { + minHeight = 1 + } + if maxHeight == 0 { + maxHeight = 1<<63 - 1 + } + + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, maxHeight), + signedHeaderKey(chainID, minHeight-1), + ) + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, _, ok := parseSignedHeaderKey(key) + if !ok { + // Skip over other keys. + itr.Next() + continue + } else { + // Found the latest full commit signed header. + shBz := itr.Value() + sh := types.SignedHeader{} + err := dbp.cdc.UnmarshalBinaryLengthPrefixed(shBz, &sh) + if err != nil { + return FullCommit{}, err + } else { + lfc, err := dbp.fillFullCommit(sh) + if err == nil { + dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height()) + return lfc, nil + } else { + dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc) + dbp.logger.Error(fmt.Sprintf("%+v", err)) + return lfc, err + } + } + } + } + return FullCommit{}, lerr.ErrCommitNotFound() +} + +func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return dbp.getValidatorSet(chainID, height) +} + +func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + vsBz := dbp.db.Get(validatorSetKey(chainID, height)) + if vsBz == nil { + err = lerr.ErrUnknownValidators(chainID, height) + return + } + err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &valset) + if err != nil { + return + } + + // To test deep equality. This makes it easier to test for e.g. valset + // equivalence using assert.Equal (tests for deep equality) in our tests, + // which also tests for unexported/private field equivalence. + valset.TotalVotingPower() + + return +} + +func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { + var chainID = sh.ChainID + var height = sh.Height + var valset, nextValset *types.ValidatorSet + // Load the validator set. + valset, err := dbp.getValidatorSet(chainID, height) + if err != nil { + return FullCommit{}, err + } + // Load the next validator set. + nextValset, err = dbp.getValidatorSet(chainID, height+1) + if err != nil { + return FullCommit{}, err + } + // Return filled FullCommit. 
+	return FullCommit{
+		SignedHeader:   sh,
+		Validators:     valset,
+		NextValidators: nextValset,
+	}, nil
+}
+
+func (dbp *DBProvider) deleteAfterN(chainID string, after int) error {
+
+	dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after)
+
+	itr := dbp.db.ReverseIterator(
+		signedHeaderKey(chainID, 1<<63-1),
+		signedHeaderKey(chainID, 0),
+	)
+	defer itr.Close()
+
+	var lastHeight int64 = 1<<63 - 1
+	var numSeen = 0
+	var numDeleted = 0
+
+	for itr.Valid() {
+		key := itr.Key()
+		_, height, ok := parseChainKeyPrefix(key)
+		if !ok {
+			return fmt.Errorf("unexpected key %v", key)
+		} else {
+			if height < lastHeight {
+				lastHeight = height
+				numSeen += 1
+			}
+			if numSeen > after {
+				dbp.db.Delete(key)
+				numDeleted += 1
+			}
+		}
+		itr.Next()
+	}
+
+	dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted))
+	return nil
+}
+
+//----------------------------------------
+// key encoding
+
+func signedHeaderKey(chainID string, height int64) []byte {
+	return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height))
+}
+
+func validatorSetKey(chainID string, height int64) []byte {
+	return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height))
+}
+
+//----------------------------------------
+// key parsing
+
+var keyPattern = regexp.MustCompile(`^([^/]+)/([0-9]*)/(.*)$`)
+
+func parseKey(key []byte) (chainID string, height int64, part string, ok bool) {
+	submatch := keyPattern.FindSubmatch(key)
+	if submatch == nil {
+		return "", 0, "", false
+	}
+	chainID = string(submatch[1])
+	heightStr := string(submatch[2])
+	heightInt, err := strconv.Atoi(heightStr)
+	if err != nil {
+		return "", 0, "", false
+	}
+	height = int64(heightInt)
+	part = string(submatch[3])
+	ok = true // good!
+	return
+}
+
+func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) {
+	chainID, height, part, ok := parseKey(key)
+	if part != "sh" {
+		return "", 0, false
+	}
+	return chainID, height, true
+}
+
+func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) {
+	chainID, height, _, ok = parseKey(key)
+	return chainID, height, ok
+}
diff --git a/lite/doc.go b/lite/doc.go
index 89dc702fcf9..00dcce68cdd 100644
--- a/lite/doc.go
+++ b/lite/doc.go
@@ -1,133 +1,140 @@
 /*
-Package lite allows you to securely validate headers
-without a full node.
+Package lite allows you to securely validate headers without a full node.
 
-This library pulls together all the crypto and algorithms,
-so given a relatively recent (< unbonding period) known
-validator set, one can get indisputable proof that data is in
-the chain (current state) or detect if the node is lying to
-the client.
+This library pulls together all the crypto and algorithms, so given a
+relatively recent (< unbonding period) known validator set, one can get
+indisputable proof that data is in the chain (current state) or detect if the
+node is lying to the client.
 
-Tendermint RPC exposes a lot of info, but a malicious node
-could return any data it wants to queries, or even to block
-headers, even making up fake signatures from non-existent
-validators to justify it. This is a lot of logic to get
-right, to be contained in a small, easy to use library,
-that does this for you, so you can just build nice UI.
+Tendermint RPC exposes a lot of info, but a malicious node could return any
+data it wants to queries, or even to block headers, even making up fake
+signatures from non-existent validators to justify it. This is a lot of logic
+to get right, so it is contained in a small, easy-to-use library that does
+this for you, letting you just build nice applications.
 
-We design for clients who have no strong trust relationship
-with any tendermint node, just the validator set as a whole.
-Beyond building nice mobile or desktop applications, the
-cosmos hub is another important example of a client,
-that needs undeniable proof without syncing the full chain,
-in order to efficiently implement IBC.
+We design for clients who have no strong trust relationship with any Tendermint
+node, just the blockchain and validator set as a whole.
 
-Commits
+# Data structures
 
-There are two main data structures that we pass around - Commit
-and FullCommit. Both of them mirror what information is
-exposed in tendermint rpc.
+## SignedHeader
 
-Commit is a block header along with enough validator signatures
-to prove its validity (> 2/3 of the voting power). A FullCommit
-is a Commit along with the full validator set. When the
-validator set doesn't change, the Commit is enough, but since
-the block header only has a hash, we need the FullCommit to
-follow any changes to the validator set.
+SignedHeader is a block header along with a commit -- enough validator
+precommit-vote signatures to prove its validity (> 2/3 of the voting power)
+given the validator set responsible for signing that header. A FullCommit is a
+SignedHeader along with the current and next validator sets.
 
-Certifiers
+The hash of the next validator set is included and signed in the SignedHeader.
+This lets the lite client keep track of arbitrary changes to the validator set,
+as every change to the validator set must be approved by inclusion in the
+header and signed in the commit.
 
-A Certifier validates a new Commit given the currently known
-state. There are three different types of Certifiers exposed,
-each one building on the last one, with additional complexity.
+In the worst case, with every block changing the validators around completely,
+a lite client can sync up with every block header to verify each validator set
+change on the chain. In practice, most applications will not have frequent
+drastic updates to the validator set, so the logic defined in this package for
+lite client syncing is optimized to use intelligent bisection and
+block-skipping for efficient sourcing and verification of these data structures
+and updates to the validator set (see the DynamicVerifier for more
+information).
 
-Static - given the validator set upon initialization. Verifies
-all signatures against that set and if the validator set
-changes, it will reject all headers.
+The FullCommit is also declared in this package as a convenience structure,
+which includes the SignedHeader along with the full current and next
+ValidatorSets.
 
-Dynamic - This wraps Static and has the same Certify
-method. However, it adds an Update method, which can be called
-with a FullCommit when the validator set changes. If it can
-prove this is a valid transition, it will update the validator
-set.
+## Verifier
 
-Inquiring - this wraps Dynamic and implements an auto-update
-strategy on top of the Dynamic update. If a call to
-Certify fails as the validator set has changed, then it
-attempts to find a FullCommit and Update to that header.
-To get these FullCommits, it makes use of a Provider.
+A Verifier validates a new SignedHeader given the currently known state. There
+are two different types of Verifiers provided.
-Providers
+BaseVerifier - given a validator set and a height, this Verifier verifies
+that > 2/3 of the voting power of the given validator set had signed the
+SignedHeader, and that the SignedHeader was to be signed by the exact given
+validator set, and that the height of the commit is at least height (or
+greater).
 
-A Provider allows us to store and retrieve the FullCommits,
-to provide memory to the Inquiring Certifier.
+Even if SignedHeader.Commit is signed by a different validator set, it can
+still be verified with a BaseVerifier as long as sufficient signatures from
+the previous validator set are present in the commit.
 
-NewMemStoreProvider - in-memory cache.
+DynamicVerifier - this Verifier implements an auto-update and persistence
+strategy to verify any SignedHeader of the blockchain.
 
-files.NewProvider - disk backed storage.
+## Provider and PersistentProvider
 
-client.NewHTTPProvider - query tendermint rpc.
+A Provider allows us to store and retrieve the FullCommits.
 
-NewCacheProvider - combine multiple providers.
+```go
+type Provider interface {
+	// LatestFullCommit returns the latest commit with
+	// minHeight <= height <= maxHeight.
+	// If maxHeight is zero, returns the latest where
+	// minHeight <= height.
+	LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
+}
+```
 
-The suggested use for local light clients is
-client.NewHTTPProvider for getting new data (Source),
-and NewCacheProvider(NewMemStoreProvider(),
-files.NewProvider()) to store confirmed headers (Trusted)
+* client.NewHTTPProvider - query Tendermint rpc.
 
-How We Track Validators
+A PersistentProvider is a Provider that also allows for saving state. This is
+used by the DynamicVerifier for persistence.
 
-Unless you want to blindly trust the node you talk with, you
-need to trace every response back to a hash in a block header
-and validate the commit signatures of that block header match
-the proper validator set. If there is a contant validator
-set, you store it locally upon initialization of the client,
+```go
+type PersistentProvider interface {
+	Provider
+
+	// SaveFullCommit saves a FullCommit (without verification).
+	SaveFullCommit(fc FullCommit) error
+}
+```
+
+* DBProvider - persistence provider for use with any libs/DB.
+* MultiProvider - combine multiple providers.
+
+The suggested use for local light clients is client.NewHTTPProvider(...) for
+getting new data (Source), and NewMultiProvider(NewDBProvider("label",
+dbm.NewMemDB()), NewDBProvider("label", db.NewFileDB(...))) to store confirmed
+full commits (Trusted).
+
+
+# How We Track Validators
+
+Unless you want to blindly trust the node you talk with, you need to trace
+every response back to a hash in a block header and validate that the commit
+signatures of that block header match the proper validator set. If there is a
+static validator set, you store it locally upon initialization of the client,
 and check against that every time.
 
-Once there is a dynamic validator set, the issue of
-verifying a block becomes a bit more tricky. There is
-background information in a
-github issue (https://github.com/tendermint/tendermint/issues/377).
-
-In short, if there is a block at height H with a known
-(trusted) validator set V, and another block at height H'
-(H' > H) with validator set V' != V, then we want a way to
-safely update it.
-
-First, get the new (unconfirmed) validator set V' and
-verify H' is internally consistent and properly signed by
-this V'.
Assuming it is a valid block, we check that at
-least 2/3 of the validators in V also signed it, meaning
-it would also be valid under our old assumptions.
-That should be enough, but we can also check that the
-V counts for at least 2/3 of the total votes in H'
-for extra safety (we can have a discussion if this is
-strictly required). If we can verify all this,
-then we can accept H' and V' as valid and use that to
-validate all blocks X > H'.
-
-If we cannot update directly from H -> H' because there was
-too much change to the validator set, then we can look for
-some Hm (H < Hm < H') with a validator set Vm. Then we try
-to update H -> Hm and Hm -> H' in two separate steps.
-If one of these steps doesn't work, then we continue
-bisecting, until we eventually have to externally
-validate the valdiator set changes at every block.
-
-Since we never trust any server in this protocol, only the
-signatures themselves, it doesn't matter if the seed comes
-from a (possibly malicious) node or a (possibly malicious) user.
-We can accept it or reject it based only on our trusted
-validator set and cryptographic proofs. This makes it
-extremely important to verify that you have the proper
-validator set when initializing the client, as that is the
-root of all trust.
-
-Or course, this assumes that the known block is within the
-unbonding period to avoid the "nothing at stake" problem.
-If you haven't seen the state in a few months, you will need
-to manually verify the new validator set hash using off-chain
-means (the same as getting the initial hash).
+If the validator set for the blockchain is dynamic, verifying block commits is
+a bit more involved -- if there is a block at height H with a known (trusted)
+validator set V, and another block at height H' (H' > H) with validator set V'
+!= V, then we want a way to safely update it.
+
+First, we get the new (unconfirmed) validator set V' and verify that H' is
+internally consistent and properly signed by this V'. Assuming it is a valid
+block, we check that at least 2/3 of the validators in V also signed it,
+meaning it would also be valid under our old assumptions. Then, we accept H'
+and V' as valid and trusted and use that to validate for heights X > H' until a
+more recent and updated validator set is found.
+
+If we cannot update directly from H -> H' because there was too much change to
+the validator set, then we can look for some Hm (H < Hm < H') with a validator
+set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one
+of these steps doesn't work, then we continue bisecting, until we eventually
+have to externally validate the validator set changes at every block.
+
+Since we never trust any server in this protocol, only the signatures
+themselves, it doesn't matter if the seed comes from a (possibly malicious)
+node or a (possibly malicious) user. We can accept it or reject it based only
+on our trusted validator set and cryptographic proofs. This makes it extremely
+important to verify that you have the proper validator set when initializing
+the client, as that is the root of all trust.
+
+The software currently assumes that the unbonding period is infinite in
+duration. If the DynamicVerifier hasn't been updated in a while, you should
+manually verify the block headers using other sources.
+
+TODO: Update the software to handle cases around the unbonding period.
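+
+As a rough sketch of wiring these pieces together (illustrative only; the
+label, RPC address, and variable names are not part of this package):
+
+```go
+chainID := "my-chain-id"
+trust := lite.NewDBProvider("trust", dbm.NewMemDB())
+source := client.NewHTTPProvider(chainID, "tcp://localhost:26657")
+verifier := lite.NewDynamicVerifier(chainID, trust, source)
+// Verify a SignedHeader obtained elsewhere (e.g. from an RPC response).
+err := verifier.Verify(signedHeader)
+```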
*/ package lite diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go deleted file mode 100644 index 0ddace8b6d5..00000000000 --- a/lite/dynamic_certifier.go +++ /dev/null @@ -1,96 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*DynamicCertifier)(nil) - -// DynamicCertifier uses a StaticCertifier for Certify, but adds an -// Update method to allow for a change of validators. -// -// You can pass in a FullCommit with another validator set, -// and if this is a provably secure transition (< 1/3 change, -// sufficient signatures), then it will update the -// validator set for the next Certify call. -// For security, it will only follow validator set changes -// going forward. -type DynamicCertifier struct { - cert *StaticCertifier - lastHeight int64 -} - -// NewDynamic returns a new dynamic certifier. -func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { - return &DynamicCertifier{ - cert: NewStaticCertifier(chainID, vals), - lastHeight: height, - } -} - -// ChainID returns the chain id of this certifier. -// Implements Certifier. -func (dc *DynamicCertifier) ChainID() string { - return dc.cert.ChainID() -} - -// Validators returns the validators of this certifier. -func (dc *DynamicCertifier) Validators() *types.ValidatorSet { - return dc.cert.vSet -} - -// Hash returns the hash of this certifier. -func (dc *DynamicCertifier) Hash() []byte { - return dc.cert.Hash() -} - -// LastHeight returns the last height of this certifier. -func (dc *DynamicCertifier) LastHeight() int64 { - return dc.lastHeight -} - -// Certify will verify whether the commit is valid and will update the height if it is or return an -// error if it is not. -// Implements Certifier. -func (dc *DynamicCertifier) Certify(check Commit) error { - err := dc.cert.Certify(check) - if err == nil { - // update last seen height if input is valid - dc.lastHeight = check.Height() - } - return err -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -// -// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) -func (dc *DynamicCertifier) Update(fc FullCommit) error { - // ignore all checkpoints in the past -> only to the future - h := fc.Height() - if h <= dc.lastHeight { - return liteErr.ErrPastTime() - } - - // first, verify if the input is self-consistent.... - err := fc.ValidateBasic(dc.ChainID()) - if err != nil { - return err - } - - // now, make sure not too much change... 
meaning this commit - // would be approved by the currently known validator set - // as well as the new set - commit := fc.Commit.Commit - err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) - if err != nil { - return liteErr.ErrTooMuchChange() - } - - // looks good, we can update - dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) - dc.lastHeight = h - return nil -} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go deleted file mode 100644 index 88c145f958e..00000000000 --- a/lite/dynamic_certifier_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/errors" -) - -// TestDynamicCert just makes sure it still works like StaticCert -func TestDynamicCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a certifier based on our known set - chainID := "test-dyno" - cert := lite.NewDynamicCertifier(chainID, vals, 0) - - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, - } - - for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) - if tc.proper { - assert.Nil(err, "%+v", err) - assert.Equal(cert.LastHeight(), tc.height) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) - } - } - } -} - -// TestDynamicUpdate makes sure we update safely and sanely -func TestDynamicUpdate(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - chainID := "test-dyno-up" - keys := lite.GenValKeys(5) - vals := keys.ToValidators(20, 0) - cert := lite.NewDynamicCertifier(chainID, vals, 40) - - // one valid block to give us a sense of time - h := int64(100) - good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) - err := cert.Certify(good) - require.Nil(err, "%+v", err) - - // some new sets to try later - keys2 := keys.Extend(2) - keys3 := keys2.Extend(4) - - // we try to update with some blocks - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect too much change error - }{ - // same validator set, well signed, of course it is okay - {keys, vals, h + 10, 0, len(keys), true, false}, - // same validator set, poorly signed, fails - {keys, vals, h + 20, 2, len(keys), false, 
false},
-
-		// shift the power a little, works if properly signed
-		{keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false},
-		// but not on a poor signature
-		{keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false},
-		// and not if it was in the past
-		{keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false},
-
-		// let's try to adjust to a whole new validator set (we have 5/7 of the votes)
-		{keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false},
-
-		// properly signed but too much change, not allowed (only 7/11 validators known)
-		{keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true},
-	}
-
-	for _, tc := range cases {
-		fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals,
-			[]byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last)
-		err := cert.Update(fc)
-		if tc.proper {
-			assert.Nil(err, "%d: %+v", tc.height, err)
-			// we update last seen height
-			assert.Equal(cert.LastHeight(), tc.height)
-			// and we update the proper validators
-			assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash())
-		} else {
-			assert.NotNil(err, "%d", tc.height)
-			// we don't update the height
-			assert.NotEqual(cert.LastHeight(), tc.height)
-			if tc.changed {
-				assert.True(errors.IsTooMuchChangeErr(err),
-					"%d: %+v", tc.height, err)
-			}
-		}
-	}
-}
diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go
new file mode 100644
index 00000000000..6a7720913ce
--- /dev/null
+++ b/lite/dynamic_verifier.go
@@ -0,0 +1,258 @@
+package lite
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	log "github.com/tendermint/tendermint/libs/log"
+	lerr "github.com/tendermint/tendermint/lite/errors"
+	"github.com/tendermint/tendermint/types"
+)
+
+const sizeOfPendingMap = 1024
+
+var _ Verifier = (*DynamicVerifier)(nil)
+
+// DynamicVerifier implements an auto-updating Verifier. It uses a
+// "source" provider to obtain the needed FullCommits to securely sync with
+// validator set changes. It stores properly validated data on the
+// "trusted" local system.
+type DynamicVerifier struct {
+	logger  log.Logger
+	chainID string
+	// These are only properly validated data, from local system.
+	trusted PersistentProvider
+	// This is a source of new info, like a node rpc, or other import method.
+	source Provider
+
+	// pending map to synchronize concurrent verification requests
+	mtx                  sync.Mutex
+	pendingVerifications map[int64]chan struct{}
+}
+
+// NewDynamicVerifier returns a new DynamicVerifier. It uses the
+// trusted provider to store validated data and the source provider to
+// obtain missing data (e.g. FullCommits).
+//
+// The trusted provider should be a CacheProvider, MemProvider or
+// files.Provider. The source provider should be a client.HTTPProvider.
+func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier {
+	return &DynamicVerifier{
+		logger:               log.NewNopLogger(),
+		chainID:              chainID,
+		trusted:              trusted,
+		source:               source,
+		pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap),
+	}
+}
+
+func (ic *DynamicVerifier) SetLogger(logger log.Logger) {
+	logger = logger.With("module", "lite")
+	ic.logger = logger
+	ic.trusted.SetLogger(logger)
+	ic.source.SetLogger(logger)
+}
+
+// Implements Verifier.
+func (ic *DynamicVerifier) ChainID() string {
+	return ic.chainID
+}
+
+// Implements Verifier.
+//
+// If the validators have changed since the last known time, it looks to
+// ic.trusted and ic.source to prove the new validators.
On success, it will
+// try to store the SignedHeader in ic.trusted if the next
+// validator can be sourced.
+func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {
+
+	// Synchronize concurrent verification requests for the same height.
+	ic.mtx.Lock()
+	if pending := ic.pendingVerifications[shdr.Height]; pending != nil {
+		ic.mtx.Unlock()
+		<-pending // pending is chan struct{}
+	} else {
+		pending := make(chan struct{})
+		ic.pendingVerifications[shdr.Height] = pending
+		defer func() {
+			close(pending)
+			ic.mtx.Lock()
+			delete(ic.pendingVerifications, shdr.Height)
+			ic.mtx.Unlock()
+		}()
+		ic.mtx.Unlock()
+	}
+	// Get the exact trusted commit for h, and if it is
+	// equal to shdr, then don't even verify it,
+	// and just return nil.
+	trustedFCSameHeight, err := ic.trusted.LatestFullCommit(ic.chainID, shdr.Height, shdr.Height)
+	if err == nil {
+		// If we loaded the trusted commit successfully and it equals shdr,
+		// don't verify it again; just return nil.
+		if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
+			ic.logger.Info(fmt.Sprintf("Loaded full commit at height %d from cache; no need to verify.", shdr.Height))
+			return nil
+		}
+	} else if !lerr.IsErrCommitNotFound(err) {
+		// Return the error if it is not a CommitNotFound error.
+		ic.logger.Info(fmt.Sprintf("Encountered unknown error while loading full commit at height %d.", shdr.Height))
+		return err
+	}
+
+	// Get the latest known full commit <= h-1 from our trusted providers.
+	// The full commit at h-1 contains the valset to sign for h.
+	h := shdr.Height - 1
+	trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
+	if err != nil {
+		return err
+	}
+
+	if trustedFC.Height() == h {
+		// Return error if valset doesn't match.
+		if !bytes.Equal(
+			trustedFC.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			return lerr.ErrUnexpectedValidators(
+				trustedFC.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash)
+		}
+	} else {
+		// If valset doesn't match...
+		if !bytes.Equal(trustedFC.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			// ... update.
+			trustedFC, err = ic.updateToHeight(h)
+			if err != nil {
+				return err
+			}
+			// Return error if valset _still_ doesn't match.
+			if !bytes.Equal(trustedFC.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash) {
+				return lerr.ErrUnexpectedValidators(
+					trustedFC.NextValidators.Hash(),
+					shdr.Header.ValidatorsHash)
+			}
+		}
+	}
+
+	// Verify the signed header using the matching valset.
+	cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
+	err = cert.Verify(shdr)
+	if err != nil {
+		return err
+	}
+
+	// Get the next validator set.
+	nextValset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1)
+	if lerr.IsErrUnknownValidators(err) {
+		// Ignore this error.
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Create filled FullCommit.
+	nfc := FullCommit{
+		SignedHeader:   shdr,
+		Validators:     trustedFC.NextValidators,
+		NextValidators: nextValset,
+	}
+	// Validate the full commit. This checks the cryptographic
+	// signatures of Commit against Validators.
+	if err := nfc.ValidateFull(ic.chainID); err != nil {
+		return err
+	}
+	// Trust it.
+	return ic.trusted.SaveFullCommit(nfc)
+}
+
+// verifyAndSave will verify if this is a valid source full commit given the
+// best match trusted full commit, and if good, persist to ic.trusted.
+// Returns ErrTooMuchChange when >2/3 of trustedFC did not sign sourceFC.
+// Panics if trustedFC.Height() >= sourceFC.Height().
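+// For example (heights are illustrative): if trustedFC is at height 10 and
+// sourceFC is at height 100 and this returns ErrTooMuchChange, the caller
+// (updateToHeight) bisects, first updating to the midpoint height
+// (10+100)/2 = 55 and then retrying from there.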
+func (ic *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error { + if trustedFC.Height() >= sourceFC.Height() { + panic("should not happen") + } + err := trustedFC.NextValidators.VerifyFutureCommit( + sourceFC.Validators, + ic.chainID, sourceFC.SignedHeader.Commit.BlockID, + sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit, + ) + if err != nil { + return err + } + + return ic.trusted.SaveFullCommit(sourceFC) +} + +// updateToHeight will use divide-and-conquer to find a path to h. +// Returns nil error iff we successfully verify and persist a full commit +// for height h, using repeated applications of bisection if necessary. +// +// Returns ErrCommitNotFound if source provider doesn't have the commit for h. +func (ic *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) { + + // Fetch latest full commit from source. + sourceFC, err := ic.source.LatestFullCommit(ic.chainID, h, h) + if err != nil { + return FullCommit{}, err + } + + // Validate the full commit. This checks the cryptographic + // signatures of Commit against Validators. + if err := sourceFC.ValidateFull(ic.chainID); err != nil { + return FullCommit{}, err + } + + // If sourceFC.Height() != h, we can't do it. + if sourceFC.Height() != h { + return FullCommit{}, lerr.ErrCommitNotFound() + } + +FOR_LOOP: + for { + // Fetch latest full commit from trusted. + trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h) + if err != nil { + return FullCommit{}, err + } + // We have nothing to do. + if trustedFC.Height() == h { + return trustedFC, nil + } + + // Try to update to full commit with checks. + err = ic.verifyAndSave(trustedFC, sourceFC) + if err == nil { + // All good! + return sourceFC, nil + } + + // Handle special case when err is ErrTooMuchChange. + if lerr.IsErrTooMuchChange(err) { + // Divide and conquer. + start, end := trustedFC.Height(), sourceFC.Height() + if !(start < end) { + panic("should not happen") + } + mid := (start + end) / 2 + _, err = ic.updateToHeight(mid) + if err != nil { + return FullCommit{}, err + } + // If we made it to mid, we retry. + continue FOR_LOOP + } + return FullCommit{}, err + } +} + +func (ic *DynamicVerifier) LastTrustedHeight() int64 { + fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) + if err != nil { + panic("should not happen") + } + return fc.Height() +} diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go new file mode 100644 index 00000000000..9ff8ed81f89 --- /dev/null +++ b/lite/dynamic_verifier_test.go @@ -0,0 +1,211 @@ +package lite + +import ( + "fmt" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" +) + +func TestInquirerValidPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := NewDBProvider("trust", dbm.NewMemDB()) + source := NewDBProvider("source", dbm.NewMemDB()) + + // Set up the validators to generate test blocks. + var vote int64 = 10 + keys := genPrivKeys(5) + nkeys := keys.Extend(1) + + // Construct a bunch of commits, each with one more height than the last. 
+	chainID := "inquiry-test"
+	consHash := []byte("params")
+	resHash := []byte("results")
+	count := 50
+	fcz := make([]FullCommit, count)
+	for i := 0; i < count; i++ {
+		vals := keys.ToValidators(vote, 0)
+		nextVals := nkeys.ToValidators(vote, 0)
+		h := int64(1 + i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		fcz[i] = keys.GenFullCommit(
+			chainID, h, nil,
+			vals, nextVals,
+			appHash, consHash, resHash, 0, len(keys))
+		// Extend the keys by 1 each time.
+		keys = nkeys
+		nkeys = nkeys.Extend(1)
+	}
+
+	// Initialize a Verifier with the initial state.
+	err := trust.SaveFullCommit(fcz[0])
+	require.Nil(err)
+	cert := NewDynamicVerifier(chainID, trust, source)
+	cert.SetLogger(log.TestingLogger())
+
+	// This should fail validation:
+	sh := fcz[count-1].SignedHeader
+	err = cert.Verify(sh)
+	require.NotNil(err)
+
+	// Adding a few commits in the middle should be insufficient.
+	for i := 10; i < 13; i++ {
+		err := source.SaveFullCommit(fcz[i])
+		require.Nil(err)
+	}
+	err = cert.Verify(sh)
+	assert.NotNil(err)
+
+	// With more info, we succeed.
+	for i := 0; i < count; i++ {
+		err := source.SaveFullCommit(fcz[i])
+		require.Nil(err)
+	}
+	err = cert.Verify(sh)
+	assert.Nil(err, "%+v", err)
+}
+
+func TestInquirerVerifyHistorical(t *testing.T) {
+	assert, require := assert.New(t), require.New(t)
+	trust := NewDBProvider("trust", dbm.NewMemDB())
+	source := NewDBProvider("source", dbm.NewMemDB())
+
+	// Set up the validators to generate test blocks.
+	var vote int64 = 10
+	keys := genPrivKeys(5)
+	nkeys := keys.Extend(1)
+
+	// Construct a bunch of commits, each with one more height than the last.
+	chainID := "inquiry-test"
+	count := 10
+	consHash := []byte("special-params")
+	fcz := make([]FullCommit, count)
+	for i := 0; i < count; i++ {
+		vals := keys.ToValidators(vote, 0)
+		nextVals := nkeys.ToValidators(vote, 0)
+		h := int64(1 + i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		resHash := []byte(fmt.Sprintf("res=%d", h))
+		fcz[i] = keys.GenFullCommit(
+			chainID, h, nil,
+			vals, nextVals,
+			appHash, consHash, resHash, 0, len(keys))
+		// Extend the keys by 1 each time.
+		keys = nkeys
+		nkeys = nkeys.Extend(1)
+	}
+
+	// Initialize a Verifier with the initial state.
+	err := trust.SaveFullCommit(fcz[0])
+	require.Nil(err)
+	cert := NewDynamicVerifier(chainID, trust, source)
+	cert.SetLogger(log.TestingLogger())
+
+	// Store a few full commits as trust.
+	for _, i := range []int{2, 5} {
+		trust.SaveFullCommit(fcz[i])
+	}
+
+	// See if we can jump forward using trusted full commits.
+	// Source doesn't have fcz[9] so cert.LastTrustedHeight won't change.
+	err = source.SaveFullCommit(fcz[7])
+	require.Nil(err, "%+v", err)
+	sh := fcz[8].SignedHeader
+	err = cert.Verify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
+	fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
+	require.NotNil(err, "%+v", err)
+	assert.Equal(fc_, (FullCommit{}))
+
+	// With fcz[9] Verify will update last trusted height.
+	err = source.SaveFullCommit(fcz[9])
+	require.Nil(err, "%+v", err)
+	sh = fcz[8].SignedHeader
+	err = cert.Verify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
+	fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
+	require.Nil(err, "%+v", err)
+	assert.Equal(fc_.Height(), fcz[8].Height())
+
+	// Add access to all full commits via untrusted source.
+	for i := 0; i < count; i++ {
+		err := source.SaveFullCommit(fcz[i])
+		require.Nil(err)
+	}
+
+	// Try to check an unknown seed in the past.
+	sh = fcz[3].SignedHeader
+	err = cert.Verify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
+
+	// Jump all the way forward again.
+	sh = fcz[count-1].SignedHeader
+	err = cert.Verify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
+}
+
+func TestConcurrencyInquirerVerify(t *testing.T) {
+	_, require := assert.New(t), require.New(t)
+	trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
+	source := NewDBProvider("source", dbm.NewMemDB())
+
+	// Set up the validators to generate test blocks.
+	var vote int64 = 10
+	keys := genPrivKeys(5)
+	nkeys := keys.Extend(1)
+
+	// Construct a bunch of commits, each with one more height than the last.
+	chainID := "inquiry-test"
+	count := 10
+	consHash := []byte("special-params")
+	fcz := make([]FullCommit, count)
+	for i := 0; i < count; i++ {
+		vals := keys.ToValidators(vote, 0)
+		nextVals := nkeys.ToValidators(vote, 0)
+		h := int64(1 + i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		resHash := []byte(fmt.Sprintf("res=%d", h))
+		fcz[i] = keys.GenFullCommit(
+			chainID, h, nil,
+			vals, nextVals,
+			appHash, consHash, resHash, 0, len(keys))
+		// Extend the keys by 1 each time.
+		keys = nkeys
+		nkeys = nkeys.Extend(1)
+	}
+
+	// Initialize a Verifier with the initial state.
+	err := trust.SaveFullCommit(fcz[0])
+	require.Nil(err)
+	cert := NewDynamicVerifier(chainID, trust, source)
+	cert.SetLogger(log.TestingLogger())
+
+	err = source.SaveFullCommit(fcz[7])
+	require.Nil(err, "%+v", err)
+	err = source.SaveFullCommit(fcz[8])
+	require.Nil(err, "%+v", err)
+	sh := fcz[8].SignedHeader
+
+	var wg sync.WaitGroup
+	count = 100
+	errList := make([]error, count)
+	for i := 0; i < count; i++ {
+		wg.Add(1)
+		go func(index int) {
+			defer wg.Done()
+			errList[index] = cert.Verify(sh)
+		}(i)
+	}
+	wg.Wait()
+	for _, err := range errList {
+		require.Nil(err)
+	}
+}
diff --git a/lite/errors/errors.go b/lite/errors/errors.go
index 99e42a0bdd3..59b6380d89b 100644
--- a/lite/errors/errors.go
+++ b/lite/errors/errors.go
@@ -3,90 +3,131 @@ package errors
 import (
 	"fmt"
 
-	"github.com/pkg/errors"
+	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
-var (
-	errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier")
-	errCommitNotFound    = fmt.Errorf("Commit not found by provider")
-	errTooMuchChange     = fmt.Errorf("Validators change too much to safely update")
-	errPastTime          = fmt.Errorf("Update older than certifier height")
-	errNoPathFound       = fmt.Errorf("Cannot find a path of validators")
-)
+//----------------------------------------
+// Error types
+
+type errCommitNotFound struct{}
 
-// IsCommitNotFoundErr checks whether an error is due to missing data
-func IsCommitNotFoundErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errCommitNotFound)
+func (e errCommitNotFound) Error() string {
+	return "Commit not found by provider"
 }
 
-// ErrCommitNotFound indicates that a the requested commit was not found.
-func ErrCommitNotFound() error {
-	return errors.WithStack(errCommitNotFound)
+type errUnexpectedValidators struct {
+	got  []byte
+	want []byte
 }
 
-// IsValidatorsChangedErr checks whether an error is due
-// to a differing validator set.
-func IsValidatorsChangedErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errValidatorsChanged)
+func (e errUnexpectedValidators) Error() string {
+	return fmt.Sprintf("Validator set is different. Got %X want %X",
+		e.got, e.want)
 }
 
-// ErrValidatorsChanged indicates that the validator set was changed between two commits.
-func ErrValidatorsChanged() error {
-	return errors.WithStack(errValidatorsChanged)
+type errTooMuchChange struct{}
+
+func (e errTooMuchChange) Error() string {
+	return "Insufficient signatures to validate due to valset changes"
 }
 
-// IsTooMuchChangeErr checks whether an error is due to too much change
-// between these validators sets.
-func IsTooMuchChangeErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errTooMuchChange)
+type errUnknownValidators struct {
+	chainID string
+	height  int64
 }
 
-// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
-func ErrTooMuchChange() error {
-	return errors.WithStack(errTooMuchChange)
+func (e errUnknownValidators) Error() string {
+	return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d",
+		e.chainID, e.height)
 }
 
-// IsPastTimeErr ...
-func IsPastTimeErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errPastTime)
+type errEmptyTree struct{}
+
+func (e errEmptyTree) Error() string {
+	return "Tree is empty"
 }
 
-// ErrPastTime ...
-func ErrPastTime() error {
-	return errors.WithStack(errPastTime)
+//----------------------------------------
+// Methods for above error types
+
+//-----------------
+// ErrCommitNotFound
+
+// ErrCommitNotFound indicates that the requested commit was not found.
+func ErrCommitNotFound() error {
+	return cmn.ErrorWrap(errCommitNotFound{}, "")
 }
 
-// IsNoPathFoundErr checks whether an error is due to no path of
-// validators in provider from where we are to where we want to be
-func IsNoPathFoundErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errNoPathFound)
+func IsErrCommitNotFound(err error) bool {
+	if err_, ok := err.(cmn.Error); ok {
+		_, ok := err_.Data().(errCommitNotFound)
+		return ok
+	}
+	return false
 }
 
-// ErrNoPathFound ...
-func ErrNoPathFound() error {
-	return errors.WithStack(errNoPathFound)
+//-----------------
+// ErrUnexpectedValidators
+
+// ErrUnexpectedValidators indicates a validator set mismatch.
+func ErrUnexpectedValidators(got, want []byte) error {
+	return cmn.ErrorWrap(errUnexpectedValidators{
+		got:  got,
+		want: want,
+	}, "")
 }
 
-//--------------------------------------------
+func IsErrUnexpectedValidators(err error) bool {
+	if err_, ok := err.(cmn.Error); ok {
+		_, ok := err_.Data().(errUnexpectedValidators)
+		return ok
+	}
+	return false
+}
 
-type errHeightMismatch struct {
-	h1, h2 int64
+//-----------------
+// ErrTooMuchChange
+
+// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
+func ErrTooMuchChange() error {
+	return cmn.ErrorWrap(errTooMuchChange{}, "")
 }
 
-func (e errHeightMismatch) Error() string {
-	return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2)
+func IsErrTooMuchChange(err error) bool {
+	if err_, ok := err.(cmn.Error); ok {
+		_, ok := err_.Data().(errTooMuchChange)
+		return ok
+	}
+	return false
 }
 
-// IsHeightMismatchErr checks whether an error is due to data from different blocks
-func IsHeightMismatchErr(err error) bool {
-	if err == nil {
-		return false
+//-----------------
+// ErrUnknownValidators
+
+// ErrUnknownValidators indicates that some validator set was missing or unknown.
+func ErrUnknownValidators(chainID string, height int64) error { + return cmn.ErrorWrap(errUnknownValidators{chainID, height}, "") +} + +func IsErrUnknownValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errUnknownValidators) + return ok } - _, ok := errors.Cause(err).(errHeightMismatch) - return ok + return false +} + +//----------------- +// ErrEmptyTree + +func ErrEmptyTree() error { + return cmn.ErrorWrap(errEmptyTree{}, "") } -// ErrHeightMismatch returns an mismatch error with stack-trace -func ErrHeightMismatch(h1, h2 int64) error { - return errors.WithStack(errHeightMismatch{h1, h2}) +func IsErrEmptyTree(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errEmptyTree) + return ok + } + return false } diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go deleted file mode 100644 index 479215e476a..00000000000 --- a/lite/errors/errors_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorHeight(t *testing.T) { - e1 := ErrHeightMismatch(2, 3) - e1.Error() - assert.True(t, IsHeightMismatchErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsHeightMismatchErr(e2)) - assert.False(t, IsHeightMismatchErr(nil)) -} diff --git a/lite/files/commit.go b/lite/files/commit.go deleted file mode 100644 index 8a7e4721e12..00000000000 --- a/lite/files/commit.go +++ /dev/null @@ -1,93 +0,0 @@ -package files - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -const ( - // MaxFullCommitSize is the maximum number of bytes we will - // read in for a full commit to avoid excessive allocations - // in the deserializer - MaxFullCommitSize = 1024 * 1024 -) - -// SaveFullCommit exports the seed in binary / go-amino style -func SaveFullCommit(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.MarshalBinaryWriter(f, fc) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - bz, err := cdc.MarshalJSON(fc) - if err != nil { - return errors.WithStack(err) - } - _, err = f.Write(bz) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// LoadFullCommit loads the full commit from the file system. -func LoadFullCommit(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.UnmarshalBinaryReader(f, &fc, 0) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} - -// LoadFullCommitJSON loads the commit from the file system in JSON format. 
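Stepping back to the reworked lite/errors above: each typed cause (e.g. errCommitNotFound{}) is paired with a constructor that wraps it via cmn.ErrorWrap and an IsErr* predicate that type-asserts on Data(). As a rough, self-contained sketch of the same match-on-type idea using only the standard library (hypothetical names; errors.As and %w wrapping stand in for cmn.Error and Data()):

```go
package main

import (
	"errors"
	"fmt"
)

// errCommitNotFound carries its identity in the type, so callers match on
// the type rather than comparing error strings.
type errCommitNotFound struct{}

func (errCommitNotFound) Error() string { return "Commit not found by provider" }

// ErrCommitNotFound attaches call-site context around the typed cause,
// much as the real constructors wrap it with cmn.ErrorWrap.
func ErrCommitNotFound(height int64) error {
	return fmt.Errorf("height %d: %w", height, errCommitNotFound{})
}

// IsErrCommitNotFound reports whether errCommitNotFound is in err's chain,
// playing the role of the Data() type assertion in the real predicates.
func IsErrCommitNotFound(err error) bool {
	return errors.As(err, &errCommitNotFound{})
}

func main() {
	err := ErrCommitNotFound(42)
	fmt.Println(IsErrCommitNotFound(err))                 // true
	fmt.Println(IsErrCommitNotFound(errors.New("other"))) // false
}
```

Matching on the cause's type rather than its message keeps the check stable even when call-site context is prepended to the error.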
-func LoadFullCommitJSON(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - bz, err := ioutil.ReadAll(f) - if err != nil { - return fc, errors.WithStack(err) - } - err = cdc.UnmarshalJSON(bz, &fc) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go deleted file mode 100644 index 2891e58091f..00000000000 --- a/lite/files/commit_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package files - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cmn "github.com/tendermint/tendermint/libs/common" - - "github.com/tendermint/tendermint/lite" -) - -func tmpFile() string { - suffix := cmn.RandStr(16) - return filepath.Join(os.TempDir(), "fc-test-"+suffix) -} - -func TestSerializeFullCommits(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // some constants - appHash := []byte("some crazy thing") - chainID := "ser-ial" - h := int64(25) - - // build a fc - keys := lite.GenValKeys(5) - vals := keys.ToValidators(10, 0) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - - require.Equal(h, fc.Height()) - require.Equal(vals.Hash(), fc.ValidatorsHash()) - - // try read/write with json - jfile := tmpFile() - defer os.Remove(jfile) - jseed, err := LoadFullCommitJSON(jfile) - assert.NotNil(err) - err = SaveFullCommitJSON(fc, jfile) - require.Nil(err) - jseed, err = LoadFullCommitJSON(jfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, jseed.Height()) - assert.Equal(vals.Hash(), jseed.ValidatorsHash()) - - // try read/write with binary - bfile := tmpFile() - defer os.Remove(bfile) - bseed, err := LoadFullCommit(bfile) - assert.NotNil(err) - err = SaveFullCommit(fc, bfile) - require.Nil(err) - bseed, err = LoadFullCommit(bfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, bseed.Height()) - assert.Equal(vals.Hash(), bseed.ValidatorsHash()) - - // make sure they don't read the other format (different) - _, err = LoadFullCommit(jfile) - assert.NotNil(err) - _, err = LoadFullCommitJSON(bfile) - assert.NotNil(err) -} diff --git a/lite/files/provider.go b/lite/files/provider.go deleted file mode 100644 index 327b0331afc..00000000000 --- a/lite/files/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package files defines a Provider that stores all data in the filesystem - -We assume the same validator hash may be reused by many different -headers/Commits, and thus store it separately. This leaves us -with three issues: - - 1. Given a validator hash, retrieve the validator set if previously stored - 2. Given a block height, find the Commit with the highest height <= h - 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 - -Note that we do not worry about caching, as that can be achieved by -pairing this with a MemStoreProvider and CacheProvider from certifiers -*/ -package files - -import ( - "encoding/hex" - "fmt" - "math" - "os" - "path/filepath" - "sort" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// nolint -const ( - Ext = ".tsd" - ValDir = "validators" - CheckDir = "checkpoints" - dirPerm = os.FileMode(0755) - //filePerm = os.FileMode(0644) -) - -type provider struct { - valDir string - checkDir string -} - -// NewProvider creates the parent dir and subdirs -// for validators and checkpoints as needed -func NewProvider(dir string) lite.Provider { - valDir := filepath.Join(dir, ValDir) - checkDir := filepath.Join(dir, CheckDir) - for _, d := range []string{valDir, checkDir} { - err := os.MkdirAll(d, dirPerm) - if err != nil { - panic(err) - } - } - return &provider{valDir: valDir, checkDir: checkDir} -} - -func (p *provider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) + Ext -} - -func (p *provider) encodeHeight(h int64) string { - // pad up to 10^12 for height... - return fmt.Sprintf("%012d%s", h, Ext) -} - -// StoreCommit saves a full commit after it has been verified. -func (p *provider) StoreCommit(fc lite.FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - paths := []string{ - filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), - filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), - } - for _, path := range paths { - err := SaveFullCommit(fc, path) - // unknown error in creating or writing immediately breaks - if err != nil { - return err - } - } - return nil -} - -// GetByHeight returns the closest commit with height <= h. -func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { - // first we look for exact match, then search... - path := filepath.Join(p.checkDir, p.encodeHeight(h)) - fc, err := LoadFullCommit(path) - if liteErr.IsCommitNotFoundErr(err) { - path, err = p.searchForHeight(h) - if err == nil { - fc, err = LoadFullCommit(path) - } - } - return fc, err -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - // Note to future: please update by 2077 to avoid rollover - return p.GetByHeight(math.MaxInt32 - 1) -} - -// search for height, looks for a file with highest height < h -// return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int64) (string, error) { - d, err := os.Open(p.checkDir) - if err != nil { - return "", errors.WithStack(err) - } - files, err := d.Readdirnames(0) - - d.Close() - if err != nil { - return "", errors.WithStack(err) - } - - desired := p.encodeHeight(h) - sort.Strings(files) - i := sort.SearchStrings(files, desired) - if i == 0 { - return "", liteErr.ErrCommitNotFound() - } - found := files[i-1] - path := filepath.Join(p.checkDir, found) - return path, errors.WithStack(err) -} - -// GetByHash returns a commit exactly matching this validator hash. 
-func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - path := filepath.Join(p.valDir, p.encodeHash(hash)) - return LoadFullCommit(path) -} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go deleted file mode 100644 index 5deebb1a289..00000000000 --- a/lite/files/provider_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package files_test - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/lite/files" -) - -func checkEqual(stored, loaded lite.FullCommit, chainID string) error { - err := loaded.ValidateBasic(chainID) - if err != nil { - return err - } - if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { - return errors.New("Different block hashes") - } - return nil -} - -func TestFileProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - dir, err := ioutil.TempDir("", "fileprovider-test") - assert.Nil(err) - defer os.RemoveAll(dir) - p := files.NewProvider(dir) - - chainID := "test-files" - appHash := []byte("some-data") - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of seeds... - seeds := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two seeds for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - seeds[i] = lite.NewFullCommit(check, vals) - } - - // check provider is empty - seed, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - seed, err = p.GetByHash(seeds[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range seeds { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - // by height as well - s2, err = p.GetByHeight(s.Height()) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - } - - // make sure we get the last hash if we overstep - seed, err = p.GetByHeight(5000) - if assert.Nil(err, "%+v", err) { - assert.Equal(seeds[count-1].Height(), seed.Height()) - err = checkEqual(seeds[count-1], seed, chainID) - assert.Nil(err) - } - - // and middle ones as well - seed, err = p.GetByHeight(47) - if assert.Nil(err, "%+v", err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, seed.Height()) - } - - // and proper error for too low - _, err = p.GetByHeight(5) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) -} diff --git a/lite/files/wire.go b/lite/files/wire.go deleted file mode 100644 index e7864831e0a..00000000000 --- a/lite/files/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package files - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - types.RegisterBlockAmino(cdc) -} diff --git a/lite/helpers.go b/lite/helpers.go index 9f404f247a9..5177ee50b8f 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -1,29 +1,26 @@ package lite import ( - "time" - - crypto 
"github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) -// ValKeys is a helper for testing. +// privKeys is a helper type for testing. // -// It lets us simulate signing with many keys, either ed25519 or secp256k1. -// The main use case is to create a set, and call GenCommit -// to get properly signed header for testing. +// It lets us simulate signing with many keys. The main use case is to create +// a set, and call GenSignedHeader to get properly signed header for testing. // -// You can set different weights of validators each time you call -// ToValidators, and can optionally extend the validator set later -// with Extend or ExtendSecp -type ValKeys []crypto.PrivKey - -// GenValKeys produces an array of private keys to generate commits. -func GenValKeys(n int) ValKeys { - res := make(ValKeys, n) +// You can set different weights of validators each time you call ToValidators, +// and can optionally extend the validator set later with Extend. +type privKeys []crypto.PrivKey + +// genPrivKeys produces an array of private keys to generate commits. +func genPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = ed25519.GenPrivKey() } @@ -31,22 +28,22 @@ func GenValKeys(n int) ValKeys { } // Change replaces the key at index i. -func (v ValKeys) Change(i int) ValKeys { - res := make(ValKeys, len(v)) - copy(res, v) +func (pkz privKeys) Change(i int) privKeys { + res := make(privKeys, len(pkz)) + copy(res, pkz) res[i] = ed25519.GenPrivKey() return res } // Extend adds n more keys (to remove, just take a slice). -func (v ValKeys) Extend(n int) ValKeys { - extra := GenValKeys(n) - return append(v, extra...) +func (pkz privKeys) Extend(n int) privKeys { + extra := genPrivKeys(n) + return append(pkz, extra...) } -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. -func GenSecpValKeys(n int) ValKeys { - res := make(ValKeys, n) +// GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits. +func GenSecpPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = secp256k1.GenPrivKey() } @@ -54,33 +51,33 @@ func GenSecpValKeys(n int) ValKeys { } // ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (v ValKeys) ExtendSecp(n int) ValKeys { - extra := GenSecpValKeys(n) - return append(v, extra...) +func (pkz privKeys) ExtendSecp(n int) privKeys { + extra := GenSecpPrivKeys(n) + return append(pkz, extra...) } -// ToValidators produces a list of validators from the set of keys +// ToValidators produces a valset from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). -func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(v)) - for i, k := range v { +func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) } return types.NewValidatorSet(res) } // signHeader properly signs the header with all keys from first to last exclusive. 
-func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { - votes := make([]*types.Vote, len(v)) +func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(pkz)) - // we need this list to keep the ordering... - vset := v.ToValidators(1, 0) + // We need this list to keep the ordering. + vset := pkz.ToValidators(1, 0) - // fill in the votes we want - for i := first; i < last && i < len(v); i++ { - vote := makeVote(header, vset, v[i]) + // Fill in the votes we want. + for i := first; i < last && i < len(pkz); i++ { + vote := makeVote(header, vset, pkz[i]) votes[vote.ValidatorIndex] = vote } @@ -91,16 +88,16 @@ func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit return res } -func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { +func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote { addr := key.PubKey().Address() - idx, _ := vals.GetByAddress(addr) + idx, _ := valset.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: header.Height, Round: 1, - Timestamp: time.Now().UTC(), - Type: types.VoteTypePrecommit, + Timestamp: tmtime.Now(), + Type: types.PrecommitType, BlockID: types.BlockID{Hash: header.Hash()}, } // Sign it @@ -115,47 +112,46 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey return vote } -// Silences warning that vals can also be merkle.Hashable -// nolint: interfacer func genHeader(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, Height: height, - Time: time.Now(), + Time: tmtime.Now(), NumTxs: int64(len(txs)), TotalTxs: int64(len(txs)), // LastBlockID // LastCommitHash - ValidatorsHash: vals.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + ValidatorsHash: valset.Hash(), + NextValidatorsHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } -// GenCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit { +// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. +func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - check := Commit{ + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) + check := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } return check } -// GenFullCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { +// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. 
+func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - commit := Commit{ + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) + commit := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } - return NewFullCommit(commit, vals) + return NewFullCommit(commit, valset, nextValset) } diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go deleted file mode 100644 index 042bd08e3ec..00000000000 --- a/lite/inquiring_certifier.go +++ /dev/null @@ -1,163 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*InquiringCertifier)(nil) - -// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call -// to Certify fails due to a change it validator set, InquiringCertifier will try and find a -// previous FullCommit which it can use to safely update the validator set. It uses a source -// provider to obtain the needed FullCommits. It stores properly validated data on the local system. -type InquiringCertifier struct { - cert *DynamicCertifier - // These are only properly validated data, from local system - trusted Provider - // This is a source of new info, like a node rpc, or other import method - Source Provider -} - -// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store -// validated data and the source provider to obtain missing FullCommits. -// -// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source -// provider should be a client.HTTPProvider. -func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider, - source Provider) (*InquiringCertifier, error) { - - // store the data in trusted - err := trusted.StoreCommit(fc) - if err != nil { - return nil, err - } - - return &InquiringCertifier{ - cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()), - trusted: trusted, - Source: source, - }, nil -} - -// ChainID returns the chain id. -// Implements Certifier. -func (ic *InquiringCertifier) ChainID() string { - return ic.cert.ChainID() -} - -// Validators returns the validator set. -func (ic *InquiringCertifier) Validators() *types.ValidatorSet { - return ic.cert.cert.vSet -} - -// LastHeight returns the last height. -func (ic *InquiringCertifier) LastHeight() int64 { - return ic.cert.lastHeight -} - -// Certify makes sure this is checkpoint is valid. -// -// If the validators have changed since the last know time, it looks -// for a path to prove the new validators. -// -// On success, it will store the checkpoint in the store for later viewing -// Implements Certifier. 
-func (ic *InquiringCertifier) Certify(commit Commit) error { - err := ic.useClosestTrust(commit.Height()) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if !liteErr.IsValidatorsChangedErr(err) { - return err - } - err = ic.updateToHash(commit.Header.ValidatorsHash) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if err != nil { - return err - } - - // store the new checkpoint - return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators())) -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -func (ic *InquiringCertifier) Update(fc FullCommit) error { - err := ic.useClosestTrust(fc.Height()) - if err != nil { - return err - } - - err = ic.cert.Update(fc) - if err == nil { - err = ic.trusted.StoreCommit(fc) - } - return err -} - -func (ic *InquiringCertifier) useClosestTrust(h int64) error { - closest, err := ic.trusted.GetByHeight(h) - if err != nil { - return err - } - - // if the best seed is not the one we currently use, - // let's just reset the dynamic validator - if closest.Height() != ic.LastHeight() { - ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) - } - return nil -} - -// updateToHash gets the validator hash we want to update to -// if IsTooMuchChangeErr, we try to find a path by binary search over height -func (ic *InquiringCertifier) updateToHash(vhash []byte) error { - // try to get the match, and update - fc, err := ic.Source.GetByHash(vhash) - if err != nil { - return err - } - err = ic.cert.Update(fc) - // handle IsTooMuchChangeErr by using divide and conquer - if liteErr.IsTooMuchChangeErr(err) { - err = ic.updateToHeight(fc.Height()) - } - return err -} - -// updateToHeight will use divide-and-conquer to find a path to h -func (ic *InquiringCertifier) updateToHeight(h int64) error { - // try to update to this height (with checks) - fc, err := ic.Source.GetByHeight(h) - if err != nil { - return err - } - start, end := ic.LastHeight(), fc.Height() - if end <= start { - return liteErr.ErrNoPathFound() - } - err = ic.Update(fc) - - // we can handle IsTooMuchChangeErr specially - if !liteErr.IsTooMuchChangeErr(err) { - return err - } - - // try to update to mid - mid := (start + end) / 2 - err = ic.updateToHeight(mid) - if err != nil { - return err - } - - // if we made it to mid, we recurse - return ic.updateToHeight(h) -} diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go deleted file mode 100644 index db8160bdc8d..00000000000 --- a/lite/inquiring_certifier_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// nolint: vetshadow -package lite_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" -) - -func TestInquirerValidPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - consHash := []byte("params") - resHash := []byte("results") - count := 50 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - 
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - require.Nil(err) - - // this should fail validation.... - commit := commits[count-1].Commit - err = cert.Certify(commit) - require.NotNil(err) - - // adding a few commits in the middle should be insufficient - for i := 10; i < 13; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerMinimalPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "minimal-path" - consHash := []byte("other-params") - count := 12 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the validators, so we are just below 2/3 - keys = keys.Extend(len(keys)/2 - 1) - vals := keys.ToValidators(vote, 0) - h := int64(5 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // this should fail validation.... 
- commit := commits[count-1].Commit - err := cert.Certify(commit) - require.NotNil(err) - - // add a few seed in the middle should be insufficient - for i := 5; i < 8; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerVerifyHistorical(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - count := 10 - consHash := []byte("special-params") - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // store a few commits as trust - for _, i := range []int{2, 5} { - trust.StoreCommit(commits[i]) - } - - // let's see if we can jump forward using trusted commits - err := source.StoreCommit(commits[7]) - require.Nil(err, "%+v", err) - check := commits[7].Commit - err = cert.Certify(check) - require.Nil(err, "%+v", err) - assert.Equal(check.Height(), cert.LastHeight()) - - // add access to all commits via untrusted source - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - - // try to check an unknown seed in the past - mid := commits[3].Commit - err = cert.Certify(mid) - require.Nil(err, "%+v", err) - assert.Equal(mid.Height(), cert.LastHeight()) - - // and jump all the way forward again - end := commits[count-1].Commit - err = cert.Certify(end) - require.Nil(err, "%+v", err) - assert.Equal(end.Height(), cert.LastHeight()) -} diff --git a/lite/memprovider.go b/lite/memprovider.go deleted file mode 100644 index ac0d832156d..00000000000 --- a/lite/memprovider.go +++ /dev/null @@ -1,152 +0,0 @@ -package lite - -import ( - "encoding/hex" - "sort" - "sync" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -type memStoreProvider struct { - mtx sync.RWMutex - // byHeight is always sorted by Height... need to support range search (nil, h] - // btree would be more efficient for larger sets - byHeight fullCommits - byHash map[string]FullCommit - - sorted bool -} - -// fullCommits just exists to allow easy sorting -type fullCommits []FullCommit - -func (s fullCommits) Len() int { return len(s) } -func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s fullCommits) Less(i, j int) bool { - return s[i].Height() < s[j].Height() -} - -// NewMemStoreProvider returns a new in-memory provider. -func NewMemStoreProvider() Provider { - return &memStoreProvider{ - byHeight: fullCommits{}, - byHash: map[string]FullCommit{}, - } -} - -func (m *memStoreProvider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) -} - -// StoreCommit stores a FullCommit after verifying it. 
-func (m *memStoreProvider) StoreCommit(fc FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - // store the valid fc - key := m.encodeHash(fc.ValidatorsHash()) - - m.mtx.Lock() - defer m.mtx.Unlock() - m.byHash[key] = fc - m.byHeight = append(m.byHeight, fc) - m.sorted = false - return nil -} - -// GetByHeight returns the FullCommit for height h or an error if the commit is not found. -func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { - // By heuristics, GetByHeight with linearsearch is fast enough - // for about 50 keys but after that, it needs binary search. - // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 - m.mtx.RLock() - n := len(m.byHeight) - m.mtx.RUnlock() - - if n <= 50 { - return m.getByHeightLinearSearch(h) - } - return m.getByHeightBinarySearch(h) -} - -func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { - if !m.sorted { - sort.Sort(m.byHeight) - m.sorted = true - } -} - -func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - // search from highest to lowest - for i := len(m.byHeight) - 1; i >= 0; i-- { - if fc := m.byHeight[i]; fc.Height() <= h { - return fc, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - low, high := 0, len(m.byHeight)-1 - var mid int - var hmid int64 - var midFC FullCommit - // Our goal is to either find: - // * item ByHeight with the query - // * greatest height with a height <= query - for low <= high { - mid = int(uint(low+high) >> 1) // Avoid an overflow - midFC = m.byHeight[mid] - hmid = midFC.Height() - switch { - case hmid == h: - return midFC, nil - case hmid < h: - low = mid + 1 - case hmid > h: - high = mid - 1 - } - } - - if high >= 0 { - if highFC := m.byHeight[high]; highFC.Height() < h { - return highFC, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - fc, ok := m.byHash[m.encodeHash(hash)] - if !ok { - return fc, liteErr.ErrCommitNotFound() - } - return fc, nil -} - -// LatestCommit returns the latest FullCommit or an error if no commits exist. -func (m *memStoreProvider) LatestCommit() (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - l := len(m.byHeight) - if l == 0 { - return FullCommit{}, liteErr.ErrCommitNotFound() - } - m.sortByHeightIfNecessaryLocked() - return m.byHeight[l-1], nil -} diff --git a/lite/multiprovider.go b/lite/multiprovider.go new file mode 100644 index 00000000000..734d042c4b9 --- /dev/null +++ b/lite/multiprovider.go @@ -0,0 +1,83 @@ +package lite + +import ( + log "github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +// multiProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +type multiProvider struct { + logger log.Logger + providers []PersistentProvider +} + +// NewMultiProvider returns a new provider which wraps multiple other providers. 
+func NewMultiProvider(providers ...PersistentProvider) *multiProvider { + return &multiProvider{ + logger: log.NewNopLogger(), + providers: providers, + } +} + +// SetLogger sets logger on self and all subproviders. +func (mc *multiProvider) SetLogger(logger log.Logger) { + mc.logger = logger + for _, p := range mc.providers { + p.SetLogger(logger) + } +} + +// SaveFullCommit saves on all providers, and aborts on the first error. +func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) { + for _, p := range mc.providers { + err = p.SaveFullCommit(fc) + if err != nil { + return + } + } + return +} + +// LatestFullCommit loads the latest from all providers and provides +// the latest FullCommit that satisfies the conditions. +// Returns the first error encountered. +func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { + for _, p := range mc.providers { + var fc_ FullCommit + fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) + if lerr.IsErrCommitNotFound(err) { + err = nil + continue + } else if err != nil { + return + } + if fc == (FullCommit{}) { + fc = fc_ + } else if fc_.Height() > fc.Height() { + fc = fc_ + } + if fc.Height() == maxHeight { + return + } + } + if fc == (FullCommit{}) { + err = lerr.ErrCommitNotFound() + return + } + return +} + +// ValidatorSet returns validator set at height as provided by the first +// provider which has it, or an error otherwise. +func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + for _, p := range mc.providers { + valset, err = p.ValidatorSet(chainID, height) + if err == nil { + // TODO Log unexpected types of errors. + return valset, nil + } + } + return nil, lerr.ErrUnknownValidators(chainID, height) +} diff --git a/lite/performance_test.go b/lite/performance_test.go deleted file mode 100644 index 3b805a41855..00000000000 --- a/lite/performance_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package lite - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tendermint/libs/common" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - - // Store a bunch of commits at specific heights - // and then ensure that: - // * getByHeightLinearSearch - // * getByHeightBinarySearch - // both return the exact same result - - // 1. Non-existent height commits - nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} - ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) - ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) - - // 2. Save some known height commits - knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} - createAndStoreCommits(t, p, knownHeights) - - // 3. Now check if those heights are retrieved - ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) - ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) - - // 4. And now for the height probing to ensure that any height - // requested returns a fullCommit of height <= requestedHeight. 
- comparegetByHeightAlgorithms(t, p, 0, 0) - comparegetByHeightAlgorithms(t, p, 1, 1) - comparegetByHeightAlgorithms(t, p, 2, 1) - comparegetByHeightAlgorithms(t, p, 5, 1) - comparegetByHeightAlgorithms(t, p, 7, 7) - comparegetByHeightAlgorithms(t, p, 10, 9) - comparegetByHeightAlgorithms(t, p, 12, 12) - comparegetByHeightAlgorithms(t, p, 14, 13) - comparegetByHeightAlgorithms(t, p, 19, 18) - comparegetByHeightAlgorithms(t, p, 43, 23) - comparegetByHeightAlgorithms(t, p, 45, 44) - comparegetByHeightAlgorithms(t, p, 1025, 1024) - comparegetByHeightAlgorithms(t, p, 101, 100) - comparegetByHeightAlgorithms(t, p, 1e3, 199) - comparegetByHeightAlgorithms(t, p, 1e4, 1024) - comparegetByHeightAlgorithms(t, p, 1e9, 1e9) - comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) -} - -func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { - chainID := "cache-best-height-binary-and-linear" - appHash := []byte("0xdeadbeef") - keys := GenValKeys(len(heights) / 2) - - for _, h := range heights { - vals := keys.ToValidators(10, int64(len(heights)/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} - -func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { - algos := map[string]func(int64) (FullCommit, error){ - "getHeightByLinearSearch": p.getByHeightLinearSearch, - "getHeightByBinarySearch": p.getByHeightBinarySearch, - } - - for algo, fn := range algos { - fc, err := fn(ask) - // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) - require.Nil(t, err, "%s: %+v", algo, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "%s: %+v", algo, err) - } - } -} - -var blankFullCommit FullCommit - -func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) - assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) - } -} - -func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) - assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) - } -} - -func BenchmarkGenCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkGenCommit(b, keys) -} - -func benchmarkGenCommit(b *testing.B, keys ValKeys) { - chainID := fmt.Sprintf("bench-%d", len(keys)) - vals := keys.ToValidators(20, 10) - for i := 0; i < b.N; i++ { - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) - } -} - -// this benchmarks generating one key -func BenchmarkGenValKeys(b *testing.B) { - keys := GenValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} 
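The deleted memStoreProvider tests above pin down the lookup contract: any queried height returns the stored full commit with the greatest height at or below the query. For reference, the same "latest at or below h" lookup can be written compactly with the standard library's sort.Search; this is an illustrative sketch over bare heights, not the provider code:

```go
package main

import (
	"fmt"
	"sort"
)

// latestAtOrBelow returns the greatest element of heights (sorted ascending)
// that is <= h, mirroring the contract exercised by the tests above.
func latestAtOrBelow(heights []int64, h int64) (int64, bool) {
	// sort.Search finds the first index whose height exceeds h; the answer,
	// if any, sits immediately before that index.
	i := sort.Search(len(heights), func(i int) bool { return heights[i] > h })
	if i == 0 {
		return 0, false // everything stored is above h
	}
	return heights[i-1], true
}

func main() {
	heights := []int64{1, 7, 9, 12, 13, 18}
	for _, q := range []int64{0, 9, 10, 100} {
		got, ok := latestAtOrBelow(heights, q)
		fmt.Println(q, got, ok) // 0: miss, 9 -> 9, 10 -> 9, 100 -> 18
	}
}
```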
- -// this benchmarks generating one key -func BenchmarkGenSecpValKeys(b *testing.B) { - keys := GenSecpValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} - -func BenchmarkToValidators20(b *testing.B) { - benchmarkToValidators(b, 20) -} - -func BenchmarkToValidators100(b *testing.B) { - benchmarkToValidators(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidators(b *testing.B, nodes int) { - keys := GenValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkToValidatorsSec100(b *testing.B) { - benchmarkToValidatorsSec(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := GenSecpValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkCertifyCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { - chainID := "bench-certify" - vals := keys.ToValidators(20, 10) - cert := NewStaticCertifier(chainID, vals) - check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) - for i := 0; i < b.N; i++ { - err := cert.Certify(check) - if err != nil { - panic(err) - } - } - -} - -type algo bool - -const ( - linearSearch = true - binarySearch = false -) - -// Lazy load the commits -var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit -var h5, h50, h100, h500, h1000 []int64 -var commitsOnce sync.Once - -func lazyGenerateFullCommits(b *testing.B) { - b.Logf("Generating FullCommits") - commitsOnce.Do(func() { - fcs5, h5 = genFullCommits(nil, nil, 5) - b.Logf("Generated 5 FullCommits") - fcs50, h50 = genFullCommits(fcs5, h5, 50) - b.Logf("Generated 50 FullCommits") - fcs100, h100 = genFullCommits(fcs50, h50, 100) - b.Logf("Generated 100 FullCommits") - fcs500, h500 = genFullCommits(fcs100, h100, 500) - b.Logf("Generated 500 FullCommits") - fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) - b.Logf("Generated 1000 FullCommits") - }) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) -} 
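The benchmarks being removed here exercised those same height lookups. The other recursive piece deleted in this diff is InquiringCertifier.updateToHeight, which bisected the height range whenever a direct hop failed because the validator set had changed too much; the new DynamicVerifier tests earlier in the diff exercise the same behavior. A minimal sketch of that divide-and-conquer walk, with a toy tryUpdate standing in for real signature verification (all names hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

var errTooMuchChange = errors.New("insufficient signatures to validate due to valset changes")

// updateToHeight sketches the divide-and-conquer walk: try to hop from the
// trusted height straight to target; if the validator set changed too much,
// secure the midpoint first, then retry the rest of the hop from there.
func updateToHeight(trusted *int64, target int64, tryUpdate func(from, to int64) error) error {
	if target <= *trusted {
		return errors.New("no path found")
	}
	err := tryUpdate(*trusted, target)
	if err == nil {
		*trusted = target // the hop verified; advance trust
		return nil
	}
	if !errors.Is(err, errTooMuchChange) {
		return err // only bisect on "too much change"
	}
	mid := (*trusted + target) / 2
	if err := updateToHeight(trusted, mid, tryUpdate); err != nil {
		return err
	}
	return updateToHeight(trusted, target, tryUpdate)
}

func main() {
	trusted := int64(1)
	// Toy rule: any single hop longer than 3 heights loses too much of the valset.
	try := func(from, to int64) error {
		if to-from > 3 {
			return errTooMuchChange
		}
		return nil
	}
	err := updateToHeight(&trusted, 50, try)
	fmt.Println(err, trusted) // <nil> 50
}
```

Each recursion halves the remaining gap, so the walk terminates as long as single-height hops are verifiable.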
- -func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) -} - -var rng = cmn.NewRand() - -func init() { - rng.Seed(10) -} - -func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { - lazyGenerateFullCommits(b) - - b.StopTimer() - mp := NewMemStoreProvider() - for i, fc := range fcs { - if err := mp.StoreCommit(fc); err != nil { - b.Fatalf("FullCommit #%d: err: %v", i, err) - } - } - qHeights := make([]int64, len(fHeights)) - copy(qHeights, fHeights) - // Append some non-existent heights to trigger the worst cases. - qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) - - memP := mp.(*memStoreProvider) - searchFn := memP.getByHeightLinearSearch - if algo == binarySearch { // nolint - searchFn = memP.getByHeightBinarySearch - } - - hPerm := rng.Perm(len(qHeights)) - b.StartTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, j := range hPerm { - h := qHeights[j] - if _, err := searchFn(h); err != nil { - } - } - } - b.ReportAllocs() -} - -func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { - fcs := make([]FullCommit, len(prevFC)) - copy(fcs, prevFC) - heights := make([]int64, len(prevH)) - copy(heights, prevH) - - appHash := []byte("benchmarks") - chainID := "benchmarks-gen-full-commits" - n := want - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - vals := keys.ToValidators(10, int64(n/2)) - h := int64(20 + 10*i) - fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) - heights = append(heights, h) - } - return fcs, heights -} - -func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - // 1. With no commits yet stored, it should return ErrCommitNotFound - got, err := p.LatestCommit() - require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") - require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") - - // 2. Generate some full commits now and we'll add them unsorted. - genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) - fc, err := p.LatestCommit() - require.Nil(t, err, "with commits saved no error expected") - require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") - require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. 
the largest expected")
-}
-
-func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) {
-	n := len(heights)
-	appHash := []byte("tests")
-	chainID := "tests-gen-full-commits"
-	keys := GenValKeys(2 + (n / 3))
-	for i := 0; i < n; i++ {
-		h := heights[i]
-		vals := keys.ToValidators(10, int64(n/2))
-		fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)
-		err := p.StoreCommit(fc)
-		require.NoError(t, err, "StoreCommit height=%d", h)
-	}
-}
diff --git a/lite/provider.go b/lite/provider.go
index 22dc964a1a0..97e06a06d3b 100644
--- a/lite/provider.go
+++ b/lite/provider.go
@@ -1,103 +1,32 @@
 package lite
 
-// Provider is used to get more validators by other means.
-//
-// Examples: MemProvider, files.Provider, client.Provider, CacheProvider....
+import (
+	log "github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/types"
+)
+
+// Provider provides information for the lite client to sync validators.
+// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.
 type Provider interface {
-	// StoreCommit saves a FullCommit after we have verified it,
-	// so we can query for it later. Important for updating our
-	// store of trusted commits.
-	StoreCommit(fc FullCommit) error
-	// GetByHeight returns the closest commit with height <= h.
-	GetByHeight(h int64) (FullCommit, error)
-	// GetByHash returns a commit exactly matching this validator hash.
-	GetByHash(hash []byte) (FullCommit, error)
-	// LatestCommit returns the newest commit stored.
-	LatestCommit() (FullCommit, error)
-}
 
-// cacheProvider allows you to place one or more caches in front of a source
-// Provider. It runs through them in order until a match is found.
-// So you can keep a local cache, and check with the network if
-// no data is there.
-type cacheProvider struct {
-	Providers []Provider
-}
+	// LatestFullCommit returns the latest commit with minHeight <= height <=
+	// maxHeight.
+	// If maxHeight is zero, returns the latest where minHeight <= height.
+	LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
 
-// NewCacheProvider returns a new provider which wraps multiple other providers.
-func NewCacheProvider(providers ...Provider) Provider {
-	return cacheProvider{
-		Providers: providers,
-	}
-}
+	// ValidatorSet returns the validator set that corresponds to chainID and
+	// height. Height must be >= 1.
+	ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error)
 
-// StoreCommit tries to add the seed to all providers.
-//
-// Aborts on first error it encounters (closest provider)
-func (c cacheProvider) StoreCommit(fc FullCommit) (err error) {
-	for _, p := range c.Providers {
-		err = p.StoreCommit(fc)
-		if err != nil {
-			break
-		}
-	}
-	return err
+	// SetLogger sets a logger.
+	SetLogger(logger log.Logger)
 }
 
-// GetByHeight should return the closest possible match from all providers.
-//
-// The Cache is usually organized in order from cheapest call (memory)
-// to most expensive calls (disk/network). However, since GetByHeight returns
-// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would
-// give us the exact match, a naive "stop at first non-error" would hide
-// the actual desired results.
-//
-// Thus, we query each provider in order until we find an exact match
-// or we finished querying them all. If at least one returned a non-error,
-// then this returns the best match (minimum h-h').
-func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) {
-	for _, p := range c.Providers {
-		var tfc FullCommit
-		tfc, err = p.GetByHeight(h)
-		if err == nil {
-			if tfc.Height() > fc.Height() {
-				fc = tfc
-			}
-			if tfc.Height() == h {
-				break
-			}
-		}
-	}
-	// even if the last one had an error, if any was a match, this is good
-	if fc.Height() > 0 {
-		err = nil
-	}
-	return fc, err
-}
-
-// GetByHash returns the FullCommit for the hash or an error if the commit is not found.
-func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) {
-	for _, p := range c.Providers {
-		fc, err = p.GetByHash(hash)
-		if err == nil {
-			break
-		}
-	}
-	return fc, err
-}
+// PersistentProvider is a Provider that can also persist new information.
+// Examples: MemProvider, files.Provider, CacheProvider.
+type PersistentProvider interface {
+	Provider
 
-// LatestCommit returns the latest FullCommit or an error if no commit exists.
-func (c cacheProvider) LatestCommit() (fc FullCommit, err error) {
-	for _, p := range c.Providers {
-		var tfc FullCommit
-		tfc, err = p.LatestCommit()
-		if err == nil && tfc.Height() > fc.Height() {
-			fc = tfc
-		}
-	}
-	// even if the last one had an error, if any was a match, this is good
-	if fc.Height() > 0 {
-		err = nil
-	}
-	return fc, err
+	// SaveFullCommit saves a FullCommit (without verification).
+	SaveFullCommit(fc FullCommit) error
 }
diff --git a/lite/provider_test.go b/lite/provider_test.go
index 77b5b1a858f..94b467de812 100644
--- a/lite/provider_test.go
+++ b/lite/provider_test.go
@@ -1,98 +1,90 @@
-// nolint: vetshadow
-package lite_test
+package lite
 
 import (
+	"errors"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	"github.com/tendermint/tendermint/lite"
-	liteErr "github.com/tendermint/tendermint/lite/errors"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	log "github.com/tendermint/tendermint/libs/log"
+	lerr "github.com/tendermint/tendermint/lite/errors"
+	"github.com/tendermint/tendermint/types"
 )
 
-// missingProvider doesn't store anything, always a miss
-// Designed as a mock for testing
+// missingProvider doesn't store anything, always a miss.
+// Designed as a mock for testing.
 type missingProvider struct{}
 
 // NewMissingProvider returns a provider which does not store anything and always misses.
-func NewMissingProvider() lite.Provider { +func NewMissingProvider() PersistentProvider { return missingProvider{} } -func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } -func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) SaveFullCommit(FullCommit) error { return nil } +func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { + return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { + return nil, errors.New("missing validator set") } +func (missingProvider) SetLogger(_ log.Logger) {} func TestMemProvider(t *testing.T) { - p := lite.NewMemStoreProvider() + p := NewDBProvider("mem", dbm.NewMemDB()) checkProvider(t, p, "test-mem", "empty") } -func TestCacheProvider(t *testing.T) { - p := lite.NewCacheProvider( +func TestMultiProvider(t *testing.T) { + p := NewMultiProvider( NewMissingProvider(), - lite.NewMemStoreProvider(), + NewDBProvider("mem", dbm.NewMemDB()), NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") } -func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { +func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { assert, require := assert.New(t), require.New(t) appHash := []byte(app) - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // make a bunch of commits... - commits := make([]lite.FullCommit, count) + // Make a bunch of full commits. + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // two commits for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... vals := keys.ToValidators(10, int64(count/2)) h := int64(20 + 10*i) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } - // check provider is empty - fc, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - fc, err = p.GetByHash(commits[3].ValidatorsHash()) + // Check that provider is initially empty. + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) + assert.True(lerr.IsErrCommitNotFound(err)) - // now add them all to the provider - for _, s := range commits { - err = p.StoreCommit(s) + // Save all full commits to the provider. + for _, fc := range fcz { + err = p.SaveFullCommit(fc) require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - assert.Equal(s, s2) - // by height as well - s2, err = p.GetByHeight(s.Height()) + // Make sure we can get it back. + fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) - assert.Equal(s, s2) + assert.Equal(fc.SignedHeader, fc2.SignedHeader) + assert.Equal(fc.Validators, fc2.Validators) + assert.Equal(fc.NextValidators, fc2.NextValidators) } - // make sure we get the last hash if we overstep - fc, err = p.GetByHeight(5000) + // Make sure we get the last hash if we overstep. 
+ fc, err = p.LatestFullCommit(chainID, 1, 5000) if assert.Nil(err) { - assert.Equal(commits[count-1].Height(), fc.Height()) - assert.Equal(commits[count-1], fc) + assert.Equal(fcz[count-1].Height(), fc.Height()) + assert.Equal(fcz[count-1], fc) } - // and middle ones as well - fc, err = p.GetByHeight(47) + // ... and middle ones as well. + fc, err = p.LatestFullCommit(chainID, 1, 47) if assert.Nil(err) { // we only step by 10, so 40 must be the one below this assert.EqualValues(40, fc.Height()) @@ -100,50 +92,49 @@ func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { } -// this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { - fc, err := p.GetByHeight(ask) - require.Nil(t, err, "GetByHeight") +// This will make a get height, and if it is good, set the data as well. +func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) { + fc, err := p.LatestFullCommit(chainID, 1, ask) + require.Nil(t, err) if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "StoreCommit") + err = p.SaveFullCommit(fc) + require.Nil(t, err) } } -func TestCacheGetsBestHeight(t *testing.T) { - // assert, require := assert.New(t), require.New(t) +func TestMultiLatestFullCommit(t *testing.T) { require := require.New(t) - // we will write data to the second level of the cache (p2), - // and see what gets cached, stored in - p := lite.NewMemStoreProvider() - p2 := lite.NewMemStoreProvider() - cp := lite.NewCacheProvider(p, p2) + // We will write data to the second level of the cache (p2), and see what + // gets cached/stored in. + p := NewDBProvider("mem1", dbm.NewMemDB()) + p2 := NewDBProvider("mem2", dbm.NewMemDB()) + cp := NewMultiProvider(p, p2) chainID := "cache-best-height" appHash := []byte("01234567") - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // set a bunch of commits + // Set a bunch of full commits. for i := 0; i < count; i++ { vals := keys.ToValidators(10, int64(count/2)) h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.StoreCommit(fc) + fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p2.SaveFullCommit(fc) require.NoError(err) } - // let's get a few heights from the cache and set them proper - checkGetHeight(t, cp, 57, 50) - checkGetHeight(t, cp, 33, 30) + // Get a few heights from the cache and set them proper. 
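The layering exercised by this test replaces the old cache provider. A sketch of constructing the same stack directly (the helper name and path are illustrative, not part of this change):

```go
package example

import (
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/lite"
)

// newLayeredTrust layers a cheap in-memory provider in front of a persistent
// LevelDB one. Lookups try each layer in order; per the test above, a commit
// found in a deeper layer can be saved back so the front layer warms up.
func newLayeredTrust(rootDir string) lite.PersistentProvider {
	return lite.NewMultiProvider(
		lite.NewDBProvider("mem", dbm.NewMemDB()),
		lite.NewDBProvider("lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)),
	)
}
```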
+ checkLatestFullCommit(t, cp, chainID, 57, 50) + checkLatestFullCommit(t, cp, chainID, 33, 30) // make sure they are set in p as well (but nothing else) - checkGetHeight(t, p, 44, 30) - checkGetHeight(t, p, 50, 50) - checkGetHeight(t, p, 99, 50) + checkLatestFullCommit(t, p, chainID, 44, 30) + checkLatestFullCommit(t, p, chainID, 50, 50) + checkLatestFullCommit(t, p, chainID, 99, 50) // now, query the cache for a higher value - checkGetHeight(t, p2, 99, 90) - checkGetHeight(t, cp, 99, 90) + checkLatestFullCommit(t, p2, chainID, 99, 90) + checkLatestFullCommit(t, cp, chainID, 99, 90) } diff --git a/lite/proxy/block.go b/lite/proxy/block.go index 00b8c87fb05..035871a1185 100644 --- a/lite/proxy/block.go +++ b/lite/proxy/block.go @@ -2,27 +2,24 @@ package proxy import ( "bytes" + "errors" - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - certerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) -func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { +func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil BlockMeta") } // TODO: check the BlockID?? - return ValidateHeader(&meta.Header, check) + return ValidateHeader(&meta.Header, sh) } -func ValidateBlock(meta *types.Block, check lite.Commit) error { +func ValidateBlock(meta *types.Block, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil Block") } - err := ValidateHeader(&meta.Header, check) + err := ValidateHeader(&meta.Header, sh) if err != nil { return err } @@ -32,17 +29,19 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error { return nil } -func ValidateHeader(head *types.Header, check lite.Commit) error { +func ValidateHeader(head *types.Header, sh types.SignedHeader) error { if head == nil { return errors.New("expecting a non-nil Header") } - // make sure they are for the same height (obvious fail) - if head.Height != check.Height() { - return certerr.ErrHeightMismatch(head.Height, check.Height()) + if sh.Header == nil { + return errors.New("unexpected empty SignedHeader") + } + // Make sure they are for the same height (obvious fail). + if head.Height != sh.Height { + return errors.New("Header heights mismatched") } - // check if they are equal by using hashes - chead := check.Header - if !bytes.Equal(head.Hash(), chead.Hash()) { + // Check if they are equal by using hashes. 
+ if !bytes.Equal(head.Hash(), sh.Hash()) { return errors.New("Headers don't match") } return nil diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go deleted file mode 100644 index 6e319dc0d25..00000000000 --- a/lite/proxy/certifier.go +++ /dev/null @@ -1,35 +0,0 @@ -package proxy - -import ( - "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - "github.com/tendermint/tendermint/lite/files" -) - -func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { - trust := lite.NewCacheProvider( - lite.NewMemStoreProvider(), - files.NewProvider(rootDir), - ) - - source := certclient.NewHTTPProvider(nodeAddr) - - // XXX: total insecure hack to avoid `init` - fc, err := source.LatestCommit() - /* XXX - // this gets the most recent verified commit - fc, err := trust.LatestCommit() - if certerr.IsCommitNotFoundErr(err) { - return nil, errors.New("Please run init first to establish a root of trust") - }*/ - if err != nil { - return nil, err - } - - cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 5a2713e3c0f..6a7c2354cd5 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,22 +1,24 @@ package proxy import ( - "fmt" - - "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" ) -//-------------------------------------------- +type errNoData struct{} -var errNoData = fmt.Errorf("No data returned for query") +func (e errNoData) Error() string { + return "No data returned for query" +} -// IsNoDataErr checks whether an error is due to a query returning empty data -func IsNoDataErr(err error) bool { - return errors.Cause(err) == errNoData +// IsErrNoData checks whether an error is due to a query returning empty data +func IsErrNoData(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errNoData) + return ok + } + return false } func ErrNoData() error { - return errors.WithStack(errNoData) + return cmn.ErrorWrap(errNoData{}, "") } - -//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go deleted file mode 100644 index 7f51be50fff..00000000000 --- a/lite/proxy/errors_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proxy - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorNoData(t *testing.T) { - e1 := ErrNoData() - assert.True(t, IsNoDataErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsNoDataErr(e2)) - assert.False(t, IsNoDataErr(nil)) -} diff --git a/lite/proxy/proof.go b/lite/proxy/proof.go new file mode 100644 index 00000000000..452dee277a1 --- /dev/null +++ b/lite/proxy/proof.go @@ -0,0 +1,14 @@ +package proxy + +import ( + "github.com/tendermint/tendermint/crypto/merkle" +) + +func defaultProofRuntime() *merkle.ProofRuntime { + prt := merkle.NewProofRuntime() + prt.RegisterOpDecoder( + merkle.ProofOpSimpleValue, + merkle.SimpleValueOpDecoder, + ) + return prt +} diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 0294ddf6883..ffd9db1d7e4 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -19,7 +19,7 @@ const ( // StartProxy will start the websocket manager on the client, // set up the rpc routes to proxy via the given client, // and start up an http/rpc server on the location given by bind (eg. 
:1234) -func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error { +func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger, maxOpenConnections int) error { err := c.Start() if err != nil { return err @@ -38,8 +38,7 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error core.SetLogger(logger) mux.HandleFunc(wsEndpoint, wm.WebsocketHandler) - // TODO: limit max number of open connections rpc.Config{MaxOpenConnections: X} - _, err = rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{}) + _, err = rpc.StartHTTPServer(listenAddr, mux, logger, rpc.Config{MaxOpenConnections: maxOpenConnections}) return err } diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 0ca5be1749d..3acf826b87e 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -1,152 +1,122 @@ package proxy import ( - "github.com/pkg/errors" + "fmt" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/client" - certerr "github.com/tendermint/tendermint/lite/errors" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" ) -// KeyProof represents a proof of existence or absence of a single key. -// Copied from iavl repo. TODO -type KeyProof interface { - // Verify verfies the proof is valid. To verify absence, - // the value should be nil. - Verify(key, value, root []byte) error - - // Root returns the root hash of the proof. - Root() []byte - - // Serialize itself - Bytes() []byte -} - // GetWithProof will query the key on the given node, and verify it has -// a valid proof, as defined by the certifier. +// a valid proof, as defined by the Verifier. // // If there is any error in checking, returns an error. -// If val is non-empty, proof should be KeyExistsProof -// If val is empty, proof should be KeyMissingProof -func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, - cert lite.Certifier) ( - val cmn.HexBytes, height int64, proof KeyProof, err error) { +func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client, + cert lite.Verifier) ( + val cmn.HexBytes, height int64, proof *merkle.Proof, err error) { if reqHeight < 0 { - err = errors.Errorf("Height cannot be negative") + err = cmn.NewError("Height cannot be negative") return } - _resp, proof, err := GetWithProofOptions("/key", key, - rpcclient.ABCIQueryOptions{Height: int64(reqHeight)}, + res, err := GetWithProofOptions(prt, "/key", key, + rpcclient.ABCIQueryOptions{Height: int64(reqHeight), Prove: true}, node, cert) - if _resp != nil { - resp := _resp.Response - val, height = resp.Value, resp.Height + if err != nil { + return } + + resp := res.Response + val, height = resp.Value, resp.Height return val, height, proof, err } -// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions -func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions, - node rpcclient.Client, cert lite.Certifier) ( - *ctypes.ResultABCIQuery, KeyProof, error) { +// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions. +// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store. 
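A hedged, in-package sketch of calling the reworked query path (the key is a placeholder; GetWithProof is the function declared above, and defaultProofRuntime comes from proof.go earlier in this diff):

```go
package proxy

import (
	"github.com/tendermint/tendermint/lite"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// fetchVerified is a hypothetical wrapper around GetWithProof.
func fetchVerified(node rpcclient.Client, cert lite.Verifier) error {
	// defaultProofRuntime registers the simple-value proof-op decoder.
	prt := defaultProofRuntime()

	// GetWithProof forces Prove: true on the query and verifies the returned
	// merkle proof against a certified AppHash before handing the value back.
	val, height, proof, err := GetWithProof(prt, []byte("my-key"), 0, node, cert)
	if err != nil {
		return err
	}
	_, _, _ = val, height, proof // verified value, query height, checked proof
	return nil
}
```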
+func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions, + node rpcclient.Client, cert lite.Verifier) ( + *ctypes.ResultABCIQuery, error) { + + if !opts.Prove { + return nil, cmn.NewError("require ABCIQueryOptions.Prove to be true") + } - _resp, err := node.ABCIQueryWithOptions(path, key, opts) + res, err := node.ABCIQueryWithOptions(path, key, opts) if err != nil { - return nil, nil, err + return nil, err } - resp := _resp.Response + resp := res.Response - // make sure the proof is the proper height + // Validate the response, e.g. height. if resp.IsErr() { - err = errors.Errorf("Query error for key %d: %d", key, resp.Code) - return nil, nil, err + err = cmn.NewError("Query error for key %d: %d", key, resp.Code) + return nil, err } - if len(resp.Key) == 0 || len(resp.Proof) == 0 { - return nil, nil, ErrNoData() + + if len(resp.Key) == 0 || resp.Proof == nil { + return nil, lerr.ErrEmptyTree() } if resp.Height == 0 { - return nil, nil, errors.New("Height returned is zero") + return nil, cmn.NewError("Height returned is zero") } // AppHash for height H is in header H+1 - commit, err := GetCertifiedCommit(resp.Height+1, node, cert) + signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { - return nil, nil, err + return nil, err } - _ = commit - return &ctypes.ResultABCIQuery{Response: resp}, nil, nil - - /* // TODO refactor so iavl stuff is not in tendermint core - // https://github.com/tendermint/tendermint/issues/1183 - if len(resp.Value) > 0 { - // The key was found, construct a proof of existence. - proof, err := iavl.ReadKeyProof(resp.Proof) + // Validate the proof against the certified header to ensure data integrity. + if resp.Value != nil { + // Value exists + // XXX How do we encode the key into a string... + err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, string(resp.Key), resp.Value) if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") + return nil, cmn.ErrorWrap(err, "Couldn't verify value proof") } - - eproof, ok := proof.(*iavl.KeyExistsProof) - if !ok { - return nil, nil, errors.New("Expected KeyExistsProof for non-empty value") - } - + return &ctypes.ResultABCIQuery{Response: resp}, nil + } else { + // Value absent // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash) + // XXX How do we encode the key into a string... + err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key)) if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") + return nil, cmn.ErrorWrap(err, "Couldn't verify absence proof") } - return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil - } - - // The key wasn't found, construct a proof of non-existence. - proof, err := iavl.ReadKeyProof(resp.Proof) - if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") - } - - aproof, ok := proof.(*iavl.KeyAbsentProof) - if !ok { - return nil, nil, errors.New("Expected KeyAbsentProof for empty Value") - } - - // Validate the proof against the certified header to ensure data integrity. - err = aproof.Verify(resp.Key, nil, commit.Header.AppHash) - if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") + return &ctypes.ResultABCIQuery{Response: resp}, nil } - return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData() - */ } -// GetCertifiedCommit gets the signed header for a given height -// and certifies it. 
Returns error if unable to get a proven header. -func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) { +// GetCertifiedCommit gets the signed header for a given height and certifies +// it. Returns error if unable to get a proven header. +func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (types.SignedHeader, error) { // FIXME: cannot use cert.GetByHeight for now, as it also requires // Validators and will fail on querying tendermint for non-current height. // When this is supported, we should use it instead... - rpcclient.WaitForHeight(node, h, nil) - cresp, err := node.Commit(&h) + rpcclient.WaitForHeight(client, h, nil) + cresp, err := client.Commit(&h) if err != nil { - return lite.Commit{}, err + return types.SignedHeader{}, err } - commit := client.CommitFromResult(cresp) - // validate downloaded checkpoint with our request and trust store. - if commit.Height() != h { - return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) + // Validate downloaded checkpoint with our request and trust store. + sh := cresp.SignedHeader + if sh.Height != h { + return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v", + h, sh.Height) } - if err = cert.Certify(commit); err != nil { - return lite.Commit{}, err + if err = cert.Verify(sh); err != nil { + return types.SignedHeader{}, err } - return commit, nil + return sh, nil } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 38a43af2b81..0e30d75587b 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -4,12 +4,12 @@ import ( "fmt" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/lite" certclient "github.com/tendermint/tendermint/lite/client" nm "github.com/tendermint/tendermint/node" @@ -19,12 +19,13 @@ import ( ) var node *nm.Node +var chainID = "tendermint_test" // TODO use from config. +var waitForEventTimeout = 5 * time.Second // TODO fix tests!! func TestMain(m *testing.M) { app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) code := m.Run() @@ -38,70 +39,87 @@ func kvstoreTx(k, v []byte) []byte { return []byte(fmt.Sprintf("%s=%s", k, v)) } +// TODO: enable it after general proof format has been adapted +// in abci/examples/kvstore.go func _TestAppProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) + prt := defaultProofRuntime() cl := client.NewLocal(node) client.WaitForHeight(cl, 1, nil) + // This sets up our trust on the node based on some past point. + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, 1, 1) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + + // Wait for tx confirmation. + done := make(chan int64) + go func() { + evtTyp := types.EventTx + _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout) + require.Nil(err, "%#v", err) + close(done) + }() + + // Submit a transaction. k := []byte("my-key") v := []byte("my-value") - tx := kvstoreTx(k, v) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - // This sets up our trust on the node based on some past point. 
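One subtlety worth calling out from the code above: the AppHash committing state written at height h only appears in header h+1. A sketch of a helper capturing that offset (the helper name is hypothetical):

```go
package proxy

import (
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/lite"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

// certifiedAppHash returns the verified root for checking proofs over state
// written at height h, which is why GetWithProofOptions certifies
// resp.Height+1 above.
func certifiedAppHash(h int64, client rpcclient.Client, cert lite.Verifier) (cmn.HexBytes, error) {
	sh, err := GetCertifiedCommit(h+1, client, cert)
	if err != nil {
		return nil, err
	}
	return sh.AppHash, nil
}
```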
- source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) - require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) - - client.WaitForHeight(cl, 3, nil) - latest, err := source.LatestCommit() - require.NoError(err, "%+v", err) - rootHash := latest.Header.AppHash + // Fetch latest after tx commit. + <-done + latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) + require.NoError(err, "%#v", err) + rootHash := latest.SignedHeader.AppHash + if rootHash == nil { + // Fetch one block later, AppHash hasn't been committed yet. + // TODO find a way to avoid doing this. + client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil) + latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1) + require.NoError(err, "%#v", err) + rootHash = latest.SignedHeader.AppHash + } + require.NotNil(rootHash) // verify a query before the tx block has no data (and valid non-exist proof) - bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) - fmt.Println(bs, height, proof, err) - require.NotNil(err) - require.True(IsNoDataErr(err), err.Error()) + bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) + require.NoError(err, "%#v", err) + // require.NotNil(proof) + // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, + // (currently there's a race condition) + // and ensure that proof proves absence of k. require.Nil(bs) // but given that block it is good - bs, height, proof, err = GetWithProof(k, brh, cl, cert) - require.NoError(err, "%+v", err) + bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert) + require.NoError(err, "%#v", err) require.NotNil(proof) - require.True(height >= int64(latest.Header.Height)) - - // Alexis there is a bug here, somehow the above code gives us rootHash = nil - // and proof.Verify doesn't care, while proofNotExists.Verify fails. - // I am hacking this in to make it pass, but please investigate further. - rootHash = proof.Root() + require.Equal(height, brh) - //err = wire.ReadBinaryBytes(bs, &data) - //require.NoError(err, "%+v", err) assert.EqualValues(v, bs) - err = proof.Verify(k, bs, rootHash) - assert.NoError(err, "%+v", err) + err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding + assert.NoError(err, "%#v", err) // Test non-existing key. 
missing := []byte("my-missing-key") - bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsNoDataErr(err)) + bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert) + require.NoError(err) require.Nil(bs) require.NotNil(proof) - err = proof.Verify(missing, nil, rootHash) - assert.NoError(err, "%+v", err) - err = proof.Verify(k, nil, rootHash) - assert.Error(err) + err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding + assert.NoError(err, "%#v", err) + err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding + assert.Error(err, "%#v", err) } -func _TestTxProofs(t *testing.T) { +func TestTxProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) cl := client.NewLocal(node) @@ -109,15 +127,15 @@ func _TestTxProofs(t *testing.T) { tx := kvstoreTx([]byte("key-a"), []byte("value-a")) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) - require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. key := types.Tx([]byte("bogus")).Hash() @@ -128,12 +146,12 @@ func _TestTxProofs(t *testing.T) { // Now let's check with the real tx hash. key = types.Tx(tx).Hash() res, err = cl.Tx(key, true) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.NotNil(res) err = res.Proof.Validate(key) - assert.NoError(err, "%+v", err) + assert.NoError(err, "%#v", err) commit, err := GetCertifiedCommit(br.Height, cl, cert) - require.Nil(err, "%+v", err) + require.Nil(err, "%#v", err) require.Equal(res.Proof.RootHash, commit.Header.DataHash) } diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 47c0ff6deb7..1ce4d667e33 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/proxy" "github.com/tendermint/tendermint/types" ) @@ -26,40 +25,40 @@ var hdrHeight11 = types.Header{ func TestValidateBlock(t *testing.T) { tests := []struct { - block *types.Block - commit lite.Commit - wantErr string + block *types.Block + signedHeader types.SignedHeader + wantErr string }{ { block: nil, wantErr: "non-nil Block", }, { - block: &types.Block{}, + block: &types.Block{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - block: &types.Block{Header: types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + block: &types.Block{Header: types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - block: &types.Block{Header: types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + block: &types.Block{Header: types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End 
Header.Height mismatch test // Start Header.Hash mismatch test { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &hdrHeight11}, + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &hdrHeight11}, }, // End Header.Hash mismatch test @@ -69,7 +68,7 @@ func TestValidateBlock(t *testing.T) { Header: types.Header{Height: 11}, Data: types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}}, }, @@ -80,7 +79,7 @@ func TestValidateBlock(t *testing.T) { Header: types.Header{Height: 11, DataHash: deadBeefHash}, Data: types.Data{Txs: deadBeefTxs}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, }, @@ -89,7 +88,7 @@ func TestValidateBlock(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.commit) + err := proxy.ValidateBlock(tt.block, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d", i) @@ -105,40 +104,40 @@ func TestValidateBlock(t *testing.T) { func TestValidateBlockMeta(t *testing.T) { tests := []struct { - meta *types.BlockMeta - commit lite.Commit - wantErr string + meta *types.BlockMeta + signedHeader types.SignedHeader + wantErr string }{ { meta: nil, wantErr: "non-nil BlockMeta", }, { - meta: &types.BlockMeta{}, + meta: &types.BlockMeta{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - meta: &types.BlockMeta{Header: types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + meta: &types.BlockMeta{Header: types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - meta: &types.BlockMeta{Header: types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + meta: &types.BlockMeta{Header: types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Headers don't match test { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &hdrHeight11}, + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &hdrHeight11}, }, { @@ -150,7 +149,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11, DataHash: deadBeefHash}, }, wantErr: "Headers don't match", @@ -164,7 +163,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: 
deadBeefHash,
 					ValidatorsHash: []byte("Tendermint"),
@@ -183,7 +182,7 @@ func TestValidateBlockMeta(t *testing.T) {
 					Time:           testTime2,
 				},
 			},
-			commit: lite.Commit{
+			signedHeader: types.SignedHeader{
 				Header: &types.Header{
 					Height: 11, DataHash: deadBeefHash,
 					ValidatorsHash: []byte("Tendermint-x"),
@@ -197,7 +196,7 @@ func TestValidateBlockMeta(t *testing.T) {
 	}
 
 	for i, tt := range tests {
-		err := proxy.ValidateBlockMeta(tt.meta, tt.commit)
+		err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader)
 		if tt.wantErr != "" {
 			if err == nil {
 				assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr)
diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go
new file mode 100644
index 00000000000..b7c11f18ef6
--- /dev/null
+++ b/lite/proxy/verifier.go
@@ -0,0 +1,41 @@
+package proxy
+
+import (
+	cmn "github.com/tendermint/tendermint/libs/common"
+	dbm "github.com/tendermint/tendermint/libs/db"
+	log "github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/lite"
+	lclient "github.com/tendermint/tendermint/lite/client"
+)
+
+func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {
+
+	logger = logger.With("module", "lite/proxy")
+	logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)
+
+	memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize)
+	lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir))
+	trust := lite.NewMultiProvider(
+		memProvider,
+		lvlProvider,
+	)
+	source := lclient.NewProvider(chainID, client)
+	cert := lite.NewDynamicVerifier(chainID, trust, source)
+	cert.SetLogger(logger) // Sets logger recursively.
+
+	// TODO: Make this more secure, e.g. make it interactive in the console?
+	_, err := trust.LatestFullCommit(chainID, 1, 1<<63-1)
+	if err != nil {
+		logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source at height 1...")
+		fc, err := source.LatestFullCommit(chainID, 1, 1)
+		if err != nil {
+			return nil, cmn.ErrorWrap(err, "fetching source full commit @ height 1")
+		}
+		err = trust.SaveFullCommit(fc)
+		if err != nil {
+			return nil, cmn.ErrorWrap(err, "saving full commit to trusted")
+		}
+	}
+
+	return cert, nil
+}
diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go
index f0eb6b41e20..7ddb3b8addb 100644
--- a/lite/proxy/wrapper.go
+++ b/lite/proxy/wrapper.go
@@ -3,27 +3,29 @@ package proxy
 import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 
+	"github.com/tendermint/tendermint/crypto/merkle"
 	"github.com/tendermint/tendermint/lite"
-	certclient "github.com/tendermint/tendermint/lite/client"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )
 
 var _ rpcclient.Client = Wrapper{}
 
-// Wrapper wraps a rpcclient with a Certifier and double-checks any input that is
+// Wrapper wraps a rpcclient with a Verifier and double-checks any input that is
 // provable before passing it along. Allows you to make any rpcclient fully secure.
 type Wrapper struct {
 	rpcclient.Client
-	cert *lite.InquiringCertifier
+	cert *lite.DynamicVerifier
+	prt  *merkle.ProofRuntime
 }
 
-// SecureClient uses a given certifier to wrap an connection to an untrusted
+// SecureClient uses a given Verifier to wrap a connection to an untrusted
 // host and return a cryptographically secure rpc client.
// // If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface -func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper { - wrap := Wrapper{c, cert} +func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { + prt := defaultProofRuntime() + wrap := Wrapper{c, cert, prt} // TODO: no longer possible as no more such interface exposed.... // if we wrap http client, then we can swap out the event switch to filter // if hc, ok := c.(*rpcclient.HTTP); ok { @@ -37,7 +39,7 @@ func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper { func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert) + res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert) return res, err } @@ -53,11 +55,11 @@ func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { return res, err } h := int64(res.Height) - check, err := GetCertifiedCommit(h, w.Client, w.cert) + sh, err := GetCertifiedCommit(h, w.Client, w.cert) if err != nil { return res, err } - err = res.Proof.Validate(check.Header.DataHash) + err = res.Proof.Validate(sh.DataHash) return res, err } @@ -74,12 +76,12 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // go and verify every blockmeta in the result.... for _, meta := range r.BlockMetas { // get a checkpoint to verify from - c, err := w.Commit(&meta.Header.Height) + res, err := w.Commit(&meta.Header.Height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) - err = ValidateBlockMeta(meta, check) + sh := res.SignedHeader + err = ValidateBlockMeta(meta, sh) if err != nil { return nil, err } @@ -90,41 +92,57 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // Block returns an entire block and verifies all signatures func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { - r, err := w.Client.Block(height) + resBlock, err := w.Client.Block(height) if err != nil { return nil, err } // get a checkpoint to verify from - c, err := w.Commit(height) + resCommit, err := w.Commit(height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) + sh := resCommit.SignedHeader // now verify - err = ValidateBlockMeta(r.BlockMeta, check) + err = ValidateBlockMeta(resBlock.BlockMeta, sh) if err != nil { return nil, err } - err = ValidateBlock(r.Block, check) + err = ValidateBlock(resBlock.Block, sh) if err != nil { return nil, err } - return r, nil + return resBlock, nil } // Commit downloads the Commit and certifies it with the lite. // // This is the foundation for all other verification in this module func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { + if height == nil { + resStatus, err := w.Client.Status() + if err != nil { + return nil, err + } + // NOTE: If resStatus.CatchingUp, there is a race + // condition where the validator set for the next height + // isn't available until some time after the blockstore + // has height h on the remote node. This isn't an issue + // once the node has caught up, and a syncing node likely + // won't have this issue esp with the implementation we + // have here, but we may have to address this at some + // point. 
+ height = new(int64) + *height = resStatus.SyncInfo.LatestBlockHeight + } rpcclient.WaitForHeight(w.Client, *height, nil) - r, err := w.Client.Commit(height) - // if we got it, then certify it + res, err := w.Client.Commit(height) + // if we got it, then verify it if err == nil { - check := certclient.CommitFromResult(r) - err = w.cert.Certify(check) + sh := res.SignedHeader + err = w.cert.Verify(sh) } - return r, err + return res, err } // // WrappedSwitch creates a websocket connection that auto-verifies any info diff --git a/lite/static_certifier.go b/lite/static_certifier.go deleted file mode 100644 index 1ec3b809a81..00000000000 --- a/lite/static_certifier.go +++ /dev/null @@ -1,73 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*StaticCertifier)(nil) - -// StaticCertifier assumes a static set of validators, set on -// initilization and checks against them. -// The signatures on every header is checked for > 2/3 votes -// against the known validator set upon Certify -// -// Good for testing or really simple chains. Building block -// to support real-world functionality. -type StaticCertifier struct { - chainID string - vSet *types.ValidatorSet - vhash []byte -} - -// NewStaticCertifier returns a new certifier with a static validator set. -func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier { - return &StaticCertifier{ - chainID: chainID, - vSet: vals, - } -} - -// ChainID returns the chain id. -// Implements Certifier. -func (sc *StaticCertifier) ChainID() string { - return sc.chainID -} - -// Validators returns the validator set. -func (sc *StaticCertifier) Validators() *types.ValidatorSet { - return sc.vSet -} - -// Hash returns the hash of the validator set. -func (sc *StaticCertifier) Hash() []byte { - if len(sc.vhash) == 0 { - sc.vhash = sc.vSet.Hash() - } - return sc.vhash -} - -// Certify makes sure that the commit is valid. -// Implements Certifier. -func (sc *StaticCertifier) Certify(commit Commit) error { - // do basic sanity checks - err := commit.ValidateBasic(sc.chainID) - if err != nil { - return err - } - - // make sure it has the same validator set we have (static means static) - if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) { - return liteErr.ErrValidatorsChanged() - } - - // then make sure we have the proper signatures for this - err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID, - commit.Header.Height, commit.Commit) - return errors.WithStack(err) -} diff --git a/lite/static_certifier_test.go b/lite/static_certifier_test.go deleted file mode 100644 index 03567daa667..00000000000 --- a/lite/static_certifier_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestStaticCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
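With the static certifier (and the old proxy.GetCertifier wiring) removed, verifier.go and wrapper.go above carry the replacement setup. Roughly, client construction becomes the following sketch; the endpoint and root dir are illustrative, and *rpcclient.HTTP is assumed to satisfy both the rpc client and SignStatusClient interfaces:

```go
package example

import (
	"os"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/lite/proxy"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
)

func secureSetup(chainID, rootDir string) error {
	node := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")
	logger := log.NewTMLogger(os.Stdout)

	// Builds the mem+leveldb trust store, seeds it from the source when empty,
	// and returns a DynamicVerifier (see verifier.go above).
	cert, err := proxy.NewVerifier(chainID, rootDir, node, logger, 10)
	if err != nil {
		return err
	}

	// Every provable result (blocks, commits, ABCI queries, txs) is now
	// double-checked before being returned.
	sc := proxy.SecureClient(node, cert)
	_, err = sc.Commit(nil) // nil height: fetch and verify the latest commit
	return err
}
```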
-	vals := keys.ToValidators(20, 10)
-	// and a certifier based on our known set
-	chainID := "test-static"
-	cert := lite.NewStaticCertifier(chainID, vals)
-
-	cases := []struct {
-		keys        lite.ValKeys
-		vals        *types.ValidatorSet
-		height      int64
-		first, last int  // who actually signs
-		proper      bool // true -> expect no error
-		changed     bool // true -> expect validator change error
-	}{
-		// perfect, signed by everyone
-		{keys, vals, 1, 0, len(keys), true, false},
-		// skip little guy is okay
-		{keys, vals, 2, 1, len(keys), true, false},
-		// but not the big guy
-		{keys, vals, 3, 0, len(keys) - 1, false, false},
-		// even changing the power a little bit breaks the static validator
-		// the sigs are enough, but the validator hash is unknown
-		{keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true},
-	}
-
-	for _, tc := range cases {
-		check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals,
-			[]byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last)
-		err := cert.Certify(check)
-		if tc.proper {
-			assert.Nil(err, "%+v", err)
-		} else {
-			assert.NotNil(err)
-			if tc.changed {
-				assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err)
-			}
-		}
-	}
-
-}
diff --git a/lite/types.go b/lite/types.go
new file mode 100644
index 00000000000..643f5ad4898
--- /dev/null
+++ b/lite/types.go
@@ -0,0 +1,13 @@
+package lite
+
+import (
+	"github.com/tendermint/tendermint/types"
+)
+
+// Verifier checks the votes to make sure the block really is signed properly.
+// Verifier must know the current or recent set of validators by some other
+// means.
+type Verifier interface {
+	Verify(sheader types.SignedHeader) error
+	ChainID() string
+}
diff --git a/mempool/bench_test.go b/mempool/bench_test.go
new file mode 100644
index 00000000000..68b033caae7
--- /dev/null
+++ b/mempool/bench_test.go
@@ -0,0 +1,55 @@
+package mempool
+
+import (
+	"encoding/binary"
+	"testing"
+
+	"github.com/tendermint/tendermint/abci/example/kvstore"
+	"github.com/tendermint/tendermint/proxy"
+)
+
+func BenchmarkReap(b *testing.B) {
+	app := kvstore.NewKVStoreApplication()
+	cc := proxy.NewLocalClientCreator(app)
+	mempool := newMempoolWithApp(cc)
+
+	size := 10000
+	for i := 0; i < size; i++ {
+		tx := make([]byte, 8)
+		binary.BigEndian.PutUint64(tx, uint64(i))
+		mempool.CheckTx(tx, nil)
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		mempool.ReapMaxBytesMaxGas(100000000, 10000000)
+	}
+}
+
+func BenchmarkCacheInsertTime(b *testing.B) {
+	cache := newMapTxCache(b.N)
+	txs := make([][]byte, b.N)
+	for i := 0; i < b.N; i++ {
+		txs[i] = make([]byte, 8)
+		binary.BigEndian.PutUint64(txs[i], uint64(i))
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cache.Push(txs[i])
+	}
+}
+
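lite/types.go above pins down the Verifier contract; the simplest implementation, used by the tests earlier in this diff, is BaseVerifier. A sketch (per the deleted static certifier's documented contract, verification demands more than 2/3 of the pinned set's voting power):

```go
package example

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/types"
)

// verifyAgainstKnownSet pins a single known validator set, as query_test.go
// does with lite.NewBaseVerifier, and checks a signed header against it.
func verifyAgainstKnownSet(chainID string, height int64, vals *types.ValidatorSet, sh types.SignedHeader) error {
	var v lite.Verifier = lite.NewBaseVerifier(chainID, height, vals)
	return v.Verify(sh)
}
```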
+// This benchmark is probably skewed, since we actually will be removing
+// txs in parallel, which may cause some overhead due to mutex locking.
+func BenchmarkCacheRemoveTime(b *testing.B) {
+	cache := newMapTxCache(b.N)
+	txs := make([][]byte, b.N)
+	for i := 0; i < b.N; i++ {
+		txs[i] = make([]byte, 8)
+		binary.BigEndian.PutUint64(txs[i], uint64(i))
+		cache.Push(txs[i])
+	}
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		cache.Remove(txs[i])
+	}
+}
diff --git a/mempool/mempool.go b/mempool/mempool.go
index e1ed4f260a4..b84eb4a6825 100644
--- a/mempool/mempool.go
+++ b/mempool/mempool.go
@@ -3,6 +3,7 @@ package mempool
 import (
 	"bytes"
 	"container/list"
+	"crypto/sha256"
 	"fmt"
 	"sync"
 	"sync/atomic"
@@ -11,16 +12,25 @@ import (
 	"github.com/pkg/errors"
 
 	abci "github.com/tendermint/tendermint/abci/types"
+	cfg "github.com/tendermint/tendermint/config"
 	auto "github.com/tendermint/tendermint/libs/autofile"
 	"github.com/tendermint/tendermint/libs/clist"
 	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
-
-	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
 )
 
+// PreCheckFunc is an optional filter executed before CheckTx and rejects the
+// transaction if an error is returned. An example would be to ensure that a
+// transaction doesn't exceed the block size.
+type PreCheckFunc func(types.Tx) error
+
+// PostCheckFunc is an optional filter executed after CheckTx and rejects the
+// transaction if an error is returned. An example would be to ensure a
+// transaction doesn't require more gas than available for the block.
+type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
+
 /*
 The mempool pushes new txs onto the proxyAppConn.
@@ -57,6 +67,55 @@ var (
 	ErrMempoolIsFull = errors.New("Mempool is full")
 )
 
+// ErrPreCheck is returned when a transaction fails the pre-check.
+type ErrPreCheck struct {
+	Reason error
+}
+
+func (e ErrPreCheck) Error() string {
+	return e.Reason.Error()
+}
+
+// IsPreCheckError returns true if err is due to pre check failure.
+func IsPreCheckError(err error) bool {
+	_, ok := err.(ErrPreCheck)
+	return ok
+}
+
+// PreCheckAminoMaxBytes checks that the size of the transaction plus the amino
+// overhead is smaller or equal to the expected maxBytes.
+func PreCheckAminoMaxBytes(maxBytes int64) PreCheckFunc {
+	return func(tx types.Tx) error {
+		// We have to account for the amino overhead in the tx size as well
+		// NOTE: fieldNum = 1 as types.Block.Data contains Txs []Tx as first field.
+		// If this field order ever changes this needs to be updated here accordingly.
+		// NOTE: if some []Tx are encoded without a parent struct, the
+		// fieldNum is also equal to 1.
+		aminoOverhead := types.ComputeAminoOverhead(tx, 1)
+		txSize := int64(len(tx)) + aminoOverhead
+		if txSize > maxBytes {
+			return fmt.Errorf("Tx size (including amino overhead) is too big: %d, max: %d",
+				txSize, maxBytes)
+		}
+		return nil
+	}
+}
+
+// PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed
+// maxGas. Returns nil if maxGas is -1.
+func PostCheckMaxGas(maxGas int64) PostCheckFunc {
+	return func(tx types.Tx, res *abci.ResponseCheckTx) error {
+		if maxGas == -1 {
+			return nil
+		}
+		if res.GasWanted > maxGas {
+			return fmt.Errorf("gas wanted %d is greater than max gas %d",
+				res.GasWanted, maxGas)
+		}
+		return nil
+	}
+}
+
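A sketch of wiring the new filters in through MempoolOption. This assumes a NewMempool constructor that accepts trailing options, which is what the option type implies but which is not shown in this hunk; in practice the caps would come from the consensus params:

```go
package mempool

import (
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/proxy"
)

// newFilteredMempool is a hypothetical constructor wrapper.
func newFilteredMempool(appConn proxy.AppConnMempool, height, maxBytes, maxGas int64) *Mempool {
	return NewMempool(
		cfg.DefaultMempoolConfig(),
		appConn,
		height,
		WithPreCheck(PreCheckAminoMaxBytes(maxBytes)), // rejected before the app sees the tx
		WithPostCheck(PostCheckMaxGas(maxGas)),        // rejected after the app's CheckTx response
	)
}
```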
 // TxID is the hex encoded hash of the bytes as a types.Tx.
 func TxID(tx []byte) string {
 	return fmt.Sprintf("%X", types.Tx(tx).Hash())
@@ -79,6 +138,8 @@ type Mempool struct {
 	recheckEnd           *clist.CElement // re-checking stops here
 	notifiedTxsAvailable bool
 	txsAvailable         chan struct{} // fires once for each height, when the mempool is not empty
+	preCheck             PreCheckFunc
+	postCheck            PostCheckFunc
 
 	// Keep a cache of already-seen txs.
 	// This reduces the pressure on the proxyApp.
@@ -138,44 +199,50 @@ func (mem *Mempool) SetLogger(l log.Logger) {
 	mem.logger = l
 }
 
+// WithPreCheck sets a filter for the mempool to reject a tx if f(tx) returns
+// an error. This is run before CheckTx.
+func WithPreCheck(f PreCheckFunc) MempoolOption {
+	return func(mem *Mempool) { mem.preCheck = f }
+}
+
+// WithPostCheck sets a filter for the mempool to reject a tx if f(tx) returns
+// an error. This is run after CheckTx.
+func WithPostCheck(f PostCheckFunc) MempoolOption {
+	return func(mem *Mempool) { mem.postCheck = f }
+}
+
 // WithMetrics sets the metrics.
 func WithMetrics(metrics *Metrics) MempoolOption {
 	return func(mem *Mempool) { mem.metrics = metrics }
 }
 
-// CloseWAL closes and discards the underlying WAL file.
-// Any further writes will not be relayed to disk.
-func (mem *Mempool) CloseWAL() bool {
-	if mem == nil {
-		return false
+// InitWAL creates a directory for the WAL file and opens the file itself.
+//
+// *panics* if it can't create the directory or open the file.
+// *not thread safe*
+func (mem *Mempool) InitWAL() {
+	walDir := mem.config.WalDir()
+	err := cmn.EnsureDir(walDir, 0700)
+	if err != nil {
+		panic(errors.Wrap(err, "Error ensuring Mempool WAL dir"))
+	}
+	af, err := auto.OpenAutoFile(walDir + "/wal")
+	if err != nil {
+		panic(errors.Wrap(err, "Error opening Mempool WAL file"))
 	}
+	mem.wal = af
+}
 
+// CloseWAL closes and discards the underlying WAL file.
+// Any further writes will not be relayed to disk.
+func (mem *Mempool) CloseWAL() {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()
-	if mem.wal == nil {
-		return false
-	}
-	if err := mem.wal.Close(); err != nil && mem.logger != nil {
-		mem.logger.Error("Mempool.CloseWAL", "err", err)
+	if err := mem.wal.Close(); err != nil {
+		mem.logger.Error("Error closing WAL", "err", err)
 	}
 	mem.wal = nil
-	return true
-}
-
-func (mem *Mempool) InitWAL() {
-	walDir := mem.config.WalDir()
-	if walDir != "" {
-		err := cmn.EnsureDir(walDir, 0700)
-		if err != nil {
-			cmn.PanicSanity(errors.Wrap(err, "Error ensuring Mempool wal dir"))
-		}
-		af, err := auto.OpenAutoFile(walDir + "/wal")
-		if err != nil {
-			cmn.PanicSanity(errors.Wrap(err, "Error opening Mempool wal file"))
-		}
-		mem.wal = af
-	}
 }
 
 // Lock locks the mempool. The consensus must be able to hold lock to safely update.
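Since InitWAL now panics rather than silently skipping when misconfigured, the caller owns the WAL lifecycle. A short sketch:

```go
package mempool

// runWithWAL is a hypothetical lifecycle sketch.
func runWithWAL(mem *Mempool) {
	mem.InitWAL()        // panics if the WAL dir or file can't be created
	defer mem.CloseWAL() // after this, writes are no longer relayed to disk

	// ... serve CheckTx traffic ...
}
```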
@@ -239,6 +306,12 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { return ErrMempoolIsFull } + if mem.preCheck != nil { + if err := mem.preCheck(tx); err != nil { + return ErrPreCheck{err} + } + } + // CACHE if !mem.cache.Push(tx) { return ErrTxInCache @@ -276,6 +349,7 @@ func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { if mem.recheckCursor == nil { mem.resCbNormal(req, res) } else { + mem.metrics.RecheckTimes.Add(1) mem.resCbRecheck(req, res) } mem.metrics.Size.Set(float64(mem.Size())) @@ -285,20 +359,32 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: tx := req.GetCheckTx().Tx - if r.CheckTx.Code == abci.CodeTypeOK { + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) + } + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { mem.counter++ memTx := &mempoolTx{ - counter: mem.counter, - height: mem.height, - tx: tx, + counter: mem.counter, + height: mem.height, + gasWanted: r.CheckTx.GasWanted, + tx: tx, } mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", TxID(tx), "res", r, "total", mem.Size()) + mem.logger.Info("Added good transaction", + "tx", TxID(tx), + "res", r, + "height", memTx.height, + "total", mem.Size(), + "counter", memTx.counter, + ) + mem.metrics.TxSizeBytes.Observe(float64(len(tx))) mem.notifyTxsAvailable() } else { // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r) - + mem.logger.Info("Rejected bad transaction", "tx", TxID(tx), "res", r, "err", postCheckErr) + mem.metrics.FailedTxs.Add(1) // remove from cache (it might be good later) mem.cache.Remove(tx) } @@ -310,20 +396,31 @@ func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_CheckTx: + tx := req.GetCheckTx().Tx memTx := mem.recheckCursor.Value.(*mempoolTx) if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) { - cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+ - "Expected %X, got %X", r.CheckTx.Data, memTx.tx)) + cmn.PanicSanity( + fmt.Sprintf( + "Unexpected tx response from proxy during recheck\nExpected %X, got %X", + r.CheckTx.Data, + memTx.tx, + ), + ) + } + var postCheckErr error + if mem.postCheck != nil { + postCheckErr = mem.postCheck(tx, r.CheckTx) } - if r.CheckTx.Code == abci.CodeTypeOK { + if (r.CheckTx.Code == abci.CodeTypeOK) && postCheckErr == nil { // Good, nothing to do. } else { // Tx became invalidated due to newly committed block. + mem.logger.Info("Tx is no longer valid", "tx", TxID(tx), "res", r, "err", postCheckErr) mem.txs.Remove(mem.recheckCursor) mem.recheckCursor.DetachPrev() // remove from cache (it might be good later) - mem.cache.Remove(req.GetCheckTx().Tx) + mem.cache.Remove(tx) } if mem.recheckCursor == mem.recheckEnd { mem.recheckCursor = nil @@ -366,9 +463,11 @@ func (mem *Mempool) notifyTxsAvailable() { } } -// Reap returns a list of transactions currently in the mempool. -// If maxTxs is -1, there is no cap on the number of returned transactions. -func (mem *Mempool) Reap(maxTxs int) types.Txs { +// ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes bytes total +// with the condition that the total gasWanted must be less than maxGas. 
+// If both maxes are negative, there is no cap on the size of all returned
+// transactions (~ all available transactions).
+func (mem *Mempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
 	mem.proxyMtx.Lock()
 	defer mem.proxyMtx.Unlock()
 
@@ -377,19 +476,48 @@ func (mem *Mempool) Reap(maxTxs int) types.Txs {
 		time.Sleep(time.Millisecond * 10)
 	}
 
-	txs := mem.collectTxs(maxTxs)
+	var totalBytes int64
+	var totalGas int64
+	// TODO: we will get a performance boost if we have a good estimate of avg
+	// size per tx, and set the initial capacity based off of that.
+	// txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize))
+	txs := make([]types.Tx, 0, mem.txs.Len())
+	for e := mem.txs.Front(); e != nil; e = e.Next() {
+		memTx := e.Value.(*mempoolTx)
+		// Check total size requirement
+		aminoOverhead := types.ComputeAminoOverhead(memTx.tx, 1)
+		if maxBytes > -1 && totalBytes+int64(len(memTx.tx))+aminoOverhead > maxBytes {
+			return txs
+		}
+		totalBytes += int64(len(memTx.tx)) + aminoOverhead
+		// Check total gas requirement
+		if maxGas > -1 && totalGas+memTx.gasWanted > maxGas {
+			return txs
+		}
+		totalGas += memTx.gasWanted
+		txs = append(txs, memTx.tx)
+	}
 	return txs
 }
 
-// maxTxs: -1 means uncapped, 0 means none
-func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
-	if maxTxs == 0 {
-		return []types.Tx{}
-	} else if maxTxs < 0 {
-		maxTxs = mem.txs.Len()
+// ReapMaxTxs reaps up to max transactions from the mempool.
+// If max is negative, there is no cap on the size of all returned
+// transactions (~ all available transactions).
+func (mem *Mempool) ReapMaxTxs(max int) types.Txs {
+	mem.proxyMtx.Lock()
+	defer mem.proxyMtx.Unlock()
+
+	if max < 0 {
+		max = mem.txs.Len()
+	}
+
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
 	}
-	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), maxTxs))
-	for e := mem.txs.Front(); e != nil && len(txs) < maxTxs; e = e.Next() {
+
+	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
+	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
 		memTx := e.Value.(*mempoolTx)
 		txs = append(txs, memTx.tx)
 	}
@@ -399,9 +527,14 @@
 // Update informs the mempool that the given txs were committed and can be discarded.
 // NOTE: this should be called *after* block is committed by consensus.
 // NOTE: unsafe; Lock/Unlock must be managed by caller
-func (mem *Mempool) Update(height int64, txs types.Txs) error {
+func (mem *Mempool) Update(
+	height int64,
+	txs types.Txs,
+	preCheck PreCheckFunc,
+	postCheck PostCheckFunc,
+) error {
 	// First, create a lookup map of txns in new txs.
-	txsMap := make(map[string]struct{})
+	txsMap := make(map[string]struct{}, len(txs))
 	for _, tx := range txs {
 		txsMap[string(tx)] = struct{}{}
 	}
@@ -410,19 +543,27 @@
 	mem.height = height
 	mem.notifiedTxsAvailable = false
 
+	if preCheck != nil {
+		mem.preCheck = preCheck
+	}
+	if postCheck != nil {
+		mem.postCheck = postCheck
+	}
+
 	// Remove transactions that are already in txs.
 	goodTxs := mem.filterTxs(txsMap)
 	// Recheck mempool txs if any txs were committed in the block
-	// NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
-	//   so we really still do need to recheck, but this is for debugging
-	if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) {
+	if mem.config.Recheck && len(goodTxs) > 0 {
 		mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
 		mem.recheckTxs(goodTxs)
 		// At this point, mem.txs are being rechecked.
 		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
 		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 	}
+
+	// Update metrics
 	mem.metrics.Size.Set(float64(mem.Size()))
+
 	return nil
 }

@@ -466,9 +607,10 @@ func (mem *Mempool) recheckTxs(goodTxs []types.Tx) {

 // mempoolTx is a transaction that successfully ran
 type mempoolTx struct {
-	counter int64    // a simple incrementing counter
-	height  int64    // height that this tx had been validated in
-	tx      types.Tx //
+	counter   int64    // a simple incrementing counter
+	height    int64    // height that this tx had been validated in
+	gasWanted int64    // amount of gas this tx states it will require
+	tx        types.Tx //
 }

 // Height returns the height for this transaction
@@ -484,11 +626,12 @@ type txCache interface {
 	Remove(tx types.Tx)
 }

-// mapTxCache maintains a cache of transactions.
+// mapTxCache maintains a cache of transactions. This only stores
+// the hash of the tx, due to memory concerns.
 type mapTxCache struct {
 	mtx  sync.Mutex
 	size int
-	map_ map[string]struct{}
+	map_ map[[sha256.Size]byte]*list.Element
 	list *list.List // to remove oldest tx when cache gets too big
 }

 var _ txCache = (*mapTxCache)(nil)

 func newMapTxCache(cacheSize int) *mapTxCache {
 	return &mapTxCache{
 		size: cacheSize,
-		map_: make(map[string]struct{}, cacheSize),
+		map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
 		list: list.New(),
 	}
 }
@@ -506,7 +649,7 @@ func newMapTxCache(cacheSize int) *mapTxCache {
 // Reset resets the cache to an empty state.
 func (cache *mapTxCache) Reset() {
 	cache.mtx.Lock()
-	cache.map_ = make(map[string]struct{}, cache.size)
+	cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size)
 	cache.list.Init()
 	cache.mtx.Unlock()
 }
@@ -517,27 +660,36 @@ func (cache *mapTxCache) Push(tx types.Tx) bool {
 	cache.mtx.Lock()
 	defer cache.mtx.Unlock()

-	if _, exists := cache.map_[string(tx)]; exists {
+	// Use the tx hash in the cache
+	txHash := sha256.Sum256(tx)
+	if moved, exists := cache.map_[txHash]; exists {
+		cache.list.MoveToFront(moved)
 		return false
 	}

 	if cache.list.Len() >= cache.size {
 		popped := cache.list.Front()
-		poppedTx := popped.Value.(types.Tx)
-		// NOTE: the tx may have already been removed from the map
-		// but deleting a non-existent element is fine
-		delete(cache.map_, string(poppedTx))
-		cache.list.Remove(popped)
+		if popped != nil {
+			poppedTxHash := popped.Value.([sha256.Size]byte)
+			delete(cache.map_, poppedTxHash)
+			cache.list.Remove(popped)
+		}
 	}
-	cache.map_[string(tx)] = struct{}{}
-	cache.list.PushBack(tx)
+	cache.list.PushBack(txHash)
+	cache.map_[txHash] = cache.list.Back()
 	return true
 }

 // Remove removes the given tx from the cache.
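 // Because map_ stores *list.Element values keyed by tx hash, Remove can
 // unlink the element from the list in O(1) without scanning.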
func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - delete(cache.map_, string(tx)) + txHash := sha256.Sum256(tx) + popped := cache.map_[txHash] + delete(cache.map_, txHash) + if popped != nil { + cache.list.Remove(popped) + } + cache.mtx.Unlock() } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index c0f66051f9f..d7ab827379b 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -11,17 +11,16 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" + + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" - - "github.com/stretchr/testify/require" ) func newMempoolWithApp(cc proxy.ClientCreator) *Mempool { @@ -66,12 +65,104 @@ func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs { t.Error(err) } if err := mempool.CheckTx(txBytes, nil); err != nil { - t.Fatalf("Error after CheckTx: %v", err) + // Skip invalid txs. + // TestMempoolFilters will fail otherwise. It asserts a number of txs + // returned. + if IsPreCheckError(err) { + continue + } + t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) } } return txs } +func TestReapMaxBytesMaxGas(t *testing.T) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool := newMempoolWithApp(cc) + + // Ensure gas calculation behaves as expected + checkTxs(t, mempool, 1) + tx0 := mempool.TxsFront().Value.(*mempoolTx) + // assert that kv store has gas wanted = 1. + require.Equal(t, app.CheckTx(tx0.tx).GasWanted, int64(1), "KVStore had a gas value neq to 1") + require.Equal(t, tx0.gasWanted, int64(1), "transactions gas was set incorrectly") + // ensure each tx is 20 bytes long + require.Equal(t, len(tx0.tx), 20, "Tx is longer than 20 bytes") + mempool.Flush() + + // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. + // each tx has 20 bytes + amino overhead = 21 bytes, 1 gas + tests := []struct { + numTxsToCreate int + maxBytes int64 + maxGas int64 + expectedNumTxs int + }{ + {20, -1, -1, 20}, + {20, -1, 0, 0}, + {20, -1, 10, 10}, + {20, -1, 30, 20}, + {20, 0, -1, 0}, + {20, 0, 10, 0}, + {20, 10, 10, 0}, + {20, 22, 10, 1}, + {20, 220, -1, 10}, + {20, 220, 5, 5}, + {20, 220, 10, 10}, + {20, 220, 15, 10}, + {20, 20000, -1, 20}, + {20, 20000, 5, 5}, + {20, 20000, 30, 20}, + } + for tcIndex, tt := range tests { + checkTxs(t, mempool, tt.numTxsToCreate) + got := mempool.ReapMaxBytesMaxGas(tt.maxBytes, tt.maxGas) + assert.Equal(t, tt.expectedNumTxs, len(got), "Got %d txs, expected %d, tc #%d", + len(got), tt.expectedNumTxs, tcIndex) + mempool.Flush() + } +} + +func TestMempoolFilters(t *testing.T) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool := newMempoolWithApp(cc) + emptyTxArr := []types.Tx{[]byte{}} + + nopPreFilter := func(tx types.Tx) error { return nil } + nopPostFilter := func(tx types.Tx, res *abci.ResponseCheckTx) error { return nil } + + // each table driven test creates numTxsToCreate txs with checkTx, and at the end clears all remaining txs. 
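+	// Update is called first so each case's pre/post filters are installed
+	// before checkTxs runs.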
+ // each tx has 20 bytes + amino overhead = 21 bytes, 1 gas + tests := []struct { + numTxsToCreate int + preFilter PreCheckFunc + postFilter PostCheckFunc + expectedNumTxs int + }{ + {10, nopPreFilter, nopPostFilter, 10}, + {10, PreCheckAminoMaxBytes(10), nopPostFilter, 0}, + {10, PreCheckAminoMaxBytes(20), nopPostFilter, 0}, + {10, PreCheckAminoMaxBytes(22), nopPostFilter, 10}, + {10, nopPreFilter, PostCheckMaxGas(-1), 10}, + {10, nopPreFilter, PostCheckMaxGas(0), 0}, + {10, nopPreFilter, PostCheckMaxGas(1), 10}, + {10, nopPreFilter, PostCheckMaxGas(3000), 10}, + {10, PreCheckAminoMaxBytes(10), PostCheckMaxGas(20), 0}, + {10, PreCheckAminoMaxBytes(30), PostCheckMaxGas(20), 10}, + {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(1), 10}, + {10, PreCheckAminoMaxBytes(22), PostCheckMaxGas(0), 0}, + } + for tcIndex, tt := range tests { + mempool.Update(1, emptyTxArr, tt.preFilter, tt.postFilter) + checkTxs(t, mempool, tt.numTxsToCreate) + require.Equal(t, tt.expectedNumTxs, mempool.Size(), "mempool had the incorrect size, on test case %d", tcIndex) + mempool.Flush() + } +} + func TestTxsAvailable(t *testing.T) { app := kvstore.NewKVStoreApplication() cc := proxy.NewLocalClientCreator(app) @@ -92,7 +183,7 @@ func TestTxsAvailable(t *testing.T) { // it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - if err := mempool.Update(1, committedTxs); err != nil { + if err := mempool.Update(1, committedTxs, nil, nil); err != nil { t.Error(err) } ensureFire(t, mempool.TxsAvailable(), timeoutMS) @@ -104,7 +195,7 @@ func TestTxsAvailable(t *testing.T) { // now call update with all the txs. it should not fire as there are no txs left committedTxs = append(txs, moreTxs...) - if err := mempool.Update(2, committedTxs); err != nil { + if err := mempool.Update(2, committedTxs, nil, nil); err != nil { t.Error(err) } ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -150,8 +241,8 @@ func TestSerialReap(t *testing.T) { } reapCheck := func(exp int) { - txs := mempool.Reap(-1) - require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs))) + txs := mempool.ReapMaxBytesMaxGas(-1, -1) + require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) } updateRange := func(start, end int) { @@ -161,7 +252,7 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - if err := mempool.Update(0, txs); err != nil { + if err := mempool.Update(0, txs, nil, nil); err != nil { t.Error(err) } } @@ -224,6 +315,28 @@ func TestSerialReap(t *testing.T) { reapCheck(600) } +func TestCacheRemove(t *testing.T) { + cache := newMapTxCache(100) + numTxs := 10 + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + rand.Read(txBytes) + txs[i] = txBytes + cache.Push(txBytes) + // make sure its added to both the linked list and the map + require.Equal(t, i+1, len(cache.map_)) + require.Equal(t, i+1, cache.list.Len()) + } + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list + require.Equal(t, numTxs-(i+1), len(cache.map_)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} + func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. rootDir, err := ioutil.TempDir("", "mempool-test") @@ -259,15 +372,12 @@ func TestMempoolCloseWAL(t *testing.T) { // 7. 
Invoke CloseWAL() and ensure it discards the
 	// WAL thus any other write won't go through.
-	require.True(t, mempool.CloseWAL(), "CloseWAL should CloseWAL")
+	mempool.CloseWAL()
 	mempool.CheckTx(types.Tx([]byte("bar")), nil)
 	sum2 := checksumFile(walFilepath, t)
 	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")

-	// 8. Second CloseWAL should do nothing
-	require.False(t, mempool.CloseWAL(), "CloseWAL should CloseWAL")
-
-	// 9. Sanity check to ensure that the WAL file still exists
+	// 8. Sanity check to ensure that the WAL file still exists
 	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
 	require.Nil(t, err, "successful globbing expected")
 	require.Equal(t, 1, len(m3), "expecting only the wal file to remain")
diff --git a/mempool/metrics.go b/mempool/metrics.go
index f381678cb31..3418f1efecd 100644
--- a/mempool/metrics.go
+++ b/mempool/metrics.go
@@ -3,32 +3,62 @@ package mempool
 import (
 	"github.com/go-kit/kit/metrics"
 	"github.com/go-kit/kit/metrics/discard"
-
-	prometheus "github.com/go-kit/kit/metrics/prometheus"
+	"github.com/go-kit/kit/metrics/prometheus"
 	stdprometheus "github.com/prometheus/client_golang/prometheus"
 )

+const MetricsSubsystem = "mempool"
+
 // Metrics contains metrics exposed by this package.
 // see MetricsProvider for descriptions.
 type Metrics struct {
 	// Size of the mempool.
 	Size metrics.Gauge
+	// Histogram of transaction sizes, in bytes.
+	TxSizeBytes metrics.Histogram
+	// Number of failed transactions.
+	FailedTxs metrics.Counter
+	// Number of times transactions are rechecked in the mempool.
+	RecheckTimes metrics.Counter
 }

 // PrometheusMetrics returns Metrics built using the Prometheus client library.
-func PrometheusMetrics() *Metrics {
+func PrometheusMetrics(namespace string) *Metrics {
 	return &Metrics{
 		Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
-			Subsystem: "mempool",
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
 			Name:      "size",
 			Help:      "Size of the mempool (number of uncommitted transactions).",
 		}, []string{}),
+		TxSizeBytes: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "tx_size_bytes",
+			Help:      "Transaction sizes in bytes.",
+			Buckets:   stdprometheus.ExponentialBuckets(1, 3, 17),
+		}, []string{}),
+		FailedTxs: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "failed_txs",
+			Help:      "Number of failed transactions.",
+		}, []string{}),
+		RecheckTimes: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name:      "recheck_times",
+			Help:      "Number of times transactions are rechecked in the mempool.",
+		}, []string{}),
 	}
 }

 // NopMetrics returns no-op Metrics.
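 // All fields simply discard their observations; DefaultMetricsProvider
 // returns these when Prometheus instrumentation is disabled.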
func NopMetrics() *Metrics { return &Metrics{ - Size: discard.NewGauge(), + Size: discard.NewGauge(), + TxSizeBytes: discard.NewHistogram(), + FailedTxs: discard.NewCounter(), + RecheckTimes: discard.NewCounter(), } } diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index b4362032a95..8ac400b0a4e 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -86,7 +86,7 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int time.Sleep(time.Millisecond * 100) } - reapedTxs := mempool.Reap(len(txs)) + reapedTxs := mempool.ReapMaxTxs(len(txs)) for i, tx := range txs { assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i])) } diff --git a/networks/local/README.md b/networks/local/README.md index 09a0b12cb87..8d4299693a3 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,5 +1,9 @@ # Local Cluster with Docker Compose +DEPRECATED! + +See the [docs](https://tendermint.com/docs/networks/docker-compose.html). + ## Requirements - [Install tendermint](/docs/install.md) diff --git a/networks/remote/README.md b/networks/remote/README.md index 2094fcc98ab..4c035be8c81 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](/docs/terraform-and-ansible.md) +See the [docs](https://tendermint.com/docs/networks/terraform-and-ansible.html). diff --git a/node/node.go b/node/node.go index 0c3396dc686..0652a392fbc 100644 --- a/node/node.go +++ b/node/node.go @@ -7,21 +7,23 @@ import ( "fmt" "net" "net/http" + _ "net/http/pprof" + "strings" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" amino "github.com/tendermint/go-amino" abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/log" - bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/evidence" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" @@ -30,16 +32,14 @@ import ( rpccore "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" grpccore "github.com/tendermint/tendermint/rpc/grpc" - rpc "github.com/tendermint/tendermint/rpc/lib" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" + "github.com/tendermint/tendermint/rpc/lib/server" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - - _ "net/http/pprof" ) //------------------------------------------------------------------------------ @@ -80,8 +80,14 @@ 
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error) // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { + // Generate node PrivKey + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + return nil, err + } return NewNode(config, privval.LoadOrGenFilePV(config.PrivValidatorFile()), + nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), DefaultDBProvider, @@ -91,16 +97,17 @@ func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { } // MetricsProvider returns a consensus, p2p and mempool Metrics. -type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) +type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) // DefaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { - return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { + return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) { if config.Prometheus { - return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics() + return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace), + mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace) } - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics() + return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics() } } @@ -117,8 +124,12 @@ type Node struct { privValidator types.PrivValidator // local node's validator key // network - sw *p2p.Switch // p2p connections - addrBook pex.AddrBook // known peers + transport *p2p.MultiplexTransport + sw *p2p.Switch // p2p connections + addrBook pex.AddrBook // known peers + nodeInfo p2p.NodeInfo + nodeKey *p2p.NodeKey // our node privkey + isListening bool // services eventBus *types.EventBus // pub/sub for services @@ -139,6 +150,7 @@ type Node struct { // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, + nodeKey *p2p.NodeKey, clientCreator proxy.ClientCreator, genesisDocProvider GenesisDocProvider, dbProvider DBProvider, @@ -176,21 +188,37 @@ func NewNode(config *cfg.Config, return nil, err } - // Create the proxyApp, which manages connections (consensus, mempool, query) - // and sync tendermint and the app by performing a handshake - // and replaying any necessary blocks - consensusLogger := logger.With("module", "consensus") - handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) - proxyApp := proxy.NewAppConns(clientCreator, handshaker) + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). + proxyApp := proxy.NewAppConns(clientCreator) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { return nil, fmt.Errorf("Error starting proxy app connections: %v", err) } - // reload the state (it may have been updated by the handshake) + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. 
+ consensusLogger := logger.With("module", "consensus") + handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) + handshaker.SetLogger(consensusLogger) + if err := handshaker.Handshake(proxyApp); err != nil { + return nil, fmt.Errorf("Error during handshake: %v", err) + } + + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). state = sm.LoadState(stateDB) + // Ensure the state's block version matches that of the software. + if state.Version.Consensus.Block != version.BlockProtocol { + return nil, fmt.Errorf( + "Block version of the software does not match that of the state.\n"+ + "Got version.BlockProtocol=%v, state.Version.Consensus.Block=%v", + version.BlockProtocol, + state.Version.Consensus.Block, + ) + } + // If an address is provided, listen on the socket for a // connection from an external signing process. if config.PrivValidatorListenAddr != "" { @@ -198,7 +226,7 @@ func NewNode(config *cfg.Config, // TODO: persist this key so external signer // can actually authenticate us privKey = ed25519.GenPrivKey() - pvsc = privval.NewSocketPV( + pvsc = privval.NewTCPVal( logger.With("module", "privval"), config.PrivValidatorListenAddr, privKey, @@ -229,18 +257,22 @@ func NewNode(config *cfg.Config, consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) } - csMetrics, p2pMetrics, memplMetrics := metricsProvider() + csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider() // Make MempoolReactor - mempoolLogger := logger.With("module", "mempool") mempool := mempl.NewMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempl.WithMetrics(memplMetrics), + mempl.WithPreCheck(sm.TxPreCheck(state)), + mempl.WithPostCheck(sm.TxPostCheck(state)), ) + mempoolLogger := logger.With("module", "mempool") mempool.SetLogger(mempoolLogger) - mempool.InitWAL() // no need to have the mempool wal during tests + if config.Mempool.WalEnabled() { + mempool.InitWAL() // no need to have the mempool wal during tests + } mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool) mempoolReactor.SetLogger(mempoolLogger) @@ -262,7 +294,14 @@ func NewNode(config *cfg.Config, blockExecLogger := logger.With("module", "state") // make block executor for consensus and blockchain reactors to execute blocks - blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool) + blockExec := sm.NewBlockExecutor( + stateDB, + blockExecLogger, + proxyApp.Consensus(), + mempool, + evidencePool, + sm.BlockExecutorWithMetrics(smMetrics), + ) // Make BlockchainReactor bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) @@ -276,23 +315,128 @@ func NewNode(config *cfg.Config, blockStore, mempool, evidencePool, - cs.WithMetrics(csMetrics), + cs.StateMetrics(csMetrics), ) consensusState.SetLogger(consensusLogger) if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewConsensusReactor(consensusState, fastSync) + consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics)) consensusReactor.SetLogger(consensusLogger) - p2pLogger := logger.With("module", "p2p") + eventBus := types.NewEventBus() + eventBus.SetLogger(logger.With("module", "events")) + + // services which will be publishing and/or subscribing for messages (events) + // 
consensusReactor will set it on consensusState and blockExecutor + consensusReactor.SetEventBus(eventBus) + + // Transaction indexing + var txIndexer txindex.TxIndexer + switch config.TxIndex.Indexer { + case "kv": + store, err := dbProvider(&DBContext{"tx_index", config}) + if err != nil { + return nil, err + } + if config.TxIndex.IndexTags != "" { + txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " "))) + } else if config.TxIndex.IndexAllTags { + txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) + } else { + txIndexer = kv.NewTxIndex(store) + } + default: + txIndexer = &null.TxIndex{} + } + + indexerService := txindex.NewIndexerService(txIndexer, eventBus) + indexerService.SetLogger(logger.With("module", "txindex")) - sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics)) + var ( + p2pLogger = logger.With("module", "p2p") + nodeInfo = makeNodeInfo( + config, + nodeKey.ID(), + txIndexer, + genDoc.ChainID, + p2p.NewProtocolVersion( + version.P2PProtocol, // global + state.Version.Consensus.Block, + state.Version.Consensus.App, + ), + ) + ) + + // Setup Transport. + var ( + transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey) + connFilters = []p2p.ConnFilterFunc{} + peerFilters = []p2p.PeerFilterFunc{} + ) + + if !config.P2P.AllowDuplicateIP { + connFilters = append(connFilters, p2p.ConnDuplicateIPFilter()) + } + + // Filter peers by addr or pubkey with an ABCI query. + // If the query return code is OK, add peer. + if config.FilterPeers { + connFilters = append( + connFilters, + // ABCI query for address filtering. + func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { + res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("Error querying abci app: %v", res) + } + + return nil + }, + ) + + peerFilters = append( + peerFilters, + // ABCI query for ID filtering. + func(_ p2p.IPeerSet, p p2p.Peer) error { + res, err := proxyApp.Query().QuerySync(abci.RequestQuery{ + Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), + }) + if err != nil { + return err + } + if res.IsErr() { + return fmt.Errorf("Error querying abci app: %v", res) + } + + return nil + }, + ) + } + + p2p.MultiplexTransportConnFilters(connFilters...)(transport) + + // Setup Switch. + sw := p2p.NewSwitch( + config.P2P, + transport, + p2p.WithMetrics(p2pMetrics), + p2p.SwitchPeerFilters(peerFilters...), + ) sw.SetLogger(p2pLogger) sw.AddReactor("MEMPOOL", mempoolReactor) sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) + sw.SetNodeInfo(nodeInfo) + sw.SetNodeKey(nodeKey) + + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) // Optionally, start the pex reactor // @@ -307,12 +451,16 @@ func NewNode(config *cfg.Config, // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. // Note we currently use the addrBook regardless at least for AddOurAddress addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) + + // Add ourselves to addrbook to prevent dialing ourselves + addrBook.AddOurAddress(nodeInfo.NetAddress()) + addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) if config.P2P.PexReactor { // TODO persistent peers ? 
so we can have their DNS addrs saved pexReactor := pex.NewPEXReactor(addrBook, &pex.PEXReactorConfig{ - Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "), + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), SeedMode: config.P2P.SeedMode, }) pexReactor.SetLogger(p2pLogger) @@ -321,62 +469,6 @@ func NewNode(config *cfg.Config, sw.SetAddrBook(addrBook) - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - // XXX: Query format subject to change - if config.FilterPeers { - // NOTE: addr is ip:port - sw.SetAddrFilter(func(addr net.Addr) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())}) - if err != nil { - return err - } - if resQuery.IsErr() { - return fmt.Errorf("Error querying abci app: %v", resQuery) - } - return nil - }) - sw.SetIDFilter(func(id p2p.ID) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/id/%s", id)}) - if err != nil { - return err - } - if resQuery.IsErr() { - return fmt.Errorf("Error querying abci app: %v", resQuery) - } - return nil - }) - } - - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - - // services which will be publishing and/or subscribing for messages (events) - // consensusReactor will set it on consensusState and blockExecutor - consensusReactor.SetEventBus(eventBus) - - // Transaction indexing - var txIndexer txindex.TxIndexer - switch config.TxIndex.Indexer { - case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) - if err != nil { - return nil, err - } - if config.TxIndex.IndexTags != "" { - txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " "))) - } else if config.TxIndex.IndexAllTags { - txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) - } else { - txIndexer = kv.NewTxIndex(store) - } - default: - txIndexer = &null.TxIndex{} - } - - indexerService := txindex.NewIndexerService(txIndexer, eventBus) - indexerService.SetLogger(logger.With("module", "txindex")) - // run the profile server profileHost := config.ProfListenAddress if profileHost != "" { @@ -390,8 +482,11 @@ func NewNode(config *cfg.Config, genesisDoc: genDoc, privValidator: privValidator, - sw: sw, - addrBook: addrBook, + transport: transport, + sw: sw, + addrBook: addrBook, + nodeInfo: nodeInfo, + nodeKey: nodeKey, stateDB: stateDB, blockStore: blockStore, @@ -411,36 +506,20 @@ func NewNode(config *cfg.Config, // OnStart starts the Node. It implements cmn.Service. func (n *Node) OnStart() error { - err := n.eventBus.Start() - if err != nil { - return err + now := tmtime.Now() + genTime := n.genesisDoc.GenesisTime + if genTime.After(now) { + n.Logger.Info("Genesis time is in the future. 
Sleeping until then...", "genTime", genTime) + time.Sleep(genTime.Sub(now)) } - // Create & add listener - l := p2p.NewDefaultListener( - n.config.P2P.ListenAddress, - n.config.P2P.ExternalAddress, - n.config.P2P.UPNP, - n.Logger.With("module", "p2p")) - n.sw.AddListener(l) - - // Generate node PrivKey - // TODO: pass in like privValidator - nodeKey, err := p2p.LoadOrGenNodeKey(n.config.NodeKeyFile()) + err := n.eventBus.Start() if err != nil { return err } - n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) - - nodeInfo := n.makeNodeInfo(nodeKey.ID()) - n.sw.SetNodeInfo(nodeInfo) - n.sw.SetNodeKey(nodeKey) - - // Add ourselves to addrbook to prevent dialing ourselves - n.addrBook.AddOurAddress(nodeInfo.NetAddress()) // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(cmn.SplitAndTrim(n.config.P2P.PrivatePeerIDs, ",", " ")) + n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) // Start the RPC server before the P2P server // so we can eg. receive txs for the first block @@ -457,6 +536,17 @@ func (n *Node) OnStart() error { n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) } + // Start the transport. + addr, err := p2p.NewNetAddressStringWithOptionalID(n.config.P2P.ListenAddress) + if err != nil { + return err + } + if err := n.transport.Listen(*addr); err != nil { + return err + } + + n.isListening = true + // Start the switch (the P2P server). err = n.sw.Start() if err != nil { @@ -465,7 +555,7 @@ func (n *Node) OnStart() error { // Always connect to persistent peers if n.config.P2P.PersistentPeers != "" { - err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true) + err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true) if err != nil { return err } @@ -489,6 +579,17 @@ func (n *Node) OnStop() { // TODO: gracefully disconnect from peers. n.sw.Stop() + // stop mempool WAL + if n.config.Mempool.WalEnabled() { + n.mempoolReactor.Mempool.CloseWAL() + } + + if err := n.transport.Close(); err != nil { + n.Logger.Error("Error closing transport", "err", err) + } + + n.isListening = false + // finally stop the listeners / external services for _, l := range n.rpcListeners { n.Logger.Info("Closing rpc listener", "listener", l) @@ -497,7 +598,7 @@ func (n *Node) OnStop() { } } - if pvsc, ok := n.privValidator.(*privval.SocketPV); ok { + if pvsc, ok := n.privValidator.(*privval.TCPVal); ok { if err := pvsc.Stop(); err != nil { n.Logger.Error("Error stopping priv validator socket client", "err", err) } @@ -511,21 +612,6 @@ func (n *Node) OnStop() { } } -// RunForever waits for an interrupt signal and stops the node. -func (n *Node) RunForever() { - // Sleep forever and then... - cmn.TrapSignal(func() { - n.Stop() - }) -} - -// AddListener adds a listener to accept inbound peer connections. -// It should be called before starting the Node. 
-// The first listener is the primary listener (in NodeInfo) -func (n *Node) AddListener(l p2p.Listener) { - n.sw.AddListener(l) -} - // ConfigureRPC sets all variables in rpccore so they will serve // rpc calls from this node func (n *Node) ConfigureRPC() { @@ -534,7 +620,8 @@ func (n *Node) ConfigureRPC() { rpccore.SetConsensusState(n.consensusState) rpccore.SetMempool(n.mempoolReactor.Mempool) rpccore.SetEvidencePool(n.evidencePool) - rpccore.SetSwitch(n.sw) + rpccore.SetP2PPeers(n.sw) + rpccore.SetP2PTransport(n) rpccore.SetPubKey(n.privValidator.GetPubKey()) rpccore.SetGenesisDoc(n.genesisDoc) rpccore.SetAddrBook(n.addrBook) @@ -547,7 +634,7 @@ func (n *Node) ConfigureRPC() { func (n *Node) startRPC() ([]net.Listener, error) { n.ConfigureRPC() - listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ") + listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") coreCodec := amino.NewCodec() ctypes.RegisterAmino(coreCodec) @@ -666,59 +753,69 @@ func (n *Node) ProxyApp() proxy.AppConns { return n.proxyApp } -func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { +//------------------------------------------------------------------------------ + +func (n *Node) Listeners() []string { + return []string{ + fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress), + } +} + +func (n *Node) IsListening() bool { + return n.isListening +} + +// NodeInfo returns the Node's Info from the Switch. +func (n *Node) NodeInfo() p2p.NodeInfo { + return n.nodeInfo +} + +func makeNodeInfo( + config *cfg.Config, + nodeID p2p.ID, + txIndexer txindex.TxIndexer, + chainID string, + protocolVersion p2p.ProtocolVersion, +) p2p.NodeInfo { txIndexerStatus := "on" - if _, ok := n.txIndexer.(*null.TxIndex); ok { + if _, ok := txIndexer.(*null.TxIndex); ok { txIndexerStatus = "off" } - nodeInfo := p2p.NodeInfo{ - ID: nodeID, - Network: n.genesisDoc.ChainID, - Version: version.Version, + nodeInfo := p2p.DefaultNodeInfo{ + ProtocolVersion: protocolVersion, + ID_: nodeID, + Network: chainID, + Version: version.TMCoreSemVer, Channels: []byte{ bc.BlockchainChannel, cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, mempl.MempoolChannel, evidence.EvidenceChannel, }, - Moniker: n.config.Moniker, - Other: []string{ - cmn.Fmt("amino_version=%v", amino.Version), - cmn.Fmt("p2p_version=%v", p2p.Version), - cmn.Fmt("consensus_version=%v", cs.Version), - cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version), - cmn.Fmt("tx_index=%v", txIndexerStatus), + Moniker: config.Moniker, + Other: p2p.DefaultNodeInfoOther{ + TxIndex: txIndexerStatus, + RPCAddress: config.RPC.ListenAddress, }, } - if n.config.P2P.PexReactor { + if config.P2P.PexReactor { nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) } - rpcListenAddr := n.config.RPC.ListenAddress - nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr)) + lAddr := config.P2P.ExternalAddress - if !n.sw.IsListening() { - return nodeInfo + if lAddr == "" { + lAddr = config.P2P.ListenAddress } - p2pListener := n.sw.Listeners()[0] - p2pHost := p2pListener.ExternalAddressHost() - p2pPort := p2pListener.ExternalAddress().Port - nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) + nodeInfo.ListenAddr = lAddr return nodeInfo } //------------------------------------------------------------------------------ -// NodeInfo returns the Node's Info from the Switch. 
-func (n *Node) NodeInfo() p2p.NodeInfo { - return n.sw.NodeInfo() -} - -//------------------------------------------------------------------------------ - var ( genesisDocKey = []byte("genesisDoc") ) @@ -745,3 +842,24 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { } db.SetSync(genesisDocKey, bytes) } + +// splitAndTrimEmpty slices s into all subslices separated by sep and returns a +// slice of the string s with all leading and trailing Unicode code points +// contained in cutset removed. If sep is empty, SplitAndTrim splits after each +// UTF-8 sequence. First part is equivalent to strings.SplitN with a count of +// -1. also filter out empty strings, only return non-empty strings. +func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/node/node_test.go b/node/node_test.go index ca074e1bc34..3a33e6bbb79 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -10,10 +10,16 @@ import ( "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/version" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" + + tmtime "github.com/tendermint/tendermint/types/time" ) func TestNodeStartStop(t *testing.T) { @@ -56,3 +62,54 @@ func TestNodeStartStop(t *testing.T) { t.Fatal("timed out waiting for shutdown") } } + +func TestSplitAndTrimEmpty(t *testing.T) { + testCases := []struct { + s string + sep string + cutset string + expected []string + }{ + {"a,b,c", ",", " ", []string{"a", "b", "c"}}, + {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, + {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, + {" a, ", ",", " ", []string{"a"}}, + {" ", ",", " ", []string{}}, + } + + for _, tc := range testCases { + assert.Equal(t, tc.expected, splitAndTrimEmpty(tc.s, tc.sep, tc.cutset), "%s", tc.s) + } +} + +func TestNodeDelayedStop(t *testing.T) { + config := cfg.ResetTestRoot("node_delayed_node_test") + now := tmtime.Now() + + // create & start node + n, err := DefaultNewNode(config, log.TestingLogger()) + n.GenesisDoc().GenesisTime = now.Add(5 * time.Second) + assert.NoError(t, err) + + n.Start() + startTime := tmtime.Now() + assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime)) +} + +func TestNodeSetAppVersion(t *testing.T) { + config := cfg.ResetTestRoot("node_app_version_test") + + // create & start node + n, err := DefaultNewNode(config, log.TestingLogger()) + assert.NoError(t, err, "expected no err on DefaultNewNode") + + // default config uses the kvstore app + var appVersion version.Protocol = kvstore.ProtocolVersion + + // check version is set in state + state := sm.LoadState(n.stateDB) + assert.Equal(t, state.Version.Consensus.App, appVersion) + + // check version is set in node info + assert.Equal(t, n.nodeInfo.(p2p.DefaultNodeInfo).ProtocolVersion.App, appVersion) +} diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index da1296da0d8..be65d2f14d8 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -1,8 +1,8 @@ package p2p import ( - 
"github.com/tendermint/tendermint/p2p/conn" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p/conn" ) type Reactor interface { diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 9672e01174f..80fc53ddb59 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -257,7 +257,7 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool { // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -286,7 +286,7 @@ func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -311,7 +311,7 @@ func (c *MConnection) CanSend(chID byte) bool { channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID)) return false } return channel.canSend() @@ -337,7 +337,7 @@ FOR_LOOP: } case <-c.pingTimer.Chan(): c.Logger.Debug("Send Ping") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{}) + _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPing{}) if err != nil { break SELECTION } @@ -359,7 +359,7 @@ FOR_LOOP: } case <-c.pong: c.Logger.Debug("Send Pong") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{}) + _n, err = cdc.MarshalBinaryLengthPrefixedWriter(c.bufConnWriter, PacketPong{}) if err != nil { break SELECTION } @@ -477,7 +477,7 @@ FOR_LOOP: var packet Packet var _n int64 var err error - _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) + _n, err = cdc.UnmarshalBinaryLengthPrefixedReader(c.bufConnReader, &packet, int64(c._maxPacketMsgSize)) c.recvMonitor.Update(int(_n)) if err != nil { if c.IsRunning() { @@ -553,7 +553,7 @@ func (c *MConnection) stopPongTimer() { // maxPacketMsgSize returns a maximum size of PacketMsg, including the overhead // of amino encoding. func (c *MConnection) maxPacketMsgSize() int { - return len(cdc.MustMarshalBinary(PacketMsg{ + return len(cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{ ChannelID: 0x01, EOF: 1, Bytes: make([]byte, c.config.MaxPacketMsgPayloadSize), @@ -585,9 +585,9 @@ func (c *MConnection) Status() ConnectionStatus { status.Channels[i] = ChannelStatus{ ID: channel.desc.ID, SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(channel.sendQueueSize), // TODO use atomic + SendQueueSize: int(atomic.LoadInt32(&channel.sendQueueSize)), Priority: channel.desc.Priority, - RecentlySent: channel.recentlySent, + RecentlySent: atomic.LoadInt64(&channel.recentlySent), } } return status @@ -723,8 +723,8 @@ func (ch *Channel) nextPacketMsg() PacketMsg { // Not goroutine-safe func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) { var packet = ch.nextPacketMsg() - n, err = cdc.MarshalBinaryWriter(w, packet) - ch.recentlySent += n + n, err = cdc.MarshalBinaryLengthPrefixedWriter(w, packet) + atomic.AddInt64(&ch.recentlySent, n) return } @@ -756,7 +756,7 @@ func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) { func (ch *Channel) updateStats() { // Exponential decay of stats. // TODO: optimize. 
- ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) + atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8)) } //---------------------------------------- diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 19e05fbc794..59fe0d1df2a 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -140,7 +140,7 @@ func TestMConnectionPongTimeoutResultsInError(t *testing.T) { go func() { // read ping var pkt PacketPing - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) assert.Nil(t, err) serverGotPing <- struct{}{} }() @@ -176,22 +176,22 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { defer mconn.Stop() // sending 3 pongs in a row (abuse) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) serverGotPing := make(chan struct{}) go func() { // read ping (one byte) var packet, err = Packet(nil), error(nil) - _, err = cdc.UnmarshalBinaryReader(server, &packet, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &packet, maxPingPongPacketSize) require.Nil(t, err) serverGotPing <- struct{}{} // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) }() <-serverGotPing @@ -227,18 +227,18 @@ func TestMConnectionMultiplePings(t *testing.T) { // sending 3 pings in a row (abuse) // see https://github.com/tendermint/tendermint/issues/1190 - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) var pkt PacketPong - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPing{})) require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) assert.True(t, mconn.IsRunning()) @@ -270,20 +270,20 @@ func TestMConnectionPingPongs(t *testing.T) { go func() { // read ping var pkt PacketPing - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) serverGotPing <- struct{}{} // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = 
server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) time.Sleep(mconn.config.PingInterval) // read ping - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPingPongPacketSize) + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(server, &pkt, maxPingPongPacketSize) require.Nil(t, err) // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) + _, err = server.Write(cdc.MustMarshalBinaryLengthPrefixed(PacketPong{})) require.Nil(t, err) }() <-serverGotPing @@ -380,7 +380,7 @@ func TestMConnectionReadErrorBadEncoding(t *testing.T) { client := mconnClient.conn // send badly encoded msgPacket - bz := cdc.MustMarshalBinary(PacketMsg{}) + bz := cdc.MustMarshalBinaryLengthPrefixed(PacketMsg{}) bz[4] += 0x01 // Invalid prefix bytes. // Write it. @@ -428,12 +428,11 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { EOF: 1, Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), } - _, err = cdc.MarshalBinaryWriter(buf, packet) + _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) assert.Nil(t, err) _, err = client.Write(buf.Bytes()) assert.Nil(t, err) assert.True(t, expectSend(chOnRcv), "msg just right") - assert.False(t, expectSend(chOnErr), "msg just right") // send msg thats too long buf = new(bytes.Buffer) @@ -442,11 +441,10 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { EOF: 1, Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+100), } - _, err = cdc.MarshalBinaryWriter(buf, packet) + _, err = cdc.MarshalBinaryLengthPrefixedWriter(buf, packet) assert.Nil(t, err) _, err = client.Write(buf.Bytes()) assert.NotNil(t, err) - assert.False(t, expectSend(chOnRcv), "msg too long") assert.True(t, expectSend(chOnErr), "msg too long") } diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 75199ee6ba3..1dc66afff91 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -10,6 +10,7 @@ import ( "net" "time" + // forked to github.com/tendermint/crypto "golang.org/x/crypto/chacha20poly1305" "golang.org/x/crypto/curve25519" "golang.org/x/crypto/nacl/box" @@ -123,7 +124,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { data = nil } chunkLength := len(chunk) - binary.BigEndian.PutUint32(frame, uint32(chunkLength)) + binary.LittleEndian.PutUint32(frame, uint32(chunkLength)) copy(frame[dataLenSize:], chunk) aead, err := chacha20poly1305.New(sc.sendSecret[:]) @@ -172,7 +173,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { incrNonce(sc.recvNonce) // end decryption - var chunkLength = binary.BigEndian.Uint32(frame) // read the first two bytes + var chunkLength = binary.LittleEndian.Uint32(frame) // read the first four bytes if chunkLength > dataMaxSize { return 0, errors.New("chunkLength is greater than dataMaxSize") } @@ -210,7 +211,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3 // Send our pubkey and receive theirs in tandem. 
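 	// Each closure below returns (val, err, abort); returning abort=true tells
 	// cmn.Parallel to give up on the exchange early.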
var trs, _ = cmn.Parallel( func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub) + var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(conn, locEphPub) if err1 != nil { return nil, err1, true // abort } @@ -218,7 +219,7 @@ func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[3 }, func(_ int) (val interface{}, err error, abort bool) { var _remEphPub [32]byte - var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO + var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(conn, &_remEphPub, 1024*1024) // TODO if err2 != nil { return nil, err2, true // abort } @@ -304,7 +305,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] // Send our info and receive theirs in tandem. var trs, _ = cmn.Parallel( func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature}) + var _, err1 = cdc.MarshalBinaryLengthPrefixedWriter(sc, authSigMessage{pubKey, signature}) if err1 != nil { return nil, err1, true // abort } @@ -312,7 +313,7 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] }, func(_ int) (val interface{}, err error, abort bool) { var _recvMsg authSigMessage - var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO + var _, err2 = cdc.UnmarshalBinaryLengthPrefixedReader(sc, &_recvMsg, 1024*1024) // TODO if err2 != nil { return nil, err2, true // abort } @@ -332,13 +333,12 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] //-------------------------------------------------------------------------------- -// increment nonce big-endian by 1 with wraparound. +// Increment nonce little-endian by 1 with wraparound. +// Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four +// bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes +// (little-endian in nonce[4:]). func incrNonce(nonce *[aeadNonceSize]byte) { - for i := aeadNonceSize - 1; 0 <= i; i-- { - nonce[i]++ - // if this byte wrapped around to zero, we need to increment the next byte - if nonce[i] != 0 { - return - } - } + counter := binary.LittleEndian.Uint64(nonce[4:]) + counter++ + binary.LittleEndian.PutUint64(nonce[4:], counter) } diff --git a/p2p/conn_set.go b/p2p/conn_set.go new file mode 100644 index 00000000000..f960c0e8836 --- /dev/null +++ b/p2p/conn_set.go @@ -0,0 +1,73 @@ +package p2p + +import ( + "net" + "sync" +) + +// ConnSet is a lookup table for connections and all their ips. +type ConnSet interface { + Has(net.Conn) bool + HasIP(net.IP) bool + Set(net.Conn, []net.IP) + Remove(net.Conn) +} + +type connSetItem struct { + conn net.Conn + ips []net.IP +} + +type connSet struct { + sync.RWMutex + + conns map[string]connSetItem +} + +// NewConnSet returns a ConnSet implementation. 
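+// Entries are keyed by the connection's remote address string, and the
+// embedded RWMutex makes the set safe for concurrent use.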
+func NewConnSet() *connSet { + return &connSet{ + conns: map[string]connSetItem{}, + } +} + +func (cs *connSet) Has(c net.Conn) bool { + cs.RLock() + defer cs.RUnlock() + + _, ok := cs.conns[c.RemoteAddr().String()] + + return ok +} + +func (cs *connSet) HasIP(ip net.IP) bool { + cs.RLock() + defer cs.RUnlock() + + for _, c := range cs.conns { + for _, known := range c.ips { + if known.Equal(ip) { + return true + } + } + } + + return false +} + +func (cs *connSet) Remove(c net.Conn) { + cs.Lock() + defer cs.Unlock() + + delete(cs.conns, c.RemoteAddr().String()) +} + +func (cs *connSet) Set(c net.Conn, ips []net.IP) { + cs.Lock() + defer cs.Unlock() + + cs.conns[c.RemoteAddr().String()] = connSetItem{ + conn: c, + ips: ips, + } +} diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go index bb6e822fcb7..65ff65fb2d4 100644 --- a/p2p/dummy/peer.go +++ b/p2p/dummy/peer.go @@ -42,7 +42,7 @@ func (p *peer) IsPersistent() bool { // NodeInfo always returns empty node info. func (p *peer) NodeInfo() p2p.NodeInfo { - return p2p.NodeInfo{} + return p2p.DefaultNodeInfo{} } // RemoteIP always returns localhost. diff --git a/p2p/errors.go b/p2p/errors.go index fc477d1c2f0..706150945b8 100644 --- a/p2p/errors.go +++ b/p2p/errors.go @@ -5,6 +5,97 @@ import ( "net" ) +// ErrFilterTimeout indicates that a filter operation timed out. +type ErrFilterTimeout struct{} + +func (e ErrFilterTimeout) Error() string { + return "filter timed out" +} + +// ErrRejected indicates that a Peer was rejected carrying additional +// information as to the reason. +type ErrRejected struct { + addr NetAddress + conn net.Conn + err error + id ID + isAuthFailure bool + isDuplicate bool + isFiltered bool + isIncompatible bool + isNodeInfoInvalid bool + isSelf bool +} + +// Addr returns the NetAddress for the rejected Peer. +func (e ErrRejected) Addr() NetAddress { + return e.addr +} + +func (e ErrRejected) Error() string { + if e.isAuthFailure { + return fmt.Sprintf("auth failure: %s", e.err) + } + + if e.isDuplicate { + if e.conn != nil { + return fmt.Sprintf( + "duplicate CONN<%s>", + e.conn.RemoteAddr().String(), + ) + } + if e.id != "" { + return fmt.Sprintf("duplicate ID<%v>", e.id) + } + } + + if e.isFiltered { + if e.conn != nil { + return fmt.Sprintf( + "filtered CONN<%s>: %s", + e.conn.RemoteAddr().String(), + e.err, + ) + } + + if e.id != "" { + return fmt.Sprintf("filtered ID<%v>: %s", e.id, e.err) + } + } + + if e.isIncompatible { + return fmt.Sprintf("incompatible: %s", e.err) + } + + if e.isNodeInfoInvalid { + return fmt.Sprintf("invalid NodeInfo: %s", e.err) + } + + if e.isSelf { + return fmt.Sprintf("self ID<%v>", e.id) + } + + return fmt.Sprintf("%s", e.err) +} + +// IsAuthFailure when Peer authentication was unsuccessful. +func (e ErrRejected) IsAuthFailure() bool { return e.isAuthFailure } + +// IsDuplicate when Peer ID or IP are present already. +func (e ErrRejected) IsDuplicate() bool { return e.isDuplicate } + +// IsFiltered when Peer ID or IP was filtered. +func (e ErrRejected) IsFiltered() bool { return e.isFiltered } + +// IsIncompatible when Peer NodeInfo is not compatible with our own. +func (e ErrRejected) IsIncompatible() bool { return e.isIncompatible } + +// IsNodeInfoInvalid when the sent NodeInfo is not valid. +func (e ErrRejected) IsNodeInfoInvalid() bool { return e.isNodeInfoInvalid } + +// IsSelf when Peer is our own node. +func (e ErrRejected) IsSelf() bool { return e.isSelf } + // ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known // ID. 
type ErrSwitchDuplicatePeerID struct { @@ -47,6 +138,13 @@ func (e ErrSwitchAuthenticationFailure) Error() string { ) } +// ErrTransportClosed is raised when the Transport has been closed. +type ErrTransportClosed struct{} + +func (e ErrTransportClosed) Error() string { + return "transport has been closed" +} + //------------------------------------------------------------------- type ErrNetAddressNoID struct { diff --git a/p2p/key.go b/p2p/key.go index 4d1ecd82f7f..fc64f27bb68 100644 --- a/p2p/key.go +++ b/p2p/key.go @@ -16,7 +16,7 @@ type ID string // IDByteLength is the length of a crypto.Address. Currently only 20. // TODO: support other length addresses ? -const IDByteLength = 20 +const IDByteLength = crypto.AddressSize //------------------------------------------------------------------------------ // Persistent peer ID diff --git a/p2p/listener.go b/p2p/listener.go deleted file mode 100644 index d73b3cabfef..00000000000 --- a/p2p/listener.go +++ /dev/null @@ -1,286 +0,0 @@ -package p2p - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" - - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/p2p/upnp" -) - -// Listener is a network listener for stream-oriented protocols, providing -// convenient methods to get listener's internal and external addresses. -// Clients are supposed to read incoming connections from a channel, returned -// by Connections() method. -type Listener interface { - Connections() <-chan net.Conn - InternalAddress() *NetAddress - ExternalAddress() *NetAddress - ExternalAddressHost() string - String() string - Stop() error -} - -// DefaultListener is a cmn.Service, running net.Listener underneath. -// Optionally, UPnP is used upon calling NewDefaultListener to resolve external -// address. -type DefaultListener struct { - cmn.BaseService - - listener net.Listener - intAddr *NetAddress - extAddr *NetAddress - connections chan net.Conn -} - -var _ Listener = (*DefaultListener)(nil) - -const ( - numBufferedConnections = 10 - defaultExternalPort = 8770 - tryListenSeconds = 5 -) - -func splitHostPort(addr string) (host string, port int) { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - panic(err) - } - port, err = strconv.Atoi(portStr) - if err != nil { - panic(err) - } - return host, port -} - -// NewDefaultListener creates a new DefaultListener on lAddr, optionally trying -// to determine external address using UPnP. -func NewDefaultListener( - fullListenAddrString string, - externalAddrString string, - useUPnP bool, - logger log.Logger) Listener { - - // Split protocol, address, and port. - protocol, lAddr := cmn.ProtocolAndAddress(fullListenAddrString) - lAddrIP, lAddrPort := splitHostPort(lAddr) - - // Create listener - var listener net.Listener - var err error - for i := 0; i < tryListenSeconds; i++ { - listener, err = net.Listen(protocol, lAddr) - if err == nil { - break - } else if i < tryListenSeconds-1 { - time.Sleep(time.Second * 1) - } - } - if err != nil { - panic(err) - } - // Actual listener local IP & port - listenerIP, listenerPort := splitHostPort(listener.Addr().String()) - logger.Info("Local listener", "ip", listenerIP, "port", listenerPort) - - // Determine internal address... - var intAddr *NetAddress - intAddr, err = NewNetAddressStringWithOptionalID(lAddr) - if err != nil { - panic(err) - } - - inAddrAny := lAddrIP == "" || lAddrIP == "0.0.0.0" - - // Determine external address. 
- var extAddr *NetAddress - - if externalAddrString != "" { - var err error - extAddr, err = NewNetAddressStringWithOptionalID(externalAddrString) - if err != nil { - panic(fmt.Sprintf("Error in ExternalAddress: %v", err)) - } - } - - // If the lAddrIP is INADDR_ANY, try UPnP. - if extAddr == nil && useUPnP && inAddrAny { - extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) - } - - // Otherwise just use the local address. - if extAddr == nil { - defaultToIPv4 := inAddrAny - extAddr = getNaiveExternalAddress(defaultToIPv4, listenerPort, false, logger) - } - if extAddr == nil { - panic("Could not determine external address!") - } - - dl := &DefaultListener{ - listener: listener, - intAddr: intAddr, - extAddr: extAddr, - connections: make(chan net.Conn, numBufferedConnections), - } - dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl) - err = dl.Start() // Started upon construction - if err != nil { - logger.Error("Error starting base service", "err", err) - } - return dl -} - -// OnStart implements cmn.Service by spinning a goroutine, listening for new -// connections. -func (l *DefaultListener) OnStart() error { - if err := l.BaseService.OnStart(); err != nil { - return err - } - go l.listenRoutine() - return nil -} - -// OnStop implements cmn.Service by closing the listener. -func (l *DefaultListener) OnStop() { - l.BaseService.OnStop() - l.listener.Close() // nolint: errcheck -} - -// Accept connections and pass on the channel. -func (l *DefaultListener) listenRoutine() { - for { - conn, err := l.listener.Accept() - - if !l.IsRunning() { - break // Go to cleanup - } - - // listener wasn't stopped, - // yet we encountered an error. - if err != nil { - panic(err) - } - - l.connections <- conn - } - - // Cleanup - close(l.connections) - for range l.connections { - // Drain - } -} - -// Connections returns a channel of inbound connections. -// It gets closed when the listener closes. -// It is the callers responsibility to close any connections received -// over this channel. -func (l *DefaultListener) Connections() <-chan net.Conn { - return l.connections -} - -// InternalAddress returns the internal NetAddress (address used for -// listening). -func (l *DefaultListener) InternalAddress() *NetAddress { - return l.intAddr -} - -// ExternalAddress returns the external NetAddress (publicly available, -// determined using either UPnP or local resolver). -func (l *DefaultListener) ExternalAddress() *NetAddress { - return l.extAddr -} - -// ExternalAddressHost returns the external NetAddress IP string. If an IP is -// IPv6, it's wrapped in brackets ("[2001:db8:1f70::999:de8:7648:6e8]"). -func (l *DefaultListener) ExternalAddressHost() string { - ip := l.ExternalAddress().IP - if isIpv6(ip) { - // Means it's ipv6, so format it with brackets - return "[" + ip.String() + "]" - } - return ip.String() -} - -func (l *DefaultListener) String() string { - return fmt.Sprintf("Listener(@%v)", l.extAddr) -} - -/* external address helpers */ - -// UPNP external address discovery & port mapping -func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *NetAddress { - logger.Info("Getting UPNP external address") - nat, err := upnp.Discover() - if err != nil { - logger.Info("Could not perform UPNP discover", "err", err) - return nil - } - - ext, err := nat.GetExternalAddress() - if err != nil { - logger.Info("Could not get UPNP external address", "err", err) - return nil - } - - // UPnP can't seem to get the external port, so let's just be explicit. 
- if externalPort == 0 { - externalPort = defaultExternalPort - } - - externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0) - if err != nil { - logger.Info("Could not add UPNP port mapping", "err", err) - return nil - } - - logger.Info("Got UPNP external address", "address", ext) - return NewNetAddressIPPort(ext, uint16(externalPort)) -} - -func isIpv6(ip net.IP) bool { - v4 := ip.To4() - if v4 != nil { - return false - } - - ipString := ip.String() - - // Extra check just to be sure it's IPv6 - return (strings.Contains(ipString, ":") && !strings.Contains(ipString, ".")) -} - -// TODO: use syscalls: see issue #712 -func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress { - addrs, err := net.InterfaceAddrs() - if err != nil { - panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) - } - - for _, a := range addrs { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - if defaultToIPv4 || !isIpv6(ipnet.IP) { - v4 := ipnet.IP.To4() - if v4 == nil || (!settleForLocal && v4[0] == 127) { - // loopback - continue - } - } else if !settleForLocal && ipnet.IP.IsLoopback() { - // IPv6, check for loopback - continue - } - return NewNetAddressIPPort(ipnet.IP, uint16(port)) - } - - // try again, but settle for local - logger.Info("Node may not be connected to internet. Settling for local address") - return getNaiveExternalAddress(defaultToIPv4, port, true, logger) -} diff --git a/p2p/listener_test.go b/p2p/listener_test.go deleted file mode 100644 index f87b5d6f5ad..00000000000 --- a/p2p/listener_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package p2p - -import ( - "bytes" - "net" - "strings" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/log" -) - -func TestListener(t *testing.T) { - // Create a listener - l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) - - // Dial the listener - lAddr := l.ExternalAddress() - connOut, err := lAddr.Dial() - if err != nil { - t.Fatalf("Could not connect to listener address %v", lAddr) - } else { - t.Logf("Created a connection to listener address %v", lAddr) - } - connIn, ok := <-l.Connections() - if !ok { - t.Fatalf("Could not get inbound connection from listener") - } - - msg := []byte("hi!") - go func() { - _, err := connIn.Write(msg) - if err != nil { - t.Error(err) - } - }() - b := make([]byte, 32) - n, err := connOut.Read(b) - if err != nil { - t.Fatalf("Error reading off connection: %v", err) - } - - b = b[:n] - if !bytes.Equal(msg, b) { - t.Fatalf("Got %s, expected %s", b, msg) - } - - // Close the server, no longer needed. - l.Stop() -} - -func TestExternalAddress(t *testing.T) { - { - // Create a listener with no external addr. Should default - // to local ipv4. - l := NewDefaultListener("tcp://:8001", "", false, log.TestingLogger()) - lAddr := l.ExternalAddress().String() - _, _, err := net.SplitHostPort(lAddr) - require.Nil(t, err) - spl := strings.Split(lAddr, ".") - require.Equal(t, len(spl), 4) - l.Stop() - } - - { - // Create a listener with set external ipv4 addr. 
- setExAddr := "8.8.8.8:8080" - l := NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) - lAddr := l.ExternalAddress().String() - require.Equal(t, lAddr, setExAddr) - l.Stop() - } - - { - // Invalid external addr causes panic - setExAddr := "awrlsckjnal:8080" - require.Panics(t, func() { NewDefaultListener("tcp://:8001", setExAddr, false, log.TestingLogger()) }) - } -} diff --git a/p2p/metrics.go b/p2p/metrics.go index ab876ee7c62..ed26d11928c 100644 --- a/p2p/metrics.go +++ b/p2p/metrics.go @@ -3,31 +3,69 @@ package p2p import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - - prometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" ) +const MetricsSubsystem = "p2p" + // Metrics contains metrics exposed by this package. type Metrics struct { // Number of peers. Peers metrics.Gauge + // Number of bytes received from a given peer. + PeerReceiveBytesTotal metrics.Counter + // Number of bytes sent to a given peer. + PeerSendBytesTotal metrics.Counter + // Pending bytes to be sent to a given peer. + PeerPendingSendBytes metrics.Gauge + // Number of transactions submitted by each peer. + NumTxs metrics.Gauge } // PrometheusMetrics returns Metrics build using Prometheus client library. -func PrometheusMetrics() *Metrics { +func PrometheusMetrics(namespace string) *Metrics { return &Metrics{ Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "p2p", + Namespace: namespace, + Subsystem: MetricsSubsystem, Name: "peers", Help: "Number of peers.", }, []string{}), + PeerReceiveBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_receive_bytes_total", + Help: "Number of bytes received from a given peer.", + }, []string{"peer_id"}), + PeerSendBytesTotal: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_send_bytes_total", + Help: "Number of bytes sent to a given peer.", + }, []string{"peer_id"}), + PeerPendingSendBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "peer_pending_send_bytes", + Help: "Number of pending bytes to be sent to a given peer.", + }, []string{"peer_id"}), + NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "num_txs", + Help: "Number of transactions submitted by each peer.", + }, []string{"peer_id"}), } } // NopMetrics returns no-op Metrics. func NopMetrics() *Metrics { return &Metrics{ - Peers: discard.NewGauge(), + Peers: discard.NewGauge(), + PeerReceiveBytesTotal: discard.NewCounter(), + PeerSendBytesTotal: discard.NewCounter(), + PeerPendingSendBytes: discard.NewGauge(), + NumTxs: discard.NewGauge(), } } diff --git a/p2p/netaddress.go b/p2p/netaddress.go index ebac8cc82d5..f60271bccfd 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -13,6 +13,8 @@ import ( "strings" "time" + "errors" + cmn "github.com/tendermint/tendermint/libs/common" ) @@ -30,8 +32,10 @@ type NetAddress struct { str string } -// IDAddressString returns id@hostPort. -func IDAddressString(id ID, hostPort string) string { +// IDAddressString returns id@hostPort. It strips the leading +// protocol from protocolHostPort if it exists. 
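// Illustrative standalone sketch: constructing the namespaced Prometheus
// metrics above and handing them to a Switch. The "tendermint" namespace and
// the peer_id value are made up; metric names come out as e.g.
// tendermint_p2p_peer_send_bytes_total, labelled by peer_id as declared above.
package main

import (
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	m := p2p.PrometheusMetrics("tendermint")

	// Counters and gauges are labelled per peer.
	m.PeerSendBytesTotal.With("peer_id", "deadbeef").Add(512)
	m.PeerPendingSendBytes.With("peer_id", "deadbeef").Set(0)

	// A Switch picks the metrics up via the functional option.
	_ = p2p.WithMetrics(m)
}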
+func IDAddressString(id ID, protocolHostPort string) string { + hostPort := removeProtocolIfDefined(protocolHostPort) return fmt.Sprintf("%s@%s", id, hostPort) } @@ -44,7 +48,7 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress { tcpAddr, ok := addr.(*net.TCPAddr) if !ok { if flag.Lookup("test.v") == nil { // normal run - cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. Got: %v", addr)) + cmn.PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) } else { // in testing netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0) netAddr.ID = id @@ -97,16 +101,19 @@ func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) { if err != nil { return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} } + if len(host) == 0 { + return nil, ErrNetAddressInvalid{ + addrWithoutProtocol, + errors.New("host is empty")} + } ip := net.ParseIP(host) if ip == nil { - if len(host) > 0 { - ips, err := net.LookupIP(host) - if err != nil { - return nil, ErrNetAddressLookup{host, err} - } - ip = ips[0] + ips, err := net.LookupIP(host) + if err != nil { + return nil, ErrNetAddressLookup{host, err} } + ip = ips[0] } port, err := strconv.ParseUint(portStr, 10, 16) @@ -213,10 +220,22 @@ func (na *NetAddress) Routable() bool { // For IPv4 these are either a 0 or all bits set address. For IPv6 a zero // address or one that matches the RFC3849 documentation address format. func (na *NetAddress) Valid() bool { + if string(na.ID) != "" { + data, err := hex.DecodeString(string(na.ID)) + if err != nil || len(data) != IDByteLength { + return false + } + } return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast)) } +// HasID returns true if the address has an ID. +// NOTE: It does not check whether the ID is valid or not. +func (na *NetAddress) HasID() bool { + return string(na.ID) != "" +} + // Local returns true if it is a local address. 
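// Illustrative standalone sketch: round-tripping an address through
// IDAddressString and the NetAddress checks above. The ID and endpoint
// literals are made up. Note that Valid() now also hex-decodes the ID against
// IDByteLength, while HasID() only checks that an ID is present.
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// The leading protocol is stripped before formatting id@host:port.
	s := p2p.IDAddressString(
		p2p.ID("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"),
		"tcp://127.0.0.1:26656",
	)
	fmt.Println(s) // deadbeef...@127.0.0.1:26656

	na, err := p2p.NewNetAddressString(s)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(na.HasID(), na.Valid()) // true true
}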
func (na *NetAddress) Local() bool { return na.IP.IsLoopback() || zero4.Contains(na.IP) diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go index 653b436a6f0..e7b184a76de 100644 --- a/p2p/netaddress_test.go +++ b/p2p/netaddress_test.go @@ -22,48 +22,52 @@ func TestNewNetAddress(t *testing.T) { func TestNewNetAddressStringWithOptionalID(t *testing.T) { testCases := []struct { + name string addr string expected string correct bool }{ - {"127.0.0.1:8080", "127.0.0.1:8080", true}, - {"tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"udp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"udp//127.0.0.1:8080", "", false}, + {"no node id, no protocol", "127.0.0.1:8080", "127.0.0.1:8080", true}, + {"no node id, tcp input", "tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, + {"no node id, udp input", "udp://127.0.0.1:8080", "127.0.0.1:8080", true}, + {"malformed udp input", "udp//127.0.0.1:8080", "", false}, // {"127.0.0:8080", false}, - {"notahost", "", false}, - {"127.0.0.1:notapath", "", false}, - {"notahost:8080", "", false}, - {"8082", "", false}, - {"127.0.0:8080000", "", false}, - - {"deadbeef@127.0.0.1:8080", "", false}, - {"this-isnot-hex@127.0.0.1:8080", "", false}, - {"xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://deadbeef@127.0.0.1:8080", "", false}, - {"tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://@127.0.0.1:8080", "", false}, - {"tcp://@", "", false}, - {"", "", false}, - {"@", "", false}, - {" @", "", false}, - {" @ ", "", false}, + {"invalid host", "notahost", "", false}, + {"invalid port", "127.0.0.1:notapath", "", false}, + {"invalid host w/ port", "notahost:8080", "", false}, + {"just a port", "8082", "", false}, + {"non-existent port", "127.0.0:8080000", "", false}, + + {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, + {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, + {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"correct nodeId", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + + {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, + {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, + {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, + {"correct nodeId w/tcp", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, + + {"no node id when expected", "tcp://@127.0.0.1:8080", "", false}, + {"no node id or IP", "tcp://@", "", false}, + {"tcp no host, w/ port", "tcp://:26656", "", false}, + {"empty", "", "", false}, + {"node id delimiter 1", "@", "", false}, + {"node id delimiter 2", " @", "", false}, + {"node id delimiter 3", " @ ", "", false}, } for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) + t.Run(tc.name, func(t *testing.T) { + addr, err := NewNetAddressStringWithOptionalID(tc.addr) + if tc.correct { + if 
assert.Nil(t, err, tc.addr) {
+					assert.Equal(t, tc.expected, addr.String())
+				}
+			} else {
+				assert.NotNil(t, err, tc.addr)
 			}
-		} else {
-			assert.NotNil(t, err, tc.addr)
-		}
+		})
 	}
 }
diff --git a/p2p/node_info.go b/p2p/node_info.go
index fa1333b2795..c36d98d9cd3 100644
--- a/p2p/node_info.go
+++ b/p2p/node_info.go
@@ -2,9 +2,10 @@ package p2p
 
 import (
 	"fmt"
-	"strings"
+	"reflect"
 
 	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/version"
 )
 
 const (
@@ -17,12 +18,67 @@ func MaxNodeInfoSize() int {
 	return maxNodeInfoSize
 }
 
-// NodeInfo is the basic node information exchanged
+//-------------------------------------------------------------
+
+// NodeInfo exposes basic info of a node
+// and determines if we're compatible.
+type NodeInfo interface {
+	nodeInfoAddress
+	nodeInfoTransport
+}
+
+// nodeInfoAddress exposes just the core info of a node.
+type nodeInfoAddress interface {
+	ID() ID
+	NetAddress() *NetAddress
+}
+
+// nodeInfoTransport validates a nodeInfo and checks
+// our compatibility with it. It's for use in the handshake.
+type nodeInfoTransport interface {
+	ValidateBasic() error
+	CompatibleWith(other NodeInfo) error
+}
+
+//-------------------------------------------------------------
+
+// ProtocolVersion contains the protocol versions for the software.
+type ProtocolVersion struct {
+	P2P   version.Protocol `json:"p2p"`
+	Block version.Protocol `json:"block"`
+	App   version.Protocol `json:"app"`
+}
+
+// defaultProtocolVersion populates the Block and P2P versions using
+// the global values, but not the App.
+var defaultProtocolVersion = NewProtocolVersion(
+	version.P2PProtocol,
+	version.BlockProtocol,
+	0,
+)
+
+// NewProtocolVersion returns a fully populated ProtocolVersion.
+func NewProtocolVersion(p2p, block, app version.Protocol) ProtocolVersion {
+	return ProtocolVersion{
+		P2P:   p2p,
+		Block: block,
+		App:   app,
+	}
+}
+
+//-------------------------------------------------------------
+
+// Assert DefaultNodeInfo satisfies NodeInfo
+var _ NodeInfo = DefaultNodeInfo{}
+
+// DefaultNodeInfo is the basic node information exchanged
 // between two peers during the Tendermint P2P handshake.
-type NodeInfo struct {
+type DefaultNodeInfo struct {
+	ProtocolVersion ProtocolVersion `json:"protocol_version"`
+
 	// Authenticate
 	// TODO: replace with NetAddress
-	ID         ID     `json:"id"`          // authenticated identifier
+	ID_        ID     `json:"id"`          // authenticated identifier
 	ListenAddr string `json:"listen_addr"` // accepting incoming
 
 	// Check compatibility.
@@ -32,11 +88,22 @@ type NodeInfo struct {
 	Channels cmn.HexBytes `json:"channels"` // channels this node knows about
 
 	// ASCIIText fields
-	Moniker string   `json:"moniker"` // arbitrary moniker
-	Other   []string `json:"other"`   // other application specific data
+	Moniker string               `json:"moniker"` // arbitrary moniker
+	Other   DefaultNodeInfoOther `json:"other"`   // other application specific data
+}
+
+// DefaultNodeInfoOther is the misc. application-specific data
+type DefaultNodeInfoOther struct {
+	TxIndex    string `json:"tx_index"`
+	RPCAddress string `json:"rpc_address"`
+}
+
+// ID returns the node's peer ID.
+func (info DefaultNodeInfo) ID() ID {
+	return info.ID_
+}
 
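// Illustrative standalone sketch: building a DefaultNodeInfo like the one
// above and running it through ValidateBasic (defined below). All field
// values are made up; the ProtocolVersion helper and version constants are
// the ones introduced in this file.
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/version"
)

func main() {
	ni := p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, version.BlockProtocol, 0,
		),
		ID_:        p2p.ID("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"),
		ListenAddr: "0.0.0.0:26656",
		Network:    "testing",
		Version:    "1.2.3",
		Channels:   []byte{0x01},
		Moniker:    "sketch-node",
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    "on",
			RPCAddress: "0.0.0.0:26657",
		},
	}
	fmt.Println(ni.ValidateBasic()) // <nil>: passes every check below
}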
-// Validate checks the self-reported NodeInfo is safe.
+// ValidateBasic checks the self-reported DefaultNodeInfo is safe.
 // It returns an error if there
 // are too many Channels, if there are any duplicate Channels,
 // if the ListenAddr is malformed, or if the ListenAddr is a host name
@@ -49,21 +116,29 @@ type NodeInfo struct {
 // International clients could then use punycode (or we could use
 // url-encoding), and we just need to be careful with how we handle that in our
 // clients. (e.g. off by default).
-func (info NodeInfo) Validate() error {
-	if len(info.Channels) > maxNumChannels {
-		return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels)
-	}
+func (info DefaultNodeInfo) ValidateBasic() error {
 
-	// Sanitize ASCII text fields.
-	if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" {
-		return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v.", info.Moniker)
+	// ID is already validated.
+
+	// Validate ListenAddr.
+	_, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr))
+	if err != nil {
+		return err
 	}
-	for i, s := range info.Other {
-		if !cmn.IsASCIIText(s) || cmn.ASCIITrim(s) == "" {
-			return fmt.Errorf("info.Other[%v] must be valid non-empty ASCII text without tabs, but got %v.", i, s)
-		}
+
+	// Network is validated in CompatibleWith.
+
+	// Validate Version
+	if len(info.Version) > 0 &&
+		(!cmn.IsASCIIText(info.Version) || cmn.ASCIITrim(info.Version) == "") {
+
+		return fmt.Errorf("info.Version must be valid ASCII text without tabs, but got %v", info.Version)
 	}
+
+	// Validate Channels - ensure max and check for duplicates.
+	if len(info.Channels) > maxNumChannels {
+		return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels)
+	}
 	channels := make(map[byte]struct{})
 	for _, ch := range info.Channels {
 		_, ok := channels[ch]
@@ -73,31 +148,40 @@ func (info NodeInfo) Validate() error {
 		channels[ch] = struct{}{}
 	}
 
-	// ensure ListenAddr is good
-	_, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr))
-	return err
-}
-
-// CompatibleWith checks if two NodeInfo are compatible with eachother.
-// CONTRACT: two nodes are compatible if the major version matches and network match
-// and they have at least one channel in common.
-func (info NodeInfo) CompatibleWith(other NodeInfo) error {
-	iMajor, _, _, iErr := splitVersion(info.Version)
-	oMajor, _, _, oErr := splitVersion(other.Version)
+	// Validate Moniker.
+	if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" {
+		return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v", info.Moniker)
+	}
 
-	// if our own version number is not formatted right, we messed up
-	if iErr != nil {
-		return iErr
+	// Validate Other.
+	other := info.Other
+	txIndex := other.TxIndex
+	switch txIndex {
+	case "", "on", "off":
+	default:
+		return fmt.Errorf("info.Other.TxIndex should be either 'on', 'off', or empty string, got '%v'", txIndex)
+	}
+	// XXX: Should we be more strict about address formats?
+	rpcAddr := other.RPCAddress
+	if len(rpcAddr) > 0 && (!cmn.IsASCIIText(rpcAddr) || cmn.ASCIITrim(rpcAddr) == "") {
+		return fmt.Errorf("info.Other.RPCAddress=%v must be valid ASCII text without tabs", rpcAddr)
 	}
 
-	// version number must be formatted correctly ("x.x.x")
-	if oErr != nil {
-		return oErr
+	return nil
+}
+
+// CompatibleWith checks if two DefaultNodeInfo are compatible with each other.
+// CONTRACT: two nodes are compatible if the Block version and network match
+// and they have at least one channel in common.
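// Before the implementation below, an illustrative standalone sketch of this
// contract: same network and Block version, at least one common channel. The
// node values are made up.
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/version"
)

func sketchNode(id string, block version.Protocol, channels []byte) p2p.DefaultNodeInfo {
	return p2p.DefaultNodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(version.P2PProtocol, block, 0),
		ID_:             p2p.ID(id),
		ListenAddr:      "0.0.0.0:26656",
		Network:         "testing",
		Moniker:         "sketch",
		Channels:        channels,
	}
}

func main() {
	a := sketchNode("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef", version.BlockProtocol, []byte{0x01})
	b := sketchNode("beefdeadbeefdeadbeefdeadbeefdeadbeefdead", version.BlockProtocol, []byte{0x01, 0x02})
	fmt.Println(a.CompatibleWith(b)) // <nil>: channel 0x01 is shared

	c := sketchNode("beefdeadbeefdeadbeefdeadbeefdeadbeefdead", version.BlockProtocol+1, []byte{0x01})
	fmt.Println(a.CompatibleWith(c)) // error: different Block version
}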
+func (info DefaultNodeInfo) CompatibleWith(other_ NodeInfo) error { + other, ok := other_.(DefaultNodeInfo) + if !ok { + return fmt.Errorf("wrong NodeInfo type. Expected DefaultNodeInfo, got %v", reflect.TypeOf(other_)) } - // major version must match - if iMajor != oMajor { - return fmt.Errorf("Peer is on a different major version. Got %v, expected %v", oMajor, iMajor) + if info.ProtocolVersion.Block != other.ProtocolVersion.Block { + return fmt.Errorf("Peer is on a different Block version. Got %v, expected %v", + other.ProtocolVersion.Block, info.ProtocolVersion.Block) } // nodes must be on the same network @@ -127,34 +211,22 @@ OUTER_LOOP: return nil } -// NetAddress returns a NetAddress derived from the NodeInfo - +// NetAddress returns a NetAddress derived from the DefaultNodeInfo - // it includes the authenticated peer ID and the self-reported // ListenAddr. Note that the ListenAddr is not authenticated and // may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() *NetAddress { - netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr)) +func (info DefaultNodeInfo) NetAddress() *NetAddress { + idAddr := IDAddressString(info.ID(), info.ListenAddr) + netAddr, err := NewNetAddressString(idAddr) if err != nil { switch err.(type) { case ErrNetAddressLookup: // XXX If the peer provided a host name and the lookup fails here // we're out of luck. - // TODO: use a NetAddress in NodeInfo + // TODO: use a NetAddress in DefaultNodeInfo default: panic(err) // everything should be well formed by now } } return netAddr } - -func (info NodeInfo) String() string { - return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", - info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other) -} - -func splitVersion(version string) (string, string, string, error) { - spl := strings.Split(version, ".") - if len(spl) != 3 { - return "", "", "", fmt.Errorf("Invalid version format %v", version) - } - return spl[0], spl[1], spl[2], nil -} diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go new file mode 100644 index 00000000000..c9a72dbc25e --- /dev/null +++ b/p2p/node_info_test.go @@ -0,0 +1,123 @@ +package p2p + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/ed25519" +) + +func TestNodeInfoValidate(t *testing.T) { + + // empty fails + ni := DefaultNodeInfo{} + assert.Error(t, ni.ValidateBasic()) + + channels := make([]byte, maxNumChannels) + for i := 0; i < maxNumChannels; i++ { + channels[i] = byte(i) + } + dupChannels := make([]byte, 5) + copy(dupChannels[:], channels[:5]) + dupChannels = append(dupChannels, testCh) + + nonAscii := "¢§µ" + emptyTab := fmt.Sprintf("\t") + emptySpace := fmt.Sprintf(" ") + + testCases := []struct { + testName string + malleateNodeInfo func(*DefaultNodeInfo) + expectErr bool + }{ + {"Too Many Channels", func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, + {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, + {"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false}, + + {"Invalid NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "not-an-address" }, true}, + {"Good NetAddress", func(ni *DefaultNodeInfo) { ni.ListenAddr = "0.0.0.0:26656" }, false}, + + {"Non-ASCII Version", func(ni *DefaultNodeInfo) { ni.Version = nonAscii }, true}, + {"Empty tab Version", func(ni 
*DefaultNodeInfo) { ni.Version = emptyTab }, true}, + {"Empty space Version", func(ni *DefaultNodeInfo) { ni.Version = emptySpace }, true}, + {"Empty Version", func(ni *DefaultNodeInfo) { ni.Version = "" }, false}, + + {"Non-ASCII Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = nonAscii }, true}, + {"Empty tab Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptyTab }, true}, + {"Empty space Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = emptySpace }, true}, + {"Empty Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "" }, true}, + {"Good Moniker", func(ni *DefaultNodeInfo) { ni.Moniker = "hey its me" }, false}, + + {"Non-ASCII TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = nonAscii }, true}, + {"Empty tab TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptyTab }, true}, + {"Empty space TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = emptySpace }, true}, + {"Empty TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = "" }, false}, + {"Off TxIndex", func(ni *DefaultNodeInfo) { ni.Other.TxIndex = "off" }, false}, + + {"Non-ASCII RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = nonAscii }, true}, + {"Empty tab RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptyTab }, true}, + {"Empty space RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = emptySpace }, true}, + {"Empty RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "" }, false}, + {"Good RPCAddress", func(ni *DefaultNodeInfo) { ni.Other.RPCAddress = "0.0.0.0:26657" }, false}, + } + + nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} + name := "testing" + + // test case passes + ni = testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni.Channels = channels + assert.NoError(t, ni.ValidateBasic()) + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey.ID(), name).(DefaultNodeInfo) + ni.Channels = channels + tc.malleateNodeInfo(&ni) + err := ni.ValidateBasic() + if tc.expectErr { + assert.Error(t, err, tc.testName) + } else { + assert.NoError(t, err, tc.testName) + } + } + +} + +func TestNodeInfoCompatible(t *testing.T) { + + nodeKey1 := NodeKey{PrivKey: ed25519.GenPrivKey()} + nodeKey2 := NodeKey{PrivKey: ed25519.GenPrivKey()} + name := "testing" + + var newTestChannel byte = 0x2 + + // test NodeInfo is compatible + ni1 := testNodeInfo(nodeKey1.ID(), name).(DefaultNodeInfo) + ni2 := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + assert.NoError(t, ni1.CompatibleWith(ni2)) + + // add another channel; still compatible + ni2.Channels = []byte{newTestChannel, testCh} + assert.NoError(t, ni1.CompatibleWith(ni2)) + + // wrong NodeInfo type is not compatible + _, netAddr := CreateRoutableAddr() + ni3 := mockNodeInfo{netAddr} + assert.Error(t, ni1.CompatibleWith(ni3)) + + testCases := []struct { + testName string + malleateNodeInfo func(*DefaultNodeInfo) + }{ + {"Wrong block version", func(ni *DefaultNodeInfo) { ni.ProtocolVersion.Block += 1 }}, + {"Wrong network", func(ni *DefaultNodeInfo) { ni.Network += "-wrong" }}, + {"No common channels", func(ni *DefaultNodeInfo) { ni.Channels = []byte{newTestChannel} }}, + } + + for _, tc := range testCases { + ni := testNodeInfo(nodeKey2.ID(), name).(DefaultNodeInfo) + tc.malleateNodeInfo(&ni) + assert.Error(t, ni1.CompatibleWith(ni)) + } +} diff --git a/p2p/peer.go b/p2p/peer.go index 4f59fef7646..e98c16d2617 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -3,10 +3,8 @@ package p2p import ( "fmt" "net" - "sync/atomic" "time" - crypto "github.com/tendermint/tendermint/crypto" cmn 
"github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" @@ -14,16 +12,18 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) -var testIPSuffix uint32 +const metricsTickerDuration = 10 * time.Second // Peer is an interface representing a peer connected on a reactor. type Peer interface { cmn.Service - ID() ID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection + ID() ID // peer's cryptographic ID + RemoteIP() net.IP // remote IP of the connection + IsOutbound() bool // did we dial the peer IsPersistent() bool // do we redial this peer when we disconnect + NodeInfo() NodeInfo // peer's info Status() tmconn.ConnectionStatus OriginalAddr() *NetAddress @@ -39,12 +39,31 @@ type Peer interface { // peerConn contains the raw connection and its config. type peerConn struct { - outbound bool - persistent bool - config *config.P2PConfig - conn net.Conn // source connection - ip net.IP + outbound bool + persistent bool + config *config.P2PConfig + conn net.Conn // source connection + originalAddr *NetAddress // nil for inbound connections + + // cached RemoteIP() + ip net.IP +} + +func newPeerConn( + outbound, persistent bool, + config *config.P2PConfig, + conn net.Conn, + originalAddr *NetAddress, +) peerConn { + + return peerConn{ + outbound: outbound, + persistent: persistent, + config: config, + conn: conn, + originalAddr: originalAddr, + } } // ID only exists for SecretConnection. @@ -59,14 +78,6 @@ func (pc peerConn) RemoteIP() net.IP { return pc.ip } - // In test cases a conn could not be present at all or be an in-memory - // implementation where we want to return a fake ip. - if pc.conn == nil || pc.conn.RemoteAddr().String() == "pipe" { - pc.ip = net.IP{172, 16, 0, byte(atomic.AddUint32(&testIPSuffix, 1))} - - return pc.ip - } - host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String()) if err != nil { panic(err) @@ -100,8 +111,13 @@ type peer struct { // User data Data *cmn.CMap + + metrics *Metrics + metricsTicker *time.Ticker } +type PeerOption func(*peer) + func newPeer( pc peerConn, mConfig tmconn.MConnConfig, @@ -109,12 +125,15 @@ func newPeer( reactorsByCh map[byte]Reactor, chDescs []*tmconn.ChannelDescriptor, onPeerError func(Peer, interface{}), + options ...PeerOption, ) *peer { p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - Data: cmn.NewCMap(), + peerConn: pc, + nodeInfo: nodeInfo, + channels: nodeInfo.(DefaultNodeInfo).Channels, // TODO + Data: cmn.NewCMap(), + metricsTicker: time.NewTicker(metricsTickerDuration), + metrics: NopMetrics(), } p.mconn = createMConnection( @@ -126,89 +145,20 @@ func newPeer( mConfig, ) p.BaseService = *cmn.NewBaseService(nil, "Peer", p) - - return p -} - -func newOutboundPeerConn( - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - conn, err := dial(addr, config) - if err != nil { - return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") - } - - pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) - } - return peerConn{}, err - } - - // ensure dialed ID matches connection ID - if addr.ID != pc.ID() { - if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) - } - return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} + for _, option := range options { + option(p) } - 
return pc, nil -} - -func newInboundPeerConn( - conn net.Conn, - config *config.P2PConfig, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - // TODO: issue PoW challenge - - return newPeerConn(conn, config, false, false, ourNodePrivKey, nil) + return p } -func newPeerConn( - rawConn net.Conn, - cfg *config.P2PConfig, - outbound, persistent bool, - ourNodePrivKey crypto.PrivKey, - originalAddr *NetAddress, -) (pc peerConn, err error) { - conn := rawConn - - // Fuzz connection - if cfg.TestFuzz { - // so we have time to do peer handshakes and get set up - conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) - } - - // Set deadline for secret handshake - dl := time.Now().Add(cfg.HandshakeTimeout) - if err := conn.SetDeadline(dl); err != nil { - return pc, cmn.ErrorWrap( - err, - "Error setting deadline while encrypting connection", - ) - } - - // Encrypt connection - conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey) - if err != nil { - return pc, cmn.ErrorWrap(err, "Error creating peer") +// String representation. +func (p *peer) String() string { + if p.outbound { + return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) } - // Only the information we already have - return peerConn{ - config: cfg, - outbound: outbound, - persistent: persistent, - conn: conn, - originalAddr: originalAddr, - }, nil + return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) } //--------------------------------------------------- @@ -225,12 +175,18 @@ func (p *peer) OnStart() error { if err := p.BaseService.OnStart(); err != nil { return err } - err := p.mconn.Start() - return err + + if err := p.mconn.Start(); err != nil { + return err + } + + go p.metricsReporter() + return nil } // OnStop implements BaseService. func (p *peer) OnStop() { + p.metricsTicker.Stop() p.BaseService.OnStop() p.mconn.Stop() // stop everything and close the conn } @@ -240,7 +196,7 @@ func (p *peer) OnStop() { // ID returns the peer's ID - the hex encoded hash of its pubkey. func (p *peer) ID() ID { - return p.nodeInfo.ID + return p.nodeInfo.ID() } // IsOutbound returns true if the connection is outbound, false otherwise. @@ -282,7 +238,11 @@ func (p *peer) Send(chID byte, msgBytes []byte) bool { } else if !p.hasChannel(chID) { return false } - return p.mconn.Send(chID, msgBytes) + res := p.mconn.Send(chID, msgBytes) + if res { + p.metrics.PeerSendBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes))) + } + return res } // TrySend msg bytes to the channel identified by chID byte. Immediately returns @@ -293,7 +253,11 @@ func (p *peer) TrySend(chID byte, msgBytes []byte) bool { } else if !p.hasChannel(chID) { return false } - return p.mconn.TrySend(chID, msgBytes) + res := p.mconn.TrySend(chID, msgBytes) + if res { + p.metrics.PeerSendBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes))) + } + return res } // Get the data for a given key. @@ -327,53 +291,14 @@ func (p *peer) hasChannel(chID byte) bool { } //--------------------------------------------------- -// methods used by the Switch +// methods only used for testing +// TODO: can we remove these? -// CloseConn should be called by the Switch if the peer was created but never -// started. +// CloseConn closes the underlying connection func (pc *peerConn) CloseConn() { pc.conn.Close() // nolint: errcheck } -// HandshakeTimeout performs the Tendermint P2P handshake between a given node -// and the peer by exchanging their NodeInfo. It sets the received nodeInfo on -// the peer. 
-// NOTE: blocking -func (pc *peerConn) HandshakeTimeout( - ourNodeInfo NodeInfo, - timeout time.Duration, -) (peerNodeInfo NodeInfo, err error) { - // Set deadline for handshake so we don't block forever on conn.ReadFull - if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline") - } - - var trs, _ = cmn.Parallel( - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo) - return - }, - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.UnmarshalBinaryReader( - pc.conn, - &peerNodeInfo, - int64(MaxNodeInfoSize()), - ) - return - }, - ) - if err := trs.FirstError(); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake") - } - - // Remove deadline - if err := pc.conn.SetDeadline(time.Time{}); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline") - } - - return peerNodeInfo, nil -} - // Addr returns peer's remote network address. func (p *peer) Addr() net.Addr { return p.peerConn.conn.RemoteAddr() @@ -387,30 +312,34 @@ func (p *peer) CanSend(chID byte) bool { return p.mconn.CanSend(chID) } -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) +//--------------------------------------------------- + +func PeerMetrics(metrics *Metrics) PeerOption { + return func(p *peer) { + p.metrics = metrics } +} - return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) +func (p *peer) metricsReporter() { + for { + select { + case <-p.metricsTicker.C: + status := p.mconn.Status() + var sendQueueSize float64 + for _, chStatus := range status.Channels { + sendQueueSize += float64(chStatus.SendQueueSize) + } + + p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) + case <-p.Quit(): + return + } + } } //------------------------------------------------------------------ // helper funcs -func dial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - func createMConnection( conn net.Conn, p *peer, @@ -425,8 +354,9 @@ func createMConnection( if reactor == nil { // Note that its ok to panic here as it's caught in the conn._recover, // which does onPeerError. 
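// An aside on the shape of the metricsReporter loop above: a ticker drives
// periodic gauge updates until the quit channel closes. This standalone toy
// uses made-up durations and a placeholder in place of the real gauges.
package main

import (
	"fmt"
	"time"
)

func reporter(quit <-chan struct{}) {
	ticker := time.NewTicker(50 * time.Millisecond) // the peer uses 10s
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			// e.g. sum SendQueueSize over channels into PeerPendingSendBytes
			fmt.Println("report pending send bytes")
		case <-quit:
			return
		}
	}
}

func main() {
	quit := make(chan struct{})
	go reporter(quit)
	time.Sleep(120 * time.Millisecond)
	close(quit)
}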
- panic(cmn.Fmt("Unknown channel %X", chID)) + panic(fmt.Sprintf("Unknown channel %X", chID)) } + p.metrics.PeerReceiveBytesTotal.With("peer_id", string(p.ID())).Add(float64(len(msgBytes))) reactor.Receive(chID, p, msgBytes) } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 4582ab6442d..daa9b2c821a 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -11,23 +11,35 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) -// Returns an empty kvstore peer -func randPeer(ip net.IP) *peer { +// mockPeer for testing the PeerSet +type mockPeer struct { + cmn.BaseService + ip net.IP + id ID +} + +func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } +func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } +func (mp *mockPeer) NodeInfo() NodeInfo { return DefaultNodeInfo{} } +func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } +func (mp *mockPeer) ID() ID { return mp.id } +func (mp *mockPeer) IsOutbound() bool { return false } +func (mp *mockPeer) IsPersistent() bool { return true } +func (mp *mockPeer) Get(s string) interface{} { return s } +func (mp *mockPeer) Set(string, interface{}) {} +func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } +func (mp *mockPeer) OriginalAddr() *NetAddress { return nil } + +// Returns a mock peer +func newMockPeer(ip net.IP) *mockPeer { if ip == nil { ip = net.IP{127, 0, 0, 1} } - nodeKey := NodeKey{PrivKey: ed25519.GenPrivKey()} - p := &peer{ - nodeInfo: NodeInfo{ - ID: nodeKey.ID(), - ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), - }, + return &mockPeer{ + ip: ip, + id: nodeKey.ID(), } - - p.ip = ip - - return p } func TestPeerSetAddRemoveOne(t *testing.T) { @@ -37,7 +49,7 @@ func TestPeerSetAddRemoveOne(t *testing.T) { var peerList []Peer for i := 0; i < 5; i++ { - p := randPeer(net.IP{127, 0, 0, byte(i)}) + p := newMockPeer(net.IP{127, 0, 0, byte(i)}) if err := peerSet.Add(p); err != nil { t.Error(err) } @@ -81,7 +93,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) { peers := []Peer{} N := 100 for i := 0; i < N; i++ { - peer := randPeer(net.IP{127, 0, 0, byte(i)}) + peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) if err := peerSet.Add(peer); err != nil { t.Errorf("Failed to add new peer") } @@ -105,7 +117,7 @@ func TestPeerSetAddRemoveMany(t *testing.T) { func TestPeerSetAddDuplicate(t *testing.T) { t.Parallel() peerSet := NewPeerSet() - peer := randPeer(nil) + peer := newMockPeer(nil) n := 20 errsChan := make(chan error) @@ -147,7 +159,7 @@ func TestPeerSetGet(t *testing.T) { var ( peerSet = NewPeerSet() - peer = randPeer(nil) + peer = newMockPeer(nil) ) assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") diff --git a/p2p/peer_test.go b/p2p/peer_test.go index f0e9153288e..d3d9f0c7252 100644 --- a/p2p/peer_test.go +++ b/p2p/peer_test.go @@ -1,6 +1,7 @@ package p2p import ( + "fmt" golog "log" "net" "testing" @@ -18,8 +19,6 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) -const testCh = 0x01 - func TestPeerBasic(t *testing.T) { assert, require := assert.New(t), require.New(t) @@ -76,26 +75,64 @@ func createOutboundPeerAndPerformHandshake( } reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} pk := ed25519.GenPrivKey() - pc, err := newOutboundPeerConn(addr, config, false, pk) + pc, err := testOutboundPeerConn(addr, config, false, pk) if err != nil { return nil, err } - nodeInfo, err := pc.HandshakeTimeout(NodeInfo{ - ID: 
addr.ID, - Moniker: "host_peer", - Network: "testing", - Version: "123.123.123", - Channels: []byte{testCh}, - }, 1*time.Second) + timeout := 1 * time.Second + ourNodeInfo := testNodeInfo(addr.ID, "host_peer") + peerNodeInfo, err := handshake(pc.conn, timeout, ourNodeInfo) if err != nil { return nil, err } - p := newPeer(pc, mConfig, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) + p := newPeer(pc, mConfig, peerNodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) p.SetLogger(log.TestingLogger().With("peer", addr)) return p, nil } +func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { + if cfg.TestDialFail { + return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") + } + + conn, err := addr.DialTimeout(cfg.DialTimeout) + if err != nil { + return nil, err + } + return conn, nil +} + +func testOutboundPeerConn( + addr *NetAddress, + config *config.P2PConfig, + persistent bool, + ourNodePrivKey crypto.PrivKey, +) (peerConn, error) { + conn, err := testDial(addr, config) + if err != nil { + return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") + } + + pc, err := testPeerConn(conn, config, true, persistent, ourNodePrivKey, addr) + if err != nil { + if cerr := conn.Close(); cerr != nil { + return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + } + return peerConn{}, err + } + + // ensure dialed ID matches connection ID + if addr.ID != pc.ID() { + if cerr := conn.Close(); cerr != nil { + return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) + } + return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} + } + + return pc, nil +} + type remotePeer struct { PrivKey crypto.PrivKey Config *config.P2PConfig @@ -143,19 +180,12 @@ func (rp *remotePeer) accept(l net.Listener) { golog.Fatalf("Failed to accept conn: %+v", err) } - pc, err := newInboundPeerConn(conn, rp.Config, rp.PrivKey) + pc, err := testInboundPeerConn(conn, rp.Config, rp.PrivKey) if err != nil { golog.Fatalf("Failed to create a peer: %+v", err) } - _, err = pc.HandshakeTimeout(NodeInfo{ - ID: rp.Addr().ID, - Moniker: "remote_peer", - Network: "testing", - Version: "123.123.123", - ListenAddr: l.Addr().String(), - Channels: rp.channels, - }, 1*time.Second) + _, err = handshake(pc.conn, time.Second, rp.nodeInfo(l)) if err != nil { golog.Fatalf("Failed to perform handshake: %+v", err) } @@ -174,3 +204,15 @@ func (rp *remotePeer) accept(l net.Listener) { } } } + +func (rp *remotePeer) nodeInfo(l net.Listener) NodeInfo { + return DefaultNodeInfo{ + ProtocolVersion: defaultProtocolVersion, + ID_: rp.Addr().ID, + ListenAddr: l.Addr().String(), + Network: "testing", + Version: "1.2.3-rc0-deadbeef", + Channels: rp.channels, + Moniker: "remote_peer", + } +} diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index ad6e0c00be2..405a4628b65 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -7,6 +7,7 @@ package pex import ( "crypto/sha256" "encoding/binary" + "fmt" "math" "net" "sync" @@ -559,11 +560,11 @@ func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) { func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { // Sanity check if ka.isNew() { - a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka)) return false } if len(ka.Buckets) != 0 { - a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka)) return false } @@ 
-594,7 +595,7 @@ func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { if ka.BucketType != bucketType { - a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka)) + a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) return } bucket := a.getBucket(bucketType, bucketIdx) @@ -647,6 +648,14 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookNonRoutable{addr} } + if !addr.Valid() { + return ErrAddrBookInvalidAddr{addr} + } + + if !addr.HasID() { + return ErrAddrBookInvalidAddrNoID{addr} + } + // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. if _, ok := a.ourAddrs[addr.String()]; ok { return ErrAddrBookSelf{addr} @@ -690,7 +699,7 @@ func (a *addrBook) expireNew(bucketIdx int) { for addrStr, ka := range a.bucketsNew[bucketIdx] { // If an entry is bad, throw it away if ka.isBad() { - a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr)) + a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) a.removeFromBucket(ka, bucketTypeNew, bucketIdx) return } @@ -707,11 +716,11 @@ func (a *addrBook) expireNew(bucketIdx int) { func (a *addrBook) moveToOld(ka *knownAddress) { // Sanity check if ka.isOld() { - a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) return } if len(ka.Buckets) == 0 { - a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) return } @@ -733,7 +742,7 @@ func (a *addrBook) moveToOld(ka *knownAddress) { // Finally, add our ka to old bucket again. added = a.addToOldBucket(ka, oldBucketIdx) if !added { - a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) + a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) } } } diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 7f660bdc5a7..1f44ceee7e0 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -46,3 +46,19 @@ type ErrAddrBookNilAddr struct { func (err ErrAddrBookNilAddr) Error() string { return fmt.Sprintf("Cannot add a nil address. 
Got (addr, src) = (%v, %v)", err.Addr, err.Src) } + +type ErrAddrBookInvalidAddr struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookInvalidAddr) Error() string { + return fmt.Sprintf("Cannot add invalid address %v", err.Addr) +} + +type ErrAddrBookInvalidAddrNoID struct { + Addr *p2p.NetAddress +} + +func (err ErrAddrBookInvalidAddrNoID) Error() string { + return fmt.Sprintf("Cannot add address with no ID %v", err.Addr) +} diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 3237e12537e..33fec033695 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -2,6 +2,7 @@ package pex import ( "encoding/json" + "fmt" "os" cmn "github.com/tendermint/tendermint/libs/common" @@ -53,14 +54,14 @@ func (a *addrBook) loadFromFile(filePath string) bool { // Load addrBookJSON{} r, err := os.Open(filePath) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err)) + cmn.PanicCrisis(fmt.Sprintf("Error opening file %s: %v", filePath, err)) } defer r.Close() // nolint: errcheck aJSON := &addrBookJSON{} dec := json.NewDecoder(r) err = dec.Decode(aJSON) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err)) + cmn.PanicCrisis(fmt.Sprintf("Error reading file %s: %v", filePath, err)) } // Restore all the fields... diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 288cb0d150e..85d292b09f3 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -31,8 +31,7 @@ const ( maxMsgSize = maxAddressSize * maxGetSelection // ensure we have enough peers - defaultEnsurePeersPeriod = 30 * time.Second - defaultMinNumOutboundPeers = p2p.DefaultMinNumOutboundPeers + defaultEnsurePeersPeriod = 30 * time.Second // Seed/Crawler constants @@ -222,7 +221,11 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { // 2) limit the output size if r.config.SeedMode { r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - r.Switch.StopPeerGracefully(src) + go func() { + // TODO Fix properly #2092 + time.Sleep(time.Second * 5) + r.Switch.StopPeerGracefully(src) + }() } else { r.SendAddrs(src, r.book.GetSelection()) } @@ -289,21 +292,37 @@ func (r *PEXReactor) RequestAddrs(p Peer) { func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { - return cmn.NewError("Received unsolicited pexAddrsMessage") + return errors.New("Unsolicited pexAddrsMessage") } r.requestsSent.Delete(id) srcAddr := src.NodeInfo().NetAddress() for _, netAddr := range addrs { - // NOTE: GetSelection methods should never return nil addrs + // Validate netAddr. Disconnect from a peer if it sends us invalid data. if netAddr == nil { - return cmn.NewError("received nil addr") + return errors.New("nil address in pexAddrsMessage") + } + // TODO: extract validating logic from NewNetAddressStringWithOptionalID + // and put it in netAddr#Valid (#2722) + na, err := p2p.NewNetAddressString(netAddr.String()) + if err != nil { + return fmt.Errorf("%s address in pexAddrsMessage is invalid: %v", + netAddr.String(), + err, + ) } - err := r.book.AddAddress(netAddr, srcAddr) - r.logErrAddrBook(err) + // NOTE: we check netAddr validity and routability in book#AddAddress. + err = r.book.AddAddress(na, srcAddr) + if err != nil { + r.logErrAddrBook(err) + // XXX: should we be strict about incoming data and disconnect from a + // peer here too? + continue + } - // If this address came from a seed node, try to connect to it without waiting. 
+ // If this address came from a seed node, try to connect to it without + // waiting. for _, seedAddr := range r.seedAddrs { if seedAddr.Equals(srcAddr) { r.ensurePeers() @@ -362,7 +381,7 @@ func (r *PEXReactor) ensurePeersRoutine() { func (r *PEXReactor) ensurePeers() { var ( out, in, dial = r.Switch.NumPeers() - numToDial = defaultMinNumOutboundPeers - (out + dial) + numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) ) r.Logger.Info( "Ensure peers", @@ -393,10 +412,7 @@ func (r *PEXReactor) ensurePeers() { if _, selected := toDial[try.ID]; selected { continue } - if dialling := r.Switch.IsDialing(try.ID); dialling { - continue - } - if connected := r.Switch.Peers().Has(try.ID); connected { + if r.Switch.IsDialingOrExistingAddress(try) { continue } // TODO: consider moving some checks from toDial into here diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index f72f81e06f6..9d3f49bba53 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -109,9 +109,7 @@ func TestPEXReactorRunning(t *testing.T) { addOtherNodeAddrToAddrBook(1, 0) addOtherNodeAddrToAddrBook(2, 1) - for i, sw := range switches { - sw.AddListener(p2p.NewDefaultListener("tcp://"+sw.NodeInfo().ListenAddr, "", false, logger.With("pex", i))) - + for _, sw := range switches { err := sw.Start() // start switch and reactors require.Nil(t, err) } @@ -322,7 +320,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { peer := p2p.CreateRandomPeer(false) pexR, book := createReactor(&PEXReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID)}) + book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) defer teardownReactor(book) // we have to send a request to receive responses @@ -393,8 +391,8 @@ func (mp mockPeer) ID() p2p.ID { return mp.addr.ID } func (mp mockPeer) IsOutbound() bool { return mp.outbound } func (mp mockPeer) IsPersistent() bool { return mp.persistent } func (mp mockPeer) NodeInfo() p2p.NodeInfo { - return p2p.NodeInfo{ - ID: mp.addr.ID, + return p2p.DefaultNodeInfo{ + ID_: mp.addr.ID, ListenAddr: mp.addr.DialString(), } } @@ -474,9 +472,6 @@ func testCreatePeerWithConfig(dir string, id int, config *PEXReactorConfig) *p2p return sw }, ) - peer.AddListener( - p2p.NewDefaultListener("tcp://"+peer.NodeInfo().ListenAddr, "", false, log.TestingLogger()), - ) return peer } @@ -510,9 +505,6 @@ func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) return sw }, ) - seed.AddListener( - p2p.NewDefaultListener("tcp://"+seed.NodeInfo().ListenAddr, "", false, log.TestingLogger()), - ) return seed } diff --git a/p2p/switch.go b/p2p/switch.go index da94fa4b04d..b70900ea919 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -3,7 +3,6 @@ package p2p import ( "fmt" "math" - "net" "sync" "time" @@ -26,10 +25,6 @@ const ( // ie. 3**10 = 16hrs reconnectBackOffAttempts = 10 reconnectBackOffBaseSeconds = 3 - - // keep at least this many outbound peers - // TODO: move to config - DefaultMinNumOutboundPeers = 10 ) //----------------------------------------------------------------------------- @@ -46,6 +41,10 @@ type AddrBook interface { Save() } +// PeerFilterFunc to be implemented by filter hooks after a new Peer has been +// fully setup. 
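// Illustrative sketch for the PeerFilterFunc type declared just below: a
// filter that rejects one hard-coded peer ID. The package name, helper name,
// and banned ID are made up.
package sketch

import (
	"errors"

	"github.com/tendermint/tendermint/p2p"
)

// BanID returns a filter rejecting the given peer ID.
func BanID(banned p2p.ID) p2p.PeerFilterFunc {
	return func(_ p2p.IPeerSet, peer p2p.Peer) error {
		if peer.ID() == banned {
			return errors.New("peer is banned")
		}
		return nil
	}
}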
+type PeerFilterFunc func(IPeerSet, Peer) error + //----------------------------------------------------------------------------- // Switch handles peer connections and exposes an API to receive incoming messages @@ -56,7 +55,6 @@ type Switch struct { cmn.BaseService config *config.P2PConfig - listeners []Listener reactors map[string]Reactor chDescs []*conn.ChannelDescriptor reactorsByCh map[byte]Reactor @@ -67,8 +65,10 @@ type Switch struct { nodeKey *NodeKey // our node privkey addrBook AddrBook - filterConnByAddr func(net.Addr) error - filterConnByID func(ID) error + transport Transport + + filterTimeout time.Duration + peerFilters []PeerFilterFunc mConfig conn.MConnConfig @@ -81,23 +81,29 @@ type Switch struct { type SwitchOption func(*Switch) // NewSwitch creates a new Switch with the given config. -func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch { +func NewSwitch( + cfg *config.P2PConfig, + transport Transport, + options ...SwitchOption, +) *Switch { sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmn.NewCMap(), - reconnecting: cmn.NewCMap(), - metrics: NopMetrics(), + config: cfg, + reactors: make(map[string]Reactor), + chDescs: make([]*conn.ChannelDescriptor, 0), + reactorsByCh: make(map[byte]Reactor), + peers: NewPeerSet(), + dialing: cmn.NewCMap(), + reconnecting: cmn.NewCMap(), + metrics: NopMetrics(), + transport: transport, + filterTimeout: defaultFilterTimeout, } // Ensure we have a completely undeterministic PRNG. sw.rng = cmn.NewRand() mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond + mConfig.FlushThrottle = cfg.FlushThrottleTimeout mConfig.SendRate = cfg.SendRate mConfig.RecvRate = cfg.RecvRate mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize @@ -113,6 +119,16 @@ func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch { return sw } +// SwitchFilterTimeout sets the timeout used for peer filters. +func SwitchFilterTimeout(timeout time.Duration) SwitchOption { + return func(sw *Switch) { sw.filterTimeout = timeout } +} + +// SwitchPeerFilters sets the filters for rejection of new peers. +func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption { + return func(sw *Switch) { sw.peerFilters = filters } +} + // WithMetrics sets the metrics. func WithMetrics(metrics *Metrics) SwitchOption { return func(sw *Switch) { sw.metrics = metrics } @@ -152,24 +168,6 @@ func (sw *Switch) Reactor(name string) Reactor { return sw.reactors[name] } -// AddListener adds the given listener to the switch for listening to incoming peer connections. -// NOTE: Not goroutine safe. -func (sw *Switch) AddListener(l Listener) { - sw.listeners = append(sw.listeners, l) -} - -// Listeners returns the list of listeners the switch listens on. -// NOTE: Not goroutine safe. -func (sw *Switch) Listeners() []Listener { - return sw.listeners -} - -// IsListening returns true if the switch has at least one listener. -// NOTE: Not goroutine safe. -func (sw *Switch) IsListening() bool { - return len(sw.listeners) > 0 -} - // SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. // NOTE: Not goroutine safe. 
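// Illustrative sketch: assembling a Switch from the new constructor and
// options above. The Transport value is assumed to be produced elsewhere in
// this change (the Switch only consumes the interface); the timeout and
// metrics namespace are arbitrary.
package sketch

import (
	"time"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p"
)

func buildSwitch(
	cfg *config.P2PConfig,
	transport p2p.Transport,
	filters ...p2p.PeerFilterFunc,
) *p2p.Switch {
	return p2p.NewSwitch(
		cfg,
		transport,
		p2p.SwitchPeerFilters(filters...),
		p2p.SwitchFilterTimeout(5*time.Second),
		p2p.WithMetrics(p2p.PrometheusMetrics("tendermint")),
	)
}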
 func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) {
@@ -191,7 +189,7 @@ func (sw *Switch) SetNodeKey(nodeKey *NodeKey) {
 //---------------------------------------------------------------------
 // Service start/stop
 
-// OnStart implements BaseService. It starts all the reactors, peers, and listeners.
+// OnStart implements BaseService. It starts all the reactors and peers.
 func (sw *Switch) OnStart() error {
 	// Start reactors
 	for _, reactor := range sw.reactors {
@@ -200,25 +198,21 @@ func (sw *Switch) OnStart() error {
 			return cmn.ErrorWrap(err, "failed to start %v", reactor)
 		}
 	}
-	// Start listeners
-	for _, listener := range sw.listeners {
-		go sw.listenerRoutine(listener)
-	}
+
+	// Start accepting Peers.
+	go sw.acceptRoutine()
+
 	return nil
 }
 
-// OnStop implements BaseService. It stops all listeners, peers, and reactors.
+// OnStop implements BaseService. It stops all peers and reactors.
 func (sw *Switch) OnStop() {
-	// Stop listeners
-	for _, listener := range sw.listeners {
-		listener.Stop()
-	}
-	sw.listeners = nil
 	// Stop peers
-	for _, peer := range sw.peers.List() {
-		peer.Stop()
-		sw.peers.Remove(peer)
+	for _, p := range sw.peers.List() {
+		p.Stop()
+		sw.peers.Remove(p)
 	}
+
 	// Stop reactors
 	sw.Logger.Debug("Switch: Stopping reactors")
 	for _, reactor := range sw.reactors {
@@ -268,6 +262,11 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
 	return
 }
 
+// MaxNumOutboundPeers returns the maximum number of outbound peers.
+func (sw *Switch) MaxNumOutboundPeers() int {
+	return sw.config.MaxNumOutboundPeers
+}
+
 // Peers returns the set of peers that are connected to the switch.
 func (sw *Switch) Peers() IPeerSet {
 	return sw.peers
@@ -329,6 +328,11 @@ func (sw *Switch) reconnectToPeer(addr *NetAddress) {
 		return
 	}
 
+	if sw.IsDialingOrExistingAddress(addr) {
+		sw.Logger.Debug("Peer connection was established or dialed while we were waiting for the next try", "addr", addr)
+		return
+	}
+
 	err := sw.DialPeerWithAddress(addr, true)
 	if err == nil {
 		return // success
@@ -375,11 +379,6 @@ func (sw *Switch) MarkPeerAsGood(peer Peer) {
 //---------------------------------------------------------------------
 // Dialing
 
-// IsDialing returns true if the switch is currently dialing the given ID.
-func (sw *Switch) IsDialing(id ID) bool {
-	return sw.dialing.Has(string(id))
-}
-
 // DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent).
 // Used to dial peers from config on startup or from unsafe-RPC (trusted sources).
 // TODO: remove addrBook arg since it's now set on the switch
@@ -416,14 +415,20 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b
 	for i := 0; i < len(perm); i++ {
 		go func(i int) {
 			j := perm[i]
-
 			addr := netAddrs[j]
-			// do not dial ourselves
+
 			if addr.Same(ourAddr) {
+				sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr)
 				return
 			}
 
 			sw.randomSleep(0)
+
+			if sw.IsDialingOrExistingAddress(addr) {
+				sw.Logger.Debug("Ignore attempt to connect to an existing peer", "addr", addr)
+				return
+			}
+
 			err := sw.DialPeerWithAddress(addr, persistent)
 			if err != nil {
 				switch err.(type) {
@@ -452,81 +457,82 @@ func (sw *Switch) randomSleep(interval time.Duration) {
 	time.Sleep(r + interval)
 }
 
-//------------------------------------------------------------------------------------
-// Connection filtering
-
-// FilterConnByAddr returns an error if connecting to the given address is forbidden.
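The per-connection address/ID filters removed below are superseded by the PeerFilterFunc hooks above. A minimal sketch of the new style, assuming a caller-maintained `banned` set (`banned` and `banFilter` are illustrative, not part of this change; `cfg` and `transport` stand in for values the caller already has):

var banned = map[ID]struct{}{} // hypothetical ban list maintained by the caller

func banFilter(_ IPeerSet, p Peer) error {
	if _, ok := banned[p.ID()]; ok {
		return fmt.Errorf("peer %v is banned", p.ID())
	}
	return nil
}

sw := NewSwitch(
	cfg,
	transport,
	SwitchPeerFilters(banFilter),        // run against every new peer
	SwitchFilterTimeout(10*time.Second), // bound how long filters may take
)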
-func (sw *Switch) FilterConnByAddr(addr net.Addr) error { - if sw.filterConnByAddr != nil { - return sw.filterConnByAddr(addr) - } - return nil +// IsDialingOrExistingAddress returns true if switch has a peer with the given +// address or dialing it at the moment. +func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { + return sw.dialing.Has(string(addr.ID)) || + sw.peers.Has(addr.ID) || + (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) } -// FilterConnByID returns an error if connecting to the given peer ID is forbidden. -func (sw *Switch) FilterConnByID(id ID) error { - if sw.filterConnByID != nil { - return sw.filterConnByID(id) - } - return nil - -} - -// SetAddrFilter sets the function for filtering connections by address. -func (sw *Switch) SetAddrFilter(f func(net.Addr) error) { - sw.filterConnByAddr = f -} - -// SetIDFilter sets the function for filtering connections by peer ID. -func (sw *Switch) SetIDFilter(f func(ID) error) { - sw.filterConnByID = f -} +func (sw *Switch) acceptRoutine() { + for { + p, err := sw.transport.Accept(peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, + metrics: sw.metrics, + }) + if err != nil { + switch err.(type) { + case ErrRejected: + rErr := err.(ErrRejected) + + if rErr.IsSelf() { + // Remove the given address from the address book and add to our addresses + // to avoid dialing in the future. + addr := rErr.Addr() + sw.addrBook.RemoveAddress(&addr) + sw.addrBook.AddOurAddress(&addr) + } -//------------------------------------------------------------------------------------ + sw.Logger.Info( + "Inbound Peer rejected", + "err", err, + "numPeers", sw.peers.Size(), + ) + + continue + case *ErrTransportClosed: + sw.Logger.Error( + "Stopped accept routine, as transport is closed", + "numPeers", sw.peers.Size(), + ) + default: + sw.Logger.Error( + "Accept on transport errored", + "err", err, + "numPeers", sw.peers.Size(), + ) + } -func (sw *Switch) listenerRoutine(l Listener) { - for { - inConn, ok := <-l.Connections() - if !ok { break } - // ignore connection if we already have enough - // leave room for MinNumOutboundPeers - maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers - if maxPeers <= sw.peers.Size() { - sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers) - inConn.Close() - continue - } + // Ignore connection if we already have enough peers. + _, in, _ := sw.NumPeers() + if in >= sw.config.MaxNumInboundPeers { + sw.Logger.Info( + "Ignoring inbound connection: already have enough inbound peers", + "address", p.NodeInfo().NetAddress().String(), + "have", in, + "max", sw.config.MaxNumInboundPeers, + ) + + _ = p.Stop() - // New inbound connection! 
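Note the cap logic above: instead of carving outbound slots out of a single MaxNumPeers (the removed `MaxNumPeers - DefaultMinNumOutboundPeers` arithmetic), inbound and outbound limits are now independent knobs. A config sketch, assuming the usual default constructor in the config package:

cfg := config.DefaultP2PConfig() // assumed helper from the config package
cfg.MaxNumInboundPeers = 40      // checked in acceptRoutine above
cfg.MaxNumOutboundPeers = 10     // surfaced via Switch.MaxNumOutboundPeers() to the PEX reactor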
- err := sw.addInboundPeerWithConfig(inConn, sw.config) - if err != nil { - sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err) continue } - } - - // cleanup -} -// closes conn if err is returned -func (sw *Switch) addInboundPeerWithConfig( - conn net.Conn, - config *config.P2PConfig, -) error { - peerConn, err := newInboundPeerConn(conn, config, sw.nodeKey.PrivKey) - if err != nil { - conn.Close() // peer is nil - return err - } - if err = sw.addPeer(peerConn); err != nil { - peerConn.CloseConn() - return err + if err := sw.addPeer(p); err != nil { + _ = p.Stop() + sw.Logger.Info( + "Ignoring inbound connection: error while adding peer", + "err", err, + "id", p.ID(), + ) + } } - - return nil } // dial the peer; make secret connection; authenticate against the dialed ID; @@ -536,107 +542,93 @@ func (sw *Switch) addInboundPeerWithConfig( // StopPeerForError is called func (sw *Switch) addOutboundPeerWithConfig( addr *NetAddress, - config *config.P2PConfig, + cfg *config.P2PConfig, persistent bool, ) error { sw.Logger.Info("Dialing peer", "address", addr) - peerConn, err := newOutboundPeerConn( - addr, - config, - persistent, - sw.nodeKey.PrivKey, - ) + + // XXX(xla): Remove the leakage of test concerns in implementation. + if cfg.TestDialFail { + go sw.reconnectToPeer(addr) + return fmt.Errorf("dial err (peerConfig.DialFail == true)") + } + + p, err := sw.transport.Dial(*addr, peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + persistent: persistent, + reactorsByCh: sw.reactorsByCh, + metrics: sw.metrics, + }) if err != nil { + switch e := err.(type) { + case ErrRejected: + if e.IsSelf() { + // Remove the given address from the address book and add to our addresses + // to avoid dialing in the future. + sw.addrBook.RemoveAddress(addr) + sw.addrBook.AddOurAddress(addr) + + return err + } + } + + // retry persistent peers after + // any dial error besides IsSelf() if persistent { go sw.reconnectToPeer(addr) } - return err - } - if err := sw.addPeer(peerConn); err != nil { - peerConn.CloseConn() return err } - return nil -} -// addPeer performs the Tendermint P2P handshake with a peer -// that already has a SecretConnection. If all goes well, -// it starts the peer and adds it to the switch. -// NOTE: This performs a blocking handshake before the peer is added. 
-// NOTE: If error is returned, caller is responsible for calling -// peer.CloseConn() -func (sw *Switch) addPeer(pc peerConn) error { - - addr := pc.conn.RemoteAddr() - if err := sw.FilterConnByAddr(addr); err != nil { + if err := sw.addPeer(p); err != nil { + _ = p.Stop() return err } - // Exchange NodeInfo on the conn - peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.config.HandshakeTimeout)) - if err != nil { - return err - } - - peerID := peerNodeInfo.ID - - // ensure connection key matches self reported key - connID := pc.ID() + return nil +} - if peerID != connID { - return fmt.Errorf( - "nodeInfo.ID() (%v) doesn't match conn.ID() (%v)", - peerID, - connID, - ) +func (sw *Switch) filterPeer(p Peer) error { + // Avoid duplicate + if sw.peers.Has(p.ID()) { + return ErrRejected{id: p.ID(), isDuplicate: true} } - // Validate the peers nodeInfo - if err := peerNodeInfo.Validate(); err != nil { - return err - } + errc := make(chan error, len(sw.peerFilters)) - // Avoid self - if sw.nodeKey.ID() == peerID { - addr := peerNodeInfo.NetAddress() - // remove the given address from the address book - // and add to our addresses to avoid dialing again - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - return ErrSwitchConnectToSelf{addr} + for _, f := range sw.peerFilters { + go func(f PeerFilterFunc, p Peer, errc chan<- error) { + errc <- f(sw.peers, p) + }(f, p, errc) } - // Avoid duplicate - if sw.peers.Has(peerID) { - return ErrSwitchDuplicatePeerID{peerID} - } - - // Check for duplicate connection or peer info IP. - if !sw.config.AllowDuplicateIP && - (sw.peers.HasIP(pc.RemoteIP()) || - sw.peers.HasIP(peerNodeInfo.NetAddress().IP)) { - return ErrSwitchDuplicatePeerIP{pc.RemoteIP()} + for i := 0; i < cap(errc); i++ { + select { + case err := <-errc: + if err != nil { + return ErrRejected{id: p.ID(), err: err, isFiltered: true} + } + case <-time.After(sw.filterTimeout): + return ErrFilterTimeout{} + } } - // Filter peer against ID white list - if err := sw.FilterConnByID(peerID); err != nil { - return err - } + return nil +} - // Check version, chain id - if err := sw.nodeInfo.CompatibleWith(peerNodeInfo); err != nil { +// addPeer starts up the Peer and adds it to the Switch. +func (sw *Switch) addPeer(p Peer) error { + if err := sw.filterPeer(p); err != nil { return err } - peer := newPeer(pc, sw.mConfig, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError) - peer.SetLogger(sw.Logger.With("peer", addr)) - - peer.Logger.Info("Successful handshake with peer", "peerNodeInfo", peerNodeInfo) + p.SetLogger(sw.Logger.With("peer", p.NodeInfo().NetAddress())) // All good. Start peer if sw.IsRunning() { - if err = sw.startInitPeer(peer); err != nil { + if err := sw.startInitPeer(p); err != nil { return err } } @@ -644,25 +636,30 @@ func (sw *Switch) addPeer(pc peerConn) error { // Add the peer to .peers. // We start it first so that a peer in the list is safe to Stop. // It should not err since we already checked peers.Has(). 
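Since filterPeer wraps every veto in ErrRejected, a caller of addPeer can branch on the rejection reason using the predicate methods exercised by the tests further down; a sketch:

if err := sw.addPeer(p); err != nil {
	if rErr, ok := err.(ErrRejected); ok {
		switch {
		case rErr.IsDuplicate():
			// Already connected to this ID (or IP); safe to ignore.
		case rErr.IsFiltered():
			// Vetoed by one of the configured PeerFilterFuncs.
		case rErr.IsSelf():
			// We reached ourselves; the address book was already updated.
		}
	}
}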
- if err := sw.peers.Add(peer); err != nil { + if err := sw.peers.Add(p); err != nil { return err } + + sw.Logger.Info("Added peer", "peer", p) sw.metrics.Peers.Add(float64(1)) - sw.Logger.Info("Added peer", "peer", peer) return nil } -func (sw *Switch) startInitPeer(peer *peer) error { - err := peer.Start() // spawn send/recv routines +func (sw *Switch) startInitPeer(p Peer) error { + err := p.Start() // spawn send/recv routines if err != nil { // Should never happen - sw.Logger.Error("Error starting peer", "peer", peer, "err", err) + sw.Logger.Error( + "Error starting peer", + "err", err, + "peer", p, + ) return err } for _, reactor := range sw.reactors { - reactor.AddPeer(peer) + reactor.AddPeer(p) } return nil diff --git a/p2p/switch_test.go b/p2p/switch_test.go index 2ce297761f0..f52e47f06a6 100644 --- a/p2p/switch_test.go +++ b/p2p/switch_test.go @@ -3,7 +3,6 @@ package p2p import ( "bytes" "fmt" - "net" "sync" "testing" "time" @@ -11,10 +10,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" - - "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p/conn" ) @@ -145,41 +143,13 @@ func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, r } return } + case <-time.After(timeout): t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) } } } -func TestConnAddrFilter(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - defer s1.Stop() - defer s2.Stop() - - c1, c2 := conn.NetPipe() - - s1.SetAddrFilter(func(addr net.Addr) error { - if addr.String() == c1.RemoteAddr().String() { - return fmt.Errorf("Error: pipe is blacklisted") - } - return nil - }) - - // connect to good peer - go func() { - err := s1.addPeerWithConnection(c1) - assert.NotNil(t, err, "expected err") - }() - go func() { - err := s2.addPeerWithConnection(c2) - assert.NotNil(t, err, "expected err") - }() - - assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond) - assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond) -} - func TestSwitchFiltersOutItself(t *testing.T) { s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc) // addr := s1.NodeInfo().NetAddress() @@ -194,11 +164,16 @@ func TestSwitchFiltersOutItself(t *testing.T) { // addr should be rejected in addPeer based on the same ID err := s1.DialPeerWithAddress(rp.Addr(), false) if assert.Error(t, err) { - assert.Equal(t, ErrSwitchConnectToSelf{rp.Addr()}.Error(), err.Error()) + if err, ok := err.(ErrRejected); ok { + if !err.IsSelf() { + t.Errorf("expected self to be rejected") + } + } else { + t.Errorf("expected ErrRejected") + } } assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) rp.Stop() @@ -206,46 +181,124 @@ func TestSwitchFiltersOutItself(t *testing.T) { assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) } -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) +func TestSwitchPeerFilter(t *testing.T) { + var ( + filters = []PeerFilterFunc{ + func(_ IPeerSet, _ Peer) error { return nil }, + func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied!") }, + 
func(_ IPeerSet, _ Peer) error { return nil }, + } + sw = MakeSwitch( + cfg, + 1, + "testing", + "123.123.123", + initSwitchFunc, + SwitchPeerFilters(filters...), + ) + ) + defer sw.Stop() + + // simulate remote peer + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, + }) + if err != nil { + t.Fatal(err) + } + + err = sw.addPeer(p) + if err, ok := err.(ErrRejected); ok { + if !err.IsFiltered() { + t.Errorf("expected peer to be filtered") + } + } else { + t.Errorf("expected ErrRejected") } } -func TestConnIDFilter(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) - defer s1.Stop() - defer s2.Stop() +func TestSwitchPeerFilterTimeout(t *testing.T) { + var ( + filters = []PeerFilterFunc{ + func(_ IPeerSet, _ Peer) error { + time.Sleep(10 * time.Millisecond) + return nil + }, + } + sw = MakeSwitch( + cfg, + 1, + "testing", + "123.123.123", + initSwitchFunc, + SwitchFilterTimeout(5*time.Millisecond), + SwitchPeerFilters(filters...), + ) + ) + defer sw.Stop() - c1, c2 := conn.NetPipe() + // simulate remote peer + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() - s1.SetIDFilter(func(id ID) error { - if id == s2.nodeInfo.ID { - return fmt.Errorf("Error: pipe is blacklisted") - } - return nil + p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, }) + if err != nil { + t.Fatal(err) + } - s2.SetIDFilter(func(id ID) error { - if id == s1.nodeInfo.ID { - return fmt.Errorf("Error: pipe is blacklisted") - } - return nil + err = sw.addPeer(p) + if _, ok := err.(ErrFilterTimeout); !ok { + t.Errorf("expected ErrFilterTimeout") + } +} + +func TestSwitchPeerFilterDuplicate(t *testing.T) { + sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc) + + // simulate remote peer + rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} + rp.Start() + defer rp.Stop() + + p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, }) + if err != nil { + t.Fatal(err) + } - go func() { - err := s1.addPeerWithConnection(c1) - assert.NotNil(t, err, "expected error") - }() - go func() { - err := s2.addPeerWithConnection(c2) - assert.NotNil(t, err, "expected error") - }() + if err := sw.addPeer(p); err != nil { + t.Fatal(err) + } + + err = sw.addPeer(p) + if err, ok := err.(ErrRejected); ok { + if !err.IsDuplicate() { + t.Errorf("expected peer to be duplicate") + } + } else { + t.Errorf("expected ErrRejected") + } +} - assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond) - assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond) +func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { + time.Sleep(timeout) + if sw.Peers().Size() != 0 { + t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) + } } func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { @@ -263,19 +316,23 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { rp.Start() defer rp.Stop() - pc, err := newOutboundPeerConn(rp.Addr(), cfg, false, sw.nodeKey.PrivKey) + p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ + chDescs: sw.chDescs, + onPeerError: 
sw.StopPeerForError, + reactorsByCh: sw.reactorsByCh, + }) require.Nil(err) - err = sw.addPeer(pc) + + err = sw.addPeer(p) require.Nil(err) - peer := sw.Peers().Get(rp.ID()) - require.NotNil(peer) + require.NotNil(sw.Peers().Get(rp.ID())) // simulate failure by closing connection - pc.CloseConn() + p.(*peer).CloseConn() assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(peer.IsRunning()) + assert.False(p.IsRunning()) } func TestSwitchReconnectsToPersistentPeer(t *testing.T) { @@ -293,17 +350,20 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { rp.Start() defer rp.Stop() - pc, err := newOutboundPeerConn(rp.Addr(), cfg, true, sw.nodeKey.PrivKey) - // sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodeKey.PrivKey, + p, err := sw.transport.Dial(*rp.Addr(), peerConfig{ + chDescs: sw.chDescs, + onPeerError: sw.StopPeerForError, + persistent: true, + reactorsByCh: sw.reactorsByCh, + }) require.Nil(err) - require.Nil(sw.addPeer(pc)) + require.Nil(sw.addPeer(p)) - peer := sw.Peers().Get(rp.ID()) - require.NotNil(peer) + require.NotNil(sw.Peers().Get(rp.ID())) // simulate failure by closing connection - pc.CloseConn() + p.(*peer).CloseConn() // TODO: remove sleep, detect the disconnection, wait for reconnect npeers := sw.Peers().Size() @@ -315,7 +375,7 @@ func TestSwitchReconnectsToPersistentPeer(t *testing.T) { } } assert.NotZero(npeers) - assert.False(peer.IsRunning()) + assert.False(p.IsRunning()) // simulate another remote peer rp = &remotePeer{ diff --git a/p2p/test_util.go b/p2p/test_util.go index fdf9ae76474..d72c0c76051 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -3,7 +3,9 @@ package p2p import ( "fmt" "net" + "time" + crypto "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" @@ -12,6 +14,19 @@ import ( "github.com/tendermint/tendermint/p2p/conn" ) +const testCh = 0x01 + +//------------------------------------------------ + +type mockNodeInfo struct { + addr *NetAddress +} + +func (ni mockNodeInfo) ID() ID { return ni.addr.ID } +func (ni mockNodeInfo) NetAddress() *NetAddress { return ni.addr } +func (ni mockNodeInfo) ValidateBasic() error { return nil } +func (ni mockNodeInfo) CompatibleWith(other NodeInfo) error { return nil } + func AddPeerToSwitch(sw *Switch, peer Peer) { sw.peers.Add(peer) } @@ -22,11 +37,9 @@ func CreateRandomPeer(outbound bool) *peer { peerConn: peerConn{ outbound: outbound, }, - nodeInfo: NodeInfo{ - ID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - mconn: &conn.MConnection{}, + nodeInfo: mockNodeInfo{netAddr}, + mconn: &conn.MConnection{}, + metrics: NopMetrics(), } p.SetLogger(log.TestingLogger().With("peer", addr)) return p @@ -35,7 +48,7 @@ func CreateRandomPeer(outbound bool) *peer { func CreateRoutableAddr() (addr string, netAddr *NetAddress) { for { var err error - addr = cmn.Fmt("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) + addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) netAddr, err = NewNetAddressString(addr) if err != nil { panic(err) @@ -104,14 +117,32 @@ func Connect2Switches(switches []*Switch, i, j int) { } func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := newInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey) + pc, err := testInboundPeerConn(conn, 
sw.config, sw.nodeKey.PrivKey) + if err != nil { + if err := conn.Close(); err != nil { + sw.Logger.Error("Error closing connection", "err", err) + } + return err + } + + ni, err := handshake(conn, 50*time.Millisecond, sw.nodeInfo) if err != nil { if err := conn.Close(); err != nil { sw.Logger.Error("Error closing connection", "err", err) } return err } - if err = sw.addPeer(pc); err != nil { + + p := newPeer( + pc, + sw.mConfig, + ni, + sw.reactorsByCh, + sw.chDescs, + sw.StopPeerForError, + ) + + if err = sw.addPeer(p); err != nil { pc.CloseConn() return err } @@ -131,26 +162,103 @@ func StartSwitches(switches []*Switch) error { return nil } -func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch { - // new switch, add reactors - // TODO: let the config be passed in? - nodeKey := &NodeKey{ +func MakeSwitch( + cfg *config.P2PConfig, + i int, + network, version string, + initSwitch func(int, *Switch) *Switch, + opts ...SwitchOption, +) *Switch { + + nodeKey := NodeKey{ PrivKey: ed25519.GenPrivKey(), } - sw := NewSwitch(cfg) - sw.SetLogger(log.TestingLogger()) - sw = initSwitch(i, sw) - ni := NodeInfo{ - ID: nodeKey.ID(), - Moniker: cmn.Fmt("switch%d", i), - Network: network, - Version: version, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + nodeInfo := testNodeInfo(nodeKey.ID(), fmt.Sprintf("node%d", i)) + + t := NewMultiplexTransport(nodeInfo, nodeKey) + + addr := nodeInfo.NetAddress() + if err := t.Listen(*addr); err != nil { + panic(err) } + + // TODO: let the config be passed in? + sw := initSwitch(i, NewSwitch(cfg, t, opts...)) + sw.SetLogger(log.TestingLogger()) + sw.SetNodeKey(&nodeKey) + + ni := nodeInfo.(DefaultNodeInfo) for ch := range sw.reactorsByCh { ni.Channels = append(ni.Channels, ch) } - sw.SetNodeInfo(ni) - sw.SetNodeKey(nodeKey) + nodeInfo = ni + + // TODO: We need to setup reactors ahead of time so the NodeInfo is properly + // populated and we don't have to do those awkward overrides and setters. 
+ t.nodeInfo = nodeInfo + sw.SetNodeInfo(nodeInfo) + return sw } + +func testInboundPeerConn( + conn net.Conn, + config *config.P2PConfig, + ourNodePrivKey crypto.PrivKey, +) (peerConn, error) { + return testPeerConn(conn, config, false, false, ourNodePrivKey, nil) +} + +func testPeerConn( + rawConn net.Conn, + cfg *config.P2PConfig, + outbound, persistent bool, + ourNodePrivKey crypto.PrivKey, + originalAddr *NetAddress, +) (pc peerConn, err error) { + conn := rawConn + + // Fuzz connection + if cfg.TestFuzz { + // so we have time to do peer handshakes and get set up + conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) + } + + // Encrypt connection + conn, err = upgradeSecretConn(conn, cfg.HandshakeTimeout, ourNodePrivKey) + if err != nil { + return pc, cmn.ErrorWrap(err, "Error creating peer") + } + + // Only the information we already have + return peerConn{ + config: cfg, + outbound: outbound, + persistent: persistent, + conn: conn, + originalAddr: originalAddr, + }, nil +} + +//---------------------------------------------------------------- +// rand node info + +func testNodeInfo(id ID, name string) NodeInfo { + return testNodeInfoWithNetwork(id, name, "testing") +} + +func testNodeInfoWithNetwork(id ID, name, network string) NodeInfo { + return DefaultNodeInfo{ + ProtocolVersion: defaultProtocolVersion, + ID_: id, + ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + Network: network, + Version: "1.2.3-rc0-deadbeef", + Channels: []byte{testCh}, + Moniker: name, + Other: DefaultNodeInfoOther{ + TxIndex: "on", + RPCAddress: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), + }, + } +} diff --git a/p2p/transport.go b/p2p/transport.go new file mode 100644 index 00000000000..0b9b436f0d3 --- /dev/null +++ b/p2p/transport.go @@ -0,0 +1,506 @@ +package p2p + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/p2p/conn" +) + +const ( + defaultDialTimeout = time.Second + defaultFilterTimeout = 5 * time.Second + defaultHandshakeTimeout = 3 * time.Second +) + +// IPResolver is a behaviour subset of net.Resolver. +type IPResolver interface { + LookupIPAddr(context.Context, string) ([]net.IPAddr, error) +} + +// accept is the container to carry the upgraded connection and NodeInfo from an +// asynchronously running routine to the Accept method. +type accept struct { + conn net.Conn + nodeInfo NodeInfo + err error +} + +// peerConfig is used to bundle data we need to fully setup a Peer with an +// MConn, provided by the caller of Accept and Dial (currently the Switch). This +// a temporary measure until reactor setup is less dynamic and we introduce the +// concept of PeerBehaviour to communicate about significant Peer lifecycle +// events. +// TODO(xla): Refactor out with more static Reactor setup and PeerBehaviour. +type peerConfig struct { + chDescs []*conn.ChannelDescriptor + onPeerError func(Peer, interface{}) + outbound, persistent bool + reactorsByCh map[byte]Reactor + metrics *Metrics +} + +// Transport emits and connects to Peers. The implementation of Peer is left to +// the transport. Each transport is also responsible to filter establishing +// peers specific to its domain. +type Transport interface { + // Accept returns a newly connected Peer. + Accept(peerConfig) (Peer, error) + + // Dial connects to the Peer for the address. 
+	Dial(NetAddress, peerConfig) (Peer, error)
+}
+
+// transportLifecycle bundles the methods for callers to control start and stop
+// behaviour.
+type transportLifecycle interface {
+	Close() error
+	Listen(NetAddress) error
+}
+
+// ConnFilterFunc to be implemented by filter hooks after a new connection has
+// been established. The set of existing connections is passed along together
+// with all resolved IPs for the new connection.
+type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error
+
+// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection
+// and refuses new ones if they come from a known ip.
+func ConnDuplicateIPFilter() ConnFilterFunc {
+	return func(cs ConnSet, c net.Conn, ips []net.IP) error {
+		for _, ip := range ips {
+			if cs.HasIP(ip) {
+				return ErrRejected{
+					conn:        c,
+					err:         fmt.Errorf("IP<%v> already connected", ip),
+					isDuplicate: true,
+				}
+			}
+		}
+
+		return nil
+	}
+}
+
+// MultiplexTransportOption sets an optional parameter on the
+// MultiplexTransport.
+type MultiplexTransportOption func(*MultiplexTransport)
+
+// MultiplexTransportConnFilters sets the filters for rejection of new
+// connections.
+func MultiplexTransportConnFilters(
+	filters ...ConnFilterFunc,
+) MultiplexTransportOption {
+	return func(mt *MultiplexTransport) { mt.connFilters = filters }
+}
+
+// MultiplexTransportFilterTimeout sets the timeout waited for filter calls to
+// return.
+func MultiplexTransportFilterTimeout(
+	timeout time.Duration,
+) MultiplexTransportOption {
+	return func(mt *MultiplexTransport) { mt.filterTimeout = timeout }
+}
+
+// MultiplexTransportResolver sets the Resolver used for IP lookups, defaults to
+// net.DefaultResolver.
+func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
+	return func(mt *MultiplexTransport) { mt.resolver = resolver }
+}
+
+// MultiplexTransport accepts and dials tcp connections and upgrades them to
+// multiplexed peers.
+type MultiplexTransport struct {
+	listener net.Listener
+
+	acceptc chan accept
+	closec  chan struct{}
+
+	// Lookup table for duplicate ip and id checks.
+	conns       ConnSet
+	connFilters []ConnFilterFunc
+
+	dialTimeout      time.Duration
+	filterTimeout    time.Duration
+	handshakeTimeout time.Duration
+	nodeInfo         NodeInfo
+	nodeKey          NodeKey
+	resolver         IPResolver
+
+	// TODO(xla): Those configs are still needed as we parameterise peerConn and
+	// peer currently. All relevant configuration should be refactored into options
+	// with sane defaults.
+	mConfig   conn.MConnConfig
+	p2pConfig config.P2PConfig
+}
+
+// Test multiplexTransport for interface completeness.
+var _ Transport = (*MultiplexTransport)(nil)
+var _ transportLifecycle = (*MultiplexTransport)(nil)
+
+// NewMultiplexTransport returns a TCP-backed transport which upgrades its
+// connections to multiplexed peers.
+func NewMultiplexTransport(
+	nodeInfo NodeInfo,
+	nodeKey NodeKey,
+) *MultiplexTransport {
+	return &MultiplexTransport{
+		acceptc:          make(chan accept),
+		closec:           make(chan struct{}),
+		dialTimeout:      defaultDialTimeout,
+		filterTimeout:    defaultFilterTimeout,
+		handshakeTimeout: defaultHandshakeTimeout,
+		mConfig:          conn.DefaultMConnConfig(),
+		nodeInfo:         nodeInfo,
+		nodeKey:          nodeKey,
+		conns:            NewConnSet(),
+		resolver:         net.DefaultResolver,
+	}
+}
+
+// Accept implements Transport.
+func (mt *MultiplexTransport) Accept(cfg peerConfig) (Peer, error) {
+	select {
+	// This case should never have any side-effectful/blocking operations to
+	// ensure that quality peers are ready to be used.
+ case a := <-mt.acceptc: + if a.err != nil { + return nil, a.err + } + + cfg.outbound = false + + return mt.wrapPeer(a.conn, a.nodeInfo, cfg, nil), nil + case <-mt.closec: + return nil, &ErrTransportClosed{} + } +} + +// Dial implements Transport. +func (mt *MultiplexTransport) Dial( + addr NetAddress, + cfg peerConfig, +) (Peer, error) { + c, err := addr.DialTimeout(mt.dialTimeout) + if err != nil { + return nil, err + } + + // TODO(xla): Evaluate if we should apply filters if we explicitly dial. + if err := mt.filterConn(c); err != nil { + return nil, err + } + + secretConn, nodeInfo, err := mt.upgrade(c) + if err != nil { + return nil, err + } + + cfg.outbound = true + + p := mt.wrapPeer(secretConn, nodeInfo, cfg, &addr) + + return p, nil +} + +// Close implements transportLifecycle. +func (mt *MultiplexTransport) Close() error { + close(mt.closec) + + if mt.listener != nil { + return mt.listener.Close() + } + + return nil +} + +// Listen implements transportLifecycle. +func (mt *MultiplexTransport) Listen(addr NetAddress) error { + ln, err := net.Listen("tcp", addr.DialString()) + if err != nil { + return err + } + + mt.listener = ln + + go mt.acceptPeers() + + return nil +} + +func (mt *MultiplexTransport) acceptPeers() { + for { + c, err := mt.listener.Accept() + if err != nil { + // If Close() has been called, silently exit. + select { + case _, ok := <-mt.closec: + if !ok { + return + } + default: + // Transport is not closed + } + + mt.acceptc <- accept{err: err} + return + } + + // Connection upgrade and filtering should be asynchronous to avoid + // Head-of-line blocking[0]. + // Reference: https://github.com/tendermint/tendermint/issues/2047 + // + // [0] https://en.wikipedia.org/wiki/Head-of-line_blocking + go func(c net.Conn) { + var ( + nodeInfo NodeInfo + secretConn *conn.SecretConnection + ) + + err := mt.filterConn(c) + if err == nil { + secretConn, nodeInfo, err = mt.upgrade(c) + } + + select { + case mt.acceptc <- accept{secretConn, nodeInfo, err}: + // Make the upgraded peer available. + case <-mt.closec: + // Give up if the transport was closed. + _ = c.Close() + return + } + }(c) + } +} + +func (mt *MultiplexTransport) cleanup(c net.Conn) error { + mt.conns.Remove(c) + + return c.Close() +} + +func (mt *MultiplexTransport) filterConn(c net.Conn) (err error) { + defer func() { + if err != nil { + _ = c.Close() + } + }() + + // Reject if connection is already present. + if mt.conns.Has(c) { + return ErrRejected{conn: c, isDuplicate: true} + } + + // Resolve ips for incoming conn. 
+	ips, err := resolveIPs(mt.resolver, c)
+	if err != nil {
+		return err
+	}
+
+	errc := make(chan error, len(mt.connFilters))
+
+	for _, f := range mt.connFilters {
+		go func(f ConnFilterFunc, c net.Conn, ips []net.IP, errc chan<- error) {
+			errc <- f(mt.conns, c, ips)
+		}(f, c, ips, errc)
+	}
+
+	for i := 0; i < cap(errc); i++ {
+		select {
+		case err := <-errc:
+			if err != nil {
+				return ErrRejected{conn: c, err: err, isFiltered: true}
+			}
+		case <-time.After(mt.filterTimeout):
+			return ErrFilterTimeout{}
+		}
+	}
+
+	mt.conns.Set(c, ips)
+
+	return nil
+}
+
+func (mt *MultiplexTransport) upgrade(
+	c net.Conn,
+) (secretConn *conn.SecretConnection, nodeInfo NodeInfo, err error) {
+	defer func() {
+		if err != nil {
+			_ = mt.cleanup(c)
+		}
+	}()
+
+	secretConn, err = upgradeSecretConn(c, mt.handshakeTimeout, mt.nodeKey.PrivKey)
+	if err != nil {
+		return nil, nil, ErrRejected{
+			conn:          c,
+			err:           fmt.Errorf("secret conn failed: %v", err),
+			isAuthFailure: true,
+		}
+	}
+
+	nodeInfo, err = handshake(secretConn, mt.handshakeTimeout, mt.nodeInfo)
+	if err != nil {
+		return nil, nil, ErrRejected{
+			conn:          c,
+			err:           fmt.Errorf("handshake failed: %v", err),
+			isAuthFailure: true,
+		}
+	}
+
+	if err := nodeInfo.ValidateBasic(); err != nil {
+		return nil, nil, ErrRejected{
+			conn:              c,
+			err:               err,
+			isNodeInfoInvalid: true,
+		}
+	}
+
+	// Ensure connection key matches self reported key.
+	if connID := PubKeyToID(secretConn.RemotePubKey()); connID != nodeInfo.ID() {
+		return nil, nil, ErrRejected{
+			conn: c,
+			id:   connID,
+			err: fmt.Errorf(
+				"conn.ID (%v) NodeInfo.ID (%v) mismatch",
+				connID,
+				nodeInfo.ID(),
+			),
+			isAuthFailure: true,
+		}
+	}
+
+	// Reject self.
+	if mt.nodeInfo.ID() == nodeInfo.ID() {
+		return nil, nil, ErrRejected{
+			addr:   *NewNetAddress(nodeInfo.ID(), c.RemoteAddr()),
+			conn:   c,
+			id:     nodeInfo.ID(),
+			isSelf: true,
+		}
+	}
+
+	if err := mt.nodeInfo.CompatibleWith(nodeInfo); err != nil {
+		return nil, nil, ErrRejected{
+			conn:           c,
+			err:            err,
+			id:             nodeInfo.ID(),
+			isIncompatible: true,
+		}
+	}
+
+	return secretConn, nodeInfo, nil
+}
+
+func (mt *MultiplexTransport) wrapPeer(
+	c net.Conn,
+	ni NodeInfo,
+	cfg peerConfig,
+	dialedAddr *NetAddress,
+) Peer {
+
+	peerConn := newPeerConn(
+		cfg.outbound,
+		cfg.persistent,
+		&mt.p2pConfig,
+		c,
+		dialedAddr,
+	)
+
+	p := newPeer(
+		peerConn,
+		mt.mConfig,
+		ni,
+		cfg.reactorsByCh,
+		cfg.chDescs,
+		cfg.onPeerError,
+		PeerMetrics(cfg.metrics),
+	)
+
+	// Wait for Peer to Stop so we can clean up.
+ go func(c net.Conn) { + <-p.Quit() + _ = mt.cleanup(c) + }(c) + + return p +} + +func handshake( + c net.Conn, + timeout time.Duration, + nodeInfo NodeInfo, +) (NodeInfo, error) { + if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { + return nil, err + } + + var ( + errc = make(chan error, 2) + + peerNodeInfo DefaultNodeInfo + ourNodeInfo = nodeInfo.(DefaultNodeInfo) + ) + + go func(errc chan<- error, c net.Conn) { + _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, ourNodeInfo) + errc <- err + }(errc, c) + go func(errc chan<- error, c net.Conn) { + _, err := cdc.UnmarshalBinaryLengthPrefixedReader( + c, + &peerNodeInfo, + int64(MaxNodeInfoSize()), + ) + errc <- err + }(errc, c) + + for i := 0; i < cap(errc); i++ { + err := <-errc + if err != nil { + return nil, err + } + } + + return peerNodeInfo, c.SetDeadline(time.Time{}) +} + +func upgradeSecretConn( + c net.Conn, + timeout time.Duration, + privKey crypto.PrivKey, +) (*conn.SecretConnection, error) { + if err := c.SetDeadline(time.Now().Add(timeout)); err != nil { + return nil, err + } + + sc, err := conn.MakeSecretConnection(c, privKey) + if err != nil { + return nil, err + } + + return sc, sc.SetDeadline(time.Time{}) +} + +func resolveIPs(resolver IPResolver, c net.Conn) ([]net.IP, error) { + host, _, err := net.SplitHostPort(c.RemoteAddr().String()) + if err != nil { + return nil, err + } + + addrs, err := resolver.LookupIPAddr(context.Background(), host) + if err != nil { + return nil, err + } + + ips := []net.IP{} + + for _, addr := range addrs { + ips = append(ips, addr.IP) + } + + return ips, nil +} diff --git a/p2p/transport_test.go b/p2p/transport_test.go new file mode 100644 index 00000000000..8a5c06bc373 --- /dev/null +++ b/p2p/transport_test.go @@ -0,0 +1,615 @@ +package p2p + +import ( + "fmt" + "math/rand" + "net" + "reflect" + "testing" + "time" + + "github.com/tendermint/tendermint/crypto/ed25519" +) + +var defaultNodeName = "host_peer" + +func emptyNodeInfo() NodeInfo { + return DefaultNodeInfo{} +} + +func TestTransportMultiplexConnFilter(t *testing.T) { + mt := NewMultiplexTransport( + emptyNodeInfo(), + NodeKey{ + PrivKey: ed25519.GenPrivKey(), + }, + ) + + MultiplexTransportConnFilters( + func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, + func(_ ConnSet, _ net.Conn, _ []net.IP) error { return nil }, + func(_ ConnSet, _ net.Conn, _ []net.IP) error { + return fmt.Errorf("rejected") + }, + )(mt) + + addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + errc := make(chan error) + + go func() { + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = addr.Dial() + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, err = mt.Accept(peerConfig{}) + if err, ok := err.(ErrRejected); ok { + if !err.IsFiltered() { + t.Errorf("expected peer to be filtered") + } + } else { + t.Errorf("expected ErrRejected") + } +} + +func TestTransportMultiplexConnFilterTimeout(t *testing.T) { + mt := NewMultiplexTransport( + emptyNodeInfo(), + NodeKey{ + PrivKey: ed25519.GenPrivKey(), + }, + ) + + MultiplexTransportFilterTimeout(5 * time.Millisecond)(mt) + MultiplexTransportConnFilters( + func(_ ConnSet, _ net.Conn, _ []net.IP) error { + time.Sleep(10 * time.Millisecond) + return nil + }, + )(mt) + + addr, err := 
NewNetAddressStringWithOptionalID("127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + errc := make(chan error) + + go func() { + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = addr.Dial() + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, err = mt.Accept(peerConfig{}) + if _, ok := err.(ErrFilterTimeout); !ok { + t.Errorf("expected ErrFilterTimeout") + } +} + +func TestTransportMultiplexAcceptMultiple(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + var ( + seed = rand.New(rand.NewSource(time.Now().UnixNano())) + errc = make(chan error, seed.Intn(64)+64) + ) + + // Setup dialers. + for i := 0; i < cap(errc); i++ { + go func() { + var ( + pv = ed25519.GenPrivKey() + dialer = NewMultiplexTransport( + testNodeInfo(PubKeyToID(pv.PubKey()), defaultNodeName), + NodeKey{ + PrivKey: pv, + }, + ) + ) + + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = dialer.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + // Signal that the connection was established. + errc <- nil + }() + } + + // Catch connection errors. + for i := 0; i < cap(errc); i++ { + if err := <-errc; err != nil { + t.Fatal(err) + } + } + + ps := []Peer{} + + // Accept all peers. + for i := 0; i < cap(errc); i++ { + p, err := mt.Accept(peerConfig{}) + if err != nil { + t.Fatal(err) + } + + if err := p.Start(); err != nil { + t.Fatal(err) + } + + ps = append(ps, p) + } + + if have, want := len(ps), cap(errc); have != want { + t.Errorf("have %v, want %v", have, want) + } + + // Stop all peers. + for _, p := range ps { + if err := p.Stop(); err != nil { + t.Fatal(err) + } + } + + if err := mt.Close(); err != nil { + t.Errorf("close errored: %v", err) + } +} + +func TestTransportMultiplexAcceptNonBlocking(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + var ( + fastNodePV = ed25519.GenPrivKey() + fastNodeInfo = testNodeInfo(PubKeyToID(fastNodePV.PubKey()), "fastnode") + errc = make(chan error) + fastc = make(chan struct{}) + slowc = make(chan struct{}) + ) + + // Simulate slow Peer. + go func() { + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + c, err := addr.Dial() + if err != nil { + errc <- err + return + } + + close(slowc) + + select { + case <-fastc: + // Fast peer connected. + case <-time.After(50 * time.Millisecond): + // We error if the fast peer didn't succeed. + errc <- fmt.Errorf("Fast peer timed out") + } + + sc, err := upgradeSecretConn(c, 20*time.Millisecond, ed25519.GenPrivKey()) + if err != nil { + errc <- err + return + } + + _, err = handshake(sc, 20*time.Millisecond, + testNodeInfo( + PubKeyToID(ed25519.GenPrivKey().PubKey()), + "slow_peer", + )) + if err != nil { + errc <- err + return + } + }() + + // Simulate fast Peer. 
+ go func() { + <-slowc + + var ( + dialer = NewMultiplexTransport( + fastNodeInfo, + NodeKey{ + PrivKey: fastNodePV, + }, + ) + ) + + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = dialer.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + close(errc) + close(fastc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + p, err := mt.Accept(peerConfig{}) + if err != nil { + t.Fatal(err) + } + + if have, want := p.NodeInfo(), fastNodeInfo; !reflect.DeepEqual(have, want) { + t.Errorf("have %v, want %v", have, want) + } +} + +func TestTransportMultiplexValidateNodeInfo(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + errc := make(chan error) + + go func() { + var ( + pv = ed25519.GenPrivKey() + dialer = NewMultiplexTransport( + testNodeInfo(PubKeyToID(pv.PubKey()), ""), // Should not be empty + NodeKey{ + PrivKey: pv, + }, + ) + ) + + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = dialer.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, err := mt.Accept(peerConfig{}) + if err, ok := err.(ErrRejected); ok { + if !err.IsNodeInfoInvalid() { + t.Errorf("expected NodeInfo to be invalid") + } + } else { + t.Errorf("expected ErrRejected") + } +} + +func TestTransportMultiplexRejectMissmatchID(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + errc := make(chan error) + + go func() { + dialer := NewMultiplexTransport( + testNodeInfo( + PubKeyToID(ed25519.GenPrivKey().PubKey()), "dialer", + ), + NodeKey{ + PrivKey: ed25519.GenPrivKey(), + }, + ) + + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = dialer.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { + t.Errorf("connection failed: %v", err) + } + + _, err := mt.Accept(peerConfig{}) + if err, ok := err.(ErrRejected); ok { + if !err.IsAuthFailure() { + t.Errorf("expected auth failure") + } + } else { + t.Errorf("expected ErrRejected") + } +} + +func TestTransportMultiplexRejectIncompatible(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + errc := make(chan error) + + go func() { + var ( + pv = ed25519.GenPrivKey() + dialer = NewMultiplexTransport( + testNodeInfoWithNetwork(PubKeyToID(pv.PubKey()), "dialer", "incompatible-network"), + NodeKey{ + PrivKey: pv, + }, + ) + ) + + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = dialer.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + close(errc) + }() + + _, err := mt.Accept(peerConfig{}) + if err, ok := err.(ErrRejected); ok { + if !err.IsIncompatible() { + t.Errorf("expected to reject incompatible") + } + } else { + t.Errorf("expected ErrRejected") + } +} + +func TestTransportMultiplexRejectSelf(t *testing.T) { + mt := testSetupMultiplexTransport(t) + + errc := make(chan error) + + go func() { + addr, err := NewNetAddressStringWithOptionalID(mt.listener.Addr().String()) + if err != nil { + errc <- err + return + } + + _, err = mt.Dial(*addr, peerConfig{}) + if err != nil { + errc <- err + return + } + + close(errc) + }() + + if err := <-errc; err != nil { 
+ if err, ok := err.(ErrRejected); ok { + if !err.IsSelf() { + t.Errorf("expected to reject self") + } + } else { + t.Errorf("expected ErrRejected") + } + } else { + t.Errorf("expected connection failure") + } + + _, err := mt.Accept(peerConfig{}) + if err, ok := err.(ErrRejected); ok { + if !err.IsSelf() { + t.Errorf("expected to reject self") + } + } else { + t.Errorf("expected ErrRejected") + } +} + +func TestTransportConnDuplicateIPFilter(t *testing.T) { + filter := ConnDuplicateIPFilter() + + if err := filter(nil, &testTransportConn{}, nil); err != nil { + t.Fatal(err) + } + + var ( + c = &testTransportConn{} + cs = NewConnSet() + ) + + cs.Set(c, []net.IP{ + net.IP{10, 0, 10, 1}, + net.IP{10, 0, 10, 2}, + net.IP{10, 0, 10, 3}, + }) + + if err := filter(cs, c, []net.IP{ + net.IP{10, 0, 10, 2}, + }); err == nil { + t.Errorf("expected Peer to be rejected as duplicate") + } +} + +func TestTransportHandshake(t *testing.T) { + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + var ( + peerPV = ed25519.GenPrivKey() + peerNodeInfo = testNodeInfo(PubKeyToID(peerPV.PubKey()), defaultNodeName) + ) + + go func() { + c, err := net.Dial(ln.Addr().Network(), ln.Addr().String()) + if err != nil { + t.Error(err) + return + } + + go func(c net.Conn) { + _, err := cdc.MarshalBinaryLengthPrefixedWriter(c, peerNodeInfo.(DefaultNodeInfo)) + if err != nil { + t.Error(err) + } + }(c) + go func(c net.Conn) { + var ni DefaultNodeInfo + + _, err := cdc.UnmarshalBinaryLengthPrefixedReader( + c, + &ni, + int64(MaxNodeInfoSize()), + ) + if err != nil { + t.Error(err) + } + }(c) + }() + + c, err := ln.Accept() + if err != nil { + t.Fatal(err) + } + + ni, err := handshake(c, 20*time.Millisecond, emptyNodeInfo()) + if err != nil { + t.Fatal(err) + } + + if have, want := ni, peerNodeInfo; !reflect.DeepEqual(have, want) { + t.Errorf("have %v, want %v", have, want) + } +} + +func testSetupMultiplexTransport(t *testing.T) *MultiplexTransport { + var ( + pv = ed25519.GenPrivKey() + mt = NewMultiplexTransport( + testNodeInfo( + PubKeyToID(pv.PubKey()), "transport", + ), + NodeKey{ + PrivKey: pv, + }, + ) + ) + + addr, err := NewNetAddressStringWithOptionalID("127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + if err := mt.Listen(*addr); err != nil { + t.Fatal(err) + } + + return mt +} + +type testTransportAddr struct{} + +func (a *testTransportAddr) Network() string { return "tcp" } +func (a *testTransportAddr) String() string { return "test.local:1234" } + +type testTransportConn struct{} + +func (c *testTransportConn) Close() error { + return fmt.Errorf("Close() not implemented") +} + +func (c *testTransportConn) LocalAddr() net.Addr { + return &testTransportAddr{} +} + +func (c *testTransportConn) RemoteAddr() net.Addr { + return &testTransportAddr{} +} + +func (c *testTransportConn) Read(_ []byte) (int, error) { + return -1, fmt.Errorf("Read() not implemented") +} + +func (c *testTransportConn) SetDeadline(_ time.Time) error { + return fmt.Errorf("SetDeadline() not implemented") +} + +func (c *testTransportConn) SetReadDeadline(_ time.Time) error { + return fmt.Errorf("SetReadDeadline() not implemented") +} + +func (c *testTransportConn) SetWriteDeadline(_ time.Time) error { + return fmt.Errorf("SetWriteDeadline() not implemented") +} + +func (c *testTransportConn) Write(_ []byte) (int, error) { + return -1, fmt.Errorf("Write() not implemented") +} diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 31f659a43f5..d6b4c049d51 100644 --- a/p2p/trust/store.go +++ 
b/p2p/trust/store.go @@ -5,6 +5,7 @@ package trust import ( "encoding/json" + "fmt" "sync" "time" @@ -155,7 +156,7 @@ func (tms *TrustMetricStore) loadFromDB() bool { peers := make(map[string]MetricHistoryJSON) err := json.Unmarshal(bytes, &peers) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Could not unmarshal Trust Metric Store DB data: %v", err)) + cmn.PanicCrisis(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) } // If history data exists in the file, diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 2de5e790505..16a53724144 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -5,7 +5,6 @@ import ( "net" "time" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" ) @@ -19,19 +18,19 @@ func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Lis if err != nil { return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err) } - logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP)) + logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) ext, err := nat.GetExternalAddress() if err != nil { return nat, nil, nil, fmt.Errorf("External address error: %v", err) } - logger.Info(cmn.Fmt("External address: %v", ext)) + logger.Info(fmt.Sprintf("External address: %v", ext)) port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) if err != nil { return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err) } - logger.Info(cmn.Fmt("Port mapping mapped: %v", port)) + logger.Info(fmt.Sprintf("Port mapping mapped: %v", port)) // also run the listener, open for all remote addresses. listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) @@ -46,17 +45,17 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp go func() { inConn, err := listener.Accept() if err != nil { - logger.Info(cmn.Fmt("Listener.Accept() error: %v", err)) + logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err)) return } - logger.Info(cmn.Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) + logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) buf := make([]byte, 1024) n, err := inConn.Read(buf) if err != nil { - logger.Info(cmn.Fmt("Incoming connection read error: %v", err)) + logger.Info(fmt.Sprintf("Incoming connection read error: %v", err)) return } - logger.Info(cmn.Fmt("Incoming connection read %v bytes: %X", n, buf)) + logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf)) if string(buf) == "test data" { supportsHairpin = true return @@ -66,16 +65,16 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp // Establish outgoing outConn, err := net.Dial("tcp", extAddr) if err != nil { - logger.Info(cmn.Fmt("Outgoing connection dial error: %v", err)) + logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err)) return } n, err := outConn.Write([]byte("test data")) if err != nil { - logger.Info(cmn.Fmt("Outgoing connection write error: %v", err)) + logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err)) return } - logger.Info(cmn.Fmt("Outgoing connection wrote %v bytes", n)) + logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n)) // Wait for data receipt time.Sleep(1 * time.Second) @@ -96,10 +95,10 @@ func Probe(logger log.Logger) (caps UPNPCapabilities, err error) { // Deferred cleanup defer func() { if err := nat.DeletePortMapping("tcp", intPort, extPort); err != 
nil { - logger.Error(cmn.Fmt("Port mapping delete error: %v", err)) + logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) } if err := listener.Close(); err != nil { - logger.Error(cmn.Fmt("Listener closing error: %v", err)) + logger.Error(fmt.Sprintf("Listener closing error: %v", err)) } }() diff --git a/p2p/version.go b/p2p/version.go deleted file mode 100644 index 9a4c7bbaf3b..00000000000 --- a/p2p/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package p2p - -const Version = "0.5.0" diff --git a/privval/ipc.go b/privval/ipc.go new file mode 100644 index 00000000000..eda23fe6f24 --- /dev/null +++ b/privval/ipc.go @@ -0,0 +1,120 @@ +package privval + +import ( + "net" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +// IPCValOption sets an optional parameter on the SocketPV. +type IPCValOption func(*IPCVal) + +// IPCValConnTimeout sets the read and write timeout for connections +// from external signing processes. +func IPCValConnTimeout(timeout time.Duration) IPCValOption { + return func(sc *IPCVal) { sc.connTimeout = timeout } +} + +// IPCValHeartbeat sets the period on which to check the liveness of the +// connected Signer connections. +func IPCValHeartbeat(period time.Duration) IPCValOption { + return func(sc *IPCVal) { sc.connHeartbeat = period } +} + +// IPCVal implements PrivValidator, it uses a unix socket to request signatures +// from an external process. +type IPCVal struct { + cmn.BaseService + *RemoteSignerClient + + addr string + + connTimeout time.Duration + connHeartbeat time.Duration + + conn net.Conn + cancelPing chan struct{} + pingTicker *time.Ticker +} + +// Check that IPCVal implements PrivValidator. +var _ types.PrivValidator = (*IPCVal)(nil) + +// NewIPCVal returns an instance of IPCVal. +func NewIPCVal( + logger log.Logger, + socketAddr string, +) *IPCVal { + sc := &IPCVal{ + addr: socketAddr, + connTimeout: connTimeout, + connHeartbeat: connHeartbeat, + } + + sc.BaseService = *cmn.NewBaseService(logger, "IPCVal", sc) + + return sc +} + +// OnStart implements cmn.Service. +func (sc *IPCVal) OnStart() error { + err := sc.connect() + if err != nil { + sc.Logger.Error("OnStart", "err", err) + return err + } + + sc.RemoteSignerClient = NewRemoteSignerClient(sc.conn) + + // Start a routine to keep the connection alive + sc.cancelPing = make(chan struct{}, 1) + sc.pingTicker = time.NewTicker(sc.connHeartbeat) + go func() { + for { + select { + case <-sc.pingTicker.C: + err := sc.Ping() + if err != nil { + sc.Logger.Error("Ping", "err", err) + } + case <-sc.cancelPing: + sc.pingTicker.Stop() + return + } + } + }() + + return nil +} + +// OnStop implements cmn.Service. 
+func (sc *IPCVal) OnStop() { + if sc.cancelPing != nil { + close(sc.cancelPing) + } + + if sc.conn != nil { + if err := sc.conn.Close(); err != nil { + sc.Logger.Error("OnStop", "err", err) + } + } +} + +func (sc *IPCVal) connect() error { + la, err := net.ResolveUnixAddr("unix", sc.addr) + if err != nil { + return err + } + + conn, err := net.DialUnix("unix", nil, la) + if err != nil { + return err + } + + sc.conn = newTimeoutConn(conn, sc.connTimeout) + + return nil +} diff --git a/privval/ipc_server.go b/privval/ipc_server.go new file mode 100644 index 00000000000..d3907cbdbb4 --- /dev/null +++ b/privval/ipc_server.go @@ -0,0 +1,131 @@ +package privval + +import ( + "io" + "net" + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +// IPCRemoteSignerOption sets an optional parameter on the IPCRemoteSigner. +type IPCRemoteSignerOption func(*IPCRemoteSigner) + +// IPCRemoteSignerConnDeadline sets the read and write deadline for connections +// from external signing processes. +func IPCRemoteSignerConnDeadline(deadline time.Duration) IPCRemoteSignerOption { + return func(ss *IPCRemoteSigner) { ss.connDeadline = deadline } +} + +// IPCRemoteSignerConnRetries sets the amount of attempted retries to connect. +func IPCRemoteSignerConnRetries(retries int) IPCRemoteSignerOption { + return func(ss *IPCRemoteSigner) { ss.connRetries = retries } +} + +// IPCRemoteSigner is a RPC implementation of PrivValidator that listens on a unix socket. +type IPCRemoteSigner struct { + cmn.BaseService + + addr string + chainID string + connDeadline time.Duration + connRetries int + privVal types.PrivValidator + + listener *net.UnixListener +} + +// NewIPCRemoteSigner returns an instance of IPCRemoteSigner. +func NewIPCRemoteSigner( + logger log.Logger, + chainID, socketAddr string, + privVal types.PrivValidator, +) *IPCRemoteSigner { + rs := &IPCRemoteSigner{ + addr: socketAddr, + chainID: chainID, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connRetries: defaultDialRetries, + privVal: privVal, + } + + rs.BaseService = *cmn.NewBaseService(logger, "IPCRemoteSigner", rs) + + return rs +} + +// OnStart implements cmn.Service. +func (rs *IPCRemoteSigner) OnStart() error { + err := rs.listen() + if err != nil { + err = cmn.ErrorWrap(err, "listen") + rs.Logger.Error("OnStart", "err", err) + return err + } + + go func() { + for { + conn, err := rs.listener.AcceptUnix() + if err != nil { + return + } + go rs.handleConnection(conn) + } + }() + + return nil +} + +// OnStop implements cmn.Service. +func (rs *IPCRemoteSigner) OnStop() { + if rs.listener != nil { + if err := rs.listener.Close(); err != nil { + rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) + } + } +} + +func (rs *IPCRemoteSigner) listen() error { + la, err := net.ResolveUnixAddr("unix", rs.addr) + if err != nil { + return err + } + + rs.listener, err = net.ListenUnix("unix", la) + + return err +} + +func (rs *IPCRemoteSigner) handleConnection(conn net.Conn) { + for { + if !rs.IsRunning() { + return // Ignore error from listener closing. 
+ } + + // Reset the connection deadline + conn.SetDeadline(time.Now().Add(rs.connDeadline)) + + req, err := readMsg(conn) + if err != nil { + if err != io.EOF { + rs.Logger.Error("handleConnection", "err", err) + } + return + } + + res, err := handleRequest(req, rs.chainID, rs.privVal) + + if err != nil { + // only log the error; we'll reply with an error in res + rs.Logger.Error("handleConnection", "err", err) + } + + err = writeMsg(conn, res) + if err != nil { + rs.Logger.Error("handleConnection", "err", err) + return + } + } +} diff --git a/privval/ipc_test.go b/privval/ipc_test.go new file mode 100644 index 00000000000..c8d6dfc77a9 --- /dev/null +++ b/privval/ipc_test.go @@ -0,0 +1,147 @@ +package privval + +import ( + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +func TestIPCPVVote(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestIPCPVVoteResetDeadline(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestIPCPVVoteKeepalive(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupIPCSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(10 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func testSetupIPCSocketPair( + t *testing.T, + chainID string, + privValidator types.PrivValidator, +) (*IPCVal, *IPCRemoteSigner) { + addr, err := testUnixAddr() + require.NoError(t, err) + + var ( + logger = log.TestingLogger() + privVal = privValidator + readyc = make(chan struct{}) + rs = NewIPCRemoteSigner( + logger, + chainID, + addr, + privVal, + ) + sc = NewIPCVal( + logger, + addr, + ) + ) + + IPCValConnTimeout(5 * time.Millisecond)(sc) + IPCValHeartbeat(time.Millisecond)(sc) + + IPCRemoteSignerConnDeadline(time.Millisecond * 5)(rs) + + testStartIPCRemoteSigner(t, readyc, rs) + + <-readyc + + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + return sc, rs 
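// Editor's note (annotation, not part of the patch): with the 5ms connection
// timeout and 1ms heartbeat configured above, the pair only stays connected
// because every message (pings included) resets the deadline, via timeoutConn
// on the client side and the SetDeadline call in handleConnection on the
// server side. The ResetDeadline and Keepalive tests above exercise exactly
// that behaviour.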
+} + +func testStartIPCRemoteSigner(t *testing.T, readyc chan struct{}, rs *IPCRemoteSigner) { + go func(rs *IPCRemoteSigner) { + require.NoError(t, rs.Start()) + assert.True(t, rs.IsRunning()) + + readyc <- struct{}{} + }(rs) +} + +func testUnixAddr() (string, error) { + f, err := ioutil.TempFile("/tmp", "nettest") + if err != nil { + return "", err + } + + addr := f.Name() + err = f.Close() + if err != nil { + return "", err + } + err = os.Remove(addr) + if err != nil { + return "", err + } + + return addr, nil +} diff --git a/privval/priv_validator.go b/privval/priv_validator.go index a81751a9147..a13f5426b69 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) // TODO: type ? @@ -24,9 +25,9 @@ const ( func voteToStep(vote *types.Vote) int8 { switch vote.Type { - case types.VoteTypePrevote: + case types.PrevoteType: return stepPrevote - case types.VoteTypePrecommit: + case types.PrecommitType: return stepPrecommit default: cmn.PanicSanity("Unknown vote type") @@ -37,14 +38,16 @@ func voteToStep(vote *types.Vote) int8 { // FilePV implements PrivValidator using data persisted to disk // to prevent double signing. // NOTE: the directory containing the pv.filePath must already exist. +// It includes the LastSignature and LastSignBytes so we don't lose the signature +// if the process crashes after signing but before the resulting consensus message is processed. type FilePV struct { Address types.Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` LastHeight int64 `json:"last_height"` LastRound int `json:"last_round"` LastStep int8 `json:"last_step"` - LastSignature []byte `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? - LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? + LastSignature []byte `json:"last_signature,omitempty"` + LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` PrivKey crypto.PrivKey `json:"priv_key"` // For persistence. @@ -89,7 +92,7 @@ func LoadFilePV(filePath string) *FilePV { pv := &FilePV{} err = cdc.UnmarshalJSON(pvJSONBytes, &pv) if err != nil { - cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err)) + cmn.Exit(fmt.Sprintf("Error reading PrivValidator from %v: %v\n", filePath, err)) } // overwrite pubkey and address for convenience @@ -153,7 +156,7 @@ func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { pv.mtx.Lock() defer pv.mtx.Unlock() if err := pv.signVote(chainID, vote); err != nil { - return errors.New(cmn.Fmt("Error signing vote: %v", err)) + return fmt.Errorf("Error signing vote: %v", err) } return nil } @@ -310,21 +313,18 @@ func (pv *FilePV) String() string { // returns the timestamp from the lastSignBytes. // returns true if the only difference in the votes is their timestamp. 
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastVote, newVote types.CanonicalJSONVote - if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { + var lastVote, newVote types.CanonicalVote + if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastVote); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newVote); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) - if err != nil { - panic(err) - } + lastTime := lastVote.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) + now := tmtime.Now() lastVote.Timestamp = now newVote.Timestamp = now lastVoteBytes, _ := cdc.MarshalJSON(lastVote) @@ -336,25 +336,21 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T // returns the timestamp from the lastSignBytes. // returns true if the only difference in the proposals is their timestamp func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal types.CanonicalJSONProposal - if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { + var lastProposal, newProposal types.CanonicalProposal + if err := cdc.UnmarshalBinaryLengthPrefixed(lastSignBytes, &lastProposal); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { + if err := cdc.UnmarshalBinaryLengthPrefixed(newSignBytes, &newProposal); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) - if err != nil { - panic(err) - } - + lastTime := lastProposal.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) + now := tmtime.Now() lastProposal.Timestamp = now newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) - newProposalBytes, _ := cdc.MarshalJSON(newProposal) + lastProposalBytes, _ := cdc.MarshalBinaryLengthPrefixed(lastProposal) + newProposalBytes, _ := cdc.MarshalBinaryLengthPrefixed(newProposal) return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) } diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index b4f9ddbc49c..4f4eed97b37 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestGenLoadValidator(t *testing.T) { @@ -100,7 +101,7 @@ func TestSignVote(t *testing.T) { block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}} height, round := int64(10), 1 - voteType := types.VoteTypePrevote + voteType := byte(types.PrevoteType) // sign a vote for first time vote := newVote(privVal.Address, 0, height, round, voteType, block1) @@ -139,8 +140,8 @@ func TestSignProposal(t *testing.T) { require.Nil(t, err) privVal := GenFilePV(tempFile.Name()) - block1 := 
types.PartSetHeader{5, []byte{1, 2, 3}} - block2 := types.PartSetHeader{10, []byte{3, 2, 1}} + block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}} + block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{10, []byte{3, 2, 1}}} height, round := int64(10), 1 // sign a proposal for first time @@ -178,7 +179,7 @@ func TestDifferByTimestamp(t *testing.T) { require.Nil(t, err) privVal := GenFilePV(tempFile.Name()) - block1 := types.PartSetHeader{5, []byte{1, 2, 3}} + block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{5, []byte{1, 2, 3}}} height, round := int64(10), 1 chainID := "mychainid" @@ -205,7 +206,7 @@ func TestDifferByTimestamp(t *testing.T) { // test vote { - voteType := types.VoteTypePrevote + voteType := byte(types.PrevoteType) blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} vote := newVote(privVal.Address, 0, height, round, voteType, blockID) err := privVal.SignVote("mychainid", vote) @@ -234,17 +235,17 @@ func newVote(addr types.Address, idx int, height int64, round int, typ byte, blo ValidatorIndex: idx, Height: height, Round: round, - Type: typ, - Timestamp: time.Now().UTC(), + Type: types.SignedMsgType(typ), + Timestamp: tmtime.Now(), BlockID: blockID, } } -func newProposal(height int64, round int, partsHeader types.PartSetHeader) *types.Proposal { +func newProposal(height int64, round int, blockID types.BlockID) *types.Proposal { return &types.Proposal{ - Height: height, - Round: round, - BlockPartsHeader: partsHeader, - Timestamp: time.Now().UTC(), + Height: height, + Round: round, + BlockID: blockID, + Timestamp: tmtime.Now(), } } diff --git a/privval/remote_signer.go b/privval/remote_signer.go new file mode 100644 index 00000000000..eacc840c51e --- /dev/null +++ b/privval/remote_signer.go @@ -0,0 +1,303 @@ +package privval + +import ( + "fmt" + "io" + "net" + "sync" + + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" +) + +// RemoteSignerClient implements PrivValidator, it uses a socket to request signatures +// from an external process. +type RemoteSignerClient struct { + conn net.Conn + lock sync.Mutex +} + +// Check that RemoteSignerClient implements PrivValidator. +var _ types.PrivValidator = (*RemoteSignerClient)(nil) + +// NewRemoteSignerClient returns an instance of RemoteSignerClient. +func NewRemoteSignerClient( + conn net.Conn, +) *RemoteSignerClient { + sc := &RemoteSignerClient{ + conn: conn, + } + return sc +} + +// GetAddress implements PrivValidator. +func (sc *RemoteSignerClient) GetAddress() types.Address { + pubKey, err := sc.getPubKey() + if err != nil { + panic(err) + } + + return pubKey.Address() +} + +// GetPubKey implements PrivValidator. +func (sc *RemoteSignerClient) GetPubKey() crypto.PubKey { + pubKey, err := sc.getPubKey() + if err != nil { + panic(err) + } + + return pubKey +} + +func (sc *RemoteSignerClient) getPubKey() (crypto.PubKey, error) { + sc.lock.Lock() + defer sc.lock.Unlock() + + err := writeMsg(sc.conn, &PubKeyMsg{}) + if err != nil { + return nil, err + } + + res, err := readMsg(sc.conn) + if err != nil { + return nil, err + } + + return res.(*PubKeyMsg).PubKey, nil +} + +// SignVote implements PrivValidator. 
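// Editor's note (annotation, not part of the patch): each exchange below
// holds sc.lock for the whole write-request/read-response round trip, so
// concurrent callers cannot interleave frames on the single shared conn;
// the mutex is what keeps the wire protocol strictly request/response.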
+func (sc *RemoteSignerClient) SignVote(chainID string, vote *types.Vote) error {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+
+	err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+
+	resp, ok := res.(*SignedVoteResponse)
+	if !ok {
+		return ErrUnexpectedResponse
+	}
+	if resp.Error != nil {
+		return resp.Error
+	}
+	*vote = *resp.Vote
+
+	return nil
+}
+
+// SignProposal implements PrivValidator.
+func (sc *RemoteSignerClient) SignProposal(
+	chainID string,
+	proposal *types.Proposal,
+) error {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+
+	err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+	resp, ok := res.(*SignedProposalResponse)
+	if !ok {
+		return ErrUnexpectedResponse
+	}
+	if resp.Error != nil {
+		return resp.Error
+	}
+	*proposal = *resp.Proposal
+
+	return nil
+}
+
+// SignHeartbeat implements PrivValidator.
+func (sc *RemoteSignerClient) SignHeartbeat(
+	chainID string,
+	heartbeat *types.Heartbeat,
+) error {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+
+	err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+	resp, ok := res.(*SignedHeartbeatResponse)
+	if !ok {
+		return ErrUnexpectedResponse
+	}
+	if resp.Error != nil {
+		return resp.Error
+	}
+	*heartbeat = *resp.Heartbeat
+
+	return nil
+}
+
+// Ping is used to check connection health.
+func (sc *RemoteSignerClient) Ping() error {
+	sc.lock.Lock()
+	defer sc.lock.Unlock()
+
+	err := writeMsg(sc.conn, &PingRequest{})
+	if err != nil {
+		return err
+	}
+
+	res, err := readMsg(sc.conn)
+	if err != nil {
+		return err
+	}
+	_, ok := res.(*PingResponse)
+	if !ok {
+		return ErrUnexpectedResponse
+	}
+
+	return nil
+}
+
+// RemoteSignerMsg is sent between RemoteSigner and the RemoteSigner client.
+type RemoteSignerMsg interface{}
+
+func RegisterRemoteSignerMsg(cdc *amino.Codec) {
+	cdc.RegisterInterface((*RemoteSignerMsg)(nil), nil)
+	cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/remotesigner/PubKeyMsg", nil)
+	cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/remotesigner/SignVoteRequest", nil)
+	cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/remotesigner/SignedVoteResponse", nil)
+	cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/remotesigner/SignProposalRequest", nil)
+	cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/remotesigner/SignedProposalResponse", nil)
+	cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/remotesigner/SignHeartbeatRequest", nil)
+	cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/remotesigner/SignedHeartbeatResponse", nil)
+	cdc.RegisterConcrete(&PingRequest{}, "tendermint/remotesigner/PingRequest", nil)
+	cdc.RegisterConcrete(&PingResponse{}, "tendermint/remotesigner/PingResponse", nil)
+}
+
+// PubKeyMsg is a PrivValidatorSocket message containing the public key.
+type PubKeyMsg struct {
+	PubKey crypto.PubKey
+}
+
+// SignVoteRequest is a PrivValidatorSocket message containing a vote.
+type SignVoteRequest struct {
+	Vote *types.Vote
+}
+
+// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potential error message.
+type SignedVoteResponse struct { + Vote *types.Vote + Error *RemoteSignerError +} + +// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. +type SignProposalRequest struct { + Proposal *types.Proposal +} + +type SignedProposalResponse struct { + Proposal *types.Proposal + Error *RemoteSignerError +} + +// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat. +type SignHeartbeatRequest struct { + Heartbeat *types.Heartbeat +} + +type SignedHeartbeatResponse struct { + Heartbeat *types.Heartbeat + Error *RemoteSignerError +} + +// PingRequest is a PrivValidatorSocket message to keep the connection alive. +type PingRequest struct { +} + +type PingResponse struct { +} + +// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. +type RemoteSignerError struct { + // TODO(ismail): create an enum of known errors + Code int + Description string +} + +func (e *RemoteSignerError) Error() string { + return fmt.Sprintf("RemoteSigner returned error #%d: %s", e.Code, e.Description) +} + +func readMsg(r io.Reader) (msg RemoteSignerMsg, err error) { + const maxRemoteSignerMsgSize = 1024 * 10 + _, err = cdc.UnmarshalBinaryLengthPrefixedReader(r, &msg, maxRemoteSignerMsgSize) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} + +func writeMsg(w io.Writer, msg interface{}) (err error) { + _, err = cdc.MarshalBinaryLengthPrefixedWriter(w, msg) + if _, ok := err.(timeoutError); ok { + err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) + } + return +} + +func handleRequest(req RemoteSignerMsg, chainID string, privVal types.PrivValidator) (RemoteSignerMsg, error) { + var res RemoteSignerMsg + var err error + + switch r := req.(type) { + case *PubKeyMsg: + var p crypto.PubKey + p = privVal.GetPubKey() + res = &PubKeyMsg{p} + case *SignVoteRequest: + err = privVal.SignVote(chainID, r.Vote) + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + case *SignProposalRequest: + err = privVal.SignProposal(chainID, r.Proposal) + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + case *SignHeartbeatRequest: + err = privVal.SignHeartbeat(chainID, r.Heartbeat) + if err != nil { + res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedHeartbeatResponse{r.Heartbeat, nil} + } + case *PingRequest: + res = &PingResponse{} + default: + err = fmt.Errorf("unknown msg: %v", r) + } + + return res, err +} diff --git a/privval/socket.go b/privval/socket.go deleted file mode 100644 index d5ede471c2e..00000000000 --- a/privval/socket.go +++ /dev/null @@ -1,539 +0,0 @@ -package privval - -import ( - "errors" - "fmt" - "io" - "net" - "time" - - amino "github.com/tendermint/go-amino" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - p2pconn "github.com/tendermint/tendermint/p2p/conn" - "github.com/tendermint/tendermint/types" -) - -const ( - defaultAcceptDeadlineSeconds = 3 - defaultConnDeadlineSeconds = 3 - defaultConnHeartBeatSeconds = 30 - defaultConnWaitSeconds = 60 - defaultDialRetries = 10 -) - -// Socket errors. 
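// ---------------------------------------------------------------------------
// Editor's note (annotation, not part of the patch): the framing implied by
// readMsg/writeMsg above is symmetric amino length-prefix encoding against a
// codec on which RegisterRemoteSignerMsg has been called (privval/wire.go
// does this, later in the diff). A standalone sketch of what goes over the
// wire:
//
//	cdc := amino.NewCodec()
//	cryptoAmino.RegisterAmino(cdc) // key/signature types first
//	RegisterRemoteSignerMsg(cdc)
//	bz, err := cdc.MarshalBinaryLengthPrefixed(&PingRequest{})
//	// bz is the framed message; the peer decodes with the same codec and
//	// rejects anything larger than maxRemoteSignerMsgSize (10 KiB).
// ---------------------------------------------------------------------------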
-var ( - ErrDialRetryMax = errors.New("dialed maximum retries") - ErrConnWaitTimeout = errors.New("waited for remote signer for too long") - ErrConnTimeout = errors.New("remote signer timed out") -) - -var ( - acceptDeadline = time.Second * defaultAcceptDeadlineSeconds - connDeadline = time.Second * defaultConnDeadlineSeconds - connHeartbeat = time.Second * defaultConnHeartBeatSeconds -) - -// SocketPVOption sets an optional parameter on the SocketPV. -type SocketPVOption func(*SocketPV) - -// SocketPVAcceptDeadline sets the deadline for the SocketPV listener. -// A zero time value disables the deadline. -func SocketPVAcceptDeadline(deadline time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.acceptDeadline = deadline } -} - -// SocketPVConnDeadline sets the read and write deadline for connections -// from external signing processes. -func SocketPVConnDeadline(deadline time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connDeadline = deadline } -} - -// SocketPVHeartbeat sets the period on which to check the liveness of the -// connected Signer connections. -func SocketPVHeartbeat(period time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connHeartbeat = period } -} - -// SocketPVConnWait sets the timeout duration before connection of external -// signing processes are considered to be unsuccessful. -func SocketPVConnWait(timeout time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connWaitTimeout = timeout } -} - -// SocketPV implements PrivValidator, it uses a socket to request signatures -// from an external process. -type SocketPV struct { - cmn.BaseService - - addr string - acceptDeadline time.Duration - connDeadline time.Duration - connHeartbeat time.Duration - connWaitTimeout time.Duration - privKey ed25519.PrivKeyEd25519 - - conn net.Conn - listener net.Listener -} - -// Check that SocketPV implements PrivValidator. -var _ types.PrivValidator = (*SocketPV)(nil) - -// NewSocketPV returns an instance of SocketPV. -func NewSocketPV( - logger log.Logger, - socketAddr string, - privKey ed25519.PrivKeyEd25519, -) *SocketPV { - sc := &SocketPV{ - addr: socketAddr, - acceptDeadline: acceptDeadline, - connDeadline: connDeadline, - connHeartbeat: connHeartbeat, - connWaitTimeout: time.Second * defaultConnWaitSeconds, - privKey: privKey, - } - - sc.BaseService = *cmn.NewBaseService(logger, "SocketPV", sc) - - return sc -} - -// GetAddress implements PrivValidator. -func (sc *SocketPV) GetAddress() types.Address { - addr, err := sc.getAddress() - if err != nil { - panic(err) - } - - return addr -} - -// Address is an alias for PubKey().Address(). -func (sc *SocketPV) getAddress() (cmn.HexBytes, error) { - p, err := sc.getPubKey() - if err != nil { - return nil, err - } - - return p.Address(), nil -} - -// GetPubKey implements PrivValidator. -func (sc *SocketPV) GetPubKey() crypto.PubKey { - pubKey, err := sc.getPubKey() - if err != nil { - panic(err) - } - - return pubKey -} - -func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { - err := writeMsg(sc.conn, &PubKeyMsg{}) - if err != nil { - return nil, err - } - - res, err := readMsg(sc.conn) - if err != nil { - return nil, err - } - - return res.(*PubKeyMsg).PubKey, nil -} - -// SignVote implements PrivValidator. 
-func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *vote = *res.(*SignVoteMsg).Vote - - return nil -} - -// SignProposal implements PrivValidator. -func (sc *SocketPV) SignProposal( - chainID string, - proposal *types.Proposal, -) error { - err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *proposal = *res.(*SignProposalMsg).Proposal - - return nil -} - -// SignHeartbeat implements PrivValidator. -func (sc *SocketPV) SignHeartbeat( - chainID string, - heartbeat *types.Heartbeat, -) error { - err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *heartbeat = *res.(*SignHeartbeatMsg).Heartbeat - - return nil -} - -// OnStart implements cmn.Service. -func (sc *SocketPV) OnStart() error { - if err := sc.listen(); err != nil { - err = cmn.ErrorWrap(err, "failed to listen") - sc.Logger.Error( - "OnStart", - "err", err, - ) - return err - } - - conn, err := sc.waitConnection() - if err != nil { - err = cmn.ErrorWrap(err, "failed to accept connection") - sc.Logger.Error( - "OnStart", - "err", err, - ) - - return err - } - - sc.conn = conn - - return nil -} - -// OnStop implements cmn.Service. -func (sc *SocketPV) OnStop() { - if sc.conn != nil { - if err := sc.conn.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close connection") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } - - if sc.listener != nil { - if err := sc.listener.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close listener") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } -} - -func (sc *SocketPV) acceptConnection() (net.Conn, error) { - conn, err := sc.listener.Accept() - if err != nil { - if !sc.IsRunning() { - return nil, nil // Ignore error from listener closing. - } - return nil, err - - } - - conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey) - if err != nil { - return nil, err - } - - return conn, nil -} - -func (sc *SocketPV) listen() error { - ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr)) - if err != nil { - return err - } - - sc.listener = newTCPTimeoutListener( - ln, - sc.acceptDeadline, - sc.connDeadline, - sc.connHeartbeat, - ) - - return nil -} - -// waitConnection uses the configured wait timeout to error if no external -// process connects in the time period. -func (sc *SocketPV) waitConnection() (net.Conn, error) { - var ( - connc = make(chan net.Conn, 1) - errc = make(chan error, 1) - ) - - go func(connc chan<- net.Conn, errc chan<- error) { - conn, err := sc.acceptConnection() - if err != nil { - errc <- err - return - } - - connc <- conn - }(connc, errc) - - select { - case conn := <-connc: - return conn, nil - case err := <-errc: - if _, ok := err.(timeoutError); ok { - return nil, cmn.ErrorWrap(ErrConnWaitTimeout, err.Error()) - } - return nil, err - case <-time.After(sc.connWaitTimeout): - return nil, ErrConnWaitTimeout - } -} - -//--------------------------------------------------------- - -// RemoteSignerOption sets an optional parameter on the RemoteSigner. -type RemoteSignerOption func(*RemoteSigner) - -// RemoteSignerConnDeadline sets the read and write deadline for connections -// from external signing processes. 
-func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connDeadline = deadline } -} - -// RemoteSignerConnRetries sets the amount of attempted retries to connect. -func RemoteSignerConnRetries(retries int) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connRetries = retries } -} - -// RemoteSigner implements PrivValidator by dialing to a socket. -type RemoteSigner struct { - cmn.BaseService - - addr string - chainID string - connDeadline time.Duration - connRetries int - privKey ed25519.PrivKeyEd25519 - privVal types.PrivValidator - - conn net.Conn -} - -// NewRemoteSigner returns an instance of RemoteSigner. -func NewRemoteSigner( - logger log.Logger, - chainID, socketAddr string, - privVal types.PrivValidator, - privKey ed25519.PrivKeyEd25519, -) *RemoteSigner { - rs := &RemoteSigner{ - addr: socketAddr, - chainID: chainID, - connDeadline: time.Second * defaultConnDeadlineSeconds, - connRetries: defaultDialRetries, - privKey: privKey, - privVal: privVal, - } - - rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) - - return rs -} - -// OnStart implements cmn.Service. -func (rs *RemoteSigner) OnStart() error { - conn, err := rs.connect() - if err != nil { - err = cmn.ErrorWrap(err, "connect") - rs.Logger.Error("OnStart", "err", err) - return err - } - - go rs.handleConnection(conn) - - return nil -} - -// OnStop implements cmn.Service. -func (rs *RemoteSigner) OnStop() { - if rs.conn == nil { - return - } - - if err := rs.conn.Close(); err != nil { - rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) - } -} - -func (rs *RemoteSigner) connect() (net.Conn, error) { - for retries := rs.connRetries; retries > 0; retries-- { - // Don't sleep if it is the first retry. - if retries != rs.connRetries { - time.Sleep(rs.connDeadline) - } - - conn, err := cmn.Connect(rs.addr) - if err != nil { - err = cmn.ErrorWrap(err, "connection failed") - rs.Logger.Error( - "connect", - "addr", rs.addr, - "err", err, - ) - - continue - } - - if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil { - err = cmn.ErrorWrap(err, "setting connection timeout failed") - rs.Logger.Error( - "connect", - "err", err, - ) - continue - } - - conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey) - if err != nil { - err = cmn.ErrorWrap(err, "encrypting connection failed") - rs.Logger.Error( - "connect", - "err", err, - ) - - continue - } - - return conn, nil - } - - return nil, ErrDialRetryMax -} - -func (rs *RemoteSigner) handleConnection(conn net.Conn) { - for { - if !rs.IsRunning() { - return // Ignore error from listener closing. 
- } - - req, err := readMsg(conn) - if err != nil { - if err != io.EOF { - rs.Logger.Error("handleConnection", "err", err) - } - return - } - - var res SocketPVMsg - - switch r := req.(type) { - case *PubKeyMsg: - var p crypto.PubKey - p = rs.privVal.GetPubKey() - res = &PubKeyMsg{p} - case *SignVoteMsg: - err = rs.privVal.SignVote(rs.chainID, r.Vote) - res = &SignVoteMsg{r.Vote} - case *SignProposalMsg: - err = rs.privVal.SignProposal(rs.chainID, r.Proposal) - res = &SignProposalMsg{r.Proposal} - case *SignHeartbeatMsg: - err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) - res = &SignHeartbeatMsg{r.Heartbeat} - default: - err = fmt.Errorf("unknown msg: %v", r) - } - - if err != nil { - rs.Logger.Error("handleConnection", "err", err) - return - } - - err = writeMsg(conn, res) - if err != nil { - rs.Logger.Error("handleConnection", "err", err) - return - } - } -} - -//--------------------------------------------------------- - -// SocketPVMsg is sent between RemoteSigner and SocketPV. -type SocketPVMsg interface{} - -func RegisterSocketPVMsg(cdc *amino.Codec) { - cdc.RegisterInterface((*SocketPVMsg)(nil), nil) - cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) - cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) - cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) - cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) -} - -// PubKeyMsg is a PrivValidatorSocket message containing the public key. -type PubKeyMsg struct { - PubKey crypto.PubKey -} - -// SignVoteMsg is a PrivValidatorSocket message containing a vote. -type SignVoteMsg struct { - Vote *types.Vote -} - -// SignProposalMsg is a PrivValidatorSocket message containing a Proposal. -type SignProposalMsg struct { - Proposal *types.Proposal -} - -// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat. -type SignHeartbeatMsg struct { - Heartbeat *types.Heartbeat -} - -func readMsg(r io.Reader) (msg SocketPVMsg, err error) { - const maxSocketPVMsgSize = 1024 * 10 - _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} - -func writeMsg(w io.Writer, msg interface{}) (err error) { - _, err = cdc.MarshalBinaryWriter(w, msg) - if _, ok := err.(timeoutError); ok { - err = cmn.ErrorWrap(ErrConnTimeout, err.Error()) - } - return -} diff --git a/privval/socket_test.go b/privval/socket_test.go deleted file mode 100644 index 461ce3f856a..00000000000 --- a/privval/socket_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto/ed25519" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" - - p2pconn "github.com/tendermint/tendermint/p2p/conn" - "github.com/tendermint/tendermint/types" -) - -func TestSocketPVAddress(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - ) - defer sc.Stop() - defer rs.Stop() - - serverAddr := rs.privVal.GetAddress() - - clientAddr, err := sc.getAddress() - require.NoError(t, err) - - assert.Equal(t, serverAddr, clientAddr) - - // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. 
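// Editor's note (annotation, not part of the patch): the main protocol change
// visible in this deletion is error handling. The old handleConnection above
// returned on any signing error, dropping the connection, and the old
// SignVoteMsg/SignProposalMsg/SignHeartbeatMsg types carried no error field;
// the replacement Signed*Response messages carry a RemoteSignerError, so a
// signing failure is reported back to the client while the link stays up.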
- assert.Equal(t, serverAddr, sc.GetAddress()) - -} - -func TestSocketPVPubKey(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - ) - defer sc.Stop() - defer rs.Stop() - - clientKey, err := sc.getPubKey() - require.NoError(t, err) - - privKey := rs.privVal.GetPubKey() - - assert.Equal(t, privKey, clientKey) - - // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. - assert.Equal(t, privKey, sc.GetPubKey()) -} - -func TestSocketPVProposal(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - ts = time.Now() - privProposal = &types.Proposal{Timestamp: ts} - clientProposal = &types.Proposal{Timestamp: ts} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignProposal(chainID, privProposal)) - require.NoError(t, sc.SignProposal(chainID, clientProposal)) - assert.Equal(t, privProposal.Signature, clientProposal.Signature) -} - -func TestSocketPVVote(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - ts = time.Now() - vType = types.VoteTypePrecommit - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignVote(chainID, want)) - require.NoError(t, sc.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) -} - -func TestSocketPVHeartbeat(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - want = &types.Heartbeat{} - have = &types.Heartbeat{} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) - require.NoError(t, sc.SignHeartbeat(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) -} - -func TestSocketPVAcceptDeadline(t *testing.T) { - var ( - sc = NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - ed25519.GenPrivKey(), - ) - ) - defer sc.Stop() - - SocketPVAcceptDeadline(time.Millisecond)(sc) - - assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) -} - -func TestSocketPVDeadline(t *testing.T) { - var ( - addr = testFreeAddr(t) - listenc = make(chan struct{}) - sc = NewSocketPV( - log.TestingLogger(), - addr, - ed25519.GenPrivKey(), - ) - ) - - SocketPVConnDeadline(100 * time.Millisecond)(sc) - SocketPVConnWait(500 * time.Millisecond)(sc) - - go func(sc *SocketPV) { - defer close(listenc) - - require.NoError(t, sc.Start()) - - assert.True(t, sc.IsRunning()) - }(sc) - - for { - conn, err := cmn.Connect(addr) - if err != nil { - continue - } - - _, err = p2pconn.MakeSecretConnection( - conn, - ed25519.GenPrivKey(), - ) - if err == nil { - break - } - } - - <-listenc - - // Sleep to guarantee deadline has been hit. 
- time.Sleep(20 * time.Microsecond) - - _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) -} - -func TestSocketPVWait(t *testing.T) { - sc := NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - ed25519.GenPrivKey(), - ) - defer sc.Stop() - - SocketPVConnWait(time.Millisecond)(sc) - - assert.Equal(t, sc.Start().(cmn.Error).Data(), ErrConnWaitTimeout) -} - -func TestRemoteSignerRetry(t *testing.T) { - var ( - attemptc = make(chan int) - retries = 2 - ) - - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - go func(ln net.Listener, attemptc chan<- int) { - attempts := 0 - - for { - conn, err := ln.Accept() - require.NoError(t, err) - - err = conn.Close() - require.NoError(t, err) - - attempts++ - - if attempts == retries { - attemptc <- attempts - break - } - } - }(ln, attemptc) - - rs := NewRemoteSigner( - log.TestingLogger(), - cmn.RandStr(12), - ln.Addr().String(), - types.NewMockPV(), - ed25519.GenPrivKey(), - ) - defer rs.Stop() - - RemoteSignerConnDeadline(time.Millisecond)(rs) - RemoteSignerConnRetries(retries)(rs) - - assert.Equal(t, rs.Start().(cmn.Error).Data(), ErrDialRetryMax) - - select { - case attempts := <-attemptc: - assert.Equal(t, retries, attempts) - case <-time.After(100 * time.Millisecond): - t.Error("expected remote to observe connection attempts") - } -} - -func testSetupSocketPair( - t *testing.T, - chainID string, -) (*SocketPV, *RemoteSigner) { - var ( - addr = testFreeAddr(t) - logger = log.TestingLogger() - privVal = types.NewMockPV() - readyc = make(chan struct{}) - rs = NewRemoteSigner( - logger, - chainID, - addr, - privVal, - ed25519.GenPrivKey(), - ) - sc = NewSocketPV( - logger, - addr, - ed25519.GenPrivKey(), - ) - ) - - go func(sc *SocketPV) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyc <- struct{}{} - }(sc) - - RemoteSignerConnDeadline(time.Millisecond)(rs) - RemoteSignerConnRetries(1e6)(rs) - - require.NoError(t, rs.Start()) - assert.True(t, rs.IsRunning()) - - <-readyc - - return sc, rs -} - -// testFreeAddr claims a free port so we don't block on listener being ready. -func testFreeAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/privval/tcp.go b/privval/tcp.go new file mode 100644 index 00000000000..11bd833c009 --- /dev/null +++ b/privval/tcp.go @@ -0,0 +1,214 @@ +package privval + +import ( + "errors" + "net" + "time" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +const ( + defaultAcceptDeadlineSeconds = 3 + defaultConnDeadlineSeconds = 3 + defaultConnHeartBeatSeconds = 2 + defaultDialRetries = 10 +) + +// Socket errors. +var ( + ErrDialRetryMax = errors.New("dialed maximum retries") + ErrConnTimeout = errors.New("remote signer timed out") + ErrUnexpectedResponse = errors.New("received unexpected response") +) + +var ( + acceptDeadline = time.Second * defaultAcceptDeadlineSeconds + connTimeout = time.Second * defaultConnDeadlineSeconds + connHeartbeat = time.Second * defaultConnHeartBeatSeconds +) + +// TCPValOption sets an optional parameter on the SocketPV. +type TCPValOption func(*TCPVal) + +// TCPValAcceptDeadline sets the deadline for the TCPVal listener. 
+// A zero time value disables the deadline.
+func TCPValAcceptDeadline(deadline time.Duration) TCPValOption {
+	return func(sc *TCPVal) { sc.acceptDeadline = deadline }
+}
+
+// TCPValConnTimeout sets the read and write timeout for connections
+// from external signing processes.
+func TCPValConnTimeout(timeout time.Duration) TCPValOption {
+	return func(sc *TCPVal) { sc.connTimeout = timeout }
+}
+
+// TCPValHeartbeat sets the period on which to check the liveness of the
+// connected Signer connections.
+func TCPValHeartbeat(period time.Duration) TCPValOption {
+	return func(sc *TCPVal) { sc.connHeartbeat = period }
+}
+
+// TCPVal implements PrivValidator; it uses a socket to request signatures
+// from an external process.
+type TCPVal struct {
+	cmn.BaseService
+	*RemoteSignerClient
+
+	addr           string
+	acceptDeadline time.Duration
+	connTimeout    time.Duration
+	connHeartbeat  time.Duration
+	privKey        ed25519.PrivKeyEd25519
+
+	conn       net.Conn
+	listener   net.Listener
+	cancelPing chan struct{}
+	pingTicker *time.Ticker
+}
+
+// Check that TCPVal implements PrivValidator.
+var _ types.PrivValidator = (*TCPVal)(nil)
+
+// NewTCPVal returns an instance of TCPVal.
+func NewTCPVal(
+	logger log.Logger,
+	socketAddr string,
+	privKey ed25519.PrivKeyEd25519,
+) *TCPVal {
+	sc := &TCPVal{
+		addr:           socketAddr,
+		acceptDeadline: acceptDeadline,
+		connTimeout:    connTimeout,
+		connHeartbeat:  connHeartbeat,
+		privKey:        privKey,
+	}
+
+	sc.BaseService = *cmn.NewBaseService(logger, "TCPVal", sc)
+
+	return sc
+}
+
+// OnStart implements cmn.Service.
+func (sc *TCPVal) OnStart() error {
+	if err := sc.listen(); err != nil {
+		sc.Logger.Error("OnStart", "err", err)
+		return err
+	}
+
+	conn, err := sc.waitConnection()
+	if err != nil {
+		sc.Logger.Error("OnStart", "err", err)
+		return err
+	}
+
+	sc.conn = conn
+
+	sc.RemoteSignerClient = NewRemoteSignerClient(sc.conn)
+
+	// Start a routine to keep the connection alive
+	sc.cancelPing = make(chan struct{}, 1)
+	sc.pingTicker = time.NewTicker(sc.connHeartbeat)
+	go func() {
+		for {
+			select {
+			case <-sc.pingTicker.C:
+				err := sc.Ping()
+				if err != nil {
+					sc.Logger.Error(
+						"Ping",
+						"err", err,
+					)
+				}
+			case <-sc.cancelPing:
+				sc.pingTicker.Stop()
+				return
+			}
+		}
+	}()
+
+	return nil
+}
+
+// OnStop implements cmn.Service.
+func (sc *TCPVal) OnStop() {
+	if sc.cancelPing != nil {
+		close(sc.cancelPing)
+	}
+
+	if sc.conn != nil {
+		if err := sc.conn.Close(); err != nil {
+			sc.Logger.Error("OnStop", "err", err)
+		}
+	}
+
+	if sc.listener != nil {
+		if err := sc.listener.Close(); err != nil {
+			sc.Logger.Error("OnStop", "err", err)
+		}
+	}
+}
+
+func (sc *TCPVal) acceptConnection() (net.Conn, error) {
+	conn, err := sc.listener.Accept()
+	if err != nil {
+		if !sc.IsRunning() {
+			return nil, nil // Ignore error from listener closing.
+		}
+		return nil, err
+
+	}
+
+	conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+func (sc *TCPVal) listen() error {
+	ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr))
+	if err != nil {
+		return err
+	}
+
+	sc.listener = newTCPTimeoutListener(
+		ln,
+		sc.acceptDeadline,
+		sc.connTimeout,
+		sc.connHeartbeat,
+	)
+
+	return nil
+}
+
+// waitConnection waits for an external signing process to connect, returning
+// the accepted connection or any error from the listener (whose accept
+// deadline bounds how long we wait).
+func (sc *TCPVal) waitConnection() (net.Conn, error) { + var ( + connc = make(chan net.Conn, 1) + errc = make(chan error, 1) + ) + + go func(connc chan<- net.Conn, errc chan<- error) { + conn, err := sc.acceptConnection() + if err != nil { + errc <- err + return + } + + connc <- conn + }(connc, errc) + + select { + case conn := <-connc: + return conn, nil + case err := <-errc: + return nil, err + } +} diff --git a/privval/tcp_server.go b/privval/tcp_server.go new file mode 100644 index 00000000000..694023d76e2 --- /dev/null +++ b/privval/tcp_server.go @@ -0,0 +1,160 @@ +package privval + +import ( + "io" + "net" + "time" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +// RemoteSignerOption sets an optional parameter on the RemoteSigner. +type RemoteSignerOption func(*RemoteSigner) + +// RemoteSignerConnDeadline sets the read and write deadline for connections +// from external signing processes. +func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connDeadline = deadline } +} + +// RemoteSignerConnRetries sets the amount of attempted retries to connect. +func RemoteSignerConnRetries(retries int) RemoteSignerOption { + return func(ss *RemoteSigner) { ss.connRetries = retries } +} + +// RemoteSigner implements PrivValidator by dialing to a socket. +type RemoteSigner struct { + cmn.BaseService + + addr string + chainID string + connDeadline time.Duration + connRetries int + privKey ed25519.PrivKeyEd25519 + privVal types.PrivValidator + + conn net.Conn +} + +// NewRemoteSigner returns an instance of RemoteSigner. +func NewRemoteSigner( + logger log.Logger, + chainID, socketAddr string, + privVal types.PrivValidator, + privKey ed25519.PrivKeyEd25519, +) *RemoteSigner { + rs := &RemoteSigner{ + addr: socketAddr, + chainID: chainID, + connDeadline: time.Second * defaultConnDeadlineSeconds, + connRetries: defaultDialRetries, + privKey: privKey, + privVal: privVal, + } + + rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) + + return rs +} + +// OnStart implements cmn.Service. +func (rs *RemoteSigner) OnStart() error { + conn, err := rs.connect() + if err != nil { + rs.Logger.Error("OnStart", "err", err) + return err + } + + go rs.handleConnection(conn) + + return nil +} + +// OnStop implements cmn.Service. +func (rs *RemoteSigner) OnStop() { + if rs.conn == nil { + return + } + + if err := rs.conn.Close(); err != nil { + rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) + } +} + +func (rs *RemoteSigner) connect() (net.Conn, error) { + for retries := rs.connRetries; retries > 0; retries-- { + // Don't sleep if it is the first retry. 
+		if retries != rs.connRetries {
+			time.Sleep(rs.connDeadline)
+		}
+
+		conn, err := cmn.Connect(rs.addr)
+		if err != nil {
+			rs.Logger.Error(
+				"connect",
+				"addr", rs.addr,
+				"err", err,
+			)
+
+			continue
+		}
+
+		if err := conn.SetDeadline(time.Now().Add(connTimeout)); err != nil {
+			rs.Logger.Error(
+				"connect",
+				"err", err,
+			)
+			continue
+		}
+
+		conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey)
+		if err != nil {
+			rs.Logger.Error(
+				"connect",
+				"err", err,
+			)
+
+			continue
+		}
+
+		return conn, nil
+	}
+
+	return nil, ErrDialRetryMax
+}
+
+func (rs *RemoteSigner) handleConnection(conn net.Conn) {
+	for {
+		if !rs.IsRunning() {
+			return // Ignore error from listener closing.
+		}
+
+		// Reset the connection deadline
+		conn.SetDeadline(time.Now().Add(rs.connDeadline))
+
+		req, err := readMsg(conn)
+		if err != nil {
+			if err != io.EOF {
+				rs.Logger.Error("handleConnection", "err", err)
+			}
+			return
+		}
+
+		res, err := handleRequest(req, rs.chainID, rs.privVal)
+
+		if err != nil {
+			// only log the error; we'll reply with an error in res
+			rs.Logger.Error("handleConnection", "err", err)
+		}
+
+		err = writeMsg(conn, res)
+		if err != nil {
+			rs.Logger.Error("handleConnection", "err", err)
+			return
+		}
+	}
+}
diff --git a/privval/socket_tcp.go b/privval/tcp_socket.go
similarity index 59%
rename from privval/socket_tcp.go
rename to privval/tcp_socket.go
index b26db00c27d..2b17bf26eaf 100644
--- a/privval/socket_tcp.go
+++ b/privval/tcp_socket.go
@@ -24,6 +24,13 @@ type tcpTimeoutListener struct {
 	period time.Duration
 }
 
+// timeoutConn wraps a net.Conn to standardise protocol timeouts / deadline resets.
+type timeoutConn struct {
+	net.Conn
+
+	connDeadline time.Duration
+}
+
 // newTCPTimeoutListener returns an instance of tcpTimeoutListener.
 func newTCPTimeoutListener(
 	ln net.Listener,
@@ -38,6 +45,16 @@ func newTCPTimeoutListener(
 	}
 }
 
+// newTimeoutConn returns an instance of timeoutConn.
+func newTimeoutConn(
+	conn net.Conn,
+	connDeadline time.Duration) *timeoutConn {
+	return &timeoutConn{
+		conn,
+		connDeadline,
+	}
+}
+
 // Accept implements net.Listener.
 func (ln tcpTimeoutListener) Accept() (net.Conn, error) {
 	err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline))
@@ -50,17 +67,24 @@ func (ln tcpTimeoutListener) Accept() (net.Conn, error) {
 		return nil, err
 	}
 
-	if err := tc.SetDeadline(time.Now().Add(ln.connDeadline)); err != nil {
-		return nil, err
-	}
+	// Wrap the conn in our timeout wrapper
+	conn := newTimeoutConn(tc, ln.connDeadline)
 
-	if err := tc.SetKeepAlive(true); err != nil {
-		return nil, err
-	}
+	return conn, nil
+}
 
-	if err := tc.SetKeepAlivePeriod(ln.period); err != nil {
-		return nil, err
-	}
+// Read implements net.Conn.
+func (c timeoutConn) Read(b []byte) (n int, err error) {
+	// Reset deadline
+	c.Conn.SetReadDeadline(time.Now().Add(c.connDeadline))
+
+	return c.Conn.Read(b)
+}
+
+// Write implements net.Conn.
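// Editor's note (annotation, not part of the patch): resetting the deadline
// inside Read and Write replaces the TCP keep-alive configuration removed in
// Accept above; liveness is now enforced at the protocol level by the
// client's periodic PingRequest, which itself counts as traffic and extends
// the deadline. Observable behaviour, roughly (duration is illustrative):
//
//	conn := newTimeoutConn(tcpConn, 3*time.Second)
//	_, _ = conn.Read(buf) // fine while messages keep arriving
//	// after >3s of silence the next Read fails with a *net.OpError whose
//	// Op is "read", which the adjusted tcp_socket test below asserts.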
+func (c timeoutConn) Write(b []byte) (n int, err error) { + // Reset deadline + c.Conn.SetWriteDeadline(time.Now().Add(c.connDeadline)) - return tc, nil + return c.Conn.Write(b) } diff --git a/privval/socket_tcp_test.go b/privval/tcp_socket_test.go similarity index 91% rename from privval/socket_tcp_test.go rename to privval/tcp_socket_test.go index 44a673c0ca5..285e73ed5e8 100644 --- a/privval/socket_tcp_test.go +++ b/privval/tcp_socket_test.go @@ -44,13 +44,14 @@ func TestTCPTimeoutListenerConnDeadline(t *testing.T) { time.Sleep(2 * time.Millisecond) - _, err = c.Write([]byte("foo")) + msg := make([]byte, 200) + _, err = c.Read(msg) opErr, ok := err.(*net.OpError) if !ok { t.Fatalf("have %v, want *net.OpError", err) } - if have, want := opErr.Op, "write"; have != want { + if have, want := opErr.Op, "read"; have != want { t.Errorf("have %v, want %v", have, want) } }(ln) diff --git a/privval/tcp_test.go b/privval/tcp_test.go new file mode 100644 index 00000000000..6549759d07b --- /dev/null +++ b/privval/tcp_test.go @@ -0,0 +1,459 @@ +package privval + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/libs/log" + + p2pconn "github.com/tendermint/tendermint/p2p/conn" + "github.com/tendermint/tendermint/types" +) + +func TestSocketPVAddress(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + ) + defer sc.Stop() + defer rs.Stop() + + serverAddr := rs.privVal.GetAddress() + + clientAddr := sc.GetAddress() + + assert.Equal(t, serverAddr, clientAddr) + + // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. + assert.Equal(t, serverAddr, sc.GetAddress()) + +} + +func TestSocketPVPubKey(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + ) + defer sc.Stop() + defer rs.Stop() + + clientKey, err := sc.getPubKey() + require.NoError(t, err) + + privKey := rs.privVal.GetPubKey() + + assert.Equal(t, privKey, clientKey) + + // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. 
+ assert.Equal(t, privKey, sc.GetPubKey()) +} + +func TestSocketPVProposal(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + privProposal = &types.Proposal{Timestamp: ts} + clientProposal = &types.Proposal{Timestamp: ts} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignProposal(chainID, privProposal)) + require.NoError(t, sc.SignProposal(chainID, clientProposal)) + assert.Equal(t, privProposal.Signature, clientProposal.Signature) +} + +func TestSocketPVVote(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVVoteResetDeadline(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) + + // This would exceed the deadline if it was not extended by the previous message + time.Sleep(3 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVVoteKeepalive(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + ts = time.Now() + vType = types.PrecommitType + want = &types.Vote{Timestamp: ts, Type: vType} + have = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + time.Sleep(10 * time.Millisecond) + + require.NoError(t, rs.privVal.SignVote(chainID, want)) + require.NoError(t, sc.SignVote(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVHeartbeat(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) + + want = &types.Heartbeat{} + have = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() + + require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) + require.NoError(t, sc.SignHeartbeat(chainID, have)) + assert.Equal(t, want.Signature, have.Signature) +} + +func TestSocketPVDeadline(t *testing.T) { + var ( + addr = testFreeAddr(t) + listenc = make(chan struct{}) + sc = NewTCPVal( + log.TestingLogger(), + addr, + ed25519.GenPrivKey(), + ) + ) + + TCPValConnTimeout(100 * time.Millisecond)(sc) + + go func(sc *TCPVal) { + defer close(listenc) + + require.NoError(t, sc.Start()) + + assert.True(t, sc.IsRunning()) + }(sc) + + for { + conn, err := cmn.Connect(addr) + if err != nil { + continue + } + + _, err = p2pconn.MakeSecretConnection( + conn, + ed25519.GenPrivKey(), + ) + if err == nil { + break + } + } + + <-listenc + + _, err := sc.getPubKey() + assert.Equal(t, err.(cmn.Error).Data(), ErrConnTimeout) +} + +func TestRemoteSignerRetry(t *testing.T) { + var ( + attemptc = make(chan int) + 
retries = 2 + ) + + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + + go func(ln net.Listener, attemptc chan<- int) { + attempts := 0 + + for { + conn, err := ln.Accept() + require.NoError(t, err) + + err = conn.Close() + require.NoError(t, err) + + attempts++ + + if attempts == retries { + attemptc <- attempts + break + } + } + }(ln, attemptc) + + rs := NewRemoteSigner( + log.TestingLogger(), + cmn.RandStr(12), + ln.Addr().String(), + types.NewMockPV(), + ed25519.GenPrivKey(), + ) + defer rs.Stop() + + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(retries)(rs) + + assert.Equal(t, rs.Start(), ErrDialRetryMax) + + select { + case attempts := <-attemptc: + assert.Equal(t, retries, attempts) + case <-time.After(100 * time.Millisecond): + t.Error("expected remote to observe connection attempts") + } +} + +func TestRemoteSignVoteErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + vType = types.PrecommitType + vote = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedVoteResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignVote(chainID, vote) + require.Error(t, err) + err = sc.SignVote(chainID, vote) + require.Error(t, err) +} + +func TestRemoteSignProposalErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + proposal = &types.Proposal{Timestamp: ts} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedProposalResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignProposal(chainID, proposal) + require.Error(t, err) + + err = sc.SignProposal(chainID, proposal) + require.Error(t, err) +} + +func TestRemoteSignHeartbeatErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + hb = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: hb}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedHeartbeatResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignHeartbeat(chainID, hb) + require.Error(t, err) + + err = sc.SignHeartbeat(chainID, hb) + require.Error(t, err) +} + +func TestErrUnexpectedResponse(t *testing.T) { + var ( + addr = testFreeAddr(t) + logger = log.TestingLogger() + chainID = cmn.RandStr(12) + readyc = make(chan struct{}) + errc = make(chan error, 1) + + rs = NewRemoteSigner( + logger, + chainID, + addr, + types.NewMockPV(), + ed25519.GenPrivKey(), + ) + sc = NewTCPVal( + logger, + addr, + ed25519.GenPrivKey(), + ) + ) + + testStartSocketPV(t, readyc, sc) + defer sc.Stop() + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(1e6)(rs) + + // we do not want to Start() the remote signer here and instead use the 
connection to + // reply with intentionally wrong replies below: + rsConn, err := rs.connect() + defer rsConn.Close() + require.NoError(t, err) + require.NotNil(t, rsConn) + <-readyc + + // Heartbeat: + go func(errc chan error) { + errc <- sc.SignHeartbeat(chainID, &types.Heartbeat{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Proposal: + go func(errc chan error) { + errc <- sc.SignProposal(chainID, &types.Proposal{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Vote: + go func(errc chan error) { + errc <- sc.SignVote(chainID, &types.Vote{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) +} + +func testSetupSocketPair( + t *testing.T, + chainID string, + privValidator types.PrivValidator, +) (*TCPVal, *RemoteSigner) { + var ( + addr = testFreeAddr(t) + logger = log.TestingLogger() + privVal = privValidator + readyc = make(chan struct{}) + rs = NewRemoteSigner( + logger, + chainID, + addr, + privVal, + ed25519.GenPrivKey(), + ) + sc = NewTCPVal( + logger, + addr, + ed25519.GenPrivKey(), + ) + ) + + TCPValConnTimeout(5 * time.Millisecond)(sc) + TCPValHeartbeat(2 * time.Millisecond)(sc) + RemoteSignerConnDeadline(5 * time.Millisecond)(rs) + RemoteSignerConnRetries(1e6)(rs) + + testStartSocketPV(t, readyc, sc) + + require.NoError(t, rs.Start()) + assert.True(t, rs.IsRunning()) + + <-readyc + + return sc, rs +} + +func testReadWriteResponse(t *testing.T, resp RemoteSignerMsg, rsConn net.Conn) { + _, err := readMsg(rsConn) + require.NoError(t, err) + + err = writeMsg(rsConn, resp) + require.NoError(t, err) +} + +func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *TCPVal) { + go func(sc *TCPVal) { + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + readyc <- struct{}{} + }(sc) +} + +// testFreeAddr claims a free port so we don't block on listener being ready. 
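// The idiom behind it: bind to port 0 so the kernel assigns an unused port,
// then close the listener so the address is free for the code under test.
// There is an inherent race (another process could grab the port between the
// Close and the next Listen), which is acceptable for tests. A minimal
// standalone sketch of the same idiom, assuming only `import "net"`; the
// helper name freeTCPAddr is hypothetical:
//
//	func freeTCPAddr() (string, error) {
//		ln, err := net.Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			return "", err
//		}
//		defer ln.Close()
//		// ln.Addr() carries the concrete port the kernel picked.
//		return ln.Addr().String(), nil
//	}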
+func testFreeAddr(t *testing.T) string { + ln, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer ln.Close() + + return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) +} diff --git a/privval/wire.go b/privval/wire.go index 50660ff34f3..2e11677e417 100644 --- a/privval/wire.go +++ b/privval/wire.go @@ -9,5 +9,5 @@ var cdc = amino.NewCodec() func init() { cryptoAmino.RegisterAmino(cdc) - RegisterSocketPVMsg(cdc) + RegisterRemoteSignerMsg(cdc) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 44936056aea..ca98f1be43b 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -1,6 +1,7 @@ package proxy import ( + "fmt" "strings" "testing" @@ -45,7 +46,7 @@ func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er var SOCKET = "socket" func TestEcho(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server @@ -70,7 +71,7 @@ func TestEcho(t *testing.T) { t.Log("Connected") for i := 0; i < 1000; i++ { - proxy.EchoAsync(cmn.Fmt("echo-%v", i)) + proxy.EchoAsync(fmt.Sprintf("echo-%v", i)) } if err := proxy.FlushSync(); err != nil { t.Error(err) @@ -79,7 +80,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server @@ -118,7 +119,7 @@ func BenchmarkEcho(b *testing.B) { } func TestInfo(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server @@ -142,7 +143,7 @@ func TestInfo(t *testing.T) { proxy := NewAppConnTest(cli) t.Log("Connected") - resInfo, err := proxy.InfoSync(types.RequestInfo{Version: ""}) + resInfo, err := proxy.InfoSync(RequestInfo) if err != nil { t.Errorf("Unexpected error: %v", err) } diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 279fa42eedd..b5897d8a5c0 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -17,26 +17,19 @@ type AppConns interface { Query() AppConnQuery } -func NewAppConns(clientCreator ClientCreator, handshaker Handshaker) AppConns { - return NewMultiAppConn(clientCreator, handshaker) +func NewAppConns(clientCreator ClientCreator) AppConns { + return NewMultiAppConn(clientCreator) } //----------------------------- // multiAppConn implements AppConns -type Handshaker interface { - Handshake(AppConns) error -} - // a multiAppConn is made of a few appConns (mempool, consensus, query) -// and manages their underlying abci clients, including the handshake -// which ensures the app and tendermint are synced. 
+// and manages their underlying abci clients // TODO: on app restart, clients must reboot together type multiAppConn struct { cmn.BaseService - handshaker Handshaker - mempoolConn *appConnMempool consensusConn *appConnConsensus queryConn *appConnQuery @@ -45,9 +38,8 @@ type multiAppConn struct { } // Make all necessary abci connections to the application -func NewMultiAppConn(clientCreator ClientCreator, handshaker Handshaker) *multiAppConn { +func NewMultiAppConn(clientCreator ClientCreator) *multiAppConn { multiAppConn := &multiAppConn{ - handshaker: handshaker, clientCreator: clientCreator, } multiAppConn.BaseService = *cmn.NewBaseService(nil, "multiAppConn", multiAppConn) @@ -103,10 +95,5 @@ func (app *multiAppConn) OnStart() error { } app.consensusConn = NewAppConnConsensus(concli) - // ensure app is synced to the latest state - if app.handshaker != nil { - return app.handshaker.Handshake(app) - } - return nil } diff --git a/proxy/version.go b/proxy/version.go new file mode 100644 index 00000000000..fb506e65995 --- /dev/null +++ b/proxy/version.go @@ -0,0 +1,15 @@ +package proxy + +import ( + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/version" +) + +// RequestInfo contains all the information for sending +// the abci.RequestInfo message during handshake with the app. +// It contains only compile-time version information. +var RequestInfo = abci.RequestInfo{ + Version: version.Version, + BlockVersion: version.BlockProtocol.Uint64(), + P2PVersion: version.P2PProtocol.Uint64(), +} diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 79c452fc937..da4625d51d5 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -8,9 +8,9 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) var waitForEventTimeout = 5 * time.Second diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 4b85bf01d77..a1b59ffa0c1 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -7,11 +7,11 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) /* @@ -75,7 +75,7 @@ func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuer func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.rpc.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { return nil, errors.Wrap(err, "ABCIQuery") diff --git a/rpc/client/interface.go b/rpc/client/interface.go index f939c855bf8..f34410c5bb8 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -21,9 +21,9 @@ implementation. 
*/ import ( + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIClient groups together the functionality that principally diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index b3c5e3090b8..8d89b7150f4 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -61,7 +61,7 @@ func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 4502c087aef..e63d22e0ca3 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -2,11 +2,11 @@ package mock import ( abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIApp will send all abci related request to the named app, @@ -23,7 +23,7 @@ var ( ) func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{Version: version.Version})}, nil + return &ctypes.ResultABCIInfo{a.App.Info(proxy.RequestInfo)}, nil } func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { @@ -31,10 +31,18 @@ func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQu } func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{Data: data, Path: path, Height: opts.Height, Prove: opts.Trusted}) + q := a.App.Query(abci.RequestQuery{ + Data: data, + Path: path, + Height: opts.Height, + Prove: opts.Prove, + }) return &ctypes.ResultABCIQuery{q}, nil } +// NOTE: Caller should call a.App.Commit() separately, +// this function does not actually wait for a commit. +// TODO: Make it wait for a commit and set res.Height appropriately. 
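//
// In caller terms the intended contract is roughly the following (a hedged
// sketch; `app` stands for a mock.ABCIApp value, and Height == -1 is the
// sentinel meaning "no real commit happened"):
//
//	res, err := app.BroadcastTxCommit(tx)
//	if err != nil || res.CheckTx.IsErr() {
//		// rejected before delivery; nothing to commit
//	}
//	if res.Height == -1 {
//		app.App.Commit() // the mock did not commit, so the caller drives it
//	}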
func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(tx) @@ -42,6 +50,7 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit return &res, nil } res.DeliverTx = a.App.DeliverTx(tx) + res.Height = -1 // TODO return &res, nil } @@ -86,7 +95,7 @@ func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQ } func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted}) + res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err } @@ -133,10 +142,10 @@ func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { } type QueryArgs struct { - Path string - Data cmn.HexBytes - Height int64 - Trusted bool + Path string + Data cmn.HexBytes + Height int64 + Prove bool } func (r *ABCIRecorder) addCall(call Call) { @@ -161,7 +170,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts res, err := r.Client.ABCIQueryWithOptions(path, data, opts) r.addCall(Call{ Name: "abci_query", - Args: QueryArgs{path, data, opts.Height, opts.Trusted}, + Args: QueryArgs{path, data, opts.Height, opts.Prove}, Response: res, Error: err, }) diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index bcf443cf04b..ca220c84e31 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -11,11 +11,11 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) func TestABCIMock(t *testing.T) { @@ -51,7 +51,7 @@ func TestABCIMock(t *testing.T) { assert.Equal("foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response require.Nil(err) require.NotNil(query) @@ -98,7 +98,7 @@ func TestABCIRecorder(t *testing.T) { _, err := r.ABCIInfo() assert.Nil(err, "expected no err on info") - _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) + _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Prove: false}) assert.NotNil(err, "expected error on query") require.Equal(2, len(r.Calls)) @@ -122,7 +122,7 @@ func TestABCIRecorder(t *testing.T) { require.True(ok) assert.Equal("path", qa.Path) assert.EqualValues("data", qa.Data) - assert.False(qa.Trusted) + assert.False(qa.Prove) // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} @@ -173,9 +173,17 @@ func TestABCIApp(t *testing.T) { require.NotNil(res.DeliverTx) assert.True(res.DeliverTx.IsOK()) + // commit + // TODO: This may not be necessary in the future + if res.Height == -1 { + m.App.Commit() + } + // check the key - _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Trusted: true}) + _qres, err := m.ABCIQueryWithOptions("/key", 
cmn.HexBytes(key), client.ABCIQueryOptions{Prove: true}) qres := _qres.Response require.Nil(err) assert.EqualValues(value, qres.Value) + + // XXX Check proof } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 955df62774d..ef2d4f19706 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -1,3 +1,5 @@ +package mock + /* package mock returns a Client implementation that accepts various (mock) implementations of the various methods. @@ -11,16 +13,15 @@ For real clients, you probably want the "http" package. If you want to directly call a tendermint node in process, you can use the "local" package. */ -package mock import ( "reflect" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) // Client wraps arbitrary implementations of the various interfaces. @@ -87,7 +88,7 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 8e3c1506125..fa64c6a837f 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -6,9 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" - cmn "github.com/tendermint/tendermint/libs/common" ) func TestStatus(t *testing.T) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index e7e9042a78a..602525b51fb 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -166,10 +166,10 @@ func TestAppCalls(t *testing.T) { if err := client.WaitForHeight(c, apph, nil); err != nil { t.Error(err) } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) + _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false}) qres := _qres.Response if assert.Nil(err) && assert.True(qres.IsOK()) { - // assert.Equal(k, data.GetKey()) // only returned for proofs + assert.Equal(k, qres.Key) assert.EqualValues(v, qres.Value) } @@ -221,10 +221,12 @@ func TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommit, commit2.Commit) // and we got a proof that works! 
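// (Two related API changes meet in this hunk, sketched for orientation: the
// query option is now stated directly, so Prove: true requests a Merkle proof
// where the old API used Trusted: false; and the proof verifier changed from
// a bool-returning Verify(index, total, leafHash, rootHash) to an
// error-returning Verify(rootHash, leaf), which is why the assertions below
// switch from assert.True to assert.NoError:
//
//	err := proof.Proof.Verify(proof.RootHash, txHash)
//	assert.NoError(t, err)
//
// The parameter names index/total/leafHash/leaf are descriptive labels, not
// taken from the source.)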
- _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) + _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true}) pres := _pres.Response assert.Nil(err) assert.True(pres.IsOK()) + + // XXX Test proof } } @@ -242,7 +244,7 @@ func TestBroadcastTxSync(t *testing.T) { require.Equal(initMempoolSize+1, mempool.Size()) - txs := mempool.Reap(1) + txs := mempool.ReapMaxTxs(len(tx)) require.EqualValues(tx, txs[0]) mempool.Flush() } @@ -310,7 +312,7 @@ func TestTx(t *testing.T) { // time to verify the proof proof := ptx.Proof if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } } } @@ -348,16 +350,21 @@ func TestTxSearch(t *testing.T) { // time to verify the proof proof := ptx.Proof if assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } + // query by height + result, err = c.TxSearch(fmt.Sprintf("tx.height >= %d", txHeight), true, 1, 30) + require.Nil(t, err, "%+v", err) + require.Len(t, result.Txs, 1) + // we query for non existing tx result, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, 1, 30) require.Nil(t, err, "%+v", err) require.Len(t, result.Txs, 0) // we query using a tag (see kvstore application) - result, err = c.TxSearch("app.creator='jae'", false, 1, 30) + result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30) require.Nil(t, err, "%+v", err) if len(result.Txs) == 0 { t.Fatal("expected a lot of transactions") diff --git a/rpc/client/types.go b/rpc/client/types.go index 89bd2f98c48..6a23fa4509d 100644 --- a/rpc/client/types.go +++ b/rpc/client/types.go @@ -3,10 +3,9 @@ package client // ABCIQueryOptions can be used to provide options for ABCIQuery call other // than the DefaultABCIQueryOptions. type ABCIQueryOptions struct { - Height int64 - Trusted bool + Height int64 + Prove bool } -// DefaultABCIQueryOptions are latest height (0) and trusted equal to false -// (which will result in a proof being returned). -var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} +// DefaultABCIQueryOptions are latest height (0) and prove false. +var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 3f399be80fd..2468a5f05cf 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -1,18 +1,16 @@ package core import ( - "fmt" - abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/version" ) // Query the application for some information. 
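//
// Note the semantic inversion relative to the removed `trusted` flag: the old
// handler sent Prove: !trusted (so trusted=true suppressed the Merkle proof),
// while the new `prove` flag is passed through unchanged:
//
//	old: Prove = !trusted
//	new: Prove = prove
//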
// // ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' +// curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false' // ``` // // ```go @@ -28,12 +26,12 @@ import ( // "result": { // "response": { // "log": "exists", -// "height": 0, +// "height": "0", // "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", // "value": "61626364", // "key": "61626364", -// "index": -1, -// "code": 0 +// "index": "-1", +// "code": "0" // } // }, // "id": "", @@ -47,18 +45,14 @@ import ( // |-----------+--------+---------+----------+------------------------------------------------| // | path | string | false | false | Path to the data ("/a/b/c") | // | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | trusted | bool | false | false | Does not include a proof of the data inclusion | -func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { - if height < 0 { - return nil, fmt.Errorf("height must be non-negative") - } - +// | height | int64 | 0 | false | Height (0 means latest) | +// | prove | bool | false | false | Includes proof if true | +func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, Height: height, - Prove: !trusted, + Prove: prove, }) if err != nil { return nil, err @@ -93,7 +87,7 @@ func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*cty // } // ``` func ABCIInfo() (*ctypes.ResultABCIInfo, error) { - resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{Version: version.Version}) + resInfo, err := proxyAppQuery.InfoSync(proxy.RequestInfo) if err != nil { return nil, err } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 671250db847..a9252f5538e 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -32,13 +32,13 @@ import ( // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -49,13 +49,13 @@ import ( // "block_id": { // "parts": { // "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // } // } // ], -// "last_height": 5493 +// "last_height": "5493" // }, // "id": "", // "jsonrpc": "2.0" @@ -143,21 +143,21 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, -// "type": 2, -// "round": 0, -// "height": 9, -// "validator_index": 0, +// "type": "2", +// "round": "0", +// "height": "9", +// "validator_index": "0", // "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], // "blockID": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // } @@ -168,13 +168,13 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { 
// "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -187,13 +187,13 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 10, +// "height": "10", // "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 +// "total": "1" // }, // "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" // }, @@ -204,7 +204,7 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // "block_id": { // "parts": { // "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // } @@ -255,21 +255,21 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // "block_id": { // "parts": { // "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 +// "total": "1" // }, // "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" // }, -// "type": 2, -// "round": 0, -// "height": 11, -// "validator_index": 0, +// "type": "2", +// "round": "0", +// "height": "11", +// "validator_index": "0", // "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], // "blockID": { // "parts": { // "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 +// "total": "1" // }, // "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" // } @@ -277,13 +277,13 @@ func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { // "header": { // "app_hash": "", // "chain_id": "test-chain-6UTNIN", -// "height": 11, +// "height": "11", // "time": "2017-05-29T15:05:54.893Z", -// "num_txs": 0, +// "num_txs": "0", // "last_block_id": { // "parts": { // "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 +// "total": "1" // }, // "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" // }, @@ -337,14 +337,14 @@ func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { // // ```json // { -// "height": 10, +// "height": "10", // "results": [ // { -// "code": 0, +// "code": "0", // "data": "CAFE00F00D" // }, // { -// "code": 102, +// "code": "102", // "data": "" // } // ] @@ -370,16 +370,16 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { return res, nil } -func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { return 0, fmt.Errorf("Height must be greater than 0") } - if height > storeHeight { + if height > currentHeight { return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") } return height, nil } - return storeHeight, nil + return currentHeight, nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index c026cd91f49..1c2619d5c1d 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -2,7 +2,6 @@ package core import ( cm "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" 
"github.com/tendermint/tendermint/types" @@ -28,8 +27,8 @@ import ( // "result": { // "validators": [ // { -// "accum": 0, -// "voting_power": 10, +// "accum": "0", +// "voting_power": "10", // "pub_key": { // "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", // "type": "ed25519" @@ -37,15 +36,17 @@ import ( // "address": "E89A51D60F68385E09E716D353373B11F8FACD62" // } // ], -// "block_height": 5241 +// "block_height": "5241" // }, // "id": "", // "jsonrpc": "2.0" // } // ``` func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + // The latest validator that we know is the + // NextValidator of the last block. + height := consensusState.GetState().LastBlockHeight + 1 + height, err := getHeight(height, heightPtr) if err != nil { return nil, err } @@ -77,9 +78,9 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "id": "", // "result": { // "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, +// "height": "7185", +// "round": "0", +// "step": "1", // "start_time": "2018-05-12T13:57:28.440293621-07:00", // "commit_time": "2018-05-12T13:57:27.440293621-07:00", // "validators": { @@ -90,8 +91,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // ], // "proposer": { @@ -100,27 +101,27 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // }, // "proposal": null, // "proposal_block": null, // "proposal_block_parts": null, -// "locked_round": 0, +// "locked_round": "0", // "locked_block": null, // "locked_block_parts": null, -// "valid_round": 0, +// "valid_round": "0", // "valid_block": null, // "valid_block_parts": null, // "votes": [ // { -// "round": 0, +// "round": "0", // "prevotes": "_", // "precommits": "_" // } // ], -// "commit_round": -1, +// "commit_round": "-1", // "last_commit": { // "votes": [ // "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" @@ -136,8 +137,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // ], // "proposer": { @@ -146,8 +147,8 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "type": "tendermint/PubKeyEd25519", // "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" // }, -// "voting_power": 10, -// "accum": 0 +// "voting_power": "10", +// "accum": "0" // } // } // }, @@ -156,30 +157,30 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", // "peer_state": { // "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, +// "height": "7185", +// "round": "0", +// "step": "1", // "start_time": "2018-05-12T13:57:27.438039872-07:00", // "proposal": false, // "proposal_block_parts_header": { -// "total": 0, +// "total": "0", // "hash": "" // }, // "proposal_block_parts": 
null, -// "proposal_pol_round": -1, +// "proposal_pol_round": "-1", // "proposal_pol": "_", // "prevotes": "_", // "precommits": "_", -// "last_commit_round": 0, +// "last_commit_round": "0", // "last_commit": "x", -// "catchup_commit_round": -1, +// "catchup_commit_round": "-1", // "catchup_commit": "_" // }, // "stats": { -// "last_vote_height": 7184, -// "votes": 255, -// "last_block_part_height": 7184, -// "block_parts": 255 +// "last_vote_height": "7184", +// "votes": "255", +// "last_block_part_height": "7184", +// "block_parts": "255" // } // } // } @@ -189,7 +190,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { // ``` func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { // Get Peer consensus states. - peers := p2pSwitch.Peers().List() + peers := p2pPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) for i, peer := range peers { peerState := peer.Get(types.PeerStateKey).(*cm.PeerState) @@ -199,7 +200,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { } peerStates[i] = ctypes.PeerStateInfo{ // Peer basic info. - NodeAddress: p2p.IDAddressString(peer.ID(), peer.NodeInfo().ListenAddr), + NodeAddress: peer.NodeInfo().NetAddress().String(), // Peer consensus state. PeerState: peerStateJSON, } @@ -239,7 +240,7 @@ func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { // "valid_block_hash": "", // "height_vote_set": [ // { -// "round": 0, +// "round": "0", // "prevotes": [ // "nil-Vote" // ], @@ -259,3 +260,49 @@ func ConsensusState() (*ctypes.ResultConsensusState, error) { bz, err := consensusState.GetRoundStateSimpleJSON() return &ctypes.ResultConsensusState{bz}, err } + +// Get the consensus parameters at the given block height. +// If no height is provided, it will fetch the current consensus params. +// +// ```shell +// curl 'localhost:26657/consensus_params' +// ``` +// +// ```go +// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") +// state, err := client.ConsensusParams() +// ``` +// +// The above command returns JSON structured like this: +// +// ```json +// { +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "block_height": "1", +// "consensus_params": { +// "block_size_params": { +// "max_txs_bytes": "22020096", +// "max_gas": "-1" +// }, +// "evidence_params": { +// "max_age": "100000" +// } +// } +// } +// } +// ``` +func ConsensusParams(heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + height := consensusState.GetState().LastBlockHeight + 1 + height, err := getHeight(height, heightPtr) + if err != nil { + return nil, err + } + + consensusparams, err := sm.LoadConsensusParams(stateDB, height) + if err != nil { + return nil, err + } + return &ctypes.ResultConsensusParams{BlockHeight: height, ConsensusParams: consensusparams}, nil +} diff --git a/rpc/core/doc.go b/rpc/core/doc.go index 75f6ac82e61..5378dde24c7 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -7,7 +7,8 @@ Tendermint supports the following RPC protocols: * JSONRPC over HTTP * JSONRPC over websockets -Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib) which contains its own set of documentation and tests. +Tendermint RPC is built using our own RPC library which contains its own set of documentation and tests. 
+See it here: https://github.com/tendermint/tendermint/tree/master/rpc/lib ## Configuration @@ -32,7 +33,7 @@ curl 'localhost:26657/broadcast_tx_sync?tx="abc"' "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", "log": "", "data": "", - "code": 0 + "code": "0" }, "id": "", "jsonrpc": "2.0" diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index ecc41ce12b2..c015363af22 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -8,9 +8,9 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- @@ -36,7 +36,7 @@ import ( // "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, // "id": "", // "jsonrpc": "2.0" @@ -74,7 +74,7 @@ func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // "jsonrpc": "2.0", // "id": "", // "result": { -// "code": 0, +// "code": "0", // "data": "", // "log": "", // "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" @@ -126,17 +126,17 @@ func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { // { // "error": "", // "result": { -// "height": 26682, +// "height": "26682", // "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", // "deliver_tx": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, // "check_tx": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // } // }, // "id": "", @@ -227,7 +227,7 @@ func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { // "error": "", // "result": { // "txs": [], -// "n_txs": 0 +// "n_txs": "0" // }, // "id": "", // "jsonrpc": "2.0" @@ -243,7 +243,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit = validatePerPage(limit) - txs := mempool.Reap(limit) + txs := mempool.ReapMaxTxs(limit) return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil } @@ -265,7 +265,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // "error": "", // "result": { // "txs": null, -// "n_txs": 0 +// "n_txs": "0" // }, // "id": "", // "jsonrpc": "2.0" diff --git a/rpc/core/net.go b/rpc/core/net.go index ba9753d81a9..dbd4d8c0bce 100644 --- a/rpc/core/net.go +++ b/rpc/core/net.go @@ -1,8 +1,11 @@ package core import ( + "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" ) @@ -23,7 +26,7 @@ import ( // { // "error": "", // "result": { -// "n_peers": 0, +// "n_peers": "0", // "peers": [], // "listeners": [ // "Listener(@10.0.2.15:26656)" @@ -35,15 +38,14 @@ import ( // } // ``` func NetInfo() (*ctypes.ResultNetInfo, error) { - listening := p2pSwitch.IsListening() - listeners := []string{} - for _, listener := range p2pSwitch.Listeners() { - listeners = append(listeners, listener.String()) - } peers := []ctypes.Peer{} - for _, peer := range p2pSwitch.Peers().List() { + for _, peer := range p2pPeers.Peers().List() { + nodeInfo, ok := peer.NodeInfo().(p2p.DefaultNodeInfo) + if !ok { + return nil, fmt.Errorf("peer.NodeInfo() is not DefaultNodeInfo") + } peers = append(peers, ctypes.Peer{ - NodeInfo: peer.NodeInfo(), + NodeInfo: nodeInfo, IsOutbound: peer.IsOutbound(), ConnectionStatus: peer.Status(), }) @@ 
-52,8 +54,8 @@ func NetInfo() (*ctypes.ResultNetInfo, error) { // PRO: useful info // CON: privacy return &ctypes.ResultNetInfo{ - Listening: listening, - Listeners: listeners, + Listening: p2pTransport.IsListening(), + Listeners: p2pTransport.Listeners(), NPeers: len(peers), Peers: peers, }, nil @@ -65,7 +67,7 @@ func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { } // starts go routines to dial each peer after random delays logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds) - err := p2pSwitch.DialPeersAsync(addrBook, seeds, false) + err := p2pPeers.DialPeersAsync(addrBook, seeds, false) if err != nil { return &ctypes.ResultDialSeeds{}, err } @@ -78,7 +80,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, } // starts go routines to dial each peer after random delays logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent) - err := p2pSwitch.DialPeersAsync(addrBook, peers, persistent) + err := p2pPeers.DialPeersAsync(addrBook, peers, persistent) if err != nil { return &ctypes.ResultDialPeers{}, err } @@ -107,7 +109,7 @@ func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, // "validators": [ // { // "name": "", -// "power": 10, +// "power": "10", // "pub_key": { // "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", // "type": "ed25519" diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 128b3e9a739..188ea1c3679 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -5,13 +5,14 @@ import ( "github.com/tendermint/tendermint/consensus" crypto "github.com/tendermint/tendermint/crypto" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/log" ) const ( @@ -28,17 +29,21 @@ var subscribeTimeout = 5 * time.Second type Consensus interface { GetState() sm.State GetValidators() (int64, []*types.Validator) + GetLastHeight() int64 GetRoundStateJSON() ([]byte, error) GetRoundStateSimpleJSON() ([]byte, error) } -type P2P interface { - Listeners() []p2p.Listener - Peers() p2p.IPeerSet - NumPeers() (outbound, inbound, dialig int) - NodeInfo() p2p.NodeInfo +type transport interface { + Listeners() []string IsListening() bool + NodeInfo() p2p.NodeInfo +} + +type peers interface { DialPeersAsync(p2p.AddrBook, []string, bool) error + NumPeers() (outbound, inbound, dialig int) + Peers() p2p.IPeerSet } //---------------------------------------------- @@ -52,10 +57,10 @@ var ( // interfaces defined in types and above stateDB dbm.DB blockStore sm.BlockStore - mempool sm.Mempool evidencePool sm.EvidencePool consensusState Consensus - p2pSwitch P2P + p2pPeers peers + p2pTransport transport // objects pubKey crypto.PubKey @@ -64,6 +69,7 @@ var ( txIndexer txindex.TxIndexer consensusReactor *consensus.ConsensusReactor eventBus *types.EventBus // thread safe + mempool *mempl.Mempool logger log.Logger ) @@ -76,7 +82,7 @@ func SetBlockStore(bs sm.BlockStore) { blockStore = bs } -func SetMempool(mem sm.Mempool) { +func SetMempool(mem *mempl.Mempool) { mempool = mem } @@ -88,8 +94,12 @@ func SetConsensusState(cs Consensus) { 
consensusState = cs } -func SetSwitch(sw P2P) { - p2pSwitch = sw +func SetP2PPeers(p peers) { + p2pPeers = p +} + +func SetP2PTransport(t transport) { + p2pTransport = t } func SetPubKey(pk crypto.PubKey) { diff --git a/rpc/core/routes.go b/rpc/core/routes.go index f26fadb62a7..736ded60787 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -26,6 +26,7 @@ var Routes = map[string]*rpc.RPCFunc{ "validators": rpc.NewRPCFunc(Validators, "height"), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), + "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), @@ -35,7 +36,7 @@ var Routes = map[string]*rpc.RPCFunc{ "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), + "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), } diff --git a/rpc/core/status.go b/rpc/core/status.go index 4cb7667b756..793e1ade7bd 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -5,6 +5,7 @@ import ( "time" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" @@ -25,46 +26,52 @@ import ( // > The above command returns JSON structured like this: // // ```json -//{ -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "node_info": { -// "id": "562dd7f579f0ecee8c94a11a3c1e378c1876f433", -// "listen_addr": "192.168.1.2:26656", -// "network": "test-chain-I6zScH", -// "version": "0.19.0", -// "channels": "4020212223303800", -// "moniker": "Ethans-MacBook-Pro.local", -// "other": [ -// "amino_version=0.9.8", -// "p2p_version=0.5.0", -// "consensus_version=v1/0.2.2", -// "rpc_version=0.7.0/3", -// "tx_index=on", -// "rpc_addr=tcp://0.0.0.0:26657" -// ] -// }, -// "sync_info": { -// "latest_block_hash": "2D4D7055BE685E3CB2410603C92AD37AE557AC59", -// "latest_app_hash": "0000000000000000", -// "latest_block_height": 231, -// "latest_block_time": "2018-04-27T23:18:08.459766485-04:00", -// "catching_up": false -// }, -// "validator_info": { -// "address": "5875562FF0FFDECC895C20E32FC14988952E99E7", -// "pub_key": { -// "type": "tendermint/PubKeyEd25519", -// "value": "PpDJRUrLG2RgFqYYjawfn/AcAgacSXpLFrmfYYQnuzE=" -// }, -// "voting_power": 10 -// } -// } -//} +// { +// "jsonrpc": "2.0", +// "id": "", +// "result": { +// "node_info": { +// "protocol_version": { +// "p2p": "4", +// "block": "7", +// "app": "0" +// }, +// "id": "53729852020041b956e86685e24394e0bee4373f", +// "listen_addr": "10.0.2.15:26656", +// "network": "test-chain-Y1OHx6", +// "version": "0.24.0-2ce1abc2", +// "channels": "4020212223303800", +// "moniker": "ubuntu-xenial", +// "other": { +// "tx_index": "on", +// "rpc_addr": "tcp://0.0.0.0:26657" +// } +// }, +// "sync_info": { +// "latest_block_hash": "F51538DA498299F4C57AC8162AAFA0254CE08286", +// "latest_app_hash": "0000000000000000", +// "latest_block_height": "18", +// "latest_block_time": "2018-09-17T11:42:19.149920551Z", +// "catching_up": false +// }, +// "validator_info": { +// "address": "D9F56456D7C5793815D0E9AF07C3A355D0FC64FD", +// "pub_key": { +// "type": "tendermint/PubKeyEd25519", +// "value": "wVxKNtEsJmR4vvh651LrVoRguPs+6yJJ9Bz174gw9DM=" +// }, 
+// "voting_power": "10" +// } +// } +// } // ``` func Status() (*ctypes.ResultStatus, error) { - latestHeight := blockStore.Height() + var latestHeight int64 = -1 + if consensusReactor.FastSync() { + latestHeight = blockStore.Height() + } else { + latestHeight = consensusState.GetLastHeight() + } var ( latestBlockMeta *types.BlockMeta latestBlockHash cmn.HexBytes @@ -86,7 +93,7 @@ func Status() (*ctypes.ResultStatus, error) { } result := &ctypes.ResultStatus{ - NodeInfo: p2pSwitch.NodeInfo(), + NodeInfo: p2pTransport.NodeInfo().(p2p.DefaultNodeInfo), SyncInfo: ctypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, @@ -107,9 +114,8 @@ func Status() (*ctypes.ResultStatus, error) { func validatorAtHeight(h int64) *types.Validator { privValAddress := pubKey.Address() + // If we're still at height h, search in the current validator set. lastBlockHeight, vals := consensusState.GetValidators() - - // if we're still at height h, search in the current validator set if lastBlockHeight == h { for _, val := range vals { if bytes.Equal(val.Address, privValAddress) { @@ -118,12 +124,11 @@ func validatorAtHeight(h int64) *types.Validator { } } - // if we've moved to the next height, retrieve the validator set from DB + // If we've moved to the next height, retrieve the validator set from DB. if lastBlockHeight > h { vals, err := sm.LoadValidators(stateDB, h) if err != nil { - // should not happen - return nil + return nil // should not happen } _, val := vals.GetByAddress(privValAddress) return val diff --git a/rpc/core/tx.go b/rpc/core/tx.go index f53d82f140f..ba6320016a5 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -36,17 +36,17 @@ import ( // }, // "Data": "YWJjZA==", // "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", -// "Total": 1, -// "Index": 0 +// "Total": "1", +// "Index": "0" // }, // "tx": "YWJjZA==", // "tx_result": { // "log": "", // "data": "", -// "code": 0 +// "code": "0" // }, -// "index": 0, -// "height": 52, +// "index": "0", +// "height": "52", // "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" // }, // "id": "", @@ -140,17 +140,17 @@ func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { // }, // "Data": "mvZHHa7HhZ4aRT0xMDA=", // "RootHash": "F6541223AA46E428CB1070E9840D2C3DF3B6D776", -// "Total": 32, -// "Index": 31 +// "Total": "32", +// "Index": "31" // }, // "tx": "mvZHHa7HhZ4aRT0xMDA=", // "tx_result": {}, -// "index": 31, -// "height": 12, +// "index": "31", +// "height": "12", // "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" // } // ], -// "total_count": 1 +// "total_count": "1" // } // } // ``` diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 4fec416eda1..07628d1c60e 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -2,7 +2,6 @@ package core_types import ( "encoding/json" - "strings" "time" abci "github.com/tendermint/tendermint/abci/types" @@ -33,10 +32,8 @@ type ResultBlock struct { // Commit and Header type ResultCommit struct { - // SignedHeader is header and commit, embedded so we only have - // one level in the json output - types.SignedHeader - CanonicalCommit bool `json:"canonical"` + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` } // ABCI results from a block @@ -77,9 +74,9 @@ type ValidatorInfo struct { // Node Status type ResultStatus struct { - NodeInfo p2p.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo p2p.DefaultNodeInfo 
`json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` } // Is TxIndexing enabled @@ -87,13 +84,7 @@ func (s *ResultStatus) TxIndexEnabled() bool { if s == nil { return false } - for _, s := range s.NodeInfo.Other { - info := strings.Split(s, "=") - if len(info) == 2 && info[0] == "tx_index" { - return info[1] == "on" - } - } - return false + return s.NodeInfo.Other.TxIndex == "on" } // Info about peer connections @@ -116,7 +107,7 @@ type ResultDialPeers struct { // A peer type Peer struct { - p2p.NodeInfo `json:"node_info"` + NodeInfo p2p.DefaultNodeInfo `json:"node_info"` IsOutbound bool `json:"is_outbound"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` } @@ -127,6 +118,12 @@ type ResultValidators struct { Validators []*types.Validator `json:"validators"` } +// ConsensusParams for given height +type ResultConsensusParams struct { + BlockHeight int64 `json:"block_height"` + ConsensusParams types.ConsensusParams `json:"consensus_params"` +} + // Info about the consensus state. // UNSTABLE type ResultDumpConsensusState struct { diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go index e410d47ae90..796299d33fe 100644 --- a/rpc/core/types/responses_test.go +++ b/rpc/core/types/responses_test.go @@ -9,31 +9,27 @@ import ( ) func TestStatusIndexer(t *testing.T) { - assert := assert.New(t) - var status *ResultStatus - assert.False(status.TxIndexEnabled()) + assert.False(t, status.TxIndexEnabled()) status = &ResultStatus{} - assert.False(status.TxIndexEnabled()) + assert.False(t, status.TxIndexEnabled()) - status.NodeInfo = p2p.NodeInfo{} - assert.False(status.TxIndexEnabled()) + status.NodeInfo = p2p.DefaultNodeInfo{} + assert.False(t, status.TxIndexEnabled()) cases := []struct { expected bool - other []string + other p2p.DefaultNodeInfoOther }{ - {false, nil}, - {false, []string{}}, - {false, []string{"a=b"}}, - {false, []string{"tx_indexiskv", "some=dood"}}, - {true, []string{"tx_index=on", "tx_index=other"}}, - {true, []string{"^(*^(", "tx_index=on", "a=n=b=d="}}, + {false, p2p.DefaultNodeInfoOther{}}, + {false, p2p.DefaultNodeInfoOther{TxIndex: "aa"}}, + {false, p2p.DefaultNodeInfoOther{TxIndex: "off"}}, + {true, p2p.DefaultNodeInfoOther{TxIndex: "on"}}, } for _, tc := range cases { status.NodeInfo.Other = tc.other - assert.Equal(tc.expected, status.TxIndexEnabled()) + assert.Equal(t, tc.expected, status.TxIndexEnabled()) } } diff --git a/rpc/core/version.go b/rpc/core/version.go deleted file mode 100644 index e283de4798b..00000000000 --- a/rpc/core/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package core - -// a single integer is sufficient here - -const Version = "3" // rpc routes for profiling, setting config diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go index fe979c549de..eda3896fbde 100644 --- a/rpc/grpc/grpc_test.go +++ b/rpc/grpc/grpc_test.go @@ -26,7 +26,7 @@ func TestMain(m *testing.M) { func TestBroadcastTx(t *testing.T) { require := require.New(t) - res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")}) + res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}) require.Nil(err, "%+v", err) require.EqualValues(0, res.CheckTx.Code) require.EqualValues(0, res.DeliverTx.Code) diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index 8bc9761a40d..b33397dab45 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -1,18 
+1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: rpc/grpc/types.proto -/* - Package core_grpc is a generated protocol buffer package. - - It is generated from these files: - rpc/grpc/types.proto - - It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx -*/ //nolint package core_grpc @@ -25,8 +13,10 @@ import types "github.com/tendermint/tendermint/abci/types" import bytes "bytes" -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) import io "io" @@ -43,21 +33,83 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type RequestPing struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +func (m *RequestPing) Reset() { *m = RequestPing{} } +func (m *RequestPing) String() string { return proto.CompactTextString(m) } +func (*RequestPing) ProtoMessage() {} +func (*RequestPing) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{0} +} +func (m *RequestPing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestPing) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPing.Merge(dst, src) +} +func (m *RequestPing) XXX_Size() int { + return m.Size() +} +func (m *RequestPing) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPing.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPing proto.InternalMessageInfo type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } +func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*RequestBroadcastTx) ProtoMessage() {} +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{1} +} +func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBroadcastTx.Merge(dst, src) +} 
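// (For orientation in this regenerated block: newer protoc-gen-gogo emits
// XXX_NoUnkeyedLiteral, XXX_unrecognized and XXX_sizecache on every message.
// XXX_unrecognized is the significant one: bytes of unknown wire fields
// survive Unmarshal and are written back out by Marshal, which is why the
// Equal, Size, Marshal and Unmarshal changes elsewhere in this file all
// account for it. A hedged sketch of the round-trip property, where `wire`
// is some encoded message bytes:
//
//	var in, out RequestBroadcastTx
//	_ = in.Unmarshal(wire)    // unknown fields land in in.XXX_unrecognized
//	reenc, _ := in.Marshal()  // ...and are re-encoded verbatim
//	_ = out.Unmarshal(reenc)  // out matches in, unknown fields included
// )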
+func (m *RequestBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *RequestBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo func (m *RequestBroadcastTx) GetTx() []byte { if m != nil { @@ -67,22 +119,84 @@ func (m *RequestBroadcastTx) GetTx() []byte { } type ResponsePing struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponsePing) Reset() { *m = ResponsePing{} } +func (m *ResponsePing) String() string { return proto.CompactTextString(m) } +func (*ResponsePing) ProtoMessage() {} +func (*ResponsePing) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{2} +} +func (m *ResponsePing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponsePing) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePing.Merge(dst, src) +} +func (m *ResponsePing) XXX_Size() int { + return m.Size() +} +func (m *ResponsePing) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePing.DiscardUnknown(m) } -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } +var xxx_messageInfo_ResponsePing proto.InternalMessageInfo type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` + DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{3} +} +func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(dst, src) +} +func (m 
*ResponseBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { if m != nil { @@ -127,6 +241,9 @@ func (this *RequestPing) Equal(that interface{}) bool { } else if this == nil { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *RequestBroadcastTx) Equal(that interface{}) bool { @@ -151,6 +268,9 @@ func (this *RequestBroadcastTx) Equal(that interface{}) bool { if !bytes.Equal(this.Tx, that1.Tx) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *ResponsePing) Equal(that interface{}) bool { @@ -172,6 +292,9 @@ func (this *ResponsePing) Equal(that interface{}) bool { } else if this == nil { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *ResponseBroadcastTx) Equal(that interface{}) bool { @@ -199,6 +322,9 @@ func (this *ResponseBroadcastTx) Equal(that interface{}) bool { if !this.DeliverTx.Equal(that1.DeliverTx) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } @@ -210,8 +336,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for BroadcastAPI service - +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BroadcastAPIClient interface { Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) @@ -227,7 +354,7 @@ func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { out := new(ResponsePing) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, opts...) if err != nil { return nil, err } @@ -236,7 +363,7 @@ func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ... func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { out := new(ResponseBroadcastTx) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, opts...) 
if err != nil { return nil, err } @@ -322,6 +449,9 @@ func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -346,6 +476,9 @@ func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) i += copy(dAtA[i:], m.Tx) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -364,6 +497,9 @@ func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -402,6 +538,9 @@ func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { } i += n2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -417,6 +556,7 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { func NewPopulatedRequestPing(r randyTypes, easy bool) *RequestPing { this := &RequestPing{} if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 1) } return this } @@ -429,6 +569,7 @@ func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx this.Tx[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) } return this } @@ -436,6 +577,7 @@ func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx func NewPopulatedResponsePing(r randyTypes, easy bool) *ResponsePing { this := &ResponsePing{} if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 1) } return this } @@ -449,6 +591,7 @@ func NewPopulatedResponseBroadcastTx(r randyTypes, easy bool) *ResponseBroadcast this.DeliverTx = types.NewPopulatedResponseDeliverTx(r, easy) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -528,6 +671,9 @@ func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { func (m *RequestPing) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -538,12 +684,18 @@ func (m *RequestBroadcastTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ResponsePing) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -558,6 +710,9 @@ func (m *ResponseBroadcastTx) Size() (n int) { l = m.DeliverTx.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -615,6 +770,7 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -696,6 +852,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -746,6 +903,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -862,6 +1020,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -976,10 +1135,10 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } -func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_48bb8d9591d37e66) } +func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_48bb8d9591d37e66) } -var fileDescriptorTypes = []byte{ +var fileDescriptor_types_48bb8d9591d37e66 = []byte{ // 321 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x29, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, diff --git a/rpc/grpc/typespb_test.go b/rpc/grpc/typespb_test.go index 3d28002b1b1..da076bf64a5 100644 --- a/rpc/grpc/typespb_test.go +++ b/rpc/grpc/typespb_test.go @@ -1,25 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: rpc/grpc/types.proto -/* -Package core_grpc is a generated protocol buffer package. - -It is generated from these files: - rpc/grpc/types.proto - -It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx -*/ package core_grpc import testing "testing" -import rand "math/rand" +import math_rand "math/rand" import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" import proto "github.com/gogo/protobuf/proto" -import jsonpb "github.com/gogo/protobuf/jsonpb" import golang_proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" @@ -34,14 +23,14 @@ var _ = math.Inf func TestRequestPingProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -59,13 +48,13 @@ func TestRequestPingProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestRequestPingMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -77,7 +66,7 @@ func TestRequestPingMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -90,14 +79,14 @@ func TestRequestPingMarshalTo(t *testing.T) { func 
TestRequestBroadcastTxProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -115,13 +104,13 @@ func TestRequestBroadcastTxProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestRequestBroadcastTxMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -133,7 +122,7 @@ func TestRequestBroadcastTxMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -146,14 +135,14 @@ func TestRequestBroadcastTxMarshalTo(t *testing.T) { func TestResponsePingProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -171,13 +160,13 @@ func TestResponsePingProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestResponsePingMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -189,7 +178,7 @@ func TestResponsePingMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -202,14 +191,14 @@ func TestResponsePingMarshalTo(t *testing.T) { func TestResponseBroadcastTxProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponseBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := 
github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -227,13 +216,13 @@ func TestResponseBroadcastTxProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestResponseBroadcastTxMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -245,7 +234,7 @@ func TestResponseBroadcastTxMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponseBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -258,15 +247,15 @@ func TestResponseBroadcastTxMarshalTo(t *testing.T) { func TestRequestPingJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -276,15 +265,15 @@ func TestRequestPingJSON(t *testing.T) { } func TestRequestBroadcastTxJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -294,15 +283,15 @@ func TestRequestBroadcastTxJSON(t *testing.T) { } func TestResponsePingJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -312,15 +301,15 @@ func TestResponsePingJSON(t *testing.T) { } func TestResponseBroadcastTxJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } 
msg := &ResponseBroadcastTx{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -330,11 +319,11 @@ func TestResponseBroadcastTxJSON(t *testing.T) { } func TestRequestPingProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &RequestPing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -344,11 +333,11 @@ func TestRequestPingProtoText(t *testing.T) { func TestRequestPingProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &RequestPing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -358,11 +347,11 @@ func TestRequestPingProtoCompactText(t *testing.T) { func TestRequestBroadcastTxProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &RequestBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -372,11 +361,11 @@ func TestRequestBroadcastTxProtoText(t *testing.T) { func TestRequestBroadcastTxProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &RequestBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -386,11 +375,11 @@ func TestRequestBroadcastTxProtoCompactText(t *testing.T) { func TestResponsePingProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &ResponsePing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -400,11 +389,11 @@ func TestResponsePingProtoText(t *testing.T) { func TestResponsePingProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := 
NewPopulatedResponsePing(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &ResponsePing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -414,11 +403,11 @@ func TestResponsePingProtoCompactText(t *testing.T) { func TestResponseBroadcastTxProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &ResponseBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -428,11 +417,11 @@ func TestResponseBroadcastTxProtoText(t *testing.T) { func TestResponseBroadcastTxProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &ResponseBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -442,10 +431,10 @@ func TestResponseBroadcastTxProtoCompactText(t *testing.T) { func TestRequestPingSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -456,7 +445,7 @@ func TestRequestPingSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -464,10 +453,10 @@ func TestRequestPingSize(t *testing.T) { func TestRequestBroadcastTxSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -478,7 +467,7 @@ func TestRequestBroadcastTxSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -486,10 +475,10 @@ func TestRequestBroadcastTxSize(t *testing.T) { func TestResponsePingSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := 
math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -500,7 +489,7 @@ func TestResponsePingSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -508,10 +497,10 @@ func TestResponsePingSize(t *testing.T) { func TestResponseBroadcastTxSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -522,7 +511,7 @@ func TestResponseBroadcastTxSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 9a07c86761d..6da996e2c27 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -14,8 +14,8 @@ import ( metrics "github.com/rcrowley/go-metrics" "github.com/tendermint/go-amino" - types "github.com/tendermint/tendermint/rpc/lib/types" cmn "github.com/tendermint/tendermint/libs/common" + types "github.com/tendermint/tendermint/rpc/lib/types" ) const ( @@ -174,9 +174,6 @@ func (c *WSClient) OnStart() error { return nil } -// OnStop implements cmn.Service. -func (c *WSClient) OnStop() {} - // Stop overrides cmn.Service#Stop. There is no other way to wait until Quit // channel is closed. func (c *WSClient) Stop() error { diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go index b96b9123cfb..dbdb362dada 100644 --- a/rpc/lib/doc.go +++ b/rpc/lib/doc.go @@ -1,103 +1,88 @@ -/* -HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets - -# Client Requests - -Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. - -## GET (URI) - -As a GET request, it would have URI encoded parameters, and look like: - -``` -curl 'http://localhost:8008/hello_world?name="my_world"&num=5' -``` - -Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. -This should also work: - -``` -curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 -``` - -A GET request to `/` returns a list of available endpoints. -For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. - -## POST (JSONRPC) - -As a POST request, we use JSONRPC. 
For instance, the same request would have this as the body: - -``` -{ - "jsonrpc": "2.0", - "id": "anything", - "method": "hello_world", - "params": { - "name": "my_world", - "num": 5 - } -} -``` - -With the above saved in file `data.json`, we can make the request with - -``` -curl --data @data.json http://localhost:8008 -``` - -## WebSocket (JSONRPC) - -All requests are exposed over websocket in the same form as the POST JSONRPC. -Websocket connections are available at their own endpoint, typically `/websocket`, -though this is configurable when starting the server. - -# Server Definition - -Define some types and routes: - -``` -type ResultStatus struct { - Value string -} - +// HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets +// +// Client Requests +// +// Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. +// +// GET (URI) +// +// As a GET request, it would have URI encoded parameters, and look like: +// +// curl 'http://localhost:8008/hello_world?name="my_world"&num=5' +// +// Note the `'` around the url, which is just so bash doesn't ignore the quotes in `"my_world"`. +// This should also work: +// +// curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 +// +// A GET request to `/` returns a list of available endpoints. +// For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. +// +// POST (JSONRPC) +// +// As a POST request, we use JSONRPC. For instance, the same request would have this as the body: +// +// { +// "jsonrpc": "2.0", +// "id": "anything", +// "method": "hello_world", +// "params": { +// "name": "my_world", +// "num": 5 +// } +// } +// +// With the above saved in file `data.json`, we can make the request with +// +// curl --data @data.json http://localhost:8008 +// +// +// WebSocket (JSONRPC) +// +// All requests are exposed over websocket in the same form as the POST JSONRPC. +// Websocket connections are available at their own endpoint, typically `/websocket`, +// though this is configurable when starting the server. +// +// Server Definition +// +// Define some types and routes: +// +// type ResultStatus struct { +// Value string +// } +// // Define some routes -var Routes = map[string]*rpcserver.RPCFunc{ - "status": rpcserver.NewRPCFunc(Status, "arg"), -} - -// an rpc function -func Status(v string) (*ResultStatus, error) { - return &ResultStatus{v}, nil -} - -``` - -Now start the server: - -``` -mux := http.NewServeMux() -rpcserver.RegisterRPCFuncs(mux, Routes) -wm := rpcserver.NewWebsocketManager(Routes) -mux.HandleFunc("/websocket", wm.WebsocketHandler) -logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -go func() { - _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) - if err != nil { - panic(err) - } -}() - -``` - -Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) - -Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. -Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. 
- - -# Examples - -* [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) -* [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go) -*/ +// +// var Routes = map[string]*rpcserver.RPCFunc{ +// "status": rpcserver.NewRPCFunc(Status, "arg"), +// } +// +// An rpc function: +// +// func Status(v string) (*ResultStatus, error) { +// return &ResultStatus{v}, nil +// } +// +// Now start the server: +// +// mux := http.NewServeMux() +// rpcserver.RegisterRPCFuncs(mux, Routes) +// wm := rpcserver.NewWebsocketManager(Routes) +// mux.HandleFunc("/websocket", wm.WebsocketHandler) +// logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) +// go func() { +// _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) +// if err != nil { +// panic(err) +// } +// }() +// +// Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) +// Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. +// Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. +// +// Examples +// +// - [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) +// - [tm-monitor](https://github.com/tendermint/tendermint/blob/master/tools/tm-monitor/rpc.go) package rpc diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index 3471eb791df..6004959ae93 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -14,9 +14,9 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" rs "github.com/tendermint/tendermint/rpc/lib/server" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tendermint/libs/log" ) ////////////////////////////////////////////////////////////////////////////// diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 5d816ef22af..6de376c291b 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -14,8 +14,8 @@ import ( "github.com/pkg/errors" "golang.org/x/net/netutil" - types "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/libs/log" + types "github.com/tendermint/tendermint/rpc/lib/types" ) // Config is an RPC server configuration. 
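To make the POST flow from the doc.go rewrite above concrete, here is a small client sketch that sends the documented `hello_world` request using only the Go standard library. The endpoint, method name, and params are taken from the doc.go example itself; it assumes such a server is running locally and is not part of this patch:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// The JSONRPC 2.0 body from the doc.go example above.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": "anything",
		"method": "hello_world",
		"params": {"name": "my_world", "num": 5}
	}`)

	// POST it to the server started in the doc.go example.
	resp, err := http.Post("http://localhost:8008", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```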
@@ -61,7 +61,7 @@ func StartHTTPServer( listener, RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), ) - logger.Error("RPC HTTP server stopped", "err", err) + logger.Info("RPC HTTP server stopped", "err", err) }() return listener, nil } @@ -102,15 +102,16 @@ func StartHTTPAndTLSServer( listener = netutil.LimitListener(listener, config.MaxOpenConnections) } - go func() { - err := http.ServeTLS( - listener, - RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), - certFile, - keyFile, - ) + err = http.ServeTLS( + listener, + RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), + certFile, + keyFile, + ) + if err != nil { logger.Error("RPC HTTPS server stopped", "err", err) - }() + return nil, err + } return listener, nil } @@ -172,8 +173,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler "Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack()), ) - rww.WriteHeader(http.StatusInternalServerError) - WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error))) + WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, types.RPCInternalError("", e.(error))) } } diff --git a/rpc/lib/server/http_server_test.go b/rpc/lib/server/http_server_test.go index 3cbe0d9062c..73ebc2e7e79 100644 --- a/rpc/lib/server/http_server_test.go +++ b/rpc/lib/server/http_server_test.go @@ -5,11 +5,14 @@ import ( "io" "io/ioutil" "net/http" + "os" "sync" "sync/atomic" "testing" "time" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) @@ -60,3 +63,15 @@ func TestMaxOpenConnections(t *testing.T) { t.Errorf("%d requests failed within %d attempts", failed, attempts) } } + +func TestStartHTTPAndTLSServer(t *testing.T) { + // set up fixtures + listenerAddr := "tcp://0.0.0.0:0" + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {}) + + // test failure + gotListener, err := StartHTTPAndTLSServer(listenerAddr, mux, "", "", log.TestingLogger(), Config{MaxOpenConnections: 1}) + require.Nil(t, gotListener) + require.IsType(t, (*os.PathError)(nil), err) +} diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go index cb9560e1286..544284b9c1f 100644 --- a/rpc/lib/test/main.go +++ b/rpc/lib/test/main.go @@ -6,9 +6,9 @@ import ( "os" amino "github.com/tendermint/go-amino" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" + rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) var routes = map[string]*rpcserver.RPCFunc{ diff --git a/rpc/lib/version.go b/rpc/lib/version.go deleted file mode 100644 index 8828f260bf4..00000000000 --- a/rpc/lib/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package rpc - -const Maj = "0" -const Min = "7" -const Fix = "0" - -const Version = Maj + "." + Min + "." 
+ Fix diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 7e0cba0edd5..0a9cd9847e7 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -15,6 +15,7 @@ import ( cfg "github.com/tendermint/tendermint/config" nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -84,7 +85,7 @@ func GetConfig() *cfg.Config { globalConfig.P2P.ListenAddress = tm globalConfig.RPC.ListenAddress = rpc globalConfig.RPC.GRPCListenAddress = grpc - globalConfig.TxIndex.IndexTags = "app.creator" // see kvstore application + globalConfig.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application } return globalConfig } @@ -120,7 +121,11 @@ func NewTendermint(app abci.Application) *nm.Node { pvFile := config.PrivValidatorFile() pv := privval.LoadOrGenFilePV(pvFile) papp := proxy.NewLocalClientCreator(app) - node, err := nm.NewNode(config, pv, papp, + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + panic(err) + } + node, err := nm.NewNode(config, pv, nodeKey, papp, nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), diff --git a/scripts/authors.sh b/scripts/authors.sh new file mode 100755 index 00000000000..7aafb0127e6 --- /dev/null +++ b/scripts/authors.sh @@ -0,0 +1,16 @@ +#! /bin/bash + +# Usage: +# `./authors.sh` +# Print a list of all authors who have committed to develop since master. +# +# `./authors.sh <email>` +# Lookup the email address on Github and print the associated username + +author=$1 + +if [[ "$author" == "" ]]; then + git log master..develop | grep Author | sort | uniq +else + curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login +fi diff --git a/scripts/get_tools.sh b/scripts/get_tools.sh new file mode 100755 index 00000000000..955ec943a7f --- /dev/null +++ b/scripts/get_tools.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -e + +# This file downloads all of the binary dependencies we have, and checks out a +# specific git hash. +# +# repos it installs: +# github.com/mitchellh/gox +# github.com/golang/dep/cmd/dep +# gopkg.in/alecthomas/gometalinter.v2 +# github.com/gogo/protobuf/protoc-gen-gogo +# github.com/square/certstrap + +## check if GOPATH is set +if [ -z ${GOPATH+x} ]; then + echo "please set GOPATH (https://github.com/golang/go/wiki/SettingGOPATH)" + exit 1 +fi + +mkdir -p "$GOPATH/src/github.com" +cd "$GOPATH/src/github.com" || exit 1 + +installFromGithub() { + repo=$1 + commit=$2 + # optional + subdir=$3 + echo "--> Installing $repo ($commit)..." + if [ ! -d "$repo" ]; then + mkdir -p "$repo" + git clone "https://github.com/$repo.git" "$repo" + fi + if [ ! -z ${subdir+x} ] && [ ! -d "$repo/$subdir" ]; then + echo "ERROR: no such directory $repo/$subdir" + exit 1 + fi + pushd "$repo" && \ git fetch origin && \ git checkout -q "$commit" && \ if [ ! -z ${subdir+x} ]; then cd "$subdir" || exit 1; fi && \ go install && \ if [ !
-z ${subdir+x} ]; then cd - || exit 1; fi && \ + popd || exit 1 + echo "--> Done" + echo "" +} + +installFromGithub mitchellh/gox 51ed453898ca5579fea9ad1f08dff6b121d9f2e8 +installFromGithub golang/dep 22125cfaa6ddc71e145b1535d4b7ee9744fefff2 cmd/dep +## gometalinter v2.0.11 +installFromGithub alecthomas/gometalinter 17a7ffa42374937bfecabfb8d2efbd4db0c26741 +installFromGithub gogo/protobuf 61dbc136cf5d2f08d68a011382652244990a53a9 protoc-gen-gogo +installFromGithub square/certstrap e27060a3643e814151e65b9807b6b06d169580a7 diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh index aba584f2ebf..5b30eab3196 100644 --- a/scripts/install/install_tendermint_bsd.sh +++ b/scripts/install/install_tendermint_bsd.sh @@ -17,16 +17,15 @@ set BRANCH=master sudo pkg update -sudo pkg upgrade -y sudo pkg install -y gmake sudo pkg install -y git # get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.freebsd-amd64.tar.gz -tar -xvf go1.10.freebsd-amd64.tar.gz +curl -O https://storage.googleapis.com/golang/go1.11.freebsd-amd64.tar.gz +tar -xvf go1.11.freebsd-amd64.tar.gz -# move go binary and add to path -mv go /usr/local +# move go folder and add go binary to path +sudo mv go /usr/local set path=($path /usr/local/go/bin) @@ -41,7 +40,7 @@ source ~/.tcshrc # get the code and move into repo set REPO=github.com/tendermint/tendermint go get $REPO -cd $GOPATH/src/$REPO +cd "$GOPATH/src/$REPO" # build & install master git checkout $BRANCH diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh index 0e1de1177de..29e975088ad 100644 --- a/scripts/install/install_tendermint_ubuntu.sh +++ b/scripts/install/install_tendermint_ubuntu.sh @@ -14,27 +14,25 @@ REPO=github.com/tendermint/tendermint BRANCH=master sudo apt-get update -y -sudo apt-get upgrade -y sudo apt-get install -y make # get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz +curl -O https://storage.googleapis.com/golang/go1.11.linux-amd64.tar.gz +tar -xvf go1.11.linux-amd64.tar.gz -# move go binary and add to path -mv go /usr/local +# move go folder and add go binary to path +sudo mv go /usr/local echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile # create the goApps directory, set GOPATH, and put it on PATH mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile +echo "export GOPATH=$HOME/goApps" >> ~/.profile echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile - source ~/.profile # get the code and move into repo go get $REPO -cd $GOPATH/src/$REPO +cd "$GOPATH/src/$REPO" # build & install git checkout $BRANCH diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go new file mode 100644 index 00000000000..be3487e5f7a --- /dev/null +++ b/scripts/json2wal/main.go @@ -0,0 +1,74 @@ +/* + json2wal converts JSON file to binary WAL file. 
+ + Usage: + json2wal <path-to-JSON> <path-to-exported-WAL> +*/ + +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/types" +) + +var cdc = amino.NewCodec() + +func init() { + cs.RegisterConsensusMessages(cdc) + cs.RegisterWALMessages(cdc) + types.RegisterBlockAmino(cdc) +} + +func main() { + if len(os.Args) < 3 { + fmt.Fprintln(os.Stderr, "missing arguments: Usage:json2wal <path-to-JSON> <path-to-exported-WAL>") + os.Exit(1) + } + + f, err := os.Open(os.Args[1]) + if err != nil { + panic(fmt.Errorf("failed to open WAL file: %v", err)) + } + defer f.Close() + + walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + panic(fmt.Errorf("failed to open WAL file: %v", err)) + } + defer walFile.Close() + + br := bufio.NewReader(f) + dec := cs.NewWALEncoder(walFile) + + for { + msgJson, _, err := br.ReadLine() + if err == io.EOF { + break + } else if err != nil { + panic(fmt.Errorf("failed to read file: %v", err)) + } + // ignore the ENDHEIGHT in json.File + if strings.HasPrefix(string(msgJson), "ENDHEIGHT") { + continue + } + + var msg cs.TimedWALMessage + err = cdc.UnmarshalJSON(msgJson, &msg) + if err != nil { + panic(fmt.Errorf("failed to unmarshal json: %v", err)) + } + + err = dec.Encode(&msg) + if err != nil { + panic(fmt.Errorf("failed to encode msg: %v", err)) + } + } +} diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py new file mode 100644 index 00000000000..16647c05f4c --- /dev/null +++ b/scripts/linkify_changelog.py @@ -0,0 +1,13 @@ +import fileinput +import re + +# This script goes through the provided file, and replaces any " \#", +# with the valid mark down formatted link to it. e.g. +# " [\#number](https://github.com/tendermint/tendermint/issues/) +# Note that if the number is for a PR, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage $ python3 linkify_changelog.py ../CHANGELOG_PENDING.md +for line in fileinput.input(inplace=1): + line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/issues/\1)", line.rstrip()) + print(line) \ No newline at end of file diff --git a/scripts/localnet-blocks-test.sh b/scripts/localnet-blocks-test.sh new file mode 100755 index 00000000000..a33ab00f3a7 --- /dev/null +++ b/scripts/localnet-blocks-test.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +ITERATIONS=$1 +SLEEP=$2 +NUMBLOCKS=$3 +NODEADDR=$4 + +if [ -z "$1" ]; then + echo "Need to input number of iterations to run..." + exit 1 +fi + +if [ -z "$2" ]; then + echo "Need to input number of seconds to sleep between iterations" + exit 1 +fi + +if [ -z "$3" ]; then + echo "Need to input block height to declare completion..." + exit 1 +fi + +if [ -z "$4" ]; then + echo "Need to input node address to poll..." + exit 1 +fi + +I=0 +while [ ${I} -lt "$ITERATIONS" ]; do + var=$(curl -s "$NODEADDR:26657/status" | jq -r ".result.sync_info.latest_block_height") + echo "Number of Blocks: ${var}" + if [ ! -z "${var}" ] && [ "${var}" -gt "${NUMBLOCKS}" ]; then + echo "Number of blocks reached, exiting success..." + exit 0 + fi + I=$((I+1)) + sleep "$SLEEP" +done + +echo "Timeout reached, exiting failure..."
+exit 1 diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index f6ffea4319c..cf8ae86c1b8 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -8,14 +8,23 @@ package main import ( - "encoding/json" "fmt" "io" "os" + "github.com/tendermint/go-amino" cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/types" ) +var cdc = amino.NewCodec() + +func init() { + cs.RegisterConsensusMessages(cdc) + cs.RegisterWALMessages(cdc) + types.RegisterBlockAmino(cdc) +} + func main() { if len(os.Args) < 2 { fmt.Println("missing one argument: <path-to-wal>") @@ -37,7 +46,7 @@ func main() { panic(fmt.Errorf("failed to decode msg: %v", err)) } - json, err := json.Marshal(msg) + json, err := cdc.MarshalJSON(msg) if err != nil { panic(fmt.Errorf("failed to marshal msg: %v", err)) } diff --git a/state/errors.go b/state/errors.go index d40c7e1413d..6010ed9bbd6 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,8 +1,6 @@ package state -import ( - cmn "github.com/tendermint/tendermint/libs/common" -) +import "fmt" type ( ErrInvalidBlock error @@ -48,32 +46,32 @@ type ( ) func (e ErrUnknownBlock) Error() string { - return cmn.Fmt("Could not find block #%d", e.Height) + return fmt.Sprintf("Could not find block #%d", e.Height) } func (e ErrBlockHashMismatch) Error() string { - return cmn.Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) + return fmt.Sprintf("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) } func (e ErrAppBlockHeightTooHigh) Error() string { - return cmn.Fmt("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) + return fmt.Sprintf("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) } func (e ErrLastStateMismatch) Error() string { - return cmn.Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) + return fmt.Sprintf("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) } func (e ErrStateMismatch) Error() string { - return cmn.Fmt("State after replay does not match saved state. 
Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected) } func (e ErrNoValSetForHeight) Error() string { - return cmn.Fmt("Could not find validator set for height #%d", e.Height) + return fmt.Sprintf("Could not find validator set for height #%d", e.Height) } func (e ErrNoConsensusParamsForHeight) Error() string { - return cmn.Fmt("Could not find consensus params for height #%d", e.Height) + return fmt.Sprintf("Could not find consensus params for height #%d", e.Height) } func (e ErrNoABCIResponsesForHeight) Error() string { - return cmn.Fmt("Could not find results for height #%d", e.Height) + return fmt.Sprintf("Could not find results for height #%d", e.Height) } diff --git a/state/execution.go b/state/execution.go index 54e1ec73ae0..4f5a1545cc3 100644 --- a/state/execution.go +++ b/state/execution.go @@ -2,10 +2,11 @@ package state import ( "fmt" + "time" - fail "github.com/ebuchman/fail-test" abci "github.com/tendermint/tendermint/abci/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" @@ -32,20 +33,37 @@ type BlockExecutor struct { evpool EvidencePool logger log.Logger + + metrics *Metrics +} + +type BlockExecutorOption func(executor *BlockExecutor) + +func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { + return func(blockExec *BlockExecutor) { + blockExec.metrics = metrics + } } // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool Mempool, evpool EvidencePool) *BlockExecutor { - return &BlockExecutor{ + mempool Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor { + res := &BlockExecutor{ db: db, proxyApp: proxyApp, eventBus: types.NopEventBus{}, mempool: mempool, evpool: evpool, logger: logger, + metrics: NopMetrics(), } + + for _, option := range options { + option(res) + } + + return res } // SetEventBus - sets the event bus for publishing block related events. @@ -73,26 +91,29 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b return state, ErrInvalidBlock(err) } + startTime := time.Now().UnixNano() abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db) + endTime := time.Now().UnixNano() + blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { return state, ErrProxyAppConn(err) } fail.Fail() // XXX - // save the results before we commit + // Save the results before we commit. saveABCIResponses(blockExec.db, block.Height, abciResponses) fail.Fail() // XXX - // update the state with the block and responses + // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } - // lock mempool, commit app state, update mempoool - appHash, err := blockExec.Commit(block) + // Lock mempool, commit app state, update mempoool. 
+ appHash, err := blockExec.Commit(state, block) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -102,24 +123,29 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX - // update the app hash and save the state + // Update the app hash and save the state. state.AppHash = appHash SaveState(blockExec.db, state) fail.Fail() // XXX - // events are fired after everything else + // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) return state, nil } -// Commit locks the mempool, runs the ABCI Commit message, and updates the mempool. +// Commit locks the mempool, runs the ABCI Commit message, and updates the +// mempool. // It returns the result of calling abci.Commit (the AppHash), and an error. -// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed -// against committed state before new txs are run in the mempool, lest they be invalid. -func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { +// The Mempool must be locked during commit and update because state is +// typically reset on Commit and old txs must be replayed against committed +// state before new txs are run in the mempool, lest they be invalid. +func (blockExec *BlockExecutor) Commit( + state State, + block *types.Block, +) ([]byte, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -134,22 +160,30 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { // Commit block, get hash back res, err := blockExec.proxyApp.CommitSync() if err != nil { - blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) + blockExec.logger.Error( + "Client error during proxyAppConn.CommitSync", + "err", err, + ) return nil, err } // ResponseCommit has no error code - just data - blockExec.logger.Info("Committed state", + blockExec.logger.Info( + "Committed state", "height", block.Height, "txs", block.NumTxs, - "appHash", fmt.Sprintf("%X", res.Data)) + "appHash", fmt.Sprintf("%X", res.Data), + ) // Update mempool. - if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { - return nil, err - } - - return res.Data, nil + err = blockExec.mempool.Update( + block.Height, + block.Txs, + TxPreCheck(state), + TxPostCheck(state), + ) + + return res.Data, err } //--------------------------------------------------------- @@ -157,14 +191,19 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set -func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, - block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) { +func execBlockOnProxyApp( + logger log.Logger, + proxyAppConn proxy.AppConnConsensus, + block *types.Block, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, +) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 txIndex := 0 abciResponses := NewABCIResponses(block) - // Execute transactions and get hash + // Execute transactions and get hash. 
proxyCb := func(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_DeliverTx: @@ -184,16 +223,13 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, } proxyAppConn.SetResponseCallback(proxyCb) - signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) - // Begin block + // Begin block. _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: types.TM2PB.Header(&block.Header), - LastCommitInfo: abci.LastCommitInfo{ - CommitRound: int32(block.LastCommit.Round()), - Validators: signVals, - }, + Hash: block.Hash(), + Header: types.TM2PB.Header(&block.Header), + LastCommitInfo: commitInfo, ByzantineValidators: byzVals, }) if err != nil { @@ -201,7 +237,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, return nil, err } - // Run txs of block + // Run txs of block. for _, tx := range block.Txs { proxyAppConn.DeliverTxAsync(tx) if err := proxyAppConn.Error(); err != nil { @@ -209,7 +245,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, } } - // End block + // End block. abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{Height: block.Height}) if err != nil { logger.Error("Error in proxyAppConn.EndBlock", "err", err) @@ -220,13 +256,14 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, valUpdates := abciResponses.EndBlock.ValidatorUpdates if len(valUpdates) > 0 { - logger.Info("Updates to validators", "updates", abci.ValidatorsString(valUpdates)) + // TODO: cleanup the formatting + logger.Info("Updates to validators", "updates", valUpdates) } return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) { +func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { // Sanity check that commit length matches validator set size - // only applies after first block @@ -240,18 +277,23 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS } } - // determine which validators did not sign last block. - signVals := make([]abci.SigningValidator, len(lastValSet.Validators)) + // Collect the vote info (list of validators and whether or not they signed). + voteInfos := make([]abci.VoteInfo, len(lastValSet.Validators)) for i, val := range lastValSet.Validators { var vote *types.Vote if i < len(block.LastCommit.Precommits) { vote = block.LastCommit.Precommits[i] } - val := abci.SigningValidator{ - Validator: types.TM2PB.ValidatorWithoutPubKey(val), + voteInfo := abci.VoteInfo{ + Validator: types.TM2PB.Validator(val), SignedLastBlock: vote != nil, } - signVals[i] = val + voteInfos[i] = voteInfo + } + + commitInfo := abci.LastCommitInfo{ + Round: int32(block.LastCommit.Round()), + Votes: voteInfos, } byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) @@ -266,15 +308,15 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) } - return signVals, byzVals + return commitInfo, byzVals } // If more or equal than 1/3 of total voting power changed in one block, then // a light client could never prove the transition externally. 
See // ./lite/doc.go for details on how a light client tracks validators. -func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validator) error { - updates, err := types.PB2TM.Validators(abciUpdates) +func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.ValidatorUpdate) error { + updates, err := types.PB2TM.ValidatorUpdates(abciUpdates) if err != nil { return err } @@ -311,29 +353,32 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat } // updateState returns a new State updated according to the header and responses. -func updateState(state State, blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses) (State, error) { - - // copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators - prevValSet := state.Validators.Copy() - nextValSet := prevValSet.Copy() - - // update the validator set with the latest abciResponses +func updateState( + state State, + blockID types.BlockID, + header *types.Header, + abciResponses *ABCIResponses, +) (State, error) { + + // Copy the valset so we can apply changes from EndBlock + // and update s.LastValidators and s.Validators. + nValSet := state.NextValidators.Copy() + + // Update the validator set with the latest abciResponses. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) + err := updateValidators(nValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { return state, fmt.Errorf("Error changing validator set: %v", err) } - // change results from this height but only applies to the next height - lastHeightValsChanged = header.Height + 1 + // Change results from this height but only applies to the next next height. + lastHeightValsChanged = header.Height + 1 + 1 } - // Update validator accums and set state variables - nextValSet.IncrementAccum(1) + // Update validator accums and set state variables. + nValSet.IncrementAccum(1) - // update the params with the latest abciResponses + // Update the params with the latest abciResponses. nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if abciResponses.EndBlock.ConsensusParamUpdates != nil { @@ -343,19 +388,24 @@ func updateState(state State, blockID types.BlockID, header *types.Header, if err != nil { return state, fmt.Errorf("Error updating consensus params: %v", err) } - // change results from this height but only applies to the next height + // Change results from this height but only applies to the next height. lastHeightParamsChanged = header.Height + 1 } + // TODO: allow app to upgrade version + nextVersion := state.Version + // NOTE: the AppHash has not been populated. // It will be filled on state.Save. 
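To make the `header.Height + 1 + 1` bookkeeping concrete: an update returned by EndBlock at height H is applied to NextValidators, which only becomes the active set at H+2, because the set for H+1 was already fixed when block H was proposed. A minimal sketch of that pipeline under those assumptions (toy slices, not the real ValidatorSet):

package main

import "fmt"

func main() {
	const h int64 = 10 // height of the block just executed

	// state.NextValidators before the update: already fixed for h+1.
	setForH1 := []string{"v1"}

	// EndBlock(h) adds v2; the change lands in the copy, nValSet.
	nValSet := append(append([]string{}, setForH1...), "v2")

	// As in updateState: visible at h+2, not h+1.
	lastHeightValsChanged := h + 1 + 1

	fmt.Printf("validators at height %d: %v\n", h+1, setForH1)                  // [v1]
	fmt.Printf("validators at height %d: %v\n", lastHeightValsChanged, nValSet) // [v1 v2]
}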
return State{ + Version: nextVersion, ChainID: state.ChainID, LastBlockHeight: header.Height, LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs, LastBlockID: blockID, LastBlockTime: header.Time, - Validators: nextValSet, + NextValidators: nValSet, + Validators: state.NextValidators.Copy(), LastValidators: state.Validators.Copy(), LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, @@ -380,6 +430,14 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty Result: *(abciResponses.DeliverTx[i]), }}) } + + abciValUpdates := abciResponses.EndBlock.ValidatorUpdates + if len(abciValUpdates) > 0 { + // if there were an error, we would've stopped in updateValidators + updates, _ := types.PB2TM.ValidatorUpdates(abciValUpdates) + eventBus.PublishEventValidatorSetUpdates( + types.EventDataValidatorSetUpdates{ValidatorUpdates: updates}) + } } //---------------------------------------------------------------------------------------------------- @@ -387,8 +445,13 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). -func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, - logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) { +func ExecCommitBlock( + appConnConsensus proxy.AppConnConsensus, + block *types.Block, + logger log.Logger, + lastValSet *types.ValidatorSet, + stateDB dbm.DB, +) ([]byte, error) { _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) diff --git a/state/execution_test.go b/state/execution_test.go index 53c5c882be6..273e9ebea09 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1,6 +1,7 @@ package state import ( + "context" "fmt" "testing" "time" @@ -14,6 +15,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" @@ -27,7 +29,7 @@ var ( func TestApplyBlock(t *testing.T) { cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication()) - proxyApp := proxy.NewAppConns(cc, nil) + proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() @@ -50,7 +52,7 @@ func TestApplyBlock(t *testing.T) { func TestBeginBlockValidators(t *testing.T) { app := &testApp{} cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc, nil) + proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() @@ -61,8 +63,8 @@ func TestBeginBlockValidators(t *testing.T) { prevParts := types.PartSetHeader{} prevBlockID := types.BlockID{prevHash, prevParts} - now := time.Now().UTC() - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + now := tmtime.Now() + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType} vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} testCases := []struct { @@ -79,13 +81,14 @@ func TestBeginBlockValidators(t *testing.T) { lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: 
tc.lastCommitPrecommits} // block for height 2 - block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil) + block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed ctr := 0 - for i, v := range app.Validators { + for i, v := range app.CommitVotes { if ctr < len(tc.expectedAbsentValidators) && tc.expectedAbsentValidators[ctr] == i { @@ -102,7 +105,7 @@ func TestBeginBlockValidators(t *testing.T) { func TestBeginBlockByzantineValidators(t *testing.T) { app := &testApp{} cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc, nil) + proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() @@ -118,7 +121,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { ev1 := types.NewMockGoodEvidence(height1, idx1, val1) ev2 := types.NewMockGoodEvidence(height2, idx2, val2) - now := time.Now() + now := tmtime.Now() valSet := state.Validators testCases := []struct { desc string @@ -132,13 +135,13 @@ func TestBeginBlockByzantineValidators(t *testing.T) { types.TM2PB.Evidence(ev2, valSet, now)}}, } - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} + vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.PrecommitType} vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} votes := []*types.Vote{vote0, vote1} lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes} for _, tc := range testCases { - block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil) + block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) block.Time = now block.Evidence.Evidence = tc.evidence _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) @@ -159,7 +162,7 @@ func TestUpdateValidators(t *testing.T) { name string currentSet *types.ValidatorSet - abciUpdates []abci.Validator + abciUpdates []abci.ValidatorUpdate resultingSet *types.ValidatorSet shouldErr bool @@ -168,7 +171,7 @@ func TestUpdateValidators(t *testing.T) { "adding a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, types.NewValidatorSet([]*types.Validator{val1, val2}), false, @@ -177,7 +180,7 @@ func TestUpdateValidators(t *testing.T) { "updating a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), false, @@ -186,7 +189,7 @@ func TestUpdateValidators(t *testing.T) { "removing a validator is OK", types.NewValidatorSet([]*types.Validator{val1, val2}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), false, @@ -196,7 +199,7 @@ func TestUpdateValidators(t *testing.T) { "removing a non-existing validator results in error", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: 
[]byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), true, @@ -206,7 +209,7 @@ func TestUpdateValidators(t *testing.T) { "adding a validator with negative power results in error", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, types.NewValidatorSet([]*types.Validator{val1}), true, @@ -232,6 +235,62 @@ func TestUpdateValidators(t *testing.T) { } } +// TestEndBlockValidatorUpdates ensures we update validator set and send an event. +func TestEndBlockValidatorUpdates(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state, stateDB := state(1, 1) + + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), + MockMempool{}, MockEvidencePool{}) + eventBus := types.NewEventBus() + err = eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + blockExec.SetEventBus(eventBus) + + updatesCh := make(chan interface{}, 1) + err = eventBus.Subscribe(context.Background(), "TestEndBlockValidatorUpdates", types.EventQueryValidatorSetUpdates, updatesCh) + require.NoError(t, err) + + block := makeBlock(state, 1) + blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + + pubkey := ed25519.GenPrivKey().PubKey() + app.ValidatorUpdates = []abci.ValidatorUpdate{ + {PubKey: types.TM2PB.PubKey(pubkey), Power: 10}, + } + + state, err = blockExec.ApplyBlock(state, blockID, block) + require.Nil(t, err) + + // test new validator was added to NextValidators + if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { + idx, _ := state.NextValidators.GetByAddress(pubkey.Address()) + if idx < 0 { + t.Fatalf("can't find address %v in the set %v", pubkey.Address(), state.NextValidators) + } + } + + // test we threw an event + select { + case e := <-updatesCh: + event, ok := e.(types.EventDataValidatorSetUpdates) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", e) + if assert.NotEmpty(t, event.ValidatorUpdates) { + assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) + } + case <-time.After(1 * time.Second): + t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + } +} + //---------------------------------------------------------------------------- // make some bogus txs @@ -248,7 +307,10 @@ func state(nVals, height int) (State, dbm.DB) { secret := []byte(fmt.Sprintf("test%d", i)) pk := ed25519.GenPrivKeyFromSecret(secret) vals[i] = types.GenesisValidator{ - pk.PubKey(), 1000, fmt.Sprintf("test%d", i), + pk.PubKey().Address(), + pk.PubKey(), + 1000, + fmt.Sprintf("test%d", i), } } s, _ := MakeGenesisState(&types.GenesisDoc{ @@ -263,41 +325,43 @@ func state(nVals, height int) (State, dbm.DB) { for i := 1; i < height; i++ { s.LastBlockHeight++ + s.LastValidators = s.Validators.Copy() SaveState(stateDB, s) } return s, stateDB } func makeBlock(state State, height int64) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil) + block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, 
state.Validators.GetProposer().Address) return block } //---------------------------------------------------------------------------- -var _ abci.Application = (*testApp)(nil) - type testApp struct { abci.BaseApplication - Validators []abci.SigningValidator + CommitVotes []abci.VoteInfo ByzantineValidators []abci.Evidence + ValidatorUpdates []abci.ValidatorUpdate } -func NewKVStoreApplication() *testApp { - return &testApp{} -} +var _ abci.Application = (*testApp)(nil) func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { return abci.ResponseInfo{} } func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { - app.Validators = req.LastCommitInfo.Validators + app.CommitVotes = req.LastCommitInfo.Votes app.ByzantineValidators = req.ByzantineValidators return abci.ResponseBeginBlock{} } +func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + return abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates} +} + func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} } diff --git a/state/metrics.go b/state/metrics.go new file mode 100644 index 00000000000..4e99753f0ea --- /dev/null +++ b/state/metrics.go @@ -0,0 +1,33 @@ +package state + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const MetricsSubsystem = "state" + +type Metrics struct { + // Time between BeginBlock and EndBlock. + BlockProcessingTime metrics.Histogram +} + +func PrometheusMetrics(namespace string) *Metrics { + return &Metrics{ + BlockProcessingTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "block_processing_time", + Help: "Time between BeginBlock and EndBlock in ms.", + Buckets: stdprometheus.LinearBuckets(1, 10, 10), + }, []string{}), + } +} + +func NopMetrics() *Metrics { + return &Metrics{ + BlockProcessingTime: discard.NewHistogram(), + } +} diff --git a/state/services.go b/state/services.go index c51fa97521a..b8f1febe1fb 100644 --- a/state/services.go +++ b/state/services.go @@ -2,6 +2,7 @@ package state import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/types" ) @@ -22,8 +23,8 @@ type Mempool interface { Size() int CheckTx(types.Tx, func(*abci.Response)) error - Reap(int) types.Txs - Update(height int64, txs types.Txs) error + ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs + Update(int64, types.Txs, mempool.PreCheckFunc, mempool.PostCheckFunc) error Flush() FlushAppConn() error @@ -32,19 +33,27 @@ type Mempool interface { } // MockMempool is an empty implementation of a Mempool, useful for testing. 
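A side note on the `var _ Mempool = MockMempool{}` line the rewritten mock below gains: it is a compile-time assertion, shown here in miniature with a made-up interface.

package main

type greeter interface{ Greet() string }

type mockGreeter struct{}

// Compile-time proof that mockGreeter satisfies greeter: if the
// interface later grows a method the mock lacks, the build fails
// right here rather than at some distant call site.
var _ greeter = mockGreeter{}

func (mockGreeter) Greet() string { return "hi" }

func main() { println(mockGreeter{}.Greet()) }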
-type MockMempool struct { +type MockMempool struct{} + +var _ Mempool = MockMempool{} + +func (MockMempool) Lock() {} +func (MockMempool) Unlock() {} +func (MockMempool) Size() int { return 0 } +func (MockMempool) CheckTx(_ types.Tx, _ func(*abci.Response)) error { return nil } +func (MockMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } +func (MockMempool) Update( + _ int64, + _ types.Txs, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, +) error { + return nil } - -func (m MockMempool) Lock() {} -func (m MockMempool) Unlock() {} -func (m MockMempool) Size() int { return 0 } -func (m MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil } -func (m MockMempool) Reap(n int) types.Txs { return types.Txs{} } -func (m MockMempool) Update(height int64, txs types.Txs) error { return nil } -func (m MockMempool) Flush() {} -func (m MockMempool) FlushAppConn() error { return nil } -func (m MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (m MockMempool) EnableTxsAvailable() {} +func (MockMempool) Flush() {} +func (MockMempool) FlushAppConn() error { return nil } +func (MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (MockMempool) EnableTxsAvailable() {} //------------------------------------------------------ // blockstore @@ -72,15 +81,14 @@ type BlockStore interface { // EvidencePool defines the EvidencePool interface used by the ConsensusState. type EvidencePool interface { - PendingEvidence() []types.Evidence + PendingEvidence(int64) []types.Evidence AddEvidence(types.Evidence) error Update(*types.Block, State) } // MockMempool is an empty implementation of a Mempool, useful for testing. -type MockEvidencePool struct { -} +type MockEvidencePool struct{} -func (m MockEvidencePool) PendingEvidence() []types.Evidence { return nil } -func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (m MockEvidencePool) Update(*types.Block, State) {} +func (m MockEvidencePool) PendingEvidence(int64) []types.Evidence { return nil } +func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } +func (m MockEvidencePool) Update(*types.Block, State) {} diff --git a/state/state.go b/state/state.go index fb589a2404b..0dbd718dab3 100644 --- a/state/state.go +++ b/state/state.go @@ -7,6 +7,8 @@ import ( "time" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" + "github.com/tendermint/tendermint/version" ) // database keys @@ -16,6 +18,29 @@ var ( //----------------------------------------------------------------------------- +// Version is for versioning the State. +// It holds the Block and App version needed for making blocks, +// and the software version to support upgrades to the format of +// the State as stored on disk. +type Version struct { + Consensus version.Consensus + Software string +} + +// initStateVersion sets the Consensus.Block and Software versions, +// but leaves the Consensus.App version blank. +// The Consensus.App version will be set during the Handshake, once +// we hear from the app what protocol version it is running. +var initStateVersion = Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 0, + }, + Software: version.TMCoreSemVer, +} + +//----------------------------------------------------------------------------- + // State is a short description of the latest committed block of the Tendermint consensus. 
// It keeps all information necessary to validate new blocks, // including the last validator set and the consensus params. @@ -24,7 +49,9 @@ var ( // Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { - // Immutable + Version Version + + // immutable ChainID string // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) @@ -38,6 +65,7 @@ type State struct { // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + NextValidators *types.ValidatorSet Validators *types.ValidatorSet LastValidators *types.ValidatorSet LastHeightValidatorsChanged int64 @@ -50,13 +78,14 @@ type State struct { // Merkle root of the results from executing prev block LastResultsHash []byte - // The latest AppHash we've received from calling abci.Commit() + // the latest AppHash we've received from calling abci.Commit() AppHash []byte } // Copy makes a copy of the State for mutating. func (state State) Copy() State { return State{ + Version: state.Version, ChainID: state.ChainID, LastBlockHeight: state.LastBlockHeight, @@ -64,6 +93,7 @@ func (state State) Copy() State { LastBlockID: state.LastBlockID, LastBlockTime: state.LastBlockTime, + NextValidators: state.NextValidators.Copy(), Validators: state.Validators.Copy(), LastValidators: state.LastValidators.Copy(), LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, @@ -93,37 +123,61 @@ func (state State) IsEmpty() bool { return state.Validators == nil // XXX can't compare to Empty } -// GetValidators returns the last and current validator sets. -func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { - return state.LastValidators, state.Validators -} - //------------------------------------------------------------------------ // Create a block from the latest state -// MakeBlock builds a block from the current state with the given txs, commit, and evidence. +// MakeBlock builds a block from the current state with the given txs, commit, +// and evidence. Note it also takes a proposerAddress because the state does not +// track rounds, and hence does not know the correct proposer. TODO: fix this! func (state State) MakeBlock( height int64, txs []types.Tx, commit *types.Commit, evidence []types.Evidence, + proposerAddress []byte, ) (*types.Block, *types.PartSet) { // Build base block with block data. block := types.MakeBlock(height, txs, commit, evidence) + // Set time. + var timestamp time.Time + if height == 1 { + timestamp = state.LastBlockTime // genesis time + } else { + timestamp = MedianTime(commit, state.LastValidators) + } + // Fill rest of header with state data. - block.ChainID = state.ChainID + block.Header.Populate( + state.Version.Consensus, state.ChainID, + timestamp, state.LastBlockID, state.LastBlockTotalTx+block.NumTxs, + state.Validators.Hash(), state.NextValidators.Hash(), + state.ConsensusParams.Hash(), state.AppHash, state.LastResultsHash, + proposerAddress, + ) + + return block, block.MakePartSet(types.BlockPartSizeBytes) +} - block.LastBlockID = state.LastBlockID - block.TotalTxs = state.LastBlockTotalTx + block.NumTxs +// MedianTime computes a median time for a given Commit (based on Timestamp field of votes messages) and the +// corresponding validator set. 
The computed time is always between timestamps of +// the votes sent by honest processes, i.e., a faulty process cannot arbitrarily increase or decrease the +// computed value. +func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { - block.ValidatorsHash = state.Validators.Hash() - block.ConsensusHash = state.ConsensusParams.Hash() - block.AppHash = state.AppHash - block.LastResultsHash = state.LastResultsHash + weightedTimes := make([]*tmtime.WeightedTime, len(commit.Precommits)) + totalVotingPower := int64(0) - return block, block.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) + for i, vote := range commit.Precommits { + if vote != nil { + _, validator := validators.GetByIndex(vote.ValidatorIndex) + totalVotingPower += validator.VotingPower + weightedTimes[i] = tmtime.NewWeightedTime(vote.Timestamp, validator.VotingPower) + } + } + + return tmtime.WeightedMedian(weightedTimes, totalVotingPower) } //------------------------------------------------------------------------ @@ -161,29 +215,29 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { return State{}, fmt.Errorf("Error in genesis file: %v", err) } - // Make validators slice - validators := make([]*types.Validator, len(genDoc.Validators)) - for i, val := range genDoc.Validators { - pubKey := val.PubKey - address := pubKey.Address() - - // Make validator - validators[i] = &types.Validator{ - Address: address, - PubKey: pubKey, - VotingPower: val.Power, + var validatorSet, nextValidatorSet *types.ValidatorSet + if genDoc.Validators == nil { + validatorSet = types.NewValidatorSet(nil) + nextValidatorSet = types.NewValidatorSet(nil) + } else { + validators := make([]*types.Validator, len(genDoc.Validators)) + for i, val := range genDoc.Validators { + validators[i] = types.NewValidator(val.PubKey, val.Power) } + validatorSet = types.NewValidatorSet(validators) + nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementAccum(1) } return State{ - + Version: initStateVersion, ChainID: genDoc.ChainID, LastBlockHeight: 0, LastBlockID: types.BlockID{}, LastBlockTime: genDoc.GenesisTime, - Validators: types.NewValidatorSet(validators), + NextValidators: nextValidatorSet, + Validators: validatorSet, LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: 1, diff --git a/state/state_test.go b/state/state_test.go index 05c1859efb0..88200e17e56 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/types" ) -// setupTestCase does setup common to all test cases +// setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { config := cfg.ResetTestRoot("state_") dbType := dbm.DBBackendType(config.DBBackend) @@ -40,14 +40,27 @@ func TestStateCopy(t *testing.T) { stateCopy := state.Copy() assert.True(state.Equals(stateCopy), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", stateCopy, state)) stateCopy.LastBlockHeight++ - assert.False(state.Equals(stateCopy), cmn.Fmt(`expected states to be different. got same + assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same %v`, state)) }
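As a worked illustration of the WeightedMedian call added in state.go above (a simplified reimplementation of the idea, not the library code): with validators of power 30, 20, and 10 voting at t=1s, 2s, and 3s, the median is 2s, the earliest timestamp by which strictly more than half of the total power (60) has accumulated, which is why a faulty minority cannot drag the block time outside the honest validators' timestamps.

package main

import (
	"fmt"
	"sort"
)

type weightedTime struct {
	unixSec int64 // vote timestamp (toy: plain seconds instead of time.Time)
	power   int64 // voting power behind that timestamp
}

// weightedMedian returns the earliest timestamp at which the running
// sum of voting power strictly exceeds half of the total.
func weightedMedian(wts []weightedTime, totalPower int64) int64 {
	sort.Slice(wts, func(i, j int) bool { return wts[i].unixSec < wts[j].unixSec })
	half := totalPower / 2
	acc := int64(0)
	for _, wt := range wts {
		acc += wt.power
		if acc > half {
			return wt.unixSec
		}
	}
	return 0 // only reached if the weights do not cover totalPower
}

func main() {
	wts := []weightedTime{
		{unixSec: 1, power: 30},
		{unixSec: 2, power: 20},
		{unixSec: 3, power: 10},
	}
	fmt.Println(weightedMedian(wts, 60)) // 2
}

+// TestMakeGenesisStateNilValidators tests state's consistency when the genesis file's validators field is nil.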
+func TestMakeGenesisStateNilValidators(t *testing.T) { + doc := types.GenesisDoc{ + ChainID: "dummy", + Validators: nil, + } + require.Nil(t, doc.ValidateAndComplete()) + state, err := MakeGenesisState(&doc) + require.Nil(t, err) + require.Equal(t, 0, len(state.Validators.Validators)) + require.Equal(t, 0, len(state.NextValidators.Validators)) +} + // TestStateSaveLoad tests saving and loading State from a db. func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) @@ -60,7 +73,7 @@ func TestStateSaveLoad(t *testing.T) { loadedState := LoadState(stateDB) assert.True(state.Equals(loadedState), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", loadedState, state)) } @@ -73,24 +86,24 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ - // build mock responses + // Build mock responses. block := makeBlock(state, 2) abciResponses := NewABCIResponses(block) abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} - abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(ed25519.GenPrivKey().PubKey(), 10), + abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} saveABCIResponses(stateDB, block.Height, abciResponses) loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedABCIResponses, - cmn.Fmt("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", loadedABCIResponses, abciResponses)) } -// TestResultsSaveLoad tests saving and loading abci results. +// TestResultsSaveLoad tests saving and loading ABCI results. func TestABCIResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) @@ -98,8 +111,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { assert := assert.New(t) cases := [...]struct { - // height is implied index+2 - // as block 1 is created from genesis + // Height is implied to equal index+2, + // as block 1 is created from genesis. added []*abci.ResponseDeliverTx expected types.ABCIResults }{ @@ -119,8 +132,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { {Code: 383}, {Data: []byte("Gotcha!"), Tags: []cmn.KVPair{ - cmn.KVPair{[]byte("a"), []byte("1")}, - cmn.KVPair{[]byte("build"), []byte("stuff")}, + cmn.KVPair{Key: []byte("a"), Value: []byte("1")}, + cmn.KVPair{Key: []byte("build"), Value: []byte("stuff")}, }}, }, types.ABCIResults{ @@ -133,14 +146,14 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, } - // query all before, should return error + // Query all before, this should return error. for i := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) assert.Error(err, "%d: %#v", i, res) } - // add all cases + // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save responses := &ABCIResponses{ @@ -150,7 +163,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { saveABCIResponses(stateDB, h, responses) } - // query all before, should return expected value + // Query all before, should return expected value. 
for i, tc := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) @@ -166,34 +179,30 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { // nolint: vetshadow assert := assert.New(t) - // can't load anything for height 0 + // Can't load anything for height 0. v, err := LoadValidators(stateDB, 0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") - // should be able to load for height 1 + // Should be able to load for height 1. v, err = LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - // increment height, save; should be able to load for next height + // Should be able to load for height 2. + v, err = LoadValidators(stateDB, 2) + assert.Nil(err, "expected no err at height 2") + assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") + + // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + vp0, err := LoadValidators(stateDB, nextHeight+0) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // increment height, save; should be able to load for next height - state.LastBlockHeight += 10 - nextHeight = state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + vp1, err := LoadValidators(stateDB, nextHeight+1) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // should be able to load for next next height - _, err = LoadValidators(stateDB, state.LastBlockHeight+2) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") + assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") + assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. @@ -201,20 +210,19 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // build the validator history by running updateState - // with the right validator set for each height + // Build the validator history by running updateState + // with the right validator set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) power := val.VotingPower var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next pubkey + // When we get to a change height, use the next pubkey. 
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ power++ @@ -223,16 +231,16 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { state, err = updateState(state, blockID, &header, responses) assert.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) } - // on each change height, increment the power by one. + // On each height change, increment the power by one. testCases := make([]int64, highestHeight) changeIndex = 0 power = val.VotingPower for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // When we get to the height after a change height, use the next pubkey (note + // our counter starts at 0 this time). if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ power++ @@ -241,7 +249,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := LoadValidators(stateDB, int64(i+1)) + v, err := LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -256,25 +264,42 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) + require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) + state.NextValidators = state.Validators.CopyIncrementAccum(1) SaveState(stateDB, state) defer tearDown(t) - const height = 1 + _, valOld := state.Validators.GetByIndex(0) + var pubkeyOld = valOld.PubKey pubkey := ed25519.GenPrivKey().PubKey() - // swap the first validator with a new one ^^^ (validator set size stays the same) + const height = 1 + + // Swap the first validator with a new one (validator set size stays the same). header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) + + // Save state etc. var err error state, err = updateState(state, blockID, &header, responses) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - v, err := LoadValidators(stateDB, height+1) + // Load nextHeight; it should hold the old pubkey. + v0, err := LoadValidators(stateDB, nextHeight) assert.Nil(t, err) - assert.Equal(t, valSetSize, v.Size()) + assert.Equal(t, valSetSize, v0.Size()) + index, val := v0.GetByAddress(pubkeyOld.Address()) assert.NotNil(t, val) + if index < 0 { + t.Fatal("expected to find old validator") + } - index, val := v.GetByAddress(pubkey.Address()) + // Load nextHeight+1; it should hold the new pubkey.
+ v1, err := LoadValidators(stateDB, nextHeight+1) + assert.Nil(t, err) + assert.Equal(t, valSetSize, v1.Size()) + index, val = v1.GetByAddress(pubkey.Address()) assert.NotNil(t, val) if index < 0 { t.Fatal("expected to find newly added validator") @@ -289,34 +314,46 @@ func genValSet(size int) *types.ValidatorSet { return types.NewValidatorSet(vals) } +func TestStateMakeBlock(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + + proposerAddress := state.Validators.GetProposer().Address + stateVersion := state.Version.Consensus + block := makeBlock(state, 2) + + // Test that we set some fields. + assert.Equal(t, stateVersion, block.Version) + assert.Equal(t, proposerAddress, block.ProposerAddress) +} + // TestConsensusParamsChangesSaveLoad tests saving and loading consensus params // with changes. func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // each valset is just one validator - // create list of them + // Each valset is just one validator. + // Create a list of them. params := make([]types.ConsensusParams, N+1) params[0] = state.ConsensusParams for i := 1; i < N+1; i++ { params[i] = *types.DefaultConsensusParams() - params[i].BlockSize.MaxBytes += i + params[i].BlockSize.MaxBytes += int64(i) } - // build the params history by running updateState - // with the right params set for each height + // Build the params history by running updateState - // with the right params set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 cp := params[changeIndex] var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next params + // When we get to a change height, use the next params. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ cp = params[changeIndex] @@ -329,13 +366,13 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } - // make all the test cases by using the same params until after the change + // Make all the test cases by using the same params until after the change. testCases := make([]paramsChangeTestCase, highestHeight) changeIndex = 0 cp = params[changeIndex] for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // When we get to the height after a change height, use the next pubkey (note + // our counter starts at 0 this time).
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ cp = params[changeIndex] @@ -351,21 +388,14 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } } -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) types.ConsensusParams { - +func makeParams(blockBytes, blockGas, evidenceAge int64) types.ConsensusParams { return types.ConsensusParams{ - BlockSize: types.BlockSize{ + BlockSize: types.BlockSizeParams{ MaxBytes: blockBytes, - MaxTxs: blockTx, - MaxGas: int64(blockGas), + MaxGas: blockGas, }, - TxSize: types.TxSize{ - MaxBytes: txBytes, - MaxGas: int64(txGas), - }, - BlockGossip: types.BlockGossip{ - BlockPartSizeBytes: partSize, + Evidence: types.EvidenceParams{ + MaxAge: evidenceAge, }, } } @@ -375,7 +405,7 @@ func pk() []byte { } func TestApplyUpdates(t *testing.T) { - initParams := makeParams(1, 2, 3, 4, 5, 6) + initParams := makeParams(1, 2, 3) cases := [...]struct { init types.ConsensusParams @@ -386,32 +416,19 @@ func TestApplyUpdates(t *testing.T) { 1: {initParams, abci.ConsensusParams{}, initParams}, 2: {initParams, abci.ConsensusParams{ - TxSize: &abci.TxSize{ - MaxBytes: 123, + BlockSize: &abci.BlockSizeParams{ + MaxBytes: 44, + MaxGas: 55, }, }, - makeParams(1, 2, 3, 123, 5, 6)}, + makeParams(44, 55, 3)}, 3: {initParams, abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - MaxTxs: 44, - MaxGas: 55, - }, - }, - makeParams(1, 44, 55, 4, 5, 6)}, - 4: {initParams, - abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - MaxTxs: 789, - }, - TxSize: &abci.TxSize{ - MaxGas: 888, - }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: 2002, + Evidence: &abci.EvidenceParams{ + MaxAge: 66, }, }, - makeParams(1, 789, 3, 4, 888, 2002)}, + makeParams(1, 2, 66)}, } for i, tc := range cases { @@ -423,18 +440,18 @@ func TestApplyUpdates(t *testing.T) { func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, pubkey crypto.PubKey) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0), - types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10), + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, 0), + types.TM2PB.NewValidatorUpdate(pubkey, 10), }, } } @@ -445,17 +462,17 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, func makeHeaderPartsResponsesValPowerChange(state State, height int64, power int64) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new. 
+ _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power), + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, power), }, } } @@ -466,7 +483,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64, func makeHeaderPartsResponsesParams(state State, height int64, params types.ConsensusParams) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, } diff --git a/state/store.go b/state/store.go index 3a1a6231b73..7a0ef255a7a 100644 --- a/state/store.go +++ b/state/store.go @@ -12,15 +12,15 @@ import ( //------------------------------------------------------------------------ func calcValidatorsKey(height int64) []byte { - return []byte(cmn.Fmt("validatorsKey:%v", height)) + return []byte(fmt.Sprintf("validatorsKey:%v", height)) } func calcConsensusParamsKey(height int64) []byte { - return []byte(cmn.Fmt("consensusParamsKey:%v", height)) + return []byte(fmt.Sprintf("consensusParamsKey:%v", height)) } func calcABCIResponsesKey(height int64) []byte { - return []byte(cmn.Fmt("abciResponsesKey:%v", height)) + return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) } // LoadStateFromDBOrGenesisFile loads the most recent state from the database, @@ -71,7 +71,7 @@ func loadState(db dbm.DB, key []byte) (state State) { err := cdc.UnmarshalBinaryBare(buf, &state) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. @@ -87,7 +87,14 @@ func SaveState(db dbm.DB, state State) { func saveState(db dbm.DB, state State, key []byte) { nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + // If first block, save validators for block 1. + if nextHeight == 1 { + lastHeightVoteChanged := int64(1) // Due to Tendermint validator set changes being delayed 1 block. + saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + } + // Save next validators. + saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + // Save next consensus params. saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) db.SetSync(stateKey, state.Bytes()) } @@ -137,7 +144,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { err := cdc.UnmarshalBinaryBare(buf, abciResponses) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has + cmn.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. 
@@ -200,7 +207,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { err := cdc.UnmarshalBinaryBare(buf, v) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. @@ -244,7 +251,7 @@ func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) return empty, ErrNoConsensusParamsForHeight{height} } - if paramsInfo.ConsensusParams == empty { + if paramsInfo.ConsensusParams.Equals(&empty) { paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) if paramsInfo2 == nil { panic( @@ -271,7 +278,7 @@ func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { err := cdc.UnmarshalBinaryBare(buf, paramsInfo) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. diff --git a/state/tx_filter.go b/state/tx_filter.go new file mode 100644 index 00000000000..518eb187750 --- /dev/null +++ b/state/tx_filter.go @@ -0,0 +1,22 @@ +package state + +import ( + mempl "github.com/tendermint/tendermint/mempool" + "github.com/tendermint/tendermint/types" +) + +// TxPreCheck returns a function to filter transactions before processing. +// The function limits the size of a transaction to the block's maximum data size. +func TxPreCheck(state State) mempl.PreCheckFunc { + maxDataBytes := types.MaxDataBytesUnknownEvidence( + state.ConsensusParams.BlockSize.MaxBytes, + state.Validators.Size(), + ) + return mempl.PreCheckAminoMaxBytes(maxDataBytes) +} + +// TxPostCheck returns a function to filter transactions after processing. +// The function limits the gas wanted by a transaction to the block's maximum total gas. +func TxPostCheck(state State) mempl.PostCheckFunc { + return mempl.PostCheckMaxGas(state.ConsensusParams.BlockSize.MaxGas) +} diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go new file mode 100644 index 00000000000..52ae396bf96 --- /dev/null +++ b/state/tx_filter_test.go @@ -0,0 +1,57 @@ +package state + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" +) + +func TestTxFilter(t *testing.T) { + genDoc := randomGenesisDoc() + genDoc.ConsensusParams.BlockSize.MaxBytes = 3000 + + // Max size of Txs is much smaller than size of block, + // since we need to account for commits and evidence. 
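A back-of-the-envelope sketch of why the cutoff in the cases below sits near 1810 bytes rather than 3000: TxPreCheck budgets a block's MaxBytes minus header, commit, and evidence overhead. The overhead constants below are invented for illustration only; the real numbers come from the tendermint types package.

package main

import "fmt"

// maxDataBytes mimics the shape of the computation: take the block byte
// budget and subtract fixed overhead plus per-validator commit overhead.
// Both constants are illustrative, not the real amino sizes.
func maxDataBytes(maxBlockBytes int64, nVals int) int64 {
	const blockOverhead = 1000   // header + evidence reservation (illustrative)
	const perValidatorVote = 190 // one precommit per validator (illustrative)
	return maxBlockBytes - blockOverhead - int64(nVals)*perValidatorVote
}

func preCheck(txLen int, budget int64) error {
	if int64(txLen) > budget {
		return fmt.Errorf("tx of %d bytes exceeds budget %d", txLen, budget)
	}
	return nil
}

func main() {
	budget := maxDataBytes(3000, 1)
	fmt.Println(budget)                 // 1810 with these toy constants
	fmt.Println(preCheck(1810, budget)) // <nil>
	fmt.Println(preCheck(1811, budget)) // error
}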
+ testCases := []struct { + tx types.Tx + isErr bool + }{ + {types.Tx(cmn.RandBytes(250)), false}, + {types.Tx(cmn.RandBytes(1809)), false}, + {types.Tx(cmn.RandBytes(1810)), false}, + {types.Tx(cmn.RandBytes(1811)), true}, + {types.Tx(cmn.RandBytes(1812)), true}, + {types.Tx(cmn.RandBytes(3000)), true}, + } + + for i, tc := range testCases { + stateDB := dbm.NewDB("state", "memdb", os.TempDir()) + state, err := LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + require.NoError(t, err) + + f := TxPreCheck(state) + if tc.isErr { + assert.NotNil(t, f(tc.tx), "#%v", i) + } else { + assert.Nil(t, f(tc.tx), "#%v", i) + } + } +} + +func randomGenesisDoc() *types.GenesisDoc { + pubkey := ed25519.GenPrivKey().PubKey() + return &types.GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: "abc", + Validators: []types.GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, + ConsensusParams: types.DefaultConsensusParams(), + } +} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 707325929aa..363ab1193ec 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -89,6 +89,11 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { } } + // index tx by height + if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { + storeBatch.Set(keyForHeight(result), hash) + } + // index tx by hash rawBytes, err := cdc.MarshalBinaryBare(result) if err != nil { @@ -114,6 +119,11 @@ func (txi *TxIndex) Index(result *types.TxResult) error { } } + // index tx by height + if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { + b.Set(keyForHeight(result), hash) + } + // index tx by hash rawBytes, err := cdc.MarshalBinaryBare(result) if err != nil { @@ -153,12 +163,6 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { // conditions to skip because they're handled before "everything else" skipIndexes := make([]int, 0) - // if there is a height condition ("tx.height=3"), extract it for faster lookups - height, heightIndex := lookForHeight(conditions) - if heightIndex >= 0 { - skipIndexes = append(skipIndexes, heightIndex) - } - // extract ranges // if both upper and lower bounds exist, it's better to get them in order not // no iterate over kvs that are not within range. 
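A small sketch of the composite-key layout produced by the keyForHeight helper added further down (tag/value/height/index, where for the height pseudo-tag the value and the height coincide), and of why a height query then reduces to an ordered prefix scan. This is a toy in-memory map; the real index lives in a key-value DB.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// keyForHeight mirrors the tag/value/height/index layout; value and
// height are the same number for the height pseudo-tag.
func keyForHeight(height, index int) string {
	return fmt.Sprintf("tx.height/%d/%d/%d", height, height, index)
}

func main() {
	index := map[string]string{
		keyForHeight(3, 0): "hash-a",
		keyForHeight(3, 1): "hash-b",
		keyForHeight(4, 0): "hash-c",
	}

	// The query "tx.height = 3" becomes a prefix scan.
	prefix := "tx.height/3/"
	var hits []string
	for k := range index {
		if strings.HasPrefix(k, prefix) {
			hits = append(hits, k)
		}
	}
	sort.Strings(hits)
	for _, k := range hits {
		fmt.Println(k, "=>", index[k]) // hash-a, then hash-b
	}
}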
@@ -176,6 +180,9 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { } } + // if there is a height condition ("tx.height=3"), extract it + height := lookForHeight(conditions) + // for all other conditions for i, c := range conditions { if cmn.IntInSlice(i, skipIndexes) { @@ -218,13 +225,13 @@ func lookForHash(conditions []query.Condition) (hash []byte, err error, ok bool) return } -func lookForHeight(conditions []query.Condition) (height int64, index int) { - for i, c := range conditions { +func lookForHeight(conditions []query.Condition) (height int64) { + for _, c := range conditions { if c.Tag == types.TxHeightKey { - return c.Operand.(int64), i + return c.Operand.(int64) } } - return 0, -1 + return 0 } // special map to hold range conditions @@ -421,6 +428,10 @@ func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index)) } +func keyForHeight(result *types.TxResult) []byte { + return []byte(fmt.Sprintf("%s/%d/%d/%d", types.TxHeightKey, result.Height, result.Height, result.Index)) +} + /////////////////////////////////////////////////////////////////////////////// // Utils diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index c32c827d40a..78a76168d1d 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -156,8 +156,8 @@ func TestIndexAllTags(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) txResult := txResultWithTags([]cmn.KVPair{ - cmn.KVPair{[]byte("account.owner"), []byte("Ivan")}, - cmn.KVPair{[]byte("account.number"), []byte("1")}, + cmn.KVPair{Key: []byte("account.owner"), Value: []byte("Ivan")}, + cmn.KVPair{Key: []byte("account.number"), Value: []byte("1")}, }) err := indexer.Index(txResult) @@ -190,19 +190,6 @@ func txResultWithTags(tags []cmn.KVPair) *types.TxResult { } func benchmarkTxIndex(txsCount int64, b *testing.B) { - tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: []cmn.KVPair{}, - }, - } - dir, err := ioutil.TempDir("", "tx_index_db") if err != nil { b.Fatal(err) @@ -213,11 +200,24 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { indexer := NewTxIndex(store) batch := txindex.NewBatch(txsCount) + txIndex := uint32(0) for i := int64(0); i < txsCount; i++ { + tx := cmn.RandBytes(250) + txResult := &types.TxResult{ + Height: 1, + Index: txIndex, + Tx: tx, + Result: abci.ResponseDeliverTx{ + Data: []byte{0}, + Code: abci.CodeTypeOK, + Log: "", + Tags: []cmn.KVPair{}, + }, + } if err := batch.Add(txResult); err != nil { b.Fatal(err) } - txResult.Index++ + txIndex++ } b.ResetTimer() diff --git a/state/validation.go b/state/validation.go index c3633920322..e28d40e8bd9 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,66 +5,99 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/crypto" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------- // Validate block func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { - // validate internal consistency + // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err } - // validate basic info + // Validate basic info. 
+ if block.Version != state.Version.Consensus { + return fmt.Errorf("Wrong Block.Header.Version. Expected %v, got %v", + state.Version.Consensus, + block.Version, + ) + } if block.ChainID != state.ChainID { - return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID) + return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", + state.ChainID, + block.ChainID, + ) } if block.Height != state.LastBlockHeight+1 { - return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height) + return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", + state.LastBlockHeight+1, + block.Height, + ) } - /* TODO: Determine bounds for Time - See blockchain/reactor "stopSyncingDurationMinutes" - - if !block.Time.After(lastBlockTime) { - return errors.New("Invalid Block.Header.Time") - } - */ - // validate prev block info + // Validate prev block info. if !block.LastBlockID.Equals(state.LastBlockID) { - return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID) + return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", + state.LastBlockID, + block.LastBlockID, + ) } + newTxs := int64(len(block.Data.Txs)) if block.TotalTxs != state.LastBlockTotalTx+newTxs { - return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs) + return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", + state.LastBlockTotalTx+newTxs, + block.TotalTxs, + ) } - // validate app info + // Validate app info if !bytes.Equal(block.AppHash, state.AppHash) { - return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash) + return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", + state.AppHash, + block.AppHash, + ) } if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) { - return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", state.ConsensusParams.Hash(), block.ConsensusHash) + return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", + state.ConsensusParams.Hash(), + block.ConsensusHash, + ) } if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) { - return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", state.LastResultsHash, block.LastResultsHash) + return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", + state.LastResultsHash, + block.LastResultsHash, + ) } if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { - return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash) + return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", + state.Validators.Hash(), + block.ValidatorsHash, + ) + } + if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { + return fmt.Errorf("Wrong Block.Header.NextValidatorsHash. Expected %X, got %v", + state.NextValidators.Hash(), + block.NextValidatorsHash, + ) } // Validate block LastCommit. if block.Height == 1 { if len(block.LastCommit.Precommits) != 0 { - return errors.New("Block at height 1 (first block) should have no LastCommit precommits") + return errors.New("Block at height 1 can't have LastCommit precommits") } } else { if len(block.LastCommit.Precommits) != state.LastValidators.Size() { return fmt.Errorf("Invalid block commit size. 
Expected %v, got %v", - state.LastValidators.Size(), len(block.LastCommit.Precommits)) + state.LastValidators.Size(), + len(block.LastCommit.Precommits), + ) } err := state.LastValidators.VerifyCommit( state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) @@ -73,15 +106,56 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } - // TODO: Each check requires loading an old validator set. - // We should cap the amount of evidence per block - // to prevent potential proposer DoS. + // Validate block Time + if block.Height > 1 { + if !block.Time.After(state.LastBlockTime) { + return fmt.Errorf("Block time %v not greater than last block time %v", + block.Time, + state.LastBlockTime, + ) + } + + medianTime := MedianTime(block.LastCommit, state.LastValidators) + if !block.Time.Equal(medianTime) { + return fmt.Errorf("Invalid block time. Expected %v, got %v", + medianTime, + block.Time, + ) + } + } else if block.Height == 1 { + genesisTime := state.LastBlockTime + if !block.Time.Equal(genesisTime) { + return fmt.Errorf("Block time %v is not equal to genesis time %v", + block.Time, + genesisTime, + ) + } + } + + // Limit the amount of evidence + maxEvidenceBytes := types.MaxEvidenceBytesPerBlock(state.ConsensusParams.BlockSize.MaxBytes) + evidenceBytes := int64(len(block.Evidence.Evidence)) * types.MaxEvidenceBytes + if evidenceBytes > maxEvidenceBytes { + return types.NewErrEvidenceOverflow(maxEvidenceBytes, evidenceBytes) + } + + // Validate all evidence. for _, ev := range block.Evidence.Evidence { if err := VerifyEvidence(stateDB, state, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) + return types.NewErrEvidenceInvalid(ev, err) } } + // NOTE: We can't actually verify it's the right proposer because we dont + // know what round the block was first proposed. So just check that it's + // a legit address and a known validator. + if len(block.ProposerAddress) != crypto.AddressSize || + !state.Validators.HasAddress(block.ProposerAddress) { + return fmt.Errorf("Block.Header.ProposerAddress, %X, is not a validator", + block.ProposerAddress, + ) + } + return nil } @@ -94,7 +168,7 @@ func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error height := state.LastBlockHeight evidenceAge := height - evidence.Height() - maxAge := state.ConsensusParams.EvidenceParams.MaxAge + maxAge := state.ConsensusParams.Evidence.MaxAge if evidenceAge > maxAge { return fmt.Errorf("Evidence from height %d is too old. 
Min height is %d", evidence.Height(), height-maxAge) diff --git a/state/validation_test.go b/state/validation_test.go index 362a407374f..f89fbdea989 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -2,67 +2,131 @@ package state import ( "testing" + "time" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tendermint/libs/db" + + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" ) -func TestValidateBlock(t *testing.T) { - state, _ := state(1, 1) +// TODO(#2589): +// - generalize this past the first height +// - add txs and build up full State properly +// - test block.Time (see #2587 - there are no conditions on time for the first height) +func TestValidateBlockHeader(t *testing.T) { + var height int64 = 1 // TODO(#2589): generalize + state, stateDB := state(1, int(height)) - blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil) + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil) - // proper block must pass - block := makeBlock(state, 1) + // A good block passes. + block := makeBlock(state, height) err := blockExec.ValidateBlock(state, block) require.NoError(t, err) - // wrong chain fails - block = makeBlock(state, 1) - block.ChainID = "not-the-real-one" - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + // some bad values + wrongHash := tmhash.Sum([]byte("this hash is wrong")) + wrongVersion1 := state.Version.Consensus + wrongVersion1.Block += 1 + wrongVersion2 := state.Version.Consensus + wrongVersion2.App += 1 - // wrong height fails - block = makeBlock(state, 1) - block.Height += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + // Manipulation of any header field causes failure. 
+ testCases := []struct { + name string + malleateBlock func(block *types.Block) + }{ + {"Version wrong1", func(block *types.Block) { block.Version = wrongVersion1 }}, + {"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }}, + {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, + {"Height wrong", func(block *types.Block) { block.Height += 10 }}, + {"Time wrong", func(block *types.Block) { block.Time = block.Time.Add(-time.Second * 3600 * 24) }}, + {"NumTxs wrong", func(block *types.Block) { block.NumTxs += 10 }}, + {"TotalTxs wrong", func(block *types.Block) { block.TotalTxs += 10 }}, - // wrong total tx fails - block = makeBlock(state, 1) - block.TotalTxs += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"LastBlockID wrong", func(block *types.Block) { block.LastBlockID.PartsHeader.Total += 10 }}, + {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, + {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, - // wrong blockid fails - block = makeBlock(state, 1) - block.LastBlockID.PartsHeader.Total += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"ValidatorsHash wrong", func(block *types.Block) { block.ValidatorsHash = wrongHash }}, + {"NextValidatorsHash wrong", func(block *types.Block) { block.NextValidatorsHash = wrongHash }}, + {"ConsensusHash wrong", func(block *types.Block) { block.ConsensusHash = wrongHash }}, + {"AppHash wrong", func(block *types.Block) { block.AppHash = wrongHash }}, + {"LastResultsHash wrong", func(block *types.Block) { block.LastResultsHash = wrongHash }}, - // wrong app hash fails - block = makeBlock(state, 1) - block.AppHash = []byte("wrong app hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + {"EvidenceHash wrong", func(block *types.Block) { block.EvidenceHash = wrongHash }}, + {"Proposer wrong", func(block *types.Block) { block.ProposerAddress = ed25519.GenPrivKey().PubKey().Address() }}, + {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, + } - // wrong consensus hash fails - block = makeBlock(state, 1) - block.ConsensusHash = []byte("wrong consensus hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) + for _, tc := range testCases { + block := makeBlock(state, height) + tc.malleateBlock(block) + err := blockExec.ValidateBlock(state, block) + require.Error(t, err, tc.name) + } +} - // wrong results hash fails - block = makeBlock(state, 1) - block.LastResultsHash = []byte("wrong results hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) +/* + TODO(#2589): + - test Block.Data.Hash() == Block.DataHash + - test len(Block.Data.Txs) == Block.NumTxs +*/ +func TestValidateBlockData(t *testing.T) { +} + +/* + TODO(#2589): + - test len(block.LastCommit.Precommits) == state.LastValidators.Size() + - test state.LastValidators.VerifyCommit +*/ +func TestValidateBlockCommit(t *testing.T) { +} + +/* + TODO(#2589): + - test good/bad evidence in block +*/ +func TestValidateBlockEvidence(t *testing.T) { + var height int64 = 1 // TODO(#2589): generalize + state, stateDB := state(1, int(height)) + + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), nil, nil, nil) - // wrong validators hash fails - block = makeBlock(state, 1) - block.ValidatorsHash = []byte("wrong validators hash") + // make some evidence + addr, _ := state.Validators.GetByIndex(0) + goodEvidence := 
types.NewMockGoodEvidence(height, 0, addr) + + // A block with a couple pieces of evidence passes. + block := makeBlock(state, height) + block.Evidence.Evidence = []types.Evidence{goodEvidence, goodEvidence} + block.EvidenceHash = block.Evidence.Hash() + err := blockExec.ValidateBlock(state, block) + require.NoError(t, err) + + // A block with too much evidence fails. + maxBlockSize := state.ConsensusParams.BlockSize.MaxBytes + maxEvidenceBytes := types.MaxEvidenceBytesPerBlock(maxBlockSize) + maxEvidence := maxEvidenceBytes / types.MaxEvidenceBytes + require.True(t, maxEvidence > 2) + for i := int64(0); i < maxEvidence; i++ { + block.Evidence.Evidence = append(block.Evidence.Evidence, goodEvidence) + } + block.EvidenceHash = block.Evidence.Hash() err = blockExec.ValidateBlock(state, block) require.Error(t, err) + _, ok := err.(*types.ErrEvidenceOverflow) + require.True(t, ok) +} + +/* + TODO(#2589): + - test unmarshalling BlockParts that are too big into a Block that + (note this logic happens in the consensus, not in the validation here). + - test making blocks from the types.MaxXXX functions works/fails as expected +*/ +func TestValidateBlockSize(t *testing.T) { } diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index c55713c7f66..51c0d9b7ebc 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -2,13 +2,13 @@ package main import ( "encoding/hex" - "encoding/json" "fmt" "os" "context" - "github.com/tendermint/tendermint/rpc/grpc" + amino "github.com/tendermint/go-amino" + core_grpc "github.com/tendermint/tendermint/rpc/grpc" ) var grpcAddr = "tcp://localhost:36656" @@ -27,13 +27,13 @@ func main() { } clientGRPC := core_grpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes}) + res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: txBytes}) if err != nil { fmt.Println(err) os.Exit(1) } - bz, err := json.Marshal(res) + bz, err := amino.NewCodec().MarshalJSON(res) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 67f6b583cb2..034e28878d5 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -41,7 +41,7 @@ set -e # we should not be able to look up the value RESPONSE=`abci-cli query \"$VALUE\"` set +e -A=`echo $RESPONSE | grep $VALUE` +A=`echo $RESPONSE | grep \"value: $VALUE\"` if [[ $? == 0 ]]; then echo "Found '$VALUE' for $VALUE when we should not have. Response:" echo "$RESPONSE" diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 34968c39149..1a64d4173f0 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -14,6 +14,7 @@ ENV GOBIN $GOPATH/bin WORKDIR $REPO # Copy in the code +# TODO: rewrite to only copy Makefile & other files? COPY . $REPO # Install the vendored dependencies @@ -21,8 +22,17 @@ COPY . $REPO RUN make get_tools RUN make get_vendor_deps -RUN go install ./cmd/tendermint -RUN go install ./abci/cmd/abci-cli +# install ABCI CLI +RUN make install_abci + +# install Tendermint +RUN make install + +RUN tendermint testnet --node-dir-prefix="mach" --v=4 --populate-persistent-peers=false --o=$REPO/test/p2p/data + +# Now copy in the code +# NOTE: this will overwrite whatever is in vendor/ +COPY . 
$REPO # expose the volume for debugging VOLUME $REPO diff --git a/test/p2p/README.md b/test/p2p/README.md index 4ee3690af0f..956ce906cac 100644 --- a/test/p2p/README.md +++ b/test/p2p/README.md @@ -19,7 +19,7 @@ docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet ``` This gives us a new network with IP addresses in the range `172.57.0.0 - 172.57.255.255`. -Peers on the network can have any IP address in this range. +Peers on the network can have any IP address in this range. For our four node network, let's pick `172.57.0.101 - 172.57.0.104`. Since we use Tendermint's default listening port of 26656, our list of seed nodes will look like: @@ -37,7 +37,7 @@ for i in $(seq 1 4); do --ip="172.57.0.$((100 + $i))" \ --name local_testnet_$i \ --entrypoint tendermint \ - -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \ + -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((i-1)) \ tendermint_tester node --p2p.persistent_peers 172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 --proxy_app=kvstore done ``` @@ -47,8 +47,5 @@ If you now run `docker ps`, you'll see your containers! We can confirm they are making blocks by checking the `/status` message using `curl` and `jq` to pretty print the output json: ``` -curl 172.57.0.101:26657/status | jq . +curl 172.57.0.101:26657/status | jq . ``` - - - diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh index caf665122ff..423b5b0179c 100755 --- a/test/p2p/basic/test.sh +++ b/test/p2p/basic/test.sh @@ -56,6 +56,7 @@ for i in `seq 1 $N`; do # - assert block height is greater than 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` COUNT=0 + echo "BLOCK_HEIGHT IS $BLOCK_HEIGHT" while [ "$BLOCK_HEIGHT" -le 1 ]; do echo "Waiting for node $i to commit a block ..."
sleep 1 diff --git a/test/p2p/circleci.sh b/test/p2p/circleci.sh index 19200afbecb..c548d5752f0 100644 --- a/test/p2p/circleci.sh +++ b/test/p2p/circleci.sh @@ -6,7 +6,7 @@ SOURCE="${BASH_SOURCE[0]}" while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" -LOGS_DIR="$DIR/../logs" +LOGS_DIR="$DIR/logs" echo echo "* [$(date +"%T")] cleaning up $LOGS_DIR" rm -rf "$LOGS_DIR" @@ -33,3 +33,7 @@ fi echo echo "* [$(date +"%T")] running p2p tests on a local docker network" bash "$DIR/../p2p/test.sh" tester + +echo +echo "* [$(date +"%T")] copying log files out of docker container into $LOGS_DIR" +docker cp rsyslog:/var/log $LOGS_DIR diff --git a/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json deleted file mode 100644 index 515c10714d2..00000000000 --- a/test/p2p/data/mach1/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json deleted file mode 100644 index 4fa9608501d..00000000000 --- a/test/p2p/data/mach1/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ==" - } -} diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json deleted file mode 100644 index ea2a01f5cad..00000000000 --- a/test/p2p/data/mach1/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "AE47BBD4B3ACD80BFE17F6E0C66C5B8492A81AE4", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg==" - } -} diff --git a/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json deleted file mode 100644 index 515c10714d2..00000000000 --- a/test/p2p/data/mach2/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - 
"power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json deleted file mode 100644 index 6eb15110661..00000000000 --- a/test/p2p/data/mach2/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA==" - } -} diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json deleted file mode 100644 index 6e0cd7f8f9a..00000000000 --- a/test/p2p/data/mach2/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "5D61EE46CCE91F579086522D7FD8CEC3F854E946", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ==" - } -} diff --git a/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json deleted file mode 100644 index 515c10714d2..00000000000 --- a/test/p2p/data/mach3/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json deleted file mode 100644 index 0885bcf9c67..00000000000 --- a/test/p2p/data/mach3/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg==" - } -} diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json deleted file mode 100644 index ec68ca7bb0b..00000000000 --- a/test/p2p/data/mach3/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "705F9DA2CC7D7AF5F4519455ED99622E40E439A1", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ==" - } -} diff --git a/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json deleted file mode 100644 index 515c10714d2..00000000000 --- 
a/test/p2p/data/mach4/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": "1", - "name": "mach1" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": "1", - "name": "mach2" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": "1", - "name": "mach3" - }, - { - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": "1", - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json deleted file mode 100644 index d6a5d79c233..00000000000 --- a/test/p2p/data/mach4/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "QIIm8/QEEawiJi3Zozv+J9b+1CufCEkGs3lxGMlRy4L4FVIXCoXJTwYIrotZtwoMqLYEqQV1hbKKJmFA3GFelw==" - } -} diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json deleted file mode 100644 index 468550ea8c7..00000000000 --- a/test/p2p/data/mach4/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "D1054266EC9EEA511ED9A76DEFD520BBE1B5E850", - "pub_key": { - "type": "tendermint/PubKeyEd25519", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "last_height": "0", - "last_round": "0", - "last_step": 0, - "priv_key": { - "type": "tendermint/PrivKeyEd25519", - "value": "xMw+0o8CDC29qYvNvwjDztNwRw508l6TjV0pXo49KwyevI9YztS0bc1auKulkd0lPNfLUDcnP9oyvAtkYcTv2Q==" - } -} diff --git a/test/p2p/ip_plus_id.sh b/test/p2p/ip_plus_id.sh index 0d2248fe049..95871d3f1d2 100755 --- a/test/p2p/ip_plus_id.sh +++ b/test/p2p/ip_plus_id.sh @@ -3,5 +3,5 @@ set -eu ID=$1 DOCKER_IMAGE=$2 -NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core $DOCKER_IMAGE tendermint show_node_id)" +NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1)) $DOCKER_IMAGE tendermint show_node_id)" echo "$NODEID@172.57.0.$((100+$ID))" diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh index 87a7681109e..95da7484ebd 100644 --- a/test/p2p/kill_all/check_peers.sh +++ b/test/p2p/kill_all/check_peers.sh @@ -23,7 +23,7 @@ set -e # get the first peer's height addr=$(test/p2p/ip.sh 1):26657 -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) +h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "1st peer is on height $h1" echo "Waiting until other peers reporting a height higher than the 1st one" @@ -33,7 +33,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do while [[ $hi -le $h1 ]] ; do addr=$(test/p2p/ip.sh "$i"):26657 - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) + hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "... 
peer $i is on height $hi" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 15d44ff3323..63d46f8d582 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,14 +14,33 @@ echo "starting tendermint peer ID=$ID" # start tendermint container on the network # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. -docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "$ID") \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" + +# test/p2p/data/mach$((ID-1)) data is generated in test/docker/Dockerfile using +# the tendermint testnet command. +if [[ "$ID" == "x" ]]; then # Set "x" to "1" to print to console. + docker run \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & +else + docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$((ID-1))" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" +fi diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh index d54bcf428a4..06f9212fd05 100644 --- a/test/p2p/pex/test_addrbook.sh +++ b/test/p2p/pex/test_addrbook.sh @@ -16,24 +16,29 @@ CLIENT_NAME="pex_addrbook_$ID" echo "1. restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" # preserve addrbook.json -docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" +docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" "/tmp/addrbook.json" set +e #CIRCLE docker rm -vf "local_testnet_$ID" set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" -docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +echo "started local_testnet_$ID" + +# if the client runs forever, it means addrbook wasn't saved or was empty +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" + +# Now we know that the node is up. 
+ +docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" echo "with the following addrbook:" cat /tmp/addrbook.json # exec doesn't work on circle -# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" +# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach0/config/addrbook.json" echo "" -# if the client runs forever, it means addrbook wasn't saved or was empty -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" - echo "----------------------------------------------------------------------" echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" echo "(assuming peers are started with pex enabled)" @@ -42,16 +47,20 @@ CLIENT_NAME="pex_no_addrbook_$ID" echo "1. restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" set +e #CIRCLE docker rm -vf "local_testnet_$ID" set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +echo "started local_testnet_$ID" # if the client runs forever, it means other peers have removed us from their books (which should not happen) bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" +# Now we know that the node is up. + echo "" echo "PASS" echo "" diff --git a/tools/tm-bench/Dockerfile b/tools/tm-bench/Dockerfile index 97eebfcfc93..562580d2d42 100644 --- a/tools/tm-bench/Dockerfile +++ b/tools/tm-bench/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.7 +FROM alpine:3.8 WORKDIR /app COPY ./dist/linux-amd64/tm-bench /app/tm-bench diff --git a/tools/tm-bench/Makefile b/tools/tm-bench/Makefile index f48ea0adda4..b79de8b1844 100644 --- a/tools/tm-bench/Makefile +++ b/tools/tm-bench/Makefile @@ -1,5 +1,5 @@ DIST_DIRS := find * -type d -exec -VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go) +VERSION := $(shell perl -ne '/^TMCoreSemVer = "([^"]+)"$$/ && print "v$$1\n"' ../../version/version.go) all: build test install @@ -37,7 +37,7 @@ dist: build-all build-docker: rm -f ./tm-bench - docker run -it --rm -v "$(PWD):/go/src/app" -w "/go/src/app" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-bench + docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-bench" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-bench docker build -t "tendermint/bench" . clean: diff --git a/tools/tm-bench/README.md b/tools/tm-bench/README.md index 000f20f3788..9159a754681 100644 --- a/tools/tm-bench/README.md +++ b/tools/tm-bench/README.md @@ -4,49 +4,72 @@ Tendermint blockchain benchmarking tool: - https://github.com/tendermint/tools/tree/master/tm-bench -For example, the following: - - tm-bench -T 10 -r 1000 localhost:26657 +For example, the following: `tm-bench -T 30 -r 10000 localhost:26657` will output: - Stats Avg StdDev Max Total - Txs/sec 818 532 1549 9000 - Blocks/sec 0.818 0.386 1 9 +``` +Stats Avg StdDev Max Total +Txs/sec 3981 1993 5000 119434 +Blocks/sec 0.800 0.400 1 24 +``` +NOTE: **tm-bench only works with build-in `kvstore` ABCI application**. For it +to work with your application, you will need to modify `generateTx` function. 
+In the future, we plan to support scriptable transactions (see +[\#1938](https://github.com/tendermint/tendermint/issues/1938)). ## Quick Start +### Docker + +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore + +docker run -it --rm --link=tm tendermint/bench tm:26657 +``` + +### Using binaries + [Install Tendermint](https://github.com/tendermint/tendermint#install) -This currently is setup to work on tendermint's develop branch. Please ensure -you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use - the master branch.) then run: - tendermint init - tendermint node --proxy_app=kvstore +``` +tendermint init +tendermint node --proxy_app=kvstore - tm-bench localhost:26657 +tm-bench localhost:26657 +``` -with the last command being in a seperate window. +with the last command being in a separate window. ## Usage - tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] - - Examples: - tm-bench localhost:26657 - Flags: - -T int - Exit after the specified amount of time in seconds (default 10) - -c int - Connections to keep open per endpoint (default 1) - -r int - Txs per second to send in a connection (default 1000) - -s int - Size per tx in bytes - -v Verbose output +``` +Tendermint blockchain benchmarking tool. + +Usage: + tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] [-output-format [-broadcast-tx-method ]] + +Examples: + tm-bench localhost:26657 +Flags: + -T int + Exit after the specified amount of time in seconds (default 10) + -broadcast-tx-method string + Broadcast method: async (no guarantees; fastest), sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest) (default "async") + -c int + Connections to keep open per endpoint (default 1) + -output-format string + Output format: plain or json (default "plain") + -r int + Txs per second to send in a connection (default 1000) + -s int + The size of a transaction in bytes, must be greater than or equal to 40. (default 250) + -v Verbose output +``` ## How stats are collected @@ -72,9 +95,11 @@ that tm-bench sends. Similarly the end of the duration will likely end mid-way through tendermint trying to build the next block. -Each of the connections is handled via two separate goroutines. +Each of the connections is handled via two separate goroutines. 
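+
+As a rough, self-contained sketch of that shape (the function and channel
+names below are illustrative assumptions, not the actual tm-bench internals):
+a sender goroutine pushes a batch of txs once per second, while a separate
+receiver goroutine drains them so the sender never blocks.
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+)
+
+// startConnection models one tm-bench connection: a sender goroutine that
+// emits txsRate txs per second, and a receiver goroutine that drains them.
+func startConnection(txsRate int, stop <-chan struct{}) <-chan struct{} {
+	txs := make(chan []byte, txsRate)
+	done := make(chan struct{})
+
+	go func() { // sender
+		ticker := time.NewTicker(time.Second)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-ticker.C:
+				for i := 0; i < txsRate; i++ {
+					txs <- []byte(fmt.Sprintf("tx-%d", i))
+				}
+			case <-stop:
+				close(txs)
+				return
+			}
+		}
+	}()
+
+	go func() { // receiver
+		n := 0
+		for range txs {
+			n++
+		}
+		fmt.Println("processed", n, "txs")
+		close(done)
+	}()
+
+	return done
+}
+
+func main() {
+	stop := make(chan struct{})
+	done := startConnection(5, stop)
+	time.Sleep(2500 * time.Millisecond) // two ticks fire: 10 txs total
+	close(stop)
+	<-done
+}
+```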
## Development - make get_vendor_deps - make test +``` +make get_vendor_deps +make test +``` diff --git a/tools/tm-bench/main.go b/tools/tm-bench/main.go index 64fb28ca66b..b16e6815039 100644 --- a/tools/tm-bench/main.go +++ b/tools/tm-bench/main.go @@ -4,8 +4,10 @@ import ( "flag" "fmt" "os" + "os/signal" "strings" "sync" + "syscall" "time" "github.com/go-kit/kit/log/term" @@ -26,7 +28,7 @@ func main() { flagSet.IntVar(&connections, "c", 1, "Connections to keep open per endpoint") flagSet.IntVar(&durationInt, "T", 10, "Exit after the specified amount of time in seconds") flagSet.IntVar(&txsRate, "r", 1000, "Txs per second to send in a connection") - flagSet.IntVar(&txSize, "s", 250, "The size of a transaction in bytes.") + flagSet.IntVar(&txSize, "s", 250, "The size of a transaction in bytes, must be greater than or equal to 40.") flagSet.StringVar(&outputFormat, "output-format", "plain", "Output format: plain or json") flagSet.StringVar(&broadcastTxMethod, "broadcast-tx-method", "async", "Broadcast method: async (no guarantees; fastest), sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest)") flagSet.BoolVar(&verbose, "v", false, "Verbose output") @@ -52,8 +54,7 @@ Examples: if verbose { if outputFormat == "json" { - fmt.Fprintln(os.Stderr, "Verbose mode not supported with json output.") - os.Exit(1) + printErrorAndExit("Verbose mode not supported with json output.") } // Color errors red colorFn := func(keyvals ...interface{}) term.FgBgColor { @@ -69,14 +70,14 @@ Examples: fmt.Printf("Running %ds test @ %s\n", durationInt, flagSet.Arg(0)) } + if txSize < 40 { + printErrorAndExit("The size of a transaction must be greater than or equal to 40.") + } + if broadcastTxMethod != "async" && broadcastTxMethod != "sync" && broadcastTxMethod != "commit" { - fmt.Fprintln( - os.Stderr, - "broadcast-tx-method should be either 'sync', 'async' or 'commit'.", - ) - os.Exit(1) + printErrorAndExit("broadcast-tx-method should be either 'sync', 'async' or 'commit'.") } var ( @@ -94,7 +95,20 @@ Examples: "broadcast_tx_"+broadcastTxMethod, ) - // Wait until transacters have begun until we get the start time + // Quit when interrupted or when SIGTERM is received. + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + for sig := range c { + fmt.Printf("captured %v, exiting...\n", sig) + for _, t := range transacters { + t.Stop() + } + os.Exit(1) + } + }() + + // Wait until all transacters have begun before we record the start time.
timeStart := time.Now() logger.Info("Time last transacter started", "t", timeStart) @@ -121,8 +135,7 @@ Examples: durationInt, ) if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) + printErrorAndExit(err.Error()) } printStatistics(stats, outputFormat) @@ -174,3 +187,8 @@ func startTransacters( return transacters } + +func printErrorAndExit(err string) { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) +} diff --git a/tools/tm-bench/transacter_test.go b/tools/tm-bench/transacter_test.go index 086a43c31a0..03f3046051b 100644 --- a/tools/tm-bench/transacter_test.go +++ b/tools/tm-bench/transacter_test.go @@ -22,7 +22,7 @@ func TestGenerateTxUpdateTxConsistentency(t *testing.T) { hostname string numTxsToTest int }{ - {0, 0, 50, "localhost:26657", 1000}, + {0, 0, 40, "localhost:26657", 1000}, {70, 300, 10000, "localhost:26657", 1000}, {0, 50, 100000, "localhost:26657", 1000}, } diff --git a/tools/tm-monitor/Dockerfile b/tools/tm-monitor/Dockerfile index 9c7406e1acc..be7a8349893 100644 --- a/tools/tm-monitor/Dockerfile +++ b/tools/tm-monitor/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.6 +FROM alpine:3.8 WORKDIR /app COPY ./dist/linux-amd64/tm-monitor /app/tm-monitor diff --git a/tools/tm-monitor/Makefile b/tools/tm-monitor/Makefile index c1e2825e629..520bb47aefe 100644 --- a/tools/tm-monitor/Makefile +++ b/tools/tm-monitor/Makefile @@ -1,5 +1,5 @@ DIST_DIRS := find * -type d -exec -VERSION := $(shell perl -ne '/^var version.*"([^"]+)".*$$/ && print "v$$1\n"' main.go) +VERSION := $(shell perl -ne '/^TMCoreSemVer = "([^"]+)"$$/ && print "v$$1\n"' ../../version/version.go) all: build test install @@ -35,7 +35,7 @@ dist: build-all build-docker: rm -f ./tm-monitor - docker run -it --rm -v "$(PWD):/go/src/github.com/tendermint/tools/tm-monitor" -w "/go/src/github.com/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor + docker run -it --rm -v "$(PWD)/../../:/go/src/github.com/tendermint/tendermint" -w "/go/src/github.com/tendermint/tendermint/tools/tm-monitor" -e "CGO_ENABLED=0" golang:alpine go build -ldflags "-s -w" -o tm-monitor docker build -t "tendermint/monitor" . 
clean: diff --git a/tools/tm-monitor/README.md b/tools/tm-monitor/README.md index 4c49775e3f8..cf421684962 100644 --- a/tools/tm-monitor/README.md +++ b/tools/tm-monitor/README.md @@ -12,18 +12,22 @@ collecting and providing various statistics to the user: Assuming your application is running in another container with the name `app`: - docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init - docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658 +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm --link=app tendermint/tendermint node --proxy_app=tcp://app:26658 - docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +``` If you don't have an application yet, but still want to try monitor out, use `kvstore`: - docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init - docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore +``` +docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init +docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore - docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 +``` ### Using Binaries @@ -31,40 +35,49 @@ use `kvstore`: then run: - tendermint init - tendermint node --proxy_app=kvstore +``` +tendermint init +tendermint node --proxy_app=kvstore - tm-monitor localhost:26657 +tm-monitor localhost:26657 +``` -with the last command being in a seperate window. +with the last command being in a separate window. ## Usage - tm-monitor [-v] [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints] +``` +Tendermint monitor watches over one or more Tendermint core +applications, collecting and providing various statistics to the user. - Examples: - # monitor single instance - tm-monitor localhost:26657 +Usage: + tm-monitor [-no-ton] [-listen-addr="tcp://0.0.0.0:26670"] [endpoints] - # monitor a few instances by providing comma-separated list of RPC endpoints - tm-monitor host1:26657,host2:26657 - Flags: - -listen-addr string - HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670") - -no-ton - Do not show ton (table of nodes) - -v verbose logging +Examples: + # monitor single instance + tm-monitor localhost:26657 + + # monitor a few instances by providing comma-separated list of RPC endpoints + tm-monitor host1:26657,host2:26657 +Flags: + -listen-addr string + HTTP and Websocket server listen address (default "tcp://0.0.0.0:26670") + -no-ton + Do not show ton (table of nodes) +``` ### RPC UI Run `tm-monitor` and visit http://localhost:26670 You should see the list of the available RPC endpoints: - http://localhost:26670/status - http://localhost:26670/status/network - http://localhost:26670/monitor?endpoint=_ - http://localhost:26670/status/node?name=_ - http://localhost:26670/unmonitor?endpoint=_ +``` +http://localhost:26670/status +http://localhost:26670/status/network +http://localhost:26670/monitor?endpoint=_ +http://localhost:26670/status/node?name=_ +http://localhost:26670/unmonitor?endpoint=_ +``` The API is available as GET requests with URI encoded parameters, or as JSONRPC POST requests. 
The JSONRPC methods are also exposed over @@ -72,6 +85,8 @@ websocket. ## Development - make get_tools - make get_vendor_deps - make test +``` +make get_tools +make get_vendor_deps +make test +``` diff --git a/tools/tm-monitor/monitor/network.go b/tools/tm-monitor/monitor/network.go index 9b147c06bba..bb5dd0baa3c 100644 --- a/tools/tm-monitor/monitor/network.go +++ b/tools/tm-monitor/monitor/network.go @@ -140,14 +140,22 @@ func (n *Network) NodeIsOnline(name string) { // NewNode is called when the new node is added to the monitor. func (n *Network) NewNode(name string) { + n.mu.Lock() + defer n.mu.Unlock() + n.NumNodesMonitored++ n.NumNodesMonitoredOnline++ + n.updateHealth() } // NodeDeleted is called when the node is deleted from under the monitor. func (n *Network) NodeDeleted(name string) { + n.mu.Lock() + defer n.mu.Unlock() + n.NumNodesMonitored-- n.NumNodesMonitoredOnline-- + n.updateHealth() } func (n *Network) updateHealth() { diff --git a/tools/tm-monitor/monitor/node.go b/tools/tm-monitor/monitor/node.go index b99870207ce..8bc15a15290 100644 --- a/tools/tm-monitor/monitor/node.go +++ b/tools/tm-monitor/monitor/node.go @@ -217,8 +217,7 @@ func (n *Node) checkIsValidator() { if err == nil { for _, v := range validators { key, err1 := n.getPubKey() - // TODO: use bytes.Equal - if err1 == nil && v.PubKey == key { + if err1 == nil && v.PubKey.Equals(key) { n.IsValidator = true } } diff --git a/types/block.go b/types/block.go index 304e8bdee4e..b2cddb5e352 100644 --- a/types/block.go +++ b/types/block.go @@ -2,19 +2,36 @@ package types import ( "bytes" - "errors" "fmt" "strings" "sync" "time" + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/version" +) + +const ( + // MaxHeaderBytes is a maximum header size (including amino overhead). + MaxHeaderBytes int64 = 653 + + // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to + // MaxBlockSizeBytes in size) not including its parts except Data. + // This means it also excludes the overhead for individual transactions. + // To compute individual transactions' overhead use types.ComputeAminoOverhead(tx types.Tx, fieldNum int). + // + // Uvarint length of MaxBlockSizeBytes: 4 bytes + // 2 fields (2 embedded): 2 bytes + // Uvarint length of Data.Txs: 4 bytes + // Data.Txs field: 1 byte + MaxAminoOverheadForBlock int64 = 11 ) // Block defines the atomic unit of a Tendermint blockchain. -// TODO: add Version byte type Block struct { mtx sync.Mutex Header `json:"header"` @@ -23,20 +40,20 @@ type Block struct { LastCommit *Commit `json:"last_commit"` } -// MakeBlock returns a new block with an empty header, except what can be computed from itself. -// It populates the same set of fields validated by ValidateBasic +// MakeBlock returns a new block with an empty header, except what can be +// computed from itself. +// It populates the same set of fields validated by ValidateBasic.
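+// NOTE: State-derived header fields (Version, ChainID, Time, LastBlockID, the +// various hashes, and the proposer address) are filled in afterwards via +// Header.Populate.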
+func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { block := &Block{ Header: Header{ Height: height, - Time: time.Now(), NumTxs: int64(len(txs)), }, Data: Data{ Txs: txs, }, Evidence: EvidenceData{Evidence: evidence}, - LastCommit: commit, + LastCommit: lastCommit, } block.fillHeader() return block @@ -44,31 +61,117 @@ func MakeBlock(height int64, txs []Tx, commit *Commit, evidence []Evidence) *Blo // ValidateBasic performs basic validation that doesn't involve state data. // It checks the internal consistency of the block. +// Further validation is done using state#ValidateBlock. func (b *Block) ValidateBasic() error { if b == nil { - return errors.New("Nil blocks are invalid") + return errors.New("nil block") } b.mtx.Lock() defer b.mtx.Unlock() + if len(b.ChainID) > MaxChainIDLen { + return fmt.Errorf("ChainID is too long. Max is %d, got %d", MaxChainIDLen, len(b.ChainID)) + } + + if b.Height < 0 { + return errors.New("Negative Header.Height") + } else if b.Height == 0 { + return errors.New("Zero Header.Height") + } + + // NOTE: Timestamp validation is subtle and handled elsewhere. + newTxs := int64(len(b.Data.Txs)) if b.NumTxs != newTxs { - return fmt.Errorf("Wrong Block.Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs) + return fmt.Errorf("Wrong Header.NumTxs. Expected %v, got %v", + newTxs, + b.NumTxs, + ) } - if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("Wrong Block.Header.LastCommitHash. Expected %v, got %v", b.LastCommitHash, b.LastCommit.Hash()) + + // TODO: fix tests so we can do this + /*if b.TotalTxs < b.NumTxs { + return fmt.Errorf("Header.TotalTxs (%d) is less than Header.NumTxs (%d)", b.TotalTxs, b.NumTxs) + }*/ + if b.TotalTxs < 0 { + return errors.New("Negative Header.TotalTxs") } - if b.Header.Height != 1 { + + if err := b.LastBlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong Header.LastBlockID: %v", err) + } + + // Validate the last commit and its hash. + if b.Header.Height > 1 { + if b.LastCommit == nil { + return errors.New("nil LastCommit") + } if err := b.LastCommit.ValidateBasic(); err != nil { - return err + return fmt.Errorf("Wrong LastCommit") } } + if err := ValidateHash(b.LastCommitHash); err != nil { + return fmt.Errorf("Wrong Header.LastCommitHash: %v", err) + } + if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { + return fmt.Errorf("Wrong Header.LastCommitHash. Expected %v, got %v", + b.LastCommit.Hash(), + b.LastCommitHash, + ) + } + + // Validate the hash of the transactions. + // NOTE: b.Data.Txs may be nil, but b.Data.Hash() + // still works fine + if err := ValidateHash(b.DataHash); err != nil { + return fmt.Errorf("Wrong Header.DataHash: %v", err) + } if !bytes.Equal(b.DataHash, b.Data.Hash()) { - return fmt.Errorf("Wrong Block.Header.DataHash. Expected %v, got %v", b.DataHash, b.Data.Hash()) + return fmt.Errorf( + "Wrong Header.DataHash. Expected %v, got %v", + b.Data.Hash(), + b.DataHash, + ) + } + + // Basic validation of hashes related to application data. + // Will validate fully against state in state#ValidateBlock. 
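+ // (ValidateHash accepts an empty hash or one of the expected fixed length; + // the hash values themselves are checked against state later.)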
+ if err := ValidateHash(b.ValidatorsHash); err != nil { + return fmt.Errorf("Wrong Header.ValidatorsHash: %v", err) + } + if err := ValidateHash(b.NextValidatorsHash); err != nil { + return fmt.Errorf("Wrong Header.NextValidatorsHash: %v", err) + } + if err := ValidateHash(b.ConsensusHash); err != nil { + return fmt.Errorf("Wrong Header.ConsensusHash: %v", err) + } + // NOTE: AppHash is arbitrary length + if err := ValidateHash(b.LastResultsHash); err != nil { + return fmt.Errorf("Wrong Header.LastResultsHash: %v", err) + } + + // Validate evidence and its hash. + if err := ValidateHash(b.EvidenceHash); err != nil { + return fmt.Errorf("Wrong Header.EvidenceHash: %v", err) + } + // NOTE: b.Evidence.Evidence may be nil, but we're just looping. + for i, ev := range b.Evidence.Evidence { + if err := ev.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid evidence (#%d): %v", i, err) + } } if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return errors.New(cmn.Fmt("Wrong Block.Header.EvidenceHash. Expected %v, got %v", b.EvidenceHash, b.Evidence.Hash())) + return fmt.Errorf("Wrong Header.EvidenceHash. Expected %v, got %v", + b.EvidenceHash, + b.Evidence.Hash(), + ) + } + + if len(b.ProposerAddress) != crypto.AddressSize { + return fmt.Errorf("Expected len(Header.ProposerAddress) to be %d, got %d", + crypto.AddressSize, len(b.ProposerAddress)) } + return nil } @@ -113,7 +216,7 @@ func (b *Block) MakePartSet(partSize int) *PartSet { // We prefix the byte length, so that unmarshaling // can easily happen via a reader. - bz, err := cdc.MarshalBinary(b) + bz, err := cdc.MarshalBinaryLengthPrefixed(b) if err != nil { panic(err) } @@ -174,35 +277,111 @@ func (b *Block) StringShort() string { //----------------------------------------------------------------------------- -// Header defines the structure of a Tendermint block header -// TODO: limit header size -// NOTE: changes to the Header should be duplicated in the abci Header +// MaxDataBytes returns the maximum size of block's data. +// +// XXX: Panics on negative result. +func MaxDataBytes(maxBytes int64, valsCount, evidenceCount int) int64 { + maxDataBytes := maxBytes - + MaxAminoOverheadForBlock - + MaxHeaderBytes - + int64(valsCount)*MaxVoteBytes - + int64(evidenceCount)*MaxEvidenceBytes + + if maxDataBytes < 0 { + panic(fmt.Sprintf( + "Negative MaxDataBytes. BlockSize.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d", + maxBytes, + -(maxDataBytes - maxBytes), + )) + } + + return maxDataBytes + +} + +// MaxDataBytesUnknownEvidence returns the maximum size of block's data when +// evidence count is unknown. MaxEvidenceBytesPerBlock will be used as the size +// of evidence. +// +// XXX: Panics on negative result. +func MaxDataBytesUnknownEvidence(maxBytes int64, valsCount int) int64 { + maxDataBytes := maxBytes - + MaxAminoOverheadForBlock - + MaxHeaderBytes - + int64(valsCount)*MaxVoteBytes - + MaxEvidenceBytesPerBlock(maxBytes) + + if maxDataBytes < 0 { + panic(fmt.Sprintf( + "Negative MaxDataBytesUnknownEvidence. BlockSize.MaxBytes=%d is too small to accommodate header&lastCommit&evidence=%d", + maxBytes, + -(maxDataBytes - maxBytes), + )) + } + + return maxDataBytes +} + +//----------------------------------------------------------------------------- + +// Header defines the structure of a Tendermint block header. 
+// NOTE: changes to the Header should be duplicated in: +// - header.Hash() +// - abci.Header +// - /docs/spec/blockchain/blockchain.md type Header struct { // basic block info - ChainID string `json:"chain_id"` - Height int64 `json:"height"` - Time time.Time `json:"time"` - NumTxs int64 `json:"num_txs"` + Version version.Consensus `json:"version"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` + Time time.Time `json:"time"` + NumTxs int64 `json:"num_txs"` + TotalTxs int64 `json:"total_txs"` // prev block info LastBlockID BlockID `json:"last_block_id"` - TotalTxs int64 `json:"total_txs"` // hashes of block data LastCommitHash cmn.HexBytes `json:"last_commit_hash"` // commit from validators from the last block DataHash cmn.HexBytes `json:"data_hash"` // transactions // hashes from the app output from the prev block - ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block - ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block - AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block - LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block + ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block + NextValidatorsHash cmn.HexBytes `json:"next_validators_hash"` // validators for the next block + ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block + LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block // consensus info - EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block + EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block + ProposerAddress Address `json:"proposer_address"` // original proposer of the block +} + +// Populate the Header with state-derived data. +// Call this after MakeBlock to complete the Header. +func (h *Header) Populate( + version version.Consensus, chainID string, + timestamp time.Time, lastBlockID BlockID, totalTxs int64, + valHash, nextValHash []byte, + consensusHash, appHash, lastResultsHash []byte, + proposerAddress Address, +) { + h.Version = version + h.ChainID = chainID + h.Time = timestamp + h.LastBlockID = lastBlockID + h.TotalTxs = totalTxs + h.ValidatorsHash = valHash + h.NextValidatorsHash = nextValHash + h.ConsensusHash = consensusHash + h.AppHash = appHash + h.LastResultsHash = lastResultsHash + h.ProposerAddress = proposerAddress } // Hash returns the hash of the header. +// It computes a Merkle tree from the header fields +// ordered as they appear in the Header. // Returns nil if ValidatorHash is missing, // since a Header is not valid unless there is // a ValidatorsHash (corresponding to the validator set). 
@@ -210,20 +389,23 @@ func (h *Header) Hash() cmn.HexBytes { if h == nil || len(h.ValidatorsHash) == 0 { return nil } - return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "ChainID": aminoHasher(h.ChainID), - "Height": aminoHasher(h.Height), - "Time": aminoHasher(h.Time), - "NumTxs": aminoHasher(h.NumTxs), - "TotalTxs": aminoHasher(h.TotalTxs), - "LastBlockID": aminoHasher(h.LastBlockID), - "LastCommit": aminoHasher(h.LastCommitHash), - "Data": aminoHasher(h.DataHash), - "Validators": aminoHasher(h.ValidatorsHash), - "App": aminoHasher(h.AppHash), - "Consensus": aminoHasher(h.ConsensusHash), - "Results": aminoHasher(h.LastResultsHash), - "Evidence": aminoHasher(h.EvidenceHash), + return merkle.SimpleHashFromByteSlices([][]byte{ + cdcEncode(h.Version), + cdcEncode(h.ChainID), + cdcEncode(h.Height), + cdcEncode(h.Time), + cdcEncode(h.NumTxs), + cdcEncode(h.TotalTxs), + cdcEncode(h.LastBlockID), + cdcEncode(h.LastCommitHash), + cdcEncode(h.DataHash), + cdcEncode(h.ValidatorsHash), + cdcEncode(h.NextValidatorsHash), + cdcEncode(h.ConsensusHash), + cdcEncode(h.AppHash), + cdcEncode(h.LastResultsHash), + cdcEncode(h.EvidenceHash), + cdcEncode(h.ProposerAddress), }) } @@ -233,6 +415,7 @@ func (h *Header) StringIndented(indent string) string { return "nil-Header" } return fmt.Sprintf(`Header{ +%s Version: %v %s ChainID: %v %s Height: %v %s Time: %v @@ -242,11 +425,14 @@ func (h *Header) StringIndented(indent string) string { %s LastCommit: %v %s Data: %v %s Validators: %v +%s NextValidators: %v %s App: %v %s Consensus: %v %s Results: %v %s Evidence: %v +%s Proposer: %v %s}#%v`, + indent, h.Version, indent, h.ChainID, indent, h.Height, indent, h.Time, @@ -256,10 +442,12 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastCommitHash, indent, h.DataHash, indent, h.ValidatorsHash, + indent, h.NextValidatorsHash, indent, h.AppHash, indent, h.ConsensusHash, indent, h.LastResultsHash, indent, h.EvidenceHash, + indent, h.ProposerAddress, indent, h.Hash()) } @@ -296,7 +484,7 @@ func (commit *Commit) FirstPrecommit() *Vote { } } return &Vote{ - Type: VoteTypePrecommit, + Type: PrecommitType, } } @@ -318,7 +506,7 @@ func (commit *Commit) Round() int { // Type returns the vote type of the commit, which is always VoteTypePrecommit func (commit *Commit) Type() byte { - return VoteTypePrecommit + return byte(PrecommitType) } // Size returns the number of votes in the commit @@ -353,6 +541,7 @@ func (commit *Commit) IsCommit() bool { } // ValidateBasic performs basic validation that doesn't involve state data. +// Does not actually check the cryptographic signatures. func (commit *Commit) ValidateBasic() error { if commit.BlockID.IsZero() { return errors.New("Commit cannot be for nil block") @@ -362,23 +551,23 @@ func (commit *Commit) ValidateBasic() error { } height, round := commit.Height(), commit.Round() - // validate the precommits + // Validate the precommits. for _, precommit := range commit.Precommits { // It's OK for precommits to be missing. if precommit == nil { continue } - // Ensure that all votes are precommits - if precommit.Type != VoteTypePrecommit { + // Ensure that all votes are precommits. + if precommit.Type != PrecommitType { return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", precommit.Type) } - // Ensure that all heights are the same + // Ensure that all heights are the same. if precommit.Height != height { return fmt.Errorf("Invalid commit precommit height. 
Expected %v, got %v", height, precommit.Height) } - // Ensure that all rounds are the same + // Ensure that all rounds are the same. if precommit.Round != round { return fmt.Errorf("Invalid commit precommit round. Expected %v, got %v", round, precommit.Round) @@ -393,11 +582,11 @@ func (commit *Commit) Hash() cmn.HexBytes { return nil } if commit.hash == nil { - bs := make([]merkle.Hasher, len(commit.Precommits)) + bs := make([][]byte, len(commit.Precommits)) for i, precommit := range commit.Precommits { - bs[i] = aminoHasher(precommit) + bs[i] = cdcEncode(precommit) } - commit.hash = merkle.SimpleHashFromHashers(bs) + commit.hash = merkle.SimpleHashFromByteSlices(bs) } return commit.hash } @@ -413,19 +602,77 @@ func (commit *Commit) StringIndented(indent string) string { } return fmt.Sprintf(`Commit{ %s BlockID: %v -%s Precommits: %v +%s Precommits: +%s %v %s}#%v`, indent, commit.BlockID, - indent, strings.Join(precommitStrings, "\n"+indent+" "), + indent, + indent, strings.Join(precommitStrings, "\n"+indent+" "), indent, commit.hash) } //----------------------------------------------------------------------------- -// SignedHeader is a header along with the commits that prove it +// SignedHeader is a header along with the commits that prove it. type SignedHeader struct { - Header *Header `json:"header"` - Commit *Commit `json:"commit"` + *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +// ValidateBasic does basic consistency checks and makes sure the header +// and commit are consistent. +// +// NOTE: This does not actually check the cryptographic signatures. Make +// sure to use a Verifier to validate the signatures actually provide a +// significantly strong proof for this header's validity. +func (sh SignedHeader) ValidateBasic(chainID string) error { + + // Make sure the header is consistent with the commit. + if sh.Header == nil { + return errors.New("SignedHeader missing header.") + } + if sh.Commit == nil { + return errors.New("SignedHeader missing commit (precommit votes).") + } + + // Check ChainID. + if sh.ChainID != chainID { + return fmt.Errorf("Header belongs to another chain '%s' not '%s'", + sh.ChainID, chainID) + } + // Check Height. + if sh.Commit.Height() != sh.Height { + return fmt.Errorf("SignedHeader header and commit height mismatch: %v vs %v", + sh.Height, sh.Commit.Height()) + } + // Check Hash. + hhash := sh.Hash() + chash := sh.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return fmt.Errorf("SignedHeader commit signs block %X, header is block %X", + chash, hhash) + } + // ValidateBasic on the Commit. + err := sh.Commit.ValidateBasic() + if err != nil { + return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + } + return nil +} + +func (sh SignedHeader) String() string { + return sh.StringIndented("") +} + +// StringIndented returns a string representation of the SignedHeader. 
+func (sh SignedHeader) StringIndented(indent string) string { + return fmt.Sprintf(`SignedHeader{ +%s %v +%s %v +%s}`, + indent, sh.Header.StringIndented(indent+" "), + indent, sh.Commit.StringIndented(indent+" "), + indent) } //----------------------------------------------------------------------------- @@ -509,7 +756,6 @@ func (data *EvidenceData) StringIndented(indent string) string { %s}#%v`, indent, strings.Join(evStrings, "\n"+indent+" "), indent, data.hash) - return "" } //-------------------------------------------------------------------------------- @@ -540,38 +786,19 @@ func (blockID BlockID) Key() string { return string(blockID.Hash) + string(bz) } -// String returns a human readable string representation of the BlockID -func (blockID BlockID) String() string { - return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) -} - -//------------------------------------------------------- - -type hasher struct { - item interface{} -} - -func (h hasher) Hash() []byte { - hasher := tmhash.New() - if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { - bz, err := cdc.MarshalBinaryBare(h.item) - if err != nil { - panic(err) - } - _, err = hasher.Write(bz) - if err != nil { - panic(err) - } +// ValidateBasic performs basic validation. +func (blockID BlockID) ValidateBasic() error { + // Hash can be empty in case of POLBlockID in Proposal. + if err := ValidateHash(blockID.Hash); err != nil { + return fmt.Errorf("Wrong Hash") } - return hasher.Sum(nil) - -} - -func aminoHash(item interface{}) []byte { - h := hasher{item} - return h.Hash() + if err := blockID.PartsHeader.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong PartsHeader: %v", err) + } + return nil } -func aminoHasher(item interface{}) merkle.Hasher { - return hasher{item} +// String returns a human readable string representation of the BlockID +func (blockID BlockID) String() string { + return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) } diff --git a/types/block_test.go b/types/block_test.go index 50695c84b84..bedd8c8da1e 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -1,21 +1,34 @@ package types import ( + "crypto/rand" + "math" + "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/version" ) +func TestMain(m *testing.M) { + RegisterMockEvidences(cdc) + + code := m.Run() + os.Exit(code) +} + func TestBlockAddEvidence(t *testing.T) { txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -35,57 +48,47 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) evList := []Evidence{ev} - block := MakeBlock(h, txs, commit, evList) - require.NotNil(t, block) - - // proper block must pass - err = block.ValidateBasic() - require.NoError(t, err) - - // tamper with NumTxs - block 
= MakeBlock(h, txs, commit, evList) - block.NumTxs++ - err = block.ValidateBasic() - require.Error(t, err) - - // remove 1/2 the commits - block = MakeBlock(h, txs, commit, evList) - block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] - block.LastCommit.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with LastCommitHash - block = MakeBlock(h, txs, commit, evList) - block.LastCommitHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with data - block = MakeBlock(h, txs, commit, evList) - block.Data.Txs[0] = Tx("something else") - block.Data.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with DataHash - block = MakeBlock(h, txs, commit, evList) - block.DataHash = cmn.RandBytes(len(block.DataHash)) - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with evidence - block = MakeBlock(h, txs, commit, evList) - block.EvidenceHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) + testCases := []struct { + testName string + malleateBlock func(*Block) + expErr bool + }{ + {"Make Block", func(blk *Block) {}, false}, + {"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false}, + {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, + {"Increase NumTxs", func(blk *Block) { blk.NumTxs++ }, true}, + {"Remove 1/2 the commits", func(blk *Block) { + blk.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] + blk.LastCommit.hash = nil // clear hash or change wont be noticed + }, true}, + {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, + {"Tampered Data", func(blk *Block) { + blk.Data.Txs[0] = Tx("something else") + blk.Data.hash = nil // clear hash or change wont be noticed + }, true}, + {"Tampered DataHash", func(blk *Block) { + blk.DataHash = cmn.RandBytes(len(blk.DataHash)) + }, true}, + {"Tampered EvidenceHash", func(blk *Block) { + blk.EvidenceHash = []byte("something else") + }, true}, + } + for i, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + block := MakeBlock(h, txs, commit, evList) + block.ProposerAddress = valSet.GetProposer().Address + tc.malleateBlock(block) + err = block.ValidateBasic() + assert.Equal(t, tc.expErr, err != nil, "#%d: %v", i, err) + }) + } } func TestBlockHash(t *testing.T) { @@ -104,18 +107,17 @@ func TestBlockMakePartSet(t *testing.T) { func TestBlockMakePartSetWithEvidence(t *testing.T) { assert.Nil(t, (*Block)(nil).MakePartSet(2)) - txs := []Tx{Tx("foo"), Tx("bar")} lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) evList := []Evidence{ev} - partSet := MakeBlock(h, txs, commit, evList).MakePartSet(1024) + partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(1024) assert.NotNil(t, partSet) assert.Equal(t, 3, partSet.Total()) } @@ -125,7 +127,7 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := 
MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) @@ -158,16 +160,20 @@ func TestBlockString(t *testing.T) { } func makeBlockIDRandom() BlockID { - blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} + blockHash := make([]byte, tmhash.Size) + partSetHash := make([]byte, tmhash.Size) + rand.Read(blockHash) + rand.Read(partSetHash) + blockPartsHeader := PartSetHeader{123, partSetHash} return BlockID{blockHash, blockPartsHeader} } -func makeBlockID(hash string, partSetSize int, partSetHash string) BlockID { +func makeBlockID(hash []byte, partSetSize int, partSetHash []byte) BlockID { return BlockID{ - Hash: []byte(hash), + Hash: hash, PartsHeader: PartSetHeader{ Total: partSetSize, - Hash: []byte(partSetHash), + Hash: partSetHash, }, } @@ -188,14 +194,14 @@ func TestNilDataHashDoesntCrash(t *testing.T) { func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) require.NoError(t, err) assert.NotNil(t, commit.FirstPrecommit()) assert.Equal(t, h-1, commit.Height()) assert.Equal(t, 1, commit.Round()) - assert.Equal(t, VoteTypePrecommit, commit.Type()) + assert.Equal(t, PrecommitType, SignedMsgType(commit.Type())) if commit.Size() <= 0 { t.Fatalf("commit %v has a zero or negative size: %d", commit, commit.Size()) } @@ -208,37 +214,130 @@ func TestCommit(t *testing.T) { } func TestCommitValidateBasic(t *testing.T) { - commit := randCommit() - assert.NoError(t, commit.ValidateBasic()) + testCases := []struct { + testName string + malleateCommit func(*Commit) + expectErr bool + }{ + {"Random Commit", func(com *Commit) {}, false}, + {"Nil precommit", func(com *Commit) { com.Precommits[0] = nil }, false}, + {"Incorrect signature", func(com *Commit) { com.Precommits[0].Signature = []byte{0} }, false}, + {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = PrevoteType }, true}, + {"Incorrect height", func(com *Commit) { com.Precommits[0].Height = int64(100) }, true}, + {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + com := randCommit() + tc.malleateCommit(com) + assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} - // nil precommit is OK - commit = randCommit() - commit.Precommits[0] = nil - assert.NoError(t, commit.ValidateBasic()) +func TestMaxHeaderBytes(t *testing.T) { + // Construct a UTF-8 string of MaxChainIDLen length using the supplementary + // characters. + // Each supplementary character takes 4 bytes. + // http://www.i18nguy.com/unicode/supplementary-test.html + maxChainID := "" + for i := 0; i < MaxChainIDLen; i++ { + maxChainID += "𠜎" + } - // tamper with types - commit = randCommit() - commit.Precommits[0].Type = VoteTypePrevote - assert.Error(t, commit.ValidateBasic()) + // time is varint encoded so need to pick the max. 
+ // year int, month Month, day, hour, min, sec, nsec int, loc *Location + timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) + + h := Header{ + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: maxChainID, + Height: math.MaxInt64, + Time: timestamp, + NumTxs: math.MaxInt64, + TotalTxs: math.MaxInt64, + LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + ValidatorsHash: tmhash.Sum([]byte("validators_hash")), + NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + } - // tamper with height - commit = randCommit() - commit.Precommits[0].Height = int64(100) - assert.Error(t, commit.ValidateBasic()) + bz, err := cdc.MarshalBinaryLengthPrefixed(h) + require.NoError(t, err) - // tamper with round - commit = randCommit() - commit.Precommits[0].Round = 100 - assert.Error(t, commit.ValidateBasic()) + assert.EqualValues(t, MaxHeaderBytes, len(bz)) } func randCommit() *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) + voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) if err != nil { panic(err) } return commit } + +func TestBlockMaxDataBytes(t *testing.T) { + testCases := []struct { + maxBytes int64 + valsCount int + evidenceCount int + panics bool + result int64 + }{ + 0: {-10, 1, 0, true, 0}, + 1: {10, 1, 0, true, 0}, + 2: {886, 1, 0, true, 0}, + 3: {887, 1, 0, false, 0}, + 4: {888, 1, 0, false, 1}, + } + + for i, tc := range testCases { + if tc.panics { + assert.Panics(t, func() { + MaxDataBytes(tc.maxBytes, tc.valsCount, tc.evidenceCount) + }, "#%v", i) + } else { + assert.Equal(t, + tc.result, + MaxDataBytes(tc.maxBytes, tc.valsCount, tc.evidenceCount), + "#%v", i) + } + } +} + +func TestBlockMaxDataBytesUnknownEvidence(t *testing.T) { + testCases := []struct { + maxBytes int64 + valsCount int + panics bool + result int64 + }{ + 0: {-10, 1, true, 0}, + 1: {10, 1, true, 0}, + 2: {984, 1, true, 0}, + 3: {985, 1, false, 0}, + 4: {986, 1, false, 1}, + } + + for i, tc := range testCases { + if tc.panics { + assert.Panics(t, func() { + MaxDataBytesUnknownEvidence(tc.maxBytes, tc.valsCount) + }, "#%v", i) + } else { + assert.Equal(t, + tc.result, + MaxDataBytesUnknownEvidence(tc.maxBytes, tc.valsCount), + "#%v", i) + } + } +} diff --git a/types/canonical.go b/types/canonical.go new file mode 100644 index 00000000000..a4f6f214d02 --- /dev/null +++ b/types/canonical.go @@ -0,0 +1,112 @@ +package types + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// Canonical* wraps the structs in types for amino encoding them for use in SignBytes / the Signable interface. 
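// ----------------------------------------------------------------------------
// A stdlib-only sketch of why these Canonical* wrappers exist: sign bytes must
// be byte-for-byte identical on every validator, so the encoding pins the
// field order, uses fixed-width integers (what the `binary:"fixed64"` tags and
// the int to int64 casts below achieve in amino), and normalizes timestamps to
// UTC. voteSignBytes and its layout are illustrative assumptions, not
// Tendermint's actual amino encoding.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

// voteSignBytes lays out type, height, round, a canonical RFC3339Nano UTC
// timestamp, and the chain ID in a fixed order.
func voteSignBytes(chainID string, typ byte, height, round int64, ts time.Time) []byte {
	var buf bytes.Buffer
	buf.WriteByte(typ)
	binary.Write(&buf, binary.BigEndian, height) // fixed width, like amino's fixed64
	binary.Write(&buf, binary.BigEndian, round)
	buf.WriteString(ts.UTC().Format(time.RFC3339Nano)) // force UTC, as CanonicalTime does
	buf.WriteString(chainID)
	return buf.Bytes()
}

func main() {
	utc := time.Date(2018, 10, 1, 12, 0, 0, 0, time.UTC)
	local := utc.In(time.FixedZone("UTC+2", 2*60*60))
	a := voteSignBytes("mychain", 2, 10, 1, utc)
	b := voteSignBytes("mychain", 2, 10, 1, local)
	fmt.Println(bytes.Equal(a, b)) // true: the timezone is normalized away
}
// ----------------------------------------------------------------------------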
+ +// TimeFormat is used for generating the sigs +const TimeFormat = time.RFC3339Nano + +type CanonicalBlockID struct { + Hash cmn.HexBytes + PartsHeader CanonicalPartSetHeader +} + +type CanonicalPartSetHeader struct { + Hash cmn.HexBytes + Total int +} + +type CanonicalProposal struct { + Type SignedMsgType // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + POLRound int64 `binary:"fixed64"` + BlockID CanonicalBlockID + Timestamp time.Time + ChainID string +} + +type CanonicalVote struct { + Type SignedMsgType // type alias for byte + Height int64 `binary:"fixed64"` + Round int64 `binary:"fixed64"` + Timestamp time.Time + BlockID CanonicalBlockID + ChainID string +} + +type CanonicalHeartbeat struct { + Type byte + Height int64 `binary:"fixed64"` + Round int `binary:"fixed64"` + Sequence int `binary:"fixed64"` + ValidatorAddress Address + ValidatorIndex int + ChainID string +} + +//----------------------------------- +// Canonicalize the structs + +func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID { + return CanonicalBlockID{ + Hash: blockID.Hash, + PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader), + } +} + +func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { + return CanonicalPartSetHeader{ + psh.Hash, + psh.Total, + } +} + +func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { + return CanonicalProposal{ + Type: ProposalType, + Height: proposal.Height, + Round: int64(proposal.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + POLRound: int64(proposal.POLRound), + BlockID: CanonicalizeBlockID(proposal.BlockID), + Timestamp: proposal.Timestamp, + ChainID: chainID, + } +} + +func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { + return CanonicalVote{ + Type: vote.Type, + Height: vote.Height, + Round: int64(vote.Round), // cast int->int64 to make amino encode it fixed64 (does not work for int) + Timestamp: vote.Timestamp, + BlockID: CanonicalizeBlockID(vote.BlockID), + ChainID: chainID, + } +} + +func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { + return CanonicalHeartbeat{ + Type: byte(HeartbeatType), + Height: heartbeat.Height, + Round: heartbeat.Round, + Sequence: heartbeat.Sequence, + ValidatorAddress: heartbeat.ValidatorAddress, + ValidatorIndex: heartbeat.ValidatorIndex, + ChainID: chainID, + } +} + +// CanonicalTime can be used to stringify time in a canonical way. 
+func CanonicalTime(t time.Time) string { + // Note that sending time over amino resets it to + // local time, we need to force UTC here, so the + // signatures match + return tmtime.Canonical(t).Format(TimeFormat) +} diff --git a/types/canonical_json.go b/types/canonical_json.go deleted file mode 100644 index aca9e9b799c..00000000000 --- a/types/canonical_json.go +++ /dev/null @@ -1,114 +0,0 @@ -package types - -import ( - "time" - - cmn "github.com/tendermint/tendermint/libs/common" -) - -// Canonical json is amino's json for structs with fields in alphabetical order - -// TimeFormat is used for generating the sigs -const TimeFormat = time.RFC3339Nano - -type CanonicalJSONBlockID struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"` -} - -type CanonicalJSONPartSetHeader struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - Total int `json:"total,omitempty"` -} - -type CanonicalJSONProposal struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` - Height int64 `json:"height"` - POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` - POLRound int `json:"pol_round"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` -} - -type CanonicalJSONVote struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockID CanonicalJSONBlockID `json:"block_id"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` - VoteType byte `json:"type"` -} - -type CanonicalJSONHeartbeat struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` -} - -//----------------------------------- -// Canonicalize the structs - -func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID { - return CanonicalJSONBlockID{ - Hash: blockID.Hash, - PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader), - } -} - -func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader { - return CanonicalJSONPartSetHeader{ - psh.Hash, - psh.Total, - } -} - -func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal { - return CanonicalJSONProposal{ - ChainID: chainID, - Type: "proposal", - BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader), - Height: proposal.Height, - Timestamp: CanonicalTime(proposal.Timestamp), - POLBlockID: CanonicalBlockID(proposal.POLBlockID), - POLRound: proposal.POLRound, - Round: proposal.Round, - } -} - -func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote { - return CanonicalJSONVote{ - ChainID: chainID, - Type: "vote", - BlockID: CanonicalBlockID(vote.BlockID), - Height: vote.Height, - Round: vote.Round, - Timestamp: CanonicalTime(vote.Timestamp), - VoteType: vote.Type, - } -} - -func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat { - return CanonicalJSONHeartbeat{ - ChainID: chainID, - Type: "heartbeat", - Height: heartbeat.Height, - Round: heartbeat.Round, - Sequence: heartbeat.Sequence, - ValidatorAddress: heartbeat.ValidatorAddress, - ValidatorIndex: heartbeat.ValidatorIndex, - } -} - -func CanonicalTime(t time.Time) string { - // Note that sending time over amino resets it to - // local time, we need to force UTC here, so the - // signatures match - return 
t.UTC().Format(TimeFormat) -} diff --git a/types/encoding_helper.go b/types/encoding_helper.go new file mode 100644 index 00000000000..f825de8a6a0 --- /dev/null +++ b/types/encoding_helper.go @@ -0,0 +1,14 @@ +package types + +import ( + cmn "github.com/tendermint/tendermint/libs/common" +) + +// cdcEncode returns nil if the input is nil, otherwise returns +// cdc.MustMarshalBinaryBare(item) +func cdcEncode(item interface{}) []byte { + if item != nil && !cmn.IsTypedNil(item) && !cmn.IsEmpty(item) { + return cdc.MustMarshalBinaryBare(item) + } + return nil +} diff --git a/types/event_bus.go b/types/event_bus.go index b4965feee67..65206e938b4 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -45,7 +45,7 @@ func (b *EventBus) SetLogger(l log.Logger) { } func (b *EventBus) OnStart() error { - return b.pubsub.OnStart() + return b.pubsub.Start() } func (b *EventBus) OnStop() { @@ -71,34 +71,36 @@ func (b *EventBus) Publish(eventType string, eventData TMEventData) error { return nil } -//--- block, tx, and vote events +func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { + return b.Publish(EventNewBlock, data) +} -func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error { - return b.Publish(EventNewBlock, event) +func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { + return b.Publish(EventNewBlockHeader, data) } -func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error { - return b.Publish(EventNewBlockHeader, event) +func (b *EventBus) PublishEventVote(data EventDataVote) error { + return b.Publish(EventVote, data) } -func (b *EventBus) PublishEventVote(event EventDataVote) error { - return b.Publish(EventVote, event) +func (b *EventBus) PublishEventValidBlock(data EventDataRoundState) error { + return b.Publish(EventValidBlock, data) } // PublishEventTx publishes tx event with tags from Result. Note it will add // predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names // will be overwritten. 
-func (b *EventBus) PublishEventTx(event EventDataTx) error { +func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() tags := make(map[string]string) // validate and fill tags from tx result - for _, tag := range event.Result.Tags { + for _, tag := range data.Result.Tags { // basic validation if len(tag.Key) == 0 { - b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx) + b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", data.Tx) continue } tags[string(tag.Key)] = string(tag.Value) @@ -109,55 +111,57 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error { tags[EventTypeKey] = EventTx logIfTagExists(TxHashKey, tags, b.Logger) - tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash()) + tags[TxHashKey] = fmt.Sprintf("%X", data.Tx.Hash()) logIfTagExists(TxHeightKey, tags, b.Logger) - tags[TxHeightKey] = fmt.Sprintf("%d", event.Height) + tags[TxHeightKey] = fmt.Sprintf("%d", data.Height) - b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags)) + b.pubsub.PublishWithTags(ctx, data, tmpubsub.NewTagMap(tags)) return nil } -func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error { - return b.Publish(EventProposalHeartbeat, event) +func (b *EventBus) PublishEventProposalHeartbeat(data EventDataProposalHeartbeat) error { + return b.Publish(EventProposalHeartbeat, data) } -//--- EventDataRoundState events +func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { + return b.Publish(EventNewRoundStep, data) +} -func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error { - return b.Publish(EventNewRoundStep, event) +func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { + return b.Publish(EventTimeoutPropose, data) } -func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error { - return b.Publish(EventTimeoutPropose, event) +func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error { + return b.Publish(EventTimeoutWait, data) } -func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error { - return b.Publish(EventTimeoutWait, event) +func (b *EventBus) PublishEventNewRound(data EventDataRoundState) error { + return b.Publish(EventNewRound, data) } -func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error { - return b.Publish(EventNewRound, event) +func (b *EventBus) PublishEventCompleteProposal(data EventDataRoundState) error { + return b.Publish(EventCompleteProposal, data) } -func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error { - return b.Publish(EventCompleteProposal, event) +func (b *EventBus) PublishEventPolka(data EventDataRoundState) error { + return b.Publish(EventPolka, data) } -func (b *EventBus) PublishEventPolka(event EventDataRoundState) error { - return b.Publish(EventPolka, event) +func (b *EventBus) PublishEventUnlock(data EventDataRoundState) error { + return b.Publish(EventUnlock, data) } -func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error { - return b.Publish(EventUnlock, event) +func (b *EventBus) PublishEventRelock(data EventDataRoundState) error { + return b.Publish(EventRelock, data) } -func (b *EventBus) PublishEventRelock(event EventDataRoundState) error { - return b.Publish(EventRelock, event) +func (b *EventBus) PublishEventLock(data EventDataRoundState) error { + return b.Publish(EventLock, data) } -func (b *EventBus) 
PublishEventLock(event EventDataRoundState) error { - return b.Publish(EventLock, event) +func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { + return b.Publish(EventValidatorSetUpdates, data) } func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { @@ -165,3 +169,74 @@ func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) } } + +//----------------------------------------------------------------------------- +type NopEventBus struct{} + +func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { + return nil +} + +func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { + return nil +} + +func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { + return nil +} + +func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { + return nil +} + +func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { + return nil +} + +func (NopEventBus) PublishEventVote(data EventDataVote) error { + return nil +} + +func (NopEventBus) PublishEventTx(data EventDataTx) error { + return nil +} + +func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventLock(data EventDataRoundState) error { + return nil +} + +func (NopEventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { + return nil +} diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 907c69d3170..f0e825d5dcc 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -22,7 +22,7 @@ func TestEventBusPublishEventTx(t *testing.T) { defer eventBus.Stop() tx := Tx("foo") - result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}} + result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} txEventsCh := make(chan interface{}) @@ -68,7 +68,7 @@ func TestEventBusPublish(t *testing.T) { err = eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, eventsCh) require.NoError(t, err) - const numEventsExpected = 14 + const numEventsExpected = 15 done := make(chan struct{}) go func() { numEvents := 0 @@ -108,6 +108,8 @@ func TestEventBusPublish(t *testing.T) { require.NoError(t, err) err = eventBus.PublishEventLock(EventDataRoundState{}) require.NoError(t, err) + err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{}) + require.NoError(t, err) select { case <-done: diff --git a/types/events.go b/types/events.go index c26fecb71dd..33aa712efd7 100644 --- a/types/events.go +++ b/types/events.go @@ -8,42 +8,35 @@ import ( 
tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) -// Reserved event types +// Reserved event types (alphabetically sorted). const ( - EventCompleteProposal = "CompleteProposal" - EventLock = "Lock" - EventNewBlock = "NewBlock" - EventNewBlockHeader = "NewBlockHeader" - EventNewRound = "NewRound" - EventNewRoundStep = "NewRoundStep" - EventPolka = "Polka" - EventRelock = "Relock" - EventTimeoutPropose = "TimeoutPropose" - EventTimeoutWait = "TimeoutWait" - EventTx = "Tx" - EventUnlock = "Unlock" - EventVote = "Vote" - EventProposalHeartbeat = "ProposalHeartbeat" + EventCompleteProposal = "CompleteProposal" + EventLock = "Lock" + EventNewBlock = "NewBlock" + EventNewBlockHeader = "NewBlockHeader" + EventNewRound = "NewRound" + EventNewRoundStep = "NewRoundStep" + EventPolka = "Polka" + EventProposalHeartbeat = "ProposalHeartbeat" + EventRelock = "Relock" + EventTimeoutPropose = "TimeoutPropose" + EventTimeoutWait = "TimeoutWait" + EventTx = "Tx" + EventUnlock = "Unlock" + EventValidBlock = "ValidBlock" + EventValidatorSetUpdates = "ValidatorSetUpdates" + EventVote = "Vote" ) /////////////////////////////////////////////////////////////////////////////// // ENCODING / DECODING /////////////////////////////////////////////////////////////////////////////// -// implements events.EventData +// TMEventData implements events.EventData. type TMEventData interface { - AssertIsTMEventData() // empty interface } -func (_ EventDataNewBlock) AssertIsTMEventData() {} -func (_ EventDataNewBlockHeader) AssertIsTMEventData() {} -func (_ EventDataTx) AssertIsTMEventData() {} -func (_ EventDataRoundState) AssertIsTMEventData() {} -func (_ EventDataVote) AssertIsTMEventData() {} -func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {} -func (_ EventDataString) AssertIsTMEventData() {} - func RegisterEventDatas(cdc *amino.Codec) { cdc.RegisterInterface((*TMEventData)(nil), nil) cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil) @@ -52,6 +45,7 @@ func RegisterEventDatas(cdc *amino.Codec) { cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil) cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil) + cdc.RegisterConcrete(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates", nil) cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) } @@ -92,6 +86,10 @@ type EventDataVote struct { type EventDataString string +type EventDataValidatorSetUpdates struct { + ValidatorUpdates []*Validator `json:"validator_updates"` +} + /////////////////////////////////////////////////////////////////////////////// // PUBSUB /////////////////////////////////////////////////////////////////////////////// @@ -108,20 +106,22 @@ const ( ) var ( - EventQueryNewBlock = QueryForEvent(EventNewBlock) - EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) - EventQueryNewRound = QueryForEvent(EventNewRound) - EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) - EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) - EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) - EventQueryPolka = QueryForEvent(EventPolka) - EventQueryUnlock = QueryForEvent(EventUnlock) - EventQueryLock = QueryForEvent(EventLock) - EventQueryRelock = QueryForEvent(EventRelock) - EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) - EventQueryVote = QueryForEvent(EventVote) - EventQueryProposalHeartbeat = 
QueryForEvent(EventProposalHeartbeat) - EventQueryTx = QueryForEvent(EventTx) + EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) + EventQueryLock = QueryForEvent(EventLock) + EventQueryNewBlock = QueryForEvent(EventNewBlock) + EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) + EventQueryNewRound = QueryForEvent(EventNewRound) + EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) + EventQueryPolka = QueryForEvent(EventPolka) + EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) + EventQueryRelock = QueryForEvent(EventRelock) + EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) + EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) + EventQueryTx = QueryForEvent(EventTx) + EventQueryUnlock = QueryForEvent(EventUnlock) + EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdates) + EventQueryValidBlock = QueryForEvent(EventValidBlock) + EventQueryVote = QueryForEvent(EventVote) ) func EventQueryTxFor(tx Tx) tmpubsub.Query { @@ -137,6 +137,7 @@ type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error PublishEventTx(EventDataTx) error + PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error } type TxEventPublisher interface { diff --git a/types/evidence.go b/types/evidence.go index 92675868fd6..fb2423458bf 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -4,19 +4,28 @@ import ( "bytes" "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" + amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/merkle" ) +const ( + // MaxEvidenceBytes is a maximum size of any evidence (including amino overhead). + MaxEvidenceBytes int64 = 484 +) + // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. type ErrEvidenceInvalid struct { Evidence Evidence ErrorValue error } -func NewEvidenceInvalidErr(ev Evidence, err error) *ErrEvidenceInvalid { +// NewErrEvidenceInvalid returns a new EvidenceInvalid with the given err. +func NewErrEvidenceInvalid(ev Evidence, err error) *ErrEvidenceInvalid { return &ErrEvidenceInvalid{ev, err} } @@ -25,37 +34,65 @@ func (err *ErrEvidenceInvalid) Error() string { return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence) } +// ErrEvidenceOverflow is for when there is too much evidence in a block. +type ErrEvidenceOverflow struct { + MaxBytes int64 + GotBytes int64 +} + +// NewErrEvidenceOverflow returns a new ErrEvidenceOverflow where got > max. +func NewErrEvidenceOverflow(max, got int64) *ErrEvidenceOverflow { + return &ErrEvidenceOverflow{max, got} +} + +// Error returns a string representation of the error. 
+func (err *ErrEvidenceOverflow) Error() string { + return fmt.Sprintf("Too much evidence: Max %d bytes, got %d bytes", err.MaxBytes, err.GotBytes) +} + //------------------------------------------- // Evidence represents any provable malicious activity by a validator type Evidence interface { Height() int64 // height of the equivocation Address() []byte // address of the equivocating validator + Bytes() []byte // bytes which comprise the evidence Hash() []byte // hash of the evidence Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence Equal(Evidence) bool // check equality of evidence + ValidateBasic() error String() string } func RegisterEvidences(cdc *amino.Codec) { cdc.RegisterInterface((*Evidence)(nil), nil) cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil) +} - // mocks +func RegisterMockEvidences(cdc *amino.Codec) { cdc.RegisterConcrete(MockGoodEvidence{}, "tendermint/MockGoodEvidence", nil) cdc.RegisterConcrete(MockBadEvidence{}, "tendermint/MockBadEvidence", nil) } +// MaxEvidenceBytesPerBlock returns the maximum evidence size per block - +// 1/10th of the maximum block size. +func MaxEvidenceBytesPerBlock(blockMaxBytes int64) int64 { + return blockMaxBytes / 10 +} + //------------------------------------------- -// DuplicateVoteEvidence contains evidence a validator signed two conflicting votes. +// DuplicateVoteEvidence contains evidence a validator signed two conflicting +// votes. type DuplicateVoteEvidence struct { PubKey crypto.PubKey VoteA *Vote VoteB *Vote } +var _ Evidence = &DuplicateVoteEvidence{} + // String returns a string representation of the evidence. func (dve *DuplicateVoteEvidence) String() string { return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB) @@ -72,9 +109,14 @@ func (dve *DuplicateVoteEvidence) Address() []byte { return dve.PubKey.Address() } +// Bytes returns the amino encoding of the evidence. +func (dve *DuplicateVoteEvidence) Bytes() []byte { + return cdcEncode(dve) +} + // Hash returns the hash of the evidence. func (dve *DuplicateVoteEvidence) Hash() []byte { - return aminoHasher(dve).Hash() + return tmhash.Sum(cdcEncode(dve)) } // Verify returns an error if the two votes aren't conflicting. @@ -127,11 +169,28 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { } // just check their hashes - dveHash := aminoHasher(dve).Hash() - evHash := aminoHasher(ev).Hash() + dveHash := tmhash.Sum(cdcEncode(dve)) + evHash := tmhash.Sum(cdcEncode(ev)) return bytes.Equal(dveHash, evHash) } +// ValidateBasic performs basic validation. 
+func (dve *DuplicateVoteEvidence) ValidateBasic() error { + if len(dve.PubKey.Bytes()) == 0 { + return errors.New("Empty PubKey") + } + if dve.VoteA == nil || dve.VoteB == nil { + return fmt.Errorf("One or both of the votes are empty %v, %v", dve.VoteA, dve.VoteB) + } + if err := dve.VoteA.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid VoteA: %v", err) + } + if err := dve.VoteB.ValidateBasic(); err != nil { + return fmt.Errorf("Invalid VoteB: %v", err) + } + return nil +} + //----------------------------------------------------------------- // UNSTABLE @@ -140,6 +199,8 @@ type MockGoodEvidence struct { Address_ []byte } +var _ Evidence = &MockGoodEvidence{} + // UNSTABLE func NewMockGoodEvidence(height int64, idx int, address []byte) MockGoodEvidence { return MockGoodEvidence{height, address} @@ -150,12 +211,16 @@ func (e MockGoodEvidence) Address() []byte { return e.Address_ } func (e MockGoodEvidence) Hash() []byte { return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_)) } +func (e MockGoodEvidence) Bytes() []byte { + return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_)) +} func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil } func (e MockGoodEvidence) Equal(ev Evidence) bool { e2 := ev.(MockGoodEvidence) return e.Height_ == e2.Height_ && bytes.Equal(e.Address_, e2.Address_) } +func (e MockGoodEvidence) ValidateBasic() error { return nil } func (e MockGoodEvidence) String() string { return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_) } @@ -173,6 +238,7 @@ func (e MockBadEvidence) Equal(ev Evidence) bool { return e.Height_ == e2.Height_ && bytes.Equal(e.Address_, e2.Address_) } +func (e MockBadEvidence) ValidateBasic() error { return nil } func (e MockBadEvidence) String() string { return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_) } @@ -184,18 +250,14 @@ type EvidenceList []Evidence // Hash returns the simple merkle root hash of the EvidenceList. func (evl EvidenceList) Hash() []byte { - // Recursive impl. - // Copied from crypto/merkle to avoid allocations - switch len(evl) { - case 0: - return nil - case 1: - return evl[0].Hash() - default: - left := EvidenceList(evl[:(len(evl)+1)/2]).Hash() - right := EvidenceList(evl[(len(evl)+1)/2:]).Hash() - return merkle.SimpleHashFromTwoHashes(left, right) + // These allocations are required because Evidence is not of type Bytes, and + // golang slices can't be typed cast. This shouldn't be a performance problem since + // the Evidence size is capped. 
+ evidenceBzs := make([][]byte, len(evl)) + for i := 0; i < len(evl); i++ { + evidenceBzs[i] = evl[i].Bytes() } + return merkle.SimpleHashFromByteSlices(evidenceBzs) } func (evl EvidenceList) String() string { diff --git a/types/evidence_test.go b/types/evidence_test.go index 54eba01cde0..a96b63a9e0c 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -1,9 +1,13 @@ package types import ( + "math" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/crypto/tmhash" ) type voteData struct { @@ -18,7 +22,7 @@ func makeVote(val PrivValidator, chainID string, valIndex int, height int64, rou ValidatorIndex: valIndex, Height: height, Round: round, - Type: byte(step), + Type: SignedMsgType(step), BlockID: blockID, } err := val.SignVote(chainID, v) @@ -31,10 +35,11 @@ func makeVote(val PrivValidator, chainID string, valIndex int, height int64, rou func TestEvidence(t *testing.T) { val := NewMockPV() val2 := NewMockPV() - blockID := makeBlockID("blockhash", 1000, "partshash") - blockID2 := makeBlockID("blockhash2", 1000, "partshash") - blockID3 := makeBlockID("blockhash", 10000, "partshash") - blockID4 := makeBlockID("blockhash", 10000, "partshash2") + + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) + blockID3 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash")) + blockID4 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash2")) const chainID = "mychain" @@ -89,13 +94,65 @@ func TestEvidenceList(t *testing.T) { assert.False(t, evl.Has(&DuplicateVoteEvidence{})) } +func TestMaxEvidenceBytes(t *testing.T) { + val := NewMockPV() + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + const chainID = "mychain" + ev := &DuplicateVoteEvidence{ + PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because it's pubkey is longer + VoteA: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), + VoteB: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), + } + + bz, err := cdc.MarshalBinaryLengthPrefixed(ev) + require.NoError(t, err) + + assert.EqualValues(t, MaxEvidenceBytes, len(bz)) +} + func randomDuplicatedVoteEvidence() *DuplicateVoteEvidence { val := NewMockPV() - blockID := makeBlockID("blockhash", 1000, "partshash") - blockID2 := makeBlockID("blockhash2", 1000, "partshash") + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), } } + +func TestDuplicateVoteEvidenceValidation(t *testing.T) { + val := NewMockPV() + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + const chainID = "mychain" + + testCases := []struct { + testName string + malleateEvidence func(*DuplicateVoteEvidence) + expectErr bool + }{ + {"Good DuplicateVoteEvidence", func(ev *DuplicateVoteEvidence) {}, false}, + {"Nil 
vote A", func(ev *DuplicateVoteEvidence) { ev.VoteA = nil }, true}, + {"Nil vote B", func(ev *DuplicateVoteEvidence) { ev.VoteB = nil }, true}, + {"Nil votes", func(ev *DuplicateVoteEvidence) { + ev.VoteA = nil + ev.VoteB = nil + }, true}, + {"Invalid vote type", func(ev *DuplicateVoteEvidence) { + ev.VoteA = makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) + }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + ev := &DuplicateVoteEvidence{ + PubKey: secp256k1.GenPrivKey().PubKey(), + VoteA: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID), + VoteB: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2), + } + tc.malleateEvidence(ev) + assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/genesis.go b/types/genesis.go index 220ee0e0efc..8684eb33b01 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -1,12 +1,20 @@ package types import ( + "bytes" "encoding/json" + "fmt" "io/ioutil" "time" "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" +) + +const ( + // MaxChainIDLen is a maximum length of the chain ID. + MaxChainIDLen = 50 ) //------------------------------------------------------------ @@ -14,9 +22,10 @@ import ( // GenesisValidator is an initial validator. type GenesisValidator struct { - PubKey crypto.PubKey `json:"pub_key"` - Power int64 `json:"power"` - Name string `json:"name"` + Address Address `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + Power int64 `json:"power"` + Name string `json:"name"` } // GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set. 
@@ -24,7 +33,7 @@ type GenesisDoc struct { GenesisTime time.Time `json:"genesis_time"` ChainID string `json:"chain_id"` ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` - Validators []GenesisValidator `json:"validators"` + Validators []GenesisValidator `json:"validators,omitempty"` AppHash cmn.HexBytes `json:"app_hash"` AppState json.RawMessage `json:"app_state,omitempty"` } @@ -51,10 +60,12 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte { // ValidateAndComplete checks that all necessary fields are present // and fills in defaults for optional fields left empty func (genDoc *GenesisDoc) ValidateAndComplete() error { - if genDoc.ChainID == "" { return cmn.NewError("Genesis doc must include non-empty chain_id") } + if len(genDoc.ChainID) > MaxChainIDLen { + return cmn.NewError("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen) + } if genDoc.ConsensusParams == nil { genDoc.ConsensusParams = DefaultConsensusParams() @@ -64,18 +75,20 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { } } - if len(genDoc.Validators) == 0 { - return cmn.NewError("The genesis file must have at least one validator") - } - - for _, v := range genDoc.Validators { + for i, v := range genDoc.Validators { if v.Power == 0 { return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v) } + if len(v.Address) > 0 && !bytes.Equal(v.PubKey.Address(), v.Address) { + return cmn.NewError("Incorrect address for validator %v in the genesis file, should be %v", v, v.PubKey.Address()) + } + if len(v.Address) == 0 { + genDoc.Validators[i].Address = v.PubKey.Address() + } } if genDoc.GenesisTime.IsZero() { - genDoc.GenesisTime = time.Now() + genDoc.GenesisTime = tmtime.Now() } return nil @@ -107,7 +120,7 @@ func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { } genDoc, err := GenesisDocFromJSON(jsonBlob) if err != nil { - return nil, cmn.ErrorWrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile)) + return nil, cmn.ErrorWrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile)) } return genDoc, nil } diff --git a/types/genesis_test.go b/types/genesis_test.go index 925bba79078..e7f041a851c 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -4,25 +4,29 @@ import ( "io/ioutil" "os" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestGenesisBad(t *testing.T) { // test some bad ones from raw json testCases := [][]byte{ - []byte{}, // empty - []byte{1, 1, 1, 1, 1}, // junk - []byte(`{}`), // empty - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[{}]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":null}`), // missing validators - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), // missing chain_id + []byte{}, // empty + []byte{1, 1, 1, 1, 1}, // junk + []byte(`{}`), // empty + []byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid validator + // missing pub_key type + []byte(`{"validators":[{"pub_key":{"value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), + // missing chain_id + 
[]byte(`{"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), + // too big chain_id + []byte(`{"chain_id": "Lorem ipsum dolor sit amet, consectetuer adipiscing", "validators": [{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), + // wrong address + []byte(`{"chain_id":"mychain", "validators":[{"address": "A", "pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), } for _, testCase := range testCases { @@ -37,10 +41,11 @@ func TestGenesisGood(t *testing.T) { _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") + pubkey := ed25519.GenPrivKey().PubKey() // create a base gendoc from struct baseGenDoc := &GenesisDoc{ ChainID: "abc", - Validators: []GenesisValidator{{ed25519.GenPrivKey().PubKey(), 10, "myval"}}, + Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, } genDocBytes, err = cdc.MarshalJSON(baseGenDoc) assert.NoError(t, err, "error marshalling genDoc") @@ -50,6 +55,9 @@ func TestGenesisGood(t *testing.T) { assert.NoError(t, err, "expected no error for valid genDoc json") assert.NotNil(t, genDoc.ConsensusParams, "expected consensus params to be filled in") + // check validator's address is filled + assert.NotNil(t, genDoc.Validators[0].Address, "expected validator's address to be filled in") + // create json with consensus params filled genDocBytes, err = cdc.MarshalJSON(genDoc) assert.NoError(t, err, "error marshalling genDoc") @@ -62,6 +70,19 @@ func TestGenesisGood(t *testing.T) { assert.NoError(t, err, "error marshalling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") + + // Genesis doc from raw json + missingValidatorsTestCases := [][]byte{ + []byte(`{"chain_id":"mychain"}`), // missing validators + []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators + []byte(`{"chain_id":"mychain","validators":null}`), // nil validator + []byte(`{"chain_id":"mychain"}`), // missing validators + } + + for _, tc := range missingValidatorsTestCases { + _, err := GenesisDocFromJSON(tc) + assert.NoError(t, err) + } } func TestGenesisSaveAs(t *testing.T) { @@ -97,10 +118,11 @@ func TestGenesisValidatorHash(t *testing.T) { } func randomGenesisDoc() *GenesisDoc { + pubkey := ed25519.GenPrivKey().PubKey() return &GenesisDoc{ - GenesisTime: time.Now().UTC(), + GenesisTime: tmtime.Now(), ChainID: "abc", - Validators: []GenesisValidator{{ed25519.GenPrivKey().PubKey(), 10, "myval"}}, + Validators: []GenesisValidator{{pubkey.Address(), pubkey, 10, "myval"}}, ConsensusParams: DefaultConsensusParams(), } } diff --git a/types/heartbeat.go b/types/heartbeat.go index 151f1b0b217..986e9384fd3 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -3,6 +3,8 @@ package types import ( "fmt" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -23,7 +25,7 @@ type Heartbeat struct { // SignBytes returns the Heartbeat bytes for signing. // It panics if the Heartbeat is nil. 
func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, heartbeat)) if err != nil { panic(err) } @@ -50,3 +52,32 @@ func (heartbeat *Heartbeat) String() string { heartbeat.Height, heartbeat.Round, heartbeat.Sequence, fmt.Sprintf("/%X.../", cmn.Fingerprint(heartbeat.Signature[:]))) } + +// ValidateBasic performs basic validation. +func (heartbeat *Heartbeat) ValidateBasic() error { + if len(heartbeat.ValidatorAddress) != crypto.AddressSize { + return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes", + crypto.AddressSize, + len(heartbeat.ValidatorAddress), + ) + } + if heartbeat.ValidatorIndex < 0 { + return errors.New("Negative ValidatorIndex") + } + if heartbeat.Height < 0 { + return errors.New("Negative Height") + } + if heartbeat.Round < 0 { + return errors.New("Negative Round") + } + if heartbeat.Sequence < 0 { + return errors.New("Negative Sequence") + } + if len(heartbeat.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(heartbeat.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index ce9e4923066..0951c7b9d87 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -3,8 +3,10 @@ package types import ( "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" ) func TestHeartbeatCopy(t *testing.T) { @@ -34,19 +36,69 @@ func TestHeartbeatString(t *testing.T) { } func TestHeartbeatWriteSignBytes(t *testing.T) { + chainID := "test_chain_id" - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - bz := hb.SignBytes("0xdeadbeef") - // XXX HMMMMMMM - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"10","round":"1","sequence":"0","validator_address":"","validator_index":"1"}`) + { + testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } - plainHb := &Heartbeat{} - bz = plainHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"0","round":"0","sequence":"0","validator_address":"","validator_index":"0"}`) + { + testHeartbeat := &Heartbeat{} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } require.Panics(t, func() { var nilHb *Heartbeat - bz := nilHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), "null") + signBytes := nilHb.SignBytes(chainID) + require.Equal(t, string(signBytes), "null") }) } + +func TestHeartbeatValidateBasic(t *testing.T) { + testCases := []struct { + testName string + malleateHeartBeat func(*Heartbeat) + expectErr bool + }{ + {"Good HeartBeat", func(hb *Heartbeat) {}, false}, + {"Invalid address size", func(hb *Heartbeat) { + hb.ValidatorAddress = nil + }, true}, + {"Negative validator index", 
func(hb *Heartbeat) { + hb.ValidatorIndex = -1 + }, true}, + {"Negative height", func(hb *Heartbeat) { + hb.Height = -1 + }, true}, + {"Negative round", func(hb *Heartbeat) { + hb.Round = -1 + }, true}, + {"Negative sequence", func(hb *Heartbeat) { + hb.Sequence = -1 + }, true}, + {"Missing signature", func(hb *Heartbeat) { + hb.Signature = nil + }, true}, + {"Signature too big", func(hb *Heartbeat) { + hb.Signature = make([]byte, MaxSignatureSize+1) + }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + hb := &Heartbeat{ + ValidatorAddress: secp256k1.GenPrivKey().PubKey().Address(), + Signature: make([]byte, 4), + ValidatorIndex: 1, Height: 10, Round: 1} + + tc.malleateHeartBeat(hb) + assert.Equal(t, tc.expectErr, hb.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go deleted file mode 100644 index cd1eab8cd36..00000000000 --- a/types/nop_event_bus.go +++ /dev/null @@ -1,77 +0,0 @@ -package types - -import ( - "context" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" -) - -type NopEventBus struct{} - -func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -//--- block, tx, and vote events - -func (NopEventBus) PublishEventNewBlock(block EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(header EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventVote(vote EventDataVote) error { - return nil -} - -func (NopEventBus) PublishEventTx(tx EventDataTx) error { - return nil -} - -//--- EventDataRoundState events - -func (NopEventBus) PublishEventNewRoundStep(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutPropose(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutWait(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventNewRound(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventCompleteProposal(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventPolka(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventUnlock(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventRelock(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventLock(rs EventDataRoundState) error { - return nil -} diff --git a/types/params.go b/types/params.go index 3056c82a038..81cf429ff1a 100644 --- a/types/params.go +++ b/types/params.go @@ -2,118 +2,141 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) const ( // MaxBlockSizeBytes is the maximum permitted size of the blocks. MaxBlockSizeBytes = 104857600 // 100MB + + // BlockPartSizeBytes is the size of one block part. + BlockPartSizeBytes = 65536 // 64kB ) -// ConsensusParams contains consensus critical parameters -// that determine the validity of blocks. 
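The `ValidateBasic` tests above use a table-driven "malleate" pattern: every case starts from a known-good value, mutates exactly one field, and asserts whether validation now fails. A stripped-down, dependency-free sketch of the pattern (the `Thing` type is hypothetical):

```go
package main

import "errors"

type Thing struct{ Height int64 }

func (t Thing) ValidateBasic() error {
	if t.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func main() {
	testCases := []struct {
		name      string
		malleate  func(*Thing)
		expectErr bool
	}{
		{"good", func(t *Thing) {}, false},
		{"negative height", func(t *Thing) { t.Height = -1 }, true},
	}
	for _, tc := range testCases {
		thing := Thing{Height: 1} // known-good baseline
		tc.malleate(&thing)
		if gotErr := thing.ValidateBasic() != nil; gotErr != tc.expectErr {
			panic(tc.name)
		}
	}
}
```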
+// ConsensusParams contains consensus critical parameters that determine the +// validity of blocks. type ConsensusParams struct { - BlockSize `json:"block_size_params"` - TxSize `json:"tx_size_params"` - BlockGossip `json:"block_gossip_params"` - EvidenceParams `json:"evidence_params"` -} - -// BlockSize contain limits on the block size. -type BlockSize struct { - MaxBytes int `json:"max_bytes"` // NOTE: must not be 0 nor greater than 100MB - MaxTxs int `json:"max_txs"` - MaxGas int64 `json:"max_gas"` + BlockSize BlockSizeParams `json:"block_size"` + Evidence EvidenceParams `json:"evidence"` + Validator ValidatorParams `json:"validator"` } -// TxSize contain limits on the tx size. -type TxSize struct { - MaxBytes int `json:"max_bytes"` +// BlockSizeParams define limits on the block size. +type BlockSizeParams struct { + MaxBytes int64 `json:"max_bytes"` MaxGas int64 `json:"max_gas"` } -// BlockGossip determine consensus critical elements of how blocks are gossiped -type BlockGossip struct { - BlockPartSizeBytes int `json:"block_part_size_bytes"` // NOTE: must not be 0 -} - // EvidenceParams determine how we handle evidence of malfeasance type EvidenceParams struct { MaxAge int64 `json:"max_age"` // only accept new evidence more recent than this } +// ValidatorParams restrict the public key types validators can use. +// NOTE: uses ABCI pubkey naming, not Amino routes. +type ValidatorParams struct { + PubKeyTypes []string `json:"pub_key_types"` +} + // DefaultConsensusParams returns a default ConsensusParams. func DefaultConsensusParams() *ConsensusParams { return &ConsensusParams{ - DefaultBlockSize(), - DefaultTxSize(), - DefaultBlockGossip(), + DefaultBlockSizeParams(), DefaultEvidenceParams(), + DefaultValidatorParams(), } } -// DefaultBlockSize returns a default BlockSize. -func DefaultBlockSize() BlockSize { - return BlockSize{ +// DefaultBlockSizeParams returns a default BlockSizeParams. +func DefaultBlockSizeParams() BlockSizeParams { + return BlockSizeParams{ MaxBytes: 22020096, // 21MB - MaxTxs: 10000, - MaxGas: -1, - } -} - -// DefaultTxSize returns a default TxSize. -func DefaultTxSize() TxSize { - return TxSize{ - MaxBytes: 10240, // 10kB MaxGas: -1, } } -// DefaultBlockGossip returns a default BlockGossip. -func DefaultBlockGossip() BlockGossip { - return BlockGossip{ - BlockPartSizeBytes: 65536, // 64kB, - } -} - -// DefaultEvidence Params returns a default EvidenceParams. +// DefaultEvidenceParams Params returns a default EvidenceParams. func DefaultEvidenceParams() EvidenceParams { return EvidenceParams{ MaxAge: 100000, // 27.8 hrs at 1block/s } } -// Validate validates the ConsensusParams to ensure all values -// are within their allowed limits, and returns an error if they are not. +// DefaultValidatorParams returns a default ValidatorParams, which allows +// only ed25519 pubkeys. +func DefaultValidatorParams() ValidatorParams { + return ValidatorParams{[]string{ABCIPubKeyTypeEd25519}} +} + +// Validate validates the ConsensusParams to ensure all values are within their +// allowed limits, and returns an error if they are not. func (params *ConsensusParams) Validate() error { - // ensure some values are greater than 0 if params.BlockSize.MaxBytes <= 0 { - return cmn.NewError("BlockSize.MaxBytes must be greater than 0. Got %d", params.BlockSize.MaxBytes) + return cmn.NewError("BlockSize.MaxBytes must be greater than 0. 
Got %d", + params.BlockSize.MaxBytes) } - if params.BlockGossip.BlockPartSizeBytes <= 0 { - return cmn.NewError("BlockGossip.BlockPartSizeBytes must be greater than 0. Got %d", params.BlockGossip.BlockPartSizeBytes) - } - - // ensure blocks aren't too big if params.BlockSize.MaxBytes > MaxBlockSizeBytes { return cmn.NewError("BlockSize.MaxBytes is too big. %d > %d", params.BlockSize.MaxBytes, MaxBlockSizeBytes) } + + if params.BlockSize.MaxGas < -1 { + return cmn.NewError("BlockSize.MaxGas must be greater or equal to -1. Got %d", + params.BlockSize.MaxGas) + } + + if params.Evidence.MaxAge <= 0 { + return cmn.NewError("EvidenceParams.MaxAge must be greater than 0. Got %d", + params.Evidence.MaxAge) + } + + if len(params.Validator.PubKeyTypes) == 0 { + return cmn.NewError("len(Validator.PubKeyTypes) must be greater than 0") + } + + // Check if keyType is a known ABCIPubKeyType + for i := 0; i < len(params.Validator.PubKeyTypes); i++ { + keyType := params.Validator.PubKeyTypes[i] + if _, ok := ABCIPubKeyTypesToAminoRoutes[keyType]; !ok { + return cmn.NewError("params.Validator.PubKeyTypes[%d], %s, is an unknown pubkey type", + i, keyType) + } + } + return nil } -// Hash returns a merkle hash of the parameters to store -// in the block header +// Hash returns a hash of the parameters to store in the block header +// No Merkle tree here, only three values are hashed here +// thus benefit from saving space < drawbacks from proofs' overhead +// Revisit this function if new fields are added to ConsensusParams func (params *ConsensusParams) Hash() []byte { - return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "block_gossip_part_size_bytes": aminoHasher(params.BlockGossip.BlockPartSizeBytes), - "block_size_max_bytes": aminoHasher(params.BlockSize.MaxBytes), - "block_size_max_gas": aminoHasher(params.BlockSize.MaxGas), - "block_size_max_txs": aminoHasher(params.BlockSize.MaxTxs), - "tx_size_max_bytes": aminoHasher(params.TxSize.MaxBytes), - "tx_size_max_gas": aminoHasher(params.TxSize.MaxGas), - }) + hasher := tmhash.New() + bz := cdcEncode(params) + if bz == nil { + panic("cannot fail to encode ConsensusParams") + } + hasher.Write(bz) + return hasher.Sum(nil) +} + +func (params *ConsensusParams) Equals(params2 *ConsensusParams) bool { + return params.BlockSize == params2.BlockSize && + params.Evidence == params2.Evidence && + stringSliceEqual(params.Validator.PubKeyTypes, params2.Validator.PubKeyTypes) +} + +func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + return true } // Update returns a copy of the params with updates from the non-zero fields of p2. @@ -126,31 +149,15 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar } // we must defensively consider any structs may be nil - // XXX: it's cast city over here. It's ok because we only do int32->int - // but still, watch it champ. 
if params2.BlockSize != nil { - if params2.BlockSize.MaxBytes > 0 { - res.BlockSize.MaxBytes = int(params2.BlockSize.MaxBytes) - } - if params2.BlockSize.MaxTxs > 0 { - res.BlockSize.MaxTxs = int(params2.BlockSize.MaxTxs) - } - if params2.BlockSize.MaxGas > 0 { - res.BlockSize.MaxGas = params2.BlockSize.MaxGas - } + res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes + res.BlockSize.MaxGas = params2.BlockSize.MaxGas } - if params2.TxSize != nil { - if params2.TxSize.MaxBytes > 0 { - res.TxSize.MaxBytes = int(params2.TxSize.MaxBytes) - } - if params2.TxSize.MaxGas > 0 { - res.TxSize.MaxGas = params2.TxSize.MaxGas - } + if params2.Evidence != nil { + res.Evidence.MaxAge = params2.Evidence.MaxAge } - if params2.BlockGossip != nil { - if params2.BlockGossip.BlockPartSizeBytes > 0 { - res.BlockGossip.BlockPartSizeBytes = int(params2.BlockGossip.BlockPartSizeBytes) - } + if params2.Validator != nil { + res.Validator.PubKeyTypes = params2.Validator.PubKeyTypes } return res } diff --git a/types/params_test.go b/types/params_test.go index e8e13dba0ff..dc1936fbf61 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -9,68 +9,67 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ) -func newConsensusParams(blockSize, partSize int) ConsensusParams { - return ConsensusParams{ - BlockSize: BlockSize{MaxBytes: blockSize}, - BlockGossip: BlockGossip{BlockPartSizeBytes: partSize}, - } -} +var ( + valEd25519 = []string{ABCIPubKeyTypeEd25519} + valSecp256k1 = []string{ABCIPubKeyTypeSecp256k1} +) func TestConsensusParamsValidation(t *testing.T) { testCases := []struct { params ConsensusParams valid bool }{ - {newConsensusParams(1, 1), true}, - {newConsensusParams(1, 0), false}, - {newConsensusParams(0, 1), false}, - {newConsensusParams(0, 0), false}, - {newConsensusParams(0, 10), false}, - {newConsensusParams(10, -1), false}, - {newConsensusParams(47*1024*1024, 400), true}, - {newConsensusParams(10, 400), true}, - {newConsensusParams(100*1024*1024, 400), true}, - {newConsensusParams(101*1024*1024, 400), false}, - {newConsensusParams(1024*1024*1024, 400), false}, + // test block size + 0: {makeParams(1, 0, 1, valEd25519), true}, + 1: {makeParams(0, 0, 1, valEd25519), false}, + 2: {makeParams(47*1024*1024, 0, 1, valEd25519), true}, + 3: {makeParams(10, 0, 1, valEd25519), true}, + 4: {makeParams(100*1024*1024, 0, 1, valEd25519), true}, + 5: {makeParams(101*1024*1024, 0, 1, valEd25519), false}, + 6: {makeParams(1024*1024*1024, 0, 1, valEd25519), false}, + 7: {makeParams(1024*1024*1024, 0, -1, valEd25519), false}, + // test evidence age + 8: {makeParams(1, 0, 0, valEd25519), false}, + 9: {makeParams(1, 0, -1, valEd25519), false}, + // test no pubkey type provided + 10: {makeParams(1, 0, 1, []string{}), false}, + // test invalid pubkey type provided + 11: {makeParams(1, 0, 1, []string{"potatoes make good pubkeys"}), false}, } - for _, testCase := range testCases { - if testCase.valid { - assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") + for i, tc := range testCases { + if tc.valid { + assert.NoErrorf(t, tc.params.Validate(), "expected no error for valid params (#%d)", i) } else { - assert.Error(t, testCase.params.Validate(), "expected error for non valid params") + assert.Errorf(t, tc.params.Validate(), "expected error for non valid params (#%d)", i) } } } -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) ConsensusParams { - +func makeParams(blockBytes, blockGas, evidenceAge int64, pubkeyTypes []string) ConsensusParams { 
return ConsensusParams{ - BlockSize: BlockSize{ + BlockSize: BlockSizeParams{ MaxBytes: blockBytes, - MaxTxs: blockTx, - MaxGas: int64(blockGas), + MaxGas: blockGas, }, - TxSize: TxSize{ - MaxBytes: txBytes, - MaxGas: int64(txGas), + Evidence: EvidenceParams{ + MaxAge: evidenceAge, }, - BlockGossip: BlockGossip{ - BlockPartSizeBytes: partSize, + Validator: ValidatorParams{ + PubKeyTypes: pubkeyTypes, }, } } func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(1, 2, 3, 4, 5, 6), - makeParams(7, 2, 3, 4, 5, 6), - makeParams(1, 7, 3, 4, 5, 6), - makeParams(1, 2, 7, 4, 5, 6), - makeParams(1, 2, 3, 7, 5, 6), - makeParams(1, 2, 3, 4, 7, 6), - makeParams(1, 2, 3, 4, 5, 7), - makeParams(6, 5, 4, 3, 2, 1), + makeParams(4, 2, 3, valEd25519), + makeParams(1, 4, 3, valEd25519), + makeParams(1, 2, 4, valEd25519), + makeParams(2, 5, 7, valEd25519), + makeParams(1, 7, 6, valEd25519), + makeParams(9, 5, 4, valEd25519), + makeParams(7, 8, 9, valEd25519), + makeParams(4, 6, 5, valEd25519), } hashes := make([][]byte, len(params)) @@ -96,47 +95,26 @@ func TestConsensusParamsUpdate(t *testing.T) { }{ // empty updates { - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, valEd25519), &abci.ConsensusParams{}, - makeParams(1, 2, 3, 4, 5, 6), - }, - // negative BlockPartSizeBytes - { - makeParams(1, 2, 3, 4, 5, 6), - &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - MaxBytes: -100, - MaxTxs: -200, - MaxGas: -300, - }, - TxSize: &abci.TxSize{ - MaxBytes: -400, - MaxGas: -500, - }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: -600, - }, - }, - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, valEd25519), }, // fine updates { - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, valEd25519), &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ + BlockSize: &abci.BlockSizeParams{ MaxBytes: 100, - MaxTxs: 200, - MaxGas: 300, + MaxGas: 200, }, - TxSize: &abci.TxSize{ - MaxBytes: 400, - MaxGas: 500, + Evidence: &abci.EvidenceParams{ + MaxAge: 300, }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: 600, + Validator: &abci.ValidatorParams{ + PubKeyTypes: valSecp256k1, }, }, - makeParams(100, 200, 300, 400, 500, 600), + makeParams(100, 200, 300, valSecp256k1), }, } for _, tc := range testCases { diff --git a/types/part_set.go b/types/part_set.go index f6d7f6b6e2b..af59851c9b8 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -2,11 +2,12 @@ package types import ( "bytes" - "errors" "fmt" "io" "sync" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" @@ -36,6 +37,17 @@ func (part *Part) Hash() []byte { return part.hash } +// ValidateBasic performs basic validation. +func (part *Part) ValidateBasic() error { + if part.Index < 0 { + return errors.New("Negative Index") + } + if len(part.Bytes) > BlockPartSizeBytes { + return fmt.Errorf("Too big (max: %d)", BlockPartSizeBytes) + } + return nil +} + func (part *Part) String() string { return part.StringIndented("") } @@ -70,6 +82,18 @@ func (psh PartSetHeader) Equals(other PartSetHeader) bool { return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash) } +// ValidateBasic performs basic validation. +func (psh PartSetHeader) ValidateBasic() error { + if psh.Total < 0 { + return errors.New("Negative Total") + } + // Hash can be empty in case of POLBlockID.PartsHeader in Proposal. 
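`PartSetHeader.ValidateBasic` above delegates to `ValidateHash`, which, per the comment, must tolerate an empty hash. A sketch of that convention under the assumption that the rule is "empty, or exactly `tmhash.Size` bytes":

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/tmhash"
)

// validateHash mirrors the assumed convention: an absent hash is fine
// (e.g. POLBlockID.PartsHeader in a Proposal), but a present one must be
// a full-size digest.
func validateHash(h []byte) error {
	if len(h) > 0 && len(h) != tmhash.Size {
		return fmt.Errorf("expected size to be %d bytes, got %d bytes", tmhash.Size, len(h))
	}
	return nil
}

func main() {
	fmt.Println(validateHash(nil))                       // <nil>
	fmt.Println(validateHash(make([]byte, 1)))           // error
	fmt.Println(validateHash(make([]byte, tmhash.Size))) // <nil>
}
```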
+ if err := ValidateHash(psh.Hash); err != nil { + return errors.Wrap(err, "Wrong Hash") + } + return nil +} + //------------------------------------- type PartSet struct { @@ -88,7 +112,7 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet { // divide data into 4kb parts. total := (len(data) + partSize - 1) / partSize parts := make([]*Part, total) - parts_ := make([]merkle.Hasher, total) + partsBytes := make([][]byte, total) partsBitArray := cmn.NewBitArray(total) for i := 0; i < total; i++ { part := &Part{ @@ -96,11 +120,11 @@ func NewPartSetFromData(data []byte, partSize int) *PartSet { Bytes: data[i*partSize : cmn.MinInt(len(data), (i+1)*partSize)], } parts[i] = part - parts_[i] = part + partsBytes[i] = part.Bytes partsBitArray.SetIndex(i, true) } // Compute merkle proofs - root, proofs := merkle.SimpleProofsFromHashers(parts_) + root, proofs := merkle.SimpleProofsFromByteSlices(partsBytes) for i := 0; i < total; i++ { parts[i].Proof = *proofs[i] } @@ -190,7 +214,7 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { } // Check hash proof - if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { + if part.Proof.Verify(ps.Hash(), part.Hash()) != nil { return false, ErrPartSetInvalidProof } diff --git a/types/part_set_test.go b/types/part_set_test.go index 3576e747ec7..e597088c6d7 100644 --- a/types/part_set_test.go +++ b/types/part_set_test.go @@ -83,3 +83,48 @@ func TestWrongProof(t *testing.T) { t.Errorf("Expected to fail adding a part with bad bytes.") } } + +func TestPartSetHeaderSetValidateBasic(t *testing.T) { + + testCases := []struct { + testName string + malleatePartSetHeader func(*PartSetHeader) + expectErr bool + }{ + {"Good PartSet", func(psHeader *PartSetHeader) {}, false}, + {"Negative Total", func(psHeader *PartSetHeader) { psHeader.Total = -2 }, true}, + {"Invalid Hash", func(psHeader *PartSetHeader) { psHeader.Hash = make([]byte, 1) }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + data := cmn.RandBytes(testPartSize * 100) + ps := NewPartSetFromData(data, testPartSize) + psHeader := ps.Header() + tc.malleatePartSetHeader(&psHeader) + assert.Equal(t, tc.expectErr, psHeader.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} + +func TestPartValidateBasic(t *testing.T) { + + testCases := []struct { + testName string + malleatePart func(*Part) + expectErr bool + }{ + {"Good Part", func(pt *Part) {}, false}, + {"Negative index", func(pt *Part) { pt.Index = -1 }, true}, + {"Too big part", func(pt *Part) { pt.Bytes = make([]byte, BlockPartSizeBytes+1) }, true}, + } + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + data := cmn.RandBytes(testPartSize * 100) + ps := NewPartSetFromData(data, testPartSize) + part := ps.GetPart(0) + tc.malleatePart(part) + assert.Equal(t, tc.expectErr, part.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/priv_validator.go b/types/priv_validator.go index 1642be41bb6..25be5220d07 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "errors" "fmt" "github.com/tendermint/tendermint/crypto" @@ -103,3 +104,29 @@ func (pv *MockPV) DisableChecks() { // Currently this does nothing, // as MockPV has no safety checks at all. } + +type erroringMockPV struct { + *MockPV +} + +var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") + +// Implements PrivValidator. 
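`NewPartSetFromData` above now builds the Merkle proofs directly from the raw part bytes via `merkle.SimpleProofsFromByteSlices`, and `AddPart` verifies with the error-returning `Verify(rootHash, leafHash)`. A round-trip sketch, assuming (as `Part.Hash` suggests) that the leaf hash is `tmhash` of the item:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/crypto/tmhash"
)

func main() {
	items := [][]byte{[]byte("part-0"), []byte("part-1"), []byte("part-2")}
	root, proofs := merkle.SimpleProofsFromByteSlices(items)
	// Each proof authenticates one leaf against the shared root.
	for i, item := range items {
		if err := proofs[i].Verify(root, tmhash.Sum(item)); err != nil {
			fmt.Println("invalid proof:", err)
			return
		}
	}
	fmt.Printf("all parts verified against root %X\n", root)
}
```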
+func (pv *erroringMockPV) SignVote(chainID string, vote *Vote) error { + return ErroringMockPVErr +} + +// Implements PrivValidator. +func (pv *erroringMockPV) SignProposal(chainID string, proposal *Proposal) error { + return ErroringMockPVErr +} + +// signHeartbeat signs the heartbeat without any checking. +func (pv *erroringMockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error { + return ErroringMockPVErr +} + +// NewErroringMockPV returns a MockPV that fails on each signing request. Again, for testing only. +func NewErroringMockPV() *erroringMockPV { + return &erroringMockPV{&MockPV{ed25519.GenPrivKey()}} +} diff --git a/types/proposal.go b/types/proposal.go index 81a5e2c33f5..f3b62aae7c5 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -6,6 +6,7 @@ import ( "time" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) var ( @@ -14,44 +15,77 @@ var ( ) // Proposal defines a block proposal for the consensus. -// It refers to the block only by its PartSetHeader. +// It refers to the block by BlockID field. // It must be signed by the correct proposer for the given Height/Round // to be considered valid. It may depend on votes from a previous round, -// a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID. +// a so-called Proof-of-Lock (POL) round, as noted in the POLRound. +// If POLRound >= 0, then BlockID corresponds to the block that is locked in POLRound. type Proposal struct { - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` - BlockPartsHeader PartSetHeader `json:"block_parts_header"` - POLRound int `json:"pol_round"` // -1 if null. - POLBlockID BlockID `json:"pol_block_id"` // zero if null. - Signature []byte `json:"signature"` + Type SignedMsgType + Height int64 `json:"height"` + Round int `json:"round"` + POLRound int `json:"pol_round"` // -1 if null. + BlockID BlockID `json:"block_id"` + Timestamp time.Time `json:"timestamp"` + Signature []byte `json:"signature"` } // NewProposal returns a new Proposal. // If there is no POLRound, polRound should be -1. -func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal { +func NewProposal(height int64, round int, polRound int, blockID BlockID) *Proposal { return &Proposal{ - Height: height, - Round: round, - Timestamp: time.Now().UTC(), - BlockPartsHeader: blockPartsHeader, - POLRound: polRound, - POLBlockID: polBlockID, + Type: ProposalType, + Height: height, + Round: round, + BlockID: blockID, + POLRound: polRound, + Timestamp: tmtime.Now(), } } +// ValidateBasic performs basic validation. +func (p *Proposal) ValidateBasic() error { + if p.Type != ProposalType { + return errors.New("Invalid Type") + } + if p.Height < 0 { + return errors.New("Negative Height") + } + if p.Round < 0 { + return errors.New("Negative Round") + } + if p.POLRound < -1 { + return errors.New("Negative POLRound (exception: -1)") + } + if err := p.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + + // NOTE: Timestamp validation is subtle and handled elsewhere. + + if len(p.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(p.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} + // String returns a string representation of the Proposal. 
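With the struct change above, `NewProposal` takes the locked `BlockID` directly, and `ValidateBasic` rejects anything unsigned or malformed. A sketch of the intended call order, mirroring the tests in this diff and assuming `MockPV` and `tmhash` behave as shown here:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/types"
)

func main() {
	blockID := types.BlockID{
		Hash: tmhash.Sum([]byte("blockhash")),
		PartsHeader: types.PartSetHeader{
			Total: 111,
			Hash:  tmhash.Sum([]byte("partshash")),
		},
	}
	prop := types.NewProposal(4, 2, 2, blockID) // height, round, polRound, blockID
	// ValidateBasic checks for a signature, so sign before validating.
	pv := types.NewMockPV()
	if err := pv.SignProposal("test_chain_id", prop); err != nil {
		panic(err)
	}
	fmt.Println("valid:", prop.ValidateBasic() == nil)
}
```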
func (p *Proposal) String() string { - return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %X @ %s}", - p.Height, p.Round, p.BlockPartsHeader, p.POLRound, - p.POLBlockID, - cmn.Fingerprint(p.Signature), CanonicalTime(p.Timestamp)) + return fmt.Sprintf("Proposal{%v/%v (%v, %v) %X @ %s}", + p.Height, + p.Round, + p.BlockID, + p.POLRound, + cmn.Fingerprint(p.Signature), + CanonicalTime(p.Timestamp)) } // SignBytes returns the Proposal bytes for signing func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, p)) if err != nil { panic(err) } diff --git a/types/proposal_test.go b/types/proposal_test.go index 7396fb767bb..f1c048e1d39 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -1,10 +1,13 @@ package types import ( + "math" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" ) var testProposal *Proposal @@ -15,31 +18,26 @@ func init() { panic(err) } testProposal = &Proposal{ - Height: 12345, - Round: 23456, - BlockPartsHeader: PartSetHeader{111, []byte("blockparts")}, - POLRound: -1, - Timestamp: stamp, + Height: 12345, + Round: 23456, + BlockID: BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}}, + POLRound: -1, + Timestamp: stamp, } } func TestProposalSignable(t *testing.T) { - signBytes := testProposal.SignBytes("test_chain_id") - signStr := string(signBytes) + chainID := "test_chain_id" + signBytes := testProposal.SignBytes(chainID) - expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":"111"},"height":"12345","pol_block_id":{},"pol_round":"-1","round":"23456","timestamp":"2018-02-11T07:09:22.765Z"}` - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } - - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } + expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeProposal(chainID, testProposal)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Proposal") } func TestProposalString(t *testing.T) { str := testProposal.String() - expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) 000000000000 @ 2018-02-11T07:09:22.765Z}` + expected := `Proposal{12345/23456 (010203:111:626C6F636B70, -1) 000000000000 @ 2018-02-11T07:09:22.765Z}` if str != expected { t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str) } @@ -49,7 +47,9 @@ func TestProposalVerifySignature(t *testing.T) { privVal := NewMockPV() pubKey := privVal.GetPubKey() - prop := NewProposal(4, 2, PartSetHeader{777, []byte("proper")}, 2, BlockID{}) + prop := NewProposal( + 4, 2, 2, + BlockID{[]byte{1, 2, 3}, PartSetHeader{777, []byte("proper")}}) signBytes := prop.SignBytes("test_chain_id") // sign it @@ -62,9 +62,9 @@ func TestProposalVerifySignature(t *testing.T) { // serialize, deserialize and verify again.... 
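`SignBytes` above now emits deterministic, length-prefixed amino bytes of a canonicalized message that embeds the chain ID, so a signature made for one chain cannot be replayed on another. A generic sketch of that shape; `canonicalFoo` is a hypothetical stand-in for `CanonicalizeProposal`/`CanonicalizeHeartbeat`:

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// canonicalFoo is a flattened, deterministic view of a message plus the
// chain ID it is bound to.
type canonicalFoo struct {
	ChainID string
	Height  int64
	Round   int
}

func signBytes(cdc *amino.Codec, chainID string, height int64, round int) []byte {
	bz, err := cdc.MarshalBinaryLengthPrefixed(canonicalFoo{chainID, height, round})
	if err != nil {
		panic(err) // encoding a plain struct should not fail
	}
	return bz
}

func main() {
	cdc := amino.NewCodec()
	fmt.Printf("sign bytes: %X\n", signBytes(cdc, "test_chain_id", 12345, 23456))
}
```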
newProp := new(Proposal) - bs, err := cdc.MarshalBinary(prop) + bs, err := cdc.MarshalBinaryLengthPrefixed(prop) require.NoError(t, err) - err = cdc.UnmarshalBinary(bs, &newProp) + err = cdc.UnmarshalBinaryLengthPrefixed(bs, &newProp) require.NoError(t, err) // verify the transmitted proposal @@ -100,3 +100,41 @@ func BenchmarkProposalVerifySignature(b *testing.B) { pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) } } + +func TestProposalValidateBasic(t *testing.T) { + + privVal := NewMockPV() + testCases := []struct { + testName string + malleateProposal func(*Proposal) + expectErr bool + }{ + {"Good Proposal", func(p *Proposal) {}, false}, + {"Invalid Type", func(p *Proposal) { p.Type = PrecommitType }, true}, + {"Invalid Height", func(p *Proposal) { p.Height = -1 }, true}, + {"Invalid Round", func(p *Proposal) { p.Round = -1 }, true}, + {"Invalid POLRound", func(p *Proposal) { p.POLRound = -2 }, true}, + {"Invalid BlockId", func(p *Proposal) { + p.BlockID = BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} + }, true}, + {"Invalid Signature", func(p *Proposal) { + p.Signature = make([]byte, 0) + }, true}, + {"Too big Signature", func(p *Proposal) { + p.Signature = make([]byte, MaxSignatureSize+1) + }, true}, + } + blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash"))) + + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + prop := NewProposal( + 4, 2, 2, + blockID) + err := privVal.SignProposal("test_chain_id", prop) + require.NoError(t, err) + tc.malleateProposal(prop) + assert.Equal(t, tc.expectErr, prop.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } +} diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index 805828f8227..99dadac16a7 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -1,21 +1,9 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: block.proto +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types/proto3/block.proto -/* -Package proto3 is a generated protocol buffer package. - -It is generated from these files: - block.proto - -It has these top-level messages: - PartSetHeader - BlockID - Header - Timestamp -*/ package proto3 -import proto "github.com/golang/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" @@ -28,17 +16,39 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
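The updated test above exercises the renamed length-prefixed codec entry points. A minimal encode/decode round trip with a plain struct standing in for `Proposal`, assuming this era's `go-amino` API:

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

type msg struct {
	Height int64
	Round  int
}

func main() {
	cdc := amino.NewCodec()
	bz, err := cdc.MarshalBinaryLengthPrefixed(msg{Height: 4, Round: 2})
	if err != nil {
		panic(err)
	}
	var out msg
	// The length prefix tells the decoder where the message ends, which
	// matters once messages are framed back-to-back on a stream.
	if err := cdc.UnmarshalBinaryLengthPrefixed(bz, &out); err != nil {
		panic(err)
	}
	fmt.Printf("round trip: %+v\n", out)
}
```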
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type PartSetHeader struct { - Total int32 `protobuf:"zigzag32,1,opt,name=Total" json:"Total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + Total int32 `protobuf:"varint,1,opt,name=Total,proto3" json:"Total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{0} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) +} +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) +} +func (m *PartSetHeader) XXX_Size() int { + return xxx_messageInfo_PartSetHeader.Size(m) +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) } -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo func (m *PartSetHeader) GetTotal() int32 { if m != nil { @@ -55,14 +65,36 @@ func (m *PartSetHeader) GetHash() []byte { } type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` + Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{1} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockID.Unmarshal(m, b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) +} +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) +} +func (m *BlockID) XXX_Size() int { + return xxx_messageInfo_BlockID.Size(m) +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) } -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_BlockID proto.InternalMessageInfo func (m *BlockID) GetHash() []byte { if m != nil { @@ -80,29 
+112,61 @@ func (m *BlockID) GetPartsHeader() *PartSetHeader { type Header struct { // basic block info - ChainID string `protobuf:"bytes,1,opt,name=ChainID" json:"ChainID,omitempty"` - Height int64 `protobuf:"zigzag64,2,opt,name=Height" json:"Height,omitempty"` - Time *Timestamp `protobuf:"bytes,3,opt,name=Time" json:"Time,omitempty"` - NumTxs int64 `protobuf:"zigzag64,4,opt,name=NumTxs" json:"NumTxs,omitempty"` + Version *Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` + ChainID string `protobuf:"bytes,2,opt,name=ChainID,proto3" json:"ChainID,omitempty"` + Height int64 `protobuf:"varint,3,opt,name=Height,proto3" json:"Height,omitempty"` + Time *Timestamp `protobuf:"bytes,4,opt,name=Time" json:"Time,omitempty"` + NumTxs int64 `protobuf:"varint,5,opt,name=NumTxs,proto3" json:"NumTxs,omitempty"` + TotalTxs int64 `protobuf:"varint,6,opt,name=TotalTxs,proto3" json:"TotalTxs,omitempty"` // prev block info - LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID" json:"LastBlockID,omitempty"` - TotalTxs int64 `protobuf:"zigzag64,6,opt,name=TotalTxs" json:"TotalTxs,omitempty"` + LastBlockID *BlockID `protobuf:"bytes,7,opt,name=LastBlockID" json:"LastBlockID,omitempty"` // hashes of block data - LastCommitHash []byte `protobuf:"bytes,7,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` - DataHash []byte `protobuf:"bytes,8,opt,name=DataHash,proto3" json:"DataHash,omitempty"` + LastCommitHash []byte `protobuf:"bytes,8,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` + DataHash []byte `protobuf:"bytes,9,opt,name=DataHash,proto3" json:"DataHash,omitempty"` // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,9,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` - ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` + ValidatorsHash []byte `protobuf:"bytes,10,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,11,opt,name=NextValidatorsHash,proto3" json:"NextValidatorsHash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,12,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` + AppHash []byte `protobuf:"bytes,13,opt,name=AppHash,proto3" json:"AppHash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,14,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + EvidenceHash []byte `protobuf:"bytes,15,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,16,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return 
fileDescriptor_block_57c41dfc0fc285b3, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (dst *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(dst, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetVersion() *Version { + if m != nil { + return m.Version + } + return nil +} func (m *Header) GetChainID() string { if m != nil { @@ -132,18 +196,18 @@ func (m *Header) GetNumTxs() int64 { return 0 } -func (m *Header) GetLastBlockID() *BlockID { +func (m *Header) GetTotalTxs() int64 { if m != nil { - return m.LastBlockID + return m.TotalTxs } - return nil + return 0 } -func (m *Header) GetTotalTxs() int64 { +func (m *Header) GetLastBlockID() *BlockID { if m != nil { - return m.TotalTxs + return m.LastBlockID } - return 0 + return nil } func (m *Header) GetLastCommitHash() []byte { @@ -167,6 +231,13 @@ func (m *Header) GetValidatorsHash() []byte { return nil } +func (m *Header) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + func (m *Header) GetConsensusHash() []byte { if m != nil { return m.ConsensusHash @@ -195,19 +266,95 @@ func (m *Header) GetEvidenceHash() []byte { return nil } -// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type -// protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +type Version struct { + Block uint64 `protobuf:"varint,1,opt,name=Block,proto3" json:"Block,omitempty"` + App uint64 `protobuf:"varint,2,opt,name=App,proto3" json:"App,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{3} +} +func (m *Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetBlock() uint64 { + if m != nil { + return m.Block + } + return 0 +} + +func (m *Version) GetApp() uint64 { + if m != nil { + return m.App + } + return 0 +} + +// Timestamp wraps how amino encodes time. +// This is the protobuf well-known type protobuf/timestamp.proto +// See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// Also nanos do not get skipped if they are zero in amino. 
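The regenerated `Timestamp` above switches `seconds`/`nanos` from fixed-width to varint encoding, aligning it with the protobuf well-known type. Populating it from a `time.Time` is the usual split (a sketch, using the generated `proto3` package from this diff):

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/types/proto3"
)

func main() {
	t := time.Now().UTC()
	ts := proto3.Timestamp{
		Seconds: t.Unix(),              // whole seconds since the Unix epoch
		Nanos:   int32(t.Nanosecond()), // sub-second remainder, 0..999999999
	}
	fmt.Printf("%d.%09d\n", ts.Seconds, ts.Nanos)
}
```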
+// NOTE/XXX: nanos do not get skipped if they are zero in amino. type Timestamp struct { - Seconds int64 `protobuf:"fixed64,1,opt,name=seconds" json:"seconds,omitempty"` - Nanos int32 `protobuf:"fixed32,2,opt,name=nanos" json:"nanos,omitempty"` + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_block_57c41dfc0fc285b3, []int{4} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -227,35 +374,41 @@ func init() { proto.RegisterType((*PartSetHeader)(nil), "proto3.PartSetHeader") proto.RegisterType((*BlockID)(nil), "proto3.BlockID") proto.RegisterType((*Header)(nil), "proto3.Header") + proto.RegisterType((*Version)(nil), "proto3.Version") proto.RegisterType((*Timestamp)(nil), "proto3.Timestamp") } -func init() { proto.RegisterFile("block.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x4f, 0x6b, 0xe3, 0x30, - 0x10, 0xc5, 0xf1, 0xe6, 0xff, 0x38, 0xd9, 0x6c, 0x86, 0xdd, 0xc5, 0xf4, 0x14, 0x4c, 0x5b, 0x72, - 0x0a, 0xb4, 0x39, 0x94, 0xd2, 0x53, 0x9b, 0x14, 0x12, 0x28, 0xa5, 0xa8, 0x21, 0x77, 0x25, 0x16, - 0x8d, 0xa9, 0x2d, 0x19, 0x4b, 0x29, 0xfd, 0x7c, 0xfd, 0x64, 0x45, 0x23, 0xdb, 0x8d, 0x73, 0x4a, - 0xde, 0x9b, 0x37, 0xbf, 0x91, 0x47, 0x02, 0x7f, 0x9b, 0xa8, 0xdd, 0xfb, 0x34, 0xcb, 0x95, 0x51, - 0xd8, 0xa6, 0x9f, 0x59, 0x78, 0x0b, 0x83, 0x17, 0x9e, 0x9b, 0x57, 0x61, 0x96, 0x82, 0x47, 0x22, - 0xc7, 0xbf, 0xd0, 0x5a, 0x2b, 0xc3, 0x93, 0xc0, 0x1b, 0x7b, 0x93, 0x11, 0x73, 0x02, 0x11, 0x9a, - 0x4b, 0xae, 0xf7, 0xc1, 0xaf, 0xb1, 0x37, 0xe9, 0x33, 0xfa, 0x1f, 0x6e, 0xa0, 0xf3, 0x60, 0x89, - 0xab, 0x45, 0x55, 0xf6, 0x7e, 0xca, 0x78, 0x03, 0xbe, 0x25, 0x6b, 0xc7, 0xa5, 0x4e, 0xff, 0xfa, - 0x9f, 0x1b, 0x3f, 0x9b, 0xd6, 0x86, 0xb2, 0xe3, 0x64, 0xf8, 0xd5, 0x80, 0x76, 0x71, 0x98, 0x00, - 0x3a, 0xf3, 0x3d, 0x8f, 0xe5, 0x6a, 0x41, 0xe8, 0x1e, 0x2b, 0x25, 0xfe, 0xb7, 0x99, 0xf8, 0x6d, - 0x6f, 0x08, 0x8c, 0xac, 0x50, 0x78, 0x01, 0xcd, 0x75, 0x9c, 0x8a, 0xa0, 0x41, 0xe3, 0x46, 0xe5, - 0x38, 0xeb, 0x69, 0xc3, 0xd3, 0x8c, 0x51, 0xd9, 0xb6, 0x3f, 0x1f, 0xd2, 0xf5, 0xa7, 0x0e, 0x9a, - 0xae, 0xdd, 0x29, 0xbc, 0x02, 0xff, 0x89, 0x6b, 0x53, 0x7c, 0x57, 0xd0, 0x22, 0xca, 0xb0, 0xa4, - 0x14, 0x36, 
0x3b, 0xce, 0xe0, 0x19, 0x74, 0x69, 0x47, 0x16, 0xd6, 0x26, 0x58, 0xa5, 0xf1, 0x12, - 0x7e, 0xdb, 0xe8, 0x5c, 0xa5, 0x69, 0x6c, 0x68, 0x43, 0x1d, 0xda, 0xd0, 0x89, 0x6b, 0x19, 0x0b, - 0x6e, 0x38, 0x25, 0xba, 0x94, 0xa8, 0xb4, 0x65, 0x6c, 0x78, 0x12, 0x47, 0xdc, 0xa8, 0x5c, 0x53, - 0xa2, 0xe7, 0x18, 0x75, 0x17, 0xcf, 0x61, 0x30, 0x57, 0x52, 0x0b, 0xa9, 0x0f, 0x2e, 0x06, 0x14, - 0xab, 0x9b, 0x76, 0xa3, 0xf7, 0x59, 0x46, 0x75, 0x9f, 0xea, 0xa5, 0xc4, 0x09, 0x0c, 0xed, 0xa9, - 0x98, 0xd0, 0x87, 0xc4, 0x38, 0x42, 0x9f, 0x12, 0xa7, 0x36, 0x86, 0xd0, 0x7f, 0xfc, 0x88, 0x23, - 0x21, 0x77, 0x82, 0x62, 0x03, 0x8a, 0xd5, 0xbc, 0xf0, 0x0e, 0x7a, 0xd5, 0xce, 0xed, 0x50, 0x2d, - 0x76, 0x4a, 0x46, 0x9a, 0xae, 0xf1, 0x0f, 0x2b, 0xa5, 0x7d, 0x6d, 0x92, 0x4b, 0xa5, 0xe9, 0x16, - 0x87, 0xcc, 0x89, 0x6d, 0xf1, 0x38, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x74, 0x2f, 0xbd, - 0xb2, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_block_57c41dfc0fc285b3) } + +var fileDescriptor_block_57c41dfc0fc285b3 = []byte{ + // 451 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5f, 0x6f, 0xd3, 0x30, + 0x10, 0x57, 0x68, 0xda, 0xae, 0x97, 0x76, 0x1d, 0x27, 0x40, 0x16, 0x4f, 0x55, 0x04, 0xa8, 0xbc, + 0x74, 0xda, 0xf6, 0x80, 0x10, 0x4f, 0xa5, 0x45, 0xda, 0x24, 0x34, 0x4d, 0xa6, 0xea, 0xbb, 0xd7, + 0x58, 0x34, 0xa2, 0x89, 0xa3, 0x9c, 0x8b, 0xc6, 0x27, 0xe4, 0x6b, 0x21, 0x9f, 0x93, 0xd0, 0x44, + 0x7b, 0xf3, 0xef, 0xcf, 0xfd, 0xce, 0xbe, 0x5c, 0x40, 0xd8, 0x3f, 0x85, 0xa6, 0xcb, 0xa2, 0x34, + 0xd6, 0xdc, 0x5c, 0x3e, 0x1e, 0xcc, 0xee, 0xd7, 0x82, 0x01, 0x0e, 0x3c, 0x17, 0x7f, 0x86, 0xc9, + 0x83, 0x2a, 0xed, 0x0f, 0x6d, 0x6f, 0xb5, 0x4a, 0x74, 0x89, 0xaf, 0xa0, 0xbf, 0x31, 0x56, 0x1d, + 0x44, 0x30, 0x0b, 0xe6, 0x7d, 0xe9, 0x01, 0x22, 0x84, 0xb7, 0x8a, 0xf6, 0xe2, 0xc5, 0x2c, 0x98, + 0x8f, 0x25, 0x9f, 0xe3, 0x2d, 0x0c, 0xbf, 0xba, 0xc4, 0xbb, 0x75, 0x23, 0x07, 0xff, 0x65, 0xfc, + 0x04, 0x91, 0x4b, 0x26, 0x9f, 0xcb, 0x95, 0xd1, 0xf5, 0x6b, 0xdf, 0xfe, 0x66, 0xd1, 0x6a, 0x2a, + 0x4f, 0x9d, 0xf1, 0xdf, 0x10, 0x06, 0xd5, 0x65, 0x3e, 0xc2, 0x70, 0xab, 0x4b, 0x4a, 0x4d, 0xce, + 0xd1, 0xd1, 0xf5, 0xb4, 0xae, 0xaf, 0x68, 0x59, 0xeb, 0x28, 0x60, 0xb8, 0xda, 0xab, 0x34, 0xbf, + 0x5b, 0x73, 0xab, 0x91, 0xac, 0x21, 0xbe, 0x71, 0x71, 0xe9, 0xcf, 0xbd, 0x15, 0xbd, 0x59, 0x30, + 0xef, 0xc9, 0x0a, 0xe1, 0x7b, 0x08, 0x37, 0x69, 0xa6, 0x45, 0xc8, 0xc9, 0x2f, 0xeb, 0x64, 0xc7, + 0x91, 0x55, 0x59, 0x21, 0x59, 0x76, 0xe5, 0xf7, 0xc7, 0x6c, 0xf3, 0x44, 0xa2, 0xef, 0xcb, 0x3d, + 0xc2, 0xb7, 0x70, 0xc6, 0xb3, 0x71, 0xca, 0x80, 0x95, 0x06, 0xe3, 0x15, 0x44, 0xdf, 0x15, 0xd9, + 0x6a, 0x3c, 0x62, 0xd8, 0xbe, 0x7b, 0x45, 0xcb, 0x53, 0x0f, 0x7e, 0x80, 0x73, 0x07, 0x57, 0x26, + 0xcb, 0x52, 0xcb, 0xc3, 0x3c, 0xe3, 0x61, 0x76, 0x58, 0xd7, 0x76, 0xad, 0xac, 0x62, 0xc7, 0x88, + 0x1d, 0x0d, 0x76, 0x19, 0x5b, 0x75, 0x48, 0x13, 0x65, 0x4d, 0x49, 0xec, 0x00, 0x9f, 0xd1, 0x66, + 0x71, 0x01, 0x78, 0xaf, 0x9f, 0x6c, 0xc7, 0x1b, 0xb1, 0xf7, 0x19, 0x05, 0xdf, 0xc1, 0x64, 0x65, + 0x72, 0xd2, 0x39, 0x1d, 0xbd, 0x75, 0xcc, 0xd6, 0x36, 0xe9, 0xbe, 0xc0, 0xb2, 0x28, 0x58, 0x9f, + 0xb0, 0x5e, 0x43, 0x9c, 0xc3, 0xd4, 0xbd, 0x42, 0x6a, 0x3a, 0x1e, 0xac, 0x4f, 0x38, 0x67, 0x47, + 0x97, 0xc6, 0x18, 0xc6, 0xdf, 0x7e, 0xa7, 0x89, 0xce, 0x77, 0x9a, 0x6d, 0x53, 0xb6, 0xb5, 0x38, + 0x97, 0xf6, 0x50, 0x9a, 0xc2, 0x90, 0x2e, 0x97, 0x49, 0x52, 0x6a, 0x22, 0x71, 0xe1, 0xd3, 0x3a, + 0x74, 0x7c, 0xd5, 0xac, 0x8f, 0x5b, 0x6b, 0x9e, 0x34, 0xef, 0x51, 0x28, 0x3d, 0xc0, 0x0b, 0xe8, + 
0x2d, 0x8b, 0x82, 0x17, 0x26, 0x94, 0xee, 0x18, 0x7f, 0x81, 0x51, 0xb3, 0x00, 0xee, 0x45, 0xa4, + 0x77, 0x26, 0x4f, 0x88, 0xcb, 0x7a, 0xb2, 0x86, 0x2e, 0x2e, 0x57, 0xb9, 0x21, 0x2e, 0xed, 0x4b, + 0x0f, 0x1e, 0xab, 0x9f, 0xea, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4f, 0x84, 0xb5, 0xf8, 0x77, + 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index bc3cf8749ec..93cf1bc752e 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -4,7 +4,7 @@ package proto3; message PartSetHeader { - sint32 Total = 1; + int32 Total = 1; bytes Hash = 2; } @@ -15,34 +15,43 @@ message BlockID { message Header { // basic block info - string ChainID = 1; - sint64 Height = 2; - Timestamp Time = 3; - sint64 NumTxs = 4; + Version Version = 1; + string ChainID = 2; + int64 Height = 3; + Timestamp Time = 4; + int64 NumTxs = 5; + int64 TotalTxs = 6; // prev block info - BlockID LastBlockID = 5; - sint64 TotalTxs = 6; + BlockID LastBlockID = 7; // hashes of block data - bytes LastCommitHash = 7; // commit from validators from the last block - bytes DataHash = 8; // transactions + bytes LastCommitHash = 8; // commit from validators from the last block + bytes DataHash = 9; // transactions // hashes from the app output from the prev block - bytes ValidatorsHash = 9; // validators for the current block - bytes ConsensusHash = 10; // consensus params for current block - bytes AppHash = 11; // state after txs from the previous block - bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block + bytes ValidatorsHash = 10; // validators for the current block + bytes NextValidatorsHash = 11; // validators for the next block + bytes ConsensusHash = 12; // consensus params for current block + bytes AppHash = 13; // state after txs from the previous block + bytes LastResultsHash = 14; // root hash of all results from the txs from the previous block // consensus info - bytes EvidenceHash = 13; // evidence included in the block + bytes EvidenceHash = 15; // evidence included in the block + bytes ProposerAddress = 16; // original proposer of the block } -// Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type -// protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: +message Version { + uint64 Block = 1; + uint64 App = 2; +} + +// Timestamp wraps how amino encodes time. +// This is the protobuf well-known type protobuf/timestamp.proto +// See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 -// Also nanos do not get skipped if they are zero in amino. +// NOTE/XXX: nanos do not get skipped if they are zero in amino. 
message Timestamp { - sfixed64 seconds = 1; - sfixed32 nanos = 2; + int64 seconds = 1; + int32 nanos = 2; } diff --git a/types/proto3_test.go b/types/proto3_test.go index 19a624a65dd..c9dfa35a9b1 100644 --- a/types/proto3_test.go +++ b/types/proto3_test.go @@ -63,15 +63,10 @@ func TestProto3Compatibility(t *testing.T) { assert.Equal(t, ab, pb, "encoding doesn't match") emptyLastBlockPb := proto3.Header{ - ChainID: "cosmos", - Height: 150, - Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, - NumTxs: 7, - // This is not fully skipped in amino (yet) although it is empty: - LastBlockID: &proto3.BlockID{ - PartsHeader: &proto3.PartSetHeader{ - }, - }, + ChainID: "cosmos", + Height: 150, + Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, + NumTxs: 7, TotalTxs: 100, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), diff --git a/types/protobuf.go b/types/protobuf.go index 01d4ebf0cbb..1535c1e3762 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "fmt" "reflect" "time" @@ -25,6 +24,12 @@ const ( ABCIPubKeyTypeSecp256k1 = "secp256k1" ) +// TODO: Make non-global by allowing for registration of more pubkey types +var ABCIPubKeyTypesToAminoRoutes = map[string]string{ + ABCIPubKeyTypeEd25519: ed25519.PubKeyAminoRoute, + ABCIPubKeyTypeSecp256k1: secp256k1.PubKeyAminoRoute, +} + //------------------------------------------------------- // TM2PB is used for converting Tendermint ABCI to protobuf ABCI. @@ -35,34 +40,58 @@ type tm2pb struct{} func (tm2pb) Header(header *Header) abci.Header { return abci.Header{ - ChainID: header.ChainID, - Height: header.Height, - + Version: abci.Version{ + Block: header.Version.Block.Uint64(), + App: header.Version.App.Uint64(), + }, + ChainID: header.ChainID, + Height: header.Height, Time: header.Time, - NumTxs: int32(header.NumTxs), // XXX: overflow + NumTxs: header.NumTxs, TotalTxs: header.TotalTxs, - LastBlockHash: header.LastBlockID.Hash, - ValidatorsHash: header.ValidatorsHash, - AppHash: header.AppHash, + LastBlockId: TM2PB.BlockID(header.LastBlockID), + + LastCommitHash: header.LastCommitHash, + DataHash: header.DataHash, + + ValidatorsHash: header.ValidatorsHash, + NextValidatorsHash: header.NextValidatorsHash, + ConsensusHash: header.ConsensusHash, + AppHash: header.AppHash, + LastResultsHash: header.LastResultsHash, - // Proposer: TODO + EvidenceHash: header.EvidenceHash, + ProposerAddress: header.ProposerAddress, } } -func (tm2pb) ValidatorWithoutPubKey(val *Validator) abci.Validator { +func (tm2pb) Validator(val *Validator) abci.Validator { return abci.Validator{ Address: val.PubKey.Address(), Power: val.VotingPower, } } +func (tm2pb) BlockID(blockID BlockID) abci.BlockID { + return abci.BlockID{ + Hash: blockID.Hash, + PartsHeader: TM2PB.PartSetHeader(blockID.PartsHeader), + } +} + +func (tm2pb) PartSetHeader(header PartSetHeader) abci.PartSetHeader { + return abci.PartSetHeader{ + Total: int32(header.Total), + Hash: header.Hash, + } +} + // XXX: panics on unknown pubkey type -func (tm2pb) Validator(val *Validator) abci.Validator { - return abci.Validator{ - Address: val.PubKey.Address(), - PubKey: TM2PB.PubKey(val.PubKey), - Power: val.VotingPower, +func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { + return abci.ValidatorUpdate{ + PubKey: TM2PB.PubKey(val.PubKey), + Power: val.VotingPower, } } @@ -86,28 +115,25 @@ func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { } // XXX: panics on nil or unknown pubkey type -func (tm2pb) Validators(vals 
*ValidatorSet) []abci.Validator { - validators := make([]abci.Validator, vals.Size()) +func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { + validators := make([]abci.ValidatorUpdate, vals.Size()) for i, val := range vals.Validators { - validators[i] = TM2PB.Validator(val) + validators[i] = TM2PB.ValidatorUpdate(val) } return validators } func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { return &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - - MaxBytes: int32(params.BlockSize.MaxBytes), - MaxTxs: int32(params.BlockSize.MaxTxs), + BlockSize: &abci.BlockSizeParams{ + MaxBytes: params.BlockSize.MaxBytes, MaxGas: params.BlockSize.MaxGas, }, - TxSize: &abci.TxSize{ - MaxBytes: int32(params.TxSize.MaxBytes), - MaxGas: params.TxSize.MaxGas, + Evidence: &abci.EvidenceParams{ + MaxAge: params.Evidence.MaxAge, }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: int32(params.BlockGossip.BlockPartSizeBytes), + Validator: &abci.ValidatorParams{ + PubKeyTypes: params.Validator.PubKeyTypes, }, } } @@ -136,7 +162,7 @@ func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci. return abci.Evidence{ Type: evType, - Validator: TM2PB.ValidatorWithoutPubKey(val), + Validator: TM2PB.Validator(val), Height: ev.Height(), Time: evTime, TotalVotingPower: valSet.TotalVotingPower(), @@ -144,12 +170,11 @@ func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci. } // XXX: panics on nil or unknown pubkey type -func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator { +func (tm2pb) NewValidatorUpdate(pubkey crypto.PubKey, power int64) abci.ValidatorUpdate { pubkeyABCI := TM2PB.PubKey(pubkey) - return abci.Validator{ - Address: pubkey.Address(), - PubKey: pubkeyABCI, - Power: power, + return abci.ValidatorUpdate{ + PubKey: pubkeyABCI, + Power: power, } } @@ -185,46 +210,29 @@ func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) { } } -func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) { +func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { pub, err := PB2TM.PubKey(v.PubKey) if err != nil { return nil, err } - // If the app provided an address too, it must match. - // This is just a sanity check. 
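The conversions above split `abci.Validator` (address plus power, as sent in `BeginBlock`) from `abci.ValidatorUpdate` (pubkey plus power, as returned from `EndBlock`). A sketch of producing an update for a fresh key via the renamed helper, assuming `TM2PB` is exported as it is used here:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)

func main() {
	pubkey := ed25519.GenPrivKey().PubKey()
	// Updates carry the pubkey so an ABCI app can introduce brand-new
	// validators; Tendermint derives the address from it.
	update := types.TM2PB.NewValidatorUpdate(pubkey, 10)
	fmt.Printf("power=%d pubkey type=%s\n", update.Power, update.PubKey.Type)
}
```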
- if len(v.Address) > 0 { - if !bytes.Equal(pub.Address(), v.Address) { - return nil, fmt.Errorf("Validator.Address (%X) does not match PubKey.Address (%X)", - v.Address, pub.Address()) - } - } - tmVals[i] = &Validator{ - Address: pub.Address(), - PubKey: pub, - VotingPower: v.Power, - } + tmVals[i] = NewValidator(pub, v.Power) } return tmVals, nil } func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { return ConsensusParams{ - BlockSize: BlockSize{ - MaxBytes: int(csp.BlockSize.MaxBytes), // XXX - MaxTxs: int(csp.BlockSize.MaxTxs), // XXX + BlockSize: BlockSizeParams{ + MaxBytes: csp.BlockSize.MaxBytes, MaxGas: csp.BlockSize.MaxGas, }, - TxSize: TxSize{ - MaxBytes: int(csp.TxSize.MaxBytes), // XXX - MaxGas: csp.TxSize.MaxGas, + Evidence: EvidenceParams{ + MaxAge: csp.Evidence.MaxAge, }, - BlockGossip: BlockGossip{ - BlockPartSizeBytes: int(csp.BlockGossip.BlockPartSizeBytes), // XXX + Validator: ValidatorParams{ + PubKeyTypes: csp.Validator.PubKeyTypes, }, - // TODO: EvidenceParams: EvidenceParams{ - // MaxAge: int(csp.Evidence.MaxAge), // XXX - // }, } } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 6c9ba3668e7..f5a2ce5d459 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -4,11 +4,16 @@ import ( "testing" "time" + "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + + "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/version" ) func TestABCIPubKey(t *testing.T) { @@ -29,66 +34,113 @@ func TestABCIValidators(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() // correct validator - tmValExpected := &Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - } + tmValExpected := NewValidator(pkEd, 10) - tmVal := &Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - } + tmVal := NewValidator(pkEd, 10) - abciVal := TM2PB.Validator(tmVal) - tmVals, err := PB2TM.Validators([]abci.Validator{abciVal}) + abciVal := TM2PB.ValidatorUpdate(tmVal) + tmVals, err := PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) - abciVals := TM2PB.Validators(NewValidatorSet(tmVals)) - assert.Equal(t, []abci.Validator{abciVal}, abciVals) + abciVals := TM2PB.ValidatorUpdates(NewValidatorSet(tmVals)) + assert.Equal(t, []abci.ValidatorUpdate{abciVal}, abciVals) // val with address tmVal.Address = pkEd.Address() - abciVal = TM2PB.Validator(tmVal) - tmVals, err = PB2TM.Validators([]abci.Validator{abciVal}) + abciVal = TM2PB.ValidatorUpdate(tmVal) + tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) - // val with incorrect address - abciVal = TM2PB.Validator(tmVal) - abciVal.Address = []byte("incorrect!") - tmVals, err = PB2TM.Validators([]abci.Validator{abciVal}) + // val with incorrect pubkey data + abciVal = TM2PB.ValidatorUpdate(tmVal) + abciVal.PubKey.Data = []byte("incorrect!") + tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.NotNil(t, err) assert.Nil(t, tmVals) } func TestABCIConsensusParams(t *testing.T) { cp := DefaultConsensusParams() - cp.EvidenceParams.MaxAge = 0 // TODO add this to ABCI abciCP := TM2PB.ConsensusParams(cp) cp2 := PB2TM.ConsensusParams(abciCP) 
assert.Equal(t, *cp, cp2) } +func newHeader( + height, numTxs int64, + commitHash, dataHash, evidenceHash []byte, +) *Header { + return &Header{ + Height: height, + NumTxs: numTxs, + LastCommitHash: commitHash, + DataHash: dataHash, + EvidenceHash: evidenceHash, + } +} + func TestABCIHeader(t *testing.T) { - header := &Header{ - Height: int64(3), - Time: time.Now(), - NumTxs: int64(10), + // build a full header + var height int64 = 5 + var numTxs int64 = 3 + header := newHeader( + height, numTxs, + []byte("lastCommitHash"), []byte("dataHash"), []byte("evidenceHash"), + ) + protocolVersion := version.Consensus{7, 8} + timestamp := time.Now() + lastBlockID := BlockID{ + Hash: []byte("hash"), + PartsHeader: PartSetHeader{ + Total: 10, + Hash: []byte("hash"), + }, } - abciHeader := TM2PB.Header(header) + var totalTxs int64 = 100 + header.Populate( + protocolVersion, "chainID", + timestamp, lastBlockID, totalTxs, + []byte("valHash"), []byte("nextValHash"), + []byte("consHash"), []byte("appHash"), []byte("lastResultsHash"), + []byte("proposerAddress"), + ) + + cdc := amino.NewCodec() + headerBz := cdc.MustMarshalBinaryBare(header) + + pbHeader := TM2PB.Header(header) + pbHeaderBz, err := proto.Marshal(&pbHeader) + assert.NoError(t, err) + + // assert some fields match + assert.EqualValues(t, protocolVersion.Block, pbHeader.Version.Block) + assert.EqualValues(t, protocolVersion.App, pbHeader.Version.App) + assert.EqualValues(t, "chainID", pbHeader.ChainID) + assert.EqualValues(t, height, pbHeader.Height) + assert.EqualValues(t, timestamp, pbHeader.Time) + assert.EqualValues(t, numTxs, pbHeader.NumTxs) + assert.EqualValues(t, totalTxs, pbHeader.TotalTxs) + assert.EqualValues(t, lastBlockID.Hash, pbHeader.LastBlockId.Hash) + assert.EqualValues(t, []byte("lastCommitHash"), pbHeader.LastCommitHash) + assert.Equal(t, []byte("proposerAddress"), pbHeader.ProposerAddress) + + // assert the encodings match + // NOTE: they don't yet because Amino encodes + // int64 as zig-zag and we're using non-zigzag in the protobuf. 
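The NOTE above (its issue link follows below) pins the remaining encoding mismatch on integers: per that comment, amino zig-zags int64 while this protobuf schema uses plain varints. The standard zig-zag mapping, sketched for a small height value (illustrative, not code from this repo):

    n := int64(5)
    zz := uint64((n << 1) ^ (n >> 63)) // zig-zag: 5 -> 10 -> varint byte 0x0a
    plain := uint64(n)                 // plain:   5 ->       varint byte 0x05
    _, _ = zz, plain
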
+ // See https://github.com/tendermint/tendermint/issues/2682 + _, _ = headerBz, pbHeaderBz + // assert.EqualValues(t, headerBz, pbHeaderBz) - assert.Equal(t, int64(3), abciHeader.Height) } func TestABCIEvidence(t *testing.T) { val := NewMockPV() - blockID := makeBlockID("blockhash", 1000, "partshash") - blockID2 := makeBlockID("blockhash2", 1000, "partshash") + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" ev := &DuplicateVoteEvidence{ PubKey: val.GetPubKey(), @@ -102,36 +154,29 @@ func TestABCIEvidence(t *testing.T) { ) assert.Equal(t, "duplicate/vote", abciEv.Type) - - // test we do not send pubkeys - assert.Empty(t, abciEv.Validator.PubKey) } type pubKeyEddie struct{} -func (pubKeyEddie) Address() Address { return []byte{} } -func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } func (pubKeyEddie) VerifyBytes(msg []byte, sig []byte) bool { return false } -func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() - abciVal := TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10) + abciVal := TM2PB.NewValidatorUpdate(pubkey, 10) assert.Equal(t, int64(10), abciVal.Power) - assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(nil, 10) }) - assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(pubKeyEddie{}, 10) }) + assert.Panics(t, func() { TM2PB.NewValidatorUpdate(nil, 10) }) + assert.Panics(t, func() { TM2PB.NewValidatorUpdate(pubKeyEddie{}, 10) }) } func TestABCIValidatorWithoutPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - abciVal := TM2PB.ValidatorWithoutPubKey(&Validator{ - Address: pkEd.Address(), - PubKey: pkEd, - VotingPower: 10, - }) + abciVal := TM2PB.Validator(NewValidator(pkEd, 10)) // pubkey must be nil tmValExpected := abci.Validator{ diff --git a/types/results.go b/types/results.go index 17d5891c3c6..db781168491 100644 --- a/types/results.go +++ b/types/results.go @@ -3,6 +3,7 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -17,10 +18,14 @@ type ABCIResult struct { // Hash returns the canonical hash of the ABCIResult func (a ABCIResult) Hash() []byte { - bz := aminoHash(a) + bz := tmhash.Sum(cdcEncode(a)) return bz } +func (a ABCIResult) Bytes() []byte { + return cdcEncode(a) +} + // ABCIResults wraps the deliver tx results to return a proof type ABCIResults []ABCIResult @@ -43,7 +48,7 @@ func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { // Bytes serializes the ABCIResponse using wire func (a ABCIResults) Bytes() []byte { - bz, err := cdc.MarshalBinary(a) + bz, err := cdc.MarshalBinaryLengthPrefixed(a) if err != nil { panic(err) } @@ -54,20 +59,20 @@ func (a ABCIResults) Bytes() []byte { func (a ABCIResults) Hash() []byte { // NOTE: we copy the impl of the merkle tree for txs - // we should be consistent and either do it for both or not. 
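ABCIResult.Hash above now composes two explicit steps: Bytes gives the amino encoding of (Code, Data), and the leaf hash is tmhash.Sum of those bytes. A sketch assuming the imports already used by results_test.go plus bytes and crypto/tmhash:

    res := types.NewResultFromResponse(&abci.ResponseDeliverTx{Code: 0, Data: []byte("one")})
    leaf := res.Bytes()                                    // amino-encoded result
    fmt.Println(bytes.Equal(res.Hash(), tmhash.Sum(leaf))) // true: Hash = tmhash(Bytes)
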
- return merkle.SimpleHashFromHashers(a.toHashers()) + return merkle.SimpleHashFromByteSlices(a.toByteSlices()) } // ProveResult returns a merkle proof of one result from the set func (a ABCIResults) ProveResult(i int) merkle.SimpleProof { - _, proofs := merkle.SimpleProofsFromHashers(a.toHashers()) + _, proofs := merkle.SimpleProofsFromByteSlices(a.toByteSlices()) return *proofs[i] } -func (a ABCIResults) toHashers() []merkle.Hasher { +func (a ABCIResults) toByteSlices() [][]byte { l := len(a) - hashers := make([]merkle.Hasher, l) + bzs := make([][]byte, l) for i := 0; i < l; i++ { - hashers[i] = a[i] + bzs[i] = a[i].Bytes() } - return hashers + return bzs } diff --git a/types/results_test.go b/types/results_test.go index 8cbe319ff04..4e57e580450 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -38,12 +38,12 @@ func TestABCIResults(t *testing.T) { for i, res := range results { proof := results.ProveResult(i) - valid := proof.Verify(i, len(results), res.Hash(), root) - assert.True(t, valid, "%d", i) + valid := proof.Verify(root, res.Hash()) + assert.NoError(t, valid, "%d", i) } } -func TestABCIBytes(t *testing.T) { +func TestABCIResultsBytes(t *testing.T) { results := NewResults([]*abci.ResponseDeliverTx{ {Code: 0, Data: []byte{}}, {Code: 0, Data: []byte("one")}, diff --git a/types/signable.go b/types/signable.go index cc6498882a6..baabdff080d 100644 --- a/types/signable.go +++ b/types/signable.go @@ -1,5 +1,17 @@ package types +import ( + "github.com/tendermint/tendermint/crypto/ed25519" + cmn "github.com/tendermint/tendermint/libs/common" +) + +var ( + // MaxSignatureSize is a maximum allowed signature size for the Heartbeat, + // Proposal and Vote. + // XXX: secp256k1 does not have Size nor MaxSize defined. + MaxSignatureSize = cmn.MaxInt(ed25519.SignatureSize, 64) +) + // Signable is an interface for all signable things. // It typically removes signatures before serializing. // SignBytes returns the bytes to be signed diff --git a/types/signed_msg_type.go b/types/signed_msg_type.go new file mode 100644 index 00000000000..10e7c70c095 --- /dev/null +++ b/types/signed_msg_type.go @@ -0,0 +1,26 @@ +package types + +// SignedMsgType is a type of signed message in the consensus. +type SignedMsgType byte + +const ( + // Votes + PrevoteType SignedMsgType = 0x01 + PrecommitType SignedMsgType = 0x02 + + // Proposals + ProposalType SignedMsgType = 0x20 + + // Heartbeat + HeartbeatType SignedMsgType = 0x30 +) + +// IsVoteTypeValid returns true if t is a valid vote type. 
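The merkle API switch above replaces the Hasher interface with raw byte slices, and per the updated tests SimpleProof.Verify now takes the root and the leaf *hash* and returns an error. A sketch under those assumptions (bytes, fmt, merkle, and crypto/tmhash imported):

    leaves := [][]byte{[]byte("a"), []byte("b"), []byte("c")}
    root := merkle.SimpleHashFromByteSlices(leaves)
    rootFromProofs, proofs := merkle.SimpleProofsFromByteSlices(leaves)
    fmt.Println(bytes.Equal(root, rootFromProofs))                    // true: same tree
    fmt.Println(proofs[1].Verify(root, tmhash.Sum(leaves[1])) == nil) // true: leaf 1 proven
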
+func IsVoteTypeValid(t SignedMsgType) bool { + switch t { + case PrevoteType, PrecommitType: + return true + default: + return false + } +} diff --git a/types/test_util.go b/types/test_util.go index f21c2831fe2..80f0c787284 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -1,6 +1,8 @@ package types -import "time" +import ( + tmtime "github.com/tendermint/tendermint/types/time" +) func MakeCommit(blockID BlockID, height int64, round int, voteSet *VoteSet, @@ -14,9 +16,9 @@ func MakeCommit(blockID BlockID, height int64, round int, ValidatorIndex: i, Height: height, Round: round, - Type: VoteTypePrecommit, + Type: PrecommitType, BlockID: blockID, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), } _, err := signAddVote(validators[i], vote, voteSet) diff --git a/types/time/time.go b/types/time/time.go new file mode 100644 index 00000000000..022bdf574f8 --- /dev/null +++ b/types/time/time.go @@ -0,0 +1,58 @@ +package time + +import ( + "sort" + "time" +) + +// Now returns the current time in UTC with no monotonic component. +func Now() time.Time { + return Canonical(time.Now()) +} + +// Canonical returns UTC time with no monotonic component. +// Stripping the monotonic component is for time equality. +// See https://github.com/tendermint/tendermint/pull/2203#discussion_r215064334 +func Canonical(t time.Time) time.Time { + return t.Round(0).UTC() +} + +// WeightedTime for computing a median. +type WeightedTime struct { + Time time.Time + Weight int64 +} + +// NewWeightedTime with time and weight. +func NewWeightedTime(time time.Time, weight int64) *WeightedTime { + return &WeightedTime{ + Time: time, + Weight: weight, + } +} + +// WeightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. 
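Canonical above exists so that equal instants compare equal after serialization: Round(0) drops Go's monotonic clock reading and UTC() normalizes the location. A sketch using only the standard library plus the tmtime package added above:

    t1 := time.Now()           // carries a monotonic clock reading
    t2 := tmtime.Canonical(t1) // same instant: monotonic part stripped, UTC
    fmt.Println(t1.Equal(t2))  // true  (Equal compares the instant)
    fmt.Println(t1 == t2)      // false (struct equality sees the monotonic part)
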
+func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res time.Time) { + median := totalVotingPower / 2 + + sort.Slice(weightedTimes, func(i, j int) bool { + if weightedTimes[i] == nil { + return false + } + if weightedTimes[j] == nil { + return true + } + return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() + }) + + for _, weightedTime := range weightedTimes { + if weightedTime != nil { + if median <= weightedTime.Weight { + res = weightedTime.Time + break + } + median -= weightedTime.Weight + } + } + return +} diff --git a/types/time/time_test.go b/types/time/time_test.go new file mode 100644 index 00000000000..1b1a30e5058 --- /dev/null +++ b/types/time/time_test.go @@ -0,0 +1,56 @@ +package time + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestWeightedMedian(t *testing.T) { + m := make([]*WeightedTime, 3) + + t1 := Now() + t2 := t1.Add(5 * time.Second) + t3 := t1.Add(10 * time.Second) + + m[2] = NewWeightedTime(t1, 33) // faulty processes + m[0] = NewWeightedTime(t2, 40) // correct processes + m[1] = NewWeightedTime(t3, 27) // correct processes + totalVotingPower := int64(100) + + median := WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t3) || median.Equal(t3))) + + m[1] = NewWeightedTime(t1, 40) // correct processes + m[2] = NewWeightedTime(t2, 27) // correct processes + m[0] = NewWeightedTime(t3, 33) // faulty processes + totalVotingPower = int64(100) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t2) || median.Equal(t2))) + + m = make([]*WeightedTime, 8) + t4 := t1.Add(15 * time.Second) + t5 := t1.Add(60 * time.Second) + + m[3] = NewWeightedTime(t1, 10) // correct processes + m[1] = NewWeightedTime(t2, 10) // correct processes + m[5] = NewWeightedTime(t2, 10) // correct processes + m[4] = NewWeightedTime(t3, 23) // faulty processes + m[0] = NewWeightedTime(t4, 20) // correct processes + m[7] = NewWeightedTime(t5, 10) // faulty processes + totalVotingPower = int64(83) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t3, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t4) || median.Equal(t4))) +} diff --git a/types/tx.go b/types/tx.go index 489f0b232c2..41be77946f7 100644 --- a/types/tx.go +++ b/types/tx.go @@ -5,6 +5,8 @@ import ( "errors" "fmt" + "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/crypto/tmhash" @@ -31,18 +33,13 @@ type Txs []Tx // Hash returns the simple Merkle root hash of the transactions. func (txs Txs) Hash() []byte { - // Recursive impl. - // Copied from tendermint/crypto/merkle to avoid allocations - switch len(txs) { - case 0: - return nil - case 1: - return txs[0].Hash() - default: - left := Txs(txs[:(len(txs)+1)/2]).Hash() - right := Txs(txs[(len(txs)+1)/2:]).Hash() - return merkle.SimpleHashFromTwoHashes(left, right) + // These allocations will be removed once Txs is switched to [][]byte, + // ref #2603. 
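Tracing WeightedMedian above on the first case of its test below: total power 100 gives median = 50; t1's weight 33 does not reach it (median becomes 50 - 33 = 17), t2's weight 40 does, so t2 is returned:

    t1 := tmtime.Now()
    t2 := t1.Add(5 * time.Second)
    t3 := t1.Add(10 * time.Second)
    wts := []*tmtime.WeightedTime{
        tmtime.NewWeightedTime(t1, 33),
        tmtime.NewWeightedTime(t2, 40),
        tmtime.NewWeightedTime(t3, 27),
    }
    fmt.Println(tmtime.WeightedMedian(wts, 100).Equal(t2)) // true
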
This is because golang does not allow type casting slices without unsafe + txBzs := make([][]byte, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] } + return merkle.SimpleHashFromByteSlices(txBzs) } // Index returns the index of this transaction in the list, or -1 if not found @@ -70,15 +67,13 @@ func (txs Txs) IndexByHash(hash []byte) int { // TODO: optimize this! func (txs Txs) Proof(i int) TxProof { l := len(txs) - hashers := make([]merkle.Hasher, l) + bzs := make([][]byte, l) for i := 0; i < l; i++ { - hashers[i] = txs[i] + bzs[i] = txs[i] } - root, proofs := merkle.SimpleProofsFromHashers(hashers) + root, proofs := merkle.SimpleProofsFromByteSlices(bzs) return TxProof{ - Index: i, - Total: l, RootHash: root, Data: txs[i], Proof: *proofs[i], @@ -87,10 +82,9 @@ func (txs Txs) Proof(i int) TxProof { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { - Index, Total int - RootHash cmn.HexBytes - Data Tx - Proof merkle.SimpleProof + RootHash cmn.HexBytes + Data Tx + Proof merkle.SimpleProof } // LeadHash returns the hash of the this proof refers to. @@ -104,14 +98,14 @@ func (tp TxProof) Validate(dataHash []byte) error { if !bytes.Equal(dataHash, tp.RootHash) { return errors.New("Proof matches different data hash") } - if tp.Index < 0 { + if tp.Proof.Index < 0 { return errors.New("Proof index cannot be negative") } - if tp.Total <= 0 { + if tp.Proof.Total <= 0 { return errors.New("Proof total must be positive") } - valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash) - if !valid { + valid := tp.Proof.Verify(tp.RootHash, tp.LeafHash()) + if valid != nil { return errors.New("Proof is not internally consistent") } return nil @@ -126,3 +120,18 @@ type TxResult struct { Tx Tx `json:"tx"` Result abci.ResponseDeliverTx `json:"result"` } + +// ComputeAminoOverhead calculates the overhead for amino encoding a transaction. +// The overhead consists of varint encoding the field number and the wire type +// (= length-delimited = 2), and another varint encoding the length of the +// transaction. +// The field number can be the field number of the particular transaction, or +// the field number of the parenting struct that contains the transactions []Tx +// as a field (this field number is repeated for each contained Tx). +// If some []Tx are encoded directly (without a parenting struct), the default +// fieldNum is also 1 (see BinFieldNum in amino.MarshalBinaryBare). 
+func ComputeAminoOverhead(tx Tx, fieldNum int) int64 { + fnum := uint64(fieldNum) + typ3AndFieldNum := (uint64(fnum) << 3) | uint64(amino.Typ3_ByteLength) + return int64(amino.UvarintSize(typ3AndFieldNum)) + int64(amino.UvarintSize(uint64(len(tx)))) +} diff --git a/types/tx_test.go b/types/tx_test.go index df7a744965c..3afaaccc1be 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -69,8 +69,8 @@ func TestValidTxProof(t *testing.T) { leaf := txs[i] leafHash := leaf.Hash() proof := txs.Proof(i) - assert.Equal(t, i, proof.Index, "%d: %d", h, i) - assert.Equal(t, len(txs), proof.Total, "%d: %d", h, i) + assert.Equal(t, i, proof.Proof.Index, "%d: %d", h, i) + assert.Equal(t, len(txs), proof.Proof.Total, "%d: %d", h, i) assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) assert.EqualValues(t, leaf, proof.Data, "%d: %d", h, i) assert.EqualValues(t, leafHash, proof.LeafHash(), "%d: %d", h, i) @@ -79,9 +79,9 @@ func TestValidTxProof(t *testing.T) { // read-write must also work var p2 TxProof - bin, err := cdc.MarshalBinary(proof) + bin, err := cdc.MarshalBinaryLengthPrefixed(proof) assert.Nil(t, err) - err = cdc.UnmarshalBinary(bin, &p2) + err = cdc.UnmarshalBinaryLengthPrefixed(bin, &p2) if assert.Nil(t, err, "%d: %d: %+v", h, i, err) { assert.Nil(t, p2.Validate(root), "%d: %d", h, i) } @@ -96,6 +96,63 @@ func TestTxProofUnchangable(t *testing.T) { } } +func TestComputeTxsOverhead(t *testing.T) { + cases := []struct { + txs Txs + wantOverhead int + }{ + {Txs{[]byte{6, 6, 6, 6, 6, 6}}, 2}, + // one 21 Mb transaction: + {Txs{make([]byte, 22020096, 22020096)}, 5}, + // two 21Mb/2 sized transactions: + {Txs{make([]byte, 11010048, 11010048), make([]byte, 11010048, 11010048)}, 10}, + {Txs{[]byte{1, 2, 3}, []byte{1, 2, 3}, []byte{4, 5, 6}}, 6}, + {Txs{[]byte{100, 5, 64}, []byte{42, 116, 118}, []byte{6, 6, 6}, []byte{6, 6, 6}}, 8}, + } + + for _, tc := range cases { + totalBytes := int64(0) + totalOverhead := int64(0) + for _, tx := range tc.txs { + aminoOverhead := ComputeAminoOverhead(tx, 1) + totalOverhead += aminoOverhead + totalBytes += aminoOverhead + int64(len(tx)) + } + bz, err := cdc.MarshalBinaryBare(tc.txs) + assert.EqualValues(t, tc.wantOverhead, totalOverhead) + assert.NoError(t, err) + assert.EqualValues(t, len(bz), totalBytes) + } +} + +func TestComputeAminoOverhead(t *testing.T) { + cases := []struct { + tx Tx + fieldNum int + want int + }{ + {[]byte{6, 6, 6}, 1, 2}, + {[]byte{6, 6, 6}, 16, 3}, + {[]byte{6, 6, 6}, 32, 3}, + {[]byte{6, 6, 6}, 64, 3}, + {[]byte{6, 6, 6}, 512, 3}, + {[]byte{6, 6, 6}, 1024, 3}, + {[]byte{6, 6, 6}, 2048, 4}, + {make([]byte, 64), 1, 2}, + {make([]byte, 65), 1, 2}, + {make([]byte, 127), 1, 2}, + {make([]byte, 128), 1, 3}, + {make([]byte, 256), 1, 3}, + {make([]byte, 512), 1, 3}, + {make([]byte, 1024), 1, 3}, + {make([]byte, 128), 16, 4}, + } + for _, tc := range cases { + got := ComputeAminoOverhead(tc.tx, tc.fieldNum) + assert.EqualValues(t, tc.want, got) + } +} + func testTxProofUnchangable(t *testing.T) { // make some proof txs := makeTxs(randInt(2, 100), randInt(16, 128)) @@ -105,7 +162,7 @@ func testTxProofUnchangable(t *testing.T) { // make sure it is valid to start with assert.Nil(t, proof.Validate(root)) - bin, err := cdc.MarshalBinary(proof) + bin, err := cdc.MarshalBinaryLengthPrefixed(proof) assert.Nil(t, err) // try mutating the data and make sure nothing breaks @@ -120,7 +177,7 @@ func testTxProofUnchangable(t *testing.T) { // This makes sure that the proof doesn't deserialize into something valid. 
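The overhead tables in the tests below follow directly from the doc comment above: one varint for the (fieldNum << 3 | wire-type) key plus one varint for the length. Two of the cases worked through as a fragment:

    // fieldNum 1:  key = 1<<3 | 2 = 0x0a (1 varint byte); a 3-byte tx has a
    //              1-byte length varint            => overhead 1 + 1 = 2
    // fieldNum 16: key = 16<<3 | 2 = 130 (2 bytes) => overhead 2 + 1 = 3
    // 22020096-byte tx: the length needs 25 bits -> 4 varint bytes => 1 + 4 = 5
    fmt.Println(types.ComputeAminoOverhead(make([]byte, 22020096), 1)) // 5
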
func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { var proof TxProof - err := cdc.UnmarshalBinary(bad, &proof) + err := cdc.UnmarshalBinaryLengthPrefixed(bad, &proof) if err == nil { err = proof.Validate(root) if err == nil { @@ -128,7 +185,7 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { // This can happen if we have a slightly different total (where the // path ends up the same). If it is something else, we have a real // problem. - assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good) + assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) } } } diff --git a/types/validation.go b/types/validation.go new file mode 100644 index 00000000000..1271cfd9433 --- /dev/null +++ b/types/validation.go @@ -0,0 +1,40 @@ +package types + +import ( + "fmt" + "time" + + "github.com/tendermint/tendermint/crypto/tmhash" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// ValidateTime does a basic time validation ensuring time does not drift too +// much: +/- one year. +// TODO: reduce this to eg 1 day +// NOTE: DO NOT USE in ValidateBasic methods in this package. This function +// can only be used for real time validation, like on proposals and votes +// in the consensus. If consensus is stuck, and rounds increase for more than a day, +// having only a 1-day band here could break things... +// Can't use for validating blocks because we may be syncing years worth of history. +func ValidateTime(t time.Time) error { + var ( + now = tmtime.Now() + oneYear = 8766 * time.Hour + ) + if t.Before(now.Add(-oneYear)) || t.After(now.Add(oneYear)) { + return fmt.Errorf("Time drifted too much. Expected: -1 < %v < 1 year", now) + } + return nil +} + +// ValidateHash returns an error if the hash is not empty, but its +// size != tmhash.Size. +func ValidateHash(h []byte) error { + if len(h) > 0 && len(h) != tmhash.Size { + return fmt.Errorf("Expected size to be %d bytes, got %d bytes", + tmhash.Size, + len(h), + ) + } + return nil +} diff --git a/types/validator.go b/types/validator.go index e43acf09d61..4bfd78a6de3 100644 --- a/types/validator.go +++ b/types/validator.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" + "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -71,15 +73,21 @@ func (v *Validator) String() string { // Hash computes the unique ID of a validator with a given voting power. // It excludes the Accum value, which changes with every round. func (v *Validator) Hash() []byte { - return aminoHash(struct { - Address Address + return tmhash.Sum(v.Bytes()) +} + +// Bytes computes the unique encoding of a validator with a given voting power. +// These are the bytes that gets hashed in consensus. It excludes address +// as its redundant with the pubkey. This also excludes accum which changes +// every round. 
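ValidateHash above accepts an empty hash or one of exactly tmhash.Size bytes; everything else errors. Sketch, assuming fmt, types, and crypto/tmhash are imported:

    fmt.Println(types.ValidateHash(nil) == nil)                       // true: empty is allowed
    fmt.Println(types.ValidateHash(make([]byte, tmhash.Size)) == nil) // true: exact size
    fmt.Println(types.ValidateHash([]byte("short")) != nil)           // true: wrong size
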
+func (v *Validator) Bytes() []byte { + return cdcEncode((struct { PubKey crypto.PubKey VotingPower int64 }{ - v.Address, v.PubKey, v.VotingPower, - }) + })) } //---------------------------------------- diff --git a/types/validator_set.go b/types/validator_set.go index 60fc2d83b59..ab030d1be1b 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -29,42 +29,57 @@ type ValidatorSet struct { totalVotingPower int64 } -func NewValidatorSet(vals []*Validator) *ValidatorSet { - validators := make([]*Validator, len(vals)) - for i, val := range vals { +func NewValidatorSet(valz []*Validator) *ValidatorSet { + if valz != nil && len(valz) == 0 { + panic("validator set initialization slice cannot be an empty slice (but it can be nil)") + } + validators := make([]*Validator, len(valz)) + for i, val := range valz { validators[i] = val.Copy() } sort.Sort(ValidatorsByAddress(validators)) - vs := &ValidatorSet{ + vals := &ValidatorSet{ Validators: validators, } - - if len(vals) > 0 { - vs.IncrementAccum(1) + if len(valz) > 0 { + vals.IncrementAccum(1) } - return vs + return vals +} + +// Nil or empty validator sets are invalid. +func (vals *ValidatorSet) IsNilOrEmpty() bool { + return vals == nil || len(vals.Validators) == 0 +} + +// Increment Accum and update the proposer on a copy, and return it. +func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { + copy := vals.Copy() + copy.IncrementAccum(times) + return copy } // IncrementAccum increments accum of each validator and updates the // proposer. Panics if validator set is empty. -func (valSet *ValidatorSet) IncrementAccum(times int) { +func (vals *ValidatorSet) IncrementAccum(times int) { + // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() - for _, val := range valSet.Validators { - // check for overflow both multiplication and sum + for _, val := range vals.Validators { + // Check for overflow both multiplication and sum. val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) validatorsHeap.PushComparable(val, accumComparable{val}) } - // Decrement the validator with most accum times times + // Decrement the validator with most accum times times. for i := 0; i < times; i++ { mostest := validatorsHeap.Peek().(*Validator) // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower()) + mostest.Accum = safeSubClip(mostest.Accum, vals.TotalVotingPower()) if i == times-1 { - valSet.Proposer = mostest + vals.Proposer = mostest } else { validatorsHeap.Update(mostest, accumComparable{mostest}) } @@ -72,36 +87,36 @@ func (valSet *ValidatorSet) IncrementAccum(times int) { } // Copy each validator into a new ValidatorSet -func (valSet *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(valSet.Validators)) - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Copy() *ValidatorSet { + validators := make([]*Validator, len(vals.Validators)) + for i, val := range vals.Validators { // NOTE: must copy, since IncrementAccum updates in place. validators[i] = val.Copy() } return &ValidatorSet{ Validators: validators, - Proposer: valSet.Proposer, - totalVotingPower: valSet.totalVotingPower, + Proposer: vals.Proposer, + totalVotingPower: vals.totalVotingPower, } } // HasAddress returns true if address given is in the validator set, false - // otherwise. 
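NewValidatorSet above now distinguishes a nil slice (a legitimately empty set) from an empty non-nil slice, which is treated as a caller bug and rejected with a panic; TestValidatorSetBasic below asserts both behaviors. Sketch:

    vset := types.NewValidatorSet(nil) // ok: empty set, no proposer yet
    fmt.Println(vset.IsNilOrEmpty())   // true
    // would panic: types.NewValidatorSet([]*types.Validator{})
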
-func (valSet *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) HasAddress(address []byte) bool { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) + return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) } // GetByAddress returns an index of the validator with address and validator // itself if found. Otherwise, -1 and nil are returned. -func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) { - return idx, valSet.Validators[idx].Copy() + if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) { + return idx, vals.Validators[idx].Copy() } return -1, nil } @@ -109,45 +124,45 @@ func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Valida // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to // len(ValidatorSet.Validators). -func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index < 0 || index >= len(valSet.Validators) { +func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(vals.Validators) { return nil, nil } - val = valSet.Validators[index] + val = vals.Validators[index] return val.Address, val.Copy() } // Size returns the length of the validator set. -func (valSet *ValidatorSet) Size() int { - return len(valSet.Validators) +func (vals *ValidatorSet) Size() int { + return len(vals.Validators) } // TotalVotingPower returns the sum of the voting powers of all validators. -func (valSet *ValidatorSet) TotalVotingPower() int64 { - if valSet.totalVotingPower == 0 { - for _, val := range valSet.Validators { +func (vals *ValidatorSet) TotalVotingPower() int64 { + if vals.totalVotingPower == 0 { + for _, val := range vals.Validators { // mind overflow - valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower) + vals.totalVotingPower = safeAddClip(vals.totalVotingPower, val.VotingPower) } } - return valSet.totalVotingPower + return vals.totalVotingPower } // GetProposer returns the current proposer. If the validator set is empty, nil // is returned. 
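HasAddress and GetByAddress above share one idiom: validators stay sorted by address, sort.Search finds the first index whose address is >= the target, and bytes.Equal confirms an exact hit. The idiom in isolation, with illustrative data rather than repo code:

    addrs := [][]byte{[]byte("aa"), []byte("cc"), []byte("ee")} // sorted
    target := []byte("cc")
    idx := sort.Search(len(addrs), func(i int) bool {
        return bytes.Compare(target, addrs[i]) <= 0
    })
    fmt.Println(idx, idx < len(addrs) && bytes.Equal(addrs[idx], target)) // 1 true
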
-func (valSet *ValidatorSet) GetProposer() (proposer *Validator) { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) GetProposer() (proposer *Validator) { + if len(vals.Validators) == 0 { return nil } - if valSet.Proposer == nil { - valSet.Proposer = valSet.findProposer() + if vals.Proposer == nil { + vals.Proposer = vals.findProposer() } - return valSet.Proposer.Copy() + return vals.Proposer.Copy() } -func (valSet *ValidatorSet) findProposer() *Validator { +func (vals *ValidatorSet) findProposer() *Validator { var proposer *Validator - for _, val := range valSet.Validators { + for _, val := range vals.Validators { if proposer == nil || !bytes.Equal(val.Address, proposer.Address) { proposer = proposer.CompareAccum(val) } @@ -157,83 +172,83 @@ func (valSet *ValidatorSet) findProposer() *Validator { // Hash returns the Merkle root hash build using validators (as leaves) in the // set. -func (valSet *ValidatorSet) Hash() []byte { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) Hash() []byte { + if len(vals.Validators) == 0 { return nil } - hashers := make([]merkle.Hasher, len(valSet.Validators)) - for i, val := range valSet.Validators { - hashers[i] = val + bzs := make([][]byte, len(vals.Validators)) + for i, val := range vals.Validators { + bzs[i] = val.Bytes() } - return merkle.SimpleHashFromHashers(hashers) + return merkle.SimpleHashFromByteSlices(bzs) } // Add adds val to the validator set and returns true. It returns false if val // is already in the set. -func (valSet *ValidatorSet) Add(val *Validator) (added bool) { +func (vals *ValidatorSet) Add(val *Validator) (added bool) { val = val.Copy() - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0 + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) { - valSet.Validators = append(valSet.Validators, val) + if idx >= len(vals.Validators) { + vals.Validators = append(vals.Validators, val) // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true - } else if bytes.Equal(valSet.Validators[idx].Address, val.Address) { + } else if bytes.Equal(vals.Validators[idx].Address, val.Address) { return false } else { - newValidators := make([]*Validator, len(valSet.Validators)+1) - copy(newValidators[:idx], valSet.Validators[:idx]) + newValidators := make([]*Validator, len(vals.Validators)+1) + copy(newValidators[:idx], vals.Validators[:idx]) newValidators[idx] = val - copy(newValidators[idx+1:], valSet.Validators[idx:]) - valSet.Validators = newValidators + copy(newValidators[idx+1:], vals.Validators[idx:]) + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } } // Update updates val and returns true. It returns false if val is not present // in the set. 
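IncrementAccum above drives proposer rotation: every validator's Accum grows by its VotingPower, the largest accumulator becomes proposer, and its Accum is paid down by the total power. With powers 1 and 3 the heavier validator proposes first; a sketch assuming two fresh ed25519 keys:

    v1 := types.NewValidator(ed25519.GenPrivKey().PubKey(), 1)
    v2 := types.NewValidator(ed25519.GenPrivKey().PubKey(), 3)
    // NewValidatorSet runs IncrementAccum(1): accums become 1 and 3;
    // the max (power 3) is chosen and decremented by the total (4) to -1.
    vset := types.NewValidatorSet([]*types.Validator{v1, v2})
    fmt.Println(vset.GetProposer().VotingPower) // 3
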
-func (valSet *ValidatorSet) Update(val *Validator) (updated bool) { - index, sameVal := valSet.GetByAddress(val.Address) +func (vals *ValidatorSet) Update(val *Validator) (updated bool) { + index, sameVal := vals.GetByAddress(val.Address) if sameVal == nil { return false } - valSet.Validators[index] = val.Copy() + vals.Validators[index] = val.Copy() // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } // Remove deletes the validator with address. It returns the validator removed // and true. If returns nil and false if validator is not present in the set. -func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) { + if idx >= len(vals.Validators) || !bytes.Equal(vals.Validators[idx].Address, address) { return nil, false } - removedVal := valSet.Validators[idx] - newValidators := valSet.Validators[:idx] - if idx+1 < len(valSet.Validators) { - newValidators = append(newValidators, valSet.Validators[idx+1:]...) + removedVal := vals.Validators[idx] + newValidators := vals.Validators[:idx] + if idx+1 < len(vals.Validators) { + newValidators = append(newValidators, vals.Validators[idx+1:]...) } - valSet.Validators = newValidators + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return removedVal, true } // Iterate will run the given function over the set. -func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range vals.Validators { stop := fn(i, val.Copy()) if stop { break @@ -241,143 +256,153 @@ func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } -// Verify that +2/3 of the set had signed the given signBytes -func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { - if valSet.Size() != len(commit.Precommits) { - return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits)) +// Verify that +2/3 of the set had signed the given signBytes. +func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { + if vals.Size() != len(commit.Precommits) { + return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits)) } if height != commit.Height() { return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v", + blockID, commit.BlockID) + } talliedVotingPower := int64(0) round := commit.Round() for idx, precommit := range commit.Precommits { - // may be nil if validator skipped. if precommit == nil { - continue + continue // OK, some precommits can be missing. 
} if precommit.Height != height { - return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height) + return fmt.Errorf("Invalid commit -- wrong height: want %v got %v", height, precommit.Height) } if precommit.Round != round { - return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + return fmt.Errorf("Invalid commit -- wrong round: want %v got %v", round, precommit.Round) } - if precommit.Type != VoteTypePrecommit { + if precommit.Type != PrecommitType { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } - _, val := valSet.GetByIndex(idx) - // Validate signature + _, val := vals.GetByIndex(idx) + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } // Good precommit! - talliedVotingPower += val.VotingPower + if blockID.Equals(precommit.BlockID) { + talliedVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. We include stray + // precommits to measure validator availability. + } } - if talliedVotingPower > valSet.TotalVotingPower()*2/3 { + if talliedVotingPower > vals.TotalVotingPower()*2/3 { return nil } return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v", - talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + talliedVotingPower, (vals.TotalVotingPower()*2/3 + 1)) } -// VerifyCommitAny will check to see if the set would -// be valid with a different validator set. +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. +// +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block. +// +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions. // -// valSet is the validator set that we know -// * over 2/3 of the power in old signed this block +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. // -// newSet is the validator set that signed this block -// * only votes from old are sufficient for 2/3 majority -// in the new set as well +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. // -// That means that: -// * 10% of the valset can't just declare themselves kings -// * If the validator set is 3x old size, we need more proof to trust -func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. 
+// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the ValidatorSet. Caller must check that the +// commit height is greater than the height for this validator set. +func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, blockID BlockID, height int64, commit *Commit) error { + oldVals := vals - if newSet.Size() != len(commit.Precommits) { - return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) - } - if height != commit.Height() { - return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err } + // Check old voting power. oldVotingPower := int64(0) - newVotingPower := int64(0) seen := map[int]bool{} round := commit.Round() for idx, precommit := range commit.Precommits { - // first check as in VerifyCommit if precommit == nil { continue } if precommit.Height != height { - // return certerr.ErrHeightMismatch(height, precommit.Height) return cmn.NewError("Blocks don't match - %d vs %d", round, precommit.Round) } if precommit.Round != round { return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) } - if precommit.Type != VoteTypePrecommit { + if precommit.Type != PrecommitType { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } - - // we only grab by address, ignoring unknown validators - vi, ov := valSet.GetByAddress(precommit.ValidatorAddress) - if ov == nil || seen[vi] { + // See if this validator is in oldVals. + idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) + if val == nil || seen[idx] { continue // missing or double vote... } - seen[vi] = true + seen[idx] = true - // Validate signature old school + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) - if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { + if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return cmn.NewError("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! - oldVotingPower += ov.VotingPower - - // check new school - _, cv := newSet.GetByIndex(idx) - if cv.PubKey.Equals(ov.PubKey) { - // make sure this is properly set in the current block as well - newVotingPower += cv.VotingPower + if blockID.Equals(precommit.BlockID) { + oldVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. We include stray + // precommits to measure validator availability. 
} } - if oldVotingPower <= valSet.TotalVotingPower()*2/3 { + if oldVotingPower <= oldVals.TotalVotingPower()*2/3 { return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v", - oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) - } else if newVotingPower <= newSet.TotalVotingPower()*2/3 { - return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v", - newVotingPower, (newSet.TotalVotingPower()*2/3 + 1)) + oldVotingPower, (oldVals.TotalVotingPower()*2/3 + 1)) } return nil } -func (valSet *ValidatorSet) String() string { - return valSet.StringIndented("") +func (vals *ValidatorSet) String() string { + return vals.StringIndented("") } // String -func (valSet *ValidatorSet) StringIndented(indent string) string { - if valSet == nil { +func (vals *ValidatorSet) StringIndented(indent string) string { + if vals == nil { return "nil-ValidatorSet" } valStrings := []string{} - valSet.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) @@ -386,9 +411,9 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { %s Validators: %s %v %s}`, - indent, valSet.GetProposer().String(), + indent, vals.GetProposer().String(), indent, - indent, strings.Join(valStrings, "\n"+indent+" "), + indent, strings.Join(valStrings, "\n"+indent+" "), indent) } @@ -399,18 +424,18 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { // Sort validators by address type ValidatorsByAddress []*Validator -func (vs ValidatorsByAddress) Len() int { - return len(vs) +func (valz ValidatorsByAddress) Len() int { + return len(valz) } -func (vs ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(vs[i].Address, vs[j].Address) == -1 +func (valz ValidatorsByAddress) Less(i, j int) bool { + return bytes.Compare(valz[i].Address, valz[j].Address) == -1 } -func (vs ValidatorsByAddress) Swap(i, j int) { - it := vs[i] - vs[i] = vs[j] - vs[j] = it +func (valz ValidatorsByAddress) Swap(i, j int) { + it := valz[i] + valz[i] = valz[j] + valz[j] = it } //------------------------------------- @@ -434,16 +459,16 @@ func (ac accumComparable) Less(o interface{}) bool { // NOTE: PrivValidator are in order. 
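Stepping back to VerifyFutureCommit above, the intended lite-client call shape is: hold a trusted (old) validator set, receive a candidate commit for a later height, and require both that the commit is valid for the new set and that more than 2/3 of the old set signed it. A sketch with illustrative variable names (oldVals, newVals, commit, etc. are assumptions, not repo code):

    // oldVals: the set we already trust; newVals/commit: from the candidate header.
    if err := oldVals.VerifyFutureCommit(newVals, chainID, blockID, height, commit); err != nil {
        // reject the header: either not a valid +2/3 commit for newVals,
        // or fewer than +2/3 of the *old* validators signed it.
    }
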
// UNSTABLE func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - vals := make([]*Validator, numValidators) + valz := make([]*Validator, numValidators) privValidators := make([]PrivValidator, numValidators) for i := 0; i < numValidators; i++ { val, privValidator := RandValidator(false, votingPower) - vals[i] = val + valz[i] = val privValidators[i] = privValidator } - valSet := NewValidatorSet(vals) + vals := NewValidatorSet(valz) sort.Sort(PrivValidatorsByAddress(privValidators)) - return valSet, privValidators + return vals, privValidators } /////////////////////////////////////////////////////////////////////////////// diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 1756f7890c4..aad9d85a8f4 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -2,71 +2,73 @@ package types import ( "bytes" + "fmt" "math" "strings" "testing" "testing/quick" - "time" "github.com/stretchr/testify/assert" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestValidatorSetBasic(t *testing.T) { - for _, vset := range []*ValidatorSet{NewValidatorSet([]*Validator{}), NewValidatorSet(nil)} { - assert.Panics(t, func() { vset.IncrementAccum(1) }) - - assert.EqualValues(t, vset, vset.Copy()) - assert.False(t, vset.HasAddress([]byte("some val"))) - idx, val := vset.GetByAddress([]byte("some val")) - assert.Equal(t, -1, idx) - assert.Nil(t, val) - addr, val := vset.GetByIndex(-100) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(0) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(100) - assert.Nil(t, addr) - assert.Nil(t, val) - assert.Zero(t, vset.Size()) - assert.Equal(t, int64(0), vset.TotalVotingPower()) - assert.Nil(t, vset.GetProposer()) - assert.Nil(t, vset.Hash()) - - // add - val = randValidator_() - assert.True(t, vset.Add(val)) - assert.True(t, vset.HasAddress(val.Address)) - idx, val2 := vset.GetByAddress(val.Address) - assert.Equal(t, 0, idx) - assert.Equal(t, val, val2) - addr, val2 = vset.GetByIndex(0) - assert.Equal(t, []byte(val.Address), addr) - assert.Equal(t, val, val2) - assert.Equal(t, 1, vset.Size()) - assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) - assert.Equal(t, val, vset.GetProposer()) - assert.NotNil(t, vset.Hash()) - assert.NotPanics(t, func() { vset.IncrementAccum(1) }) - - // update - assert.False(t, vset.Update(randValidator_())) - val.VotingPower = 100 - assert.True(t, vset.Update(val)) - - // remove - val2, removed := vset.Remove(randValidator_().Address) - assert.Nil(t, val2) - assert.False(t, removed) - val2, removed = vset.Remove(val.Address) - assert.Equal(t, val.Address, val2.Address) - assert.True(t, removed) - } + assert.Panics(t, func() { NewValidatorSet([]*Validator{}) }) + + vset := NewValidatorSet(nil) + assert.Panics(t, func() { vset.IncrementAccum(1) }) + + assert.EqualValues(t, vset, vset.Copy()) + assert.False(t, vset.HasAddress([]byte("some val"))) + idx, val := vset.GetByAddress([]byte("some val")) + assert.Equal(t, -1, idx) + assert.Nil(t, val) + addr, val := vset.GetByIndex(-100) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(0) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(100) + assert.Nil(t, addr) + assert.Nil(t, val) + 
assert.Zero(t, vset.Size()) + assert.Equal(t, int64(0), vset.TotalVotingPower()) + assert.Nil(t, vset.GetProposer()) + assert.Nil(t, vset.Hash()) + + // add + val = randValidator_() + assert.True(t, vset.Add(val)) + assert.True(t, vset.HasAddress(val.Address)) + idx, val2 := vset.GetByAddress(val.Address) + assert.Equal(t, 0, idx) + assert.Equal(t, val, val2) + addr, val2 = vset.GetByIndex(0) + assert.Equal(t, []byte(val.Address), addr) + assert.Equal(t, val, val2) + assert.Equal(t, 1, vset.Size()) + assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) + assert.Equal(t, val, vset.GetProposer()) + assert.NotNil(t, vset.Hash()) + assert.NotPanics(t, func() { vset.IncrementAccum(1) }) + + // update + assert.False(t, vset.Update(randValidator_())) + val.VotingPower = 100 + assert.True(t, vset.Update(val)) + + // remove + val2, removed := vset.Remove(randValidator_().Address) + assert.Nil(t, val2) + assert.False(t, removed) + val2, removed = vset.Remove(val.Address) + assert.Equal(t, val.Address, val2.Address) + assert.True(t, removed) } func TestCopy(t *testing.T) { @@ -218,7 +220,7 @@ func TestProposerSelection3(t *testing.T) { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address if !bytes.Equal(got, expected) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) + t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) } // serialize, deserialize, check proposer @@ -228,7 +230,7 @@ func TestProposerSelection3(t *testing.T) { computed := vset.GetProposer() // findGetProposer() if i != 0 { if !bytes.Equal(got, computed.Address) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) + t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) } } @@ -270,7 +272,7 @@ func randValidatorSet(numValidators int) *ValidatorSet { } func (valSet *ValidatorSet) toBytes() []byte { - bz, err := cdc.MarshalBinary(valSet) + bz, err := cdc.MarshalBinaryLengthPrefixed(valSet) if err != nil { panic(err) } @@ -278,7 +280,7 @@ func (valSet *ValidatorSet) toBytes() []byte { } func (valSet *ValidatorSet) fromBytes(b []byte) { - err := cdc.UnmarshalBinary(b, &valSet) + err := cdc.UnmarshalBinaryLengthPrefixed(b, &valSet) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED panic(err) @@ -382,8 +384,8 @@ func TestValidatorSetVerifyCommit(t *testing.T) { ValidatorIndex: 0, Height: height, Round: 0, - Timestamp: time.Now().UTC(), - Type: VoteTypePrecommit, + Timestamp: tmtime.Now(), + Type: PrecommitType, BlockID: blockID, } sig, err := privKey.Sign(vote.SignBytes(chainID)) diff --git a/types/vote.go b/types/vote.go index 9a6180d7556..bf14d403bcb 100644 --- a/types/vote.go +++ b/types/vote.go @@ -6,10 +6,15 @@ import ( "fmt" "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) +const ( + // MaxVoteBytes is a maximum vote size (including amino overhead). 
+ MaxVoteBytes int64 = 223 +) + var ( ErrVoteUnexpectedStep = errors.New("Unexpected step") ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index") @@ -38,41 +43,24 @@ func NewConflictingVoteError(val *Validator, voteA, voteB *Vote) *ErrVoteConflic } } -// Types of votes -// TODO Make a new type "VoteType" -const ( - VoteTypePrevote = byte(0x01) - VoteTypePrecommit = byte(0x02) -) - -func IsVoteTypeValid(type_ byte) bool { - switch type_ { - case VoteTypePrevote: - return true - case VoteTypePrecommit: - return true - default: - return false - } -} - -// Address is hex bytes. TODO: crypto.Address -type Address = cmn.HexBytes +// Address is hex bytes. +type Address = crypto.Address -// Represents a prevote, precommit, or commit vote from validators for consensus. +// Vote represents a prevote, precommit, or commit vote from validators for +// consensus. type Vote struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` - Type byte `json:"type"` - BlockID BlockID `json:"block_id"` // zero if vote is nil. - Signature []byte `json:"signature"` + Type SignedMsgType `json:"type"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + BlockID BlockID `json:"block_id"` // zero if vote is nil. + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` + Signature []byte `json:"signature"` } func (vote *Vote) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote)) + bz, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote(chainID, vote)) if err != nil { panic(err) } @@ -90,20 +78,25 @@ func (vote *Vote) String() string { } var typeString string switch vote.Type { - case VoteTypePrevote: + case PrevoteType: typeString = "Prevote" - case VoteTypePrecommit: + case PrecommitType: typeString = "Precommit" default: cmn.PanicSanity("Unknown vote type") } return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}", - vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress), - vote.Height, vote.Round, vote.Type, typeString, + vote.ValidatorIndex, + cmn.Fingerprint(vote.ValidatorAddress), + vote.Height, + vote.Round, + vote.Type, + typeString, cmn.Fingerprint(vote.BlockID.Hash), cmn.Fingerprint(vote.Signature), - CanonicalTime(vote.Timestamp)) + CanonicalTime(vote.Timestamp), + ) } func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { @@ -116,3 +109,38 @@ func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { } return nil } + +// ValidateBasic performs basic validation. +func (vote *Vote) ValidateBasic() error { + if !IsVoteTypeValid(vote.Type) { + return errors.New("Invalid Type") + } + if vote.Height < 0 { + return errors.New("Negative Height") + } + if vote.Round < 0 { + return errors.New("Negative Round") + } + + // NOTE: Timestamp validation is subtle and handled elsewhere. 
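ValidateBasic (continued below) front-loads cheap structural checks before any signature work. Its first gate is IsVoteTypeValid, which admits only the two vote types, so ProposalType and HeartbeatType values are rejected here as well. Sketch:

    bad := &types.Vote{Type: types.SignedMsgType(0x07)}
    fmt.Println(bad.ValidateBasic()) // "Invalid Type"
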
+ + if err := vote.BlockID.ValidateBasic(); err != nil { + return fmt.Errorf("Wrong BlockID: %v", err) + } + if len(vote.ValidatorAddress) != crypto.AddressSize { + return fmt.Errorf("Expected ValidatorAddress size to be %d bytes, got %d bytes", + crypto.AddressSize, + len(vote.ValidatorAddress), + ) + } + if vote.ValidatorIndex < 0 { + return errors.New("Negative ValidatorIndex") + } + if len(vote.Signature) == 0 { + return errors.New("Signature is missing") + } + if len(vote.Signature) > MaxSignatureSize { + return fmt.Errorf("Signature is too big (max: %d)", MaxSignatureSize) + } + return nil +} diff --git a/types/vote_set.go b/types/vote_set.go index 36bf5ef7e32..0cf6cbb7f5d 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -55,7 +55,7 @@ type VoteSet struct { chainID string height int64 round int - type_ byte + type_ SignedMsgType valSet *ValidatorSet mtx sync.Mutex @@ -68,7 +68,7 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. -func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int, type_ SignedMsgType, valSet *ValidatorSet) *VoteSet { if height == 0 { cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -109,7 +109,7 @@ func (voteSet *VoteSet) Type() byte { if voteSet == nil { return 0x00 } - return voteSet.type_ + return byte(voteSet.type_) } func (voteSet *VoteSet) Size() int { @@ -158,7 +158,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { if (vote.Height != voteSet.height) || (vote.Round != voteSet.round) || (vote.Type != voteSet.type_) { - return false, errors.Wrapf(ErrVoteUnexpectedStep, "Got %d/%d/%d, expected %d/%d/%d", + return false, errors.Wrapf(ErrVoteUnexpectedStep, "Expected %d/%d/%d, but got %d/%d/%d", voteSet.height, voteSet.round, voteSet.type_, vote.Height, vote.Round, vote.Type) } @@ -170,7 +170,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) } - // Ensure that the signer has the right address + // Ensure that the signer has the right address. if !bytes.Equal(valAddr, lookupAddr) { return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.", @@ -190,7 +190,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) } - // Add vote and get conflicting vote if any + // Add vote and get conflicting vote if any. added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) if conflicting != nil { return added, NewConflictingVoteError(val, conflicting, vote) @@ -201,7 +201,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return added, nil } -// Returns (vote, true) if vote exists for valIndex and blockKey +// Returns (vote, true) if vote exists for valIndex and blockKey. 
func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { return existing, true @@ -381,7 +381,7 @@ func (voteSet *VoteSet) IsCommit() bool { if voteSet == nil { return false } - if voteSet.type_ != VoteTypePrecommit { + if voteSet.type_ != PrecommitType { return false } voteSet.mtx.Lock() @@ -529,8 +529,8 @@ func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { // Commit func (voteSet *VoteSet) MakeCommit() *Commit { - if voteSet.type_ != VoteTypePrecommit { - cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is VoteTypePrecommit") + if voteSet.type_ != PrecommitType { + cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is PrecommitType") } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 32ceb7b1614..641872920c2 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -3,15 +3,15 @@ package types import ( "bytes" "testing" - "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" tst "github.com/tendermint/tendermint/libs/test" + tmtime "github.com/tendermint/tendermint/types/time" ) // NOTE: privValidators are in order -func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { +func randVoteSet(height int64, round int, type_ SignedMsgType, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { valSet, privValidators := RandValidatorSet(numValidators, votingPower) return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators } @@ -41,7 +41,7 @@ func withRound(vote *Vote, round int) *Vote { // Convenience: Return new vote with different type func withType(vote *Vote, type_ byte) *Vote { vote = vote.Copy() - vote.Type = type_ + vote.Type = SignedMsgType(type_) return vote } @@ -61,7 +61,7 @@ func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { func TestAddVote(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) val0 := privValidators[0] // t.Logf(">> %v", voteSet) @@ -82,8 +82,8 @@ func TestAddVote(t *testing.T) { ValidatorIndex: 0, // since privValidators are in order Height: height, Round: round, - Type: VoteTypePrevote, - Timestamp: time.Now().UTC(), + Type: PrevoteType, + Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } _, err := signAddVote(val0, vote, voteSet) @@ -105,15 +105,15 @@ func TestAddVote(t *testing.T) { func Test2_3Majority(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) + voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in ValidatorIndex: -1, // NOTE: must fill in Height: height, Round: round, - Type: VoteTypePrevote, - Timestamp: time.Now().UTC(), + Type: PrevoteType, + Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } // 6 out of 10 voted for nil. 
@@ -158,7 +158,7 @@ func Test2_3Majority(t *testing.T) {
 
 func Test2_3MajorityRedux(t *testing.T) {
 	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1)
+	voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1)
 
 	blockHash := crypto.CRandBytes(32)
 	blockPartsTotal := 123
@@ -169,8 +169,8 @@ func Test2_3MajorityRedux(t *testing.T) {
 		ValidatorIndex:   -1, // NOTE: must fill in
 		Height:           height,
 		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrevote,
+		Timestamp:        tmtime.Now(),
+		Type:             PrevoteType,
 		BlockID:          BlockID{blockHash, blockPartsHeader},
 	}
@@ -257,15 +257,15 @@ func Test2_3MajorityRedux(t *testing.T) {
 
 func TestBadVotes(t *testing.T) {
 	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
+	voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1)
 
 	voteProto := &Vote{
 		ValidatorAddress: nil,
 		ValidatorIndex:   -1,
 		Height:           height,
 		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrevote,
+		Timestamp:        tmtime.Now(),
+		Type:             PrevoteType,
 		BlockID:          BlockID{nil, PartSetHeader{}},
 	}
@@ -308,7 +308,7 @@ func TestBadVotes(t *testing.T) {
 	// val3 votes of another type.
 	{
 		vote := withValidator(voteProto, privValidators[3].GetAddress(), 3)
-		added, err := signAddVote(privValidators[3], withType(vote, VoteTypePrecommit), voteSet)
+		added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet)
 		if added || err == nil {
 			t.Errorf("Expected VoteSet.Add to fail, wrong type")
 		}
@@ -317,7 +317,7 @@ func TestBadVotes(t *testing.T) {
 
 func TestConflicts(t *testing.T) {
 	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1)
+	voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1)
 	blockHash1 := cmn.RandBytes(32)
 	blockHash2 := cmn.RandBytes(32)
@@ -326,8 +326,8 @@ func TestConflicts(t *testing.T) {
 		ValidatorIndex:   -1,
 		Height:           height,
 		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrevote,
+		Timestamp:        tmtime.Now(),
+		Type:             PrevoteType,
 		BlockID:          BlockID{nil, PartSetHeader{}},
 	}
@@ -447,7 +447,7 @@ func TestConflicts(t *testing.T) {
 
 func TestMakeCommit(t *testing.T) {
 	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1)
+	voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1)
 	blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
 
 	voteProto := &Vote{
@@ -455,8 +455,8 @@ func TestMakeCommit(t *testing.T) {
 		ValidatorIndex:   -1,
 		Height:           height,
 		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrecommit,
+		Timestamp:        tmtime.Now(),
+		Type:             PrecommitType,
 		BlockID:          BlockID{blockHash, blockPartsHeader},
 	}
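The Test2_3Majority cases above push a VoteSet past the two-thirds threshold ("6 out of 10 voted for nil" is not yet a majority). A small sketch of the underlying arithmetic, assuming the conventional strictly-greater-than-two-thirds rule with integer division; the real VoteSet additionally tallies power per block ID:

```go
package main

import "fmt"

// hasTwoThirdsMajority reports whether the voted power strictly exceeds
// two thirds of the total. Integer arithmetic (total*2/3) avoids float
// rounding; with total=10, the threshold is >6, so 7 votes are required.
func hasTwoThirdsMajority(voted, total int64) bool {
	return voted > total*2/3
}

func main() {
	// 10 validators of power 1, as in Test2_3Majority:
	fmt.Println(hasTwoThirdsMajority(6, 10)) // false: 6 is not > 6.66...
	fmt.Println(hasTwoThirdsMajority(7, 10)) // true
	// 100 validators, as in Test2_3MajorityRedux:
	fmt.Println(hasTwoThirdsMajority(67, 100)) // true
}
```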
diff --git a/types/vote_test.go b/types/vote_test.go
index 836baa615cf..942f2d6b9e4 100644
--- a/types/vote_test.go
+++ b/types/vote_test.go
@@ -1,20 +1,23 @@
 package types
 
 import (
+	"math"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )
 
 func examplePrevote() *Vote {
-	return exampleVote(VoteTypePrevote)
+	return exampleVote(byte(PrevoteType))
 }
 
 func examplePrecommit() *Vote {
-	return exampleVote(VoteTypePrecommit)
+	return exampleVote(byte(PrecommitType))
 }
 
 func exampleVote(t byte) *Vote {
@@ -24,34 +27,117 @@ func exampleVote(t byte) *Vote {
 	}
 
 	return &Vote{
-		ValidatorAddress: []byte("addr"),
-		ValidatorIndex:   56789,
-		Height:           12345,
-		Round:            2,
-		Timestamp:        stamp,
-		Type:             t,
+		Type:      SignedMsgType(t),
+		Height:    12345,
+		Round:     2,
+		Timestamp: stamp,
 		BlockID: BlockID{
-			Hash: []byte("hash"),
+			Hash: tmhash.Sum([]byte("blockID_hash")),
 			PartsHeader: PartSetHeader{
 				Total: 1000000,
-				Hash:  []byte("parts_hash"),
+				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
 			},
 		},
+		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
+		ValidatorIndex:   56789,
 	}
 }
 
 func TestVoteSignable(t *testing.T) {
 	vote := examplePrecommit()
 	signBytes := vote.SignBytes("test_chain_id")
-	signStr := string(signBytes)
-	expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}`
-	if signStr != expected {
-		// NOTE: when this fails, you probably want to fix up consensus/replay_test too
-		t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr)
+	expected, err := cdc.MarshalBinaryLengthPrefixed(CanonicalizeVote("test_chain_id", vote))
+	require.NoError(t, err)
+
+	require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.")
+}
+
+func TestVoteSignableTestVectors(t *testing.T) {
+	vote := CanonicalizeVote("", &Vote{Height: 1, Round: 1})
+
+	tests := []struct {
+		canonicalVote CanonicalVote
+		want          []byte
+	}{
+		{
+			CanonicalizeVote("", &Vote{}),
+			// NOTE: Height and Round are skipped here. This case needs to be considered while parsing.
+			// []byte{0x22, 0x9, 0x9, 0x0, 0x9, 0x6e, 0x88, 0xf1, 0xff, 0xff, 0xff},
+			[]byte{0x22, 0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
+		},
+		// with proper (fixed size) height and round (PreCommit):
+		{
+			CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrecommitType}),
+			[]byte{
+				0x8,  // (field_number << 3) | wire_type
+				0x2,  // PrecommitType
+				0x11, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
+				0x19, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
+				0x22, // (field_number << 3) | wire_type
+				// remaining fields (timestamp):
+				0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
+		},
+		// with proper (fixed size) height and round (PreVote):
+		{
+			CanonicalizeVote("", &Vote{Height: 1, Round: 1, Type: PrevoteType}),
+			[]byte{
+				0x8,  // (field_number << 3) | wire_type
+				0x1,  // PrevoteType
+				0x11, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
+				0x19, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
+				0x22, // (field_number << 3) | wire_type
+				// remaining fields (timestamp):
+				0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
+		},
+		{
+			vote,
+			[]byte{
+				0x11, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
+				0x19, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
+				// remaining fields (timestamp):
+				0x22,
+				0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1},
+		},
+		// containing non-empty chain_id:
+		{
+			CanonicalizeVote("test_chain_id", &Vote{Height: 1, Round: 1}),
+			[]byte{
+				0x11, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // height
+				0x19, // (field_number << 3) | wire_type
+				0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, // round
+				// remaining fields:
+				0x22, // (field_number << 3) | wire_type
+				0xb, 0x8, 0x80, 0x92, 0xb8, 0xc3, 0x98, 0xfe, 0xff, 0xff, 0xff, 0x1, // timestamp
+				0x32, // (field_number << 3) | wire_type
+				0xd, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64}, // chainID
+		},
+	}
+	for i, tc := range tests {
+		got, err := cdc.MarshalBinaryBare(tc.canonicalVote)
+		require.NoError(t, err)
+
+		require.Equal(t, tc.want, got, "test case #%v: got unexpected sign bytes for Vote.", i)
 	}
 }
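Each key byte in the vectors above is annotated `(field_number << 3) | wire_type`. A tiny program that reproduces the annotated keys; the field-number-to-name mapping is inferred from the vector comments (type, height, round, timestamp, chain ID), and the wire types follow the protobuf convention (0 varint, 1 fixed64, 2 length-delimited):

```go
package main

import "fmt"

// tag computes a protobuf key byte exactly as the comments in the
// test vectors describe: (field_number << 3) | wire_type.
func tag(fieldNumber, wireType byte) byte {
	return fieldNumber<<3 | wireType
}

func main() {
	const (
		varint  = 0 // the vote type is a varint
		fixed64 = 1 // height and round are encoded fixed-size here
		bytesWT = 2 // length-delimited: timestamp message, chain_id string
	)
	fmt.Printf("%#x\n", tag(1, varint))  // 0x8  - Type
	fmt.Printf("%#x\n", tag(2, fixed64)) // 0x11 - Height
	fmt.Printf("%#x\n", tag(3, fixed64)) // 0x19 - Round
	fmt.Printf("%#x\n", tag(4, bytesWT)) // 0x22 - Timestamp
	fmt.Printf("%#x\n", tag(6, bytesWT)) // 0x32 - ChainID
}
```

This also explains the first vector's comment: zero-valued Height and Round are skipped entirely, so the encoding starts directly at the 0x22 timestamp key.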
+
+func TestVoteProposalNotEq(t *testing.T) {
+	cv := CanonicalizeVote("", &Vote{Height: 1, Round: 1})
+	p := CanonicalizeProposal("", &Proposal{Height: 1, Round: 1})
+	vb, err := cdc.MarshalBinaryLengthPrefixed(cv)
+	require.NoError(t, err)
+	pb, err := cdc.MarshalBinaryLengthPrefixed(p)
+	require.NoError(t, err)
+	require.NotEqual(t, vb, pb)
+}
+
 func TestVoteVerifySignature(t *testing.T) {
 	privVal := NewMockPV()
 	pubkey := privVal.GetPubKey()
@@ -69,9 +155,9 @@ func TestVoteVerifySignature(t *testing.T) {
 
 	// serialize, deserialize and verify again....
 	precommit := new(Vote)
-	bs, err := cdc.MarshalBinary(vote)
+	bs, err := cdc.MarshalBinaryLengthPrefixed(vote)
 	require.NoError(t, err)
-	err = cdc.UnmarshalBinary(bs, &precommit)
+	err = cdc.UnmarshalBinaryLengthPrefixed(bs, &precommit)
 	require.NoError(t, err)
 
 	// verify the transmitted vote
@@ -84,12 +170,12 @@ func TestVoteVerifySignature(t *testing.T) {
 
 func TestIsVoteTypeValid(t *testing.T) {
 	tc := []struct {
 		name string
-		in   byte
+		in   SignedMsgType
 		out  bool
 	}{
-		{"Prevote", VoteTypePrevote, true},
-		{"Precommit", VoteTypePrecommit, true},
-		{"InvalidType", byte(3), false},
+		{"Prevote", PrevoteType, true},
+		{"Precommit", PrecommitType, true},
+		{"InvalidType", SignedMsgType(0x3), false},
 	}
 
 	for _, tt := range tc {
@@ -119,3 +205,76 @@ func TestVoteVerify(t *testing.T) {
 		assert.Equal(t, ErrVoteInvalidSignature, err)
 	}
 }
+
+func TestMaxVoteBytes(t *testing.T) {
+	// time is varint encoded, so we need to pick the max.
+	// year int, month Month, day, hour, min, sec, nsec int, loc *Location
+	timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC)
+
+	vote := &Vote{
+		ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
+		ValidatorIndex:   math.MaxInt64,
+		Height:           math.MaxInt64,
+		Round:            math.MaxInt64,
+		Timestamp:        timestamp,
+		Type:             PrevoteType,
+		BlockID: BlockID{
+			Hash: tmhash.Sum([]byte("blockID_hash")),
+			PartsHeader: PartSetHeader{
+				Total: math.MaxInt64,
+				Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
+			},
+		},
+	}
+
+	privVal := NewMockPV()
+	err := privVal.SignVote("test_chain_id", vote)
+	require.NoError(t, err)
+
+	bz, err := cdc.MarshalBinaryLengthPrefixed(vote)
+	require.NoError(t, err)
+
+	assert.EqualValues(t, MaxVoteBytes, len(bz))
+}
+
+func TestVoteString(t *testing.T) {
+	str := examplePrecommit().String()
+	expected := `Vote{56789:6AF1F4111082 12345/02/2(Precommit) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	if str != expected {
+		t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str)
+	}
+
+	str2 := examplePrevote().String()
+	expected = `Vote{56789:6AF1F4111082 12345/02/1(Prevote) 8B01023386C3 000000000000 @ 2017-12-25T03:00:01.234Z}`
+	if str2 != expected {
+		t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", expected, str2)
+	}
+}
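TestMaxVoteBytes above pins the worst-case length of a length-prefixed, amino-encoded vote: all varint fields are set to their maximum so that nothing can encode larger. A sketch of the size guard a consumer of such votes might apply before decoding; the constant value here is only a stand-in for illustration, the test above asserts the real `MaxVoteBytes`:

```go
package main

import "fmt"

// maxVoteBytes stands in for the types.MaxVoteBytes constant that
// TestMaxVoteBytes pins down; the value here is assumed for illustration.
const maxVoteBytes = 223

// checkVoteSize sketches the guard applied to a length-prefixed vote
// before attempting to decode it: reject anything above the worst case.
func checkVoteSize(bz []byte) error {
	if len(bz) > maxVoteBytes {
		return fmt.Errorf("vote is too big: %d bytes, max %d", len(bz), maxVoteBytes)
	}
	return nil
}

func main() {
	fmt.Println(checkVoteSize(make([]byte, 200))) // <nil>
	fmt.Println(checkVoteSize(make([]byte, 300))) // vote is too big: ...
}
```

Bounding the message size up front lets a peer drop oversized payloads cheaply, without running the decoder on attacker-controlled input.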
+
+func TestVoteValidateBasic(t *testing.T) {
+	privVal := NewMockPV()
+
+	testCases := []struct {
+		testName     string
+		malleateVote func(*Vote)
+		expectErr    bool
+	}{
+		{"Good Vote", func(v *Vote) {}, false},
+		{"Negative Height", func(v *Vote) { v.Height = -1 }, true},
+		{"Negative Round", func(v *Vote) { v.Round = -1 }, true},
+		{"Invalid BlockID", func(v *Vote) { v.BlockID = BlockID{[]byte{1, 2, 3}, PartSetHeader{111, []byte("blockparts")}} }, true},
+		{"Invalid Address", func(v *Vote) { v.ValidatorAddress = make([]byte, 1) }, true},
+		{"Invalid ValidatorIndex", func(v *Vote) { v.ValidatorIndex = -1 }, true},
+		{"Invalid Signature", func(v *Vote) { v.Signature = nil }, true},
+		{"Too big Signature", func(v *Vote) { v.Signature = make([]byte, MaxSignatureSize+1) }, true},
+	}
+	for _, tc := range testCases {
+		t.Run(tc.testName, func(t *testing.T) {
+			vote := examplePrecommit()
+			err := privVal.SignVote("test_chain_id", vote)
+			require.NoError(t, err)
+			tc.malleateVote(vote)
+			assert.Equal(t, tc.expectErr, vote.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
diff --git a/types/wire.go b/types/wire.go
index c56089983d3..f3c314fa6c0 100644
--- a/types/wire.go
+++ b/types/wire.go
@@ -1,7 +1,7 @@
 package types
 
 import (
-	"github.com/tendermint/go-amino"
+	amino "github.com/tendermint/go-amino"
 	"github.com/tendermint/tendermint/crypto/encoding/amino"
 )
@@ -15,3 +15,8 @@ func RegisterBlockAmino(cdc *amino.Codec) {
 	cryptoAmino.RegisterAmino(cdc)
 	RegisterEvidences(cdc)
 }
+
+// GetCodec returns a codec used by the package. For testing purposes only.
+func GetCodec() *amino.Codec {
+	return cdc
+}
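The new `GetCodec` helper exposes the package's amino codec so code outside the package can encode with it. An illustrative external test built on that helper; the test name is made up, and the marshal calls are the same length-prefixed amino functions used throughout this patch:

```go
package types_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/types"
)

// TestEncodeWithPackageCodec is a hypothetical external test: it grabs the
// package codec via the new GetCodec helper and round-trips a Vote with
// the length-prefixed amino encoding.
func TestEncodeWithPackageCodec(t *testing.T) {
	cdc := types.GetCodec()

	vote := &types.Vote{Height: 1, Round: 1, Type: types.PrevoteType}
	bz, err := cdc.MarshalBinaryLengthPrefixed(vote)
	require.NoError(t, err)

	decoded := new(types.Vote)
	require.NoError(t, cdc.UnmarshalBinaryLengthPrefixed(bz, decoded))
	require.Equal(t, vote.Height, decoded.Height)
}
```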
diff --git a/version/version.go b/version/version.go
index 04880cfe0de..b7a72a7f788 100644
--- a/version/version.go
+++ b/version/version.go
@@ -1,19 +1,11 @@
 package version
 
-// Version components
-const (
-	Maj = "0"
-	Min = "23"
-	Fix = "0"
-)
-
 var (
-	// Version is the current version of Tendermint
-	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.23.0"
-
 	// GitCommit is the current HEAD set using ldflags.
 	GitCommit string
+
+	// Version is the built software version.
+	Version string = TMCoreSemVer
 )
 
 func init() {
@@ -21,3 +13,50 @@ func init() {
 		Version += "-" + GitCommit
 	}
 }
+
+const (
+	// TMCoreSemVer is the current version of Tendermint Core.
+	// It's the Semantic Version of the software.
+	// Must be a string because scripts like dist.sh read this file.
+	TMCoreSemVer = "0.26.1"
+
+	// ABCISemVer is the semantic version of the ABCI library
+	ABCISemVer  = "0.15.0"
+	ABCIVersion = ABCISemVer
+)
+
+// Protocol is used for implementation agnostic versioning.
+type Protocol uint64
+
+// Uint64 returns the Protocol version as a uint64,
+// e.g. for compatibility with ABCI types.
+func (p Protocol) Uint64() uint64 {
+	return uint64(p)
+}
+
+var (
+	// P2PProtocol versions all p2p behaviour and msgs.
+	P2PProtocol Protocol = 4
+
+	// BlockProtocol versions all block data structures and processing.
+	BlockProtocol Protocol = 7
+)
+
+//------------------------------------------------------------------------
+// Version types
+
+// App includes the protocol and software version for the application.
+// This information is included in ResponseInfo. The App.Protocol can be
+// updated in ResponseEndBlock.
+type App struct {
+	Protocol Protocol `json:"protocol"`
+	Software string   `json:"software"`
+}
+
+// Consensus captures the consensus rules for processing a block in the blockchain,
+// including all blockchain data structures and the rules of the application's
+// state transition machine.
+type Consensus struct {
+	Block Protocol `json:"block"`
+	App   Protocol `json:"app"`
+}
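A brief illustration of the new version types in use: a node reporting its software version alongside its protocol versions. The symbols come from the version.go diff above; the application protocol value is made up, since in practice the application supplies it:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/version"
)

func main() {
	c := version.Consensus{
		Block: version.BlockProtocol,
		App:   1, // hypothetical application protocol version, set by the app
	}
	fmt.Printf("software %s, block protocol %d, app protocol %d, p2p protocol %d\n",
		version.Version, c.Block.Uint64(), c.App.Uint64(), version.P2PProtocol.Uint64())
}
```

Separating the software semver (`TMCoreSemVer`) from the numeric `Protocol` versions lets implementations interoperate as long as their block and p2p protocols match, regardless of which software build produced them.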