diff --git a/.circleci/codecov.sh b/.circleci/codecov.sh new file mode 100644 index 00000000000..1ef332b1b31 --- /dev/null +++ b/.circleci/codecov.sh @@ -0,0 +1,1550 @@ +#!/usr/bin/env bash + +# Apache License Version 2.0, January 2004 +# https://github.com/codecov/codecov-bash/blob/master/LICENSE + + +set -e +o pipefail + +VERSION="0b37652" + +url="https://codecov.io" +env="$CODECOV_ENV" +service="" +token="" +search_in="" +flags="" +exit_with=0 +curlargs="" +curlawsargs="" +dump="0" +clean="0" +curl_s="-s" +name="$CODECOV_NAME" +include_cov="" +exclude_cov="" +ddp="$(echo ~)/Library/Developer/Xcode/DerivedData" +xp="" +files="" +cacert="$CODECOV_CA_BUNDLE" +gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'" +gcov_include="" + +ft_gcov="1" +ft_coveragepy="1" +ft_fix="1" +ft_search="1" +ft_s3="1" +ft_network="1" +ft_xcodellvm="1" +ft_xcodeplist="0" + +_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo $PWD) +git_root="$_git_root" +codecov_yml="" +remote_addr="" +if [ "$git_root" = "$PWD" ]; +then + git_root="." +fi + +url_o="" +pr_o="" +build_o="" +commit_o="" +search_in_o="" +tag_o="" +branch_o="" +slug_o="" +prefix_o="" + +commit="$VCS_COMMIT_ID" +branch="$VCS_BRANCH_NAME" +pr="$VCS_PULL_REQUEST" +slug="$VCS_SLUG" +tag="$VCS_TAG" +build_url="$CI_BUILD_URL" +build="$CI_BUILD_ID" +job="$CI_JOB_ID" + +beta_xcode_partials="" + +proj_root="$git_root" +gcov_exe="gcov" +gcov_arg="" + +b="\033[0;36m" +g="\033[0;32m" +r="\033[0;31m" +e="\033[0;90m" +x="\033[0m" + +show_help() { +cat << EOF + + Codecov Bash $VERSION + + Global report uploading tool for Codecov + Documentation at https://docs.codecov.io/docs + Contribute at https://github.com/codecov/codecov-bash + + + -h Display this help and exit + -f FILE Target file(s) to upload + + -f "path/to/file" only upload this file + skips searching unless provided patterns below + + -f '!*.bar' ignore all files at pattern *.bar + -f '*.foo' include all files at pattern *.foo + Must use single quotes. + This is non-exclusive, use -s "*.foo" to match specific paths. + + -s DIR Directory to search for coverage reports. + Already searches project root and artifact folders. + -t TOKEN Set the private repository token + (option) set environment variable CODECOV_TOKEN=:uuid + + -t @/path/to/token_file + -t uuid + + -n NAME Custom defined name of the upload. Visible in Codecov UI + + -e ENV Specify environment variables to be included with this build + Also accepting environment variables: CODECOV_ENV=VAR,VAR2 + + -e VAR,VAR2 + + -X feature Toggle functionalities + + -X gcov Disable gcov + -X coveragepy Disable python coverage + -X fix Disable report fixing + -X search Disable searching for reports + -X xcode Disable xcode processing + -X network Disable uploading the file network + + -R root dir Used when not in git/hg project to identify project root directory + -y conf file Used to specify the location of the .codecov.yml config file + -F flag Flag the upload to group coverage metrics + + -F unittests This upload is only unittests + -F integration This upload is only integration tests + -F ui,chrome This upload is Chrome - UI tests + + -c Move discovered coverage reports to the trash + -Z Exit with 1 if not successful. Default will Exit with 0 + + -- xcode -- + -D Custom Derived Data Path for Coverage.profdata and gcov processing + Default '~/Library/Developer/Xcode/DerivedData' + -J Specify packages to build coverage. 
+ This can significantly reduces time to build coverage reports. + + -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests" + -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests" + + -- gcov -- + -g GLOB Paths to ignore during gcov gathering + -G GLOB Paths to include during gcov gathering + -p dir Project root directory + Also used when preparing gcov + -k prefix Prefix filepaths to help resolve path fixing: https://github.com/codecov/support/issues/472 + -x gcovexe gcov executable to run. Defaults to 'gcov' + -a gcovargs extra arguments to pass to gcov + + -- Override CI Environment Variables -- + These variables are automatically detected by popular CI providers + + -B branch Specify the branch name + -C sha Specify the commit sha + -P pr Specify the pull request number + -b build Specify the build number + -T tag Specify the git tag + + -- Enterprise -- + -u URL Set the target url for Enterprise customers + Not required when retrieving the bash uploader from your CCE + (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com + -r SLUG owner/repo slug used instead of the private repo token in Enterprise + (option) set environment variable CODECOV_SLUG=:owner/:repo + (option) set in your codecov.yml "codecov.slug" + -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional) + (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem" + -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy" + -A curlargs Extra curl arguments to communicate with AWS. + + -- Debugging -- + -d Don't upload, but dump upload file to stdout + -K Remove color from the output + -v Verbose mode + +EOF +} + + +say() { + echo -e "$1" +} + + +urlencode() { + echo "$1" | curl -Gso /dev/null -w %{url_effective} --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//' +} + + +swiftcov() { + _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g') + for _type in app framework xctest + do + find "$_dir" -name "*.$_type" | while read f + do + _proj=${f##*/} + _proj=${_proj%."$_type"} + if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ]; + then + say " $g+$x Building reports for $_proj $_type" + dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj") + _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g') + xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \ + || say " ${r}x>${x} llvm-cov failed to produce results for $dest" + fi + done + done +} + + +# Credits to: https://gist.github.com/pkuczynski/8665367 +parse_yaml() { + local prefix=$2 + local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034') + sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \ + -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 | + awk -F$fs '{ + indent = length($1)/2; + vname[indent] = $2; + for (i in vname) {if (i > indent) {delete vname[i]}} + if (length($3) > 0) { + vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")} + printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3); + } + }' +} + + +if [ $# != 0 ]; +then + while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hJ:k:Kn:p:P:r:R:y:s:S:t:T:u:U:vx:X:Z" o + do + case "$o" in + "a") + gcov_arg=$OPTARG + ;; + "A") + curlawsargs="$OPTARG" + ;; + "b") + build_o="$OPTARG" + ;; + "B") + branch_o="$OPTARG" + ;; + "c") + clean="1" + ;; + "C") + commit_o="$OPTARG" + ;; + "d") + dump="1" + ;; + "D") + ddp="$OPTARG" + ;; + "e") + env="$env,$OPTARG" + ;; + "f") + if [ "${OPTARG::1}" = "!" 
]; + then + exclude_cov="$exclude_cov -not -path '${OPTARG:1}'" + + elif [[ "$OPTARG" = *"*"* ]]; + then + include_cov="$include_cov -or -name '$OPTARG'" + + else + ft_search=0 + if [ "$files" = "" ]; + then + files="$OPTARG" + else + files="$files +$OPTARG" + fi + fi + ;; + "F") + if [ "$flags" = "" ]; + then + flags="$OPTARG" + else + flags="$flags,$OPTARG" + fi + ;; + "g") + gcov_ignore="$gcov_ignore -not -path '$OPTARG'" + ;; + "G") + gcov_include="$gcov_include -path '$OPTARG'" + ;; + "h") + show_help + exit 0; + ;; + "J") + ft_xcodellvm="1" + ft_xcodeplist="0" + if [ "$xp" = "" ]; + then + xp="$OPTARG" + else + xp="$xp\|$OPTARG" + fi + ;; + "k") + prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::') + ;; + "K") + b="" + g="" + r="" + e="" + x="" + ;; + "n") + name="$OPTARG" + ;; + "p") + proj_root="$OPTARG" + ;; + "P") + pr_o="$OPTARG" + ;; + "r") + slug_o="$OPTARG" + ;; + "R") + git_root="$OPTARG" + ;; + "s") + if [ "$search_in_o" = "" ]; + then + search_in_o="$OPTARG" + else + search_in_o="$search_in_o $OPTARG" + fi + ;; + "S") + cacert="--cacert \"$OPTARG\"" + ;; + "t") + if [ "${OPTARG::1}" = "@" ]; + then + token=$(cat "${OPTARG:1}" | tr -d ' \n') + else + token="$OPTARG" + fi + ;; + "T") + tag_o="$OPTARG" + ;; + "u") + url_o=$(echo "$OPTARG" | sed -e 's/\/$//') + ;; + "U") + curlargs="$OPTARG" + ;; + "v") + set -x + curl_s="" + ;; + "x") + gcov_exe=$OPTARG + ;; + "X") + if [ "$OPTARG" = "gcov" ]; + then + ft_gcov="0" + elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ]; + then + ft_coveragepy="0" + elif [ "$OPTARG" = "xcodellvm" ]; + then + ft_xcodellvm="1" + ft_xcodeplist="0" + elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ]; + then + ft_fix="0" + elif [ "$OPTARG" = "xcode" ]; + then + ft_xcodellvm="0" + ft_xcodeplist="0" + elif [ "$OPTARG" = "search" ]; + then + ft_search="0" + elif [ "$OPTARG" = "xcodepartials" ]; + then + beta_xcode_partials="-use-color" + elif [ "$OPTARG" = "network" ]; + then + ft_network="0" + elif [ "$OPTARG" = "s3" ]; + then + ft_s3="0" + fi + ;; + "y") + codecov_yml="$OPTARG" + ;; + "Z") + exit_with=1 + ;; + esac + done +fi + +say " + _____ _ + / ____| | | +| | ___ __| | ___ ___ _____ __ +| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / / +| |___| (_) | (_| | __/ (_| (_) \\ V / + \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/ + Bash-$VERSION + +" + +search_in="$proj_root" + +if [ "$JENKINS_URL" != "" ]; +then + say "$e==>$x Jenkins CI detected." + # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project + # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables + service="jenkins" + + if [ "$ghprbSourceBranch" != "" ]; + then + branch="$ghprbSourceBranch" + elif [ "$GIT_BRANCH" != "" ]; + then + branch="$GIT_BRANCH" + elif [ "$BRANCH_NAME" != "" ]; + then + branch="$BRANCH_NAME" + fi + + if [ "$ghprbActualCommit" != "" ]; + then + commit="$ghprbActualCommit" + elif [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + fi + + if [ "$ghprbPullId" != "" ]; + then + pr="$ghprbPullId" + elif [ "$CHANGE_ID" != "" ]; + then + pr="$CHANGE_ID" + fi + + build="$BUILD_NUMBER" + build_url=$(urlencode "$BUILD_URL") + +elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ]; +then + say "$e==>$x Travis CI detected." 
+ # https://docs.travis-ci.com/user/environment-variables/ + service="travis" + commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}" + build="$TRAVIS_JOB_NUMBER" + pr="$TRAVIS_PULL_REQUEST" + job="$TRAVIS_JOB_ID" + slug="$TRAVIS_REPO_SLUG" + env="$env,TRAVIS_OS_NAME" + tag="$TRAVIS_TAG" + if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ]; + then + branch="$TRAVIS_BRANCH" + fi + + language=$(printenv | grep "TRAVIS_.*_VERSION" | head -1) + if [ "$language" != "" ]; + then + env="$env,${language%=*}" + fi + +elif [ "$DOCKER_REPO" != "" ]; +then + say "$e==>$x Docker detected." + # https://docs.docker.com/docker-cloud/builds/advanced/ + service="docker" + branch="$SOURCE_BRANCH" + commit="$SOURCE_COMMIT" + slug="$DOCKER_REPO" + tag="$CACHE_TAG" + env="$env,IMAGE_NAME" + +elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ]; +then + say "$e==>$x Codeship CI detected." + # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/ + service="codeship" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + build_url=$(urlencode "$CI_BUILD_URL") + commit="$CI_COMMIT_ID" + +elif [ ! -z "$CF_BUILD_URL" ] && [ ! -z "$CF_BUILD_ID" ]; +then + say "$e==>$x Codefresh CI detected." + # https://docs.codefresh.io/v1.0/docs/variables + service="codefresh" + branch="$CF_BRANCH" + build="$CF_BUILD_ID" + build_url=$(urlencode "$CF_BUILD_URL") + commit="$CF_REVISION" + +elif [ "$TEAMCITY_VERSION" != "" ]; +then + say "$e==>$x TeamCity CI detected." + # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters + # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298 + if [ "$TEAMCITY_BUILD_BRANCH" = '' ]; + then + echo " Teamcity does not automatically make build parameters available as environment variables." + echo " Add the following environment parameters to the build configuration" + echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%" + echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%" + echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%" + echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%" + echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%" + fi + service="teamcity" + branch="$TEAMCITY_BUILD_BRANCH" + build="$TEAMCITY_BUILD_ID" + build_url=$(urlencode "$TEAMCITY_BUILD_URL") + if [ "$TEAMCITY_BUILD_COMMIT" != "" ]; + then + commit="$TEAMCITY_BUILD_COMMIT" + else + commit="$BUILD_VCS_NUMBER" + fi + remote_addr="$TEAMCITY_BUILD_REPOSITORY" + +elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ]; +then + say "$e==>$x Circle CI detected." 
+ # https://circleci.com/docs/environment-variables + service="circleci" + branch="$CIRCLE_BRANCH" + build="$CIRCLE_BUILD_NUM" + job="$CIRCLE_NODE_INDEX" + if [ "$CIRCLE_PROJECT_REPONAME" != "" ]; + then + slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME" + else + # git@github.com:owner/repo.git + slug="${CIRCLE_REPOSITORY_URL##*:}" + # owner/repo.git + slug="${slug%%.git}" + fi + pr="$CIRCLE_PR_NUMBER" + commit="$CIRCLE_SHA1" + search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS" + +elif [ "$BUDDYBUILD_BRANCH" != "" ]; +then + say "$e==>$x buddybuild detected" + # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps + service="buddybuild" + branch="$BUDDYBUILD_BRANCH" + build="$BUDDYBUILD_BUILD_NUMBER" + build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID" + # BUDDYBUILD_TRIGGERED_BY + if [ "$ddp" = "$(echo ~)/Library/Developer/Xcode/DerivedData" ]; + then + ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest" + fi + +elif [ "${bamboo_planRepository_revision}" != "" ]; +then + say "$e==>$x Bamboo detected" + # https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables + service="bamboo" + commit="${bamboo_planRepository_revision}" + branch="${bamboo_planRepository_branch}" + build="${bamboo_buildNumber}" + build_url="${bamboo_buildResultsUrl}" + remote_addr="${bamboo_planRepository_repositoryUrl}" + +elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ]; +then + # http://devcenter.bitrise.io/faq/available-environment-variables/ + say "$e==>$x Bitrise CI detected." + service="bitrise" + branch="$BITRISE_GIT_BRANCH" + build="$BITRISE_BUILD_NUMBER" + build_url=$(urlencode "$BITRISE_BUILD_URL") + pr="$BITRISE_PULL_REQUEST" + if [ "$GIT_CLONE_COMMIT_HASH" != "" ]; + then + commit="$GIT_CLONE_COMMIT_HASH" + fi + +elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ]; +then + say "$e==>$x Semaphore CI detected." + # https://semaphoreapp.com/docs/available-environment-variables.html + service="semaphore" + branch="$BRANCH_NAME" + build="$SEMAPHORE_BUILD_NUMBER" + job="$SEMAPHORE_CURRENT_THREAD" + pr="$PULL_REQUEST_NUMBER" + slug="$SEMAPHORE_REPO_SLUG" + commit="$REVISION" + env="$env,SEMAPHORE_TRIGGER_SOURCE" + +elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ]; +then + say "$e==>$x Buildkite CI detected." + # https://buildkite.com/docs/guides/environment-variables + service="buildkite" + branch="$BUILDKITE_BRANCH" + build="$BUILDKITE_BUILD_NUMBER" + job="$BUILDKITE_JOB_ID" + build_url=$(urlencode "$BUILDKITE_BUILD_URL") + slug="$BUILDKITE_PROJECT_SLUG" + commit="$BUILDKITE_COMMIT" + if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then + pr="$BUILDKITE_PULL_REQUEST" + fi + tag="$BUILDKITE_TAG" + +elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ]; +then + say "$e==>$x Drone CI detected." + # http://docs.drone.io/env.html + # drone commits are not full shas + service="drone.io" + branch="$DRONE_BRANCH" + build="$DRONE_BUILD_NUMBER" + build_url=$(urlencode "${DRONE_BUILD_LINK}") + pr="$DRONE_PULL_REQUEST" + job="$DRONE_JOB_NUMBER" + tag="$DRONE_TAG" + +elif [ "$HEROKU_TEST_RUN_BRANCH" != "" ]; +then + say "$e==>$x Heroku CI detected." + # https://devcenter.heroku.com/articles/heroku-ci#environment-variables + service="heroku" + branch="$HEROKU_TEST_RUN_BRANCH" + build="$HEROKU_TEST_RUN_ID" + +elif [ "$CI" = "True" ] && [ "$APPVEYOR" = "True" ]; +then + say "$e==>$x Appveyor CI detected." 
+ # http://www.appveyor.com/docs/environment-variables + service="appveyor" + branch="$APPVEYOR_REPO_BRANCH" + build=$(urlencode "$APPVEYOR_JOB_ID") + pr="$APPVEYOR_PULL_REQUEST_NUMBER" + job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION" + slug="$APPVEYOR_REPO_NAME" + commit="$APPVEYOR_REPO_COMMIT" + +elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ]; +then + say "$e==>$x Wercker CI detected." + # http://devcenter.wercker.com/articles/steps/variables.html + service="wercker" + branch="$WERCKER_GIT_BRANCH" + build="$WERCKER_MAIN_PIPELINE_STARTED" + slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY" + commit="$WERCKER_GIT_COMMIT" + +elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ]; +then + say "$e==>$x Magnum CI detected." + # https://magnum-ci.com/docs/environment + service="magnum" + branch="$CI_BRANCH" + build="$CI_BUILD_NUMBER" + commit="$CI_COMMIT" + +elif [ "$SHIPPABLE" = "true" ]; +then + say "$e==>$x Shippable CI detected." + # http://docs.shippable.com/ci_configure/ + service="shippable" + branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH") + build="$BUILD_NUMBER" + build_url=$(urlencode "$BUILD_URL") + pr="$PULL_REQUEST" + slug="$REPO_FULL_NAME" + commit="$COMMIT" + +elif [ "$TDDIUM" = "true" ]; +then + say "Solano CI detected." + # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/ + service="solano" + commit="$TDDIUM_CURRENT_COMMIT" + branch="$TDDIUM_CURRENT_BRANCH" + build="$TDDIUM_TID" + pr="$TDDIUM_PR_ID" + +elif [ "$GREENHOUSE" = "true" ]; +then + say "$e==>$x Greenhouse CI detected." + # http://docs.greenhouseci.com/docs/environment-variables-files + service="greenhouse" + branch="$GREENHOUSE_BRANCH" + build="$GREENHOUSE_BUILD_NUMBER" + build_url=$(urlencode "$GREENHOUSE_BUILD_URL") + pr="$GREENHOUSE_PULL_REQUEST" + commit="$GREENHOUSE_COMMIT" + search_in="$search_in $GREENHOUSE_EXPORT_DIR" + +elif [ "$GITLAB_CI" != "" ]; +then + say "$e==>$x GitLab CI detected." + # http://doc.gitlab.com/ce/ci/variables/README.html + service="gitlab" + branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}" + build="${CI_BUILD_ID:-$CI_JOB_ID}" + remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}" + commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}" + +else + say "${r}x>${x} No CI provider detected." + say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}" + say " Testing with Tox? 
${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}" + +fi + +say " ${e}project root:${x} $git_root" + +# find branch, commit, repo from git command +if [ "$GIT_BRANCH" != "" ]; +then + branch="$GIT_BRANCH" + +elif [ "$branch" = "" ]; +then + branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "") + if [ "$branch" = "HEAD" ]; + then + branch="" + fi +fi + +if [ "$commit_o" = "" ]; +then + # merge commit -> actual commit + mc= + if [ -n "$pr" ] && [ "$pr" != false ]; + then + mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "") + fi + if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]]; + then + say " Fixing merge commit SHA" + commit=$(echo "$mc" | cut -d' ' -f2) + elif [ "$GIT_COMMIT" != "" ]; + then + commit="$GIT_COMMIT" + elif [ "$commit" = "" ]; + then + commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "") + fi +else + commit="$commit_o" +fi + +if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ]; +then + say "${e}-->${x} token set from env" + token="$CODECOV_TOKEN" +fi + +if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ]; +then + say "${e}-->${x} url set from env" + url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//') +fi + +if [ "$CODECOV_SLUG" != "" ]; +then + say "${e}-->${x} slug set from env" + slug_o="$CODECOV_SLUG" + +elif [ "$slug" = "" ]; +then + if [ "$remote_addr" = "" ]; + then + remote_addr=$(git config --get remote.origin.url || hg paths default || echo '') + fi + if [ "$remote_addr" != "" ]; + then + if echo "$remote_addr" | grep -q "//"; then + # https + slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//') + else + # ssh + slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//') + fi + fi + if [ "$slug" = "/" ]; + then + slug="" + fi +fi + +yaml=$(test -n "$codecov_yml" && echo "$codecov_yml" \ + || cd "$git_root" && \ + git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \ + || cd $proj_root && find . -type f -name '*codecov.y*ml' -depth 1 2>/dev/null \ + || echo '') +yaml=$(echo "$yaml" | head -1) + +if [ "$yaml" != "" ]; +then + say " ${e}Yaml found at:${x} $yaml" + config=$(parse_yaml "$git_root/$yaml" || echo '') + + # TODO validate the yaml here + + if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ]; + then + say "${e}-->${x} token set from yaml" + token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ]; + then + say "${e}-->${x} url set from yaml" + url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')" + fi + + if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ]; + then + say "${e}-->${x} slug set from yaml" + slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')" + fi +else + say " ${g}Yaml not found, that's ok! 
Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}" + +fi + +if [ "$branch_o" != "" ]; +then + branch=$(urlencode "$branch_o") +else + branch=$(urlencode "$branch") +fi + +query="branch=$branch\ + &commit=$commit\ + &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\ + &build_url=$build_url\ + &name=$(urlencode "$name")\ + &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\ + &slug=$([ "$slug_o" = "" ] && urlencode "$slug" || urlencode "$slug_o")\ + &service=$service\ + &flags=$flags\ + &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\ + &job=$job" + +if [ "$ft_search" = "1" ]; +then + # detect bower comoponents location + bower_components="bower_components" + bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "") + if [ "$bower_rc" != "" ]; + then + bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//') + if [ "$bower_components" = "" ]; + then + bower_components="bower_components" + fi + fi + + # Swift Coverage + if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode reports via llvm-cov" + say " DerivedData folder: $ddp" + profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '') + if [ "$profdata_files" != "" ]; + then + # xcode via profdata + if [ "$xp" = "" ]; + then + # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/') + # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}" + say " ${g}hint${x} Speed up Swift processing by using use ${g}-J 'AppName'${x} (regexp accepted)" + say " ${g}hint${x} This will remove Pods/ from your report. Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}" + fi + while read -r profdata; + do + if [ "$profdata" != "" ]; + then + swiftcov "$profdata" "$xp" + fi + done <<< "$profdata_files" + else + say " ${e}->${x} No Swift coverage found" + fi + + # Obj-C Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say " ${e}->${x} Running $gcov_exe for Obj-C" + bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true + fi + fi + + if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ]; + then + say "${e}==>${x} Processing Xcode plists" + plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '') + if [ "$plists_files" != "" ]; + then + while read -r plist; + do + if [ "$plist" != "" ]; + then + say " ${g}Found${x} plist file at $plist" + plutil -convert xml1 -o "$(basename "$plist").plist" -- $plist + fi + done <<< "$plists_files" + fi + fi + + # Gcov Coverage + if [ "$ft_gcov" = "1" ]; + then + say "${e}==>${x} Running gcov in $proj_root ${e}(disable via -X gcov)${x}" + bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true + else + say "${e}==>${x} gcov disabled" + fi + + # Python Coverage + if [ "$ft_coveragepy" = "1" ]; + then + if [ ! -f coverage.xml ]; + then + if which coverage >/dev/null 2>&1; + then + say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}" + + dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '') + if [ "$dotcoverage" != "" ]; + then + cd "$(dirname "$dotcoverage")" + if [ ! -f .coverage ]; + then + say " ${e}->${x} Running coverage combine" + coverage combine -a + fi + say " ${e}->${x} Running coverage xml" + if [ "$(coverage xml -i)" != "No data to report." 
]; + then + files="$files +$PWD/coverage.xml" + else + say " ${r}No data to report.${x}" + fi + cd "$proj_root" + else + say " ${r}No .coverage file found.${x}" + fi + else + say "${e}==>${x} Python coveragepy not found" + fi + fi + else + say "${e}==>${x} Python coveragepy disabled" + fi + + if [ "$search_in_o" != "" ]; + then + # location override + search_in="$search_in_o" + fi + + say "$e==>$x Searching for coverage reports in:" + for _path in $search_in + do + say " ${g}+${x} $_path" + done + + patterns="find $search_in \( \ + -name vendor \ + -or -name htmlcov \ + -or -name virtualenv \ + -or -name js/generated/coverage \ + -or -name .virtualenv \ + -or -name virtualenvs \ + -or -name .virtualenvs \ + -or -name .env \ + -or -name .envs \ + -or -name env \ + -or -name .yarn-cache \ + -or -name envs \ + -or -name .venv \ + -or -name .venvs \ + -or -name venv \ + -or -name venvs \ + -or -name .git \ + -or -name .hg \ + -or -name .tox \ + -or -name __pycache__ \ + -or -name '.egg-info*' \ + -or -name '$bower_components' \ + -or -name node_modules \ + -or -name 'conftest_*.c.gcov' \ + \) -prune -or \ + -type f \( -name '*coverage*.*' \ + -or -name 'nosetests.xml' \ + -or -name 'jacoco*.xml' \ + -or -name 'clover.xml' \ + -or -name 'report.xml' \ + -or -name '*.codecov.*' \ + -or -name 'codecov.*' \ + -or -name 'cobertura.xml' \ + -or -name 'excoveralls.json' \ + -or -name 'luacov.report.out' \ + -or -name 'coverage-final.json' \ + -or -name 'naxsi.info' \ + -or -name 'lcov.info' \ + -or -name 'lcov.dat' \ + -or -name '*.lcov' \ + -or -name '*.clover' \ + -or -name 'cover.out' \ + -or -name 'gcov.info' \ + -or -name '*.gcov' \ + -or -name '*.lst' \ + $include_cov \) \ + $exclude_cov \ + -not -name '*.profdata' \ + -not -name 'coverage-summary.json' \ + -not -name 'phpunit-code-coverage.xml' \ + -not -name '*/classycle/report.xml' \ + -not -name 'remapInstanbul.coverage*.json' \ + -not -name 'phpunit-coverage.xml' \ + -not -name '*codecov.yml' \ + -not -name '*.serialized' \ + -not -name '.coverage*' \ + -not -name '.*coveragerc' \ + -not -name '*.sh' \ + -not -name '*.bat' \ + -not -name '*.ps1' \ + -not -name '*.env' \ + -not -name '*.cmake' \ + -not -name '*.dox' \ + -not -name '*.ec' \ + -not -name '*.rst' \ + -not -name '*.h' \ + -not -name '*.scss' \ + -not -name '*.o' \ + -not -name '*.proto' \ + -not -name '*.sbt' \ + -not -name '*.xcoverage.*' \ + -not -name '*.gz' \ + -not -name '*.conf' \ + -not -name '*.p12' \ + -not -name '*.csv' \ + -not -name '*.rsp' \ + -not -name '*.m4' \ + -not -name '*.pem' \ + -not -name '*~' \ + -not -name '*.exe' \ + -not -name '*.am' \ + -not -name '*.template' \ + -not -name '*.cp' \ + -not -name '*.bw' \ + -not -name '*.crt' \ + -not -name '*.log' \ + -not -name '*.cmake' \ + -not -name '*.pth' \ + -not -name '*.in' \ + -not -name '*.jar*' \ + -not -name '*.pom*' \ + -not -name '*.png' \ + -not -name '*.jpg' \ + -not -name '*.sql' \ + -not -name '*.jpeg' \ + -not -name '*.svg' \ + -not -name '*.gif' \ + -not -name '*.csv' \ + -not -name '*.snapshot' \ + -not -name '*.mak*' \ + -not -name '*.bash' \ + -not -name '*.data' \ + -not -name '*.py' \ + -not -name '*.class' \ + -not -name '*.xcconfig' \ + -not -name '*.ec' \ + -not -name '*.coverage' \ + -not -name '*.pyc' \ + -not -name '*.cfg' \ + -not -name '*.egg' \ + -not -name '*.ru' \ + -not -name '*.css' \ + -not -name '*.less' \ + -not -name '*.pyo' \ + -not -name '*.whl' \ + -not -name '*.html' \ + -not -name '*.ftl' \ + -not -name '*.erb' \ + -not -name '*.rb' \ + -not -name '*.js' \ + -not -name 
'*.jade' \ + -not -name '*.db' \ + -not -name '*.md' \ + -not -name '*.cpp' \ + -not -name '*.gradle' \ + -not -name '*.tar.tz' \ + -not -name '*.scss' \ + -not -name 'include.lst' \ + -not -name 'fullLocaleNames.lst' \ + -not -name 'inputFiles.lst' \ + -not -name 'createdFiles.lst' \ + -not -name 'scoverage.measurements.*' \ + -not -name 'test_*_coverage.txt' \ + -not -name 'testrunner-coverage*' \ + -print 2>/dev/null" + files=$(eval "$patterns" || echo '') + +elif [ "$include_cov" != "" ]; +then + files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '') +fi + +num_of_files=$(echo "$files" | wc -l | tr -d ' ') +if [ "$num_of_files" != '' ] && [ "$files" != '' ]; +then + say " ${e}->${x} Found $num_of_files reports" +fi + +# no files found +if [ "$files" = "" ]; +then + say "${r}-->${x} No coverage report found." + say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + exit ${exit_with}; +fi + +if [ "$ft_network" == "1" ]; +then + say "${e}==>${x} Detecting git/mercurial file structure" + network=$(cd "$git_root" && git ls-files 2>/dev/null || hg locate 2>/dev/null || echo "") + if [ "$network" = "" ]; + then + network=$(find "$git_root" \( \ + -name virtualenv \ + -name .virtualenv \ + -name virtualenvs \ + -name .virtualenvs \ + -name '*.png' \ + -name '*.gif' \ + -name '*.jpg' \ + -name '*.jpeg' \ + -name '*.md' \ + -name .env \ + -name .envs \ + -name env \ + -name envs \ + -name .venv \ + -name .venvs \ + -name venv \ + -name venvs \ + -name .git \ + -name .egg-info \ + -name shunit2-2.1.6 \ + -name vendor \ + -name __pycache__ \ + -name node_modules \ + -path '*/$bower_components/*' \ + -path '*/target/delombok/*' \ + -path '*/build/lib/*' \ + -path '*/js/generated/coverage/*' \ + \) -prune -or \ + -type f -print 2>/dev/null || echo '') + fi + + if [ "$prefix_o" != "" ]; + then + network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}") + fi +fi + +upload_file=`mktemp /tmp/codecov.XXXXXX` +adjustments_file=`mktemp /tmp/codecov.adjustments.XXXXXX` + +cleanup() { + rm -f $upload_file $adjustments_file $upload_file.gz +} + +trap cleanup INT ABRT TERM + +if [ "$env" != "" ]; +then + inc_env="" + say "${e}==>${x} Appending build variables" + for varname in $(echo "$env" | tr ',' ' ') + do + if [ "$varname" != "" ]; + then + say " ${g}+${x} $varname" + inc_env="${inc_env}${varname}=$(eval echo "\$${varname}") +" + fi + done + +echo "$inc_env<<<<<< ENV" >> $upload_file +fi + +# Append git file list +# write discovered yaml location +echo "$yaml" >> $upload_file +if [ "$ft_network" == "1" ]; +then + i="woff|eot|otf" # fonts + i="$i|gif|png|jpg|jpeg|psd" # images + i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|html|csv" # docs + i="$i|yml|yaml|.gitignore" # supporting docs + echo "$network" | grep -vwE "($i)$" >> $upload_file +fi +echo "<<<<<< network" >> $upload_file + +fr=0 +say "${e}==>${x} Reading reports" +while IFS='' read -r file; +do + # read the coverage file + if [ "$(echo "$file" | tr -d ' ')" != '' ]; + then + if [ -f "$file" ]; + then + report_len=$(wc -c < "$file") + if [ "$report_len" -ne 0 ]; + then + say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}" + # append to to upload + _filename=$(basename "$file") + if [ "${_filename##*.}" = 'gcov' ]; + then + echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")" >> $upload_file + # get file name + head -1 $file >> $upload_file + # 1. remove source code + # 2. remove ending bracket lines + # 3. remove whitespace + # 4. 
remove contextual lines + # 5. remove function names + awk -F': *' '{print $1":"$2":"}' $file \ + | sed '\/: *} *$/d' \ + | sed 's/^ *//' \ + | sed '/^-/d' \ + | sed 's/^function.*/func/' >> $upload_file + else + echo "# path=$(echo "$file" | sed "s|^$git_root/||")" >> $upload_file + cat "$file" >> $upload_file + fi + echo "<<<<<< EOF" >> $upload_file + fr=1 + if [ "$clean" = "1" ]; + then + rm "$file" + fi + else + say " ${r}-${x} Skipping empty file $file" + fi + else + say " ${r}-${x} file not found at $file" + fi + fi +done <<< "$(echo -e "$files")" + +if [ "$fr" = "0" ]; +then + say "${r}-->${x} No coverage data found." + say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}" + say " search for your projects language to learn how to collect reports." + exit ${exit_with}; +fi + +if [ "$ft_fix" = "1" ]; +then + say "${e}==>${x} Appending adjustments" + say " ${b}http://docs.codecov.io/docs/fixing-reports${x}" + + empty_line='^[[:space:]]*$' + # // + syntax_comment='^[[:space:]]*//.*' + # /* or */ + syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$' + # { or } + syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$' + # [ or ] + syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$' + + skip_dirs="-not -path '*/$bower_components/*' \ + -not -path '*/node_modules/*'" + + cut_and_join() { + awk 'BEGIN { FS=":" } + $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next } + $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next } + { out=out","$2 } + END { print out }' 2>/dev/null + } + + if echo "$network" | grep -m1 '.kt$' 1>/dev/null; + then + # skip brackets and comments + find "$git_root" -type f \ + -name '*.kt' \ + -exec \ + grep -nIHE -e $syntax_bracket \ + -e $syntax_comment_block {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + + # last line in file + find "$git_root" -type f \ + -name '*.kt' -exec \ + wc -l {} \; \ + | while read l; do echo "EOF: $l"; done \ + 2>/dev/null \ + >> $adjustments_file \ + || echo '' + + fi + + if echo "$network" | grep -m1 '.go$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + find "$git_root" -not -path '*/vendor/*' \ + -type f \ + -name '*.go' \ + -exec \ + grep -nIHE \ + -e $empty_line \ + -e $syntax_comment \ + -e $syntax_comment_block \ + -e $syntax_bracket \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '.dart$' 1>/dev/null; + then + # skip brackets + find "$git_root" -type f \ + -name '*.dart' \ + -exec \ + grep -nIHE \ + -e $syntax_bracket \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '.php$' 1>/dev/null; + then + # skip empty lines, comments, and brackets + find "$git_root" -not -path "*/vendor/*" \ + -type f \ + -name '*.php' \ + -exec \ + grep -nIHE \ + -e $syntax_list \ + -e $syntax_bracket \ + -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + fi + + if echo "$network" | grep -m1 '\(.cpp\|.h\|.cxx\|.c\|.hpp\|.m\)$' 1>/dev/null; + then + # skip brackets + find "$git_root" -type f \ + $skip_dirs \ + \( \ + -name '*.h' \ + -or -name '*.cpp' \ + -or -name '*.cxx' \ + -or -name '*.m' \ + -or -name '*.c' \ + -or -name '*.hpp' \ + \) -exec \ + grep -nIHE \ + -e $empty_line \ + -e $syntax_bracket \ + -e '// LCOV_EXCL' \ + {} \; \ + | cut_and_join \ + >> $adjustments_file \ + || echo '' + + # skip brackets + find "$git_root" -type f \ + $skip_dirs \ + \( \ + -name '*.h' \ + -or -name '*.cpp' \ + -or 
-name '*.cxx' \ + -or -name '*.m' \ + -or -name '*.c' \ + -or -name '*.hpp' \ + \) -exec \ + grep -nIH '// LCOV_EXCL' \ + {} \; \ + >> $adjustments_file \ + || echo '' + + fi + + found=$(cat $adjustments_file | tr -d ' ') + + if [ "$found" != "" ]; + then + say " ${g}+${x} Found adjustments" + echo "# path=fixes" >> $upload_file + cat $adjustments_file >> $upload_file + echo "<<<<<< EOF" >> $upload_file + rm -rf $adjustments_file + else + say " ${e}->${x} No adjustments found" + fi +fi + +if [ "$url_o" != "" ]; +then + url="$url_o" +fi + +if [ "$dump" != "0" ]; +then + # trim whitespace from query + say " ${e}->${x} Dumping upload file (no upload)" + echo "$url/upload/v4?$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ')" + cat $upload_file +else + + say "${e}==>${x} Gzipping contents" + gzip -nf9 $upload_file + + query=$(echo "${query}" | tr -d ' ') + say "${e}==>${x} Uploading reports" + say " ${e}url:${x} $url" + say " ${e}query:${x} $query" + + # now add token to query + query=$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ') + + if [ "$ft_s3" = "1" ]; + then + i="0" + while [ $i -lt 4 ] + do + i=$[$i+1] + say " ${e}->${x} Pinging Codecov" + res=$(curl $curl_s -X POST $curlargs $cacert \ + -H 'X-Reduced-Redundancy: false' \ + -H 'X-Content-Type: application/x-gzip' \ + "$url/upload/v4?$query" || true) + # a good replay is "https://codecov.io" + "\n" + "https://codecov.s3.amazonaws.com/..." + status=$(echo "$res" | head -1 | grep 'HTTP ' | cut -d' ' -f2) + if [ "$status" = "" ]; + then + s3target=$(echo "$res" | sed -n 2p) + say " ${e}->${x} Uploading" + s3=$(curl $curl_s -fiX PUT $curlawsargs \ + --data-binary @$upload_file.gz \ + -H 'Content-Type: application/x-gzip' \ + -H 'Content-Encoding: gzip' \ + -H 'x-amz-acl: public-read' \ + "$s3target" || true) + if [ "$s3" != "" ]; + then + say " ${g}->${x} View reports at ${b}$(echo "$res" | sed -n 1p)${x}" + exit 0 + else + say " ${r}X>${x} Failed to upload" + fi + elif [ "$status" = "400" ]; + then + # 400 Error + say "${g}${res}${x}" + exit ${exit_with} + fi + say " ${e}->${x} Sleeping for 30s and trying again..." + sleep 30 + done + fi + + say " ${e}->${x} Uploading to Codecov" + i="0" + while [ $i -lt 4 ] + do + i=$[$i+1] + + res=$(curl $curl_s -X POST $curlargs $cacert \ + --data-binary @$upload_file.gz \ + -H 'Content-Type: text/plain' \ + -H 'Content-Encoding: gzip' \ + -H 'X-Content-Encoding: gzip' \ + -H 'Accept: text/plain' \ + "$url/upload/v2?$query" || echo 'HTTP 500') + # HTTP 200 + # http://.... + status=$(echo "$res" | head -1 | cut -d' ' -f2) + if [ "$status" = "" ]; + then + say " View reports at ${b}$(echo "$res" | head -2 | tail -1)${x}" + exit 0 + + elif [ "${status:0:1}" = "5" ]; + then + say " ${e}->${x} Sleeping for 30s and trying again..." 
+ sleep 30 + + else + say " ${g}${res}${x}" + exit 0 + exit ${exit_with} + fi + + done + + say " ${r}X> Failed to upload coverage reports${x}" +fi + +exit ${exit_with} diff --git a/.circleci/config.yml b/.circleci/config.yml index 8d321e9e690..9d284be6f3a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -41,10 +41,10 @@ jobs: key: v3-pkg-cache paths: - /go/pkg - - save_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} - paths: - - /go/src/github.com/tendermint/tendermint + # - save_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # paths: + # - /go/src/github.com/tendermint/tendermint build_slate: <<: *defaults @@ -53,8 +53,23 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2 + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: slate docs command: | @@ -69,8 +84,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: metalinter command: | @@ -91,8 +120,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run abci apps tests command: | @@ -108,8 +151,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run abci-cli tests command: | @@ -123,8 +180,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir 
-p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils - run: name: Run tests @@ -138,8 +209,22 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: mkdir -p /tmp/logs - run: name: Run tests @@ -163,12 +248,48 @@ jobs: at: /tmp/workspace - restore_cache: key: v3-pkg-cache - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: Run tests command: bash test/persist/test_failure_indices.sh + localnet: + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout + - run: + name: run localnet and exit on failure + command: | + set -x + make get_tools + make get_vendor_deps + make build-linux + make localnet-start & + ./scripts/localnet-blocks-test.sh 40 5 10 localhost + test_p2p: environment: GOBIN: /home/circleci/.go_workspace/bin @@ -186,8 +307,22 @@ jobs: steps: - attach_workspace: at: /tmp/workspace - - restore_cache: - key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + # - restore_cache: + # key: v3-tree-{{ .Environment.CIRCLE_SHA1 }} + - checkout + - run: + name: tools + command: | + export PATH="$GOBIN:$PATH" + make get_tools + - run: + name: dependencies + command: | + export PATH="$GOBIN:$PATH" + make get_vendor_deps + - run: mkdir -p $GOPATH/src/github.com/tendermint + - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint + - run: name: gather command: | @@ -199,7 +334,7 @@ jobs: done - run: name: upload - command: bash <(curl -s https://codecov.io/bash) -f coverage.txt + command: bash .circleci/codecov.sh -f coverage.txt workflows: version: 2 @@ -224,6 +359,9 @@ workflows: - test_persistence: requires: - setup_dependencies + - localnet: + requires: + - setup_dependencies - test_p2p - upload_coverage: requires: diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 9586a870287..9d2fc15be57 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,4 +4,4 @@ * @ebuchman @melekes @xla # Precious documentation -/docs/ @zramsay @jolesbi +/docs/ @zramsay diff --git a/.gitignore b/.gitignore index 4f8481603bc..193a42894a1 100644 --- a/.gitignore +++ b/.gitignore @@ -28,9 +28,14 @@ scripts/cutWALUntil/cutWALUntil libs/pubsub/query/fuzz_test/output shunit2 +.tendermint-lite +addrbook.json + */vendor */.glide .terraform terraform.tfstate 
 terraform.tfstate.backup
 terraform.tfstate.d
+
+.vscode
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d3f92f64b2f..726ca9aedf0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,109 @@
 # Changelog
 
+## 0.24.0
+
+*September 6th, 2018*
+
+Special thanks to external contributors with PRs included in this release: ackratos, james-ray, bradyjoestar,
+peerlink, Ahmah2009, bluele, b00f.
+
+This release includes breaking upgrades in the block header,
+including the long-awaited changes for delaying validator set updates by one
+block to better support light clients.
+It also fixes enforcement of the maximum block size, and includes a BFT
+timestamp in each block that can be safely used by applications.
+There are also some minor breaking changes to the RPC, config, and ABCI.
+
+See [UPGRADING.md](UPGRADING.md#v0.24.0) for details on upgrading to the new
+version.
+
+From here on, breaking changes will be broken down to better reflect how users
+are affected by a change.
+
+A few more breaking changes are in the works - each will come with a clear
+Architecture Decision Record (ADR) explaining the change. You can review ADRs
+[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
+or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
+You can also check in on the [issues marked as
+breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
+
+BREAKING CHANGES:
+
+* CLI/RPC/Config
+  - [config] [\#2169](https://github.com/tendermint/tendermint/issues/2169) Replace MaxNumPeers with MaxNumInboundPeers and MaxNumOutboundPeers
+  - [config] [\#2300](https://github.com/tendermint/tendermint/issues/2300) Reduce default mempool size from 100k to 5k, until ABCI rechecking is implemented.
+ - [rpc] [\#1815](https://github.com/tendermint/tendermint/issues/1815) `/commit` returns a `signed_header` field instead of everything being top-level + +* Apps + - [abci] Added address of the original proposer of the block to Header + - [abci] Change ABCI Header to match Tendermint exactly + - [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see + [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)): + - Remove PubKey from `Validator` (so it's just Address and Power) + - Introduce `ValidatorUpdate` (with just PubKey and Power) + - InitChain and EndBlock use ValidatorUpdate + - Update field names and types in BeginBlock + - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block + - updates returned in ResponseEndBlock for block H will be included in RequestBeginBlock for block H+2 + +* Go API + - [lite] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Complete refactor of the package + - [node] [\#2212](https://github.com/tendermint/tendermint/issues/2212) NewNode now accepts a `*p2p.NodeKey` (@bradyjoestar) + - [libs/common] [\#2199](https://github.com/tendermint/tendermint/issues/2199) Remove Fmt, in favor of fmt.Sprintf + - [libs/common] SplitAndTrim was deleted + - [libs/common] [\#2274](https://github.com/tendermint/tendermint/issues/2274) Remove unused Math functions like MaxInt, MaxInt64, + MinInt, MinInt64 (@Ahmah2009) + - [libs/clist] Panics if list extends beyond MaxLength + - [crypto] [\#2205](https://github.com/tendermint/tendermint/issues/2205) Rename AminoRoute variables to no longer be prefixed by signature type. + +* Blockchain Protocol + - [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!) + - Add NextValidatorSet to State, changes on-disk representation of state + - [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See + [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)). + - Remove ConsensusParams.BlockSize.MaxTxs + - Introduce maximum sizes for all components of a block, including ChainID + - [types] Updates to the block Header: + - [\#1815](https://github.com/tendermint/tendermint/issues/1815) NextValidatorsHash - hash of the validator set for the next block, + so the current validators actually sign over the hash for the new + validators + - [\#2106](https://github.com/tendermint/tendermint/issues/2106) ProposerAddress - address of the block's original proposer + - [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time + - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit + - [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See + [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)): + - format changed from DER to `r || s`, both little endian encoded as 32 bytes. + - malleability removed by requiring `s` to be in canonical form. 
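As an aside on the secp256k1 entry just above (signatures moving from DER to a fixed 64-byte `r || s` layout with `s` in canonical form), here is a minimal Go sketch of what such a canonicality check looks like. It is illustrative only, not code from this PR: the little-endian layout comes from the changelog entry, the `s <= n/2` cutoff assumes the usual low-s convention from ADR-014, and the helper names are hypothetical.

```go
package main

import (
	"fmt"
	"math/big"
)

// Curve order n of secp256k1 (a public constant, not defined in this PR).
var secp256k1N, _ = new(big.Int).SetString(
	"fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141", 16)

// halfN = n / 2, the cutoff for the low-s (canonical) rule.
var halfN = new(big.Int).Rsh(secp256k1N, 1)

// leBytesToInt reads b as a little-endian unsigned integer, matching the
// byte order the changelog entry describes for r and s.
func leBytesToInt(b []byte) *big.Int {
	rev := make([]byte, len(b))
	for i, v := range b {
		rev[len(b)-1-i] = v
	}
	return new(big.Int).SetBytes(rev)
}

// isCanonicalS reports whether the s half of a raw 64-byte r||s signature
// is in canonical form, assuming the low-s convention (0 < s <= n/2).
func isCanonicalS(sig [64]byte) bool {
	s := leBytesToInt(sig[32:])
	return s.Sign() != 0 && s.Cmp(halfN) <= 0
}

func main() {
	var sig [64]byte
	sig[32] = 1 // s = 1 in little endian: well within the canonical range
	fmt.Println(isCanonicalS(sig)) // true
}
```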
+
+* P2P Protocol
+  - [p2p] [\#2263](https://github.com/tendermint/tendermint/issues/2263) Update secret connection to use a little endian encoded nonce
+  - [blockchain] [\#2213](https://github.com/tendermint/tendermint/issues/2213) Fix Amino routes for blockchain reactor messages
+    (@peerlink)
+
+
+FEATURES:
+- [types] [\#2015](https://github.com/tendermint/tendermint/issues/2015) Allow genesis file to have 0 validators (@b00f)
+  - Initial validator set can be determined by the app in ResponseInitChain
+- [rpc] [\#2161](https://github.com/tendermint/tendermint/issues/2161) New event `ValidatorSetUpdates` for when the validator set changes
+- [crypto/multisig] [\#2164](https://github.com/tendermint/tendermint/issues/2164) Introduce multisig pubkey and signature format
+- [libs/db] [\#2293](https://github.com/tendermint/tendermint/issues/2293) Allow passing options through when creating instances of leveldb dbs
+
+IMPROVEMENTS:
+- [docs] Lint documentation with `write-good` and `stop-words`.
+- [docs] [\#2249](https://github.com/tendermint/tendermint/issues/2249) Refactor, deduplicate, and improve the ABCI docs and spec (with thanks to @ttmc).
+- [scripts] [\#2196](https://github.com/tendermint/tendermint/issues/2196) Added json2wal tool, which is supposed to help our users restore
+  corrupted WAL files and compose test WAL files (@bradyjoestar)
+- [mempool] [\#2234](https://github.com/tendermint/tendermint/issues/2234) Now stores txs by hash inside of the cache, to mitigate memory leakage
+- [mempool] [\#2166](https://github.com/tendermint/tendermint/issues/2166) Set explicit capacity for map when updating txs (@bluele)
+
+BUG FIXES:
+- [config] [\#2284](https://github.com/tendermint/tendermint/issues/2284) Replace `db_path` with `db_dir` from automatically generated configuration files.
+- [mempool] [\#2188](https://github.com/tendermint/tendermint/issues/2188) Fix OOM issue from cache map and list getting out of sync
+- [state] [\#2051](https://github.com/tendermint/tendermint/issues/2051) KV store index supports searching by `tx.height` (@ackratos)
+- [rpc] [\#2327](https://github.com/tendermint/tendermint/issues/2327) `/dial_peers` does not try to dial existing peers
+- [node] [\#2323](https://github.com/tendermint/tendermint/issues/2323) Filter empty strings from config lists (@james-ray)
+- [abci/client] [\#2236](https://github.com/tendermint/tendermint/issues/2236) Fix closing GRPC connection (@bradyjoestar)
+
 ## 0.23.1
 
 *August 22nd, 2018*
@@ -646,7 +750,7 @@ BREAKING CHANGES:
     - use scripts/wal2json to convert to json for debugging
 
 FEATURES:
-  - new `certifiers` pkg contains the tendermint light-client library (name subject to change)!
+  - new `Verifiers` pkg contains the tendermint light-client library (name subject to change)!
   - rpc: `/genesis` includes the `app_options` .
  - rpc: `/abci_query` takes an additional `height` parameter to support historical queries.
  - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height.
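To make the `/commit` change listed under BREAKING CHANGES concrete (the response's `header` and `commit` moving under a `signed_header` field), here is a minimal sketch that reads the new shape over plain HTTP. It is illustrative only: the endpoint name and the `signed_header` field come from the changelog, while the host/port and the nested field names shown are assumptions to verify against a live node.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// commitResponse models just enough of the 0.24.0 /commit JSON: the header
// now lives under result.signed_header rather than at the top level.
type commitResponse struct {
	Result struct {
		SignedHeader struct {
			Header struct {
				ChainID string `json:"chain_id"`
				Height  string `json:"height"` // int64s are JSON strings in the RPC
			} `json:"header"`
		} `json:"signed_header"`
	} `json:"result"`
}

func main() {
	// Assumes a local node with RPC on the default 26657 port.
	resp, err := http.Get("http://localhost:26657/commit?height=1")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}

	var cr commitResponse
	if err := json.Unmarshal(body, &cr); err != nil {
		panic(err)
	}
	fmt.Println("chain:", cr.Result.SignedHeader.Header.ChainID,
		"height:", cr.Result.SignedHeader.Header.Height)
}
```

Only the JSON shape changed; the data itself is what pre-0.24.0 clients read from the top-level fields.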
diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md new file mode 100644 index 00000000000..cd5dc06d609 --- /dev/null +++ b/CHANGELOG_PENDING.md @@ -0,0 +1,22 @@ +# Pending + +Special thanks to external contributors with PRs included in this release: + +BREAKING CHANGES: + +* CLI/RPC/Config + +* Apps + +* Go API + +* Blockchain Protocol + +* P2P Protocol + + +FEATURES: + +IMPROVEMENTS: + +BUG FIXES: diff --git a/Gopkg.lock b/Gopkg.lock index 557e2b181e5..8deb0637854 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -357,6 +357,13 @@ pruneopts = "UT" revision = "c4c61651e9e37fa117f53c5a906d3b63090d8445" +[[projects]] + digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f" + name = "github.com/tendermint/btcd" + packages = ["btcec"] + pruneopts = "UT" + revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" + [[projects]] branch = "master" digest = "1:087aaa7920e5d0bf79586feb57ce01c35c830396ab4392798112e8aae8c47722" @@ -370,12 +377,12 @@ revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" [[projects]] - digest = "1:e9113641c839c21d8eaeb2c907c7276af1eddeed988df8322168c56b7e06e0e1" + digest = "1:e0a2a4be1e20c305badc2b0a7a9ab7fef6da500763bec23ab81df3b5f9eec9ee" name = "github.com/tendermint/go-amino" packages = ["."] pruneopts = "UT" - revision = "2106ca61d91029c931fd54968c2bb02dc96b1412" - version = "0.10.1" + revision = "a8328986c1608950fa5d3d1c0472cccc4f8fc02c" + version = "v0.12.0-rc0" [[projects]] branch = "master" @@ -398,7 +405,7 @@ "salsa20/salsa", ] pruneopts = "UT" - revision = "c126467f60eb25f8f27e5a981f32a87e3965053f" + revision = "56440b844dfe139a8ac053f4ecac0b20b79058f4" [[projects]] digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1" @@ -504,7 +511,6 @@ analyzer-name = "dep" analyzer-version = 1 input-imports = [ - "github.com/btcsuite/btcd/btcec", "github.com/btcsuite/btcutil/base58", "github.com/btcsuite/btcutil/bech32", "github.com/ebuchman/fail-test", @@ -536,6 +542,7 @@ "github.com/syndtr/goleveldb/leveldb/errors", "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", + "github.com/tendermint/btcd/btcec", "github.com/tendermint/ed25519", "github.com/tendermint/ed25519/extra25519", "github.com/tendermint/go-amino", diff --git a/Gopkg.toml b/Gopkg.toml index 9d605f6cce1..d3bca19e860 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -58,7 +58,7 @@ [[constraint]] name = "github.com/tendermint/go-amino" - version = "=v0.10.1" + version = "v0.12.0-rc0" [[constraint]] name = "google.golang.org/grpc" @@ -85,6 +85,10 @@ name = "github.com/btcsuite/btcutil" revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4" +[[constraint]] + name = "github.com/tendermint/btcd" + revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df" + # Haven't made a release since 2016. [[constraint]] name = "github.com/prometheus/client_golang" diff --git a/Makefile b/Makefile index 22e9288612f..1fb3eacb3fa 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,6 @@ GOTOOLS = \ github.com/golang/dep/cmd/dep \ gopkg.in/alecthomas/gometalinter.v2 \ github.com/gogo/protobuf/protoc-gen-gogo \ - github.com/gogo/protobuf/gogoproto \ github.com/square/certstrap PACKAGES=$(shell go list ./...) @@ -11,6 +10,8 @@ INCLUDE = -I=. 
-I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protob

 BUILD_TAGS?='tendermint'
 BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"
+LINT_FLAGS = --exclude '.*\.pb\.go' --vendor --deadline=600s
+
 all: check build test install

 check: check_tools get_vendor_deps
@@ -37,13 +38,14 @@ protoc_all: protoc_libs protoc_abci protoc_grpc
 	## If you get the following error,
 	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
 	## See https://stackoverflow.com/a/25518702
+	## Note the $< here is substituted for the %.proto
+	## Note the $@ here is substituted for the %.pb.go
 	protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
-	@echo "--> adding nolint declarations to protobuf generated files"
-	@awk -i inplace '/^\s*package \w+/ { print "//nolint" }1' $@

 ########################################
 ### Build ABCI

+# see protobuf section above
 protoc_abci: abci/types/types.pb.go

 build_abci:
@@ -75,7 +77,7 @@ get_tools:

 update_tools:
 	@echo "--> Updating tools"
-	@go get -u $(GOTOOLS)
+	go get -u -v $(GOTOOLS)

 #Update dependencies
 get_vendor_deps:
@@ -85,13 +87,15 @@ get_vendor_deps:

 #For ABCI and libs
 get_protoc:
 	@# https://github.com/google/protobuf/releases
-	curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \
-		cd protobuf-3.4.1 && \
+	curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
+		cd protobuf-3.6.1 && \
 		DIST_LANG=cpp ./configure && \
 		make && \
-		make install && \
+		make check && \
+		sudo make install && \
+		sudo ldconfig && \
 		cd .. && \
-		rm -rf protobuf-3.4.1
+		rm -rf protobuf-3.6.1

 draw_deps:
 	@# requires brew install graphviz or apt-get install graphviz
@@ -200,11 +204,11 @@ vagrant_test:
 ### go tests
 test:
 	@echo "--> Running go test"
-	@GOCACHE=off go test $(PACKAGES)
+	@GOCACHE=off go test -p 1 $(PACKAGES)

 test_race:
 	@echo "--> Running go test --race"
-	@go test -v -race $(PACKAGES)
+	@GOCACHE=off go test -p 1 -v -race $(PACKAGES)


 ########################################
@@ -215,7 +219,7 @@ fmt:

 metalinter:
 	@echo "--> Running linter"
-	@gometalinter.v2 --vendor --deadline=600s --disable-all \
+	@gometalinter.v2 $(LINT_FLAGS) --disable-all \
 	--enable=deadcode \
 	--enable=gosimple \
 	--enable=misspell \
@@ -244,7 +248,7 @@ metalinter:

 metalinter_all:
 	@echo "--> Running linter (all)"
-	gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./...
+	gometalinter.v2 $(LINT_FLAGS) --enable-all --disable=lll ./...

 DESTINATION = ./index.html.md

diff --git a/UPGRADING.md b/UPGRADING.md
new file mode 100644
index 00000000000..16e397b2206
--- /dev/null
+++ b/UPGRADING.md
@@ -0,0 +1,66 @@
+# Upgrading Tendermint Core
+
+This guide provides steps to be followed when you upgrade your applications to
+a newer version of Tendermint Core.
+
+## v0.24.0
+
+The 0.24.0 release contains many changes to the state and types. It is not
+compatible with older versions, and there is no straightforward way to update
+old data to be compatible with the new version.
+
+To reset the state, run:
+
+```
+$ tendermint unsafe_reset_all
+```
+
+Here we summarize some other notable changes to be mindful of.
+
+### Config changes
+
+`p2p.max_num_peers` was removed in favor of `p2p.max_num_inbound_peers` and
+`p2p.max_num_outbound_peers`.
+
+```
+# Maximum number of inbound peers
+max_num_inbound_peers = 40
+
+# Maximum number of outbound peers to connect to, excluding persistent peers
+max_num_outbound_peers = 10
+```
+
+As you can see, the default ratio of inbound to outbound peers is 4:1. The
+reason is that we want it to be easier for new nodes to connect to the network.
+You can tweak these parameters to alter the network topology.
+
+### RPC Changes
+
+The result of `/commit` used to contain `header` and `commit` fields at the top
+level. These are now contained under the `signed_header` field.
+
+### ABCI Changes
+
+The header has been upgraded and contains new fields, but none of the existing
+fields were changed, except their order.
+
+The `Validator` type was split into two, one containing an `Address` and one
+containing a `PubKey`. When processing `RequestBeginBlock`, use the `Validator`
+type, which contains just the `Address`. When returning `ResponseEndBlock`, use
+the `ValidatorUpdate` type, which contains just the `PubKey`.
+
+### Validator Set Updates
+
+Validator set updates returned in ResponseEndBlock for height `H` used to take
+effect immediately at height `H+1`. Now they will be delayed one block, to take
+effect at height `H+2`. Note this means that the change will be seen by the ABCI
+app in the `RequestBeginBlock.LastCommitInfo` at block `H+3`. Apps were already
+required to maintain a map from validator addresses to pubkeys since v0.23 (when
+pubkeys were removed from RequestBeginBlock), but now they may need to track
+multiple validator sets at once to accommodate this delay.
+
+
+### Block Size
+
+The `ConsensusParams.BlockSize.MaxTxs` was removed in favor of
+`ConsensusParams.BlockSize.MaxBytes`, which is now enforced. This means blocks
+are limited only by byte size, not by number of transactions.
diff --git a/abci/README.md b/abci/README.md
index 493d862f03a..63b43e54f53 100644
--- a/abci/README.md
+++ b/abci/README.md
@@ -17,14 +17,14 @@ The community has provided a number of additional implementations, see the [Tende

 A detailed description of the ABCI methods and message types is contained in:

 - [A prose specification](specification.md)
-- [A protobuf file](https://github.com/tendermint/abci/blob/master/types/types.proto)
-- [A Go interface](https://github.com/tendermint/abci/blob/master/types/application.go).
+- [A protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
+- [A Go interface](https://github.com/tendermint/tendermint/blob/master/abci/types/application.go).

-For more background information on ABCI, motivations, and tendermint, please visit [the documentation](http://tendermint.readthedocs.io/en/master/).
+For more background information on ABCI, motivations, and tendermint, please visit [the documentation](https://tendermint.com/docs/).
 The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.

-## Protocl Buffers
+## Protocol Buffers

 To compile the protobuf file, run:

@@ -42,10 +42,13 @@ The `abci-cli` is a simple tool for debugging ABCI servers and running some
 example apps.
To install it: ``` -go get github.com/tendermint/abci -cd $GOPATH/src/github.com/tendermint/abci +mkdir -p $GOPATH/src/github.com/tendermint +cd $GOPATH/src/github.com/tendermint +git clone https://github.com/tendermint/tendermint.git +cd tendermint +make get_tools make get_vendor_deps -make install +make install_abci ``` ## Implementation @@ -91,7 +94,7 @@ Note the length-prefixing used in the socket implementation does not apply for G The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server. For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below). It can also be used to run some example applications. -See [the documentation](http://tendermint.readthedocs.io/en/master/) for more details. +See [the documentation](https://tendermint.com/docs/) for more details. ### Examples diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index a1f0994684f..4f37b17b660 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -22,6 +22,7 @@ type grpcClient struct { mustConnect bool client types.ABCIApplicationClient + conn *grpc.ClientConn mtx sync.Mutex addr string @@ -60,6 +61,7 @@ RETRY_LOOP: cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr) client := types.NewABCIApplicationClient(conn) + cli.conn = conn ENSURE_CONNECTED: for { @@ -78,12 +80,10 @@ RETRY_LOOP: func (cli *grpcClient) OnStop() { cli.BaseService.OnStop() - cli.mtx.Lock() - defer cli.mtx.Unlock() - // TODO: how to close conn? its not a net.Conn and grpc doesn't expose a Close() - /*if cli.client.conn != nil { - cli.client.conn.Close() - }*/ + + if cli.conn != nil { + cli.conn.Close() + } } func (cli *grpcClient) StopForError(err error) { diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 00bceec216d..b7b8e7d728f 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -477,11 +477,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error { } func cmdUnimplemented(cmd *cobra.Command, args []string) error { - // TODO: Print out all the sub-commands available msg := "unimplemented command" - if err := cmd.Help(); err != nil { - msg = err.Error() - } + if len(args) > 0 { msg += fmt.Sprintf(" args: [%s]", strings.Join(args, " ")) } @@ -489,6 +486,17 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error { Code: codeBad, Log: msg, }) + + fmt.Println("Available commands:") + fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short) + fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short) + fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short) + fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short) + fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short) + fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short) + fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short) + fmt.Println("Use \"[command] --help\" for more information about a command.") + return nil } diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 857e82bafdf..a77e7821f4a 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -6,7 +6,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" ) type CounterApplication struct { @@ -22,7 +21,7 @@ func NewCounterApplication(serial bool) *CounterApplication { } func (app *CounterApplication) Info(req 
types.RequestInfo) types.ResponseInfo { - return types.ResponseInfo{Data: cmn.Fmt("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)} + return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)} } func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption { @@ -34,7 +33,7 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo TODO Panic and have the ABCI server pass an exception. The client can call SetOptionSync() and get an `error`. return types.ResponseSetOption{ - Error: cmn.Fmt("Unknown key (%s) or value (%s)", key, value), + Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value), } */ return types.ResponseSetOption{} @@ -95,10 +94,10 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) { func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery { switch reqQuery.Path { case "hash": - return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.hashCount))} + return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))} case "tx": - return types.ResponseQuery{Value: []byte(cmn.Fmt("%v", app.txCount))} + return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))} default: - return types.ResponseQuery{Log: cmn.Fmt("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)} + return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)} } } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index 8fa3ae021b2..677a2a481a8 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -39,7 +39,7 @@ func TestGRPC(t *testing.T) { } func testStream(t *testing.T, app types.Application) { - numDeliverTxs := 200000 + numDeliverTxs := 20000 // Start the listener server := abciserver.NewSocketServer("unix://test.sock", app) @@ -72,7 +72,7 @@ func testStream(t *testing.T, app types.Application) { } if counter == numDeliverTxs { go func() { - time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow + time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow close(done) }() return @@ -148,7 +148,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) { t.Log("response", counter) if counter == numDeliverTxs { go func() { - time.Sleep(time.Second * 2) // Wait for a bit to allow counter overflow + time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow }() } diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 0e69fab9f6c..bb086dec0b9 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -7,12 +7,10 @@ import ( // RandVal creates one random validator, with a key derived // from the input value -func RandVal(i int) types.Validator { - addr := cmn.RandBytes(20) +func RandVal(i int) types.ValidatorUpdate { pubkey := cmn.RandBytes(32) power := cmn.RandUint16() + 1 - v := types.Ed25519Validator(pubkey, int64(power)) - v.Address = addr + v := types.Ed25519ValidatorUpdate(pubkey, int64(power)) return v } @@ -20,8 +18,8 @@ func RandVal(i int) types.Validator { // the application. 
Note that the keys are deterministically // derived from the index in the array, while the power is // random (Change this if not desired) -func RandVals(cnt int) []types.Validator { - res := make([]types.Validator, cnt) +func RandVals(cnt int) []types.ValidatorUpdate { + res := make([]types.ValidatorUpdate, cnt) for i := 0; i < cnt; i++ { res[i] = RandVal(i) } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 0f72b44eafa..d8d18d5e242 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -81,8 +81,8 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { app.state.Size += 1 tags := []cmn.KVPair{ - {[]byte("app.creator"), []byte("jae")}, - {[]byte("app.key"), key}, + {Key: []byte("app.creator"), Value: []byte("jae")}, + {Key: []byte("app.key"), Value: key}, } return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 6ef5a08f986..a18fb8d3c77 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -2,6 +2,7 @@ package kvstore import ( "bytes" + "fmt" "io/ioutil" "sort" "testing" @@ -121,11 +122,11 @@ func TestValUpdates(t *testing.T) { vals1, vals2 := vals[:nInit], kvstore.Validators() valsEqual(t, vals1, vals2) - var v1, v2, v3 types.Validator + var v1, v2, v3 types.ValidatorUpdate // add some validators v1, v2 = vals[nInit], vals[nInit+1] - diff := []types.Validator{v1, v2} + diff := []types.ValidatorUpdate{v1, v2} tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power) tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power) @@ -139,7 +140,7 @@ func TestValUpdates(t *testing.T) { v1.Power = 0 v2.Power = 0 v3.Power = 0 - diff = []types.Validator{v1, v2, v3} + diff = []types.ValidatorUpdate{v1, v2, v3} tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power) tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power) @@ -157,18 +158,18 @@ func TestValUpdates(t *testing.T) { } else { v1.Power = 5 } - diff = []types.Validator{v1} + diff = []types.ValidatorUpdate{v1} tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) makeApplyBlock(t, kvstore, 3, diff, tx1) - vals1 = append([]types.Validator{v1}, vals1[1:]...) + vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...) vals2 = kvstore.Validators() valsEqual(t, vals1, vals2) } -func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.Validator, txs ...[]byte) { +func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) { // make and apply block height := int64(heightInt) hash := []byte("foo") @@ -190,12 +191,12 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff } // order doesn't matter -func valsEqual(t *testing.T, vals1, vals2 []types.Validator) { +func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { if len(vals1) != len(vals2) { t.Fatalf("vals dont match in len. 
got %d, expected %d", len(vals2), len(vals1)) } - sort.Sort(types.Validators(vals1)) - sort.Sort(types.Validators(vals2)) + sort.Sort(types.ValidatorUpdates(vals1)) + sort.Sort(types.ValidatorUpdates(vals2)) for i, v1 := range vals1 { v2 := vals2[i] if !bytes.Equal(v1.PubKey.Data, v2.PubKey.Data) || @@ -207,7 +208,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.Validator) { func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) { // Start the listener - socket := cmn.Fmt("unix://%s.sock", name) + socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() server := abciserver.NewSocketServer(socket, app) @@ -229,7 +230,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) { // Start the listener - socket := cmn.Fmt("unix://%s.sock", name) + socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() gapp := types.NewGRPCApplication(app) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 12ccbab782b..f969eebfefa 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" ) @@ -26,7 +25,7 @@ type PersistentKVStoreApplication struct { app *KVStoreApplication // validator set - ValUpdates []types.Validator + ValUpdates []types.ValidatorUpdate logger log.Logger } @@ -102,7 +101,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t // Track the block hash and header information func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { // reset valset changes - app.ValUpdates = make([]types.Validator, 0) + app.ValUpdates = make([]types.ValidatorUpdate, 0) return types.ResponseBeginBlock{} } @@ -114,11 +113,11 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ //--------------------------------------------- // update validators -func (app *PersistentKVStoreApplication) Validators() (validators []types.Validator) { +func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) { itr := app.app.state.db.Iterator(nil, nil) for ; itr.Valid(); itr.Next() { if isValidatorTx(itr.Key()) { - validator := new(types.Validator) + validator := new(types.ValidatorUpdate) err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator) if err != nil { panic(err) @@ -130,7 +129,7 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida } func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte { - return []byte(cmn.Fmt("val:%X/%d", pubkey.Data, power)) + return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power)) } func isValidatorTx(tx []byte) bool { @@ -168,11 +167,11 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon } // update - return app.updateValidator(types.Ed25519Validator(pubkey, int64(power))) + return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power))) } // add, update, or remove a validator -func (app *PersistentKVStoreApplication) updateValidator(v types.Validator) 
types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { key := []byte("val:" + string(v.PubKey.Data)) if v.Power == 0 { // remove validator diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index f67297cd700..5daa1e6af10 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -12,11 +12,11 @@ import ( func InitChain(client abcicli.Client) error { total := 10 - vals := make([]types.Validator, total) + vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { pubkey := cmn.RandBytes(33) power := cmn.RandInt() - vals[i] = types.Ed25519Validator(pubkey, int64(power)) + vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power)) } _, err := client.InitChainSync(types.RequestInitChain{ Validators: vals, diff --git a/abci/types/messages_test.go b/abci/types/messages_test.go index 603e602aef2..14bc5718f1e 100644 --- a/abci/types/messages_test.go +++ b/abci/types/messages_test.go @@ -22,7 +22,7 @@ func TestMarshalJSON(t *testing.T) { Data: []byte("hello"), GasWanted: 43, Tags: []cmn.KVPair{ - {[]byte("pho"), []byte("bo")}, + {Key: []byte("pho"), Value: []byte("bo")}, }, } b, err = json.Marshal(&r1) @@ -83,7 +83,7 @@ func TestWriteReadMessage2(t *testing.T) { Log: phrase, GasWanted: 10, Tags: []cmn.KVPair{ - cmn.KVPair{[]byte("abc"), []byte("def")}, + cmn.KVPair{Key: []byte("abc"), Value: []byte("def")}, }, }, // TODO: add the rest diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index e5cd5fbf3e4..46cd8c5e887 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -4,8 +4,8 @@ const ( PubKeyEd25519 = "ed25519" ) -func Ed25519Validator(pubkey []byte, power int64) Validator { - return Validator{ +func Ed25519ValidatorUpdate(pubkey []byte, power int64) ValidatorUpdate { + return ValidatorUpdate{ // Address: PubKey: PubKey{ Type: PubKeyEd25519, diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index ac71d91c8f9..7873f0975ac 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1,7 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. 
// source: abci/types/types.proto -//nolint package types import proto "github.com/gogo/protobuf/proto" @@ -16,8 +15,10 @@ import time "time" import bytes "bytes" -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" @@ -59,7 +60,7 @@ func (m *Request) Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{0} + return fileDescriptor_types_c68d3007ea320b94, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -481,7 +482,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{1} + return fileDescriptor_types_c68d3007ea320b94, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +528,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{2} + return fileDescriptor_types_c68d3007ea320b94, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -567,7 +568,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{3} + return fileDescriptor_types_c68d3007ea320b94, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -616,7 +617,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{4} + return fileDescriptor_types_c68d3007ea320b94, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -660,21 +661,21 @@ func (m *RequestSetOption) GetValue() string { } type RequestInitChain struct { - Time time.Time `protobuf:"bytes,1,opt,name=time,stdtime" json:"time"` - ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - ConsensusParams *ConsensusParams `protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` - Validators []Validator `protobuf:"bytes,4,rep,name=validators" json:"validators"` - AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Time time.Time `protobuf:"bytes,1,opt,name=time,stdtime" json:"time"` + ChainId string `protobuf:"bytes,2,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + ConsensusParams *ConsensusParams 
`protobuf:"bytes,3,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + Validators []ValidatorUpdate `protobuf:"bytes,4,rep,name=validators" json:"validators"` + AppStateBytes []byte `protobuf:"bytes,5,opt,name=app_state_bytes,json=appStateBytes,proto3" json:"app_state_bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{5} + return fileDescriptor_types_c68d3007ea320b94, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -724,7 +725,7 @@ func (m *RequestInitChain) GetConsensusParams() *ConsensusParams { return nil } -func (m *RequestInitChain) GetValidators() []Validator { +func (m *RequestInitChain) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } @@ -752,7 +753,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{6} + return fileDescriptor_types_c68d3007ea320b94, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -824,7 +825,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{7} + return fileDescriptor_types_c68d3007ea320b94, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -892,7 +893,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{8} + return fileDescriptor_types_c68d3007ea320b94, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -939,7 +940,7 @@ func (m *RequestDeliverTx) Reset() { *m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{9} + return fileDescriptor_types_c68d3007ea320b94, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -986,7 +987,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{10} + return fileDescriptor_types_c68d3007ea320b94, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1032,7 +1033,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) 
} func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{11} + return fileDescriptor_types_c68d3007ea320b94, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1085,7 +1086,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{12} + return fileDescriptor_types_c68d3007ea320b94, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1538,7 +1539,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{13} + return fileDescriptor_types_c68d3007ea320b94, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1585,7 +1586,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{14} + return fileDescriptor_types_c68d3007ea320b94, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1631,7 +1632,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{15} + return fileDescriptor_types_c68d3007ea320b94, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1674,7 +1675,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{16} + return fileDescriptor_types_c68d3007ea320b94, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1746,7 +1747,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{17} + return fileDescriptor_types_c68d3007ea320b94, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1797,18 +1798,18 @@ func (m *ResponseSetOption) GetInfo() string { } type ResponseInitChain struct { - ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` - Validators []Validator `protobuf:"bytes,2,rep,name=validators" json:"validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ConsensusParams *ConsensusParams `protobuf:"bytes,1,opt,name=consensus_params,json=consensusParams" json:"consensus_params,omitempty"` + 
Validators []ValidatorUpdate `protobuf:"bytes,2,rep,name=validators" json:"validators"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{18} + return fileDescriptor_types_c68d3007ea320b94, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1844,7 +1845,7 @@ func (m *ResponseInitChain) GetConsensusParams() *ConsensusParams { return nil } -func (m *ResponseInitChain) GetValidators() []Validator { +func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { if m != nil { return m.Validators } @@ -1870,7 +1871,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{19} + return fileDescriptor_types_c68d3007ea320b94, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1966,7 +1967,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{20} + return fileDescriptor_types_c68d3007ea320b94, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2019,7 +2020,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{21} + return fileDescriptor_types_c68d3007ea320b94, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2114,7 +2115,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{22} + return fileDescriptor_types_c68d3007ea320b94, []int{22} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2193,19 +2194,19 @@ func (m *ResponseDeliverTx) GetTags() []common.KVPair { } type ResponseEndBlock struct { - ValidatorUpdates []Validator `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` - ConsensusParamUpdates *ConsensusParams `protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` - Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ValidatorUpdates []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates" json:"validator_updates"` + ConsensusParamUpdates *ConsensusParams 
`protobuf:"bytes,2,opt,name=consensus_param_updates,json=consensusParamUpdates" json:"consensus_param_updates,omitempty"` + Tags []common.KVPair `protobuf:"bytes,3,rep,name=tags" json:"tags,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{23} + return fileDescriptor_types_c68d3007ea320b94, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2234,7 +2235,7 @@ func (m *ResponseEndBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseEndBlock proto.InternalMessageInfo -func (m *ResponseEndBlock) GetValidatorUpdates() []Validator { +func (m *ResponseEndBlock) GetValidatorUpdates() []ValidatorUpdate { if m != nil { return m.ValidatorUpdates } @@ -2267,7 +2268,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{24} + return fileDescriptor_types_c68d3007ea320b94, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2318,7 +2319,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{25} + return fileDescriptor_types_c68d3007ea320b94, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2371,8 +2372,7 @@ func (m *ConsensusParams) GetBlockGossip() *BlockGossip { // BlockSize contains limits on the block size. 
type BlockSize struct { MaxBytes int32 `protobuf:"varint,1,opt,name=max_bytes,json=maxBytes,proto3" json:"max_bytes,omitempty"` - MaxTxs int32 `protobuf:"varint,2,opt,name=max_txs,json=maxTxs,proto3" json:"max_txs,omitempty"` - MaxGas int64 `protobuf:"varint,3,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` + MaxGas int64 `protobuf:"varint,2,opt,name=max_gas,json=maxGas,proto3" json:"max_gas,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2382,7 +2382,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{26} + return fileDescriptor_types_c68d3007ea320b94, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2418,13 +2418,6 @@ func (m *BlockSize) GetMaxBytes() int32 { return 0 } -func (m *BlockSize) GetMaxTxs() int32 { - if m != nil { - return m.MaxTxs - } - return 0 -} - func (m *BlockSize) GetMaxGas() int64 { if m != nil { return m.MaxGas @@ -2445,7 +2438,7 @@ func (m *TxSize) Reset() { *m = TxSize{} } func (m *TxSize) String() string { return proto.CompactTextString(m) } func (*TxSize) ProtoMessage() {} func (*TxSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{27} + return fileDescriptor_types_c68d3007ea320b94, []int{27} } func (m *TxSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2502,7 +2495,7 @@ func (m *BlockGossip) Reset() { *m = BlockGossip{} } func (m *BlockGossip) String() string { return proto.CompactTextString(m) } func (*BlockGossip) ProtoMessage() {} func (*BlockGossip) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{28} + return fileDescriptor_types_c68d3007ea320b94, []int{28} } func (m *BlockGossip) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2539,18 +2532,18 @@ func (m *BlockGossip) GetBlockPartSizeBytes() int32 { } type LastCommitInfo struct { - CommitRound int32 `protobuf:"varint,1,opt,name=commit_round,json=commitRound,proto3" json:"commit_round,omitempty"` - Validators []SigningValidator `protobuf:"bytes,2,rep,name=validators" json:"validators"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Round int32 `protobuf:"varint,1,opt,name=round,proto3" json:"round,omitempty"` + Votes []VoteInfo `protobuf:"bytes,2,rep,name=votes" json:"votes"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{29} + return fileDescriptor_types_c68d3007ea320b94, []int{29} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2579,45 +2572,51 @@ func (m *LastCommitInfo) XXX_DiscardUnknown() { var xxx_messageInfo_LastCommitInfo proto.InternalMessageInfo -func (m *LastCommitInfo) GetCommitRound() int32 { +func (m *LastCommitInfo) GetRound() int32 { if m != nil { - return m.CommitRound + return m.Round } return 0 } -func (m *LastCommitInfo) GetValidators() []SigningValidator { +func (m *LastCommitInfo) GetVotes() 
[]VoteInfo { if m != nil { - return m.Validators + return m.Votes } return nil } -// just the minimum the app might need type Header struct { - // basics - ChainID string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Time time.Time `protobuf:"bytes,3,opt,name=time,stdtime" json:"time"` - // txs - NumTxs int32 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` - TotalTxs int64 `protobuf:"varint,5,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` - // hashes - LastBlockHash []byte `protobuf:"bytes,6,opt,name=last_block_hash,json=lastBlockHash,proto3" json:"last_block_hash,omitempty"` - ValidatorsHash []byte `protobuf:"bytes,7,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` - AppHash []byte `protobuf:"bytes,8,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` - // consensus - Proposer Validator `protobuf:"bytes,9,opt,name=proposer" json:"proposer"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + // basic block info + ChainID string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` + Time time.Time `protobuf:"bytes,3,opt,name=time,stdtime" json:"time"` + NumTxs int64 `protobuf:"varint,4,opt,name=num_txs,json=numTxs,proto3" json:"num_txs,omitempty"` + TotalTxs int64 `protobuf:"varint,5,opt,name=total_txs,json=totalTxs,proto3" json:"total_txs,omitempty"` + // prev block info + LastBlockId BlockID `protobuf:"bytes,6,opt,name=last_block_id,json=lastBlockId" json:"last_block_id"` + // hashes of block data + LastCommitHash []byte `protobuf:"bytes,7,opt,name=last_commit_hash,json=lastCommitHash,proto3" json:"last_commit_hash,omitempty"` + DataHash []byte `protobuf:"bytes,8,opt,name=data_hash,json=dataHash,proto3" json:"data_hash,omitempty"` + // hashes from the app output from the prev block + ValidatorsHash []byte `protobuf:"bytes,9,opt,name=validators_hash,json=validatorsHash,proto3" json:"validators_hash,omitempty"` + NextValidatorsHash []byte `protobuf:"bytes,10,opt,name=next_validators_hash,json=nextValidatorsHash,proto3" json:"next_validators_hash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,11,opt,name=consensus_hash,json=consensusHash,proto3" json:"consensus_hash,omitempty"` + AppHash []byte `protobuf:"bytes,12,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,13,opt,name=last_results_hash,json=lastResultsHash,proto3" json:"last_results_hash,omitempty"` + // consensus info + EvidenceHash []byte `protobuf:"bytes,14,opt,name=evidence_hash,json=evidenceHash,proto3" json:"evidence_hash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,15,opt,name=proposer_address,json=proposerAddress,proto3" json:"proposer_address,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{30} + return fileDescriptor_types_c68d3007ea320b94, []int{30} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-2667,7 +2666,7 @@ func (m *Header) GetTime() time.Time { return time.Time{} } -func (m *Header) GetNumTxs() int32 { +func (m *Header) GetNumTxs() int64 { if m != nil { return m.NumTxs } @@ -2681,9 +2680,23 @@ func (m *Header) GetTotalTxs() int64 { return 0 } -func (m *Header) GetLastBlockHash() []byte { +func (m *Header) GetLastBlockId() BlockID { + if m != nil { + return m.LastBlockId + } + return BlockID{} +} + +func (m *Header) GetLastCommitHash() []byte { if m != nil { - return m.LastBlockHash + return m.LastCommitHash + } + return nil +} + +func (m *Header) GetDataHash() []byte { + if m != nil { + return m.DataHash } return nil } @@ -2695,6 +2708,20 @@ func (m *Header) GetValidatorsHash() []byte { return nil } +func (m *Header) GetNextValidatorsHash() []byte { + if m != nil { + return m.NextValidatorsHash + } + return nil +} + +func (m *Header) GetConsensusHash() []byte { + if m != nil { + return m.ConsensusHash + } + return nil +} + func (m *Header) GetAppHash() []byte { if m != nil { return m.AppHash @@ -2702,17 +2729,141 @@ func (m *Header) GetAppHash() []byte { return nil } -func (m *Header) GetProposer() Validator { +func (m *Header) GetLastResultsHash() []byte { if m != nil { - return m.Proposer + return m.LastResultsHash } - return Validator{} + return nil +} + +func (m *Header) GetEvidenceHash() []byte { + if m != nil { + return m.EvidenceHash + } + return nil +} + +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + +type BlockID struct { + Hash []byte `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"` + PartsHeader PartSetHeader `protobuf:"bytes,2,opt,name=parts_header,json=partsHeader" json:"parts_header"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_types_c68d3007ea320b94, []int{31} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) +} +func (m *BlockID) XXX_Size() int { + return m.Size() +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockID proto.InternalMessageInfo + +func (m *BlockID) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *BlockID) GetPartsHeader() PartSetHeader { + if m != nil { + return m.PartsHeader + } + return PartSetHeader{} +} + +type PartSetHeader struct { + Total int32 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return 
fileDescriptor_types_c68d3007ea320b94, []int{32} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) +} +func (m *PartSetHeader) XXX_Size() int { + return m.Size() +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo + +func (m *PartSetHeader) GetTotal() int32 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *PartSetHeader) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil } // Validator type Validator struct { - Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` - PubKey PubKey `protobuf:"bytes,2,opt,name=pub_key,json=pubKey" json:"pub_key"` + Address []byte `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"` + // PubKey pub_key = 2 [(gogoproto.nullable)=false]; Power int64 `protobuf:"varint,3,opt,name=power,proto3" json:"power,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -2723,7 +2874,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{31} + return fileDescriptor_types_c68d3007ea320b94, []int{33} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2759,22 +2910,71 @@ func (m *Validator) GetAddress() []byte { return nil } -func (m *Validator) GetPubKey() PubKey { +func (m *Validator) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +// ValidatorUpdate +type ValidatorUpdate struct { + PubKey PubKey `protobuf:"bytes,1,opt,name=pub_key,json=pubKey" json:"pub_key"` + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } +func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } +func (*ValidatorUpdate) ProtoMessage() {} +func (*ValidatorUpdate) Descriptor() ([]byte, []int) { + return fileDescriptor_types_c68d3007ea320b94, []int{34} +} +func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorUpdate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ValidatorUpdate) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorUpdate.Merge(dst, src) +} +func (m *ValidatorUpdate) XXX_Size() int { + return m.Size() +} +func (m *ValidatorUpdate) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorUpdate.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorUpdate proto.InternalMessageInfo + +func (m *ValidatorUpdate) GetPubKey() PubKey { if m != nil { return m.PubKey } 
return PubKey{} } -func (m *Validator) GetPower() int64 { +func (m *ValidatorUpdate) GetPower() int64 { if m != nil { return m.Power } return 0 } -// Validator with an extra bool -type SigningValidator struct { +// VoteInfo +type VoteInfo struct { Validator Validator `protobuf:"bytes,1,opt,name=validator" json:"validator"` SignedLastBlock bool `protobuf:"varint,2,opt,name=signed_last_block,json=signedLastBlock,proto3" json:"signed_last_block,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -2782,18 +2982,18 @@ type SigningValidator struct { XXX_sizecache int32 `json:"-"` } -func (m *SigningValidator) Reset() { *m = SigningValidator{} } -func (m *SigningValidator) String() string { return proto.CompactTextString(m) } -func (*SigningValidator) ProtoMessage() {} -func (*SigningValidator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{32} +func (m *VoteInfo) Reset() { *m = VoteInfo{} } +func (m *VoteInfo) String() string { return proto.CompactTextString(m) } +func (*VoteInfo) ProtoMessage() {} +func (*VoteInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_types_c68d3007ea320b94, []int{35} } -func (m *SigningValidator) XXX_Unmarshal(b []byte) error { +func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *SigningValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *VoteInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_SigningValidator.Marshal(b, m, deterministic) + return xxx_messageInfo_VoteInfo.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) @@ -2803,26 +3003,26 @@ func (m *SigningValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, er return b[:n], nil } } -func (dst *SigningValidator) XXX_Merge(src proto.Message) { - xxx_messageInfo_SigningValidator.Merge(dst, src) +func (dst *VoteInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoteInfo.Merge(dst, src) } -func (m *SigningValidator) XXX_Size() int { +func (m *VoteInfo) XXX_Size() int { return m.Size() } -func (m *SigningValidator) XXX_DiscardUnknown() { - xxx_messageInfo_SigningValidator.DiscardUnknown(m) +func (m *VoteInfo) XXX_DiscardUnknown() { + xxx_messageInfo_VoteInfo.DiscardUnknown(m) } -var xxx_messageInfo_SigningValidator proto.InternalMessageInfo +var xxx_messageInfo_VoteInfo proto.InternalMessageInfo -func (m *SigningValidator) GetValidator() Validator { +func (m *VoteInfo) GetValidator() Validator { if m != nil { return m.Validator } return Validator{} } -func (m *SigningValidator) GetSignedLastBlock() bool { +func (m *VoteInfo) GetSignedLastBlock() bool { if m != nil { return m.SignedLastBlock } @@ -2841,7 +3041,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{33} + return fileDescriptor_types_c68d3007ea320b94, []int{36} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2899,7 +3099,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_types_d8da2202f45d32c0, []int{34} + return fileDescriptor_types_c68d3007ea320b94, []int{37} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3026,10 
+3226,16 @@ func init() { golang_proto.RegisterType((*LastCommitInfo)(nil), "types.LastCommitInfo") proto.RegisterType((*Header)(nil), "types.Header") golang_proto.RegisterType((*Header)(nil), "types.Header") + proto.RegisterType((*BlockID)(nil), "types.BlockID") + golang_proto.RegisterType((*BlockID)(nil), "types.BlockID") + proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader") + golang_proto.RegisterType((*PartSetHeader)(nil), "types.PartSetHeader") proto.RegisterType((*Validator)(nil), "types.Validator") golang_proto.RegisterType((*Validator)(nil), "types.Validator") - proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") - golang_proto.RegisterType((*SigningValidator)(nil), "types.SigningValidator") + proto.RegisterType((*ValidatorUpdate)(nil), "types.ValidatorUpdate") + golang_proto.RegisterType((*ValidatorUpdate)(nil), "types.ValidatorUpdate") + proto.RegisterType((*VoteInfo)(nil), "types.VoteInfo") + golang_proto.RegisterType((*VoteInfo)(nil), "types.VoteInfo") proto.RegisterType((*PubKey)(nil), "types.PubKey") golang_proto.RegisterType((*PubKey)(nil), "types.PubKey") proto.RegisterType((*Evidence)(nil), "types.Evidence") @@ -4474,9 +4680,6 @@ func (this *BlockSize) Equal(that interface{}) bool { if this.MaxBytes != that1.MaxBytes { return false } - if this.MaxTxs != that1.MaxTxs { - return false - } if this.MaxGas != that1.MaxGas { return false } @@ -4561,14 +4764,14 @@ func (this *LastCommitInfo) Equal(that interface{}) bool { } else if this == nil { return false } - if this.CommitRound != that1.CommitRound { + if this.Round != that1.Round { return false } - if len(this.Validators) != len(that1.Validators) { + if len(this.Votes) != len(that1.Votes) { return false } - for i := range this.Validators { - if !this.Validators[i].Equal(&that1.Validators[i]) { + for i := range this.Votes { + if !this.Votes[i].Equal(&that1.Votes[i]) { return false } } @@ -4611,16 +4814,94 @@ func (this *Header) Equal(that interface{}) bool { if this.TotalTxs != that1.TotalTxs { return false } - if !bytes.Equal(this.LastBlockHash, that1.LastBlockHash) { + if !this.LastBlockId.Equal(&that1.LastBlockId) { + return false + } + if !bytes.Equal(this.LastCommitHash, that1.LastCommitHash) { + return false + } + if !bytes.Equal(this.DataHash, that1.DataHash) { return false } if !bytes.Equal(this.ValidatorsHash, that1.ValidatorsHash) { return false } + if !bytes.Equal(this.NextValidatorsHash, that1.NextValidatorsHash) { + return false + } + if !bytes.Equal(this.ConsensusHash, that1.ConsensusHash) { + return false + } if !bytes.Equal(this.AppHash, that1.AppHash) { return false } - if !this.Proposer.Equal(&that1.Proposer) { + if !bytes.Equal(this.LastResultsHash, that1.LastResultsHash) { + return false + } + if !bytes.Equal(this.EvidenceHash, that1.EvidenceHash) { + return false + } + if !bytes.Equal(this.ProposerAddress, that1.ProposerAddress) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *BlockID) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BlockID) + if !ok { + that2, ok := that.(BlockID) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Hash, that1.Hash) { + return false + } + if !this.PartsHeader.Equal(&that1.PartsHeader) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + 
} + return true +} +func (this *PartSetHeader) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*PartSetHeader) + if !ok { + that2, ok := that.(PartSetHeader) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Total != that1.Total { + return false + } + if !bytes.Equal(this.Hash, that1.Hash) { return false } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { @@ -4650,6 +4931,33 @@ func (this *Validator) Equal(that interface{}) bool { if !bytes.Equal(this.Address, that1.Address) { return false } + if this.Power != that1.Power { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ValidatorUpdate) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ValidatorUpdate) + if !ok { + that2, ok := that.(ValidatorUpdate) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } if !this.PubKey.Equal(&that1.PubKey) { return false } @@ -4661,14 +4969,14 @@ func (this *Validator) Equal(that interface{}) bool { } return true } -func (this *SigningValidator) Equal(that interface{}) bool { +func (this *VoteInfo) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SigningValidator) + that1, ok := that.(*VoteInfo) if !ok { - that2, ok := that.(SigningValidator) + that2, ok := that.(VoteInfo) if ok { that1 = &that2 } else { @@ -4769,8 +5077,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for ABCIApplication service - +// ABCIApplicationClient is the client API for ABCIApplication service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ABCIApplicationClient interface { Echo(ctx context.Context, in *RequestEcho, opts ...grpc.CallOption) (*ResponseEcho, error) Flush(ctx context.Context, in *RequestFlush, opts ...grpc.CallOption) (*ResponseFlush, error) @@ -6502,13 +6811,8 @@ func (m *BlockSize) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.MaxBytes)) } - if m.MaxTxs != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintTypes(dAtA, i, uint64(m.MaxTxs)) - } if m.MaxGas != 0 { - dAtA[i] = 0x18 + dAtA[i] = 0x10 i++ i = encodeVarintTypes(dAtA, i, uint64(m.MaxGas)) } @@ -6590,13 +6894,13 @@ func (m *LastCommitInfo) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.CommitRound != 0 { + if m.Round != 0 { dAtA[i] = 0x8 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.CommitRound)) + i = encodeVarintTypes(dAtA, i, uint64(m.Round)) } - if len(m.Validators) > 0 { - for _, msg := range m.Validators { + if len(m.Votes) > 0 { + for _, msg := range m.Votes { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) @@ -6657,32 +6961,135 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.TotalTxs)) } - if len(m.LastBlockHash) > 0 { - dAtA[i] = 0x32 - i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.LastBlockHash))) - i += copy(dAtA[i:], m.LastBlockHash) + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) + n36, err := m.LastBlockId.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.ValidatorsHash) > 0 { + i += n36 + if len(m.LastCommitHash) > 0 { dAtA[i] = 0x3a i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastCommitHash))) + i += copy(dAtA[i:], m.LastCommitHash) + } + if len(m.DataHash) > 0 { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.DataHash))) + i += copy(dAtA[i:], m.DataHash) + } + if len(m.ValidatorsHash) > 0 { + dAtA[i] = 0x4a + i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.ValidatorsHash))) i += copy(dAtA[i:], m.ValidatorsHash) } + if len(m.NextValidatorsHash) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NextValidatorsHash))) + i += copy(dAtA[i:], m.NextValidatorsHash) + } + if len(m.ConsensusHash) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConsensusHash))) + i += copy(dAtA[i:], m.ConsensusHash) + } if len(m.AppHash) > 0 { - dAtA[i] = 0x42 + dAtA[i] = 0x62 i++ i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) i += copy(dAtA[i:], m.AppHash) } - dAtA[i] = 0x4a + if len(m.LastResultsHash) > 0 { + dAtA[i] = 0x6a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.LastResultsHash))) + i += copy(dAtA[i:], m.LastResultsHash) + } + if len(m.EvidenceHash) > 0 { + dAtA[i] = 0x72 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EvidenceHash))) + i += copy(dAtA[i:], m.EvidenceHash) + } + if len(m.ProposerAddress) > 0 { + dAtA[i] = 0x7a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ProposerAddress))) + i += copy(dAtA[i:], m.ProposerAddress) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *BlockID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } + dAtA[i] = 
0x12 i++ - i = encodeVarintTypes(dAtA, i, uint64(m.Proposer.Size())) - n36, err := m.Proposer.MarshalTo(dAtA[i:]) + i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) + n37, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *PartSetHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartSetHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Total != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Total)) + } + if len(m.Hash) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i += copy(dAtA[i:], m.Hash) + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -6710,16 +7117,42 @@ func (m *Validator) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Address))) i += copy(dAtA[i:], m.Address) } - dAtA[i] = 0x12 + if m.Power != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ValidatorUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n37, err := m.PubKey.MarshalTo(dAtA[i:]) + n38, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if m.Power != 0 { - dAtA[i] = 0x18 + dAtA[i] = 0x10 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Power)) } @@ -6729,7 +7162,7 @@ func (m *Validator) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *SigningValidator) Marshal() (dAtA []byte, err error) { +func (m *VoteInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -6739,7 +7172,7 @@ func (m *SigningValidator) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SigningValidator) MarshalTo(dAtA []byte) (int, error) { +func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -6747,11 +7180,11 @@ func (m *SigningValidator) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n38, err := m.Validator.MarshalTo(dAtA[i:]) + n39, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -6825,11 +7258,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n39, err := m.Validator.MarshalTo(dAtA[i:]) + n40, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -6838,11 +7271,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n40, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n41, err := 
github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -6997,9 +7430,9 @@ func NewPopulatedRequestInitChain(r randyTypes, easy bool) *RequestInitChain { } if r.Intn(10) != 0 { v2 := r.Intn(5) - this.Validators = make([]Validator, v2) + this.Validators = make([]ValidatorUpdate, v2) for i := 0; i < v2; i++ { - v3 := NewPopulatedValidator(r, easy) + v3 := NewPopulatedValidatorUpdate(r, easy) this.Validators[i] = *v3 } } @@ -7262,9 +7695,9 @@ func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { } if r.Intn(10) != 0 { v14 := r.Intn(5) - this.Validators = make([]Validator, v14) + this.Validators = make([]ValidatorUpdate, v14) for i := 0; i < v14; i++ { - v15 := NewPopulatedValidator(r, easy) + v15 := NewPopulatedValidatorUpdate(r, easy) this.Validators[i] = *v15 } } @@ -7392,9 +7825,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(10) != 0 { v27 := r.Intn(5) - this.ValidatorUpdates = make([]Validator, v27) + this.ValidatorUpdates = make([]ValidatorUpdate, v27) for i := 0; i < v27; i++ { - v28 := NewPopulatedValidator(r, easy) + v28 := NewPopulatedValidatorUpdate(r, easy) this.ValidatorUpdates[i] = *v28 } } @@ -7451,16 +7884,12 @@ func NewPopulatedBlockSize(r randyTypes, easy bool) *BlockSize { if r.Intn(2) == 0 { this.MaxBytes *= -1 } - this.MaxTxs = int32(r.Int31()) - if r.Intn(2) == 0 { - this.MaxTxs *= -1 - } this.MaxGas = int64(r.Int63()) if r.Intn(2) == 0 { this.MaxGas *= -1 } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -7495,16 +7924,16 @@ func NewPopulatedBlockGossip(r randyTypes, easy bool) *BlockGossip { func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this := &LastCommitInfo{} - this.CommitRound = int32(r.Int31()) + this.Round = int32(r.Int31()) if r.Intn(2) == 0 { - this.CommitRound *= -1 + this.Round *= -1 } if r.Intn(10) != 0 { v32 := r.Intn(5) - this.Validators = make([]SigningValidator, v32) + this.Votes = make([]VoteInfo, v32) for i := 0; i < v32; i++ { - v33 := NewPopulatedSigningValidator(r, easy) - this.Validators[i] = *v33 + v33 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v33 } } if !easy && r.Intn(10) != 0 { @@ -7522,7 +7951,7 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { } v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) this.Time = *v34 - this.NumTxs = int32(r.Int31()) + this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 } @@ -7530,38 +7959,98 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v35 := r.Intn(100) - this.LastBlockHash = make([]byte, v35) - for i := 0; i < v35; i++ { - this.LastBlockHash[i] = byte(r.Intn(256)) - } + v35 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v35 v36 := r.Intn(100) - this.ValidatorsHash = make([]byte, v36) + this.LastCommitHash = make([]byte, v36) for i := 0; i < v36; i++ { - this.ValidatorsHash[i] = byte(r.Intn(256)) + this.LastCommitHash[i] = byte(r.Intn(256)) } v37 := r.Intn(100) - this.AppHash = make([]byte, v37) + this.DataHash = make([]byte, v37) for i := 0; i < v37; i++ { + this.DataHash[i] = byte(r.Intn(256)) + } + v38 := r.Intn(100) + this.ValidatorsHash = make([]byte, v38) + for i := 0; i < v38; i++ { + this.ValidatorsHash[i] = byte(r.Intn(256)) + } + v39 
:= r.Intn(100) + this.NextValidatorsHash = make([]byte, v39) + for i := 0; i < v39; i++ { + this.NextValidatorsHash[i] = byte(r.Intn(256)) + } + v40 := r.Intn(100) + this.ConsensusHash = make([]byte, v40) + for i := 0; i < v40; i++ { + this.ConsensusHash[i] = byte(r.Intn(256)) + } + v41 := r.Intn(100) + this.AppHash = make([]byte, v41) + for i := 0; i < v41; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v38 := NewPopulatedValidator(r, easy) - this.Proposer = *v38 + v42 := r.Intn(100) + this.LastResultsHash = make([]byte, v42) + for i := 0; i < v42; i++ { + this.LastResultsHash[i] = byte(r.Intn(256)) + } + v43 := r.Intn(100) + this.EvidenceHash = make([]byte, v43) + for i := 0; i < v43; i++ { + this.EvidenceHash[i] = byte(r.Intn(256)) + } + v44 := r.Intn(100) + this.ProposerAddress = make([]byte, v44) + for i := 0; i < v44; i++ { + this.ProposerAddress[i] = byte(r.Intn(256)) + } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 10) + this.XXX_unrecognized = randUnrecognizedTypes(r, 16) + } + return this +} + +func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { + this := &BlockID{} + v45 := r.Intn(100) + this.Hash = make([]byte, v45) + for i := 0; i < v45; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + v46 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v46 + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { + this := &PartSetHeader{} + this.Total = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Total *= -1 + } + v47 := r.Intn(100) + this.Hash = make([]byte, v47) + for i := 0; i < v47; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v39 := r.Intn(100) - this.Address = make([]byte, v39) - for i := 0; i < v39; i++ { + v48 := r.Intn(100) + this.Address = make([]byte, v48) + for i := 0; i < v48; i++ { this.Address[i] = byte(r.Intn(256)) } - v40 := NewPopulatedPubKey(r, easy) - this.PubKey = *v40 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -7572,10 +8061,24 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { return this } -func NewPopulatedSigningValidator(r randyTypes, easy bool) *SigningValidator { - this := &SigningValidator{} - v41 := NewPopulatedValidator(r, easy) - this.Validator = *v41 +func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { + this := &ValidatorUpdate{} + v49 := NewPopulatedPubKey(r, easy) + this.PubKey = *v49 + this.Power = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Power *= -1 + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { + this := &VoteInfo{} + v50 := NewPopulatedValidator(r, easy) + this.Validator = *v50 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -7586,9 +8089,9 @@ func NewPopulatedSigningValidator(r randyTypes, easy bool) *SigningValidator { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v42 := r.Intn(100) - this.Data = make([]byte, v42) - for i := 0; i < v42; i++ { + v51 := r.Intn(100) + this.Data = make([]byte, v51) + for i := 0; i < v51; i++ { 
this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7600,14 +8103,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v43 := NewPopulatedValidator(r, easy) - this.Validator = *v43 + v52 := NewPopulatedValidator(r, easy) + this.Validator = *v52 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v44 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v44 + v53 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v53 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -7637,9 +8140,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v45 := r.Intn(100) - tmps := make([]rune, v45) - for i := 0; i < v45; i++ { + v54 := r.Intn(100) + tmps := make([]rune, v54) + for i := 0; i < v54; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -7661,11 +8164,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v46 := r.Int63() + v55 := r.Int63() if r.Intn(2) == 0 { - v46 *= -1 + v55 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v46)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v55)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8383,9 +8886,6 @@ func (m *BlockSize) Size() (n int) { if m.MaxBytes != 0 { n += 1 + sovTypes(uint64(m.MaxBytes)) } - if m.MaxTxs != 0 { - n += 1 + sovTypes(uint64(m.MaxTxs)) - } if m.MaxGas != 0 { n += 1 + sovTypes(uint64(m.MaxGas)) } @@ -8425,11 +8925,11 @@ func (m *BlockGossip) Size() (n int) { func (m *LastCommitInfo) Size() (n int) { var l int _ = l - if m.CommitRound != 0 { - n += 1 + sovTypes(uint64(m.CommitRound)) + if m.Round != 0 { + n += 1 + sovTypes(uint64(m.Round)) } - if len(m.Validators) > 0 { - for _, e := range m.Validators { + if len(m.Votes) > 0 { + for _, e := range m.Votes { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -8458,7 +8958,13 @@ func (m *Header) Size() (n int) { if m.TotalTxs != 0 { n += 1 + sovTypes(uint64(m.TotalTxs)) } - l = len(m.LastBlockHash) + l = m.LastBlockId.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.LastCommitHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.DataHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } @@ -8466,11 +8972,44 @@ func (m *Header) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + l = len(m.NextValidatorsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConsensusHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } l = len(m.AppHash) if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = m.Proposer.Size() + l = len(m.LastResultsHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.EvidenceHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ProposerAddress) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *BlockID) Size() (n int) { + var l int + _ = l + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.PartsHeader.Size() n += 1 + l + sovTypes(uint64(l)) 
if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -8478,6 +9017,22 @@ func (m *Header) Size() (n int) { return n } +func (m *PartSetHeader) Size() (n int) { + var l int + _ = l + if m.Total != 0 { + n += 1 + sovTypes(uint64(m.Total)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Validator) Size() (n int) { var l int _ = l @@ -8485,6 +9040,18 @@ func (m *Validator) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.Power != 0 { + n += 1 + sovTypes(uint64(m.Power)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ValidatorUpdate) Size() (n int) { + var l int + _ = l l = m.PubKey.Size() n += 1 + l + sovTypes(uint64(l)) if m.Power != 0 { @@ -8496,7 +9063,7 @@ func (m *Validator) Size() (n int) { return n } -func (m *SigningValidator) Size() (n int) { +func (m *VoteInfo) Size() (n int) { var l int _ = l l = m.Validator.Size() @@ -9433,7 +10000,7 @@ func (m *RequestInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, Validator{}) + m.Validators = append(m.Validators, ValidatorUpdate{}) if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -11120,7 +11687,7 @@ func (m *ResponseInitChain) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, Validator{}) + m.Validators = append(m.Validators, ValidatorUpdate{}) if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -11999,7 +12566,7 @@ func (m *ResponseEndBlock) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValidatorUpdates = append(m.ValidatorUpdates, Validator{}) + m.ValidatorUpdates = append(m.ValidatorUpdates, ValidatorUpdate{}) if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -12371,25 +12938,6 @@ func (m *BlockSize) Unmarshal(dAtA []byte) error { } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxTxs", wireType) - } - m.MaxTxs = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxTxs |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxGas", wireType) } @@ -12620,9 +13168,9 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CommitRound", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Round", wireType) } - m.CommitRound = 0 + m.Round = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12632,14 +13180,14 @@ func (m *LastCommitInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CommitRound |= (int32(b) & 0x7F) << shift + m.Round |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Validators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Votes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12663,8 +13211,8 @@ func (m 
*LastCommitInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Validators = append(m.Validators, SigningValidator{}) - if err := m.Validators[len(m.Validators)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Votes = append(m.Votes, VoteInfo{}) + if err := m.Votes[len(m.Votes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12811,7 +13359,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumTxs |= (int32(b) & 0x7F) << shift + m.NumTxs |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12837,9 +13385,9 @@ func (m *Header) Unmarshal(dAtA []byte) error { } case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastBlockHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastBlockId", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -12849,26 +13397,87 @@ func (m *Header) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.LastBlockHash = append(m.LastBlockHash[:0], dAtA[iNdEx:postIndex]...) - if m.LastBlockHash == nil { - m.LastBlockHash = []byte{} + if err := m.LastBlockId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastCommitHash = append(m.LastCommitHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastCommitHash == nil { + m.LastCommitHash = []byte{} + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataHash = append(m.DataHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.DataHash == nil { + m.DataHash = []byte{} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorsHash", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -12897,7 +13506,69 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.ValidatorsHash = []byte{} } iNdEx = postIndex - case 8: + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextValidatorsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextValidatorsHash = append(m.NextValidatorsHash[:0], dAtA[iNdEx:postIndex]...) + if m.NextValidatorsHash == nil { + m.NextValidatorsHash = []byte{} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsensusHash = append(m.ConsensusHash[:0], dAtA[iNdEx:postIndex]...) + if m.ConsensusHash == nil { + m.ConsensusHash = []byte{} + } + iNdEx = postIndex + case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) } @@ -12928,9 +13599,184 @@ func (m *Header) Unmarshal(dAtA []byte) error { m.AppHash = []byte{} } iNdEx = postIndex - case 9: + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastResultsHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LastResultsHash = append(m.LastResultsHash[:0], dAtA[iNdEx:postIndex]...) + if m.LastResultsHash == nil { + m.LastResultsHash = []byte{} + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvidenceHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvidenceHash = append(m.EvidenceHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.EvidenceHash == nil { + m.EvidenceHash = []byte{} + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProposerAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProposerAddress = append(m.ProposerAddress[:0], dAtA[iNdEx:postIndex]...) + if m.ProposerAddress == nil { + m.ProposerAddress = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BlockID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BlockID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BlockID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proposer", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartsHeader", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12954,7 +13800,7 @@ func (m *Header) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Proposer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.PartsHeader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -12980,6 +13826,107 @@ func (m *Header) Unmarshal(dAtA []byte) error { } return nil } +func (m *PartSetHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartSetHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartSetHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + m.Total = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Total |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Validator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -13040,7 +13987,77 @@ func (m *Validator) Unmarshal(dAtA []byte) error { m.Address = []byte{} } iNdEx = postIndex - case 2: + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) } @@ -13070,7 +14087,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) } @@ -13111,7 +14128,7 @@ func (m *Validator) Unmarshal(dAtA []byte) error { } return nil } -func (m *SigningValidator) Unmarshal(dAtA []byte) error { +func (m *VoteInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -13134,10 +14151,10 @@ func (m *SigningValidator) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SigningValidator: wiretype end group for non-group") + return fmt.Errorf("proto: VoteInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SigningValidator: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: VoteInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -13606,134 +14623,143 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_d8da2202f45d32c0) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_c68d3007ea320b94) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_d8da2202f45d32c0) + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_c68d3007ea320b94) } -var fileDescriptor_types_d8da2202f45d32c0 = []byte{ - // 1959 bytes of a gzipped FileDescriptorProto +var fileDescriptor_types_c68d3007ea320b94 = []byte{ + // 2099 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x73, 0x1b, 0x49, - 0x15, 0xf7, 0xc8, 0xb2, 0xa4, 0x79, 0xb2, 0x2d, 0xa7, 0x9d, 0xd8, 0x8a, 0x16, 0xec, 0x30, 0x45, - 0x65, 0x1d, 0xd6, 0x2b, 0x83, 0x97, 0x6c, 0x39, 0xbb, 0xb0, 0x85, 0xe5, 0x0d, 0x2b, 0xd7, 0x2e, - 0x60, 0x26, 0xd9, 0x50, 0xc5, 0x45, 0xd5, 0xd2, 0xb4, 0x47, 0x53, 0x91, 0x66, 0x66, 0xa7, 0x5b, - 0x5e, 0x39, 0x1f, 0x81, 0xda, 0xa2, 0xb8, 0x71, 0xe6, 0xc6, 0x17, 0xa0, 0x8a, 0x23, 0x27, 0x6a, - 0x8f, 0x1c, 0xa0, 0xe0, 0x14, 0xc0, 0x5b, 0x5c, 0xf8, 0x04, 0x1c, 0xa9, 0xd7, 0xdd, 0xf3, 0xd7, - 0xa3, 0x54, 0x12, 0x6e, 0x5c, 0xa4, 0xee, 0x7e, 0xef, 0xf5, 0xf4, 0x7b, 0xfd, 0xde, 0xfb, 0xbd, - 0xd7, 0xb0, 0x45, 0x87, 0x23, 0xef, 0x40, 0x5c, 0x86, 0x8c, 0xab, 0xdf, 0x6e, 0x18, 0x05, 0x22, - 0x20, 0x2b, 0x72, 0xd2, 0x79, 0xdb, 0xf5, 0xc4, 0x78, 0x36, 0xec, 0x8e, 0x82, 0xe9, 0x81, 0x1b, - 0xb8, 0xc1, 0x81, 0xa4, 0x0e, 0x67, 0xe7, 0x72, 0x26, 0x27, 0x72, 0xa4, 0xa4, 0x3a, 0xbb, 0x6e, - 0x10, 0xb8, 0x13, 0x96, 0x72, 0x09, 0x6f, 0xca, 0xb8, 0xa0, 0xd3, 0x50, 0x33, 0x1c, 0x65, 0xf6, - 0x13, 0xcc, 0x77, 0x58, 0x34, 0xf5, 0x7c, 0x91, 0x1d, 0x4e, 0xbc, 0x21, 0x3f, 0x18, 0x05, 0xd3, - 0x69, 0xe0, 0x67, 0x0f, 0x64, 0xfd, 0xb1, 0x0a, 0x75, 0x9b, 0x7d, 0x36, 0x63, 0x5c, 0x90, 0x3d, - 0xa8, 0xb2, 0xd1, 0x38, 0x68, 0x57, 0xee, 0x18, 0x7b, 0xcd, 0x43, 0xd2, 0x55, 0x7c, 0x9a, 0xfa, - 0x70, 0x34, 0x0e, 0xfa, 0x4b, 0xb6, 0xe4, 0x20, 0x6f, 0xc1, 0xca, 0xf9, 0x64, 0xc6, 0xc7, 0xed, - 0x65, 0xc9, 0xba, 0x99, 0x67, 0xfd, 0x21, 0x92, 0xfa, 0x4b, 0xb6, 0xe2, 0xc1, 0x6d, 0x3d, 0xff, - 0x3c, 0x68, 0x57, 0xcb, 0xb6, 0x3d, 0xf5, 0xcf, 0xe5, 0xb6, 0xc8, 0x41, 0x8e, 0x00, 0x38, 0x13, - 0x83, 0x20, 0x14, 0x5e, 0xe0, 0xb7, 0x57, 0x24, 0xff, 0x76, 0x9e, 0xff, 0x11, 0x13, 0x3f, 0x91, - 0xe4, 0xfe, 0x92, 0x6d, 0xf2, 0x78, 0x82, 0x92, 0x9e, 0xef, 0x89, 0xc1, 0x68, 0x4c, 0x3d, 0xbf, - 0x5d, 0x2b, 0x93, 0x3c, 0xf5, 0x3d, 0x71, 0x82, 0x64, 0x94, 0xf4, 0xe2, 0x09, 0xaa, 0xf2, 0xd9, - 0x8c, 0x45, 0x97, 0xed, 0x7a, 0x99, 0x2a, 0x3f, 0x45, 0x12, 0xaa, 0x22, 0x79, 0xc8, 0xfb, 0xd0, - 0x1c, 0x32, 0xd7, 0xf3, 0x07, 0xc3, 0x49, 0x30, 0x7a, 0xda, 0x6e, 0x48, 0x91, 0x76, 0x5e, 0xa4, - 0x87, 0x0c, 0x3d, 0xa4, 0xf7, 0x97, 0x6c, 0x18, 0x26, 0x33, 0x72, 0x08, 0x8d, 0xd1, 0x98, 0x8d, - 0x9e, 0x0e, 0xc4, 0xbc, 0x6d, 0x4a, 0xc9, 0x5b, 0x79, 0xc9, 0x13, 0xa4, 0x3e, 0x9e, 0xf7, 0x97, - 0xec, 0xfa, 0x48, 0x0d, 0xc9, 0x7d, 0x30, 0x99, 0xef, 0xe8, 0xcf, 0x35, 0xa5, 0xd0, 0x56, 0xe1, - 0x5e, 0x7c, 0x27, 0xfe, 0x58, 0x83, 0xe9, 0x31, 0xe9, 0x42, 0x0d, 0xef, 0xda, 0x13, 0xed, 0x55, - 0x29, 0x73, 0xb3, 0xf0, 0x21, 0x49, 0xeb, 0x2f, 0xd9, 0x9a, 0x0b, 0xcd, 0xe7, 0xb0, 0x89, 0x77, - 0xc1, 0x22, 0x3c, 0xdc, 0x66, 0x99, 0xf9, 0x3e, 0x54, 0x74, 0x79, 0x3c, 0xd3, 0x89, 0x27, 0xbd, - 0x3a, 0xac, 0x5c, 0xd0, 0xc9, 0x8c, 0x59, 0x6f, 0x42, 0x33, 0xe3, 0x29, 0xa4, 0x0d, 0xf5, 0x29, - 0xe3, 0x9c, 0xba, 0xac, 0x6d, 0xdc, 0x31, 0xf6, 0x4c, 0x3b, 0x9e, 0x5a, 0xeb, 0xb0, 0x9a, 0xf5, - 0x93, 0x8c, 0x20, 0xfa, 0x02, 0x0a, 0x5e, 0xb0, 0x88, 0xa3, 0x03, 0x68, 0x41, 0x3d, 0xb5, 0xde, - 0x83, 0x8d, 0xa2, 0x13, 0x90, 0x0d, 0x58, 0x7e, 0xca, 0x2e, 0x35, 0x27, 0x0e, 0xc9, 0x4d, 0x7d, - 0x20, 0xe9, 0xc5, 0xa6, 0xad, 0x4f, 0xf7, 0x8b, 0x4a, 0x22, 0x9c, 0xf8, 0x01, 0x39, 0x82, 0x2a, - 0x06, 0x92, 0x94, 0x6e, 0x1e, 0x76, 0xba, 0x2a, 0xca, 0xba, 0x71, 0x94, 0x75, 0x1f, 0xc7, 0x51, - 0xd6, 0x6b, 0x7c, 0xf9, 0x7c, 0x77, 0xe9, 0x57, 0x7f, 0xdf, 0x35, 0x6c, 0x29, 0x41, 0x6e, 0xe3, - 0x55, 0x52, 0xcf, 0x1f, 0x78, 0x8e, 0xfe, 0x4e, 0x5d, 0xce, 0x4f, 0x1d, 0x72, 0x0c, 0x1b, 0xa3, - 0xc0, 
0xe7, 0xcc, 0xe7, 0x33, 0x3e, 0x08, 0x69, 0x44, 0xa7, 0x5c, 0x47, 0x49, 0x7c, 0x71, 0x27, - 0x31, 0xf9, 0x4c, 0x52, 0xed, 0xd6, 0x28, 0xbf, 0x40, 0xde, 0x05, 0xb8, 0xa0, 0x13, 0xcf, 0xa1, - 0x22, 0x88, 0x78, 0xbb, 0x7a, 0x67, 0x79, 0xaf, 0x79, 0xb8, 0xa1, 0x85, 0x9f, 0xc4, 0x84, 0x5e, - 0x15, 0xcf, 0x64, 0x67, 0x38, 0xc9, 0x5d, 0x68, 0xd1, 0x30, 0x1c, 0x70, 0x41, 0x05, 0x1b, 0x0c, - 0x2f, 0x05, 0xe3, 0x32, 0x86, 0x56, 0xed, 0x35, 0x1a, 0x86, 0x8f, 0x70, 0xb5, 0x87, 0x8b, 0x96, - 0x93, 0xdc, 0x80, 0x74, 0x6f, 0x42, 0xa0, 0xea, 0x50, 0x41, 0xa5, 0x1d, 0x56, 0x6d, 0x39, 0xc6, - 0xb5, 0x90, 0x8a, 0xb1, 0xd6, 0x4e, 0x8e, 0xc9, 0x16, 0xd4, 0xc6, 0xcc, 0x73, 0xc7, 0x42, 0x2a, - 0xb4, 0x6c, 0xeb, 0x19, 0x9a, 0x3c, 0x8c, 0x82, 0x0b, 0x26, 0x23, 0xbc, 0x61, 0xab, 0x89, 0xf5, - 0x2f, 0x03, 0x6e, 0x5c, 0x0b, 0x09, 0xdc, 0x77, 0x4c, 0xf9, 0x38, 0xfe, 0x16, 0x8e, 0xc9, 0x5b, - 0xb8, 0x2f, 0x75, 0x58, 0xa4, 0x33, 0xcf, 0x9a, 0xd6, 0xb5, 0x2f, 0x17, 0xb5, 0xa2, 0x9a, 0x85, - 0x3c, 0x84, 0x8d, 0x09, 0xe5, 0x62, 0xa0, 0x3c, 0x77, 0x20, 0x33, 0xcb, 0x72, 0x2e, 0x9a, 0x3e, - 0xa1, 0xb1, 0x87, 0xa3, 0x43, 0x69, 0xf1, 0xf5, 0x49, 0x6e, 0x95, 0xf4, 0xe1, 0xe6, 0xf0, 0xf2, - 0x19, 0xf5, 0x85, 0xe7, 0xb3, 0xc1, 0x35, 0x6b, 0xb7, 0xf4, 0x56, 0x0f, 0x2f, 0x3c, 0x87, 0xf9, - 0x23, 0xa6, 0x37, 0xd9, 0x4c, 0x44, 0x92, 0x6b, 0xe0, 0xd6, 0x1d, 0x58, 0xcf, 0xc7, 0x2f, 0x59, - 0x87, 0x8a, 0x98, 0x6b, 0x0d, 0x2b, 0x62, 0x6e, 0x59, 0x89, 0xef, 0x25, 0x41, 0x74, 0x8d, 0xe7, - 0x1e, 0xb4, 0x0a, 0x01, 0x9d, 0x31, 0xb7, 0x91, 0x35, 0xb7, 0xd5, 0x82, 0xb5, 0x5c, 0x1c, 0x5b, - 0x5f, 0xac, 0x40, 0xc3, 0x66, 0x3c, 0x44, 0x37, 0x22, 0x47, 0x60, 0xb2, 0xf9, 0x88, 0xa9, 0x14, - 0x6a, 0x14, 0x12, 0x94, 0xe2, 0x79, 0x18, 0xd3, 0x31, 0x94, 0x13, 0x66, 0x72, 0x2f, 0x97, 0xfe, - 0x37, 0x8b, 0x42, 0xd9, 0xfc, 0xbf, 0x9f, 0xcf, 0xff, 0x37, 0x0b, 0xbc, 0x05, 0x00, 0xb8, 0x97, - 0x03, 0x80, 0xe2, 0xc6, 0x39, 0x04, 0x78, 0x50, 0x82, 0x00, 0xc5, 0xe3, 0x2f, 0x80, 0x80, 0x07, - 0x25, 0x10, 0xd0, 0xbe, 0xf6, 0xad, 0x52, 0x0c, 0xd8, 0xcf, 0x63, 0x40, 0x51, 0x9d, 0x02, 0x08, - 0x7c, 0xaf, 0x0c, 0x04, 0x6e, 0x17, 0x64, 0x16, 0xa2, 0xc0, 0x3b, 0xd7, 0x50, 0x60, 0xab, 0x20, - 0x5a, 0x02, 0x03, 0x0f, 0x72, 0xf9, 0x19, 0x4a, 0x75, 0x2b, 0x4f, 0xd0, 0xe4, 0xdd, 0xeb, 0x08, - 0xb2, 0x5d, 0xbc, 0xda, 0x32, 0x08, 0x39, 0x28, 0x40, 0xc8, 0xad, 0xe2, 0x29, 0x0b, 0x18, 0x92, - 0x22, 0xc1, 0x3d, 0x8c, 0xfb, 0x82, 0xa7, 0x61, 0x8e, 0x60, 0x51, 0x14, 0x44, 0x3a, 0x55, 0xab, - 0x89, 0xb5, 0x87, 0x99, 0x28, 0xf5, 0xaf, 0x17, 0xa0, 0x86, 0x74, 0xfa, 0x8c, 0x77, 0x59, 0xbf, - 0x36, 0x52, 0x59, 0x19, 0xd1, 0xd9, 0x2c, 0x66, 0xea, 0x2c, 0x96, 0x01, 0x93, 0x4a, 0x0e, 0x4c, - 0xc8, 0xb7, 0xe0, 0x86, 0x4c, 0x23, 0xd2, 0x2e, 0x83, 0x5c, 0x5a, 0x6b, 0x21, 0x41, 0x19, 0x44, - 0xe5, 0xb7, 0xb7, 0x61, 0x33, 0xc3, 0x8b, 0x29, 0x56, 0xa6, 0xb0, 0xaa, 0x0c, 0xde, 0x8d, 0x84, - 0xfb, 0x38, 0x0c, 0xfb, 0x94, 0x8f, 0xad, 0x1f, 0xa5, 0xfa, 0xa7, 0x40, 0x45, 0xa0, 0x3a, 0x0a, - 0x1c, 0xa5, 0xd6, 0x9a, 0x2d, 0xc7, 0x08, 0x5e, 0x93, 0xc0, 0x95, 0x5f, 0x35, 0x6d, 0x1c, 0x22, - 0x57, 0x12, 0x29, 0xa6, 0x0a, 0x09, 0xeb, 0x97, 0x46, 0xba, 0x5f, 0x8a, 0x5d, 0x65, 0x30, 0x63, - 0xfc, 0x2f, 0x30, 0x53, 0x79, 0x59, 0x98, 0xb1, 0x7e, 0x67, 0xa4, 0x77, 0x91, 0x00, 0xc8, 0xeb, - 0x29, 0x87, 0x6e, 0xe1, 0xf9, 0x0e, 0x9b, 0xcb, 0x50, 0x5f, 0xb6, 0xd5, 0x24, 0x46, 0xf5, 0x9a, - 0x34, 0x70, 0x1e, 0xd5, 0xeb, 0x72, 0x4d, 0x4d, 0x34, 0xf0, 0x04, 0xe7, 0x32, 0x06, 0x57, 0x6d, - 0x35, 0xc9, 0xe4, 0x4d, 0x33, 0x97, 0x37, 0xcf, 0x80, 0x5c, 0x8f, 0x4e, 0xf2, 0x1e, 0x54, 0x05, - 0x75, 0xd1, 0x78, 0xa8, 0xff, 
0x7a, 0x57, 0xd5, 0xc8, 0xdd, 0x8f, 0x9f, 0x9c, 0x51, 0x2f, 0xea, - 0x6d, 0xa1, 0xf6, 0xff, 0x7e, 0xbe, 0xbb, 0x8e, 0x3c, 0xfb, 0xc1, 0xd4, 0x13, 0x6c, 0x1a, 0x8a, - 0x4b, 0x5b, 0xca, 0x58, 0x7f, 0x31, 0x30, 0x6b, 0xe7, 0xa2, 0xb6, 0xd4, 0x16, 0xb1, 0x6b, 0x56, - 0x32, 0x00, 0xfb, 0x72, 0xf6, 0xf9, 0x3a, 0x80, 0x4b, 0xf9, 0xe0, 0x73, 0xea, 0x0b, 0xe6, 0x68, - 0x23, 0x99, 0x2e, 0xe5, 0x3f, 0x93, 0x0b, 0x58, 0x87, 0x20, 0x79, 0xc6, 0x99, 0x23, 0xad, 0xb5, - 0x6c, 0xd7, 0x5d, 0xca, 0x3f, 0xe5, 0xcc, 0x49, 0xf4, 0xaa, 0xbf, 0x86, 0x5e, 0x7f, 0xcd, 0xb8, - 0x5c, 0x0a, 0x59, 0xff, 0x0f, 0x9a, 0x7d, 0x65, 0x20, 0x16, 0xe7, 0xd3, 0x1e, 0x39, 0x81, 0x1b, - 0x89, 0x7b, 0x0f, 0x66, 0xa1, 0x43, 0xb1, 0x72, 0x32, 0x5e, 0x18, 0x0f, 0x1b, 0x89, 0xc0, 0xa7, - 0x8a, 0x9f, 0xfc, 0x18, 0xb6, 0x0b, 0x01, 0x99, 0x6c, 0x55, 0x79, 0x61, 0x5c, 0xde, 0xca, 0xc7, - 0x65, 0xbc, 0x5f, 0xac, 0xe5, 0xf2, 0x6b, 0x68, 0xf9, 0x4d, 0x2c, 0x49, 0xb2, 0x69, 0xba, 0xec, - 0x9e, 0xac, 0xdf, 0x18, 0xd0, 0x2a, 0x1c, 0x86, 0x1c, 0x00, 0xa8, 0x2c, 0xc7, 0xbd, 0x67, 0x71, - 0x61, 0x1c, 0xdb, 0x40, 0x1a, 0xeb, 0x91, 0xf7, 0x8c, 0xd9, 0xe6, 0x30, 0x1e, 0x92, 0xbb, 0x50, - 0x17, 0x73, 0xc5, 0x9d, 0x2f, 0xde, 0x1e, 0xcf, 0x25, 0x6b, 0x4d, 0xc8, 0x7f, 0x72, 0x1f, 0x56, - 0xd5, 0xc6, 0x6e, 0xc0, 0xb9, 0x17, 0xea, 0xc2, 0x81, 0x64, 0xb7, 0xfe, 0x48, 0x52, 0xec, 0xe6, - 0x30, 0x9d, 0x58, 0x3f, 0x07, 0x33, 0xf9, 0x2c, 0x79, 0x03, 0xcc, 0x29, 0x9d, 0xeb, 0xca, 0x16, - 0xcf, 0xb6, 0x62, 0x37, 0xa6, 0x74, 0x2e, 0x8b, 0x5a, 0xb2, 0x0d, 0x75, 0x24, 0x8a, 0xb9, 0xb2, - 0xf7, 0x8a, 0x5d, 0x9b, 0xd2, 0xf9, 0xe3, 0x79, 0x42, 0x70, 0x29, 0x8f, 0xcb, 0xd6, 0x29, 0x9d, - 0x7f, 0x44, 0xb9, 0xf5, 0x01, 0xd4, 0xd4, 0x21, 0x5f, 0x6a, 0x63, 0x94, 0xaf, 0xe4, 0xe4, 0x7f, - 0x00, 0xcd, 0xcc, 0xb9, 0xc9, 0x77, 0xe0, 0x96, 0xd2, 0x30, 0xa4, 0x91, 0x90, 0x16, 0xc9, 0x6d, - 0x48, 0x24, 0xf1, 0x8c, 0x46, 0x02, 0x3f, 0xa9, 0x0a, 0xf1, 0x08, 0xd6, 0xf3, 0xc5, 0x2a, 0xf9, - 0x06, 0xac, 0xea, 0xc2, 0x36, 0x0a, 0x66, 0xbe, 0xa3, 0x65, 0x9b, 0x6a, 0xcd, 0xc6, 0x25, 0xf2, - 0xfd, 0x92, 0xb4, 0x1d, 0x23, 0xfa, 0x23, 0xcf, 0xf5, 0x3d, 0xdf, 0x7d, 0x51, 0xf6, 0xfe, 0x5b, - 0x05, 0x6a, 0xaa, 0xb0, 0x26, 0x77, 0x33, 0x5d, 0x8c, 0x44, 0xcd, 0x5e, 0xf3, 0xea, 0xf9, 0x6e, - 0x5d, 0x02, 0xcc, 0xe9, 0x87, 0x69, 0x4b, 0x93, 0x26, 0xd4, 0x4a, 0xae, 0xee, 0x8f, 0xfb, 0xa7, - 0xe5, 0x57, 0xee, 0x9f, 0xb6, 0xa1, 0xee, 0xcf, 0xa6, 0xf2, 0xb2, 0xaa, 0xea, 0xb2, 0xfc, 0xd9, - 0x14, 0x2f, 0xeb, 0x0d, 0x30, 0x45, 0x20, 0xe8, 0x44, 0x92, 0x54, 0x52, 0x68, 0xc8, 0x05, 0x24, - 0xde, 0x85, 0x56, 0x16, 0xb3, 0x11, 0x83, 0x15, 0x44, 0xac, 0xa5, 0x88, 0x8d, 0xfd, 0xc4, 0x9b, - 0xd0, 0x4a, 0x15, 0x56, 0x7c, 0x0a, 0x36, 0xd6, 0xd3, 0x65, 0xc9, 0x78, 0x1b, 0x1a, 0x09, 0x9a, - 0x2b, 0x08, 0xa9, 0x53, 0x05, 0xe2, 0xd8, 0xac, 0x87, 0x51, 0x10, 0x06, 0x9c, 0x45, 0xba, 0x4c, - 0x5b, 0x94, 0x0a, 0x12, 0x3e, 0xcb, 0x03, 0x33, 0x21, 0x62, 0xe9, 0x41, 0x1d, 0x27, 0x62, 0x9c, - 0xeb, 0x2a, 0x3f, 0x9e, 0x92, 0x7d, 0xa8, 0x87, 0xb3, 0xe1, 0x00, 0x11, 0x2e, 0x1f, 0x32, 0x67, - 0xb3, 0xe1, 0xc7, 0xec, 0x32, 0xee, 0x77, 0x42, 0x39, 0x93, 0x18, 0x17, 0x7c, 0xce, 0x22, 0xed, - 0xbc, 0x6a, 0x62, 0x09, 0xd8, 0x28, 0xde, 0x35, 0xf9, 0x2e, 0x98, 0x89, 0x7e, 0x85, 0xd0, 0x2d, - 0x9e, 0x39, 0x65, 0xc4, 0x42, 0x88, 0x7b, 0xae, 0xcf, 0x9c, 0x41, 0x6a, 0x5b, 0x79, 0xae, 0x86, - 0xdd, 0x52, 0x84, 0x4f, 0x62, 0xe3, 0x5a, 0xdf, 0x86, 0x9a, 0x3a, 0x23, 0xe6, 0x13, 0xdc, 0x39, - 0x2e, 0xb6, 0x70, 0x5c, 0x9a, 0x63, 0xfe, 0x6c, 0x40, 0x23, 0x6e, 0xa2, 0x4a, 0x85, 0x72, 0x87, - 0xae, 0xbc, 0xec, 0xa1, 0x17, 0x75, 0xa2, 0xb1, 0x47, 
0x56, 0x5f, 0xd9, 0x23, 0xf7, 0x81, 0x28, - 0xc7, 0xbb, 0x08, 0x84, 0xe7, 0xbb, 0x03, 0x65, 0x73, 0xe5, 0x81, 0x1b, 0x92, 0xf2, 0x44, 0x12, - 0xce, 0x70, 0xfd, 0xf0, 0x8b, 0x15, 0x68, 0x1d, 0xf7, 0x4e, 0x4e, 0x8f, 0xc3, 0x70, 0xe2, 0x8d, - 0xa8, 0xac, 0xf0, 0x0e, 0xa0, 0x2a, 0x6b, 0xd8, 0x92, 0x77, 0xb3, 0x4e, 0x59, 0x33, 0x45, 0x0e, - 0x61, 0x45, 0x96, 0xb2, 0xa4, 0xec, 0xf9, 0xac, 0x53, 0xda, 0x53, 0xe1, 0x47, 0x54, 0xb1, 0x7b, - 0xfd, 0x15, 0xad, 0x53, 0xd6, 0x58, 0x91, 0x0f, 0xc0, 0x4c, 0x8b, 0xd0, 0x45, 0x6f, 0x69, 0x9d, - 0x85, 0x2d, 0x16, 0xca, 0xa7, 0x15, 0xc0, 0xa2, 0x27, 0xa1, 0xce, 0xc2, 0x5e, 0x84, 0x1c, 0x41, - 0x3d, 0xae, 0x8c, 0xca, 0x5f, 0xbb, 0x3a, 0x0b, 0xda, 0x1f, 0x34, 0x8f, 0xaa, 0x2e, 0xcb, 0x9e, - 0xe4, 0x3a, 0xa5, 0x3d, 0x1a, 0xb9, 0x0f, 0x35, 0x0d, 0x78, 0xa5, 0x2f, 0x5e, 0x9d, 0xf2, 0x26, - 0x06, 0x95, 0x4c, 0x2b, 0xeb, 0x45, 0xcf, 0x86, 0x9d, 0x85, 0xcd, 0x24, 0x39, 0x06, 0xc8, 0x54, - 0x94, 0x0b, 0xdf, 0x03, 0x3b, 0x8b, 0x9b, 0x44, 0xf2, 0x3e, 0x34, 0xd2, 0xc6, 0xbf, 0xfc, 0x85, - 0xaf, 0xb3, 0xa8, 0x6f, 0xeb, 0x7d, 0xed, 0x3f, 0xff, 0xdc, 0x31, 0x7e, 0x7b, 0xb5, 0x63, 0xfc, - 0xfe, 0x6a, 0xc7, 0xf8, 0xf2, 0x6a, 0xc7, 0xf8, 0xd3, 0xd5, 0x8e, 0xf1, 0x8f, 0xab, 0x1d, 0xe3, - 0x0f, 0x5f, 0xed, 0x18, 0xc3, 0x9a, 0x74, 0xff, 0x77, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc1, - 0xc2, 0x93, 0xfb, 0x94, 0x16, 0x00, 0x00, + 0x15, 0xf7, 0x48, 0xb2, 0xfe, 0x3c, 0xd9, 0x92, 0xd3, 0x76, 0x6c, 0x45, 0x80, 0x9d, 0x1a, 0x20, + 0x6b, 0xb3, 0x5e, 0x7b, 0xf1, 0x12, 0xca, 0xd9, 0x2c, 0x5b, 0x58, 0x49, 0x58, 0xbb, 0x76, 0x01, + 0x33, 0x49, 0xcc, 0x85, 0xaa, 0xa9, 0x96, 0xa6, 0x2d, 0x4d, 0x45, 0x9a, 0x99, 0x9d, 0x6e, 0x79, + 0xe5, 0x7c, 0x86, 0x3d, 0xec, 0x81, 0x2a, 0xce, 0xdc, 0xf8, 0x02, 0x54, 0x71, 0xe4, 0x44, 0xed, + 0x91, 0xa2, 0xa0, 0xb8, 0x05, 0x30, 0xc5, 0x01, 0x3e, 0x01, 0x47, 0xaa, 0x5f, 0xf7, 0xfc, 0xf5, + 0x28, 0x95, 0x84, 0xdb, 0x5e, 0xa4, 0xee, 0x7e, 0xef, 0x75, 0xf7, 0x7b, 0xf3, 0xde, 0xfb, 0xbd, + 0xd7, 0xb0, 0x4e, 0xfb, 0x03, 0x77, 0x5f, 0x5c, 0x06, 0x8c, 0xab, 0xdf, 0xbd, 0x20, 0xf4, 0x85, + 0x4f, 0x16, 0x71, 0xd2, 0x7d, 0x67, 0xe8, 0x8a, 0xd1, 0xb4, 0xbf, 0x37, 0xf0, 0x27, 0xfb, 0x43, + 0x7f, 0xe8, 0xef, 0x23, 0xb5, 0x3f, 0x3d, 0xc7, 0x19, 0x4e, 0x70, 0xa4, 0xa4, 0xba, 0x5b, 0x43, + 0xdf, 0x1f, 0x8e, 0x59, 0xc2, 0x25, 0xdc, 0x09, 0xe3, 0x82, 0x4e, 0x02, 0xcd, 0x70, 0x98, 0xda, + 0x4f, 0x30, 0xcf, 0x61, 0xe1, 0xc4, 0xf5, 0x44, 0x7a, 0x38, 0x76, 0xfb, 0x7c, 0x7f, 0xe0, 0x4f, + 0x26, 0xbe, 0x97, 0xbe, 0x90, 0xf9, 0x87, 0x0a, 0xd4, 0x2c, 0xf6, 0xe9, 0x94, 0x71, 0x41, 0xb6, + 0xa1, 0xc2, 0x06, 0x23, 0xbf, 0x53, 0xba, 0x6d, 0x6c, 0x37, 0x0f, 0xc8, 0x9e, 0xe2, 0xd3, 0xd4, + 0x47, 0x83, 0x91, 0x7f, 0xbc, 0x60, 0x21, 0x07, 0x79, 0x1b, 0x16, 0xcf, 0xc7, 0x53, 0x3e, 0xea, + 0x94, 0x91, 0x75, 0x35, 0xcb, 0xfa, 0x23, 0x49, 0x3a, 0x5e, 0xb0, 0x14, 0x8f, 0xdc, 0xd6, 0xf5, + 0xce, 0xfd, 0x4e, 0xa5, 0x68, 0xdb, 0x13, 0xef, 0x1c, 0xb7, 0x95, 0x1c, 0xe4, 0x10, 0x80, 0x33, + 0x61, 0xfb, 0x81, 0x70, 0x7d, 0xaf, 0xb3, 0x88, 0xfc, 0x1b, 0x59, 0xfe, 0xc7, 0x4c, 0xfc, 0x14, + 0xc9, 0xc7, 0x0b, 0x56, 0x83, 0x47, 0x13, 0x29, 0xe9, 0x7a, 0xae, 0xb0, 0x07, 0x23, 0xea, 0x7a, + 0x9d, 0x6a, 0x91, 0xe4, 0x89, 0xe7, 0x8a, 0x07, 0x92, 0x2c, 0x25, 0xdd, 0x68, 0x22, 0x55, 0xf9, + 0x74, 0xca, 0xc2, 0xcb, 0x4e, 0xad, 0x48, 0x95, 0x9f, 0x49, 0x92, 0x54, 0x05, 0x79, 0xc8, 0x7d, + 0x68, 0xf6, 0xd9, 0xd0, 0xf5, 0xec, 0xfe, 0xd8, 0x1f, 0x3c, 0xeb, 0xd4, 0x51, 0xa4, 0x93, 0x15, + 0xe9, 0x49, 0x86, 0x9e, 0xa4, 0x1f, 0x2f, 0x58, 0xd0, 0x8f, 0x67, 0xe4, 0x00, 0xea, 0x83, 0x11, + 0x1b, 0x3c, 0xb3, 0xc5, 0xac, 0xd3, 
0x40, 0xc9, 0x9b, 0x59, 0xc9, 0x07, 0x92, 0xfa, 0x64, 0x76,
+	0xbc, 0x60, 0xd5, 0x06, 0x6a, 0x48, 0xee, 0x42, 0x83, 0x79, 0x8e, 0x3e, 0xae, 0x89, 0x42, 0xeb,
+	0xb9, 0xef, 0xe2, 0x39, 0xd1, 0x61, 0x75, 0xa6, 0xc7, 0x64, 0x0f, 0xaa, 0xf2, 0x5b, 0xbb, 0xa2,
+	0xb3, 0x84, 0x32, 0x6b, 0xb9, 0x83, 0x90, 0x76, 0xbc, 0x60, 0x69, 0x2e, 0x69, 0x3e, 0x87, 0x8d,
+	0xdd, 0x0b, 0x16, 0xca, 0xcb, 0xad, 0x16, 0x99, 0xef, 0xa1, 0xa2, 0xe3, 0xf5, 0x1a, 0x4e, 0x34,
+	0xe9, 0xd5, 0x60, 0xf1, 0x82, 0x8e, 0xa7, 0xcc, 0x7c, 0x0b, 0x9a, 0x29, 0x4f, 0x21, 0x1d, 0xa8,
+	0x4d, 0x18, 0xe7, 0x74, 0xc8, 0x3a, 0xc6, 0x6d, 0x63, 0xbb, 0x61, 0x45, 0x53, 0xb3, 0x05, 0x4b,
+	0x69, 0x3f, 0x49, 0x09, 0x4a, 0x5f, 0x90, 0x82, 0x17, 0x2c, 0xe4, 0xd2, 0x01, 0xb4, 0xa0, 0x9e,
+	0x9a, 0xef, 0xc3, 0x4a, 0xde, 0x09, 0xc8, 0x0a, 0x94, 0x9f, 0xb1, 0x4b, 0xcd, 0x29, 0x87, 0x64,
+	0x4d, 0x5f, 0x08, 0xbd, 0xb8, 0x61, 0xe9, 0xdb, 0x7d, 0x51, 0x8a, 0x85, 0x63, 0x3f, 0x20, 0x87,
+	0x50, 0x91, 0x81, 0x84, 0xd2, 0xcd, 0x83, 0xee, 0x9e, 0x8a, 0xb2, 0xbd, 0x28, 0xca, 0xf6, 0x9e,
+	0x44, 0x51, 0xd6, 0xab, 0x7f, 0xf9, 0x62, 0x6b, 0xe1, 0x8b, 0xbf, 0x6d, 0x19, 0x16, 0x4a, 0x90,
+	0x5b, 0xf2, 0x53, 0x52, 0xd7, 0xb3, 0x5d, 0x47, 0x9f, 0x53, 0xc3, 0xf9, 0x89, 0x43, 0x8e, 0x60,
+	0x65, 0xe0, 0x7b, 0x9c, 0x79, 0x7c, 0xca, 0xed, 0x80, 0x86, 0x74, 0xc2, 0x75, 0x94, 0x44, 0x1f,
+	0xee, 0x41, 0x44, 0x3e, 0x45, 0xaa, 0xd5, 0x1e, 0x64, 0x17, 0xc8, 0x07, 0x00, 0x17, 0x74, 0xec,
+	0x3a, 0x54, 0xf8, 0x21, 0xef, 0x54, 0x6e, 0x97, 0x53, 0xc2, 0x67, 0x11, 0xe1, 0x69, 0xe0, 0x50,
+	0xc1, 0x7a, 0x15, 0x79, 0x33, 0x2b, 0xc5, 0x4f, 0xee, 0x40, 0x9b, 0x06, 0x81, 0xcd, 0x05, 0x15,
+	0xcc, 0xee, 0x5f, 0x0a, 0xc6, 0x31, 0x92, 0x96, 0xac, 0x65, 0x1a, 0x04, 0x8f, 0xe5, 0x6a, 0x4f,
+	0x2e, 0x9a, 0x4e, 0xfc, 0x1d, 0xd0, 0xc9, 0x09, 0x81, 0x8a, 0x43, 0x05, 0x45, 0x6b, 0x2c, 0x59,
+	0x38, 0x96, 0x6b, 0x01, 0x15, 0x23, 0xad, 0x23, 0x8e, 0xc9, 0x3a, 0x54, 0x47, 0xcc, 0x1d, 0x8e,
+	0x04, 0xaa, 0x55, 0xb6, 0xf4, 0x4c, 0x1a, 0x3e, 0x08, 0xfd, 0x0b, 0x86, 0x71, 0x5e, 0xb7, 0xd4,
+	0xc4, 0xfc, 0x97, 0x01, 0x37, 0xae, 0x05, 0x86, 0xdc, 0x77, 0x44, 0xf9, 0x28, 0x3a, 0x4b, 0x8e,
+	0xc9, 0xdb, 0x72, 0x5f, 0xea, 0xb0, 0x50, 0xe7, 0x9f, 0x65, 0xad, 0xf1, 0x31, 0x2e, 0x6a, 0x45,
+	0x35, 0x0b, 0x79, 0x04, 0x2b, 0x63, 0xca, 0x85, 0xad, 0xfc, 0xd7, 0xc6, 0xfc, 0x52, 0xce, 0xc4,
+	0xd4, 0x27, 0x34, 0xf2, 0x73, 0xe9, 0x56, 0x5a, 0xbc, 0x35, 0xce, 0xac, 0x92, 0x63, 0x58, 0xeb,
+	0x5f, 0x3e, 0xa7, 0x9e, 0x70, 0x3d, 0x66, 0x5f, 0xb3, 0x79, 0x5b, 0x6f, 0xf5, 0xe8, 0xc2, 0x75,
+	0x98, 0x37, 0x88, 0x8c, 0xbd, 0x1a, 0x8b, 0xc4, 0x1f, 0x83, 0x9b, 0xb7, 0xa1, 0x95, 0x8d, 0x62,
+	0xd2, 0x82, 0x92, 0x98, 0x69, 0x0d, 0x4b, 0x62, 0x66, 0x9a, 0xb1, 0x07, 0xc6, 0xa1, 0x74, 0x8d,
+	0x67, 0x07, 0xda, 0xb9, 0xb0, 0x4e, 0x99, 0xdb, 0x48, 0x9b, 0xdb, 0x6c, 0xc3, 0x72, 0x26, 0x9a,
+	0xcd, 0xcf, 0x17, 0xa1, 0x6e, 0x31, 0x1e, 0x48, 0x67, 0x22, 0x87, 0xd0, 0x60, 0xb3, 0x01, 0x53,
+	0x89, 0xd4, 0xc8, 0xa5, 0x29, 0xc5, 0xf3, 0x28, 0xa2, 0xcb, 0x80, 0x8e, 0x99, 0xc9, 0x4e, 0x06,
+	0x04, 0x56, 0xf3, 0x42, 0x69, 0x14, 0xd8, 0xcd, 0xa2, 0xc0, 0x5a, 0x8e, 0x37, 0x07, 0x03, 0x3b,
+	0x19, 0x18, 0xc8, 0x6f, 0x9c, 0xc1, 0x81, 0x7b, 0x05, 0x38, 0x90, 0xbf, 0xfe, 0x1c, 0x20, 0xb8,
+	0x57, 0x00, 0x04, 0x9d, 0x6b, 0x67, 0x15, 0x22, 0xc1, 0x6e, 0x16, 0x09, 0xf2, 0xea, 0xe4, 0xa0,
+	0xe0, 0x83, 0x22, 0x28, 0xb8, 0x95, 0x93, 0x99, 0x8b, 0x05, 0xef, 0x5d, 0xc3, 0x82, 0xf5, 0x9c,
+	0x68, 0x01, 0x18, 0xdc, 0xcb, 0x64, 0x69, 0x28, 0xd4, 0xad, 0x38, 0x4d, 0x93, 0xef, 0x5f, 0xc7,
+	0x91, 0x8d, 0xfc, 0xa7, 0x2d, 0x02, 0x92, 0xfd, 0x1c, 0x90, 0xdc, 0xcc, 0xdf, 0x32, 0x87, 0x24,
+	0x09, 0x1e, 0xec, 0xc8, 0xb8, 0xcf, 0x79, 0x9a, 0xcc, 0x11, 0x2c, 0x0c, 0xfd, 0x50, 0x27, 0x6c,
+	0x35, 0x31, 0xb7, 0x65, 0x26, 0x4a, 0xfc, 0xeb, 0x25, 0xd8, 0x81, 0x4e, 0x9f, 0xf2, 0x2e, 0xf3,
+	0x57, 0x46, 0x22, 0x8b, 0x11, 0x9d, 0xce, 0x62, 0x0d, 0x9d, 0xc5, 0x52, 0x90, 0x52, 0xca, 0x40,
+	0x0a, 0xf9, 0x0e, 0xdc, 0xc0, 0x34, 0x82, 0x76, 0xb1, 0x33, 0x69, 0xad, 0x2d, 0x09, 0xca, 0x20,
+	0x2a, 0xbf, 0xbd, 0x03, 0xab, 0x29, 0x5e, 0x99, 0x62, 0x31, 0x85, 0x55, 0x30, 0x78, 0x57, 0x62,
+	0xee, 0xa3, 0x20, 0x38, 0xa6, 0x7c, 0x64, 0xfe, 0x38, 0xd1, 0x3f, 0x81, 0x2b, 0x02, 0x95, 0x81,
+	0xef, 0x28, 0xb5, 0x96, 0x2d, 0x1c, 0x4b, 0x08, 0x1b, 0xfb, 0x43, 0x3c, 0xb5, 0x61, 0xc9, 0xa1,
+	0xe4, 0x8a, 0x23, 0xa5, 0xa1, 0x42, 0xc2, 0xfc, 0xa5, 0x91, 0xec, 0x97, 0x20, 0x58, 0x11, 0xd8,
+	0x18, 0xff, 0x0f, 0xd8, 0x94, 0x5e, 0x0f, 0x6c, 0xcc, 0xdf, 0x1a, 0xc9, 0x17, 0x89, 0x61, 0xe4,
+	0xcd, 0x54, 0x94, 0xce, 0xe1, 0x7a, 0x0e, 0x9b, 0x61, 0xc0, 0x97, 0x2d, 0x35, 0x89, 0x10, 0xbe,
+	0x8a, 0x66, 0xce, 0x22, 0x7c, 0x0d, 0xd7, 0xd4, 0x44, 0xc3, 0x8f, 0x7f, 0x8e, 0x91, 0xb8, 0x64,
+	0xa9, 0x49, 0x2a, 0x7b, 0x36, 0x32, 0xd9, 0xf3, 0x14, 0xc8, 0xf5, 0x18, 0x25, 0xef, 0x43, 0x45,
+	0xd0, 0xa1, 0x34, 0xa1, 0xb4, 0x42, 0x6b, 0x4f, 0xd5, 0xcb, 0x7b, 0x1f, 0x9f, 0x9d, 0x52, 0x37,
+	0xec, 0xad, 0x4b, 0xed, 0xff, 0xf3, 0x62, 0xab, 0x25, 0x79, 0x76, 0xfd, 0x89, 0x2b, 0xd8, 0x24,
+	0x10, 0x97, 0x16, 0xca, 0x98, 0x7f, 0x31, 0x64, 0xee, 0xce, 0xc4, 0x6e, 0xa1, 0x2d, 0x22, 0x07,
+	0x2d, 0xa5, 0x60, 0xf6, 0xd5, 0xec, 0xf3, 0x0d, 0x80, 0x21, 0xe5, 0xf6, 0x67, 0xd4, 0x13, 0xcc,
+	0xd1, 0x46, 0x6a, 0x0c, 0x29, 0xff, 0x39, 0x2e, 0xc8, 0x9a, 0x44, 0x92, 0xa7, 0x9c, 0x39, 0x68,
+	0xad, 0xb2, 0x55, 0x1b, 0x52, 0xfe, 0x94, 0x33, 0x27, 0xd6, 0xab, 0xf6, 0x06, 0x7a, 0xfd, 0x35,
+	0xe5, 0x78, 0x09, 0x70, 0x7d, 0x15, 0x34, 0xfb, 0xb7, 0x21, 0x11, 0x39, 0x9b, 0xfc, 0xc8, 0x09,
+	0xdc, 0x88, 0xdd, 0xdb, 0x9e, 0xa2, 0xdb, 0x47, 0xfe, 0xf0, 0xf2, 0xa8, 0x58, 0xb9, 0xc8, 0x2e,
+	0x73, 0xf2, 0x13, 0xd8, 0xc8, 0x05, 0x67, 0xbc, 0x61, 0xe9, 0xa5, 0x31, 0x7a, 0x33, 0x1b, 0xa3,
+	0xd1, 0x7e, 0x91, 0xae, 0xe5, 0x37, 0xd0, 0xf5, 0x5b, 0xb2, 0x3c, 0x49, 0xa7, 0xec, 0xa2, 0xaf,
+	0x65, 0xfe, 0xda, 0x80, 0x76, 0xee, 0x32, 0x64, 0x1f, 0x40, 0x65, 0x3c, 0xee, 0x3e, 0x8f, 0x4a,
+	0xe5, 0x15, 0x7d, 0x71, 0x34, 0xd9, 0x63, 0xf7, 0x39, 0xb3, 0x1a, 0xfd, 0x68, 0x48, 0xee, 0x40,
+	0x4d, 0xcc, 0x14, 0x77, 0xb6, 0x90, 0x7b, 0x32, 0x43, 0xd6, 0xaa, 0xc0, 0x7f, 0x72, 0x17, 0x96,
+	0xd4, 0xc6, 0x43, 0x9f, 0x73, 0x37, 0xd0, 0x45, 0x04, 0x49, 0x6f, 0xfd, 0x11, 0x52, 0xac, 0x66,
+	0x3f, 0x99, 0x98, 0x47, 0xd0, 0x88, 0x8f, 0x25, 0x5f, 0x83, 0xc6, 0x84, 0xce, 0x74, 0x95, 0x2b,
+	0xef, 0xb6, 0x68, 0xd5, 0x27, 0x74, 0x86, 0x05, 0x2e, 0xd9, 0x80, 0x9a, 0x24, 0x0e, 0xa9, 0xb2,
+	0x77, 0xd9, 0xaa, 0x4e, 0xe8, 0xec, 0x23, 0xca, 0xcd, 0x0f, 0xa1, 0xaa, 0xee, 0xf2, 0x86, 0xf2,
+	0x3f, 0x84, 0x66, 0xea, 0x7a, 0xe4, 0xbb, 0x70, 0x53, 0x29, 0x12, 0xd0, 0x50, 0xa0, 0xe2, 0x99,
+	0x0d, 0x09, 0x12, 0x4f, 0x69, 0x28, 0xe4, 0x91, 0xaa, 0xf6, 0x7e, 0x0c, 0xad, 0x6c, 0x7d, 0x2a,
+	0xd3, 0x57, 0xe8, 0x4f, 0x3d, 0x47, 0x0b, 0xa9, 0x89, 0x6c, 0x4e, 0x2f, 0x7c, 0xe5, 0x30, 0xe9,
+	0x82, 0xf4, 0xcc, 0x17, 0x2c, 0x55, 0xd5, 0x2a, 0x1e, 0xf3, 0x4f, 0x15, 0xa8, 0xaa, 0x62, 0x99,
+	0xdc, 0x49, 0xf5, 0x27, 0x88, 0x84, 0xbd, 0xe6, 0xd5, 0x8b, 0xad, 0x1a, 0x82, 0xc6, 0xc9, 0xc3,
+	0xa4, 0x59, 0x49, 0xd2, 0x63, 0x29, 0x53, 0xcb, 0x47, 0x9d, 0x51, 0xf9, 0xb5, 0x3b, 0xa3, 0x0d,
+	0xa8, 0x79, 0xd3, 0x89, 0x2d, 0x66, 0x1c, 0x23, 0xbc, 0x6c, 0x55, 0xbd, 0xe9, 0xe4, 0xc9, 0x8c,
+	0x4b, 0x53, 0x0b, 0x5f, 0xd0, 0x31, 0x92, 0x54, 0x88, 0xd7, 0x71, 0x41, 0x12, 0x0f, 0x61, 0x39,
+	0x85, 0xad, 0xae, 0xa3, 0x0b, 0xb7, 0x56, 0xda, 0x19, 0x4e, 0x1e, 0x6a, 0x75, 0x9b, 0x31, 0xd6,
+	0x9e, 0x38, 0x64, 0x3b, 0xdb, 0x08, 0x20, 0x24, 0x2b, 0x5c, 0x48, 0xd5, 0xfa, 0x12, 0x90, 0xe5,
+	0x05, 0xa4, 0x93, 0x2b, 0x16, 0x05, 0x12, 0x75, 0xb9, 0x80, 0xc4, 0xb7, 0xa0, 0x9d, 0xa0, 0x9a,
+	0x62, 0x69, 0xa8, 0x5d, 0x92, 0x65, 0x64, 0x7c, 0x17, 0xd6, 0x3c, 0x36, 0x13, 0x76, 0x9e, 0x1b,
+	0x90, 0x9b, 0x48, 0xda, 0x59, 0x56, 0xe2, 0xdb, 0xd0, 0x4a, 0xd2, 0x00, 0xf2, 0x36, 0x55, 0x3b,
+	0x16, 0xaf, 0x22, 0xdb, 0x2d, 0xa8, 0xc7, 0x35, 0xc5, 0x12, 0x32, 0xd4, 0xa8, 0x2a, 0x25, 0xe2,
+	0x2a, 0x25, 0x64, 0x7c, 0x3a, 0x16, 0x7a, 0x93, 0x65, 0xe4, 0xc1, 0x2a, 0xc5, 0x52, 0xeb, 0xc8,
+	0xfb, 0x4d, 0x58, 0x66, 0xba, 0x5d, 0x51, 0x7c, 0x2d, 0xe4, 0x5b, 0x8a, 0x16, 0x91, 0x69, 0x07,
+	0x56, 0x82, 0xd0, 0x0f, 0x7c, 0xce, 0x42, 0x9b, 0x3a, 0x4e, 0xc8, 0x38, 0xef, 0xb4, 0xd5, 0x7e,
+	0xd1, 0xfa, 0x91, 0x5a, 0x36, 0x7f, 0x01, 0x35, 0x6d, 0xfd, 0xc2, 0xa6, 0xed, 0x07, 0xb0, 0x24,
+	0xbd, 0x9e, 0xdb, 0x99, 0xd6, 0x2d, 0x2a, 0x9d, 0xd1, 0xe9, 0x99, 0xc8, 0x74, 0x70, 0x4d, 0xe4,
+	0x57, 0x4b, 0xe6, 0x3d, 0x58, 0xce, 0xf0, 0xc8, 0x30, 0x40, 0xa7, 0x88, 0xc2, 0x00, 0x27, 0xf1,
+	0xc9, 0xa5, 0xe4, 0x64, 0xf3, 0x3e, 0x34, 0x62, 0x43, 0xcb, 0x0a, 0x2f, 0xd2, 0xc3, 0xd0, 0xb6,
+	0x53, 0x53, 0x2c, 0x0b, 0xfc, 0xcf, 0x58, 0xa8, 0xab, 0x3a, 0x35, 0x31, 0x9f, 0x42, 0x3b, 0x97,
+	0xc5, 0xc9, 0x2e, 0xd4, 0x82, 0x69, 0xdf, 0x8e, 0x5e, 0x13, 0x92, 0xb4, 0x75, 0x3a, 0xed, 0x7f,
+	0xcc, 0x2e, 0xa3, 0xfe, 0x33, 0xc0, 0x59, 0xb2, 0x6d, 0x29, 0xbd, 0xed, 0x18, 0xea, 0x51, 0x68,
+	0x92, 0xef, 0x41, 0x23, 0xf6, 0x91, 0x5c, 0xda, 0x8c, 0x8f, 0xd6, 0x9b, 0x26, 0x8c, 0xf2, 0x53,
+	0x73, 0x77, 0xe8, 0x31, 0xc7, 0x4e, 0xe2, 0x01, 0xcf, 0xa8, 0x5b, 0x6d, 0x45, 0xf8, 0x24, 0x72,
+	0x7e, 0xf3, 0x5d, 0xa8, 0xaa, 0xbb, 0x49, 0xfb, 0xc8, 0x9d, 0xa3, 0xa2, 0x57, 0x8e, 0x0b, 0xf3,
+	0xfb, 0x9f, 0x0d, 0xa8, 0x47, 0xcd, 0x6c, 0xa1, 0x50, 0xe6, 0xd2, 0xa5, 0x57, 0xbd, 0xf4, 0xbc,
+	0x17, 0x81, 0x28, 0x8b, 0x54, 0x5e, 0x3b, 0x8b, 0xec, 0x02, 0x51, 0xc9, 0xe2, 0xc2, 0x17, 0xae,
+	0x37, 0xb4, 0x95, 0xad, 0x55, 0xd6, 0x58, 0x41, 0xca, 0x19, 0x12, 0x4e, 0xe5, 0xfa, 0xc1, 0xe7,
+	0x8b, 0xd0, 0x3e, 0xea, 0x3d, 0x38, 0x39, 0x0a, 0x82, 0xb1, 0x3b, 0xa0, 0x58, 0x69, 0xef, 0x43,
+	0x05, 0x7b, 0x89, 0x82, 0x57, 0xcc, 0x6e, 0x51, 0x53, 0x4b, 0x0e, 0x60, 0x11, 0x5b, 0x0a, 0x52,
+	0xf4, 0x98, 0xd9, 0x2d, 0xec, 0x6d, 0xe5, 0x21, 0xaa, 0xe9, 0xb8, 0xfe, 0xa6, 0xd9, 0x2d, 0x6a,
+	0x70, 0xc9, 0x87, 0xd0, 0x48, 0x9a, 0x81, 0x79, 0x2f, 0x9b, 0xdd, 0xb9, 0xad, 0xae, 0x94, 0x4f,
+	0x6a, 0xb0, 0x79, 0x0f, 0x74, 0xdd, 0xb9, 0x3d, 0x21, 0x39, 0x84, 0x5a, 0x54, 0x9b, 0x16, 0xbf,
+	0x3d, 0x76, 0xe7, 0xb4, 0xa1, 0xd2, 0x3c, 0xaa, 0xbe, 0x2f, 0x7a, 0x20, 0xed, 0x16, 0xf6, 0xca,
+	0xe4, 0x2e, 0x54, 0x75, 0xb1, 0x51, 0xf8, 0xfe, 0xd8, 0x2d, 0x6e, 0x26, 0xa5, 0x92, 0x49, 0x87,
+	0x33, 0xef, 0x11, 0xb7, 0x3b, 0xb7, 0xa9, 0x27, 0x47, 0x00, 0xa9, 0x9a, 0x7e, 0xee, 0xeb, 0x6c,
+	0x77, 0x7e, 0xb3, 0x4e, 0xee, 0x43, 0x3d, 0x79, 0x80, 0x29, 0x7e, 0x6f, 0xed, 0xce, 0xeb, 0x9f,
+	0x7b, 0x5f, 0xff, 0xef, 0x3f, 0x36, 0x8d, 0xdf, 0x5c, 0x6d, 0x1a, 0xbf, 0xbb, 0xda, 0x34, 0xbe,
+	0xbc, 0xda, 0x34, 0xfe, 0x78, 0xb5, 0x69, 0xfc, 0xfd, 0x6a, 0xd3, 0xf8, 0xfd, 0x3f, 0x37, 0x8d,
+	0x7e, 0x15, 0xdd, 0xff, 0xbd, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x48, 0x26, 0x13, 0xca, 0x22,
+	0x18, 0x00, 0x00,
 }
diff --git a/abci/types/types.proto b/abci/types/types.proto
index 6e6b1cd3671..75a53ac46f8 100644
--- a/abci/types/types.proto
+++ b/abci/types/types.proto
@@ -60,7 +60,7 @@ message RequestInitChain {
   google.protobuf.Timestamp time = 1 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
   string chain_id = 2;
   ConsensusParams consensus_params = 3;
-  repeated Validator validators = 4 [(gogoproto.nullable)=false];
+  repeated ValidatorUpdate validators = 4 [(gogoproto.nullable)=false];
   bytes app_state_bytes = 5;
 }

@@ -143,7 +143,7 @@ message ResponseSetOption {

 message ResponseInitChain {
   ConsensusParams consensus_params = 1;
-  repeated Validator validators = 2 [(gogoproto.nullable)=false];
+  repeated ValidatorUpdate validators = 2 [(gogoproto.nullable)=false];
 }

 message ResponseQuery {
@@ -183,7 +183,7 @@ message ResponseDeliverTx {
 }

 message ResponseEndBlock {
-  repeated Validator validator_updates = 1 [(gogoproto.nullable)=false];
+  repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
   ConsensusParams consensus_param_updates = 2;
   repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
 }
@@ -207,8 +207,7 @@ message ConsensusParams {
 // BlockSize contains limits on the block size.
 message BlockSize {
   int32 max_bytes = 1;
-  int32 max_txs = 2;
-  int64 max_gas = 3;
+  int64 max_gas = 2;
 }

 // TxSize contains limits on the tx size.
@@ -225,42 +224,65 @@ message BlockGossip {
 }

 message LastCommitInfo {
-  int32 commit_round = 1;
-  repeated SigningValidator validators = 2 [(gogoproto.nullable)=false];
+  int32 round = 1;
+  repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
 }

 //----------------------------------------
 // Blockchain Types

-// just the minimum the app might need
 message Header {
-  // basics
+  // basic block info
   string chain_id = 1 [(gogoproto.customname)="ChainID"];
   int64 height = 2;
   google.protobuf.Timestamp time = 3 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
-
-  // txs
-  int32 num_txs = 4;
+  int64 num_txs = 4;
   int64 total_txs = 5;

-  // hashes
-  bytes last_block_hash = 6;
-  bytes validators_hash = 7;
-  bytes app_hash = 8;
+  // prev block info
+  BlockID last_block_id = 6 [(gogoproto.nullable)=false];
+
+  // hashes of block data
+  bytes last_commit_hash = 7; // commit from validators from the last block
+  bytes data_hash = 8;        // transactions

-  // consensus
-  Validator proposer = 9 [(gogoproto.nullable)=false];
+  // hashes from the app output from the prev block
+  bytes validators_hash = 9;       // validators for the current block
+  bytes next_validators_hash = 10; // validators for the next block
+  bytes consensus_hash = 11;       // consensus params for current block
+  bytes app_hash = 12;             // state after txs from the previous block
+  bytes last_results_hash = 13;    // root hash of all results from the txs from the previous block
+
+  // consensus info
+  bytes evidence_hash = 14;    // evidence included in the block
+  bytes proposer_address = 15; // original proposer of the block
+}
+
+message BlockID {
+  bytes hash = 1;
+  PartSetHeader parts_header = 2 [(gogoproto.nullable)=false];
+}
+
+message PartSetHeader {
+  int32 total = 1;
+  bytes hash = 2;
 }

 // Validator
 message Validator {
   bytes address = 1;
-  PubKey pub_key = 2 [(gogoproto.nullable)=false];
+  //PubKey pub_key = 2 [(gogoproto.nullable)=false];
   int64 power = 3;
 }

-// Validator with an extra bool
-message SigningValidator {
+// ValidatorUpdate
+message ValidatorUpdate {
+  PubKey pub_key = 1 [(gogoproto.nullable)=false];
+  int64 power = 2;
+}
+
+// VoteInfo
+message VoteInfo {
   Validator validator = 1 [(gogoproto.nullable)=false];
   bool signed_last_block = 2;
 }
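The proto changes above split the old Validator message into two purpose-specific types: ValidatorUpdate (pubkey plus power, used in InitChain and EndBlock) and VoteInfo (an address-keyed Validator plus a signed_last_block flag, used in LastCommitInfo). A minimal sketch of returning a validator-set change under the regenerated Go bindings follows; the PubKey type string and the exact field names are assumptions inferred from the proto, not taken from this patch:

    package main

    import (
    	"fmt"

    	abci "github.com/tendermint/tendermint/abci/types"
    )

    func endBlock() abci.ResponseEndBlock {
    	// A ValidatorUpdate identifies the validator by pubkey only;
    	// setting Power to 0 would remove it from the set.
    	update := abci.ValidatorUpdate{
    		PubKey: abci.PubKey{Type: "ed25519", Data: make([]byte, 32)}, // placeholder key
    		Power:  10,
    	}
    	return abci.ResponseEndBlock{
    		ValidatorUpdates: []abci.ValidatorUpdate{update},
    	}
    }

    func main() {
    	fmt.Println(endBlock())
    }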
diff --git a/abci/types/types_test.go b/abci/types/types_test.go
deleted file mode 100644
index baa8155cd0c..00000000000
--- a/abci/types/types_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package types
-
-import (
-	"testing"
-
-	asrt "github.com/stretchr/testify/assert"
-)
-
-func TestConsensusParams(t *testing.T) {
-	assert := asrt.New(t)
-
-	params := &ConsensusParams{
-		BlockSize:   &BlockSize{MaxGas: 12345},
-		BlockGossip: &BlockGossip{BlockPartSizeBytes: 54321},
-	}
-	var noParams *ConsensusParams // nil
-
-	// no error with nil fields
-	assert.Nil(noParams.GetBlockSize())
-	assert.EqualValues(noParams.GetBlockSize().GetMaxGas(), 0)
-
-	// get values with real fields
-	assert.NotNil(params.GetBlockSize())
-	assert.EqualValues(params.GetBlockSize().GetMaxTxs(), 0)
-	assert.EqualValues(params.GetBlockSize().GetMaxGas(), 12345)
-	assert.NotNil(params.GetBlockGossip())
-	assert.EqualValues(params.GetBlockGossip().GetBlockPartSizeBytes(), 54321)
-	assert.Nil(params.GetTxSize())
-	assert.EqualValues(params.GetTxSize().GetMaxBytes(), 0)
-
-}
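The deleted test leaned on a gogoproto convention worth remembering: generated GetX accessors are nil-safe, so a chained call on a nil *ConsensusParams returns zero values instead of panicking. Roughly, the generated code has this shape (a hand-written approximation, not the actual generated source):

    package main

    import "fmt"

    type BlockSize struct {
    	MaxBytes int32
    	MaxGas   int64
    }

    type ConsensusParams struct {
    	BlockSize *BlockSize
    }

    // GetBlockSize mirrors the generated accessor: it checks the receiver
    // for nil, which is what made noParams.GetBlockSize() safe above.
    func (m *ConsensusParams) GetBlockSize() *BlockSize {
    	if m != nil {
    		return m.BlockSize
    	}
    	return nil
    }

    // GetMaxGas likewise returns the zero value on a nil receiver.
    func (m *BlockSize) GetMaxGas() int64 {
    	if m != nil {
    		return m.MaxGas
    	}
    	return 0
    }

    func main() {
    	var noParams *ConsensusParams // nil, as in the deleted test
    	fmt.Println(noParams.GetBlockSize().GetMaxGas()) // prints 0, no panic
    }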
diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go
index 33a368af443..0411afc8479 100644
--- a/abci/types/typespb_test.go
+++ b/abci/types/typespb_test.go
@@ -1758,6 +1758,118 @@ func TestHeaderMarshalTo(t *testing.T) {
 	}
 }

+func TestBlockIDProto(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, false)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &BlockID{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	littlefuzz := make([]byte, len(dAtA))
+	copy(littlefuzz, dAtA)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+	if len(littlefuzz) > 0 {
+		fuzzamount := 100
+		for i := 0; i < fuzzamount; i++ {
+			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
+			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
+		}
+		// shouldn't panic
+		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
+	}
+}
+
+func TestBlockIDMarshalTo(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, false)
+	size := p.Size()
+	dAtA := make([]byte, size)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	_, err := p.MarshalTo(dAtA)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &BlockID{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestPartSetHeaderProto(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, false)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &PartSetHeader{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	littlefuzz := make([]byte, len(dAtA))
+	copy(littlefuzz, dAtA)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+	if len(littlefuzz) > 0 {
+		fuzzamount := 100
+		for i := 0; i < fuzzamount; i++ {
+			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
+			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
+		}
+		// shouldn't panic
+		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
+	}
+}
+
+func TestPartSetHeaderMarshalTo(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, false)
+	size := p.Size()
+	dAtA := make([]byte, size)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	_, err := p.MarshalTo(dAtA)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &PartSetHeader{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
 func TestValidatorProto(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
@@ -1814,15 +1926,71 @@ func TestValidatorMarshalTo(t *testing.T) {
 	}
 }

-func TestSigningValidatorProto(t *testing.T) {
+func TestValidatorUpdateProto(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, false)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &ValidatorUpdate{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	littlefuzz := make([]byte, len(dAtA))
+	copy(littlefuzz, dAtA)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+	if len(littlefuzz) > 0 {
+		fuzzamount := 100
+		for i := 0; i < fuzzamount; i++ {
+			littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
+			littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
+		}
+		// shouldn't panic
+		_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
+	}
+}
+
+func TestValidatorUpdateMarshalTo(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, false)
+	size := p.Size()
+	dAtA := make([]byte, size)
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	_, err := p.MarshalTo(dAtA)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &ValidatorUpdate{}
+	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	for i := range dAtA {
+		dAtA[i] = byte(popr.Intn(256))
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestVoteInfoProto(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, false)
+	p := NewPopulatedVoteInfo(popr, false)
 	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
 	if err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
-	msg := &SigningValidator{}
+	msg := &VoteInfo{}
 	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
@@ -1845,10 +2013,10 @@ func TestSigningValidatorProto(t *testing.T) {
 	}
 }
-func TestSigningValidatorMarshalTo(t *testing.T) {
+func TestVoteInfoMarshalTo(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, false)
+	p := NewPopulatedVoteInfo(popr, false)
 	size := p.Size()
 	dAtA := make([]byte, size)
 	for i := range dAtA {
@@ -1858,7 +2026,7 @@ func TestSigningValidatorMarshalTo(t *testing.T) {
 	if err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
-	msg := &SigningValidator{}
+	msg := &VoteInfo{}
 	if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
@@ -2540,6 +2708,42 @@ func TestHeaderJSON(t *testing.T) {
 		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
 	}
 }
+func TestBlockIDJSON(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, true)
+	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
+	jsondata, err := marshaler.MarshalToString(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &BlockID{}
+	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
+	}
+}
+func TestPartSetHeaderJSON(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, true)
+	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
+	jsondata, err := marshaler.MarshalToString(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &PartSetHeader{}
+	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
+	}
+}
 func TestValidatorJSON(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
@@ -2558,16 +2762,34 @@ func TestValidatorJSON(t *testing.T) {
 		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
 	}
 }
-func TestSigningValidatorJSON(t *testing.T) {
+func TestValidatorUpdateJSON(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, true)
+	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
+	jsondata, err := marshaler.MarshalToString(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	msg := &ValidatorUpdate{}
+	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
+	}
+}
+func TestVoteInfoJSON(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, true)
+	p := NewPopulatedVoteInfo(popr, true)
 	marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
 	jsondata, err := marshaler.MarshalToString(p)
 	if err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
-	msg := &SigningValidator{}
+	msg := &VoteInfo{}
 	err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
 	if err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
@@ -3480,6 +3702,62 @@ func TestHeaderProtoCompactText(t *testing.T) {
 	}
 }
+func TestBlockIDProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
+	msg := &BlockID{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestBlockIDProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
+	msg := &BlockID{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestPartSetHeaderProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
+	msg := &PartSetHeader{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestPartSetHeaderProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
+	msg := &PartSetHeader{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
 func TestValidatorProtoText(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
@@ -3508,12 +3786,40 @@ func TestValidatorProtoCompactText(t *testing.T) {
 	}
 }

-func TestSigningValidatorProtoText(t *testing.T) {
+func TestValidatorUpdateProtoText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
+	msg := &ValidatorUpdate{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestValidatorUpdateProtoCompactText(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, true)
+	dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
+	msg := &ValidatorUpdate{}
+	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	if !p.Equal(msg) {
+		t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
+	}
+}
+
+func TestVoteInfoProtoText(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, true)
+	p := NewPopulatedVoteInfo(popr, true)
 	dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
-	msg := &SigningValidator{}
+	msg := &VoteInfo{}
 	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
@@ -3522,12 +3828,12 @@ func TestSigningValidatorProtoText(t *testing.T) {
 	}
 }

-func TestSigningValidatorProtoCompactText(t *testing.T) {
+func TestVoteInfoProtoCompactText(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, true)
+	p := NewPopulatedVoteInfo(popr, true)
 	dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
-	msg := &SigningValidator{}
+	msg := &VoteInfo{}
 	if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
 		t.Fatalf("seed = %d, err = %v", seed, err)
 	}
@@ -4274,6 +4580,50 @@ func TestHeaderSize(t *testing.T) {
 	}
 }

+func TestBlockIDSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedBlockID(popr, true)
+	size2 := github_com_gogo_protobuf_proto.Size(p)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := github_com_gogo_protobuf_proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestPartSetHeaderSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedPartSetHeader(popr, true)
+	size2 := github_com_gogo_protobuf_proto.Size(p)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := github_com_gogo_protobuf_proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
 func TestValidatorSize(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
@@ -4296,10 +4646,32 @@ func TestValidatorSize(t *testing.T) {
 	}
 }

-func TestSigningValidatorSize(t *testing.T) {
+func TestValidatorUpdateSize(t *testing.T) {
+	seed := time.Now().UnixNano()
+	popr := math_rand.New(math_rand.NewSource(seed))
+	p := NewPopulatedValidatorUpdate(popr, true)
+	size2 := github_com_gogo_protobuf_proto.Size(p)
+	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
+	if err != nil {
+		t.Fatalf("seed = %d, err = %v", seed, err)
+	}
+	size := p.Size()
+	if len(dAtA) != size {
+		t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
+	}
+	if size2 != size {
+		t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
+	}
+	size3 := github_com_gogo_protobuf_proto.Size(p)
+	if size3 != size {
+		t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
+	}
+}
+
+func TestVoteInfoSize(t *testing.T) {
 	seed := time.Now().UnixNano()
 	popr := math_rand.New(math_rand.NewSource(seed))
-	p := NewPopulatedSigningValidator(popr, true)
+	p := NewPopulatedVoteInfo(popr, true)
 	size2 := github_com_gogo_protobuf_proto.Size(p)
 	dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
 	if err != nil {
( "bytes" - "encoding/json" "sort" - - cmn "github.com/tendermint/tendermint/libs/common" ) //------------------------------------------------------------------------------ -// Validators is a list of validators that implements the Sort interface -type Validators []Validator +// ValidatorUpdates is a list of validators that implements the Sort interface +type ValidatorUpdates []ValidatorUpdate -var _ sort.Interface = (Validators)(nil) +var _ sort.Interface = (ValidatorUpdates)(nil) -// All these methods for Validators: +// All these methods for ValidatorUpdates: // Len, Less and Swap -// are for Validators to implement sort.Interface +// are for ValidatorUpdates to implement sort.Interface // which will be used by the sort package. // See Issue https://github.com/tendermint/abci/issues/212 -func (v Validators) Len() int { +func (v ValidatorUpdates) Len() int { return len(v) } // XXX: doesn't distinguish same validator with different power -func (v Validators) Less(i, j int) bool { +func (v ValidatorUpdates) Less(i, j int) bool { return bytes.Compare(v[i].PubKey.Data, v[j].PubKey.Data) <= 0 } -func (v Validators) Swap(i, j int) { +func (v ValidatorUpdates) Swap(i, j int) { v1 := v[i] v[i] = v[j] v[j] = v1 } - -func ValidatorsString(vs Validators) string { - s := make([]validatorPretty, len(vs)) - for i, v := range vs { - s[i] = validatorPretty{ - Address: v.Address, - PubKey: v.PubKey.Data, - Power: v.Power, - } - } - b, err := json.Marshal(s) - if err != nil { - panic(err.Error()) - } - return string(b) -} - -type validatorPretty struct { - Address cmn.HexBytes `json:"address"` - PubKey []byte `json:"pub_key"` - Power int64 `json:"power"` -} diff --git a/abci/version/version.go b/abci/version/version.go index 7223a86adaa..f4dc4d23586 100644 --- a/abci/version/version.go +++ b/abci/version/version.go @@ -1,9 +1,9 @@ package version -// NOTE: we should probably be versioning the ABCI and the abci-cli separately +import ( + "github.com/tendermint/tendermint/version" +) -const Maj = "0" -const Min = "12" -const Fix = "0" +// TODO: eliminate this after some version refactor -const Version = "0.12.0" +const Version = version.ABCIVersion diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index b7d2c4d6388..c24eddf8acc 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" cmn "github.com/tendermint/tendermint/libs/common" + rpcclient "github.com/tendermint/tendermint/rpc/lib/client" ) func main() { diff --git a/blockchain/pool.go b/blockchain/pool.go index a881c7cb736..c7864a64630 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -365,10 +365,10 @@ func (pool *BlockPool) debug() string { nextHeight := pool.height + pool.requestersLen() for h := pool.height; h < nextHeight; h++ { if pool.requesters[h] == nil { - str += cmn.Fmt("H(%v):X ", h) + str += fmt.Sprintf("H(%v):X ", h) } else { - str += cmn.Fmt("H(%v):", h) - str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil) + str += fmt.Sprintf("H(%v):", h) + str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil) } } return str diff --git a/blockchain/reactor.go b/blockchain/reactor.go index f00df50c328..f975737c6d8 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -201,7 +201,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Got a peer status. Unverified. 
diff --git a/abci/version/version.go b/abci/version/version.go
index 7223a86adaa..f4dc4d23586 100644
--- a/abci/version/version.go
+++ b/abci/version/version.go
@@ -1,9 +1,9 @@
 package version

-// NOTE: we should probably be versioning the ABCI and the abci-cli separately
+import (
+	"github.com/tendermint/tendermint/version"
+)

-const Maj = "0"
-const Min = "12"
-const Fix = "0"
+// TODO: eliminate this after some version refactor

-const Version = "0.12.0"
+const Version = version.ABCIVersion
diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go
index b7d2c4d6388..c24eddf8acc 100644
--- a/benchmarks/simu/counter.go
+++ b/benchmarks/simu/counter.go
@@ -6,8 +6,8 @@ import (
 	"fmt"
 	"time"

-	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
 	cmn "github.com/tendermint/tendermint/libs/common"
+	rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
 )

 func main() {
diff --git a/blockchain/pool.go b/blockchain/pool.go
index a881c7cb736..c7864a64630 100644
--- a/blockchain/pool.go
+++ b/blockchain/pool.go
@@ -365,10 +365,10 @@ func (pool *BlockPool) debug() string {
 	nextHeight := pool.height + pool.requestersLen()
 	for h := pool.height; h < nextHeight; h++ {
 		if pool.requesters[h] == nil {
-			str += cmn.Fmt("H(%v):X ", h)
+			str += fmt.Sprintf("H(%v):X ", h)
 		} else {
-			str += cmn.Fmt("H(%v):", h)
-			str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil)
+			str += fmt.Sprintf("H(%v):", h)
+			str += fmt.Sprintf("B?(%v) ", pool.requesters[h].block != nil)
 		}
 	}
 	return str
diff --git a/blockchain/reactor.go b/blockchain/reactor.go
index f00df50c328..f975737c6d8 100644
--- a/blockchain/reactor.go
+++ b/blockchain/reactor.go
@@ -201,7 +201,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
 		// Got a peer status. Unverified.
 		bcR.pool.SetPeerHeight(src.ID(), msg.Height)
 	default:
-		bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg)))
+		bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
 	}
 }

@@ -321,7 +321,7 @@ FOR_LOOP:
 			state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
 			if err != nil {
 				// TODO This is bad, are we zombie?
-				cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v",
+				cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
 					first.Height, first.Hash(), err))
 			}
 			blocksSynced++
@@ -356,11 +356,11 @@ type BlockchainMessage interface{}

 func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
-	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil)
-	cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil)
-	cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil)
-	cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil)
-	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil)
+	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
+	cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
+	cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
+	cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
+	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
 }

 func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
@@ -378,7 +378,7 @@ type bcBlockRequestMessage struct {
 }

 func (m *bcBlockRequestMessage) String() string {
-	return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height)
+	return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
 }

 type bcNoBlockResponseMessage struct {
@@ -386,7 +386,7 @@ type bcNoBlockResponseMessage struct {
 }

 func (brm *bcNoBlockResponseMessage) String() string {
-	return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height)
+	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
 }

 //-------------------------------------
@@ -396,7 +396,7 @@ type bcBlockResponseMessage struct {
 }

 func (m *bcBlockResponseMessage) String() string {
-	return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height)
+	return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
 }

 //-------------------------------------
@@ -406,7 +406,7 @@ type bcStatusRequestMessage struct {
 }

 func (m *bcStatusRequestMessage) String() string {
-	return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height)
+	return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
 }

 //-------------------------------------
@@ -416,5 +416,5 @@ type bcStatusResponseMessage struct {
 }

 func (m *bcStatusResponseMessage) String() string {
-	return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height)
+	return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
 }
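The recurring cmn.Fmt to fmt.Sprintf substitution in this file and those that follow is mechanical: cmn.Fmt in libs/common appears to have been a thin wrapper over fmt.Sprintf, so the standard-library call is a drop-in replacement with identical output:

    package main

    import "fmt"

    func main() {
    	height := int64(42)
    	// Before (helper assumed from libs/common): cmn.Fmt("H(%v):X ", height)
    	// After: the standard library call it wrapped.
    	s := fmt.Sprintf("H(%v):X ", height)
    	fmt.Println(s)
    }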
diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go
index 11991bda4c8..f590fd52e93 100644
--- a/blockchain/reactor_test.go
+++ b/blockchain/reactor_test.go
@@ -156,7 +156,7 @@ func makeTxs(height int64) (txs []types.Tx) {
 }

 func makeBlock(height int64, state sm.State) *types.Block {
-	block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil)
+	block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil,
+		state.Validators.GetProposer().Address)
 	return block
 }

diff --git a/blockchain/store.go b/blockchain/store.go
index f02d4facbcf..fa9ee5189f8 100644
--- a/blockchain/store.go
+++ b/blockchain/store.go
@@ -148,10 +148,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
 	}
 	height := block.Height
 	if g, w := height, bs.Height()+1; g != w {
-		cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
+		cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
 	}
 	if !blockParts.IsComplete() {
-		cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets"))
+		cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
 	}

 	// Save block meta
@@ -188,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s

 func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
 	if height != bs.Height()+1 {
-		cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
+		cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
 	}
 	partBytes := cdc.MustMarshalBinaryBare(part)
 	bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -224,7 +224,7 @@ type BlockStoreStateJSON struct {
 func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
 	bytes, err := cdc.MarshalJSON(bsj)
 	if err != nil {
-		cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err))
+		cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
 	}
 	db.SetSync(blockStoreKey, bytes)
 }
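BlockStore.SaveBlock keeps its contract across this refactor: it panics unless the new block's height is exactly bs.Height()+1 and the part set is complete. A caller-side sketch of guarding that contract; guardedSave is a hypothetical helper written for illustration, not part of this patch:

    package blockchain

    import (
    	"fmt"

    	"github.com/tendermint/tendermint/types"
    )

    // guardedSave checks the SaveBlock preconditions up front and returns an
    // error instead of letting SaveBlock panic: heights must be contiguous
    // and the part set complete.
    func guardedSave(bs *BlockStore, block *types.Block, parts *types.PartSet, seen *types.Commit) error {
    	if block.Height != bs.Height()+1 {
    		return fmt.Errorf("non-contiguous save: store at %d, block at %d", bs.Height(), block.Height)
    	}
    	if !parts.IsComplete() {
    		return fmt.Errorf("incomplete part set for height %d", block.Height)
    	}
    	bs.SaveBlock(block, parts, seen)
    	return nil
    }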
diff --git a/blockchain/store_test.go b/blockchain/store_test.go
index c816540050f..9c8fdb23c7f 100644
--- a/blockchain/store_test.go
+++ b/blockchain/store_test.go
@@ -6,7 +6,6 @@ import (
 	"runtime/debug"
 	"strings"
 	"testing"
-	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -14,6 +13,7 @@ import (

 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/types"
+	tmtime "github.com/tendermint/tendermint/types/time"
 )

 func TestLoadBlockStoreStateJSON(t *testing.T) {
@@ -49,7 +49,7 @@ func TestNewBlockStore(t *testing.T) {
 			return nil, nil
 		})
 		require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data)
-		assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data)
+		assert.Contains(t, fmt.Sprintf("%#v", panicErr), tt.wantErr, "#%d data: %q", i, tt.data)
 	}

 	db.Set(blockStoreKey, nil)
@@ -70,7 +70,7 @@ var (
 	part1       = partSet.GetPart(0)
 	part2       = partSet.GetPart(1)
 	seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
+		Timestamp: tmtime.Now()}}}
 )

 // TODO: This test should be simplified ...
@@ -91,7 +91,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 	block := makeBlock(bs.Height()+1, state)
 	validPartSet := block.MakePartSet(2)
 	seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
+		Timestamp: tmtime.Now()}}}
 	bs.SaveBlock(block, partSet, seenCommit)
 	require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")

@@ -103,7 +103,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 		Height:  1,
 		NumTxs:  100,
 		ChainID: "block_test",
-		Time:    time.Now(),
+		Time:    tmtime.Now(),
 	}
 	header2 := header1
 	header2.Height = 4
@@ -111,7 +111,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {

 	// End of setup, test data
 	commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
+		Timestamp: tmtime.Now()}}}
 	tuples := []struct {
 		block     *types.Block
 		parts     *types.PartSet
@@ -238,7 +238,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
 		if subStr := tuple.wantPanic; subStr != "" {
 			if panicErr == nil {
 				t.Errorf("#%d: want a non-nil panic", i)
-			} else if got := panicErr.Error(); !strings.Contains(got, subStr) {
+			} else if got := fmt.Sprintf("%#v", panicErr); !strings.Contains(got, subStr) {
 				t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr)
 			}
 			continue
@@ -335,7 +335,7 @@ func TestBlockFetchAtHeight(t *testing.T) {
 	partSet := block.MakePartSet(2)

 	seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
+		Timestamp: tmtime.Now()}}}
 	bs.SaveBlock(block, partSet, seenCommit)
 	require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
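The time.Now() to tmtime.Now() swaps in these tests (and in init and testnet below) standardize on the types/time package, so that timestamps are canonical UTC values rather than carrying a local zone and a monotonic clock reading. If the helper were unavailable, the same normalization could be sketched like this; this is an illustration of the idea, not the actual tmtime implementation:

    package main

    import (
    	"fmt"
    	"time"
    )

    // canonicalNow approximates what a canonical-time helper does:
    // Round(0) strips the monotonic reading and UTC() pins the zone,
    // so equal instants compare and serialize identically everywhere.
    func canonicalNow() time.Time {
    	return time.Now().Round(0).UTC()
    }

    func main() {
    	fmt.Println(canonicalNow())
    }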
diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go
index 7aedcd0dcc1..38dc5c66d8a 100644
--- a/cmd/tendermint/commands/gen_node_key.go
+++ b/cmd/tendermint/commands/gen_node_key.go
@@ -5,8 +5,8 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/tendermint/tendermint/p2p"
 	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/p2p"
 )

 // GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go
index a44c73ebf12..dac4cd9a5f7 100644
--- a/cmd/tendermint/commands/init.go
+++ b/cmd/tendermint/commands/init.go
@@ -1,15 +1,16 @@
 package commands

 import (
-	"time"
+	"fmt"

 	"github.com/spf13/cobra"

 	cfg "github.com/tendermint/tendermint/config"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmtime "github.com/tendermint/tendermint/types/time"
 )

 // InitFilesCmd initialises a fresh Tendermint Core instance.
@@ -52,8 +53,8 @@ func initFilesWithConfig(config *cfg.Config) error {
 		logger.Info("Found genesis file", "path", genFile)
 	} else {
 		genDoc := types.GenesisDoc{
-			ChainID:     cmn.Fmt("test-chain-%v", cmn.RandStr(6)),
-			GenesisTime: time.Now(),
+			ChainID:     fmt.Sprintf("test-chain-%v", cmn.RandStr(6)),
+			GenesisTime: tmtime.Now(),
 			ConsensusParams: types.DefaultConsensusParams(),
 		}
 		genDoc.Validators = []types.GenesisValidator{{
diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go
index d5759881654..edad4fbb7a4 100644
--- a/cmd/tendermint/commands/lite.go
+++ b/cmd/tendermint/commands/lite.go
@@ -7,7 +7,6 @@ import (
 	"github.com/spf13/cobra"

 	cmn "github.com/tendermint/tendermint/libs/common"
-
 	"github.com/tendermint/tendermint/lite/proxy"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 )

@@ -66,17 +65,21 @@ func runProxy(cmd *cobra.Command, args []string) error {
 	}

 	// First, connect a client
+	logger.Info("Connecting to source HTTP client...")
 	node := rpcclient.NewHTTP(nodeAddr, "/websocket")

-	cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
+	logger.Info("Constructing Verifier...")
+	cert, err := proxy.NewVerifier(chainID, home, node, logger)
 	if err != nil {
-		return err
+		return cmn.ErrorWrap(err, "constructing Verifier")
 	}
+	cert.SetLogger(logger)
 	sc := proxy.SecureClient(node, cert)

+	logger.Info("Starting proxy...")
 	err = proxy.StartProxy(sc, listenAddr, logger)
 	if err != nil {
-		return err
+		return cmn.ErrorWrap(err, "starting proxy")
 	}

 	cmn.TrapSignal(func() {
diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go
index ef0ba301912..53d3471294b 100644
--- a/cmd/tendermint/commands/reset_priv_validator.go
+++ b/cmd/tendermint/commands/reset_priv_validator.go
@@ -5,8 +5,8 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/privval"
 )

 // ResetAllCmd removes the database of this Tendermint core
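In lite.go the bare `return err` becomes cmn.ErrorWrap(err, "..."), attaching a message about the failing step while keeping the cause. Under pre-1.13 Go (no stdlib error wrapping yet), the same idea can be sketched with fmt.Errorf; wrap here is a stand-in for illustration, not the libs/common implementation:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // wrap mimics the intent of cmn.ErrorWrap: annotate an error with
    // context about the failing step without discarding the original.
    func wrap(err error, msg string) error {
    	if err == nil {
    		return nil
    	}
    	return fmt.Errorf("%s: %v", msg, err)
    }

    func main() {
    	err := errors.New("connection refused")
    	fmt.Println(wrap(err, "constructing Verifier"))
    }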
diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go
index f7639fb275d..d29c29eb1cb 100644
--- a/cmd/tendermint/commands/testnet.go
+++ b/cmd/tendermint/commands/testnet.go
@@ -6,15 +6,15 @@ import (
 	"os"
 	"path/filepath"
 	"strings"
-	"time"

 	"github.com/spf13/cobra"

 	cfg "github.com/tendermint/tendermint/config"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmtime "github.com/tendermint/tendermint/types/time"
 )

 var (
@@ -76,7 +76,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 	genVals := make([]types.GenesisValidator, nValidators)

 	for i := 0; i < nValidators; i++ {
-		nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i)
+		nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
 		nodeDir := filepath.Join(outputDir, nodeDirName)
 		config.SetRoot(nodeDir)

@@ -98,7 +98,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
 	}

 	for i := 0; i < nNonValidators; i++ {
-		nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators))
+		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i+nValidators))
 		config.SetRoot(nodeDir)

 		err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm)
@@ -112,14 +112,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error {

 	// Generate genesis doc from generated validators
 	genDoc := &types.GenesisDoc{
-		GenesisTime: time.Now(),
+		GenesisTime: tmtime.Now(),
 		ChainID:     "chain-" + cmn.RandStr(6),
 		Validators:  genVals,
 	}

 	// Write genesis file.
 	for i := 0; i < nValidators+nNonValidators; i++ {
-		nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
 		if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil {
 			_ = os.RemoveAll(outputDir)
 			return err
@@ -159,7 +159,7 @@ func hostnameOrIP(i int) string {

 func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
 	persistentPeers := make([]string, nValidators+nNonValidators)
 	for i := 0; i < nValidators+nNonValidators; i++ {
-		nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
 		config.SetRoot(nodeDir)
 		nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
 		if err != nil {
@@ -170,7 +170,7 @@ func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
 	persistentPeersList := strings.Join(persistentPeers, ",")

 	for i := 0; i < nValidators+nNonValidators; i++ {
-		nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i))
+		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
 		config.SetRoot(nodeDir)
 		config.P2P.PersistentPeers = persistentPeersList
 		config.P2P.AddrBookStrict = false
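testnet assembles the persistent_peers value by loading each node's key and joining id@host:port entries with commas. Roughly, using a hypothetical idAddressString helper and example node IDs rather than the real p2p package:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // idAddressString formats one peer entry the way the testnet command
    // does: node ID, then "@", then host:port. Hypothetical stand-in for
    // the p2p helper used in the real code.
    func idAddressString(id, hostPort string) string {
    	return fmt.Sprintf("%s@%s", id, hostPort)
    }

    func main() {
    	peers := []string{
    		idAddressString("f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4", "node0:26656"), // example ID
    		idAddressString("0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd", "node1:26656"), // example ID
    	}
    	fmt.Println(strings.Join(peers, ",")) // value for p2p.persistent_peers
    }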
diff --git a/config/config.go b/config/config.go
index fb8e79086d0..c0882546f1a 100644
--- a/config/config.go
+++ b/config/config.go
@@ -94,7 +94,6 @@ func (cfg *Config) SetRoot(root string) *Config {

 // BaseConfig defines the base configuration for a Tendermint node
 type BaseConfig struct {
-
 	// chainID is unexposed and immutable but here for convenience
 	chainID string

@@ -102,49 +101,49 @@ type BaseConfig struct {
 	// This should be set in viper so it can unmarshal into this struct
 	RootDir string `mapstructure:"home"`

+	// TCP or UNIX socket address of the ABCI application,
+	// or the name of an ABCI application compiled in with the Tendermint binary
+	ProxyApp string `mapstructure:"proxy_app"`
+
+	// A custom human readable name for this node
+	Moniker string `mapstructure:"moniker"`
+
+	// If this node is many blocks behind the tip of the chain, FastSync
+	// allows them to catchup quickly by downloading blocks in parallel
+	// and verifying their commits
+	FastSync bool `mapstructure:"fast_sync"`
+
+	// Database backend: leveldb | memdb
+	DBBackend string `mapstructure:"db_backend"`
+
+	// Database directory
+	DBPath string `mapstructure:"db_dir"`
+
+	// Output level for logging
+	LogLevel string `mapstructure:"log_level"`
+
 	// Path to the JSON file containing the initial validator set and other meta data
 	Genesis string `mapstructure:"genesis_file"`

 	// Path to the JSON file containing the private key to use as a validator in the consensus protocol
 	PrivValidator string `mapstructure:"priv_validator_file"`

-	// A JSON file containing the private key to use for p2p authenticated encryption
-	NodeKey string `mapstructure:"node_key_file"`
-
-	// A custom human readable name for this node
-	Moniker string `mapstructure:"moniker"`
-
 	// TCP or UNIX socket address for Tendermint to listen on for
 	// connections from an external PrivValidator process
 	PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"`

-	// TCP or UNIX socket address of the ABCI application,
-	// or the name of an ABCI application compiled in with the Tendermint binary
-	ProxyApp string `mapstructure:"proxy_app"`
+	// A JSON file containing the private key to use for p2p authenticated encryption
+	NodeKey string `mapstructure:"node_key_file"`

 	// Mechanism to connect to the ABCI application: socket | grpc
 	ABCI string `mapstructure:"abci"`

-	// Output level for logging
-	LogLevel string `mapstructure:"log_level"`
-
 	// TCP or UNIX socket address for the profiling server to listen on
 	ProfListenAddress string `mapstructure:"prof_laddr"`

-	// If this node is many blocks behind the tip of the chain, FastSync
-	// allows them to catchup quickly by downloading blocks in parallel
-	// and verifying their commits
-	FastSync bool `mapstructure:"fast_sync"`
-
 	// If true, query the ABCI app on connecting to a new peer
 	// so the app can decide if we should keep the connection or not
 	FilterPeers bool `mapstructure:"filter_peers"` // false
-
-	// Database backend: leveldb | memdb
-	DBBackend string `mapstructure:"db_backend"`
-
-	// Database directory
-	DBPath string `mapstructure:"db_dir"`
 }

 // DefaultBaseConfig returns a default base configuration for a Tendermint node
@@ -239,6 +238,8 @@ type RPCConfig struct {
 	// If you want to accept more significant number than the default, make sure
 	// you increase your OS limits.
 	// 0 - unlimited.
+	// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+	// 1024 - 40 - 10 - 50 = 924 = ~900
 	MaxOpenConnections int `mapstructure:"max_open_connections"`
 }

@@ -248,11 +249,9 @@ func DefaultRPCConfig() *RPCConfig {
 		ListenAddress: "tcp://0.0.0.0:26657",

 		GRPCListenAddress:      "",
-		GRPCMaxOpenConnections: 900, // no ipv4
+		GRPCMaxOpenConnections: 900,

-		Unsafe: false,
-		// should be < {ulimit -Sn} - {MaxNumPeers} - {N of wal, db and other open files}
-		// 1024 - 50 - 50 = 924 = ~900
+		Unsafe:             false,
 		MaxOpenConnections: 900,
 	}
 }
@@ -293,10 +292,14 @@ type P2PConfig struct {
 	AddrBook string `mapstructure:"addr_book_file"`

 	// Set true for strict address routability rules
+	// Set false for private or local networks
 	AddrBookStrict bool `mapstructure:"addr_book_strict"`

-	// Maximum number of peers to connect to
-	MaxNumPeers int `mapstructure:"max_num_peers"`
+	// Maximum number of inbound peers
+	MaxNumInboundPeers int `mapstructure:"max_num_inbound_peers"`
+
+	// Maximum number of outbound peers to connect to, excluding persistent peers
+	MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`

 	// Time to wait before flushing messages out on the connection, in ms
 	FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
@@ -346,7 +349,8 @@ func DefaultP2PConfig() *P2PConfig {
 		UPNP:           false,
 		AddrBook:       defaultAddrBookPath,
 		AddrBookStrict: true,
-		MaxNumPeers:    50,
+		MaxNumInboundPeers:  40,
+		MaxNumOutboundPeers: 10,
 		FlushThrottleTimeout:    100,
 		MaxPacketMsgPayloadSize: 1024,    // 1 kB
 		SendRate:                5120000, // 5 mB/s
@@ -417,8 +421,10 @@ func DefaultMempoolConfig() *MempoolConfig {
 		RecheckEmpty: true,
 		Broadcast:    true,
 		WalPath:      filepath.Join(defaultDataDir, "mempool.wal"),
-		Size:         100000,
-		CacheSize:    100000,
+		// Each signature verification takes .5ms, size reduced until we implement
+		// ABCI Recheck
+		Size:      5000,
+		CacheSize: 10000,
 	}
 }
@@ -463,6 +469,9 @@ type ConsensusConfig struct {
 	// Reactor sleep duration parameters are in milliseconds
 	PeerGossipSleepDuration     int `mapstructure:"peer_gossip_sleep_duration"`
 	PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
+
+	// Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks.
+	BlockTimeIota int `mapstructure:"blocktime_iota"`
 }

 // DefaultConsensusConfig returns a default configuration for the consensus service
@@ -481,6 +490,7 @@ func DefaultConsensusConfig() *ConsensusConfig {
 		CreateEmptyBlocksInterval:   0,
 		PeerGossipSleepDuration:     100,
 		PeerQueryMaj23SleepDuration: 2000,
+		BlockTimeIota:               1000,
 	}
 }

@@ -497,9 +507,17 @@ func TestConsensusConfig() *ConsensusConfig {
 	cfg.SkipTimeoutCommit = true
 	cfg.PeerGossipSleepDuration = 5
 	cfg.PeerQueryMaj23SleepDuration = 250
+	cfg.BlockTimeIota = 10
 	return cfg
 }

+// MinValidVoteTime returns the minimum acceptable block time.
+// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
+func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
+	return lastBlockTime.
+		Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond)
+}
+
 // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
 func (cfg *ConsensusConfig) WaitForTxs() bool {
 	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
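MinValidVoteTime makes the BFT-time floor concrete: an acceptable block or vote time must be at least BlockTimeIota milliseconds past the previous block time. Self-contained, the arithmetic is just:

    package main

    import (
    	"fmt"
    	"time"
    )

    // minValidVoteTime mirrors the method added above: last block time
    // plus BlockTimeIota (in milliseconds) is the earliest acceptable time.
    func minValidVoteTime(lastBlockTime time.Time, blockTimeIota int) time.Time {
    	return lastBlockTime.Add(time.Duration(blockTimeIota) * time.Millisecond)
    }

    func main() {
    	last := time.Date(2018, 8, 1, 0, 0, 0, 0, time.UTC)
    	// With the default BlockTimeIota of 1000 ms, the floor is one second later.
    	fmt.Println(minValidVoteTime(last, 1000)) // 2018-08-01 00:00:01 +0000 UTC
    }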
@@ -556,8 +574,8 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
 //-----------------------------------------------------------------------------
 // TxIndexConfig

-// TxIndexConfig defines the configuration for the transaction
-// indexer, including tags to index.
+// TxIndexConfig defines the configuration for the transaction indexer,
+// including tags to index.
 type TxIndexConfig struct {
 	// What indexer to use for transactions
 	//
@@ -566,16 +584,21 @@ type TxIndexConfig struct {
 	//  2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
 	Indexer string `mapstructure:"indexer"`

-	// Comma-separated list of tags to index (by default the only tag is tx hash)
+	// Comma-separated list of tags to index (by default the only tag is "tx.hash")
 	//
+	// You can also index transactions by height by adding "tx.height" tag here.
+	//
 	// It's recommended to index only a subset of tags due to possible memory
 	// bloat. This is, of course, depends on the indexer's DB and the volume of
 	// transactions.
 	IndexTags string `mapstructure:"index_tags"`

-	// When set to true, tells indexer to index all tags. Note this may be not
-	// desirable (see the comment above). IndexTags has a precedence over
-	// IndexAllTags (i.e. when given both, IndexTags will be indexed).
+	// When set to true, tells indexer to index all tags (predefined tags:
+	// "tx.hash", "tx.height" and all tags from DeliverTx responses).
+	//
+	// Note this may be not desirable (see the comment above). IndexTags has a
+	// precedence over IndexAllTags (i.e. when given both, IndexTags will be
+	// indexed).
 	IndexAllTags bool `mapstructure:"index_all_tags"`
 }
diff --git a/config/toml.go b/config/toml.go
index 60ce15de84e..2a35d7c36de 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -81,7 +81,7 @@ fast_sync = {{ .BaseConfig.FastSync }}
 db_backend = "{{ .BaseConfig.DBBackend }}"

 # Database directory
-db_path = "{{ js .BaseConfig.DBPath }}"
+db_dir = "{{ js .BaseConfig.DBPath }}"

 # Output level for logging, including package level options
 log_level = "{{ .BaseConfig.LogLevel }}"
@@ -94,6 +94,10 @@ genesis_file = "{{ js .BaseConfig.Genesis }}"
 # Path to the JSON file containing the private key to use as a validator in the consensus protocol
 priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"

+# TCP or UNIX socket address for Tendermint to listen on for
+# connections from an external PrivValidator process
+priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
+
 # Path to the JSON file containing the private key to use for node authentication in the p2p protocol
 node_key_file = "{{ js .BaseConfig.NodeKey}}"

@@ -124,6 +128,8 @@ grpc_laddr = "{{ .RPC.GRPCListenAddress }}"
 # If you want to accept more significant number than the default, make sure
 # you increase your OS limits.
 # 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
 grpc_max_open_connections = {{ .RPC.GRPCMaxOpenConnections }}

 # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
@@ -134,6 +140,8 @@ unsafe = {{ .RPC.Unsafe }}
 # If you want to accept more significant number than the default, make sure
 # you increase your OS limits.
 # 0 - unlimited.
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
+# 1024 - 40 - 10 - 50 = 924 = ~900
 max_open_connections = {{ .RPC.MaxOpenConnections }}

 ##### peer to peer configuration options #####
@@ -161,13 +169,17 @@ upnp = {{ .P2P.UPNP }}
 addr_book_file = "{{ js .P2P.AddrBook }}"

 # Set true for strict address routability rules
+# Set false for private or local networks
 addr_book_strict = {{ .P2P.AddrBookStrict }}

 # Time to wait before flushing messages out on the connection, in ms
 flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}

-# Maximum number of peers to connect to
-max_num_peers = {{ .P2P.MaxNumPeers }}
+# Maximum number of inbound peers
+max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}
+
+# Maximum number of outbound peers to connect to, excluding persistent peers
+max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}

 # Maximum size of a message packet payload, in bytes
 max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
@@ -239,16 +251,21 @@ peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
 #  2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
 indexer = "{{ .TxIndex.Indexer }}"

-# Comma-separated list of tags to index (by default the only tag is tx hash)
+# Comma-separated list of tags to index (by default the only tag is "tx.hash")
+#
+# You can also index transactions by height by adding "tx.height" tag here.
 #
 # It's recommended to index only a subset of tags due to possible memory
 # bloat. This is, of course, depends on the indexer's DB and the volume of
 # transactions.
 index_tags = "{{ .TxIndex.IndexTags }}"
-# When set to true, tells indexer to index all tags. Note this may be not
-# desirable (see the comment above). IndexTags has a precedence over
-# IndexAllTags (i.e. when given both, IndexTags will be indexed).
+# When set to true, tells indexer to index all tags (predefined tags:
+# "tx.hash", "tx.height" and all tags from DeliverTx responses).
+#
+# Note this may be not desirable (see the comment above). IndexTags has a
+# precedence over IndexAllTags (i.e. when given both, IndexTags will be
+# indexed).
 index_all_tags = {{ .TxIndex.IndexAllTags }}

 ##### instrumentation configuration options #####
diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go
index 5360a92c945..0aba77432e7 100644
--- a/consensus/byzantine_test.go
+++ b/consensus/byzantine_test.go
@@ -2,14 +2,15 @@ package consensus

 import (
 	"context"
+	"fmt"
 	"sync"
 	"testing"
 	"time"

 	"github.com/stretchr/testify/require"
+	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
 )

 func init() {
@@ -156,8 +157,8 @@ func TestByzantine(t *testing.T) {
 		case <-done:
 		case <-tick.C:
 			for i, reactor := range reactors {
-				t.Log(cmn.Fmt("Consensus Reactor %v", i))
-				t.Log(cmn.Fmt("%v", reactor))
+				t.Log(fmt.Sprintf("Consensus Reactor %v", i))
+				t.Log(fmt.Sprintf("%v", reactor))
 			}
 			t.Fatalf("Timed out waiting for all validators to commit first block")
 		}
vals := types.TM2PB.Validators(state.Validators) + vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) @@ -423,7 +424,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G sort.Sort(types.PrivValidatorsByAddress(privValidators)) return &types.GenesisDoc{ - GenesisTime: time.Now(), + GenesisTime: tmtime.Now(), ChainID: config.ChainID(), Validators: validators, }, privValidators diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index a811de731b0..16a167fd698 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" ) @@ -89,7 +88,7 @@ func deliverTxsRange(cs *ConsensusState, start, end int) { binary.BigEndian.PutUint64(txBytes, uint64(i)) err := cs.mempool.CheckTx(txBytes, nil) if err != nil { - panic(cmn.Fmt("Error after CheckTx: %v", err)) + panic(fmt.Sprintf("Error after CheckTx: %v", err)) } } } @@ -100,7 +99,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { height, round := cs.Height, cs.Round newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - NTxs := 10000 + NTxs := 3000 go deliverTxsRange(cs, 0, NTxs) startTestRound(cs, height, round) @@ -126,7 +125,7 @@ func TestMempoolRmBadTx(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(0)) resDeliver := app.DeliverTx(txBytes) - assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. got %v", resDeliver)) + assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. 
got %v", resDeliver)) resCommit := app.Commit() assert.True(t, len(resCommit.Data) > 0) @@ -149,7 +148,7 @@ func TestMempoolRmBadTx(t *testing.T) { // check for the tx for { - txs := cs.mempool.Reap(1) + txs := cs.mempool.ReapMaxBytes(len(txBytes)) if len(txs) == 0 { emptyMempoolCh <- struct{}{} return @@ -190,7 +189,7 @@ func NewCounterApplication() *CounterApplication { } func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { - return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)} + return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)} } func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx { diff --git a/consensus/reactor.go b/consensus/reactor.go index 58ff42ae2b7..6ba8172641c 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -17,6 +17,7 @@ import ( "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) const ( @@ -241,7 +242,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, "valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress) default: - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } case DataChannel: @@ -262,7 +263,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) } conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} default: - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } case VoteChannel: @@ -287,7 +288,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) default: // don't punish (leave room for soft upgrades) - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } case VoteSetBitsChannel: @@ -319,11 +320,11 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) } default: // don't punish (leave room for soft upgrades) - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) + conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } default: - conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID)) + conR.Logger.Error(fmt.Sprintf("Unknown chId %X", chID)) } if err != nil { @@ -482,7 +483,7 @@ OUTER_LOOP: if prs.ProposalBlockParts == nil { blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { - cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d", + cmn.PanicCrisis(fmt.Sprintf("Failed to load block %d when blockStore is at %d", prs.Height, conR.conS.blockStore.Height())) } ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) @@ -1034,7 +1035,7 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida NOTE: This is wrong, 'round' could change. e.g. if orig round is not the same as block LastCommit round. if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { - cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) + cmn.PanicSanity(fmt.Sprintf("Conflicting CatchupCommitRound. 
Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) } */ if ps.PRS.CatchupCommitRound == round { @@ -1138,7 +1139,7 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) { } func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) { - logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round)) + logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round)) logger.Debug("setHasVote", "type", type_, "index", index) // NOTE: some may be nil BitArrays -> no side effects. @@ -1165,7 +1166,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { psCatchupCommitRound := ps.PRS.CatchupCommitRound psCatchupCommit := ps.PRS.CatchupCommit - startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) + startTime := tmtime.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) ps.PRS.Height = msg.Height ps.PRS.Round = msg.Round ps.PRS.Step = msg.Step diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 63dd9075fdb..98b058b8d61 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -20,6 +20,7 @@ import ( "github.com/tendermint/tendermint/libs/log" mempl "github.com/tendermint/tendermint/mempool" sm "github.com/tendermint/tendermint/state" + tmtime "github.com/tendermint/tendermint/types/time" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" @@ -115,10 +116,10 @@ func TestReactorWithEvidence(t *testing.T) { for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) + thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal app := appFunc() - vals := types.TM2PB.Validators(state.Validators) + vals := types.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) pv := privVals[i] @@ -194,7 +195,8 @@ func newMockEvidencePool(val []byte) *mockEvidencePool { } } -func (m *mockEvidencePool) PendingEvidence() []types.Evidence { +// NOTE: maxBytes is ignored +func (m *mockEvidencePool) PendingEvidence(maxBytes int) []types.Evidence { if m.height > 0 { return m.ev } @@ -207,7 +209,7 @@ func (m *mockEvidencePool) Update(block *types.Block, state sm.State) { panic("block has no evidence") } } - m.height += 1 + m.height++ } //------------------------------------ @@ -295,14 +297,14 @@ func TestReactorRecordsBlockParts(t *testing.T) { require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") } -// Test we record votes from other peers +// Test we record votes from other peers. func TestReactorRecordsVotes(t *testing.T) { - // create dummy peer + // Create dummy peer. peer := p2pdummy.NewPeer() ps := NewPeerState(peer).SetLogger(log.TestingLogger()) peer.Set(types.PeerStateKey, ps) - // create reactor + // Create reactor. 
css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore) reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states reactor.SetEventBus(css[0].eventBus) @@ -320,7 +322,7 @@ func TestReactorRecordsVotes(t *testing.T) { ValidatorAddress: val.Address, Height: 2, Round: 0, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: types.VoteTypePrevote, BlockID: types.BlockID{}, } @@ -540,7 +542,7 @@ func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{} err := validateBlock(newBlock, activeVals) assert.Nil(t, err) for _, tx := range txs { - css[j].mempool.CheckTx(tx, nil) + err := css[j].mempool.CheckTx(tx, nil) assert.Nil(t, err) } }, css) diff --git a/consensus/replay.go b/consensus/replay.go index bb1f2e46db9..c92654f2ce9 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -264,15 +264,15 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight stateBlockHeight := state.LastBlockHeight h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight) - // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain + // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. if appBlockHeight == 0 { - validators := types.TM2PB.Validators(state.Validators) + nextVals := types.TM2PB.ValidatorUpdates(state.NextValidators) // state.Validators would work too. csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, ConsensusParams: csParams, - Validators: validators, + Validators: nextVals, AppStateBytes: h.genDoc.AppState, } res, err := proxyApp.Consensus().InitChainSync(req) @@ -280,11 +280,9 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight return nil, err } - // if the app returned validators - // or consensus params, update the state - // with the them + // If the app returned validators or consensus params, update the state. if len(res.Validators) > 0 { - vals, err := types.PB2TM.Validators(res.Validators) + vals, err := types.PB2TM.ValidatorUpdates(res.Validators) if err != nil { return nil, err } @@ -296,7 +294,7 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight sm.SaveState(h.stateDB, state) } - // First handle edge cases and constraints on the storeBlockHeight + // First handle edge cases and constraints on the storeBlockHeight. 
if storeBlockHeight == 0 { return appHash, checkAppHash(state, appHash) @@ -306,11 +304,11 @@ func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight } else if storeBlockHeight < stateBlockHeight { // the state should never be ahead of the store (this is under tendermint's control) - cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) + cmn.PanicSanity(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) } else if storeBlockHeight > stateBlockHeight+1 { // store should be at most one ahead of the state (this is under tendermint's control) - cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) + cmn.PanicSanity(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) } var err error diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 0c0b0dcb169..e4b9f01960a 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -13,12 +13,12 @@ import ( bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" ) const ( @@ -34,7 +34,7 @@ func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console consensusState := newConsensusStateForReplay(config, csConfig) if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err)) + cmn.Exit(fmt.Sprintf("Error during consensus replay: %v", err)) } } @@ -302,12 +302,12 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo NewHandshaker(stateDB, state, blockStore, gdoc)) err = proxyApp.Start() if err != nil { - cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err)) + cmn.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) } eventBus := types.NewEventBus() if err := eventBus.Start(); err != nil { - cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err)) + cmn.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) } mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{} diff --git a/consensus/replay_test.go b/consensus/replay_test.go index fa0ec040897..8ea71d353e5 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -3,7 +3,6 @@ package consensus import ( "bytes" "context" - "errors" "fmt" "io" "io/ioutil" @@ -20,15 +19,14 @@ import ( abci "github.com/tendermint/tendermint/abci/types" crypto "github.com/tendermint/tendermint/crypto" auto "github.com/tendermint/tendermint/libs/autofile" - cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/libs/log" ) var consensusReplayConfig 
*cfg.Config @@ -418,7 +416,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, } defer proxyApp.Stop() - validators := types.TM2PB.Validators(state.Validators) + validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -455,7 +453,7 @@ func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, c } defer proxyApp.Stop() - validators := types.TM2PB.Validators(state.Validators) + validators := types.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -494,7 +492,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { return nil, nil, err } if !found { - return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1)) + return nil, nil, fmt.Errorf("WAL does not contain height %d.", 1) } defer gr.Close() // nolint: errcheck @@ -531,11 +529,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { panic(err) } if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) + panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) } commitHeight := thisBlockCommit.Precommits[0].Height if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) @@ -564,11 +562,11 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { panic(err) } if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) + panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) } commitHeight := thisBlockCommit.Precommits[0].Height if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) + panic(fmt.Sprintf("commit doesnt match. 
got height %d, expected %d", commitHeight, height+1)) } blocks = append(blocks, block) commits = append(commits, thisBlockCommit) @@ -641,7 +639,7 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { func TestInitChainUpdateValidators(t *testing.T) { val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) - app := &initChainApp{vals: types.TM2PB.Validators(vals)} + app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} clientCreator := proxy.NewLocalClientCreator(app) config := ResetConfig("proxy_test_") @@ -668,7 +666,7 @@ func TestInitChainUpdateValidators(t *testing.T) { assert.Equal(t, newValAddr, expectValAddr) } -func newInitChainApp(vals []abci.Validator) *initChainApp { +func newInitChainApp(vals []abci.ValidatorUpdate) *initChainApp { return &initChainApp{ vals: vals, } @@ -677,7 +675,7 @@ func newInitChainApp(vals []abci.Validator) *initChainApp { // returns the vals on InitChain type initChainApp struct { abci.BaseApplication - vals []abci.Validator + vals []abci.ValidatorUpdate } func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { diff --git a/consensus/state.go b/consensus/state.go index 6ffe6ef6431..d77afafe983 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -12,6 +12,7 @@ import ( fail "github.com/ebuchman/fail-test" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/types/time" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" @@ -74,7 +75,6 @@ type ConsensusState struct { privValidator types.PrivValidator // for signing votes // services for creating and executing blocks - // TODO: encapsulate all of this in one "BlockManager" blockExec *sm.BlockExecutor blockStore sm.BlockStore mempool sm.Mempool @@ -154,6 +154,7 @@ func NewConsensusState( cs.setProposal = cs.defaultSetProposal cs.updateToState(state) + // Don't call scheduleRound0 yet. // We do that upon Start(). cs.reconstructLastCommit(state) @@ -187,7 +188,7 @@ func WithMetrics(metrics *Metrics) CSOption { // String returns a string. func (cs *ConsensusState) String() string { // better not to access shared variables - return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) + return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) } // GetState returns a copy of the chain state. @@ -197,6 +198,15 @@ func (cs *ConsensusState) GetState() sm.State { return cs.state.Copy() } +// GetLastHeight returns the last height committed. +// If there were no blocks, returns 0. +func (cs *ConsensusState) GetLastHeight() int64 { + cs.mtx.Lock() + defer cs.mtx.Unlock() + + return cs.RoundState.Height - 1 +} + // GetRoundState returns a shallow copy of the internal consensus state. func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { cs.mtx.RLock() @@ -413,8 +423,8 @@ func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) // enterNewRound(height, 0) at cs.StartTime. 
func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) { - //cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime) - sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple + //cs.Logger.Info("scheduleRound0", "now", tmtime.Now(), "startTime", cs.StartTime) + sleepDuration := rs.StartTime.Sub(tmtime.Now()) // nolint: gotype, gosimple cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) } @@ -451,7 +461,7 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { } added, err := lastPrecommits.AddVote(precommit) if !added || err != nil { - cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err)) + cmn.PanicCrisis(fmt.Sprintf("Failed to reconstruct LastCommit: %v", err)) } } if !lastPrecommits.HasTwoThirdsMajority() { @@ -464,13 +474,13 @@ func (cs *ConsensusState) reconstructLastCommit(state sm.State) { // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. func (cs *ConsensusState) updateToState(state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { - cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v", + cmn.PanicSanity(fmt.Sprintf("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { // This might happen when someone else is mutating cs.state. // Someone forgot to pass in state.Copy() somewhere?! - cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", + cmn.PanicSanity(fmt.Sprintf("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", cs.state.LastBlockHeight+1, cs.Height)) } @@ -507,7 +517,7 @@ func (cs *ConsensusState) updateToState(state sm.State) { // to be gathered for the first block. // And alternative solution that relies on clocks: // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - cs.StartTime = cs.config.Commit(time.Now()) + cs.StartTime = cs.config.Commit(tmtime.Now()) } else { cs.StartTime = cs.config.Commit(cs.CommitTime) } @@ -690,7 +700,7 @@ func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) { cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) cs.enterNewRound(ti.Height, ti.Round+1) default: - panic(cmn.Fmt("Invalid timeout step: %v", ti.Step)) + panic(fmt.Sprintf("Invalid timeout step: %v", ti.Step)) } } @@ -716,15 +726,15 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - if now := time.Now(); cs.StartTime.After(now) { + if now := tmtime.Now(); cs.StartTime.After(now) { logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) } - logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterNewRound(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Increment validators if necessary validators := cs.Validators @@ -811,10 +821,10 @@ func (cs *ConsensusState) enterPropose(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPropose: @@ -894,7 +904,7 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) } cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block)) + cs.Logger.Debug(fmt.Sprintf("Signed proposal block: %v", block)) } else { if !cs.replayMode { cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) @@ -938,13 +948,25 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts return } + maxBytes := cs.state.ConsensusParams.BlockSize.MaxBytes + // bound evidence to 1/10th of the block + evidence := cs.evpool.PendingEvidence(maxBytes / 10) // Mempool validated transactions - txs := cs.mempool.Reap(cs.state.ConsensusParams.BlockSize.MaxTxs) - evidence := cs.evpool.PendingEvidence() - block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence) + txs := cs.mempool.ReapMaxBytes(maxDataBytes(maxBytes, cs.state.Validators.Size(), len(evidence))) + proposerAddr := cs.privValidator.GetAddress() + block, parts := cs.state.MakeBlock(cs.Height, txs, commit, evidence, proposerAddr) + return block, parts } +func maxDataBytes(maxBytes, valsCount, evidenceCount int) int { + return maxBytes - + types.MaxAminoOverheadForBlock - + types.MaxHeaderBytes - + (valsCount * types.MaxVoteBytes) - + (evidenceCount * types.MaxEvidenceBytes) +} + // Enter: `timeoutPropose` after entering Propose. // Enter: proposal block and POL is ready. // Enter: any +2/3 prevotes for future round. @@ -952,7 +974,7 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts // Otherwise vote nil. func (cs *ConsensusState) enterPrevote(height int64, round int) { if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } @@ -970,7 +992,7 @@ func (cs *ConsensusState) enterPrevote(height int64, round int) { // TODO: catchup event? } - cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + cs.Logger.Info(fmt.Sprintf("enterPrevote(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Sign and broadcast vote as necessary cs.doPrevote(height, round) @@ -1016,13 +1038,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if !cs.Votes.Prevotes(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) + cmn.PanicSanity(fmt.Sprintf("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round)) } - logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrevoteWait: @@ -1044,11 +1066,11 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommit: @@ -1076,7 +1098,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { // the latest POLRound should be this round. polRound, _ := cs.Votes.POLInfo() if polRound < round { - cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %", round, polRound)) + cmn.PanicSanity(fmt.Sprintf("This POLRound should be %v but got %v", round, polRound)) } // +2/3 prevoted nil. Unlock and precommit nil. @@ -1110,7 +1132,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) + cmn.PanicConsensus(fmt.Sprintf("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round cs.LockedBlock = cs.ProposalBlock @@ -1140,13 +1162,13 @@ func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { logger := cs.Logger.With("height", height, "round", round) if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterPrecommitWait(%v/%v): Invalid args. 
Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) return } if !cs.Votes.Precommits(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) + cmn.PanicSanity(fmt.Sprintf("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) } - logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterPrecommitWait: @@ -1164,17 +1186,17 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) { logger := cs.Logger.With("height", height, "commitRound", commitRound) if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { - logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) return } - logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) + logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) defer func() { // Done enterCommit: // keep cs.Round the same, commitRound points to the right Precommits set. cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) cs.CommitRound = commitRound - cs.CommitTime = time.Now() + cs.CommitTime = tmtime.Now() cs.newStep() // Maybe finalize immediately. @@ -1214,7 +1236,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { logger := cs.Logger.With("height", height) if cs.Height != height { - cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) + cmn.PanicSanity(fmt.Sprintf("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height)) } blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority() @@ -1236,7 +1258,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) { // Increment height and goto cstypes.RoundStepNewHeight func (cs *ConsensusState) finalizeCommit(height int64) { if cs.Height != height || cs.Step != cstypes.RoundStepCommit { - cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step)) + cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. 
Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step)) return } @@ -1244,21 +1266,21 @@ func (cs *ConsensusState) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority")) + cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) } if !blockParts.HasHeader(blockID.PartsHeader) { - cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header")) + cmn.PanicSanity(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) } if !block.HashesTo(blockID.Hash) { - cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + cmn.PanicSanity(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) } if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { - cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err)) + cmn.PanicConsensus(fmt.Sprintf("+2/3 committed an invalid block: %v", err)) } - cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs), + cs.Logger.Info(fmt.Sprintf("Finalizing commit of block with %d txs", block.NumTxs), "height", block.Height, "hash", block.Hash(), "root", block.AppHash) - cs.Logger.Info(cmn.Fmt("%v", block)) + cs.Logger.Info(fmt.Sprintf("%v", block)) fail.Fail() // XXX @@ -1429,7 +1451,11 @@ func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p } if added && cs.ProposalBlockParts.IsComplete() { // Added and completed! - _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes)) + _, err = cdc.UnmarshalBinaryReader( + cs.ProposalBlockParts.GetReader(), + &cs.ProposalBlock, + int64(cs.state.ConsensusParams.BlockSize.MaxBytes), + ) if err != nil { return true, err } @@ -1510,7 +1536,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, return added, err } - cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) + cs.Logger.Info(fmt.Sprintf("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) cs.eventBus.PublishEventVote(types.EventDataVote{vote}) cs.evsw.FireEvent(types.EventVote, vote) @@ -1626,7 +1652,7 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, cs.enterPrecommitWait(height, vote.Round) } default: - panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. + panic(fmt.Sprintf("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. 
} return @@ -1635,12 +1661,13 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { addr := cs.privValidator.GetAddress() valIndex, _ := cs.Validators.GetByAddress(addr) + vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: valIndex, Height: cs.Height, Round: cs.Round, - Timestamp: time.Now().UTC(), + Timestamp: cs.voteTime(), Type: type_, BlockID: types.BlockID{hash, header}, } @@ -1648,6 +1675,23 @@ func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSet return vote, err } +func (cs *ConsensusState) voteTime() time.Time { + now := tmtime.Now() + minVoteTime := now + // TODO: We should remove next line in case we don't vote for v in case cs.ProposalBlock == nil, + // even if cs.LockedBlock != nil. See https://github.com/tendermint/spec. + if cs.LockedBlock != nil { + minVoteTime = cs.config.MinValidVoteTime(cs.LockedBlock.Time) + } else if cs.ProposalBlock != nil { + minVoteTime = cs.config.MinValidVoteTime(cs.ProposalBlock.Time) + } + + if now.After(minVoteTime) { + return now + } + return minVoteTime +} + // sign the vote and publish on internalMsgQueue func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote { // if we don't have a key or we're not in the validator set, do nothing diff --git a/consensus/state_test.go b/consensus/state_test.go index 6a14e17b56e..14cd0593e46 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -8,10 +8,9 @@ import ( "time" cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" - "github.com/tendermint/tendermint/libs/log" ) func init() { @@ -84,7 +83,7 @@ func TestStateProposerSelection0(t *testing.T) { prop = cs1.GetRoundState().Validators.GetProposer() if !bytes.Equal(prop.Address, vss[1].GetAddress()) { - panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address)) + panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) } } @@ -104,8 +103,9 @@ func TestStateProposerSelection2(t *testing.T) { // everyone just votes nil. we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Validators.GetProposer() - if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) { - panic(cmn.Fmt("expected proposer to be validator %d. Got %X", (i+2)%len(vss), prop.Address)) + correctProposer := vss[(i+2)%len(vss)].GetAddress() + if !bytes.Equal(prop.Address, correctProposer) { + panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address)) } rs := cs1.GetRoundState() @@ -444,7 +444,7 @@ func TestStateLockNoPOL(t *testing.T) { // now we're on a new round and are the proposer if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) + panic(fmt.Sprintf("Expected proposal block to be locked block. 
Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) } <-voteCh // prevote diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 70a38668ff9..1c8ac67cb39 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -6,9 +6,9 @@ import ( "strings" "sync" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) type RoundVoteSet struct { @@ -169,7 +169,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { case types.VoteTypePrecommit: return rvs.Precommits default: - cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_)) + cmn.PanicSanity(fmt.Sprintf("Unexpected vote type %X", type_)) return nil } } @@ -219,7 +219,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string { voteSetString = roundVoteSet.Precommits.StringShort() vsStrings = append(vsStrings, voteSetString) } - return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v + return fmt.Sprintf(`HeightVoteSet{H:%v R:0~%v %s %v %s}`, hvs.height, hvs.round, diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 0de656000cf..5f469221d86 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -1,12 +1,12 @@ package types import ( + "fmt" "testing" - "time" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) var config *cfg.Config // NOTE: must be reset for each _test.go file @@ -55,14 +55,14 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali ValidatorIndex: valIndex, Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: types.VoteTypePrecommit, BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, } chainID := config.ChainID() err := privVal.SignVote(chainID, vote) if err != nil { - panic(cmn.Fmt("Error signing vote: %v", err)) + panic(fmt.Sprintf("Error signing vote: %v", err)) return nil } return vote diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go index 7a5d69b8eb9..e42395bc3aa 100644 --- a/consensus/types/peer_round_state.go +++ b/consensus/types/peer_round_state.go @@ -4,8 +4,8 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index cca560ccf83..c22880c2b8e 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -5,8 +5,8 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index 0257ea2ff98..a330981f6fb 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -2,12 +2,12 @@ package types import ( "testing" - "time" - amino "github.com/tendermint/go-amino" + 
"github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) func BenchmarkRoundStateDeepCopy(b *testing.B) { @@ -23,11 +23,11 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { Hash: cmn.RandBytes(20), }, } - sig := make([]byte, ed25519.SignatureEd25519Size) + sig := make([]byte, ed25519.SignatureSize) for i := 0; i < nval; i++ { precommits[i] = &types.Vote{ ValidatorAddress: types.Address(cmn.RandBytes(20)), - Timestamp: time.Now(), + Timestamp: tmtime.Now(), BlockID: blockID, Signature: sig, } @@ -40,7 +40,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { block := &types.Block{ Header: types.Header{ ChainID: cmn.RandStr(12), - Time: time.Now(), + Time: tmtime.Now(), LastBlockID: blockID, LastCommitHash: cmn.RandBytes(20), DataHash: cmn.RandBytes(20), @@ -62,7 +62,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { parts := block.MakePartSet(4096) // Random Proposal proposal := &types.Proposal{ - Timestamp: time.Now(), + Timestamp: tmtime.Now(), BlockPartsHeader: types.PartSetHeader{ Hash: cmn.RandBytes(20), }, @@ -73,8 +73,8 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { // TODO: hvs := rs := &RoundState{ - StartTime: time.Now(), - CommitTime: time.Now(), + StartTime: tmtime.Now(), + CommitTime: tmtime.Now(), Validators: vset, Proposal: proposal, ProposalBlock: block, diff --git a/consensus/version.go b/consensus/version.go index 5c74a16db37..c04d2ac7dbd 100644 --- a/consensus/version.go +++ b/consensus/version.go @@ -1,8 +1,6 @@ package consensus -import ( - cmn "github.com/tendermint/tendermint/libs/common" -) +import "fmt" // kind of arbitrary var Spec = "1" // async @@ -10,4 +8,4 @@ var Major = "0" // var Minor = "2" // replay refactor var Revision = "2" // validation -> commit -var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision) +var Version = fmt.Sprintf("v%s/%s.%s.%s", Spec, Major, Minor, Revision) diff --git a/consensus/wal.go b/consensus/wal.go index 8c4c10bc7b0..870701f1c44 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -11,9 +11,10 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tendermint/libs/autofile" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) const ( @@ -119,8 +120,8 @@ func (wal *baseWAL) Write(msg WALMessage) { } // Write the wal message - if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil { - panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) + if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil { + panic(fmt.Sprintf("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) } } @@ -134,7 +135,7 @@ func (wal *baseWAL) WriteSync(msg WALMessage) { wal.Write(msg) if err := wal.group.Flush(); err != nil { - panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err)) + panic(fmt.Sprintf("Error flushing consensus wal buf to file. 
Error: %v \n", err)) } } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index f3a365809ce..6d889aa6fc8 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -13,14 +13,14 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" bc "github.com/tendermint/tendermint/blockchain" cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tendermint/libs/autofile" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/privval" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" ) // WALWithNBlocks generates a consensus WAL. It does this by spining up a diff --git a/consensus/wal_test.go b/consensus/wal_test.go index 3ecb4fe8fb2..e5744c0a1d2 100644 --- a/consensus/wal_test.go +++ b/consensus/wal_test.go @@ -3,20 +3,21 @@ package consensus import ( "bytes" "crypto/rand" + "fmt" // "sync" "testing" "time" "github.com/tendermint/tendermint/consensus/types" tmtypes "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestWALEncoderDecoder(t *testing.T) { - now := time.Now() + now := tmtime.Now() msgs := []TimedWALMessage{ TimedWALMessage{Time: now, Msg: EndHeightMessage{0}}, TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, @@ -54,8 +55,8 @@ func TestWALSearchForEndHeight(t *testing.T) { h := int64(3) gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) - assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) + assert.NoError(t, err, fmt.Sprintf("expected not to err on height %d", h)) + assert.True(t, found, fmt.Sprintf("expected to find end height for %d", h)) assert.NotNil(t, gr, "expected group not to be nil") defer gr.Close() @@ -64,7 +65,7 @@ func TestWALSearchForEndHeight(t *testing.T) { assert.NoError(t, err, "expected to decode a message") rs, ok := msg.Msg.(tmtypes.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") - assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) + assert.Equal(t, rs.Height, h+1, fmt.Sprintf("wrong height")) } /* @@ -93,7 +94,7 @@ func benchmarkWalDecode(b *testing.B, n int) { enc := NewWALEncoder(buf) data := nBytes(n) - enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)}) + enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second).UTC()}) encoded := buf.Bytes() diff --git a/crypto/ed25519/ed25519.go b/crypto/ed25519/ed25519.go index fa7526f3fcf..c55b3588f53 100644 --- a/crypto/ed25519/ed25519.go +++ b/crypto/ed25519/ed25519.go @@ -18,11 +18,11 @@ import ( var _ crypto.PrivKey = PrivKeyEd25519{} const ( - Ed25519PrivKeyAminoRoute = "tendermint/PrivKeyEd25519" - Ed25519PubKeyAminoRoute = "tendermint/PubKeyEd25519" + PrivKeyAminoRoute = "tendermint/PrivKeyEd25519" + PubKeyAminoRoute = "tendermint/PubKeyEd25519" // Size of an 
Edwards25519 signature. Namely the size of a compressed // Edwards25519 point, and a field element. Both of which are 32 bytes. - SignatureEd25519Size = 64 + SignatureSize = 64 ) var cdc = amino.NewCodec() @@ -30,11 +30,11 @@ var cdc = amino.NewCodec() func init() { cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(PubKeyEd25519{}, - Ed25519PubKeyAminoRoute, nil) + PubKeyAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(PrivKeyEd25519{}, - Ed25519PrivKeyAminoRoute, nil) + PrivKeyAminoRoute, nil) } // PrivKeyEd25519 implements crypto.PrivKey. @@ -158,10 +158,10 @@ func (pubKey PubKeyEd25519) Bytes() []byte { func (pubKey PubKeyEd25519) VerifyBytes(msg []byte, sig_ []byte) bool { // make sure we use the same algorithm to sign - if len(sig_) != SignatureEd25519Size { + if len(sig_) != SignatureSize { return false } - sig := new([SignatureEd25519Size]byte) + sig := new([SignatureSize]byte) copy(sig[:], sig_) pubKeyBytes := [PubKeyEd25519Size]byte(pubKey) return ed25519.Verify(&pubKeyBytes, msg, sig) diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index fd9a08442e8..7728e6afbea 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -2,8 +2,10 @@ package cryptoAmino import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/multisig" "github.com/tendermint/tendermint/crypto/secp256k1" ) @@ -24,15 +26,17 @@ func RegisterAmino(cdc *amino.Codec) { // These are all written here instead of cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, - "tendermint/PubKeyEd25519", nil) + ed25519.PubKeyAminoRoute, nil) cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, - "tendermint/PubKeySecp256k1", nil) + secp256k1.PubKeyAminoRoute, nil) + cdc.RegisterConcrete(multisig.PubKeyMultisigThreshold{}, + multisig.PubKeyMultisigThresholdAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(ed25519.PrivKeyEd25519{}, - "tendermint/PrivKeyEd25519", nil) + ed25519.PrivKeyAminoRoute, nil) cdc.RegisterConcrete(secp256k1.PrivKeySecp256k1{}, - "tendermint/PrivKeySecp256k1", nil) + secp256k1.PrivKeyAminoRoute, nil) } func PrivKeyFromBytes(privKeyBytes []byte) (privKey crypto.PrivKey, err error) { diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 0581ba6438f..7235ba69494 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -53,24 +53,27 @@ func ExamplePrintRegisteredTypes() { //| ---- | ---- | ------ | ----- | ------ | //| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | | //| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | | + //| PubKeyMultisigThreshold | tendermint/PubKeyMultisigThreshold | 0x22C1F7E2 | variable | | //| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | | //| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | | } func TestKeyEncodings(t *testing.T) { cases := []struct { - privKey crypto.PrivKey - privSize, pubSize int // binary sizes + privKey crypto.PrivKey + privSize, pubSize, sigSize int // binary sizes }{ { privKey: ed25519.GenPrivKey(), privSize: 69, pubSize: 37, + sigSize: 65, }, { privKey: secp256k1.GenPrivKey(), privSize: 37, pubSize: 38, + sigSize: 65, }, } @@ -87,7 +90,7 @@ func TestKeyEncodings(t *testing.T) { 
 	var sig1, sig2 []byte
 	sig1, err := tc.privKey.Sign([]byte("something"))
 	assert.NoError(t, err, "tc #%d", tcIndex)
-	checkAminoBinary(t, sig1, &sig2, -1) // Signature size changes for Secp anyways.
+	checkAminoBinary(t, sig1, &sig2, tc.sigSize)
 	assert.EqualValues(t, sig1, sig2, "tc #%d", tcIndex)
 
 	// Check (de/en)codings of PubKeys.
diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go
index 488e0c90767..e2dccd3b3d7 100644
--- a/crypto/merkle/simple_tree_test.go
+++ b/crypto/merkle/simple_tree_test.go
@@ -6,8 +6,9 @@ import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 	. "github.com/tendermint/tendermint/libs/test"
 
-	"github.com/tendermint/tendermint/crypto/tmhash"
 	"testing"
+
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )
 
 type testItem []byte
diff --git a/crypto/multisig/bitarray/compact_bit_array.go b/crypto/multisig/bitarray/compact_bit_array.go
new file mode 100644
index 00000000000..0152db72469
--- /dev/null
+++ b/crypto/multisig/bitarray/compact_bit_array.go
@@ -0,0 +1,233 @@
+package bitarray
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// CompactBitArray is an implementation of a space efficient bit array.
+// This is used to ensure that the encoded data takes up a minimal amount of
+// space after amino encoding.
+// This is not thread safe, and is not intended for concurrent usage.
+type CompactBitArray struct {
+	ExtraBitsStored byte   `json:"extra_bits"` // The number of extra bits in elems.
+	Elems           []byte `json:"bits"`
+}
+
+// NewCompactBitArray returns a new compact bit array.
+// It returns nil if the number of bits is zero.
+func NewCompactBitArray(bits int) *CompactBitArray {
+	if bits <= 0 {
+		return nil
+	}
+	return &CompactBitArray{
+		ExtraBitsStored: byte(bits % 8),
+		Elems:           make([]byte, (bits+7)/8),
+	}
+}
+
+// Size returns the number of bits in the bitarray
+func (bA *CompactBitArray) Size() int {
+	if bA == nil {
+		return 0
+	} else if bA.ExtraBitsStored == byte(0) {
+		return len(bA.Elems) * 8
+	}
+	// num_bits = 8*num_full_bytes + overflow_in_last_byte
+	// num_full_bytes = (len(bA.Elems)-1)
+	return (len(bA.Elems)-1)*8 + int(bA.ExtraBitsStored)
+}
+
+// GetIndex returns the bit at index i within the bit array.
+// The behavior is undefined if i >= bA.Size()
+func (bA *CompactBitArray) GetIndex(i int) bool {
+	if bA == nil {
+		return false
+	}
+	if i >= bA.Size() {
+		return false
+	}
+	return bA.Elems[i>>3]&(uint8(1)<<uint8(7-(i%8))) > 0
+}
+
+// SetIndex sets the bit at index i within the bit array.
+// The behavior is undefined if i >= bA.Size()
+func (bA *CompactBitArray) SetIndex(i int, v bool) bool {
+	if bA == nil {
+		return false
+	}
+	if i >= bA.Size() {
+		return false
+	}
+	if v {
+		bA.Elems[i>>3] |= (uint8(1) << uint8(7-(i%8)))
+	} else {
+		bA.Elems[i>>3] &= ^(uint8(1) << uint8(7-(i%8)))
+	}
+	return true
+}
+
+// NumTrueBitsBefore returns the number of bits set to true before the
+// given index. e.g. if bA = _XX__XX, NumOfTrueBitsBefore(4) = 2, since
+// there are two bits set to true before index 4.
+func (bA *CompactBitArray) NumTrueBitsBefore(index int) int {
+	numTrueValues := 0
+	for i := 0; i < index; i++ {
+		if bA.GetIndex(i) {
+			numTrueValues++
+		}
+	}
+	return numTrueValues
+}
+
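A minimal sketch of how the sizing and bit layout above behave (illustrative only, not part of the patch; it assumes just the constructor and accessors defined in this file):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)

func main() {
	// 10 bits are stored in ceil(10/8) = 2 bytes, with ExtraBitsStored = 10 % 8 = 2,
	// so Size() returns (2-1)*8 + 2 = 10.
	bA := bitarray.NewCompactBitArray(10)
	fmt.Println(bA.Size()) // 10

	bA.SetIndex(0, true) // highest-order bit of Elems[0], per the 7-(i%8) shift
	bA.SetIndex(9, true)
	fmt.Println(bA.GetIndex(0), bA.GetIndex(9)) // true true
	fmt.Println(bA.NumTrueBitsBefore(9))        // 1 (only bit 0 is set below index 9)
}

+// Copy returns a copy of the provided bit array.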
+func (bA *CompactBitArray) Copy() *CompactBitArray {
+	if bA == nil {
+		return nil
+	}
+	c := make([]byte, len(bA.Elems))
+	copy(c, bA.Elems)
+	return &CompactBitArray{
+		ExtraBitsStored: bA.ExtraBitsStored,
+		Elems:           c,
+	}
+}
+
+// String returns a string representation of CompactBitArray: BA{<bit-string>},
+// where <bit-string> is a sequence of 'x' (1) and '_' (0).
+// The <bit-string> includes spaces and newlines to help people.
+// For a simple sequence of 'x' and '_' characters with no spaces or newlines,
+// see the MarshalJSON() method.
+// Example: "BA{_x_}" or "nil-BitArray" for nil.
+func (bA *CompactBitArray) String() string {
+	return bA.StringIndented("")
+}
+
+// StringIndented returns the same thing as String(), but applies the indent
+// at every 10th bit, and twice at every 50th bit.
+func (bA *CompactBitArray) StringIndented(indent string) string {
+	if bA == nil {
+		return "nil-BitArray"
+	}
+	lines := []string{}
+	bits := ""
+	size := bA.Size()
+	for i := 0; i < size; i++ {
+		if bA.GetIndex(i) {
+			bits += "x"
+		} else {
+			bits += "_"
+		}
+		if i%100 == 99 {
+			lines = append(lines, bits)
+			bits = ""
+		}
+		if i%10 == 9 {
+			bits += indent
+		}
+		if i%50 == 49 {
+			bits += indent
+		}
+	}
+	if len(bits) > 0 {
+		lines = append(lines, bits)
+	}
+	return fmt.Sprintf("BA{%v:%v}", size, strings.Join(lines, indent))
+}
+
+// MarshalJSON implements json.Marshaler interface by marshaling bit array
+// using a custom format: a string of '_' or 'x' where 'x' denotes the 1 bit.
+func (bA *CompactBitArray) MarshalJSON() ([]byte, error) {
+	if bA == nil {
+		return []byte("null"), nil
+	}
+
+	bits := `"`
+	size := bA.Size()
+	for i := 0; i < size; i++ {
+		if bA.GetIndex(i) {
+			bits += `x`
+		} else {
+			bits += `_`
+		}
+	}
+	bits += `"`
+	return []byte(bits), nil
+}
+
+var bitArrayJSONRegexp = regexp.MustCompile(`\A"([_x]*)"\z`)
+
+// UnmarshalJSON implements json.Unmarshaler interface by unmarshaling a custom
+// JSON description.
+func (bA *CompactBitArray) UnmarshalJSON(bz []byte) error {
+	b := string(bz)
+	if b == "null" {
+		// This is required e.g. for encoding/json when decoding
+		// into a pointer with pre-allocated BitArray.
+		bA.ExtraBitsStored = 0
+		bA.Elems = nil
+		return nil
+	}
+
+	// Validate 'b'.
+	match := bitArrayJSONRegexp.FindStringSubmatch(b)
+	if match == nil {
+		return fmt.Errorf("BitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
+	}
+	bits := match[1]
+
+	// Construct new CompactBitArray and copy over.
+	numBits := len(bits)
+	bA2 := NewCompactBitArray(numBits)
+	for i := 0; i < numBits; i++ {
+		if bits[i] == 'x' {
+			bA2.SetIndex(i, true)
+		}
+	}
+	*bA = *bA2
+	return nil
+}
+
+// CompactMarshal is a space efficient encoding for CompactBitArray.
+// It is not amino compatible.
+func (bA *CompactBitArray) CompactMarshal() []byte {
+	size := bA.Size()
+	if size <= 0 {
+		return []byte("null")
+	}
+	bz := make([]byte, 0, size/8)
+	// length prefix number of bits, not number of bytes. This difference
+	// takes 3-4 bits in encoding, as opposed to instead encoding the number of
+	// bytes (saving 3-4 bits) and including the offset as a full byte.
+	bz = appendUvarint(bz, uint64(size))
+	bz = append(bz, bA.Elems...)
+	return bz
+}
+
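A byte-level illustration of the compact encoding just described (a sketch, not part of the patch): the output is a uvarint count of bits followed by the raw Elems bytes, matching the test vectors later in this diff.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/multisig/bitarray"
)

func main() {
	bA := bitarray.NewCompactBitArray(9)
	bA.SetIndex(0, true)
	bA.SetIndex(8, true)

	bz := bA.CompactMarshal()
	fmt.Printf("%v\n", bz) // [9 128 128]: uvarint bit-count 9, then Elems

	back, err := bitarray.CompactUnmarshal(bz)
	if err != nil {
		panic(err)
	}
	fmt.Println(back.GetIndex(8)) // true
}

Prefixing the bit count rather than the byte count lets the decoder recover ExtraBitsStored (size % 8) without spending a separate offset byte.

+// CompactUnmarshal is a space efficient decoding for CompactBitArray.
+// It is not amino compatible.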
+func CompactUnmarshal(bz []byte) (*CompactBitArray, error) { + if len(bz) < 2 { + return nil, errors.New("compact bit array: invalid compact unmarshal size") + } else if bytes.Equal(bz, []byte("null")) { + return NewCompactBitArray(0), nil + } + size, n := binary.Uvarint(bz) + bz = bz[n:] + if len(bz) != int(size+7)/8 { + return nil, errors.New("compact bit array: invalid compact unmarshal size") + } + + bA := &CompactBitArray{byte(int(size % 8)), bz} + return bA, nil +} + +func appendUvarint(b []byte, x uint64) []byte { + var a [binary.MaxVarintLen64]byte + n := binary.PutUvarint(a[:], x) + return append(b, a[:n]...) +} diff --git a/crypto/multisig/bitarray/compact_bit_array_test.go b/crypto/multisig/bitarray/compact_bit_array_test.go new file mode 100644 index 00000000000..4612ae25af6 --- /dev/null +++ b/crypto/multisig/bitarray/compact_bit_array_test.go @@ -0,0 +1,196 @@ +package bitarray + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" +) + +func randCompactBitArray(bits int) (*CompactBitArray, []byte) { + numBytes := (bits + 7) / 8 + src := cmn.RandBytes((bits + 7) / 8) + bA := NewCompactBitArray(bits) + + for i := 0; i < numBytes-1; i++ { + for j := uint8(0); j < 8; j++ { + bA.SetIndex(i*8+int(j), src[i]&(uint8(1)<<(8-j)) > 0) + } + } + // Set remaining bits + for i := uint8(0); i < 8-uint8(bA.ExtraBitsStored); i++ { + bA.SetIndex(numBytes*8+int(i), src[numBytes-1]&(uint8(1)<<(8-i)) > 0) + } + return bA, src +} + +func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { + bitList := []int{-127, -128, -1 << 31} + for _, bits := range bitList { + bA := NewCompactBitArray(bits) + require.Nil(t, bA) + } +} + +func TestJSONMarshalUnmarshal(t *testing.T) { + + bA1 := NewCompactBitArray(0) + bA2 := NewCompactBitArray(1) + + bA3 := NewCompactBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewCompactBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + bA5 := NewCompactBitArray(9) + bA5.SetIndex(0, true) + bA5.SetIndex(1, true) + bA5.SetIndex(8, true) + + bA6 := NewCompactBitArray(16) + bA6.SetIndex(0, true) + bA6.SetIndex(1, true) + bA6.SetIndex(8, false) + bA6.SetIndex(15, true) + + testCases := []struct { + bA *CompactBitArray + marshalledBA string + }{ + {nil, `null`}, + {bA1, `null`}, + {bA2, `"_"`}, + {bA3, `"x"`}, + {bA4, `"xx___"`}, + {bA5, `"xx______x"`}, + {bA6, `"xx_____________x"`}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz, err := json.Marshal(tc.bA) + require.NoError(t, err) + + assert.Equal(t, tc.marshalledBA, string(bz)) + + var unmarshalledBA *CompactBitArray + err = json.Unmarshal(bz, &unmarshalledBA) + require.NoError(t, err) + + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} + +func TestCompactMarshalUnmarshal(t *testing.T) { + bA1 := NewCompactBitArray(0) + bA2 := NewCompactBitArray(1) + + bA3 := NewCompactBitArray(1) + bA3.SetIndex(0, true) + + bA4 := NewCompactBitArray(5) + bA4.SetIndex(0, true) + bA4.SetIndex(1, true) + + bA5 := NewCompactBitArray(9) + bA5.SetIndex(0, true) + bA5.SetIndex(1, true) + bA5.SetIndex(8, true) + + bA6 := NewCompactBitArray(16) + bA6.SetIndex(0, true) + bA6.SetIndex(1, true) + 
bA6.SetIndex(8, false) + bA6.SetIndex(15, true) + + testCases := []struct { + bA *CompactBitArray + marshalledBA []byte + }{ + {nil, []byte("null")}, + {bA1, []byte("null")}, + {bA2, []byte{byte(1), byte(0)}}, + {bA3, []byte{byte(1), byte(128)}}, + {bA4, []byte{byte(5), byte(192)}}, + {bA5, []byte{byte(9), byte(192), byte(128)}}, + {bA6, []byte{byte(16), byte(192), byte(1)}}, + } + + for _, tc := range testCases { + t.Run(tc.bA.String(), func(t *testing.T) { + bz := tc.bA.CompactMarshal() + + assert.Equal(t, tc.marshalledBA, bz) + + unmarshalledBA, err := CompactUnmarshal(bz) + require.NoError(t, err) + if tc.bA == nil { + require.Nil(t, unmarshalledBA) + } else { + require.NotNil(t, unmarshalledBA) + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + if assert.EqualValues(t, tc.bA.String(), unmarshalledBA.String()) { + assert.EqualValues(t, tc.bA.Elems, unmarshalledBA.Elems) + } + } + }) + } +} + +func TestCompactBitArrayNumOfTrueBitsBefore(t *testing.T) { + testCases := []struct { + marshalledBA string + bAIndex []int + trueValueIndex []int + }{ + {`"_____"`, []int{0, 1, 2, 3, 4}, []int{0, 0, 0, 0, 0}}, + {`"x"`, []int{0}, []int{0}}, + {`"_x"`, []int{1}, []int{0}}, + {`"x___xxxx"`, []int{0, 4, 5, 6, 7}, []int{0, 1, 2, 3, 4}}, + {`"__x_xx_x__x_x___"`, []int{2, 4, 5, 7, 10, 12}, []int{0, 1, 2, 3, 4, 5}}, + {`"______________xx"`, []int{14, 15}, []int{0, 1}}, + } + for tcIndex, tc := range testCases { + t.Run(tc.marshalledBA, func(t *testing.T) { + var bA *CompactBitArray + err := json.Unmarshal([]byte(tc.marshalledBA), &bA) + require.NoError(t, err) + + for i := 0; i < len(tc.bAIndex); i++ { + require.Equal(t, tc.trueValueIndex[i], bA.NumTrueBitsBefore(tc.bAIndex[i]), "tc %d, i %d", tcIndex, i) + } + }) + } +} + +func TestCompactBitArrayGetSetIndex(t *testing.T) { + r := rand.New(rand.NewSource(100)) + numTests := 10 + numBitsPerArr := 100 + for i := 0; i < numTests; i++ { + bits := r.Intn(1000) + bA, _ := randCompactBitArray(bits) + + for j := 0; j < numBitsPerArr; j++ { + copy := bA.Copy() + index := r.Intn(bits) + val := (r.Int63() % 2) == 0 + bA.SetIndex(index, val) + require.Equal(t, val, bA.GetIndex(index), "bA.SetIndex(%d, %v) failed on bit array: %s", index, val, copy) + } + } +} diff --git a/crypto/multisig/multisignature.go b/crypto/multisig/multisignature.go new file mode 100644 index 00000000000..0d1796890cb --- /dev/null +++ b/crypto/multisig/multisignature.go @@ -0,0 +1,70 @@ +package multisig + +import ( + "errors" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/multisig/bitarray" +) + +// Multisignature is used to represent the signature object used in the multisigs. +// Sigs is a list of signatures, sorted by corresponding index. +type Multisignature struct { + BitArray *bitarray.CompactBitArray + Sigs [][]byte +} + +// NewMultisig returns a new Multisignature of size n. +func NewMultisig(n int) *Multisignature { + // Default the signature list to have a capacity of two, since we can + // expect that most multisigs will require multiple signers. + return &Multisignature{bitarray.NewCompactBitArray(n), make([][]byte, 0, 2)} +} + +// GetIndex returns the index of pk in keys. Returns -1 if not found +func getIndex(pk crypto.PubKey, keys []crypto.PubKey) int { + for i := 0; i < len(keys); i++ { + if pk.Equals(keys[i]) { + return i + } + } + return -1 +} + +// AddSignature adds a signature to the multisig, at the corresponding index. +// If the signature already exists, replace it. 
+func (mSig *Multisignature) AddSignature(sig []byte, index int) { + newSigIndex := mSig.BitArray.NumTrueBitsBefore(index) + // Signature already exists, just replace the value there + if mSig.BitArray.GetIndex(index) { + mSig.Sigs[newSigIndex] = sig + return + } + mSig.BitArray.SetIndex(index, true) + // Optimization if the index is the greatest index + if newSigIndex == len(mSig.Sigs) { + mSig.Sigs = append(mSig.Sigs, sig) + return + } + // Expand slice by one with a dummy element, move all elements after i + // over by one, then place the new signature in that gap. + mSig.Sigs = append(mSig.Sigs, make([]byte, 0)) + copy(mSig.Sigs[newSigIndex+1:], mSig.Sigs[newSigIndex:]) + mSig.Sigs[newSigIndex] = sig +} + +// AddSignatureFromPubKey adds a signature to the multisig, +// at the index in keys corresponding to the provided pubkey. +func (mSig *Multisignature) AddSignatureFromPubKey(sig []byte, pubkey crypto.PubKey, keys []crypto.PubKey) error { + index := getIndex(pubkey, keys) + if index == -1 { + return errors.New("provided key didn't exist in pubkeys") + } + mSig.AddSignature(sig, index) + return nil +} + +// Marshal the multisignature with amino +func (mSig *Multisignature) Marshal() []byte { + return cdc.MustMarshalBinaryBare(mSig) +} diff --git a/crypto/multisig/threshold_pubkey.go b/crypto/multisig/threshold_pubkey.go new file mode 100644 index 00000000000..ca8d4230304 --- /dev/null +++ b/crypto/multisig/threshold_pubkey.go @@ -0,0 +1,92 @@ +package multisig + +import ( + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/tmhash" +) + +// PubKeyMultisigThreshold implements a K of N threshold multisig. +type PubKeyMultisigThreshold struct { + K uint `json:"threshold"` + PubKeys []crypto.PubKey `json:"pubkeys"` +} + +var _ crypto.PubKey = &PubKeyMultisigThreshold{} + +// NewPubKeyMultisigThreshold returns a new PubKeyMultisigThreshold. +// Panics if len(pubkeys) < k or 0 >= k. +func NewPubKeyMultisigThreshold(k int, pubkeys []crypto.PubKey) crypto.PubKey { + if k <= 0 { + panic("threshold k of n multisignature: k <= 0") + } + if len(pubkeys) < k { + panic("threshold k of n multisignature: len(pubkeys) < k") + } + return &PubKeyMultisigThreshold{uint(k), pubkeys} +} + +// VerifyBytes expects sig to be an amino encoded version of a MultiSignature. +// Returns true iff the multisignature contains k or more signatures +// for the correct corresponding keys, +// and all signatures are valid. (Not just k of the signatures) +// The multisig uses a bitarray, so multiple signatures for the same key is not +// a concern. +func (pk *PubKeyMultisigThreshold) VerifyBytes(msg []byte, marshalledSig []byte) bool { + var sig *Multisignature + err := cdc.UnmarshalBinaryBare(marshalledSig, &sig) + if err != nil { + return false + } + size := sig.BitArray.Size() + // ensure bit array is the correct size + if len(pk.PubKeys) != size { + return false + } + // ensure size of signature list + if len(sig.Sigs) < int(pk.K) || len(sig.Sigs) > size { + return false + } + // ensure at least k signatures are set + if sig.BitArray.NumTrueBitsBefore(size) < int(pk.K) { + return false + } + // index in the list of signatures which we are concerned with. 
+ sigIndex := 0 + for i := 0; i < size; i++ { + if sig.BitArray.GetIndex(i) { + if !pk.PubKeys[i].VerifyBytes(msg, sig.Sigs[sigIndex]) { + return false + } + sigIndex++ + } + } + return true +} + +// Bytes returns the amino encoded version of the PubKeyMultisigThreshold +func (pk *PubKeyMultisigThreshold) Bytes() []byte { + return cdc.MustMarshalBinaryBare(pk) +} + +// Address returns tmhash(PubKeyMultisigThreshold.Bytes()) +func (pk *PubKeyMultisigThreshold) Address() crypto.Address { + return crypto.Address(tmhash.Sum(pk.Bytes())) +} + +// Equals returns true iff pk and other both have the same number of keys, and +// all constituent keys are the same, and in the same order. +func (pk *PubKeyMultisigThreshold) Equals(other crypto.PubKey) bool { + otherKey, sameType := other.(*PubKeyMultisigThreshold) + if !sameType { + return false + } + if pk.K != otherKey.K || len(pk.PubKeys) != len(otherKey.PubKeys) { + return false + } + for i := 0; i < len(pk.PubKeys); i++ { + if !pk.PubKeys[i].Equals(otherKey.PubKeys[i]) { + return false + } + } + return true +} diff --git a/crypto/multisig/threshold_pubkey_test.go b/crypto/multisig/threshold_pubkey_test.go new file mode 100644 index 00000000000..bfc874ebedf --- /dev/null +++ b/crypto/multisig/threshold_pubkey_test.go @@ -0,0 +1,112 @@ +package multisig + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +// This tests multisig functionality, but it expects the first k signatures to be valid +// TODO: Adapt it to give more flexibility about first k signatures being valid +func TestThresholdMultisigValidCases(t *testing.T) { + pkSet1, sigSet1 := generatePubKeysAndSignatures(5, []byte{1, 2, 3, 4}) + cases := []struct { + msg []byte + k int + pubkeys []crypto.PubKey + signingIndices []int + // signatures should be the same size as signingIndices. 
+ signatures [][]byte + passAfterKSignatures []bool + }{ + { + msg: []byte{1, 2, 3, 4}, + k: 2, + pubkeys: pkSet1, + signingIndices: []int{0, 3, 1}, + signatures: sigSet1, + passAfterKSignatures: []bool{false}, + }, + } + for tcIndex, tc := range cases { + multisigKey := NewPubKeyMultisigThreshold(tc.k, tc.pubkeys) + multisignature := NewMultisig(len(tc.pubkeys)) + for i := 0; i < tc.k-1; i++ { + signingIndex := tc.signingIndices[i] + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed when i < k, tc %d, i %d", tcIndex, i) + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, i+1, len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + } + require.False(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig passed with k - 1 sigs, tc %d", tcIndex) + multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys) + require.True(t, multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig failed after k good signatures, tc %d", tcIndex) + for i := tc.k + 1; i < len(tc.signingIndices); i++ { + signingIndex := tc.signingIndices[i] + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, tc.passAfterKSignatures[i-tc.k-1], + multisigKey.VerifyBytes(tc.msg, multisignature.Marshal()), + "multisig didn't verify as expected after k sigs, tc %d, i %d", tcIndex, i) + + multisignature.AddSignatureFromPubKey(tc.signatures[signingIndex], tc.pubkeys[signingIndex], tc.pubkeys) + require.Equal(t, i+1, len(multisignature.Sigs), + "adding a signature for the same pubkey twice increased signature count by 2, tc %d", tcIndex) + } + } +} + +// TODO: Fully replace this test with table driven tests +func TestThresholdMultisigDuplicateSignatures(t *testing.T) { + msg := []byte{1, 2, 3, 4, 5} + pubkeys, sigs := generatePubKeysAndSignatures(5, msg) + multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) + multisignature := NewMultisig(5) + require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) + multisignature.AddSignatureFromPubKey(sigs[0], pubkeys[0], pubkeys) + // Add second signature manually + multisignature.Sigs = append(multisignature.Sigs, sigs[0]) + require.False(t, multisigKey.VerifyBytes(msg, multisignature.Marshal())) +} + +// TODO: Fully replace this test with table driven tests +func TestMultiSigPubKeyEquality(t *testing.T) { + msg := []byte{1, 2, 3, 4} + pubkeys, _ := generatePubKeysAndSignatures(5, msg) + multisigKey := NewPubKeyMultisigThreshold(2, pubkeys) + var unmarshalledMultisig *PubKeyMultisigThreshold + cdc.MustUnmarshalBinaryBare(multisigKey.Bytes(), &unmarshalledMultisig) + require.True(t, multisigKey.Equals(unmarshalledMultisig)) + + // Ensure that reordering pubkeys is treated as a different pubkey + pubkeysCpy := make([]crypto.PubKey, 5) + copy(pubkeysCpy, pubkeys) + pubkeysCpy[4] = pubkeys[3] + pubkeysCpy[3] = pubkeys[4] + multisigKey2 := NewPubKeyMultisigThreshold(2, pubkeysCpy) + require.False(t, multisigKey.Equals(multisigKey2)) +} + +func generatePubKeysAndSignatures(n int, msg []byte) (pubkeys []crypto.PubKey, signatures [][]byte) { + pubkeys = make([]crypto.PubKey, n) + signatures = make([][]byte, n) + for i := 0; i < 
n; i++ { + var privkey crypto.PrivKey + if rand.Int63()%2 == 0 { + privkey = ed25519.GenPrivKey() + } else { + privkey = secp256k1.GenPrivKey() + } + pubkeys[i] = privkey.PubKey() + signatures[i], _ = privkey.Sign(msg) + } + return +} diff --git a/crypto/multisig/wire.go b/crypto/multisig/wire.go new file mode 100644 index 00000000000..68b84fbfb7a --- /dev/null +++ b/crypto/multisig/wire.go @@ -0,0 +1,26 @@ +package multisig + +import ( + amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/secp256k1" +) + +// TODO: Figure out API for others to either add their own pubkey types, or +// to make verify / marshal accept a cdc. +const ( + PubKeyMultisigThresholdAminoRoute = "tendermint/PubKeyMultisigThreshold" +) + +var cdc = amino.NewCodec() + +func init() { + cdc.RegisterInterface((*crypto.PubKey)(nil), nil) + cdc.RegisterConcrete(PubKeyMultisigThreshold{}, + PubKeyMultisigThresholdAminoRoute, nil) + cdc.RegisterConcrete(ed25519.PubKeyEd25519{}, + ed25519.PubKeyAminoRoute, nil) + cdc.RegisterConcrete(secp256k1.PubKeySecp256k1{}, + secp256k1.PubKeyAminoRoute, nil) +} diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index aee5dafe7a2..2c64d1e9dd5 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -7,7 +7,7 @@ import ( "fmt" "io" - secp256k1 "github.com/btcsuite/btcd/btcec" + secp256k1 "github.com/tendermint/btcd/btcec" amino "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "golang.org/x/crypto/ripemd160" @@ -15,8 +15,8 @@ import ( //------------------------------------- const ( - Secp256k1PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1" - Secp256k1PubKeyAminoRoute = "tendermint/PubKeySecp256k1" + PrivKeyAminoRoute = "tendermint/PrivKeySecp256k1" + PubKeyAminoRoute = "tendermint/PubKeySecp256k1" ) var cdc = amino.NewCodec() @@ -24,11 +24,11 @@ var cdc = amino.NewCodec() func init() { cdc.RegisterInterface((*crypto.PubKey)(nil), nil) cdc.RegisterConcrete(PubKeySecp256k1{}, - Secp256k1PubKeyAminoRoute, nil) + PubKeyAminoRoute, nil) cdc.RegisterInterface((*crypto.PrivKey)(nil), nil) cdc.RegisterConcrete(PrivKeySecp256k1{}, - Secp256k1PrivKeyAminoRoute, nil) + PrivKeyAminoRoute, nil) } //------------------------------------- @@ -141,10 +141,12 @@ func (pubKey PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool { if err != nil { return false } - parsedSig, err := secp256k1.ParseDERSignature(sig[:], secp256k1.S256()) + parsedSig, err := secp256k1.ParseSignature(sig[:], secp256k1.S256()) if err != nil { return false } + // Underlying library ensures that this signature is in canonical form, to + // prevent Secp256k1 malleability from altering the sign of the s term. 
return parsedSig.Verify(crypto.Sha256(msg), pub) } diff --git a/crypto/secp256k1/secpk256k1_test.go b/crypto/secp256k1/secpk256k1_test.go index 0f0b5adce9c..2fa483014db 100644 --- a/crypto/secp256k1/secpk256k1_test.go +++ b/crypto/secp256k1/secpk256k1_test.go @@ -11,7 +11,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/secp256k1" - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" + underlyingSecp256k1 "github.com/tendermint/btcd/btcec" ) type keyData struct { diff --git a/crypto/xsalsa20symmetric/symmetric.go b/crypto/xsalsa20symmetric/symmetric.go index d2369675d1f..aa33ee14a81 100644 --- a/crypto/xsalsa20symmetric/symmetric.go +++ b/crypto/xsalsa20symmetric/symmetric.go @@ -2,6 +2,7 @@ package xsalsa20symmetric import ( "errors" + "fmt" "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" @@ -18,7 +19,7 @@ const secretLen = 32 // NOTE: call crypto.MixEntropy() first. func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { if len(secret) != secretLen { - cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } nonce := crypto.CRandBytes(nonceLen) nonceArr := [nonceLen]byte{} @@ -35,7 +36,7 @@ func EncryptSymmetric(plaintext []byte, secret []byte) (ciphertext []byte) { // The ciphertext is (secretbox.Overhead + 24) bytes longer than the plaintext. func DecryptSymmetric(ciphertext []byte, secret []byte) (plaintext []byte, err error) { if len(secret) != secretLen { - cmn.PanicSanity(cmn.Fmt("Secret must be 32 bytes long, got len %v", len(secret))) + cmn.PanicSanity(fmt.Sprintf("Secret must be 32 bytes long, got len %v", len(secret))) } if len(ciphertext) <= secretbox.Overhead+nonceLen { return nil, errors.New("Ciphertext is too short") diff --git a/docs/.textlintrc.json b/docs/.textlintrc.json new file mode 100644 index 00000000000..4103f89e89d --- /dev/null +++ b/docs/.textlintrc.json @@ -0,0 +1,9 @@ +{ + "rules": { + "stop-words": { + "severity": "warning", + "defaultWords": false, + "words": "stop-words.txt" + } + } +} diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index 016bac5e4ec..0be6e4c6687 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -8,14 +8,10 @@ and built using [VuePress](https://vuepress.vuejs.org/) from the tendermint webs -- https://github.com/tendermint/tendermint.com -which has a [configuration file](https://github.com/tendermint/tendermint.com/blob/develop/docs/.vuepress/config.js) for displaying -the Table of Contents that lists all the documentation. +Under the hood, Jenkins listens for changes (on develop or master) in ./docs, then rebuilds +either the staging or production site depending on which branch the changes were made to. -Under the hood, Jenkins listens for changes in ./docs then pushes a `docs-staging` branch to the tendermint.com repo with the latest documentation. That branch must be manually PR'd to `develop` then `master` for staging then production. This process should happen in synchrony with a release. +To update the Table of Contents (layout of the documentation sidebar), edit the +`config.js` in this directory, while the `README.md` is the landing page for the +website documentation. 
-The `README.md` in this directory is the landing page for -website documentation and the following folders are intentionally -ommitted: - -- `architecture/` ==> contains Architecture Design Records -- `spec/` ==> contains the detailed specification diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 442c9be651e..00000000000 --- a/docs/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = python -msphinx -SPHINXPROJ = Tendermint -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -install: - @pip install -r requirements.txt - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README.md b/docs/README.md index 16ea708ad0a..8c6c5d10582 100644 --- a/docs/README.md +++ b/docs/README.md @@ -11,17 +11,17 @@ replicates it on many machines. In other words, a blockchain. Tendermint requires an application running over the Application Blockchain Interface (ABCI) - and comes packaged with an example application to do so. -Follow the [installation instructions](./introduction/install) to get up and running -quickly. For more details on [using tendermint](./tendermint-core/using-tendermint) see that +Follow the [installation instructions](./introduction/install.md) to get up and running +quickly. For more details on [using tendermint](./tendermint-core/using-tendermint.md) see that and the following sections. ## Networks Testnets can be setup manually on one or more machines, or automatically on one or more machine, using a variety of methods described in the [deploy testnets -section](./networks/deploy-testnets). +section](./networks/deploy-testnets.md). ## Application Development The first step to building application on Tendermint is to [install -ABCI-CLI](./app-dev/getting-started) and play with the example applications. +ABCI-CLI](./app-dev/getting-started.md) and play with the example applications. diff --git a/docs/app-dev/abci-cli.md b/docs/app-dev/abci-cli.md index 4f9019fdae4..263c2c5ee73 100644 --- a/docs/app-dev/abci-cli.md +++ b/docs/app-dev/abci-cli.md @@ -36,7 +36,7 @@ Available Commands: console Start an interactive abci console for multiple commands counter ABCI demo example deliver_tx Deliver a new tx to the application - kvstore ABCI demo example + kvstore ABCI demo example echo Have the application echo a message help Help about any command info Get some info about the application @@ -140,7 +140,7 @@ response. The server may be generic for a particular language, and we provide a [reference implementation in Golang](https://github.com/tendermint/tendermint/tree/develop/abci/server). See the -[list of other ABCI implementations](./ecosystem.html) for servers in +[list of other ABCI implementations](./ecosystem.md) for servers in other languages. 
The handler is specific to the application, and may be arbitrary, so diff --git a/docs/app-dev/abci-spec.md b/docs/app-dev/abci-spec.md index 770740b86c8..6d0f712a2c0 100644 --- a/docs/app-dev/abci-spec.md +++ b/docs/app-dev/abci-spec.md @@ -1,5 +1,9 @@ # ABCI Specification +### XXX + +DEPRECATED: Moved [here](../spec/abci/abci.md) + ## Message Types ABCI requests/responses are defined as simple Protobuf messages in [this @@ -111,14 +115,21 @@ See below for more details on the message types and how they are used. - `Time (google.protobuf.Timestamp)`: Genesis time. - `ChainID (string)`: ID of the blockchain. - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. - - `Validators ([]Validator)`: Initial genesis validators. + - `Validators ([]ValidatorUpdate)`: Initial genesis validators. - `AppStateBytes ([]byte)`: Serialized initial application state. Amino-encoded JSON bytes. - **Response**: - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. - - `Validators ([]Validator)`: Initial validator set. + - `Validators ([]ValidatorUpdate)`: Initial validator set (if non empty). - **Usage**: - Called once upon genesis. + - If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + - If ResponseInitChain.Validators is not empty, the initial validator set will be the + ResponseInitChain.Validators (regardless of what is in RequestInitChain.Validators). + - This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). ### Query @@ -161,15 +172,17 @@ See below for more details on the message types and how they are used. - `Hash ([]byte)`: The block's hash. This can be derived from the block header. - `Header (struct{})`: The block header. - - `LastCommitInfo (LastCommitInfo)`: Info about the last commit. + - `LastCommitInfo (LastCommitInfo)`: Info about the last commit, including the + round, and the list of validators and which ones signed the last block. - `ByzantineValidators ([]Evidence)`: List of evidence of - validators that acted maliciously + validators that acted maliciously. - **Response**: - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing - **Usage**: - Signals the beginning of a new block. Called prior to any DeliverTxs. - - The header is expected to at least contain the Height. + - The header contains the height, timestamp, and more - it exactly matches the + Tendermint block header. We may seek to generalize this in the future. - The `LastCommitInfo` and `ByzantineValidators` can be used to determine rewards and punishments for the validators. NOTE validators here do not include pubkeys. @@ -237,7 +250,7 @@ See below for more details on the message types and how they are used. - **Request**: - `Height (int64)`: Height of the block just executed. - **Response**: - - `ValidatorUpdates ([]Validator)`: Changes to validator set (set + - `ValidatorUpdates ([]ValidatorUpdate)`: Changes to validator set (set voting power to 0 to remove). - `ConsensusParamUpdates (ConsensusParams)`: Changes to consensus-critical time, size, and other parameters. @@ -245,8 +258,11 @@ See below for more details on the message types and how they are used. - **Usage**: - Signals the end of a block. - Called prior to each Commit, after all transactions. 
- - Validator set and consensus params are updated with the result. - - Validator pubkeys are expected to be go-wire encoded. + - Validator updates returned for block H: + - apply to the NextValidatorsHash of block H+1 + - apply to the ValidatorsHash (and thus the validator set) for block H+2 + - apply to the RequestBeginBlock.LastCommitInfo (ie. the last validator set) for block H+3 + - Consensus params returned for block H apply for block H+1 ### Commit @@ -271,12 +287,17 @@ See below for more details on the message types and how they are used. - `NumTxs (int32)`: Number of transactions in the block - `TotalTxs (int64)`: Total number of transactions in the blockchain until now - - `LastBlockHash ([]byte)`: Hash of the previous (parent) block + - `LastBlockID (BlockID)`: Hash of the previous (parent) block + - `LastCommitHash ([]byte)`: Hash of the previous block's commit - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `NextValidatorsHash ([]byte)`: Hash of the validator set for the next block + - `ConsensusHash ([]byte)`: Hash of the consensus parameters for this block - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the Merkle root of the application state after executing the previous block's transactions - - `Proposer (Validator)`: Original proposer for the block + - `LastResultsHash ([]byte)`: Hash of the ABCI results returned by the last block + - `EvidenceHash ([]byte)`: Hash of the evidence included in this block + - `ProposerAddress ([]byte)`: Original proposer for the block - **Usage**: - Provided in RequestBeginBlock - Provides important context about the current state of the blockchain - @@ -288,16 +309,27 @@ See below for more details on the message types and how they are used. - **Fields**: - `Address ([]byte)`: Address of the validator (hash of the public key) + - `Power (int64)`: Voting power of the validator +- **Usage**: + - Validator identified by address + - Used in RequestBeginBlock as part of VoteInfo + - Does not include PubKey to avoid sending potentially large quantum pubkeys + over the ABCI + +### ValidatorUpdate + +- **Fields**: - `PubKey (PubKey)`: Public key of the validator - `Power (int64)`: Voting power of the validator - **Usage**: - - Provides all identifying information about the validator + - Validator identified by PubKey + - Used to tell Tendermint to update the validator set -### SigningValidator +### VoteInfo - **Fields**: - `Validator (Validator)`: A validator - - `SignedLastBlock (bool)`: Indicated whether or not the validator signed + - `SignedLastBlock (bool)`: Indicates whether or not the validator signed the last block - **Usage**: - Indicates whether a validator signed the last block, allowing for rewards @@ -330,6 +362,6 @@ See below for more details on the message types and how they are used. ### LastCommitInfo - **Fields**: - - `CommitRound (int32)`: Commit round. - - `Validators ([]SigningValidator)`: List of validators in the current - validator set and whether or not they signed a vote. + - `Round (int32)`: Commit round. + - `Votes ([]VoteInfo)`: List of validators addresses in the last validator set + with their voting power and whether or not they signed a vote. 
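To make the EndBlock semantics above concrete, here is a minimal sketch of an ABCI application method that returns a validator update. It is not part of this changeset: `App` and its `pendingRemovals` field are hypothetical, and only the `abci/types` messages documented above (RequestEndBlock, ResponseEndBlock, ValidatorUpdate, PubKey) are assumed.

```go
package app

import (
	types "github.com/tendermint/tendermint/abci/types"
)

// App is a hypothetical ABCI application. pendingRemovals is an
// illustrative field (height -> ed25519 pubkey bytes), not something
// Tendermint provides.
type App struct {
	types.BaseApplication
	pendingRemovals map[int64][]byte
}

// EndBlock returns validator-set changes. An update returned for block H
// takes effect in the validator set of block H+2, as described above.
func (app *App) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	updates := []types.ValidatorUpdate{}
	if pk, ok := app.pendingRemovals[req.Height]; ok {
		// Voting power 0 removes the validator from the set.
		updates = append(updates, types.ValidatorUpdate{
			PubKey: types.PubKey{Type: "ed25519", Data: pk},
			Power:  0,
		})
	}
	return types.ResponseEndBlock{ValidatorUpdates: updates}
}
```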
diff --git a/docs/app-dev/app-architecture.md b/docs/app-dev/app-architecture.md index 9ce0fae9f78..b141c0f3008 100644 --- a/docs/app-dev/app-architecture.md +++ b/docs/app-dev/app-architecture.md @@ -46,6 +46,5 @@ See the following for more extensive documentation: - [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028) - [Tendermint RPC Docs](https://tendermint.github.io/slate/) -- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618) -- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html) -- [ABCI spec](https://github.com/tendermint/tendermint/blob/develop/abci/docs/abci-spec.md) +- [Tendermint in Production](../tendermint-core/running-in-production.md) +- [ABCI spec](./abci-spec.md) diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md index d3f22362bf1..3aaebb230c6 100644 --- a/docs/app-dev/app-development.md +++ b/docs/app-dev/app-development.md @@ -1,5 +1,10 @@ # Application Development Guide +## XXX + +This page is undergoing deprecation. All content is being moved to the new [home +of the ABCI specification](../spec/abci/README.md). + ## ABCI Design The purpose of ABCI is to provide a clean interface between state @@ -502,7 +507,7 @@ In go: ``` func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())} + return types.ResponseInfo{Data: fmt.Sprintf("{\"size\":%v}", app.state.Size())} } ``` diff --git a/docs/app-dev/ecosystem.json b/docs/app-dev/ecosystem.json index 363f1890287..67aca2efb20 100644 --- a/docs/app-dev/ecosystem.json +++ b/docs/app-dev/ecosystem.json @@ -5,24 +5,21 @@ "url": "https://github.com/cosmos/cosmos-sdk", "language": "Go", "author": "Cosmos", - "description": - "A prototypical account based crypto currency state machine supporting plugins" + "description": "A prototypical account based crypto currency state machine supporting plugins" }, { "name": "cb-ledger", "url": "https://github.com/block-finance/cpp-abci", "language": "C++", "author": "Block Finance", - "description": - "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow" + "description": "Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow" }, { "name": "Clearchain", "url": "https://github.com/tendermint/clearchain", "language": "Go", "author": "FXCLR", - "description": - "Application to manage a distributed ledger for money transfers that support multi-currency accounts" + "description": "Application to manage a distributed ledger for money transfers that support multi-currency accounts" }, { "name": "Ethermint", @@ -43,8 +40,7 @@ "url": "https://github.com/hyperledger/burrow", "language": "Go", "author": "Monax Industries", - "description": - "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store" + "description": "Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store" }, { "name": "Merkle AVL Tree", @@ -72,8 +68,7 @@ "url": "https://github.com/trusch/passchain", "language": "Go", "author": "trusch", - "description": - "Tool to securely store and share passwords, tokens and other short secrets" + "description": "Tool to securely store and share passwords, tokens and other short secrets" }, { "name": "Passwerk", @@ -87,8 +82,7 @@ "url": "https://github.com/davebryson/py-tendermint", 
"language": "Python", "author": "Dave Bryson", - "description": - "A Python microframework for building blockchain applications with Tendermint" + "description": "A Python microframework for building blockchain applications with Tendermint" }, { "name": "Stratumn SDK", @@ -102,16 +96,14 @@ "url": "https://github.com/keppel/lotion", "language": "Javascript", "author": "Judd Keppel", - "description": - "A Javascript microframework for building blockchain applications with Tendermint" + "description": "A Javascript microframework for building blockchain applications with Tendermint" }, { "name": "Tendermint Blockchain Chat App", "url": "https://github.com/SaifRehman/tendermint-chat-app/", "language": "Javascript", "author": "Saif Rehman", - "description": - "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." + "description": "This is a minimal chat application based on Tendermint using Lotion.js in 30 lines of code!. It also includes web/mobile application built using Ionic 3." }, { "name": "BigchainDB", @@ -131,7 +123,7 @@ "abciServers": [ { "name": "abci", - "url": "https://github.com/tendermint/abci", + "url": "https://github.com/tendermint/tendermint/tree/master/abci", "language": "Go", "author": "Tendermint" }, @@ -184,16 +176,14 @@ "url": "https://github.com/tendermint/tools", "technology": "Docker and Kubernetes", "author": "Tendermint", - "description": - "Deploy a Tendermint test network using Google's kubernetes" + "description": "Deploy a Tendermint test network using Google's kubernetes" }, { "name": "terraforce", "url": "https://github.com/tendermint/tools", "technology": "Terraform", "author": "Tendermint", - "description": - "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" + "description": "Terraform + our custom terraforce tool; deploy a production Tendermint network with load balancing over multiple AWS availability zones" }, { "name": "ansible-tendermint", diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md index 3bca109591f..3ba097c4c43 100644 --- a/docs/app-dev/indexing-transactions.md +++ b/docs/app-dev/indexing-transactions.md @@ -16,16 +16,21 @@ Let's take a look at the `[tx_index]` config section: # 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). indexer = "kv" -# Comma-separated list of tags to index (by default the only tag is tx hash) +# Comma-separated list of tags to index (by default the only tag is "tx.hash") +# +# You can also index transactions by height by adding "tx.height" tag here. # # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). 
index_all_tags = false ``` @@ -59,7 +64,6 @@ all tags, set `index_all_tags=true` Note, there are a few predefined tags: -- `tm.event` (event type) - `tx.hash` (transaction's hash) - `tx.height` (height of the block transaction was committed in) diff --git a/docs/app-dev/subscribing-to-events-via-websocket.md b/docs/app-dev/subscribing-to-events-via-websocket.md index 9e7c642a020..69ab59f5050 100644 --- a/docs/app-dev/subscribing-to-events-via-websocket.md +++ b/docs/app-dev/subscribing-to-events-via-websocket.md @@ -26,3 +26,39 @@ more information on query syntax and other options. You can also use tags, given you had included them into DeliverTx response, to query transaction results. See [Indexing transactions](./indexing-transactions.md) for details. + +### ValidatorSetUpdates + +When validator set changes, ValidatorSetUpdates event is published. The +event carries a list of pubkey/power pairs. The list is the same +Tendermint receives from ABCI application (see [EndBlock +section](https://tendermint.com/docs/app-dev/abci-spec.html#endblock) in +the ABCI spec). + +Response: + +``` +{ + "jsonrpc": "2.0", + "id": "0#event", + "result": { + "query": "tm.event='ValidatorSetUpdates'", + "data": { + "type": "tendermint/event/ValidatorSetUpdates", + "value": { + "validator_updates": [ + { + "address": "09EAD022FD25DE3A02E64B0FE9610B1417183EE4", + "pub_key": { + "type": "tendermint/PubKeyEd25519", + "value": "ww0z4WaZ0Xg+YI10w43wTWbBmM3dpVza4mmSQYsd0ck=" + }, + "voting_power": "10", + "accum": "0" + } + ] + } + } + } +} +``` diff --git a/docs/architecture/adr-002-event-subscription.md b/docs/architecture/adr-002-event-subscription.md index cc207c4af38..a73d584abc9 100644 --- a/docs/architecture/adr-002-event-subscription.md +++ b/docs/architecture/adr-002-event-subscription.md @@ -7,8 +7,7 @@ a subset of transactions** (rather than all of them) using `/subscribe?event=X`. example, I want to subscribe for all transactions associated with a particular account. Same for fetching. The user may want to **fetch transactions based on some filter** (rather than fetching all the blocks). For example, I want to get -all transactions for a particular account in the last two weeks (`tx's block -time >= '2017-06-05'`). +all transactions for a particular account in the last two weeks (`tx's block time >= '2017-06-05'`). Now you can't even subscribe to "all txs" in Tendermint. diff --git a/docs/architecture/adr-004-historical-validators.md b/docs/architecture/adr-004-historical-validators.md index be0de22c150..c98af702696 100644 --- a/docs/architecture/adr-004-historical-validators.md +++ b/docs/architecture/adr-004-historical-validators.md @@ -3,11 +3,11 @@ ## Context Right now, we can query the present validator set, but there is no history. -If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client and we agreed needs enhancement of the API. +If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client and we agreed needs enhancement of the API. ## Decision -For every block, store a new structure that contains either the latest validator set, +For every block, store a new structure that contains either the latest validator set, or the height of the last block for which the validator set changed. Note this is not the height of the block which returned the validator set change itself, but the next block, ie. the first block it comes into effect for. 
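As a rough illustration of the decision above, the per-height record and lookup could look like the following sketch. The names and the in-memory map are assumptions made for this example, not the actual implementation, which would persist amino-encoded entries in a DB.

```go
package state

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// ValidatorsInfo holds either the validator set at a height (when it
// changed at that height) or the last height at which it changed.
type ValidatorsInfo struct {
	ValidatorSet      *types.ValidatorSet // non-nil only where the set changed
	LastHeightChanged int64
}

// validatorStore is an in-memory stand-in for the persisted store.
type validatorStore map[int64]*ValidatorsInfo

// loadValidators resolves at most one level of indirection to recover
// the validator set for the requested height.
func (s validatorStore) loadValidators(height int64) (*types.ValidatorSet, error) {
	info, ok := s[height]
	if !ok {
		return nil, fmt.Errorf("no validator info at height %d", height)
	}
	if info.ValidatorSet != nil {
		return info.ValidatorSet, nil
	}
	prev, ok := s[info.LastHeightChanged]
	if !ok || prev.ValidatorSet == nil {
		return nil, fmt.Errorf("missing validator set for height %d", info.LastHeightChanged)
	}
	return prev.ValidatorSet, nil
}
```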
@@ -19,7 +19,7 @@ are updated frequently - for instance by only saving the diffs, rather than the An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree. While it might afford cheaper proofs that a validator set has not changed, it would be more complex, -and likely less efficient. +and likely less efficient. ## Status diff --git a/docs/architecture/adr-005-consensus-params.md b/docs/architecture/adr-005-consensus-params.md index 6656d35b2ab..ad132c9b983 100644 --- a/docs/architecture/adr-005-consensus-params.md +++ b/docs/architecture/adr-005-consensus-params.md @@ -7,7 +7,7 @@ Since they may be need to be different in different networks, and potentially to networks, we seek to initialize them in a genesis file, and expose them through the ABCI. While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future, -such as a period over which evidence is valid, or the frequency of checkpoints. +such as a period over which evidence is valid, or the frequency of checkpoints. ## Decision @@ -45,7 +45,7 @@ type BlockGossip struct { The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules. -The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0. +The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0. The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks. ### ABCI @@ -53,14 +53,14 @@ The former because we need a part size, the latter so that we always have at lea #### InitChain InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams. -There is some case to be made for it to take the entire Genesis, except there may be things in the genesis, +There is some case to be made for it to take the entire Genesis, except there may be things in the genesis, like the BlockPartSize, that the app shouldn't really know about. #### EndBlock The EndBlock response includes a `ConsensusParams`, which includes BlockSize and TxSize, but not BlockGossip. Other param struct can be added to `ConsensusParams` in the future. -The `0` value is used to denote no change. +The `0` value is used to denote no change. Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block. Tendermint should have hard-coded upper limits as sanity checks. @@ -83,4 +83,3 @@ Proposed. ### Neutral - The TxSize, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes - diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md index ec8a0cce76f..6fa77a609b2 100644 --- a/docs/architecture/adr-006-trust-metric.md +++ b/docs/architecture/adr-006-trust-metric.md @@ -8,13 +8,13 @@ The proposed trust metric will allow Tendermint to maintain local trust rankings The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). 
Instead, peers behavior can be monitored with appropriate metrics and be removed from the network once Tendermint Core is certain the peer is a threat. For example, when the PEXReactor makes a request for peers network addresses from a already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn’t cause a peer to be dropped, while excessive amounts of this behavior does qualify the peer being dropped. -Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events. +Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapts the malicious node’s behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is _X_ hours, then it could wait _X_ hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events. Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved in-between executions of the node. ### References -S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th international conference on World Wide Web, pp. 422-431*, May 2005. +S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in _Proceedings of the 14th international conference on World Wide Web, pp. 422-431_, May 2005. ## Decision @@ -26,25 +26,23 @@ The three subsections below will cover the process being considered for calculat The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation. -The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. 
The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval *i* (over the past *maxH* number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components. +The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval i based on its current trust ranking, its trust rating history prior to interval _i_ (over the past _maxH_ number of intervals) and its trust ranking fluctuation. We will break up the equation into the three components. ```math (1) Proportional Value = a * R[i] ``` -where *R*[*i*] denotes the raw trust value at time interval *i* (where *i* == 0 being current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*: - +where _R_[*i*] denotes the raw trust value at time interval _i_ (where _i_ == 0 being current time) and _a_ is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last _maxH_ intervals to calculate the history value for time _i_: -`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula") +`H[i] =` ![formula1](img/formula1.png "Weighted Sum Formula") - -The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value: +The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as _Wk_ = 0.8^_k_, for time interval _k_. With the history value available, we can now finish calculating the integral value: ```math (2) Integral Value = b * H[i] ``` -Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows: +Where _H_[*i*] denotes the history value at time interval _i_ and _b_ is the weight applied to the contribution of past performance for the object being measured. 
The derivative component will be calculated as follows: ```math D[i] = R[i] – H[i] @@ -52,25 +50,25 @@ D[i] = R[i] – H[i] (3) Derivative Value = c(D[i]) * D[i] ``` -Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows: +Where the value of _c_ is selected based on the _D_[*i*] value relative to zero. The default selection process makes _c_ equal to 0 unless _D_[*i*] is a negative value, in which case c is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value. With the three components brought together, our trust value equation is calculated as follows: ```math TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i] ``` -As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of *m*, while allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below: +As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of _m_, while allowing us to represent 2^_m_ - 1 history intervals, we can employ the fading memories technique that will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to _maxH_ (which can be 2^_m_ - 1), we will map those requests down to _m_ values using equation 4 below: ```math (4) j = index, where index > 0 ``` -Where *j* is one of *(0, 1, 2, … , m – 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations: +Where _j_ is one of _(0, 1, 2, … , m – 1)_ indices used to access history interval data. Now we can access the raw intervals using the following calculations: ```math R[0] = raw data for current time interval ``` -`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula") +`R[j] =` ![formula2](img/formula2.png "Fading Memories Formula") ### Trust Metric Store @@ -84,9 +82,7 @@ When the node is shutting down, the trust metric store will save history data fo Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below: - ```go - // TrustMetric - keeps track of peer reliability type TrustMetric struct { // Private elements. @@ -123,13 +119,11 @@ tm.BadEvents(1) score := tm.TrustScore() tm.Stop() - ``` Some of the trust metric parameters can be configured. 
The weight values should probably be left alone in more cases, yet the time durations for the tracking window and individual time interval should be considered. ```go - // TrustMetricConfig - Configures the weight functions and time intervals for the metric type TrustMetricConfig struct { // Determines the percentage given to current behavior @@ -165,23 +159,21 @@ config := TrustMetricConfig{ tm := NewMetricWithConfig(config) tm.BadEvents(10) -tm.Pause() +tm.Pause() tm.GoodEvents(1) // becomes active again - ``` -A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration. +A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration. When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and the entry made within the store. In additional to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer. ```go - // TrustMetricStore - Manages all trust metrics for peers type TrustMetricStore struct { cmn.BaseService - + // Private elements } @@ -214,7 +206,6 @@ tm := tms.GetPeerTrustMetric(key) tm.BadEvents(1) tms.PeerDisconnected(key) - ``` ## Status diff --git a/docs/architecture/adr-007-trust-metric-usage.md b/docs/architecture/adr-007-trust-metric-usage.md index 4d833a69ff8..de3a088cb04 100644 --- a/docs/architecture/adr-007-trust-metric-usage.md +++ b/docs/architecture/adr-007-trust-metric-usage.md @@ -17,11 +17,13 @@ For example, when the PEXReactor makes a request for peers network addresses fro The trust metric implementation allows a developer to obtain a peer's trust metric from a trust metric store, and track good and bad events relevant to a peer's behavior, and at any time, the peer's metric can be queried for a current trust value. The current trust value is calculated with a formula that utilizes current behavior, previous behavior, and change between the two. Current behavior is calculated as the percentage of good behavior within a time interval. The time interval is short; probably set between 30 seconds and 5 minutes. On the other hand, the historic data can estimate a peer's behavior over days worth of tracking. At the end of a time interval, the current behavior becomes part of the historic data, and a new time interval begins with the good and bad counters reset to zero. These are some important things to keep in mind regarding how the trust metrics handle time intervals and scoring: + - Each new time interval begins with a perfect score - Bad events quickly bring the score down and good events cause the score to slowly rise - When the time interval is over, the percentage of good events becomes historic data. 
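For intuition, the sketch below combines equations (1)-(3) from ADR-006 above into a single trust-value computation. The weights `a = 0.4` and `b = 0.6`, and the normalization of the weighted history sum, are assumptions made for this example rather than values taken from the implementation.

```go
package main

import (
	"fmt"
	"math"
)

// trustValue computes TrustValue[i] = a*R[i] + b*H[i] + c(D[i])*D[i].
// r is the raw score for the current interval (fraction of good events);
// history holds raw scores for recent intervals, newest first.
func trustValue(r float64, history []float64, a, b float64) float64 {
	// History value: optimistic weights Wk = 0.8^k, normalized here so
	// that h stays in [0, 1] (the normalization is an assumption).
	var h, wSum float64
	for k, rk := range history {
		w := math.Pow(0.8, float64(k))
		h += w * rk
		wSum += w
	}
	if wSum > 0 {
		h /= wSum
	}
	d := r - h
	c := 0.0
	if d < 0 {
		// Maximum penalty only when current behavior is worse than history.
		c = 1.0
	}
	return a*r + b*h + c*d
}

func main() {
	// 80% good behavior this interval after a run of near-perfect intervals:
	// the derivative term pulls the trust value below the raw 0.80.
	fmt.Printf("%.3f\n", trustValue(0.80, []float64{0.99, 0.98, 1.0}, 0.4, 0.6))
}
```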
Some useful information about the inner workings of the trust metric:
+
- When a trust metric is first instantiated, a timer (ticker) periodically fires in order to handle transitions between trust metric time intervals
- If a peer is disconnected from a node, the timer should be paused, since the node is no longer connected to that peer
- The ability to pause the metric is supported with the store **PeerDisconnected** method and the metric **Pause** method
@@ -76,6 +78,7 @@ Peer quality is tracked in the connection and across the reactors by storing
the thread safe Data store.

Peer behaviour is then defined as one of the following:
+
- Fatal - something outright malicious that causes us to disconnect the peer and ban it from the address book for some amount of time
- Bad - Any kind of timeout, messages that don't unmarshal, fail other validity checks, or messages we didn't ask for or aren't expecting (usually worth one bad event)
- Neutral - Unknown channels/message types/version upgrades (no good or bad events recorded)
diff --git a/docs/architecture/adr-008-priv-validator.md b/docs/architecture/adr-008-priv-validator.md
index 4c1d87bed6d..94e882af418 100644
--- a/docs/architecture/adr-008-priv-validator.md
+++ b/docs/architecture/adr-008-priv-validator.md
@@ -11,18 +11,18 @@ implementations:

The SocketPV address can be provided via flags at the command line - doing so
will cause Tendermint to ignore any "priv_validator.json" file and to listen
on the given address for incoming connections from an external priv_validator
-process. It will halt any operation until at least one external process
+process. It will halt any operation until at least one external process
successfully connected.

The external priv_validator process will dial the address to connect to
Tendermint, and then Tendermint will send requests on the ensuing connection to
-sign votes and proposals. Thus the external process initiates the connection,
-but the Tendermint process makes all requests. In a later stage we're going to
+sign votes and proposals. Thus the external process initiates the connection,
+but the Tendermint process makes all requests. In a later stage we're going to
support multiple validators for fault tolerance. To prevent double signing they
need to be synced, which is deferred to an external solution (see #1185).

In addition, Tendermint will provide implementations that can be run in that
-external process. These include:
+external process. These include:

- FilePV will encrypt the private key, and the user must enter password to
decrypt key when process is started.
diff --git a/docs/architecture/adr-009-ABCI-design.md b/docs/architecture/adr-009-ABCI-design.md
index 8b85679b883..fab28853b80 100644
--- a/docs/architecture/adr-009-ABCI-design.md
+++ b/docs/architecture/adr-009-ABCI-design.md
@@ -8,7 +8,7 @@

## Context

-The ABCI was first introduced in late 2015. It's purpose is to be:
+The ABCI was first introduced in late 2015. Its purpose is to be:

- a generic interface between state machines and their replication engines
- agnostic to the language the state machine is written in
@@ -66,8 +66,8 @@ possible.

### Validators

To change the validator set, applications can return a list of validator updates
-with ResponseEndBlock. In these updates, the public key *must* be included,
-because Tendermint requires the public key to verify validator signatures. This
+with ResponseEndBlock. In these updates, the public key _must_ be included,
+because Tendermint requires the public key to verify validator signatures. This
means ABCI developers have to work with PubKeys. That said, it would also be
convenient to work with address information, and for it to be simple to do so.
@@ -80,7 +80,7 @@ in commits.

### InitChain

-Tendermint passes in a list of validators here, and nothing else. It would
+Tendermint passes in a list of validators here, and nothing else. It would
benefit the application to be able to control the initial validator set. For
instance the genesis file could include application-based information about the
initial validator set that the application could process to determine the
@@ -120,7 +120,6 @@ v1 will:

That said, an Amino v2 will be worked on to improve the performance of the
format and its usability in cryptographic applications.

-
### PubKey

Encoding schemes infect software. As a generic middleware, ABCI aims to have
@@ -143,7 +142,6 @@ where `type` can be:

- "ed25519", with `data = `
- "secp256k1", with `data = <33-byte OpenSSL compressed pubkey>`

-
As we want to retain flexibility here, and since ideally, PubKey would be an
interface type, we do not use `enum` or `oneof`.
diff --git a/docs/architecture/adr-010-crypto-changes.md b/docs/architecture/adr-010-crypto-changes.md
index cfe6184210d..0bc07d69cae 100644
--- a/docs/architecture/adr-010-crypto-changes.md
+++ b/docs/architecture/adr-010-crypto-changes.md
@@ -66,13 +66,10 @@ Make the following changes:

- More modern and standard cryptographic functions with wider adoption and hardware acceleration

-
### Negative

- Exact authenticated encryption construction isn't already provided in a well-used library

-
### Neutral

## References

-
diff --git a/docs/architecture/adr-011-monitoring.md b/docs/architecture/adr-011-monitoring.md
index ca16a9a1c05..8f2d009dd62 100644
--- a/docs/architecture/adr-011-monitoring.md
+++ b/docs/architecture/adr-011-monitoring.md
@@ -15,11 +15,11 @@ https://github.com/tendermint/tendermint/issues/986.

A few solutions were considered:

1. [Prometheus](https://prometheus.io)
- a) Prometheus API
- b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
- c) [telegraf](https://github.com/influxdata/telegraf)
- d) new service, which will listen to events emitted by pubsub and report metrics
-5. [OpenCensus](https://opencensus.io/go/index.html)
+ a) Prometheus API
+ b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
+ c) [telegraf](https://github.com/influxdata/telegraf)
+ d) new service, which will listen to events emitted by pubsub and report metrics
+2. [OpenCensus](https://opencensus.io/go/index.html)

### 1. Prometheus

@@ -70,30 +70,30 @@ will need to write interfaces ourselves.
### List of metrics

-| | Name | Type | Description |
-| - | --------------------------------------- | ------- | ----------------------------------------------------------------------------- |
-| A | consensus_height | Gauge | |
-| A | consensus_validators | Gauge | Number of validators who signed |
-| A | consensus_validators_power | Gauge | Total voting power of all validators |
-| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
-| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
-| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
-| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
-| A | consensus_block_interval | Timing | Time between this and last block (Block.Header.Time) |
-| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
-| | consensus_time_between_blocks | Timing | Time between committing last block and (receiving proposal creating proposal) |
-| A | consensus_rounds | Gauge | Number of rounds |
-| | consensus_prevotes | Gauge | |
-| | consensus_precommits | Gauge | |
-| | consensus_prevotes_total_power | Gauge | |
-| | consensus_precommits_total_power | Gauge | |
-| A | consensus_num_txs | Gauge | |
-| A | mempool_size | Gauge | |
-| A | consensus_total_txs | Gauge | |
-| A | consensus_block_size | Gauge | In bytes |
-| A | p2p_peers | Gauge | Number of peers node's connected to |
-
-`A` - will be implemented in the fist place.
+| | Name | Type | Description |
+| --- | ------------------------------------ | ------ | ----------------------------------------------------------------------------- |
+| A | consensus_height | Gauge | |
+| A | consensus_validators | Gauge | Number of validators who signed |
+| A | consensus_validators_power | Gauge | Total voting power of all validators |
+| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
+| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
+| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
+| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
+| A | consensus_block_interval | Timing | Time between this and last block (Block.Header.Time) |
+| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
+| | consensus_time_between_blocks | Timing | Time between committing last block and (receiving proposal creating proposal) |
+| A | consensus_rounds | Gauge | Number of rounds |
+| | consensus_prevotes | Gauge | |
+| | consensus_precommits | Gauge | |
+| | consensus_prevotes_total_power | Gauge | |
+| | consensus_precommits_total_power | Gauge | |
+| A | consensus_num_txs | Gauge | |
+| A | mempool_size | Gauge | |
+| A | consensus_total_txs | Gauge | |
+| A | consensus_block_size | Gauge | In bytes |
+| A | p2p_peers | Gauge | Number of peers node's connected to |
+
+`A` - will be implemented in the first place.
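Purely as an illustration of option 1(b) above — a thin, self-owned metrics interface backed by go-kit's Prometheus provider — here is a minimal sketch covering two of the `A` metrics. All names are assumptions, not a final API:

```go
package consensus

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

// Metrics holds the instruments the consensus reactor would update.
type Metrics struct {
	Height     metrics.Gauge
	Validators metrics.Gauge
}

// PrometheusMetrics wires the interface to Prometheus-backed gauges,
// registered with the default registry.
func PrometheusMetrics() *Metrics {
	return &Metrics{
		Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Subsystem: "consensus",
			Name:      "height",
			Help:      "Height of the chain.",
		}, []string{}),
		Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Subsystem: "consensus",
			Name:      "validators",
			Help:      "Number of validators who signed.",
		}, []string{}),
	}
}

// Usage from the consensus state machine:
//   m := PrometheusMetrics()
//   m.Height.Set(float64(height))
```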
**Proposed solution**

diff --git a/docs/architecture/adr-012-ABCI-propose-tx.md b/docs/architecture/adr-012-ABCI-propose-tx.md
new file mode 100644
index 00000000000..497ccd18407
--- /dev/null
+++ b/docs/architecture/adr-012-ABCI-propose-tx.md
@@ -0,0 +1,183 @@
+# ADR 012: ABCI `ProposeTx` Method
+
+## Changelog
+
+25-06-2018: Initial draft based on [#1776](https://github.com/tendermint/tendermint/issues/1776)
+
+## Context
+
+[#1776](https://github.com/tendermint/tendermint/issues/1776) was
+opened in relation to implementation of a Plasma child chain using Tendermint
+Core as consensus/replication engine.
+
+Due to the requirements of [Minimal Viable Plasma (MVP)](https://ethresear.ch/t/minimal-viable-plasma/426) and [Plasma Cash](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298), it is necessary for ABCI apps to have a mechanism to handle the following cases (more may emerge in the near future):
+
+1. `deposit` transactions on the Root Chain, which must consist of a block
+   with a single transaction, where there are no inputs and only one output
+   made in favour of the depositor. In this case, a `block` consists of
+   a transaction with the following shape:
+
+   ```
+   [0, 0, 0, 0, #input1 - zeroed out
+   0, 0, 0, 0, #input2 - zeroed out
+   , , #output1 - in favour of depositor
+   0, 0, #output2 - zeroed out
+   ,
+   ]
+   ```
+
+   `exit` transactions may also be treated in a similar manner, wherein the
+   input is the UTXO being exited on the Root Chain, and the output belongs to
+   a reserved "burn" address, e.g., `0x0`. In such cases, it is favourable for
+   the containing block to only hold a single transaction that may receive
+   special treatment.
+
+2. Other "internal" transactions on the child chain, which may be initiated
+   unilaterally. The most basic example is a coinbase transaction
+   implementing validator node incentives, but may also be app-specific. In
+   these cases, it may be favourable for such transactions to
+   be ordered in a specific manner, e.g., coinbase transactions will always be
+   at index 0. In general, such strategies increase the determinism and
+   predictability of blockchain applications.
+
+While it is possible to deal with the cases enumerated above using the
+existing ABCI, the currently available mechanisms result in suboptimal workarounds. Two are
+explained in greater detail below.
+
+### Solution 1: App state-based Plasma chain
+
+In this workaround, the app maintains a `PlasmaStore` with a corresponding
+`Keeper`. The PlasmaStore is responsible for maintaining a second, separate
+blockchain that complies with the MVP specification, including `deposit`
+blocks and other "internal" transactions. These "virtual" blocks are then broadcast
+to the Root Chain.
+
+This naive approach is, however, fundamentally flawed, as it by definition
+diverges from the canonical chain maintained by Tendermint. This is further
+exacerbated if the business logic for generating such transactions is
+potentially non-deterministic, as this should not even be done in
+`Begin/EndBlock`, which may, as a result, break consensus guarantees.
+
+Additionally, this has serious implications for "watchers" - independent third parties,
+or even an auxiliary blockchain, responsible for ensuring that blocks recorded
+on the Root Chain are consistent with the Plasma chain's. Since, in this case,
+the Plasma chain is inconsistent with the canonical one maintained by Tendermint
+Core, it seems that there exists no compact means of verifying the legitimacy of
+the Plasma chain without replaying every state transition from genesis (!).
+
+### Solution 2: Broadcast to Tendermint Core from ABCI app
+
+This approach is inspired by `tendermint`, in which Ethereum transactions are
+relayed to Tendermint Core. It requires the app to maintain a client connection
+to the consensus engine.
+
+Whenever an "internal" transaction needs to be created, the proposer of the
+current block broadcasts the transaction or transactions to Tendermint as
+needed in order to ensure that the Tendermint chain and Plasma chain are
+completely consistent.
+
+This allows "internal" transactions to pass through the full consensus
+process, and can be validated in methods like `CheckTx`, i.e., signed by the
+proposer, is semantically correct, etc. Note that this involves informing
+the ABCI app of the block proposer, which was temporarily hacked in as a means
+of conducting this experiment, although this should not be necessary when the
+current proposer is passed to `BeginBlock`.
+
+It is much easier to relay these transactions directly to the Root
+Chain smart contract and/or maintain a "compressed" auxiliary chain comprised
+of Plasma-friendly blocks that 100% reflect the canonical (Tendermint)
+blockchain. Unfortunately, this approach is not idiomatic (i.e., utilises the
+Tendermint consensus engine in unintended ways). Additionally, it does not
+allow the application developer to:
+
+- Control the _ordering_ of transactions in the proposed block (e.g., index 0,
+  or 0 to `n` for coinbase transactions)
+- Control the _number_ of transactions in the block (e.g., when a `deposit`
+  block is required)
+
+Since determinism is of utmost importance in blockchain engineering, this approach,
+while more viable, should also not be considered as fit for production.
+
+## Decision
+
+### `ProposeTx`
+
+In order to address the difficulties described above, the ABCI interface must
+expose an additional method, tentatively named `ProposeTx`.
+
+It should have the following signature:
+
+```
+ProposeTx(RequestProposeTx) ResponseProposeTx
+```
+
+Where `RequestProposeTx` and `ResponseProposeTx` are `message`s with the
+following shapes:
+
+```
+message RequestProposeTx {
+  int64 next_block_height = 1; // height of the block the proposed tx would be part of
+  Validator proposer = 2; // the proposer details
+}
+
+message ResponseProposeTx {
+  int64 num_tx = 1; // the number of tx to include in proposed block
+  repeated bytes txs = 2; // ordered transaction data to include in block
+  bool exclusive = 3; // whether the block should include other transactions (from `mempool`)
+}
+```
+
+`ProposeTx` would be called before `mempool.Reap` at this
+[line](https://github.com/tendermint/tendermint/blob/master/consensus/state.go#L906).
+Depending on whether `exclusive` is `true` or `false`, the proposed
+transactions are then pushed on top of the transactions received from
+`mempool.Reap`.
+
+### `DeliverTx`
+
+Since the list of `tx` received from `ProposeTx` are _not_ passed through `CheckTx`,
+it is probably a good idea to provide a means of differentiating "internal" transactions
+from user-generated ones, in case the app developer needs/wants to take extra measures to
+ensure validity of the proposed transactions.
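Before turning to that, a minimal sketch of the proposer-side flow from the Decision above may help; the interface and helper names here are assumptions for illustration, not the actual consensus code:

```go
package consensus

// Assumed shapes mirroring the messages above; not real tendermint types.
type RequestProposeTx struct{ NextBlockHeight int64 }

type ResponseProposeTx struct {
	Txs       [][]byte
	Exclusive bool
}

type proposerApp interface {
	ProposeTx(RequestProposeTx) ResponseProposeTx
}

type txMempool interface {
	Reap() [][]byte
}

// gatherProposalTxs sketches the flow: call ProposeTx before reaping,
// then honor the `exclusive` flag when assembling the block's txs.
func gatherProposalTxs(app proposerApp, mp txMempool, height int64) [][]byte {
	res := app.ProposeTx(RequestProposeTx{NextBlockHeight: height})
	if res.Exclusive {
		return res.Txs // e.g. a single-transaction `deposit` block
	}
	// Proposed txs keep their order, ahead of regular mempool txs.
	return append(res.Txs, mp.Reap()...)
}
```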
+
+To support this differentiation, the `RequestDeliverTx` message should be changed to provide an additional flag, like so:
+
+```
+message RequestDeliverTx {
+  bytes tx = 1;
+  bool internal = 2;
+}
+```
+
+Alternatively, an additional method `DeliverProposeTx` may be added as an accompaniment to
+`ProposeTx`. However, it is not clear at this stage if this additional overhead is necessary
+to preserve consensus guarantees given that a simple flag may suffice for now.
+
+## Status
+
+Pending
+
+## Consequences
+
+### Positive
+
+- Tendermint ABCI apps will be able to function as minimally viable Plasma chains.
+- It will thereby become possible to add an extension to `cosmos-sdk` to enable
+  ABCI apps to support both IBC and Plasma, maximising interop.
+- ABCI apps will have great control and flexibility in managing blockchain state,
+  without having to resort to non-deterministic hacks and/or unsafe workarounds
+
+### Negative
+
+- Maintenance overhead of exposing an additional ABCI method
+- Potential security issues that may have been overlooked and must now be tested extensively
+
+### Neutral
+
+- ABCI developers must deal with increased (albeit nominal) API surface area.
+
+## References
+
+- [#1776 Plasma and "Internal" Transactions in ABCI Apps](https://github.com/tendermint/tendermint/issues/1776)
+- [Minimal Viable Plasma](https://ethresear.ch/t/minimal-viable-plasma/426)
+- [Plasma Cash: Plasma with much less per-user data checking](https://ethresear.ch/t/plasma-cash-plasma-with-much-less-per-user-data-checking/1298)
diff --git a/docs/architecture/adr-012-peer-transport.md b/docs/architecture/adr-012-peer-transport.md
index 01a79c8a9bd..1cf4fb80b81 100644
--- a/docs/architecture/adr-012-peer-transport.md
+++ b/docs/architecture/adr-012-peer-transport.md
@@ -9,8 +9,9 @@ handling. An artifact is the dependency of the Switch on
[`config.P2PConfig`](https://github.com/tendermint/tendermint/blob/05a76fb517f50da27b4bfcdc7b4cf185fc61eff6/config/config.go#L272-L339).

Addresses:
-* [#2046](https://github.com/tendermint/tendermint/issues/2046)
-* [#2047](https://github.com/tendermint/tendermint/issues/2047)
+
+- [#2046](https://github.com/tendermint/tendermint/issues/2046)
+- [#2047](https://github.com/tendermint/tendermint/issues/2047)

First iteration in [#2067](https://github.com/tendermint/tendermint/issues/2067)

@@ -29,15 +30,14 @@ transport implementation is responsible for filtering establishing peers specific
to its domain, for the default multiplexed implementation the following will
apply:

-* connections from our own node
-* handshake fails
-* upgrade to secret connection fails
-* prevent duplicate ip
-* prevent duplicate id
-* nodeinfo incompatibility
-
+- connections from our own node
+- handshake fails
+- upgrade to secret connection fails
+- prevent duplicate ip
+- prevent duplicate id
+- nodeinfo incompatibility

-``` go
+```go
// PeerTransport proxies incoming and outgoing peer connections.
type PeerTransport interface {
// Accept returns a newly connected Peer.
@@ -75,7 +75,7 @@ func NewMTransport(
nodeAddr NetAddress,
nodeInfo NodeInfo,
nodeKey NodeKey,
-) *multiplexTransport
+) *multiplexTransport
```

### Switch

@@ -84,7 +84,7 @@ From now the Switch will depend on a fully setup `PeerTransport` to
retrieve/reach out to its peers. As the more low-level concerns are pushed to
the transport, we can omit passing the `config.P2PConfig` to the Switch.

-``` go
+```go
func NewSwitch(transport PeerTransport, opts ...SwitchOption) *Switch
```

@@ -96,17 +96,17 @@ In Review.
### Positive

-* free Switch from transport concerns - simpler implementation
-* pluggable transport implementation - simpler test setup
-* remove Switch dependency on P2PConfig - easier to test
+- free Switch from transport concerns - simpler implementation
+- pluggable transport implementation - simpler test setup
+- remove Switch dependency on P2PConfig - easier to test

### Negative

-* more setup for tests which depend on Switches
+- more setup for tests which depend on Switches

### Neutral

-* multiplexed will be the default implementation
+- multiplexed will be the default implementation

[0] These guards could be potentially extended to be pluggable much like
middlewares to express different concerns required by differentially configured
diff --git a/docs/architecture/adr-013-symmetric-crypto.md b/docs/architecture/adr-013-symmetric-crypto.md
index 00442ab0d27..69bfc2f2901 100644
--- a/docs/architecture/adr-013-symmetric-crypto.md
+++ b/docs/architecture/adr-013-symmetric-crypto.md
@@ -14,22 +14,23 @@ to easily swap these out.

### How do we encrypt with AEADs

-AEAD's typically require a nonce in addition to the key.
+AEADs typically require a nonce in addition to the key.
For the purposes we require symmetric cryptography for,
we need encryption to be stateless.
-Because of this we use random nonces.
+Because of this we use random nonces.
(Thus the AEAD must support random nonces)

-We currently construct a random nonce, and encrypt the data with it.
+We currently construct a random nonce, and encrypt the data with it.
The returned value is `nonce || encrypted data`.
The limitation of this is that it does not provide a way to identify
which algorithm was used in encryption.
-Consequently decryption with multiple algoritms is sub-optimal.
+Consequently decryption with multiple algorithms is sub-optimal.
(You have to try them all)

## Decision

-We should create the following two methods in a new `crypto/encoding/symmetric` package:
+We should create the following two methods in a new `crypto/encoding/symmetric` package:
+
```golang
func Encrypt(aead cipher.AEAD, plaintext []byte) (ciphertext []byte, err error)
func Decrypt(key []byte, ciphertext []byte) (plaintext []byte, err error)
@@ -37,18 +38,19 @@ func Register(aead cipher.AEAD, algo_name string, NewAead func(key []byte) (ciph
```

This allows you to specify the algorithm in encryption, but not have to specify
-it in decryption.
+it in decryption.
This is intended for ease of use in downstream applications, in addition to people looking at the file directly.
One downside is that for the encrypt function you must have already initialized an AEAD,
-but I don't really see this as an issue.
+but I don't really see this as an issue.

-If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
+If there is no error in encryption, Encrypt will return `algo_name || nonce || aead_ciphertext`.
`algo_name` should be length prefixed, using standard varuint encoding.
This will be binary data, but that's not a problem considering the
nonce and ciphertext are also binary.

-This solution requires a mapping from aead type to name.
-We can achieve this via reflection.
+This solution requires a mapping from aead type to name.
+We can achieve this via reflection.
+
```golang
func getType(myvar interface{}) string {
if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr {
@@ -58,7 +60,8 @@ func getType(myvar interface{}) string {
}
}
```
-Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.
+
+Then we maintain a map from the name returned from `getType(aead)` to `algo_name`.

In decryption, we read the `algo_name`, and then instantiate a new AEAD with the key.
Then we call the AEAD's decrypt method on the provided nonce/ciphertext.

@@ -81,13 +84,16 @@ Proposed.

## Consequences

### Positive
+
+- Allows us to support new AEADs, in a way that makes decryption easier
+- Allows downstream users to add their own AEAD

### Negative
+
+- We will have to break all private keys stored on disk.
+  They can be recovered using seed words, and upgrade scripts are simple.

### Neutral
+
+- Caller has to instantiate the AEAD with the private key.
+  However it forces them to be aware of what signing algorithm they are using, which is a positive.
diff --git a/docs/architecture/adr-014-secp-malleability.md b/docs/architecture/adr-014-secp-malleability.md
index 3351056c61b..e6014c09b4d 100644
--- a/docs/architecture/adr-014-secp-malleability.md
+++ b/docs/architecture/adr-014-secp-malleability.md
@@ -22,21 +22,21 @@ Removing this second layer of signature malleability concerns could ease downstr

### ECDSA context

Secp256k1 is ECDSA over a particular curve.
-The signature is of the form `(r, s)`, where `s` is a field element.
+The signature is of the form `(r, s)`, where `s` is a field element.
(The particular field is the `Z_n`, where the elliptic curve has order `n`)
However `(r, -s)` is also another valid solution.
Note that anyone can negate a group element, and therefore can get this second signature.

## Decision

-We can just distinguish a canonical form for the ECDSA signatures.
+We can just distinguish a canonical form for the ECDSA signatures.
Then we require that all ECDSA signatures be in the form which we defined as canonical.
We reject signatures in non-canonical form.

-A canonical form is rather easy to define and check.
+A canonical form is rather easy to define and check.
It would just be the smaller of the two values for `s`, defined lexicographically.
This is a simple check: instead of checking if `s < n`, check `s <= (n - 1)/2`.

-An example of another cryptosystem using this
+An example of another cryptosystem using this
is the parity definition here https://github.com/zkcrypto/pairing/pull/30#issuecomment-372910663.

This is the same solution Ethereum has chosen for solving secp malleability.

@@ -47,15 +47,17 @@ Fork https://github.com/btcsuite/btcd, and just update the [parse sig method](ht

## Status

-Proposed.
+Implemented

## Consequences

### Positive
+
+- Lets us maintain the ability to expect a tx hash to appear in the blockchain.
### Negative

-* More work in all future implementations (Though this is a very simple check)
-* Requires us to maintain another fork
+
+- More work in all future implementations (Though this is a very simple check)
+- Requires us to maintain another fork

### Neutral

diff --git a/docs/architecture/adr-015-crypto-encoding.md b/docs/architecture/adr-015-crypto-encoding.md
index 67cce95f933..665129f12f3 100644
--- a/docs/architecture/adr-015-crypto-encoding.md
+++ b/docs/architecture/adr-015-crypto-encoding.md
@@ -1,8 +1,8 @@
-# ADR 015: Crypto encoding
+# ADR 015: Crypto encoding

## Context

-We must standardize our method for encoding public keys and signatures on chain.
+We must standardize our method for encoding public keys and signatures on chain.
Currently we amino encode the public keys and signatures.
The reason we are using amino here is primarily due to ease of support in
parsing for other languages.
@@ -54,9 +54,11 @@ When placed in state, signatures will still be amino encoded, but it will be
the primitive type `[]byte` getting encoded.

#### Ed25519
+
Use the canonical representation for signatures.

#### Secp256k1
+
There isn't a clear canonical representation here.
Signatures have two elements `r,s`.
These bytes are encoded as `r || s`, where `r` and `s` are both exactly
@@ -65,16 +67,18 @@ This is basically Ethereum's encoding, but without the leading recovery bit.

## Status

-Proposed. The signature section seems to be agreed upon for the most part.
-Needs decision on Enum types.
+Implemented

## Consequences

### Positive
-* More space efficient signatures
+
+- More space efficient signatures

### Negative
-* We have an amino dependency for cryptography.
+
+- We have an amino dependency for cryptography.

### Neutral
-* No change to public keys \ No newline at end of file
+
+- No change to public keys
diff --git a/docs/architecture/adr-016-protocol-versions.md b/docs/architecture/adr-016-protocol-versions.md
new file mode 100644
index 00000000000..4dba4e84899
--- /dev/null
+++ b/docs/architecture/adr-016-protocol-versions.md
@@ -0,0 +1,301 @@
+# ADR 016: Protocol Versions
+
+## TODO
+
+- How to / should we version the authenticated encryption handshake itself (ie.
+  upfront protocol negotiation for the P2PVersion)
+
+## Changelog
+
+- 03-08-2018: Updates from discussion with Jae:
+  - ProtocolVersion contains Block/AppVersion, not Current/Next
+  - signal upgrades to Tendermint using EndBlock fields
+  - don't restrict peer compatibility by version to simplify syncing old nodes
+- 28-07-2018: Updates from review
+  - split into two ADRs - one for protocol, one for chains
+  - include signalling for upgrades in header
+- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
+  versions
+
+## Context
+
+The Software Version is covered by SemVer and described elsewhere.
+It is not relevant to the protocol description, suffice it to say that if any protocol version
+changes, the software version changes, but not necessarily vice versa.
+
+Software version should be included in NodeInfo for convenience/diagnostics.
+
+We are also interested in versioning across different blockchains in a
+meaningful way, for instance to differentiate branches of a contentious
+hard-fork. We leave that for a later ADR.
+
+Here we focus on protocol versions.
+
+## Requirements
+
+We need to version components of the blockchain that may be independently upgraded.
+We need to do it in a way that is scalable and maintainable - we can't just litter
+the code with conditionals.
+
+We can consider the complete version of the protocol to contain the following sub-versions:
+BlockVersion, P2PVersion, AppVersion. These versions reflect the major sub-components
+of the software that are likely to evolve together, at different rates, and in different ways,
+as described below.
+
+The BlockVersion defines the core of the blockchain data structures and
+should change infrequently.
+
+The P2PVersion defines how peers connect and communicate with each other - it's
+not part of the blockchain data structures, but defines the protocols used to build the
+blockchain. It may change gradually.
+
+The AppVersion determines how we compute app specific information, like the
+AppHash and the Results.
+
+All of these versions may change over the life of a blockchain, and we need to
+be able to help new nodes sync up across version changes. This means we must be willing
+to connect to peers with older versions.
+
+### BlockVersion
+
+- All tendermint hashed data-structures (headers, votes, txs, responses, etc.).
+  - Note the semantic meaning of a transaction may change according to the AppVersion, but the way txs are merklized into the header is part of the BlockVersion
+- It should be the least frequent/likely to change.
+  - Tendermint should be stabilizing - it's just Atomic Broadcast.
+  - We can start considering for Tendermint v2.0 in a year
+- It's easy to determine the version of a block from its serialized form
+
+### P2PVersion
+
+- All p2p and reactor messaging (messages, detectable behaviour)
+- Will change gradually as reactors evolve to improve performance and support new features - eg proposed new message types BatchTx in the mempool and HasBlockPart in the consensus
+- It's easy to determine the version of a peer from its first serialized message/s
+- New versions must be compatible with at least one old version to allow gradual upgrades
+
+### AppVersion
+
+- The ABCI state machine (txs, begin/endblock behaviour, commit hashing)
+- Behaviour and message types will change abruptly in the course of the life of a chain
+- Need to minimize complexity of the code for supporting different AppVersions at different heights
+- Ideally, each version of the software supports only a _single_ AppVersion at one time
+  - this means we checkout different versions of the software at different heights instead of littering the code
+    with conditionals
+  - minimize the number of data migrations required across AppVersion (ie. most AppVersion should be able to read the same state from disk as previous AppVersion).
+
+## Ideal
+
+Each component of the software is independently versioned in a modular way and it's easy to mix and match and upgrade.
+
+Good luck pal ;)
+
+## Proposal
+
+Each of BlockVersion, AppVersion, P2PVersion is a monotonically increasing int64.
+
+To use these versions, we need to update the block Header, the p2p NodeInfo, and the ABCI.
+
+### Header
+
+Block Header should include a `Version` struct as its first field like:
+
+```
+type Version struct {
+  CurrentVersion ProtocolVersion
+  ChainID string
+
+  NextVersion ProtocolVersion
+}
+
+type ProtocolVersion struct {
+  BlockVersion int64
+  AppVersion int64
+}
+```
+
+Note this effectively makes BlockVersion the first field in the block Header.
+Since we have settled on a proto3 header, the ability to read the BlockVersion out of the serialized header is unambiguous.
+
+Using a Version struct gives us more flexibility to add fields without breaking
+the header.
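As a quick illustration of that first-field property, the following sketch pulls the raw `Version` sub-message out of serialized header bytes without decoding anything else. It uses the modern `protowire` package purely for illustration; the field layout is the hypothetical one above:

```go
package main

import "google.golang.org/protobuf/encoding/protowire"

// versionBytes scans raw proto3 header bytes and returns the sub-message
// for field 1 (the Version struct above), without decoding the rest.
func versionBytes(header []byte) ([]byte, bool) {
	for len(header) > 0 {
		num, typ, n := protowire.ConsumeTag(header)
		if n < 0 {
			return nil, false
		}
		header = header[n:]
		if num == 1 && typ == protowire.BytesType {
			v, m := protowire.ConsumeBytes(header)
			if m < 0 {
				return nil, false
			}
			return v, true
		}
		// Skip any other field.
		m := protowire.ConsumeFieldValue(num, typ, header)
		if m < 0 {
			return nil, false
		}
		header = header[m:]
	}
	return nil, false
}
```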
+
+The ProtocolVersion struct includes both the Block and App versions - it should
+serve as a complete description of the consensus-critical protocol.
+Using the `NextVersion` field, proposers can signal their readiness to upgrade
+to a new Block and/or App version.
+
+### NodeInfo
+
+NodeInfo should include a Version struct as its first field like:
+
+```
+type Version struct {
+  P2PVersion int64
+
+  ChainID string
+  BlockVersion int64
+  AppVersion int64
+  SoftwareVersion string
+}
+```
+
+Note this effectively makes P2PVersion the first field in the NodeInfo, so it
+should be easy to read this out of the serialized header if need be to facilitate an upgrade.
+
+The SoftwareVersion here should include the name of the software client and
+its SemVer version - this is for convenience only. Eg.
+`tendermint-core/v0.22.8`.
+
+The other versions and ChainID will determine peer compatibility (described below).
+
+### ABCI
+
+Since the ABCI is responsible for keeping Tendermint and the App in sync, we
+need to communicate version information through it.
+
+On startup, we use Info to perform a basic handshake. It should include all the
+version information.
+
+We also need to be able to update versions in the life of a blockchain. The
+natural place to do this is EndBlock.
+
+#### Info
+
+RequestInfo should add support for protocol versions like:
+
+```
+message RequestInfo {
+  string software_version
+  int64 block_version
+  int64 p2p_version
+}
+```
+
+Similarly, ResponseInfo should return the versions:
+
+```
+message ResponseInfo {
+  string data
+
+  string software_version
+  int64 app_version
+
+  int64 last_block_height
+  bytes last_block_app_hash
+}
+```
+
+#### EndBlock
+
+Updating the version could be done either with new fields or by using the
+existing `tags`. Since we're trying to communicate information that will be
+included in Tendermint block Headers, it should be native to the ABCI, and not
+something embedded through some scheme in the tags.
+
+ResponseEndBlock will include a new field `version_updates`:
+
+```
+message ResponseEndBlock {
+  repeated Validator validator_updates
+  ConsensusParams consensus_param_updates
+  repeated common.KVPair tags
+
+  VersionUpdates version_updates
+}
+
+message VersionUpdates {
+  ProtocolVersion current_version
+  ProtocolVersion next_version
+}
+
+message ProtocolVersion {
+  int64 block_version
+  int64 app_version
+}
+```
+
+Tendermint will use the information in VersionUpdates for the next block it
+proposes.
+
+### BlockVersion
+
+BlockVersion is included in both the Header and the NodeInfo.
+
+Changing BlockVersion should happen quite infrequently and ideally only for extreme emergency.
+
+Note Ethereum has not had to make an upgrade like this (everything has been at state machine level, AFAIK).
+
+### P2PVersion
+
+P2PVersion is not included in the block Header, just the NodeInfo.
+
+P2PVersion is the first field in the NodeInfo. NodeInfo is also proto3 so this is easy to read out.
+
+Note we need the peer/reactor protocols to take the versions of peers into account when sending messages:
+
+- don't send messages they don't understand
+- don't send messages they don't expect
+
+Doing this will be specific to the upgrades being made.
+
+Note we also include the list of reactor channels in the NodeInfo and already don't send messages for channels the peer doesn't understand.
+If upgrades always use new channels, this simplifies the development cost of backwards compatibility.
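A minimal sketch of that channel check, with assumed (not actual) p2p types, just to show where the version-awareness lives:

```go
package p2p

// Assumed minimal shapes; the real NodeInfo/Peer carry much more.
type NodeInfo struct{ Channels []byte }

type Peer interface {
	NodeInfo() NodeInfo
	Send(chID byte, msg []byte) bool
}

// sendIfSupported sends only on channels the peer advertised in its
// NodeInfo, so a peer on an older P2PVersion never receives messages
// on channels it doesn't understand.
func sendIfSupported(p Peer, chID byte, msg []byte) bool {
	for _, ch := range p.NodeInfo().Channels {
		if ch == chID {
			return p.Send(chID, msg)
		}
	}
	return false
}
```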
+
+Note NodeInfo is only exchanged after the authenticated encryption handshake to ensure that it's private.
+Doing any version exchange before encrypting could be considered information leakage, though I'm not sure
+how much that matters compared to being able to upgrade the protocol.
+
+XXX: if needed, can we change the meaning of the first byte of the first message to encode a handshake version?
+this is the first byte of a 32-byte ed25519 pubkey.
+
+### AppVersion
+
+AppVersion is also included in the block Header and the NodeInfo.
+
+AppVersion essentially defines how the AppHash and Results are computed.
+
+### Peer Compatibility
+
+Restricting peer compatibility based on version is complicated by the need to
+help old peers, possibly on older versions, sync the blockchain.
+
+We might be tempted to say that we only connect to peers with the same
+AppVersion and BlockVersion (since these define the consensus critical
+computations), and a select list of P2PVersions (ie. those compatible with
+ours), but then we'd need to make accommodations for connecting to peers with the
+right Block/AppVersion for the height they're on.
+
+For now, we will connect to peers with any version and restrict compatibility
+solely based on the ChainID. We leave more restrictive rules on peer
+compatibility to a future proposal.
+
+### Future Changes
+
+It may be valuable to support an `/unsafe_stop?height=_` endpoint to tell Tendermint to shutdown at a given height.
+This could be used by an external manager process that oversees upgrades by
+checking out and installing new software versions and restarting the process. It
+would subscribe to the relevant upgrade event (needs to be implemented) and call `/unsafe_stop` at
+the correct height (of course only after getting approval from its user!)
+
+## Consequences
+
+### Positive
+
+- Make tendermint and application versions native to the ABCI to more clearly
+  communicate about them
+- Distinguish clearly between protocol versions and software version to
+  facilitate implementations in other languages
+- Versions included in key data structures in an easy to discern way
+- Allows proposers to signal for upgrades and apps to decide when to actually change the
+  version (and start signalling for a new version)
+
+### Neutral
+
+- Unclear how to version the initial P2P handshake itself
+- Versions aren't being used (yet) to restrict peer compatibility
+- Signalling for a new version happens through the proposer and must be
+  tallied/tracked in the app.
+
+### Negative
+
+- Adds more fields to the ABCI
+- Implies that a single codebase must be able to handle multiple versions
diff --git a/docs/architecture/adr-017-chain-versions.md b/docs/architecture/adr-017-chain-versions.md
new file mode 100644
index 00000000000..7113dbaee76
--- /dev/null
+++ b/docs/architecture/adr-017-chain-versions.md
@@ -0,0 +1,99 @@
+# ADR 017: Chain Versions
+
+## TODO
+
+- clarify how to handle slashing when ChainID changes
+
+## Changelog
+
+- 28-07-2018: Updates from review
+  - split into two ADRs - one for protocol, one for chains
+- 16-07-2018: Initial draft - was originally joint ADR for protocol and chain
+  versions
+
+## Context
+
+Software and Protocol versions are covered in a separate ADR.
+
+Here we focus on chain versions.
+
+## Requirements
+
+We need to version blockchains across protocols, networks, forks, etc.
+We need chain identifiers and descriptions so we can talk about a multitude of chains,
+and especially the differences between them, in a meaningful way.
+
+### Networks
+
+We need to support many independent networks running the same version of the software,
+even possibly starting from the same initial state.
+They must have distinct identifiers so that peers know which one they are joining and so
+validators and users can prevent replay attacks.
+
+Call this the `NetworkName` (note we currently call this `ChainID` in the software. In this
+ADR, ChainID has a different meaning).
+It represents both the application being run and the community or intention
+of running it.
+
+Peers only connect to other peers with the same NetworkName.
+
+### Forks
+
+We need to support existing networks upgrading and forking, wherein they may do any of:
+
+  - revert back to some height, continue with the same versions but new blocks
+  - arbitrarily mutate state at some height, continue with the same versions (eg. Dao Fork)
+  - change the AppVersion at some height
+
+Note because of Tendermint's voting power threshold rules, a chain can only be extended under the "original" rules and under the new rules
+if 1/3 or more is double signing, which is expressly prohibited, and is supposed to result in their punishment on both chains. Since they can censor
+the punishment, the chain is expected to be hardforked to remove the validators. Thus, if both branches are to continue after a fork,
+they will each require a new identifier, and the old chain identifier will be retired (ie. only useful for syncing history, not for new blocks).
+
+TODO: explain how to handle slashing when chain id changed!
+
+We need a consistent way to describe forks.
+
+## Proposal
+
+### ChainDescription
+
+ChainDescription is a complete immutable description of a blockchain. It takes the following form:
+
+```
+ChainDescription = /////
+```
+
+Here, StateHash is the merkle root of the initial state, ValHash is the merkle root of the initial Tendermint validator set,
+and ConsensusParamsHash is the merkle root of the initial Tendermint consensus parameters.
+
+The `genesis.json` file must contain enough information to compute this value. It need not contain the StateHash or ValHash itself,
+but it must contain the state from which they can be computed with the given protocol versions.
+
+NOTE: consider splitting NetworkName into NetworkName and AppName - this allows
+folks to independently use the same application for different networks (ie we
+could imagine multiple communities of validators wanting to put up a Hub using
+the same app but having a distinct network name. Arguably not needed if
+differences will come via different initial state / validators).
+
+#### ChainID
+
+Define `ChainID = TMHASH(ChainDescription)`. It's the unique ID of a blockchain.
+
+It should be Bech32 encoded when handled by users, eg. with `cosmoschain` prefix.
+
+#### Forks and Upgrades
+
+When a chain forks or upgrades but continues the same history, it takes a new ChainDescription as follows:
+
+```
+ChainDescription = /x//
+```
+
+Where
+
+- ChainID is the ChainID from the previous ChainDescription (ie. its hash)
+- `x` denotes that a change occurred
+- `Height` is the height the change occurred
+- ForkDescription has the same form as ChainDescription but for the fork
+- this allows forks to specify new versions for tendermint or the app, as well as arbitrary changes to the state or validator set
diff --git a/docs/architecture/adr-018-ABCI-Validators.md b/docs/architecture/adr-018-ABCI-Validators.md
new file mode 100644
index 00000000000..b632da855c4
--- /dev/null
+++ b/docs/architecture/adr-018-ABCI-Validators.md
@@ -0,0 +1,103 @@
+# ADR 018: ABCI Validator Improvements
+
+## Changelog
+
+16-08-2018: Follow up from review:
+  - Revert changes to commit round
+  - Remind about justification for removing pubkey
+  - Update pros/cons
+05-08-2018: Initial draft
+
+## Context
+
+ADR 009 introduced major improvements to the ABCI around validators and the use
+of Amino. Here we follow up with some additional changes to improve the naming
+and expected use of Validator messages.
+
+## Decision
+
+### Validator
+
+Currently a Validator contains `address` and `pub_key`, and one or the other is
+optional/not-sent depending on the use case. Instead, we should have a
+`Validator` (with just the address, used for RequestBeginBlock)
+and a `ValidatorUpdate` (with the pubkey, used for ResponseEndBlock):
+
+```
+message Validator {
+  bytes address
+  int64 power
+}
+
+message ValidatorUpdate {
+  PubKey pub_key
+  int64 power
+}
+```
+
+As noted in [ADR-009](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-009-ABCI-design.md),
+the `Validator` does not contain a pubkey because quantum public keys are
+quite large and it would be wasteful to send them all over ABCI with every block.
+Thus, applications that want to take advantage of the information in BeginBlock
+are *required* to store pubkeys in state (or use much less efficient lazy means
+of verifying BeginBlock data).
+
+### RequestBeginBlock
+
+LastCommitInfo currently has an array of `SigningValidator` that contains
+information for each validator in the entire validator set.
+Instead, this should be called `VoteInfo`, since it is information about the
+validator votes.
+
+Note that all votes in a commit must be from the same round.
+
+```
+message LastCommitInfo {
+  int64 round
+  repeated VoteInfo commit_votes
+}
+
+message VoteInfo {
+  Validator validator
+  bool signed_last_block
+}
+```
+
+### ResponseEndBlock
+
+Use ValidatorUpdates instead of Validators. Then it's clear we don't need an
+address, and we do need a pubkey.
+
+We could require the address here as well as a sanity check, but it doesn't seem
+necessary.
+
+### InitChain
+
+Use ValidatorUpdates for both Request and Response. InitChain
+is about setting/updating the initial validator set, unlike BeginBlock
+which is just informational.
+
+## Status
+
+Proposal.
+
+## Consequences
+
+### Positive
+
+- Clarifies the distinction between the different uses of validator information
+
+### Negative
+
+- Apps must still store the public keys in state to utilize the RequestBeginBlock info
+
+### Neutral
+
+- ResponseEndBlock does not require an address
+
+## References
+
+- [Latest ABCI Spec](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/app-dev/abci-spec.md)
+- [ADR-009](https://github.com/tendermint/tendermint/blob/v0.22.8/docs/architecture/adr-009-ABCI-design.md)
+- [Issue #1712 - Don't send PubKey in
+  RequestBeginBlock](https://github.com/tendermint/tendermint/issues/1712)
diff --git a/docs/architecture/adr-019-multisigs.md b/docs/architecture/adr-019-multisigs.md
new file mode 100644
index 00000000000..3d1c5ba871f
--- /dev/null
+++ b/docs/architecture/adr-019-multisigs.md
@@ -0,0 +1,160 @@
+# ADR 019: Encoding standard for Multisignatures
+
+## Changelog
+
+06-08-2018: Minor updates
+
+27-07-2018: Update draft to use amino encoding
+
+11-07-2018: Initial Draft
+
+## Context
+
+Multisignatures, or technically _Accountable Subgroup Multisignatures_ (ASM),
+are signature schemes which enable any subgroup of a set of signers to sign any message,
+and reveal to the verifier exactly who the signers were.
+This allows for complex conditionals of when to validate a signature.
+
+Suppose the set of signers is of size _n_.
+If we validate a signature if any subgroup of size _k_ signs a message,
+this becomes what is commonly referred to as a _k of n multisig_ in Bitcoin.
+
+This ADR specifies the encoding standard for general accountable subgroup multisignatures,
+k of n accountable subgroup multisignatures, and its weighted variant.
+
+In the future, we can also allow for more complex conditionals on the accountable subgroup.
+
+## Proposed Solution
+
+### New structs
+
+Every ASM will then have its own struct, implementing the crypto.Pubkey interface.
+
+This ADR assumes that [replacing crypto.Signature with []bytes](https://github.com/tendermint/tendermint/issues/1957) has been accepted.
+
+#### K of N threshold signature
+
+The pubkey is the following struct:
+
+```golang
+type ThresholdMultiSignaturePubKey struct { // K of N threshold multisig
+  K       uint            `json:"threshold"`
+  Pubkeys []crypto.Pubkey `json:"pubkeys"`
+}
+```
+
+We will derive N from the length of pubkeys. (For spatial efficiency in encoding)
+
+`Verify` will expect an `[]byte` encoded version of the Multisignature.
+(Multisignature is described in the next section)
+The multisignature will be rejected if the bitmap has less than k indices,
+or if any signature at any of the k indices is not a valid signature from
+the corresponding public key on the message.
+(If more than k signatures are included, all must be valid)
+
+`Bytes` will be the amino encoded version of the pubkey.
+
+Address will be `Hash(amino_encoded_pubkey)`
+
+The reason this doesn't use `log_8(n)` bytes per signer is because that heavily optimizes for the case where a very small number of signers are required.
+e.g. for `n` of size `24`, that would only be more space efficient for `k < 3`.
+This seems less likely, so it should not be the case optimized for.
+
+#### Weighted threshold signature
+
+The pubkey is the following struct:
+
+```golang
+type WeightedThresholdMultiSignaturePubKey struct {
+  Weights   []uint          `json:"weights"`
+  Threshold uint            `json:"threshold"`
+  Pubkeys   []crypto.Pubkey `json:"pubkeys"`
+}
+```
+
+Weights and Pubkeys must be of the same length.
+
+Everything else proceeds identically to the K of N multisig,
+except the multisig fails if the sum of the weights is less than the threshold.
+
+#### Multisignature
+
+The intermediate phase of the signatures (as it accrues more signatures) will be the following struct:
+
+```golang
+type Multisignature struct {
+  BitArray CryptoBitArray // Documented later
+  Sigs     [][]byte
+}
+```
+
+It is important to recall that each private key will output a signature on the provided message itself.
+So no signing algorithm ever outputs the multisignature.
+The UI will take a signature, cast it into a multisignature, and then keep adding
+new signatures into it, and when done marshal into `[]byte`.
+This will require the following helper methods:
+
+```golang
+func SigToMultisig(sig []byte, n int)
+func GetIndex(pk crypto.Pubkey, []crypto.Pubkey)
+func AddSignature(sig Signature, index int, multiSig *Multisignature)
+```
+
+The multisignature will be converted to an `[]byte` using amino.MarshalBinaryBare. \*
+
+#### Bit Array
+
+We would be using a new implementation of a bitarray. The struct it would be encoded/decoded from is
+
+```golang
+type CryptoBitArray struct {
+  ExtraBitsStored byte   `json:"extra_bits"` // The number of extra bits in elems.
+  Elems           []byte `json:"elems"`
+}
+```
+
+The reason for not using the BitArray currently implemented in `libs/common/bit_array.go`
+is that it is less space efficient, due to a space / time trade-off.
+Evidence for this is outlined in [this issue](https://github.com/tendermint/tendermint/issues/2077).
+
+In the multisig, we will not be performing arithmetic operations,
+so there is no performance increase with the current implementation,
+and just loss of spatial efficiency.
+Implementing this new bit array with `[]byte` _should_ be simple, as no
+arithmetic operations between bit arrays are required, and save a couple of bytes.
+(Explained in that same issue)
+
+When this bit array is encoded, the number of elements is encoded due to amino.
+However we may be encoding a full byte for what we actually only need 1-7 bits for.
+We store that difference in ExtraBitsStored.
+This allows for us to have an unbounded number of signers, and is more space efficient than what is currently used in `libs/common`.
+Again the implementation of this space saving feature is straightforward.
+
+### Encoding the structs
+
+We will use straightforward amino encoding. This is chosen for ease of compatibility in other languages.
+
+### Future points of discussion
+
+If desired, we can use ed25519 batch verification for all ed25519 keys.
+This is a future point of discussion, but would be backwards compatible as this information won't need to be marshalled.
+(There may even be cofactor concerns without ristretto)
+Aggregation of pubkeys / sigs in Schnorr sigs / BLS sigs is not backwards compatible, and would need to be a new ASM type.
+
+## Status
+
+Proposed.
+
+## Consequences
+
+### Positive
+
+- Supports multisignatures, in a way that won't require any special cases in our downstream verification code.
+- Easy to serialize / deserialize
+- Unbounded number of signers
+
+### Negative
+
+- Larger codebase, however this should reside in a subfolder of tendermint/crypto, as it provides no new interfaces. (Ref https://github.com/tendermint/go-crypto/issues/136)
+- Space inefficient due to utilization of amino encoding
+- Suggested implementation requires a new struct for every ASM.
+ +### Neutral diff --git a/docs/architecture/adr-020-block-size.md b/docs/architecture/adr-020-block-size.md new file mode 100644 index 00000000000..aebf3069c3b --- /dev/null +++ b/docs/architecture/adr-020-block-size.md @@ -0,0 +1,77 @@ +# ADR 020: Limiting txs size inside a block + +## Changelog + +13-08-2018: Initial Draft +15-08-2018: Second version after Dev's comments +28-08-2018: Third version after Ethan's comments +30-08-2018: AminoOverheadForBlock => MaxAminoOverheadForBlock +31-08-2018: Bounding evidence and chain ID + +## Context + +We currently use MaxTxs to reap txs from the mempool when proposing a block, +but enforce MaxBytes when unmarshalling a block, so we could easily propose a +block thats too large to be valid. + +We should just remove MaxTxs all together and stick with MaxBytes, and have a +`mempool.ReapMaxBytes`. + +But we can't just reap BlockSize.MaxBytes, since MaxBytes is for the entire block, +not for the txs inside the block. There's extra amino overhead + the actual +headers on top of the actual transactions + evidence + last commit. + +## Proposed solution + +Therefore, we should + +1) Get rid of MaxTxs. +2) Rename MaxTxsBytes to MaxBytes. + +When we need to ReapMaxBytes from the mempool, we calculate the upper bound as follows: + +``` +ExactLastCommitBytes = {number of validators currently enabled} * {MaxVoteBytes} +MaxEvidenceBytesPerBlock = MaxBytes / 10 +ExactEvidenceBytes = cs.evpool.PendingEvidence(MaxEvidenceBytesPerBlock) * MaxEvidenceBytes + +mempool.ReapMaxBytes(MaxBytes - MaxAminoOverheadForBlock - ExactLastCommitBytes - ExactEvidenceBytes - MaxHeaderBytes) +``` + +where MaxVoteBytes, MaxEvidenceBytes, MaxHeaderBytes and MaxAminoOverheadForBlock +are constants defined inside the `types` package: + +- MaxVoteBytes - 170 bytes +- MaxEvidenceBytes - 364 bytes +- MaxHeaderBytes - 476 bytes (~276 bytes hashes + 200 bytes - 50 UTF-8 encoded + symbols of chain ID 4 bytes each in the worst case + amino overhead) +- MaxAminoOverheadForBlock - 8 bytes (assuming MaxHeaderBytes includes amino + overhead for encoding header, MaxVoteBytes - for encoding vote, etc.) + +ChainID needs to bound to 50 symbols max. + +When reaping evidence, we use MaxBytes to calculate the upper bound (e.g. 1/10) +to save some space for transactions. + +NOTE while reaping the `max int` bytes in mempool, we should account that every +transaction will take `len(tx)+aminoOverhead`, where aminoOverhead=1-4 bytes. + +We should write a test that fails if the underlying structs got changed, but +MaxXXX stayed the same. + +## Status + +Proposed. + +## Consequences + +### Positive + +* one way to limit the size of a block +* less variables to configure + +### Negative + +* constants that need to be adjusted if the underlying structs got changed + +### Neutral diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md index 2303490ad29..d47c7f5580f 100644 --- a/docs/architecture/adr-template.md +++ b/docs/architecture/adr-template.md @@ -6,7 +6,6 @@ ## Status - ## Consequences ### Positive diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 0cfc05cdfa0..00000000000 --- a/docs/conf.py +++ /dev/null @@ -1,199 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Tendermint documentation build configuration file, created by -# sphinx-quickstart on Mon Aug 7 04:55:09 2017. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. 
-# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import urllib - -import sphinx_rtd_theme - - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# - -from recommonmark.parser import CommonMarkParser - -source_parsers = { - '.md': CommonMarkParser, -} - -source_suffix = ['.rst', '.md'] -#source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Tendermint' -copyright = u'2018, The Authors' -author = u'Tendermint' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'' -# The full version, including alpha/beta/rc tags. -release = u'' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' -# html_theme = 'alabaster' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. 
-# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - '**': [ - 'about.html', - 'navigation.html', - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', - 'donate.html', - ] -} - - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Tendermintdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'Tendermint.tex', u'Tendermint Documentation', - u'The Authors', 'manual'), -] - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'Tendermint', u'Tendermint Documentation', - [author], 1) -] - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - (master_doc, 'Tendermint', u'Tendermint Documentation', - author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.', - 'Database'), -] - -# ---------------- customizations ---------------------- - -# for Docker README, below -from shutil import copyfile - -# tm-bench and tm-monitor -tools_repo = "https://raw.githubusercontent.com/tendermint/tools/" -tools_branch = "master" - -tools_dir = "./tools" - - -if os.path.isdir(tools_dir) != True: - os.mkdir(tools_dir) - -copyfile('../DOCKER/README.md', tools_dir+'/docker.md') - -urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.md', filename=tools_dir+'/benchmarking.md') -urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.md', filename=tools_dir+'/monitoring.md') diff --git a/docs/config.js b/docs/config.js index 09d0112e1ef..a006a075669 100644 --- a/docs/config.js +++ b/docs/config.js @@ -63,6 +63,15 @@ module.exports = { "/app-dev/ecosystem" ] }, + { + title: "ABCI Specification", + collapsable: false, + children: [ + "/spec/abci/abci", + "/spec/abci/apps", + "/spec/abci/client-server" + ] + }, { title: "Research", collapsable: false, diff --git a/docs/assets/a_plus_t.png b/docs/imgs/a_plus_t.png similarity index 100% rename from docs/assets/a_plus_t.png rename to docs/imgs/a_plus_t.png diff --git a/docs/assets/abci.png b/docs/imgs/abci.png similarity index 100% rename from docs/assets/abci.png rename to docs/imgs/abci.png diff --git a/docs/assets/consensus_logic.png b/docs/imgs/consensus_logic.png similarity index 100% rename from docs/assets/consensus_logic.png rename to docs/imgs/consensus_logic.png diff --git a/docs/assets/tm-application-example.png b/docs/imgs/tm-application-example.png similarity index 100% rename from docs/assets/tm-application-example.png rename to docs/imgs/tm-application-example.png diff --git 
a/docs/assets/tm-transaction-flow.png b/docs/imgs/tm-transaction-flow.png similarity index 100% rename from docs/assets/tm-transaction-flow.png rename to docs/imgs/tm-transaction-flow.png diff --git a/docs/assets/tmint-logo-blue.png b/docs/imgs/tmint-logo-blue.png similarity index 100% rename from docs/assets/tmint-logo-blue.png rename to docs/imgs/tmint-logo-blue.png diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index bafbec3543e..00000000000 --- a/docs/index.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. Tendermint documentation master file, created by - sphinx-quickstart on Mon Aug 7 04:55:09 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Tendermint! -====================== - -This location for our documentation has been deprecated, please see: - -- https://tendermint.com/docs/ - -The last version built by Read The Docs will still be available at: - -- https://tendermint.readthedocs.io/projects/tools/en/v0.21.0/ diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md index d43fa9b2d00..389bf96584c 100644 --- a/docs/introduction/introduction.md +++ b/docs/introduction/introduction.md @@ -90,7 +90,7 @@ it can be used as a plug-and-play replacement for the consensus engines of other blockchain software. So one can take the current Ethereum code base, whether in Rust, or Go, or Haskell, and run it as a ABCI application using Tendermint consensus. Indeed, [we did that with -Ethereum](https://github.com/tendermint/ethermint). And we plan to do +Ethereum](https://github.com/cosmos/ethermint). And we plan to do the same for Bitcoin, ZCash, and various other deterministic applications as well. @@ -227,7 +227,7 @@ design their message handlers to create a blockchain that does anything useful but this architecture provides a place to start. The diagram below illustrates the flow of messages via ABCI. -![](assets/abci.png) +![](../imgs/abci.png) ## A Note on Determinism @@ -263,7 +263,7 @@ Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus protocol. The protocol follows a simple state machine that looks like this: -![](assets/consensus_logic.png) +![](../imgs/consensus_logic.png) Participants in the protocol are called **validators**; they take turns proposing blocks of transactions and voting on them. Blocks are @@ -321,7 +321,7 @@ consensus protocol. This adds an economic element to the security of the protocol, allowing one to quantify the cost of violating the assumption that less than one-third of voting power is Byzantine. -The [Cosmos Network](http://cosmos.network) is designed to use this +The [Cosmos Network](https://cosmos.network) is designed to use this Proof-of-Stake mechanism across an array of cryptocurrencies implemented as ABCI applications. @@ -329,4 +329,4 @@ The following diagram is Tendermint in a (technical) nutshell. [See here for high resolution version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). 
-![](assets/tm-transaction-flow.png) +![](../imgs/tm-transaction-flow.png) diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index 8e4908784c3..c10ba10a11c 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -26,7 +26,7 @@ Requires: - `go` minimum version 1.10 - `$GOPATH` environment variable must be set -- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH) +- `$GOPATH/bin` must be on your `$PATH` (see [here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)) To install Tendermint, run: @@ -43,9 +43,12 @@ Confirm installation: ``` $ tendermint version -0.23.0-dev +0.23.0 ``` +Note: see the [releases page](https://github.com/tendermint/tendermint/releases) and the latest version +should match what you see above. + ## Initialization Running: @@ -142,8 +145,6 @@ tendermint node --home ./mytestnet/node3 --proxy_app=kvstore --p2p.persistent_pe Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. -Seeds can also be specified in the `config.toml`. See [this -PR](https://github.com/tendermint/tendermint/pull/792) for more information -about configuration options. +Seeds can also be specified in the `config.toml`. See [here](../tendermint-core/configuration.md) for more information about configuration options. Transactions can then be sent as covered in the single, local node example above. diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md index 88e5c6f7250..04f95310551 100644 --- a/docs/networks/deploy-testnets.md +++ b/docs/networks/deploy-testnets.md @@ -71,4 +71,4 @@ local testnet. Review the target in the Makefile to debug any problems. ### Cloud -See the [next section](./terraform-and-ansible.html) for details. +See the [next section](./terraform-and-ansible.md) for details. diff --git a/docs/package.json b/docs/package.json index c76bb37c43b..d45ba539b1e 100644 --- a/docs/package.json +++ b/docs/package.json @@ -3,7 +3,9 @@ "prettier": "^1.13.7", "remark-cli": "^5.0.0", "remark-lint-no-dead-urls": "^0.3.0", - "textlint": "^10.2.1" + "remark-lint-write-good": "^1.0.3", + "textlint": "^10.2.1", + "textlint-rule-stop-words": "^1.0.3" }, "name": "tendermint", "description": "Tendermint Core Documentation", @@ -31,7 +33,8 @@ "homepage": "https://tendermint.com/docs/", "remarkConfig": { "plugins": [ - "remark-lint-no-dead-urls" + "remark-lint-no-dead-urls", + "remark-lint-write-good" ] } } diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 85e42ba8354..00000000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -sphinx -sphinx-autobuild -recommonmark -sphinx_rtd_theme diff --git a/docs/spec/README.md b/docs/spec/README.md index ab689d9d6cc..4de5104f1bf 100644 --- a/docs/spec/README.md +++ b/docs/spec/README.md @@ -31,7 +31,7 @@ please submit them to our [bug bounty](https://tendermint.com/security)! 
- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly - [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed - [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks -- Evidence: TODO +- Evidence: Forthcoming, see [this issue](https://github.com/tendermint/tendermint/issues/2329). ### Software @@ -57,7 +57,7 @@ is malicious or faulty. A commit in Tendermint is a set of signed messages from more than 2/3 of the total weight of the current Validator set. Validators take turns proposing blocks and voting on them. Once enough votes are received, the block is considered -committed. These votes are included in the *next* block as proof that the previous block +committed. These votes are included in the _next_ block as proof that the previous block was committed - they cannot be included in the current block, as that block has already been created. @@ -71,8 +71,8 @@ of the latest state of the blockchain. To achieve this, it embeds cryptographic commitments to certain information in the block "header". This information includes the contents of the block (eg. the transactions), the validator set committing the block, as well as the various results returned by the application. -Note, however, that block execution only occurs *after* a block is committed. -Thus, application results can only be included in the *next* block. +Note, however, that block execution only occurs _after_ a block is committed. +Thus, application results can only be included in the _next_ block. Also note that information like the transaction results and the validator set are never directly included in the block - only their cryptographic digests (Merkle roots) are. diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md new file mode 100644 index 00000000000..c0956db6f76 --- /dev/null +++ b/docs/spec/abci/README.md @@ -0,0 +1,19 @@ +# ABCI + +ABCI is the interface between Tendermint (a state-machine replication engine) +and an application (the actual state machine). It consists of a set of +*methods*, where each method has a corresponding `Request` and `Response` +message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*` +messages and receiving the `Response*` messages in return. + +All message types are defined in a [protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). +This allows Tendermint to run applications written in any programming language. + +This specification is split as follows: + +- [Methods and Types](abci.md) - complete details on all ABCI methods and + message types +- [Applications](apps.md) - how to manage ABCI application state and other + details about building ABCI applications +- [Client and Server](client-server.md) - for those looking to implement their + own ABCI application servers diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md new file mode 100644 index 00000000000..b4314e3e12f --- /dev/null +++ b/docs/spec/abci/abci.md @@ -0,0 +1,375 @@ +# Methods and Types + +## Overview + +The ABCI message types are defined in a [protobuf +file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto). 
+
+ABCI methods are split across 3 separate ABCI *connections*:
+
+- `Consensus Connection: InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
+- `Mempool Connection: CheckTx`
+- `Info Connection: Info, SetOption, Query`
+
+The `Consensus Connection` is driven by a consensus protocol and is responsible
+for block execution.
+The `Mempool Connection` is for validating new transactions, before they're
+shared or included in a block.
+The `Info Connection` is for initialization and for queries from the user.
+
+Additionally, there is a `Flush` method that is called on every connection,
+and an `Echo` method that is just for debugging.
+
+More details on managing state across connections can be found in the section on
+[ABCI Applications](apps.md).
+
+## Errors
+
+Some methods (`Echo, Info, InitChain, BeginBlock, EndBlock, Commit`)
+don't return errors because an error would indicate a critical failure
+in the application, and there's nothing Tendermint can do about it. The problem
+should be addressed and both Tendermint and the application restarted.
+All other methods (`SetOption, Query, CheckTx, DeliverTx`) return an
+application-specific response `Code uint32`, where only `0` is reserved
+for `OK`.
+
+## Tags
+
+Some methods (`CheckTx, BeginBlock, DeliverTx, EndBlock`)
+include a `Tags` field in their `Response*`. Each tag is a key-value pair denoting
+something that happened during the method's execution.
+
+Tags can be used to index transactions and blocks according to what happened
+during their execution.
+
+Keys and values in tags must be UTF-8 encoded strings (e.g.
+"account.owner": "Bob", "balance": "100.0",
+"time": "2018-01-02T12:30:00Z").
+
+## Determinism
+
+Some methods (`SetOption, Query, CheckTx, DeliverTx`) return
+non-deterministic data in the form of `Info` and `Log`. The `Log` is
+intended for the literal output from the application's logger, while the
+`Info` is any additional info that should be returned.
+
+All other fields in the `Response*` of all methods must be strictly deterministic.
+
+For this reason, it is recommended that applications not be exposed to any
+external user or process except via the ABCI connections to a consensus engine
+like Tendermint Core.
+
+## Block Execution
+
+The first time a new blockchain is started, Tendermint calls
+`InitChain`. From then on, the following sequence of methods is executed for each
+block:
+
+`BeginBlock, [DeliverTx], EndBlock, Commit`
+
+where one `DeliverTx` is called for each transaction in the block.
+The result is an updated application state.
+Cryptographic commitments to the results of DeliverTx, EndBlock, and
+Commit are included in the header of the next block.
+
+## Messages
+
+### Echo
+
+- **Request**:
+  - `Message (string)`: A string to echo back
+- **Response**:
+  - `Message (string)`: The input string
+- **Usage**:
+  - Echo a string to test an ABCI client/server implementation
+
+### Flush
+
+- **Usage**:
+  - Signals that messages queued on the client should be flushed to
+    the server. It is called periodically by the client
+    implementation to ensure asynchronous requests are actually
+    sent, and is called immediately to make a synchronous request,
+    which returns when the Flush response comes back.
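+
+To make the block execution sequence described above concrete, the following is a
+minimal Go sketch of a consensus engine driving an application through one block.
+The `Application` interface and all names here are simplified assumptions for
+illustration - they are not the actual ABCI Go interfaces:
+
+```go
+package main
+
+import "fmt"
+
+// Application is a hypothetical, simplified stand-in for an ABCI app's
+// consensus connection: one method per call in the per-block sequence.
+type Application interface {
+	BeginBlock(hash []byte)
+	DeliverTx(tx []byte) (code uint32)
+	EndBlock(height int64)
+	Commit() (appHash []byte)
+}
+
+// countingApp is a toy app that just counts delivered transactions.
+type countingApp struct{ count int }
+
+func (a *countingApp) BeginBlock(hash []byte)     {}
+func (a *countingApp) DeliverTx(tx []byte) uint32 { a.count++; return 0 }
+func (a *countingApp) EndBlock(height int64)      {}
+func (a *countingApp) Commit() []byte             { return []byte{byte(a.count)} }
+
+// executeBlock drives the sequence BeginBlock, [DeliverTx], EndBlock, Commit.
+func executeBlock(app Application, height int64, hash []byte, txs [][]byte) []byte {
+	app.BeginBlock(hash)
+	for _, tx := range txs {
+		if code := app.DeliverTx(tx); code != 0 {
+			// A non-zero code marks this tx as invalid; it is still part of the block.
+			fmt.Printf("tx failed with code %d\n", code)
+		}
+	}
+	app.EndBlock(height)
+	// The hash returned by Commit is included in the header of the *next* block.
+	return app.Commit()
+}
+
+func main() {
+	app := &countingApp{}
+	appHash := executeBlock(app, 1, []byte("blockhash"), [][]byte{[]byte("tx1"), []byte("tx2")})
+	fmt.Printf("app hash after block 1: %x\n", appHash)
+}
+```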
+ +### Info + +- **Request**: + - `Version (string)`: The Tendermint version +- **Response**: + - `Data (string)`: Some arbitrary information + - `Version (Version)`: Version information + - `LastBlockHeight (int64)`: Latest block for which the app has + called Commit + - `LastBlockAppHash ([]byte)`: Latest result of Commit +- **Usage**: + - Return information about the application state. + - Used to sync Tendermint with the application during a handshake + that happens on startup. + - Tendermint expects `LastBlockAppHash` and `LastBlockHeight` to + be updated during `Commit`, ensuring that `Commit` is never + called twice for the same block height. + +### SetOption + +- **Request**: + - `Key (string)`: Key to set + - `Value (string)`: Value to set for key +- **Response**: + - `Code (uint32)`: Response code + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. +- **Usage**: + - Set non-consensus critical application specific options. + - e.g. Key="min-fee", Value="100fermion" could set the minimum fee + required for CheckTx (but not DeliverTx - that would be + consensus critical). + +### InitChain + +- **Request**: + - `Time (google.protobuf.Timestamp)`: Genesis time. + - `ChainID (string)`: ID of the blockchain. + - `ConsensusParams (ConsensusParams)`: Initial consensus-critical parameters. + - `Validators ([]ValidatorUpdate)`: Initial genesis validators. + - `AppStateBytes ([]byte)`: Serialized initial application state. Amino-encoded JSON bytes. +- **Response**: + - `ConsensusParams (ConsensusParams)`: Initial + consensus-critical parameters. + - `Validators ([]ValidatorUpdate)`: Initial validator set (if non empty). +- **Usage**: + - Called once upon genesis. + - If ResponseInitChain.Validators is empty, the initial validator set will be the RequestInitChain.Validators + - If ResponseInitChain.Validators is not empty, the initial validator set will be the + ResponseInitChain.Validators (regardless of what is in RequestInitChain.Validators). + - This allows the app to decide if it wants to accept the initial validator + set proposed by tendermint (ie. in the genesis file), or if it wants to use + a different one (perhaps computed based on some application specific + information in the genesis file). + +### Query + +- **Request**: + - `Data ([]byte)`: Raw query bytes. Can be used with or in lieu + of Path. + - `Path (string)`: Path of request, like an HTTP GET path. Can be + used with or in liue of Data. + - Apps MUST interpret '/store' as a query by key on the + underlying store. The key SHOULD be specified in the Data field. + - Apps SHOULD allow queries over specific types like + '/accounts/...' or '/votes/...' + - `Height (int64)`: The block height for which you want the query + (default=0 returns data for the latest committed block). Note + that this is the height of the block containing the + application's Merkle root hash, which represents the state as it + was after committing the block at Height-1 + - `Prove (bool)`: Return Merkle proof with response if possible +- **Response**: + - `Code (uint32)`: Response code. + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. + - `Index (int64)`: The index of the key in the tree. + - `Key ([]byte)`: The key of the matching data. + - `Value ([]byte)`: The value of the matching data. 
+ - `Proof ([]byte)`: Proof for the data, if requested. + - `Height (int64)`: The block height from which data was derived. + Note that this is the height of the block containing the + application's Merkle root hash, which represents the state as it + was after committing the block at Height-1 +- **Usage**: + - Query for data from the application at current or past height. + - Optionally return Merkle proof. + +### BeginBlock + +- **Request**: + - `Hash ([]byte)`: The block's hash. This can be derived from the + block header. + - `Header (struct{})`: The block header. + - `LastCommitInfo (LastCommitInfo)`: Info about the last commit, including the + round, and the list of validators and which ones signed the last block. + - `ByzantineValidators ([]Evidence)`: List of evidence of + validators that acted maliciously. +- **Response**: + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing +- **Usage**: + - Signals the beginning of a new block. Called prior to + any DeliverTxs. + - The header contains the height, timestamp, and more - it exactly matches the + Tendermint block header. We may seek to generalize this in the future. + - The `LastCommitInfo` and `ByzantineValidators` can be used to determine + rewards and punishments for the validators. NOTE validators here do not + include pubkeys. + +### CheckTx + +- **Request**: + - `Tx ([]byte)`: The request transaction bytes +- **Response**: + - `Code (uint32)`: Response code + - `Data ([]byte)`: Result bytes, if any. + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. + - `GasWanted (int64)`: Amount of gas request for transaction. + - `GasUsed (int64)`: Amount of gas consumed by transaction. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing + transactions (eg. by account). +- **Usage**: + - Technically optional - not involved in processing blocks. + - Guardian of the mempool: every node runs CheckTx before letting a + transaction into its local mempool. + - The transaction may come from an external user or another node + - CheckTx need not execute the transaction in full, but rather a light-weight + yet stateful validation, like checking signatures and account balances, but + not running code in a virtual machine. + - Transactions where `ResponseCheckTx.Code != 0` will be rejected - they will not be broadcast to + other nodes or included in a proposal block. + - Tendermint attributes no other value to the response code + +### DeliverTx + +- **Request**: + - `Tx ([]byte)`: The request transaction bytes. +- **Response**: + - `Code (uint32)`: Response code. + - `Data ([]byte)`: Result bytes, if any. + - `Log (string)`: The output of the application's logger. May + be non-deterministic. + - `Info (string)`: Additional information. May + be non-deterministic. + - `GasWanted (int64)`: Amount of gas requested for transaction. + - `GasUsed (int64)`: Amount of gas consumed by transaction. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing + transactions (eg. by account). +- **Usage**: + - The workhorse of the application - non-optional. + - Execute the transaction in full. + - `ResponseDeliverTx.Code == 0` only if the transaction is fully valid. + +### EndBlock + +- **Request**: + - `Height (int64)`: Height of the block just executed. +- **Response**: + - `ValidatorUpdates ([]ValidatorUpdate)`: Changes to validator set (set + voting power to 0 to remove). 
+ - `ConsensusParamUpdates (ConsensusParams)`: Changes to + consensus-critical time, size, and other parameters. + - `Tags ([]cmn.KVPair)`: Key-Value tags for filtering and indexing +- **Usage**: + - Signals the end of a block. + - Called after all transactions, prior to each Commit. + - Validator updates returned by block `H` impact blocks `H+1`, `H+2`, and + `H+3`, but only effects changes on the validator set of `H+2`: + - `H+1`: NextValidatorsHash + - `H+2`: ValidatorsHash (and thus the validator set) + - `H+3`: LastCommitInfo (ie. the last validator set) + - Consensus params returned for block `H` apply for block `H+1` + +### Commit + +- **Response**: + - `Data ([]byte)`: The Merkle root hash +- **Usage**: + - Persist the application state. + - Return a Merkle root hash of the application state. + - It's critical that all application instances return the + same hash. If not, they will not be able to agree on the next + block, because the hash is included in the next block! + +## Data Types + +### Header + +- **Fields**: + - `ChainID (string)`: ID of the blockchain + - `Height (int64)`: Height of the block in the chain + - `Time (google.protobuf.Timestamp)`: Time of the block. It is the proposer's + local time when block was created. + - `NumTxs (int32)`: Number of transactions in the block + - `TotalTxs (int64)`: Total number of transactions in the blockchain until + now + - `LastBlockID (BlockID)`: Hash of the previous (parent) block + - `LastCommitHash ([]byte)`: Hash of the previous block's commit + - `ValidatorsHash ([]byte)`: Hash of the validator set for this block + - `NextValidatorsHash ([]byte)`: Hash of the validator set for the next block + - `ConsensusHash ([]byte)`: Hash of the consensus parameters for this block + - `AppHash ([]byte)`: Data returned by the last call to `Commit` - typically the + Merkle root of the application state after executing the previous block's + transactions + - `LastResultsHash ([]byte)`: Hash of the ABCI results returned by the last block + - `EvidenceHash ([]byte)`: Hash of the evidence included in this block + - `ProposerAddress ([]byte)`: Original proposer for the block +- **Usage**: + - Provided in RequestBeginBlock + - Provides important context about the current state of the blockchain - + especially height and time. + - Provides the proposer of the current block, for use in proposer-based + reward mechanisms. + +### Validator + +- **Fields**: + - `Address ([]byte)`: Address of the validator (hash of the public key) + - `Power (int64)`: Voting power of the validator +- **Usage**: + - Validator identified by address + - Used in RequestBeginBlock as part of VoteInfo + - Does not include PubKey to avoid sending potentially large quantum pubkeys + over the ABCI + +### ValidatorUpdate + +- **Fields**: + - `PubKey (PubKey)`: Public key of the validator + - `Power (int64)`: Voting power of the validator +- **Usage**: + - Validator identified by PubKey + - Used to tell Tendermint to update the validator set + +### VoteInfo + +- **Fields**: + - `Validator (Validator)`: A validator + - `SignedLastBlock (bool)`: Indicates whether or not the validator signed + the last block +- **Usage**: + - Indicates whether a validator signed the last block, allowing for rewards + based on validator availability + +### PubKey + +- **Fields**: + - `Type (string)`: Type of the public key. A simple string like `"ed25519"`. + In the future, may indicate a serialization algorithm to parse the `Data`, + for instance `"amino"`. + - `Data ([]byte)`: Public key data. 
For a simple public key, it's just the
+    raw bytes. If the `Type` indicates an encoding algorithm, this is the
+    encoded public key.
+- **Usage**:
+  - A generic and extensible typed public key
+
+### Evidence
+
+- **Fields**:
+  - `Type (string)`: Type of the evidence. A hierarchical path like
+    "duplicate/vote".
+  - `Validator (Validator)`: The offending validator
+  - `Height (int64)`: Height when the offense was committed
+  - `Time (google.protobuf.Timestamp)`: Time of the block at height `Height`.
+    It is the proposer's local time when the block was created.
+  - `TotalVotingPower (int64)`: Total voting power of the validator set at
+    height `Height`
+
+### LastCommitInfo
+
+- **Fields**:
+  - `Round (int32)`: Commit round.
+  - `Votes ([]VoteInfo)`: List of validator addresses in the last validator set,
+    with their voting power and whether or not they signed a vote.
diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md
new file mode 100644
index 00000000000..92a4f49d5ff
--- /dev/null
+++ b/docs/spec/abci/apps.md
@@ -0,0 +1,264 @@
+# Applications
+
+Please ensure you've first read the spec for [ABCI Methods and Types](abci.md).
+
+Here we cover the following components of ABCI applications:
+
+- [State](#state) - the interplay between ABCI connections and application state
+  and the differences between `CheckTx` and `DeliverTx`.
+- [Transaction Results](#transaction-results) - rules around transaction
+  results and validity
+- [Validator Set Updates](#validator-updates) - how validator sets are
+  changed during `InitChain` and `EndBlock`
+- [Query](#query) - standards for using the `Query` method
+- [Crash Recovery](#crash-recovery) - handshake protocol to synchronize
+  Tendermint and the application on startup.
+
+## State
+
+Since Tendermint maintains multiple concurrent ABCI connections, it is typical
+for an application to maintain a distinct state for each, and for the states to
+be synchronized during `Commit`.
+
+### Commit
+
+Application state should only be persisted to disk during `Commit`.
+
+Before `Commit` is called, Tendermint locks and flushes the mempool so that no new messages will
+be received on the mempool connection. This provides an opportunity to safely update all three
+states to the latest committed state at once.
+
+When `Commit` completes, it unlocks the mempool.
+
+Note that it is not possible to send transactions to Tendermint during `Commit` - if your app
+tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
+
+### Consensus Connection
+
+The Consensus Connection should maintain a `DeliverTxState` -
+the working state for block execution. It should be updated by the calls to
+`BeginBlock`, `DeliverTx`, and `EndBlock` during block execution and committed to
+disk as the "latest committed state" during `Commit`.
+
+Updates made to the DeliverTxState by each method call must be readable by each subsequent method -
+ie. the updates are linearizable.
+
+### Mempool Connection
+
+The Mempool Connection should maintain a `CheckTxState`
+to sequentially process pending transactions in the mempool that have
+not yet been committed. It should be initialized to the latest committed state
+at the end of every `Commit`.
+
+The CheckTxState may be updated concurrently with the DeliverTxState, as
+messages may be sent concurrently on the Consensus and Mempool connections. However,
+before calling `Commit`, Tendermint will lock and flush the mempool connection,
+ensuring that all existing CheckTx are responded to and no new ones can
+begin.
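+
+To make the relationship between these states concrete, here is a minimal Go
+sketch of a toy key-value application that keeps a separate `DeliverTxState` and
+`CheckTxState` and synchronizes them at `Commit`. The state layout and all names
+are illustrative assumptions, not the actual Tendermint APIs:
+
+```go
+package main
+
+import "fmt"
+
+// kvState is a toy application state.
+type kvState map[string]string
+
+func (s kvState) copy() kvState {
+	c := make(kvState, len(s))
+	for k, v := range s {
+		c[k] = v
+	}
+	return c
+}
+
+// app keeps one working state per ABCI connection, as described above.
+type app struct {
+	committed      kvState // latest committed state, persisted at Commit
+	deliverTxState kvState // working state for the consensus connection
+	checkTxState   kvState // working state for the mempool connection
+}
+
+// deliverTx applies a tx to the block-execution state only.
+func (a *app) deliverTx(k, v string) { a.deliverTxState[k] = v }
+
+// checkTx runs a cheap validity check against checkTxState,
+// e.g. rejecting a key that was already set.
+func (a *app) checkTx(k string) bool {
+	_, exists := a.checkTxState[k]
+	return !exists
+}
+
+// commit "persists" deliverTxState and resets checkTxState to the committed
+// state. Tendermint has locked and flushed the mempool before calling this.
+func (a *app) commit() {
+	a.committed = a.deliverTxState.copy()
+	a.checkTxState = a.committed.copy()
+}
+
+func main() {
+	a := &app{committed: kvState{}, deliverTxState: kvState{}, checkTxState: kvState{}}
+	a.deliverTx("foo", "bar")
+	a.commit()
+	fmt.Println(a.checkTx("foo")) // false: "foo" is now in the committed state
+}
+```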
+
+After `Commit`, CheckTx is run again on all transactions that remain in the
+node's local mempool after filtering those included in the block. To prevent the
+mempool from rechecking all transactions every time a block is committed, set
+the configuration option `mempool.recheck=false`.
+
+Finally, the mempool will unlock and new transactions can be processed through CheckTx again.
+
+Note that CheckTx doesn't have to check everything that affects transaction validity; the
+expensive things can be skipped. In fact, CheckTx doesn't have to check
+anything; it might say that any transaction is a valid transaction.
+Unlike DeliverTx, CheckTx is just there as
+a sort of weak filter to keep invalid transactions out of the blockchain. It's
+weak, because a Byzantine node doesn't care about CheckTx; it can propose a
+block full of invalid transactions if it wants.
+
+### Info Connection
+
+The Info Connection should maintain a `QueryState` for answering queries from the user,
+and for initialization when Tendermint first starts up (both described further
+below).
+It should always contain the latest committed state associated with the
+latest committed block.
+
+QueryState should be set to the latest `DeliverTxState` at the end of every `Commit`,
+ie. after the full block has been processed and the state committed to disk.
+Otherwise it should never be modified.
+
+## Transaction Results
+
+`ResponseCheckTx` and `ResponseDeliverTx` contain the same fields, though they
+have slightly different effects.
+
+In both cases, `Info` and `Log` are non-deterministic values for debugging/convenience purposes
+that are otherwise ignored.
+
+In both cases, the `GasWanted` and `GasUsed` parameters are currently ignored,
+though see issues
+[#1861](https://github.com/tendermint/tendermint/issues/1861),
+[#2299](https://github.com/tendermint/tendermint/issues/2299) and
+[#2310](https://github.com/tendermint/tendermint/issues/2310) for how this may
+change.
+
+### CheckTx
+
+If `Code != 0`, the transaction will be rejected from the mempool and hence
+not broadcast to other peers and not included in a proposal block.
+
+`Data` contains the result of the CheckTx transaction execution, if any. It is
+semantically meaningless to Tendermint.
+
+`Tags` include any tags for the execution, though since the transaction has not
+been committed yet, they are effectively ignored by Tendermint.
+
+### DeliverTx
+
+If DeliverTx returns `Code != 0`, the transaction will be considered invalid,
+though it is still included in the block.
+
+`Data` contains the result of the DeliverTx transaction execution, if any. It is
+semantically meaningless to Tendermint.
+
+Both the `Code` and `Data` are included in a structure that is hashed into the
+`LastResultsHash` of the next block header.
+
+`Tags` include any tags for the execution, which Tendermint will use to index
+the transaction. This allows transactions to be queried according to what
+events took place during their execution.
+
+See issue [#1007](https://github.com/tendermint/tendermint/issues/1007) for how
+the tags will be hashed into the next block header.
+
+## Validator Updates
+
+The application may set the validator set during InitChain, and update it during
+EndBlock.
+
+### InitChain
+
+ResponseInitChain can return a list of validators.
+If the list is empty, Tendermint will use the validators loaded in the genesis
+file.
+If the list is not empty, Tendermint will use it for the validator set.
+This way the application can determine the initial validator set for the
+blockchain.
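+
+As a small illustration of the rule above, here is a Go sketch of the
+`InitChain` decision, using simplified stand-ins for the ABCI types rather than
+the real generated protobuf types:
+
+```go
+package main
+
+import "fmt"
+
+// ValidatorUpdate is a simplified stand-in for the ABCI type:
+// a public key plus a voting power.
+type ValidatorUpdate struct {
+	PubKey []byte
+	Power  int64
+}
+
+// initialValidators mirrors the rule above: if the app returns a non-empty
+// list in ResponseInitChain, it overrides the genesis validators; otherwise
+// the RequestInitChain validators are kept.
+func initialValidators(genesis, response []ValidatorUpdate) []ValidatorUpdate {
+	if len(response) > 0 {
+		return response
+	}
+	return genesis
+}
+
+func main() {
+	genesis := []ValidatorUpdate{{PubKey: []byte("val1"), Power: 10}}
+	// An empty response accepts the genesis validator set.
+	fmt.Println(initialValidators(genesis, nil))
+}
+```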
+
+ResponseInitChain also includes ConsensusParams, but these are presently
+ignored.
+
+### EndBlock
+
+Updates to the Tendermint validator set can be made by returning
+`ValidatorUpdate` objects in the `ResponseEndBlock`:
+
+```
+message ValidatorUpdate {
+  PubKey pub_key
+  int64 power
+}
+
+message PubKey {
+  string type
+  bytes data
+}
+```
+
+The `pub_key` currently supports only one type:
+
+- `type = "ed25519"` and `data = <raw 32-byte ed25519 public key>`
+
+The `power` is the new voting power for the validator, with the
+following rules:
+
+- power must be non-negative
+- if power is 0, the validator must already exist, and will be removed from the
+  validator set
+- if power is non-0:
+  - if the validator does not already exist, it will be added to the validator
+    set with the given power
+  - if the validator does already exist, its power will be adjusted to the given power
+
+Note the updates returned in block `H` will only take effect at block `H+2`.
+
+## Query
+
+Query is a generic message type with lots of flexibility to enable diverse sets
+of queries from applications. Tendermint has no requirements from the Query
+message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish to.
+That said, Tendermint makes a number of queries to support some optional
+features. These are:
+
+### Peer Filtering
+
+When Tendermint connects to a peer, it sends two queries to the ABCI application
+using the following paths, with no additional data:
+
+- `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denotes the IP address and
+  the port of the connection
+- `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (ie. the
+  pubkey.Address() for the peer's PubKey)
+
+If either of these queries returns a non-zero ABCI code, Tendermint will refuse
+to connect to the peer.
+
+## Crash Recovery
+
+On startup, Tendermint calls the `Info` method on the Info Connection to get the latest
+committed state of the app. The app MUST return information consistent with the
+last block for which it successfully completed Commit.
+
+If the app successfully committed block H but not H+1, then `last_block_height = H`
+and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
+failed during the Commit of block H, then `last_block_height = H-1` and
+`last_block_app_hash = <hash returned by Commit for block H-1>`.
+
+We now distinguish three heights, and describe how Tendermint syncs itself with
+the app.
+
+```
+storeBlockHeight = height of the last block Tendermint saw a commit for
+stateBlockHeight = height of the last block for which Tendermint completed all
+    block processing and saved all ABCI results to disk
+appBlockHeight = height of the last block for which the ABCI app successfully
+    completed Commit
+```
+
+Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
+Note also that we never call Commit on an ABCI app twice for the same height.
+
+The procedure is as follows.
+
+First, some simple start conditions:
+
+If `appBlockHeight == 0`, then call InitChain.
+
+If `storeBlockHeight == 0`, we're done.
+
+Now, some sanity checks:
+
+If `storeBlockHeight < appBlockHeight`, error
+If `storeBlockHeight < stateBlockHeight`, panic
+If `storeBlockHeight > stateBlockHeight+1`, panic
+
+Now, the meat:
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
+replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
+This happens if we completed processing the block, but the app forgot its height.
+
+If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
+This happens if we crashed at an opportune spot.
+
+If `storeBlockHeight == stateBlockHeight+1`, we started processing the block but
+didn't finish. There are three sub-cases:
+
+    If `appBlockHeight < stateBlockHeight`,
+        replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
+        and replay the block at `storeBlockHeight` using the WAL.
+    This happens if the app forgot the last block it committed.
+
+    If `appBlockHeight == stateBlockHeight`,
+        replay the last block (storeBlockHeight) in full.
+    This happens if we crashed before the app finished Commit.
+
+    If `appBlockHeight == storeBlockHeight`,
+        update the state using the saved ABCI responses, but don't run the block against the real app.
+    This happens if we crashed after the app finished Commit but before Tendermint saved the state.
diff --git a/docs/spec/abci/client-server.md b/docs/spec/abci/client-server.md
new file mode 100644
index 00000000000..822bfd1fcb5
--- /dev/null
+++ b/docs/spec/abci/client-server.md
@@ -0,0 +1,104 @@
+# Client and Server
+
+This section is for those looking to implement their own ABCI Server, perhaps in
+a new programming language.
+
+You are expected to have read [ABCI Methods and Types](abci.md) and [ABCI
+Applications](apps.md).
+
+See additional details in the [ABCI
+readme](https://github.com/tendermint/tendermint/blob/develop/abci/README.md) (TODO: deduplicate
+those details).
+
+## Message Protocol
+
+The message protocol consists of pairs of requests and responses defined in the
+[protobuf file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
+
+Some messages have no fields, while others may include byte-arrays, strings, integers,
+or custom protobuf types.
+
+For more details on protobuf, see the [documentation](https://developers.google.com/protocol-buffers/docs/overview).
+
+For each request, a server should respond with the corresponding
+response, where the order of requests is preserved in the order of
+responses.
+
+## Server
+
+To use ABCI in your programming language of choice, there must be an ABCI
+server in that language. Tendermint supports two kinds of server
+implementation:
+
+- Asynchronous, raw socket server (Tendermint Socket Protocol, also
+  known as TSP or Teaspoon)
+- GRPC
+
+Both can be tested using the `abci-cli` by setting the `--abci` flag
+appropriately (ie. to `socket` or `grpc`).
+
+See examples, in various stages of maintenance, in
+[Go](https://github.com/tendermint/tendermint/tree/develop/abci/server),
+[JavaScript](https://github.com/tendermint/js-abci),
+[Python](https://github.com/tendermint/tendermint/tree/develop/abci/example/python3/abci),
+[C++](https://github.com/mdyring/cpp-tmsp), and
+[Java](https://github.com/jTendermint/jabci).
+
+### GRPC
+
+If GRPC is available in your language, this is the easiest approach,
+though it will have significant performance overhead.
+
+To get started with GRPC, copy in the [protobuf
+file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto)
+and compile it using the GRPC plugin for your language. For instance,
+for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`.
+See the [grpc documentation for more details](http://www.grpc.io/docs/).
+`protoc` will autogenerate all the necessary code for ABCI client and +server in your language, including whatever interface your application +must satisfy to be used by the ABCI server for handling requests. + +### TSP + +If GRPC is not available in your language, or you require higher +performance, or otherwise enjoy programming, you may implement your own +ABCI server using the Tendermint Socket Protocol, known affectionately +as Teaspoon. The first step is still to auto-generate the relevant data +types and codec in your language using `protoc`. Messages coming over +the socket are proto3 encoded, but additionally length-prefixed to +facilitate use as a streaming protocol. proto3 doesn't have an +official length-prefix standard, so we use our own. The first byte in +the prefix represents the length of the Big Endian encoded length. The +remaining bytes in the prefix are the Big Endian encoded length. + +For example, if the proto3 encoded ABCI message is 0xDEADBEEF (4 +bytes), the length-prefixed message is 0x0104DEADBEEF. If the proto3 +encoded ABCI message is 65535 bytes long, the length-prefixed message +would be like 0x02FFFF.... + +Note this prefixing does not apply for grpc. + +An ABCI server must also be able to support multiple connections, as +Tendermint uses three connections. + + +### Async vs Sync + +The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages. +This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward +transactions to the app before it's finished processing previous ones. + +Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other +messages are sent synchronously. + +## Client + +There are currently two use-cases for an ABCI client. One is a testing +tool, as in the `abci-cli`, which allows ABCI requests to be sent via +command line. The other is a consensus engine, such as Tendermint Core, +which makes requests to the application every time a new transaction is +received or a block is committed. + +It is unlikely that you will need to implement a client. For details of +our client, see +[here](https://github.com/tendermint/tendermint/tree/develop/abci/client). diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index e3171818a56..795a2292696 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -8,25 +8,28 @@ The Tendermint blockchains consists of a short list of basic data types: - `Block` - `Header` -- `Vote` - `BlockID` -- `Signature` -- `Evidence` +- `Time` +- `Data` (for transactions) +- `Commit` and `Vote` +- `EvidenceData` and `Evidence` ## Block -A block consists of a header, a list of transactions, a list of votes (the commit), +A block consists of a header, transactions, votes (the commit), and a list of evidence of malfeasance (ie. signing conflicting votes). ```go type Block struct { Header Header - Txs [][]byte - LastCommit []Vote - Evidence []Evidence + Txs Data + Evidence EvidenceData + LastCommit Commit } ``` +Note the `LastCommit` is the set of votes that committed the last block. 
+ ## Header A block header contains metadata about the block and about the consensus, as well as commitments to @@ -34,32 +37,30 @@ the data in the current block, the previous block, and the results returned by t ```go type Header struct { - // block metadata - Version string // Version string - ChainID string // ID of the chain - Height int64 // Current block height - Time int64 // UNIX time, in millisconds - - // current block - NumTxs int64 // Number of txs in this block - TxHash []byte // SimpleMerkle of the block.Txs - LastCommitHash []byte // SimpleMerkle of the block.LastCommit - - // previous block - TotalTxs int64 // prevBlock.TotalTxs + block.NumTxs - LastBlockID BlockID // BlockID of prevBlock - - // application - ResultsHash []byte // SimpleMerkle of []abci.Result from prevBlock - AppHash []byte // Arbitrary state digest - ValidatorsHash []byte // SimpleMerkle of the current ValidatorSet - NextValidatorsHash []byte // SimpleMerkle of the next ValidatorSet - ConsensusParamsHash []byte // SimpleMerkle of the ConsensusParams - - // consensus - Proposer []byte // Address of the block proposer - EvidenceHash []byte // SimpleMerkle of []Evidence -} + // basic block info + ChainID string + Height int64 + Time Time + NumTxs int64 + TotalTxs int64 + + // prev block info + LastBlockID BlockID + + // hashes of block data + LastCommitHash []byte // commit from validators from the last block + DataHash []byte // Merkle root of transactions + + // hashes from the app output from the prev block + ValidatorsHash []byte // validators for the current block + NextValidatorsHash []byte // validators for the next block + ConsensusHash []byte // consensus params for current block + AppHash []byte // state after txs from the previous block + LastResultsHash []byte // root hash of all results from the txs from the previous block + + // consensus info + EvidenceHash []byte // evidence included in the block + ProposerAddress []byte // original proposer of the block ``` Further details on each of these fields is described below. @@ -85,6 +86,44 @@ type PartsHeader struct { } ``` +TODO: link to details of merkle sums. + +## Time + +Tendermint uses the +[Google.Protobuf.WellKnownTypes.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/timestamp) +format, which uses two integers, one for Seconds and for Nanoseconds. + +NOTE: there is currently a small divergence between Tendermint and the +Google.Protobuf.WellKnownTypes.Timestamp that should be resolved. See [this +issue](https://github.com/tendermint/go-amino/issues/223) for details. + +## Data + +Data is just a wrapper for a list of transactions, where transactions are +arbitrary byte arrays: + +``` +type Data struct { + Txs [][]byte +} +``` + +## Commit + +Commit is a simple wrapper for a list of votes, with one vote for each +validator. It also contains the relevant BlockID: + +``` +type Commit struct { + BlockID BlockID + Precommits []Vote +} +``` + +NOTE: this will likely change to reduce the commit size by eliminating redundant +information - see [issue #1648](https://github.com/tendermint/tendermint/issues/1648). + ## Vote A vote is a signed message from a validator for a particular block. @@ -92,63 +131,52 @@ The vote includes information about the validator signing it. 
```go type Vote struct { - Timestamp int64 - Address []byte - Index int - Height int64 - Round int - Type int8 - BlockID BlockID - Signature Signature + ValidatorAddress []byte + ValidatorIndex int + Height int64 + Round int + Timestamp Time + Type int8 + BlockID BlockID + Signature []byte } ``` There are two types of votes: -a *prevote* has `vote.Type == 1` and -a *precommit* has `vote.Type == 2`. +a _prevote_ has `vote.Type == 1` and +a _precommit_ has `vote.Type == 2`. ## Signature -Tendermint allows for multiple signature schemes to be used by prepending a single type-byte -to the signature bytes. Different signatures may also come with fixed or variable lengths. -Currently, Tendermint supports Ed25519 and Secp256k1. +Signatures in Tendermint are raw bytes representing the underlying signature. +The only signature scheme currently supported for Tendermint validators is +ED25519. The signature is the raw 64-byte ED25519 signature. -### ED25519 +## EvidenceData -An ED25519 signature has `Type == 0x1`. It looks like: +EvidenceData is a simple wrapper for a list of evidence: -```go -// Implements Signature -type Ed25519Signature struct { - Type int8 = 0x1 - Signature [64]byte +``` +type EvidenceData struct { + Evidence []Evidence } ``` -where `Signature` is the 64 byte signature. - -### Secp256k1 +## Evidence -A `Secp256k1` signature has `Type == 0x2`. It looks like: +Evidence in Tendermint is implemented as an interface. +This means any evidence is encoded using its Amino prefix. +There is currently only a single type, the `DuplicateVoteEvidence`. -```go -// Implements Signature -type Secp256k1Signature struct { - Type int8 = 0x2 - Signature []byte -} ``` - -where `Signature` is the DER encoded signature, ie: - -```hex -0x30 <0x02> 0x2 . +// amino name: "tendermint/DuplicateVoteEvidence" +type DuplicateVoteEvidence struct { + PubKey PubKey + VoteA Vote + VoteB Vote +} ``` -## Evidence - -TODO - ## Validation Here we describe the validation rules for every element in a block. @@ -162,23 +190,23 @@ We refer to certain globally available objects: `prevBlock` is the `block` at the previous height, and `state` keeps track of the validator set, the consensus parameters and other results from the application. At the point when `block` is the block under consideration, -the current version of the `state` corresponds to the state -after executing transactions from the `prevBlock`. +the current version of the `state` corresponds to the state +after executing transactions from the `prevBlock`. Elements of an object are accessed as expected, -ie. `block.Header`. +ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`. ### Header A Header is valid if its corresponding fields are valid. -### Version - -Arbitrary string. - ### ChainID -Arbitrary constant string. +``` +len(block.ChainID) < 50 +``` + +ChainID must be maximum 50 UTF-8 symbols. ### Height @@ -191,39 +219,27 @@ The height is an incrementing integer. The first block has `block.Header.Height ### Time -The median of the timestamps of the valid votes in the block.LastCommit. -Corresponds to the number of nanoseconds, with millisecond resolution, since January 1, 1970. +``` +block.Header.Timestamp >= prevBlock.Header.Timestamp + 1 ms +block.Header.Timestamp == MedianTime(block.LastCommit, state.LastValidators) +``` + +The block timestamp must be monotonic. +It must equal the weighted median of the timestamps of the valid votes in the block.LastCommit. 
Note: the timestamp of a vote must be greater by at least one millisecond than that of the block being voted on. +See the section on [BFT time](../consensus/bft-time.md) for more details. + ### NumTxs ```go -block.Header.NumTxs == len(block.Txs) +block.Header.NumTxs == len(block.Txs.Txs) ``` Number of transactions included in the block. -### TxHash - -```go -block.Header.TxHash == SimpleMerkleRoot(block.Txs) -``` - -Simple Merkle root of the transactions in the block. - -### LastCommitHash - -```go -block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit) -``` - -Simple Merkle root of the votes included in the block. -These are the votes that committed the previous block. - -The first block has `block.Header.LastCommitHash == []byte{}` - ### TotalTxs ```go @@ -254,25 +270,24 @@ which are held in the `state` and may be updated by the application. The first block has `block.Header.LastBlockID == BlockID{}`. -### ResultsHash +### LastCommitHash ```go -block.ResultsHash == SimpleMerkleRoot(state.LastResults) +block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit) ``` -Simple Merkle root of the results of the transactions in the previous block. +Simple Merkle root of the votes included in the block. +These are the votes that committed the previous block. -The first block has `block.Header.ResultsHash == []byte{}`. +The first block has `block.Header.LastCommitHash == []byte{}` -### AppHash +### DataHash ```go -block.AppHash == state.AppHash +block.Header.DataHash == SimpleMerkleRoot(block.Txs.Txs) ``` -Arbitrary byte array returned by the application after executing and commiting the previous block. - -The first block has `block.Header.AppHash == []byte{}`. +Simple Merkle root of the transactions included in the block. ### ValidatorsHash @@ -288,8 +303,10 @@ This can be used to validate the `LastCommit` included in the next block. ```go block.NextValidatorsHash == SimpleMerkleRoot(state.NextValidators) ``` + Simple Merkle root of the next validator set that will be the validator set that commits the next block. -Modifications to the validator set are defined by the application. +This is included so that the current validator set gets a chance to sign the +next validator sets Merkle root. ### ConsensusParamsHash @@ -298,17 +315,26 @@ block.ConsensusParamsHash == SimpleMerkleRoot(state.ConsensusParams) ``` Simple Merkle root of the consensus parameters. -May be updated by the application. -### Proposer +### AppHash + +```go +block.AppHash == state.AppHash +``` + +Arbitrary byte array returned by the application after executing and commiting the previous block. It serves as the basis for validating any merkle proofs that comes from the ABCI application and represents the state of the actual application rather than the state of the blockchain itself. + +The first block has `block.Header.AppHash == []byte{}`. + +### LastResultsHash ```go -block.Header.Proposer in state.Validators +block.ResultsHash == SimpleMerkleRoot(state.LastResults) ``` -Original proposer of the block. Must be a current validator. +Simple Merkle root of the results of the transactions in the previous block. -NOTE: we also need to track the round. +The first block has `block.Header.ResultsHash == []byte{}`. ## EvidenceHash @@ -318,6 +344,14 @@ block.EvidenceHash == SimpleMerkleRoot(block.Evidence) Simple Merkle root of the evidence of Byzantine behaviour included in this block. +### ProposerAddress + +```go +block.Header.ProposerAddress in state.Validators +``` + +Address of the original proposer of the block. 
 ## Txs

 Arbitrary length array of arbitrary length byte-arrays.

@@ -366,7 +400,7 @@ must be greater than 2/3 of the total voting power of the complete validator set

 ### Vote

 A vote is a signed message broadcast in the consensus for a particular block at a particular height and round.
-When stored in the blockchain or propagated over the network, votes are encoded in TMBIN.
+When stored in the blockchain or propagated over the network, votes are encoded in Amino.
 For signing, votes are encoded in JSON, and the ChainID is included, in the form of the `CanonicalSignBytes`.

 We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the CanonicalSignBytes
@@ -383,16 +417,7 @@ against the given signature and message bytes.

 ## Evidence

-There is currently only one kind of evidence:
-
-```
-// amino: "tendermint/DuplicateVoteEvidence"
-type DuplicateVoteEvidence struct {
-	PubKey crypto.PubKey
-	VoteA  *Vote
-	VoteB  *Vote
-}
-```
+There is currently only one kind of evidence, `DuplicateVoteEvidence`.

 DuplicateVoteEvidence `ev` is valid if

@@ -427,11 +452,8 @@ Execute(s State, app ABCIApp, block Block) State {
       AppHash: AppHash,
       LastValidators: state.Validators,
       Validators: state.NextValidators,
-      NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges),
+      NextValidators: UpdateValidators(state.NextValidators, ValidatorChanges),
       ConsensusParams: UpdateConsensusParams(state.ConsensusParams, ConsensusParamChanges),
   }
 }
-
 ```
-
-
diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md
index 49c88475bd3..1af470403eb 100644
--- a/docs/spec/blockchain/encoding.md
+++ b/docs/spec/blockchain/encoding.md
@@ -48,33 +48,33 @@ spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambigua
 In what follows, we provide the type names and prefix bytes directly.

 Notice that when encoding byte-arrays, the length of the byte-array is appended
-to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes>
-<Length> <ByteArray>`. In other words, to encode any type listed below you do not need to be
+to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes> <Length> <ByteArray>`. In other words, to encode any type listed below you do not need to be
 familiar with amino encoding.
 You can simply use the table below and concatenate Prefix || Length (of raw bytes) || raw bytes
 (while || stands for byte concatenation here).

-| Type | Name | Prefix | Length | Notes |
-| ---- | ---- | ------ | ----- | ------ |
-| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE64 | 0x20 | |
-| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE987 | 0x21 | |
-| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288910 | 0x40 | |
-| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79B | 0x20 | |
-| SignatureEd25519 | tendermint/SignatureEd25519 | 0x2031EA53 | 0x40 | |
+| Type               | Name                          | Prefix     | Length   | Notes |
+| ------------------ | ----------------------------- | ---------- | -------- | ----- |
+| PubKeyEd25519      | tendermint/PubKeyEd25519      | 0x1624DE64 | 0x20     |       |
+| PubKeySecp256k1    | tendermint/PubKeySecp256k1    | 0xEB5AE987 | 0x21     |       |
+| PrivKeyEd25519     | tendermint/PrivKeyEd25519     | 0xA3288910 | 0x40     |       |
+| PrivKeySecp256k1   | tendermint/PrivKeySecp256k1   | 0xE1B0F79B | 0x20     |       |
+| SignatureEd25519   | tendermint/SignatureEd25519   | 0x2031EA53 | 0x40     |       |
 | SignatureSecp256k1 | tendermint/SignatureSecp256k1 | 0x7FC4A495 | variable |       |

 ### Examples

 1.
For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey -`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` -would be encoded as -`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` + `020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` + would be encoded as + `EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` 2. For example, the variable size Secp256k1 signature (in this particular example 70 or 0x46 bytes) -`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` -would be encoded as -`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` + `304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` + would be encoded as + `16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` ### Addresses @@ -152,28 +152,27 @@ func MakeParts(obj interface{}, partSize int) []Part For an overview of Merkle trees, see [wikipedia](https://en.wikipedia.org/wiki/Merkle_tree) - A Simple Tree is a simple compact binary tree for a static list of items. Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure. In a Simple Tree, the transactions and validation signatures of a block are hashed using this simple merkle tree logic. If the number of items is not a power of two, the tree will not be full and some leaf nodes will be at different levels. Simple Tree tries to keep both sides of the tree the same size, but the left side may be one -greater, for example: +greater, for example: ``` - Simple Tree with 6 items Simple Tree with 7 items - - * * - / \ / \ - / \ / \ - / \ / \ - / \ / \ - * * * * - / \ / \ / \ / \ - / \ / \ / \ / \ - / \ / \ / \ / \ + Simple Tree with 6 items Simple Tree with 7 items + + * * + / \ / \ + / \ / \ + / \ / \ + / \ / \ + * * * * + / \ / \ / \ / \ + / \ / \ / \ / \ + / \ / \ / \ / \ * h2 * h5 * * * h6 - / \ / \ / \ / \ / \ + / \ / \ / \ / \ / \ h0 h1 h3 h4 h0 h1 h2 h3 h4 h5 ``` @@ -224,7 +223,6 @@ For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `str Proof that a leaf is in a Merkle tree consists of a simple structure: - ``` type SimpleProof struct { Aunts [][]byte @@ -265,8 +263,8 @@ func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byt The Simple Tree is used to merkelize a list of items, so to merkelize a (short) dictionary of key-value pairs, encode the dictionary as an -ordered list of ``KVPair`` structs. The block hash is such a hash -derived from all the fields of the block ``Header``. The state hash is +ordered list of `KVPair` structs. The block hash is such a hash +derived from all the fields of the block `Header`. The state hash is similarly derived. 
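+
+The split rule above ("the left side may be one greater") fits in a short Go
+sketch. Hashing the bare concatenation of child hashes with plain SHA-256 is an
+assumption made to keep the example self-contained; the spec hashes
+amino-encoded values:
+
+```go
+package merkle
+
+import "crypto/sha256"
+
+// simpleMerkleRoot computes the Simple Tree root of pre-hashed leaves:
+// a single leaf is its own root, and larger lists are split so the left
+// side gets the extra item when the count is odd, matching the trees drawn
+// above for 6 and 7 items.
+func simpleMerkleRoot(items [][]byte) []byte {
+	switch len(items) {
+	case 0:
+		return nil
+	case 1:
+		return items[0]
+	default:
+		k := (len(items) + 1) / 2 // left side may be one greater
+		h := sha256.New()
+		h.Write(simpleMerkleRoot(items[:k]))
+		h.Write(simpleMerkleRoot(items[k:]))
+		return h.Sum(nil)
+	}
+}
+```
+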
 ### IAVL+ Tree

@@ -277,13 +275,11 @@ Because Tendermint only uses a Simple Merkle Tree, application developers are ex

 ### Amino

-TODO: improve this
-
 Amino also supports JSON encoding - registered types are simply encoded as:

 ```
 {
-  "type": "<amino type name>",
+  "type": "<amino type name>",
   "value": <json>
 }
 ```

@@ -298,20 +294,18 @@ For instance, an ED25519 PubKey would look like:
 ```

 Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
-`"type"` is the full disfix bytes for Ed25519 pubkeys.
-
+`"type"` is the amino name for Ed25519 pubkeys.

 ### Signed Messages

-Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format.
+Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format
+(NOTE: this is subject to change: https://github.com/tendermint/tendermint/issues/1622)

-When signing, the elements of a message are sorted by key and the sorted message is embedded in an
-outer JSON that includes a `chain_id` field.
+When signing, the elements of a message are sorted by key and prepended with
+a `@chain_id` and `@type` field.
 We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look
 like:

 ```json
-{"chain_id":"my-chain-id","vote":{"block_id":{"hash":DEADBEEF,"parts":{"hash":BEEFDEAD,"total":3}},"height":3,"round":2,"timestamp":1234567890, "type":2}
+{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}
 ```
-
-Note how the fields within each level are sorted.
diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md
index df86cd45021..e9da53b5073 100644
--- a/docs/spec/blockchain/state.md
+++ b/docs/spec/blockchain/state.md
@@ -8,10 +8,10 @@ transactions are never included in blocks, but their Merkle roots are - the stat

 Note that the `State` object itself is an implementation detail, since it is
 never included in a block or gossipped over the network, and we never compute
-its hash. However, the types it contains are part of the specification, since
-their Merkle roots are included in blocks.
-
-For details on an implementation of `State` with persistence, see TODO
+its hash. Thus we do not include here details of how the `State` object is
+persisted or queried. That said, the types it contains are part of the specification, since
+their Merkle roots are included in blocks and their values are used in
+validation.

 ```go
 type State struct {
@@ -32,20 +32,15 @@ type State struct {

 type Result struct {
     Code uint32
     Data []byte
-    Tags []KVPair
-}
-
-type KVPair struct {
-    Key   []byte
-    Value []byte
 }
 ```

 `Result` is the result of executing a transaction against the application.
-It returns a result code, an arbitrary byte array (ie. a return value),
-and a list of key-value pairs ordered by key. The key-value pairs, or tags,
-can be used to index transactions according to their "effects", which are
-represented in the tags.
+It returns a result code and an arbitrary byte array (ie. a return value).
+
+NOTE: the Result needs to be updated to include more fields returned from
+processing transactions, like gas variables and tags - see
+[issue 1007](https://github.com/tendermint/tendermint/issues/1007).
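+
+To connect this to the header fields in blockchain.md, here is a hedged sketch
+of how `state.LastResults` could be reduced to the `LastResultsHash` of the
+next block. The JSON leaf encoding and the two-child SHA-256 tree are
+stand-ins for the amino encoding and the Simple Tree the spec actually
+prescribes:
+
+```go
+package state
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+)
+
+type Result struct {
+	Code uint32
+	Data []byte
+}
+
+// lastResultsHash merkleizes deterministic encodings of the results.
+func lastResultsHash(results []Result) []byte {
+	leaves := make([][]byte, len(results))
+	for i, r := range results {
+		bz, _ := json.Marshal(r) // assumed stand-in for a deterministic encoding
+		sum := sha256.Sum256(bz)
+		leaves[i] = sum[:]
+	}
+	return merkleRoot(leaves)
+}
+
+// merkleRoot is the Simple Tree from encoding.md, simplified to raw SHA-256.
+func merkleRoot(items [][]byte) []byte {
+	switch len(items) {
+	case 0:
+		return nil
+	case 1:
+		return items[0]
+	default:
+		k := (len(items) + 1) / 2
+		h := sha256.New()
+		h.Write(merkleRoot(items[:k]))
+		h.Write(merkleRoot(items[k:]))
+		return h.Sum(nil)
+	}
+}
+```
+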
 ### Validator

@@ -60,7 +55,7 @@ type Validator struct {
 }
 ```

-The `state.Validators` and `state.LastValidators` must always by sorted by validator address,
+The `state.Validators`, `state.LastValidators`, and `state.NextValidators` must always be sorted by validator address,
 so that there is a canonical order for computing the SimpleMerkleRoot.

 We also define a `TotalVotingPower` function, to return the total voting power:

@@ -75,7 +70,61 @@ func TotalVotingPower(vals []Validators) int64{
 }
 ```

-
 ### ConsensusParams

-TODO
+ConsensusParams define various limits for blockchain data structures.
+Like validator sets, they are set during genesis and can be updated by the application through ABCI.
+
+```
+type ConsensusParams struct {
+    BlockSize
+    TxSize
+    BlockGossip
+    EvidenceParams
+}
+
+type BlockSize struct {
+    MaxBytes int
+    MaxGas   int64
+}
+
+type TxSize struct {
+    MaxBytes int
+    MaxGas   int64
+}
+
+type BlockGossip struct {
+    BlockPartSizeBytes int
+}
+
+type EvidenceParams struct {
+    MaxAge int64
+}
+```
+
+#### BlockSize
+
+The total size of a block is limited in bytes by the `ConsensusParams.BlockSize.MaxBytes`.
+Proposed blocks must be less than this size, and will be considered invalid
+otherwise.
+
+Blocks should additionally be limited by the amount of "gas" consumed by the
+transactions in the block, though this is not yet implemented.
+
+#### TxSize
+
+These parameters are not yet enforced and may disappear. See [issue
+#2347](https://github.com/tendermint/tendermint/issues/2347).
+
+#### BlockGossip
+
+When gossipping blocks in the consensus, they are first split into parts. The
+size of each part is `ConsensusParams.BlockGossip.BlockPartSizeBytes`.
+
+#### EvidenceParams
+
+For evidence in a block to be valid, it must satisfy:
+
+```
+block.Header.Height - evidence.Height < ConsensusParams.EvidenceParams.MaxAge
+```
diff --git a/docs/spec/consensus/bft-time.md b/docs/spec/consensus/bft-time.md
index a005e904051..06e66dbfcce 100644
--- a/docs/spec/consensus/bft-time.md
+++ b/docs/spec/consensus/bft-time.md
@@ -16,10 +16,10 @@ In the context of Tendermint, time is of type int64 and denotes UNIX time in mil
 corresponds to the number of milliseconds since January 1, 1970.

 Before defining rules that need to be enforced by the Tendermint consensus protocol, so that the properties above hold, we introduce the following definition:

-- median of a set of `Vote` messages is equal to the median of `Vote.Time` fields of the corresponding `Vote` messages,
+- median of a Commit is equal to the median of `Vote.Time` fields of the `Vote` messages,
  where the value of `Vote.Time` is counted a number of times proportional to the voting power of the process. As in Tendermint the voting power is not uniform (one process one vote), a vote message is actually an aggregator of the same votes whose
-number is equal to the voting power of the process that has casted the corresponding votes message.
+number is equal to the voting power of the process that has cast the corresponding vote message (a sketch of this weighted median is given after the rules below).

 Let's consider the following example:

 - we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10)

@@ -40,17 +40,15 @@ rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`.

 - Furthermore, when creating the `vote` message, the following rules for determining `vote.Time` field should hold:

-  - if `rs.Proposal` is defined then
-    `vote.Time = max(rs.Proposal.Timestamp + 1, time.Now())`, where `time.Now()`
-    denotes local Unix time in milliseconds.
+  - if `rs.LockedBlock` is defined then
+    `vote.Time = max(rs.LockedBlock.Timestamp + config.BlockTimeIota, time.Now())`, where `time.Now()`
+    denotes local Unix time in milliseconds, and `config.BlockTimeIota` is a configuration parameter that corresponds
+    to the minimum timestamp increment of the next block.
+
+  - else if `rs.Proposal` is defined then
+    `vote.Time = max(rs.Proposal.Timestamp + config.BlockTimeIota, time.Now())`,

-  - if `rs.Proposal` is not defined and `rs.Votes` contains +2/3 of the corresponding vote messages (votes for the
-    current height and round, and with the corresponding type (`Prevote` or `Precommit`)), then
-
-    `vote.Time = max(median(getVotes(rs.Votes, vote.Height, vote.Round, vote.Type)), time.Now())`,
-
-    where `getVotes` function returns the votes for particular `Height`, `Round` and `Type`.
-    The second rule is relevant for the case when a process jumps to a higher round upon receiving +2/3 votes for a higher
-    round, but the corresponding `Proposal` message for the higher round hasn't been received yet.
+  - otherwise, `vote.Time = time.Now()`. In this case the vote is for `nil` so it is not taken into account for
+    the timestamp of the next block.
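+
+A minimal sketch of the weighted median referenced above, where each
+`Vote.Time` is counted a number of times proportional to the voter's power;
+the `voteTime` type and the midpoint convention are illustrative assumptions:
+
+```go
+package bfttime
+
+import "sort"
+
+type voteTime struct {
+	Time  int64 // UNIX time in milliseconds
+	Power int64 // voting power of the sender
+}
+
+// medianTime returns the power-weighted median of the vote timestamps.
+// Note that it sorts the given slice in place.
+func medianTime(votes []voteTime) int64 {
+	sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time })
+	var total int64
+	for _, v := range votes {
+		total += v.Power
+	}
+	half := (total + 1) / 2 // weight position of the median
+	var acc int64
+	for _, v := range votes {
+		acc += v.Power
+		if acc >= half {
+			return v.Time
+		}
+	}
+	return 0 // empty commit; callers are assumed to guard against this
+}
+```
+
+For the example above, sorting the votes by time and walking the cumulative
+power picks the timestamp at which at least half of the total voting power has
+been seen, which is exactly the "counted proportionally to power" median.
+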
diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
index d6804779c11..c77c09d2687 100644
--- a/docs/spec/consensus/consensus.md
+++ b/docs/spec/consensus/consensus.md
@@ -2,31 +2,31 @@

 ## Terms

-- The network is composed of optionally connected *nodes*. Nodes
-  directly connected to a particular node are called *peers*.
-- The consensus process in deciding the next block (at some *height*
-  `H`) is composed of one or many *rounds*.
-- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit`
-  represent state machine states of a round. (aka `RoundStep` or
-  just "step").
-- A node is said to be *at* a given height, round, and step, or at
-  `(H,R,S)`, or at `(H,R)` in short to omit the step.
-- To *prevote* or *precommit* something means to broadcast a [prevote
-  vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote)
-  or [first precommit
-  vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit)
-  for something.
-- A vote *at* `(H,R)` is a vote signed with the bytes for `H` and `R`
-  included in its [sign-bytes](block-structure.html#vote-sign-bytes).
-- *+2/3* is short for "more than 2/3"
-- *1/3+* is short for "1/3 or more"
-- A set of +2/3 of prevotes for a particular block or `<nil>` at
-  `(H,R)` is called a *proof-of-lock-change* or *PoLC* for short.
+- The network is composed of optionally connected _nodes_. Nodes
+  directly connected to a particular node are called _peers_.
+- The consensus process in deciding the next block (at some _height_
+  `H`) is composed of one or many _rounds_.
+- `NewHeight`, `Propose`, `Prevote`, `Precommit`, and `Commit`
+  represent state machine states of a round. (aka `RoundStep` or
+  just "step").
+- A node is said to be _at_ a given height, round, and step, or at
+  `(H,R,S)`, or at `(H,R)` in short to omit the step.
+- To _prevote_ or _precommit_ something means to broadcast a [prevote
+  vote](https://godoc.org/github.com/tendermint/tendermint/types#Vote)
+  or [first precommit
+  vote](https://godoc.org/github.com/tendermint/tendermint/types#FirstPrecommit)
+  for something.
+- A vote _at_ `(H,R)` is a vote signed with the bytes for `H` and `R`
+  included in its [sign-bytes](../blockchain/blockchain.md).
+- _+2/3_ is short for "more than 2/3"
+- _1/3+_ is short for "1/3 or more"
+- A set of +2/3 of prevotes for a particular block or `<nil>` at
+  `(H,R)` is called a _proof-of-lock-change_ or _PoLC_ for short.

 ## State Machine Overview

 At each height of the blockchain a round-based protocol is run to
-determine the next block. Each round is composed of three *steps*
+determine the next block. Each round is composed of three _steps_
 (`Propose`, `Prevote`, and `Precommit`), along with two special steps
 `Commit` and `NewHeight`.

 In the optimal scenario, the order of steps is:

 ```
 NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
 ```

-The sequence `(Propose -> Prevote -> Precommit)` is called a *round*.
+The sequence `(Propose -> Prevote -> Precommit)` is called a _round_.
 There may be more than one round required to commit a block at a given
 height. Examples for why more rounds may be required include:

-- The designated proposer was not online.
-- The block proposed by the designated proposer was not valid.
-- The block proposed by the designated proposer did not propagate
-  in time.
-- The block proposed was valid, but +2/3 of prevotes for the proposed
-  block were not received in time for enough validator nodes by the
-  time they reached the `Precommit` step. Even though +2/3 of prevotes
-  are necessary to progress to the next step, at least one validator
-  may have voted `<nil>` or maliciously voted for something else.
-- The block proposed was valid, and +2/3 of prevotes were received for
-  enough nodes, but +2/3 of precommits for the proposed block were not
-  received for enough validator nodes.
+- The designated proposer was not online.
+- The block proposed by the designated proposer was not valid.
+- The block proposed by the designated proposer did not propagate
+  in time.
+- The block proposed was valid, but +2/3 of prevotes for the proposed
+  block were not received in time for enough validator nodes by the
+  time they reached the `Precommit` step. Even though +2/3 of prevotes
+  are necessary to progress to the next step, at least one validator
+  may have voted `<nil>` or maliciously voted for something else.
+- The block proposed was valid, and +2/3 of prevotes were received for
+  enough nodes, but +2/3 of precommits for the proposed block were not
+  received for enough validator nodes.

 Some of these problems are resolved by moving onto the next round &
 proposer. Others are resolved by increasing certain round timeout
@@ -80,14 +80,13 @@ parameters over each successive round.

 +--------------------------------------------------------------------+
 ```

-Background Gossip
-=================
+# Background Gossip

 A node may not have a corresponding validator private key, but it
 nevertheless plays an active role in the consensus process by relaying
 relevant meta-data, proposals, blocks, and votes to its peers. A node
 that has the private keys of an active validator and is engaged in
-signing votes is called a *validator-node*. All nodes (not just
+signing votes is called a _validator-node_. All nodes (not just
 validator-nodes) have an associated state (the current height, round,
 and step) and work to make progress.

@@ -97,21 +96,21 @@ epidemic gossip protocol is implemented among some of these channels to bring
 peers up to speed on the most recent state of consensus. For example,

-- Nodes gossip `PartSet` parts of the current round's proposer's
-  proposed block. A LibSwift inspired algorithm is used to quickly
-  broadcast blocks across the gossip network.
-- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead
-  of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s
-  current (or future) round to enable it to progress forward.
-- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
-  round if one is proposed.
-- Nodes gossip to nodes lagging in blockchain height with block
-  [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit)
-  for older blocks.
-- Nodes opportunistically gossip `HasVote` messages to hint peers what
-  votes it already has.
-- Nodes broadcast their current state to all neighboring peers. (but
-  is not gossiped further)
+- Nodes gossip `PartSet` parts of the current round's proposer's
+  proposed block. A LibSwift inspired algorithm is used to quickly
+  broadcast blocks across the gossip network.
+- Nodes gossip prevote/precommit votes. A node `NODE_A` that is ahead
+  of `NODE_B` can send `NODE_B` prevotes or precommits for `NODE_B`'s
+  current (or future) round to enable it to progress forward.
+- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
+  round if one is proposed.
+- Nodes gossip to nodes lagging in blockchain height with block
+  [commits](https://godoc.org/github.com/tendermint/tendermint/types#Commit)
+  for older blocks.
+- Nodes opportunistically gossip `HasVote` messages to hint peers what
+  votes it already has.
+- Nodes broadcast their current state to all neighboring peers. (but
+  is not gossiped further)

 There's more, but let's not get ahead of ourselves here.

@@ -144,14 +143,14 @@ and all prevotes at `PoLC-Round`. --> goto `Prevote(H,R)` - After

 Upon entering `Prevote`, each validator broadcasts its prevote vote.

-- First, if the validator is locked on a block since `LastLockRound`
-  but now has a PoLC for something else at round `PoLC-Round` where
-  `LastLockRound < PoLC-Round < R`, then it unlocks.
-- If the validator is still locked on a block, it prevotes that.
-- Else, if the proposed block from `Propose(H,R)` is good, it
-  prevotes that.
-- Else, if the proposal is invalid or wasn't received on time, it
-  prevotes `<nil>`.
+- First, if the validator is locked on a block since `LastLockRound`
+  but now has a PoLC for something else at round `PoLC-Round` where
+  `LastLockRound < PoLC-Round < R`, then it unlocks.
+- If the validator is still locked on a block, it prevotes that.
+- Else, if the proposed block from `Propose(H,R)` is good, it
+  prevotes that.
+- Else, if the proposal is invalid or wasn't received on time, it
+  prevotes `<nil>`.

 The `Prevote` step ends: - After +2/3 prevotes for a particular block or
 `<nil>`. --> goto `Precommit(H,R)` - After `timeoutPrevote` after

@@ -161,11 +160,12 @@ receiving any +2/3 prevotes. --> goto `Precommit(H,R)` - After

 ### Precommit Step (height:H,round:R)

 Upon entering `Precommit`, each validator broadcasts its precommit vote.
+
-- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
-  (re)locks (or changes lock to) and precommits `B` and sets
-  `LastLockRound = R`. - Else, if the validator has a PoLC at `(H,R)` for
-  `<nil>`, it unlocks and precommits `<nil>`. - Else, it keeps the lock
-  unchanged and precommits `<nil>`.
+- If the validator has a PoLC at `(H,R)` for a particular block `B`, it
+  (re)locks (or changes lock to) and precommits `B` and sets
+  `LastLockRound = R`.
+- Else, if the validator has a PoLC at `(H,R)` for `<nil>`, it unlocks and
+  precommits `<nil>`.
+- Else, it keeps the lock unchanged and precommits `<nil>`.

 A precommit for `<nil>` means "I didn’t see a PoLC for this round, but I
 did get +2/3 prevotes and waited a bit".
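+
+A compact Go sketch of the three-way Precommit rule above; the string-typed
+block IDs (with `""` standing in for `<nil>`) and the `havePoLC` flag are
+assumptions made for illustration:
+
+```go
+package consensus
+
+type lockState struct {
+	lockedBlock   string // "" means not locked
+	lastLockRound int64
+}
+
+// precommit returns the block to precommit at (H,R), updating the lock.
+func (s *lockState) precommit(havePoLC bool, polcBlock string, round int64) string {
+	switch {
+	case havePoLC && polcBlock != "": // PoLC for a block B: (re)lock and precommit B
+		s.lockedBlock = polcBlock
+		s.lastLockRound = round
+		return polcBlock
+	case havePoLC: // PoLC for <nil>: unlock and precommit <nil>
+		s.lockedBlock = ""
+		return ""
+	default: // no PoLC: keep the lock unchanged and precommit <nil>
+		return ""
+	}
+}
+```
+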
@@ -177,24 +177,24 @@ conditions](#common-exit-conditions) ### Common exit conditions -- After +2/3 precommits for a particular block. --> goto - `Commit(H)` -- After any +2/3 prevotes received at `(H,R+x)`. --> goto - `Prevote(H,R+x)` -- After any +2/3 precommits received at `(H,R+x)`. --> goto - `Precommit(H,R+x)` +- After +2/3 precommits for a particular block. --> goto + `Commit(H)` +- After any +2/3 prevotes received at `(H,R+x)`. --> goto + `Prevote(H,R+x)` +- After any +2/3 precommits received at `(H,R+x)`. --> goto + `Precommit(H,R+x)` ### Commit Step (height:H) -- Set `CommitTime = now()` -- Wait until block is received. --> goto `NewHeight(H+1)` +- Set `CommitTime = now()` +- Wait until block is received. --> goto `NewHeight(H+1)` ### NewHeight Step (height:H) -- Move `Precommits` to `LastCommit` and increment height. -- Set `StartTime = CommitTime+timeoutCommit` -- Wait until `StartTime` to receive straggler commits. --> goto - `Propose(H,0)` +- Move `Precommits` to `LastCommit` and increment height. +- Set `StartTime = CommitTime+timeoutCommit` +- Wait until `StartTime` to receive straggler commits. --> goto + `Propose(H,0)` ## Proofs @@ -236,20 +236,20 @@ Further, define the JSet at height `H` of a set of validators `VSet` to be the union of the JSets for each validator in `VSet`. For a given commit by honest validators at round `R` for block `B` we can construct a JSet to justify the commit for `B` at `R`. We say that a JSet -*justifies* a commit at `(H,R)` if all the committers (validators in the +_justifies_ a commit at `(H,R)` if all the committers (validators in the commit-set) are each justified in the JSet with no duplicitous vote signatures (by the committers). -- **Lemma**: When a fork is detected by the existence of two - conflicting [commits](./validators.html#commiting-a-block), the - union of the JSets for both commits (if they can be compiled) must - include double-signing by at least 1/3+ of the validator set. - **Proof**: The commit cannot be at the same round, because that - would immediately imply double-signing by 1/3+. Take the union of - the JSets of both commits. If there is no double-signing by at least - 1/3+ of the validator set in the union, then no honest validator - could have precommitted any different block after the first commit. - Yet, +2/3 did. Reductio ad absurdum. +- **Lemma**: When a fork is detected by the existence of two + conflicting [commits](./validators.html#commiting-a-block), the + union of the JSets for both commits (if they can be compiled) must + include double-signing by at least 1/3+ of the validator set. + **Proof**: The commit cannot be at the same round, because that + would immediately imply double-signing by 1/3+. Take the union of + the JSets of both commits. If there is no double-signing by at least + 1/3+ of the validator set in the union, then no honest validator + could have precommitted any different block after the first commit. + Yet, +2/3 did. Reductio ad absurdum. As a corollary, when there is a fork, an external process can determine the blame by requiring each validator to justify all of its round votes. diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md index 0ed9d36d406..1b608627ce6 100644 --- a/docs/spec/consensus/light-client.md +++ b/docs/spec/consensus/light-client.md @@ -1,14 +1,14 @@ # Light client -A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs -about the blockchain application. 
In this document we describe mechanisms that ensures that the Tendermint light client
-has the same level of security as Full Node processes (without being itself a Full Node).
+A light client is a process that connects to the Tendermint Full Node(s) and then tries to verify the Merkle proofs
+about the blockchain application. In this document we describe mechanisms that ensure that the Tendermint light client
+has the same level of security as Full Node processes (without being itself a Full Node).

-To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
-Validating a blockchain header in Tendermint consists in verifying that the header is committed (signed) by >2/3 of the
-voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
-core functionality of the light client is updating the current validator set, that is then used to verify the
-blockchain header, and further the corresponding Merkle proofs.
+To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
+Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
+voting power of the corresponding validator set. As the validator set is a dynamic set (it is changing), one of the
+core functionalities of the light client is updating the current validator set, which is then used to verify the
+blockchain header, and further the corresponding Merkle proofs.

 For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
 Tendermint RPC:

@@ -19,51 +19,50 @@ Validators(height int64) (ResultValidators, error) // returns validator set for
 LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number

 type SignedHeader struct {
-    Header Header
+    Header       Header
     Commit Commit
-    ValSetNumber int64
+    ValSetNumber int64
 }

 type ResultValidators struct {
-    BlockHeight int64
-    Validators []Validator
-    // time the current validator set is initialised, i.e, time of the last validator change before header BlockHeight
-    ValSetTime int64
+    BlockHeight int64
+    Validators  []Validator
+    // time the current validator set is initialised, i.e., time of the last validator change before header BlockHeight
+    ValSetTime int64
 }
 ```

-We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is
-being assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers
+We assume that Tendermint keeps track of the validator set changes and that each time a validator set is changed it is
+assigned the next sequence number. We can call this number the validator set sequence number. Tendermint also remembers
 the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
 as validator set init time.
 Furthermore, we assume that each validator set change is signed (committed) by the current validator set.
More precisely,
-given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
-validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
-set), and then starting from the block `H+2`, it will be signed by the next validator set.
-
-Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
-names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
-clear and simple. Furthermore, note that in case of the third function, the returned header has `ValSetNumber` equals to
-`valSetNumber+1`.
+given a block `H` that contains transactions that are modifying the current validator set, the Merkle root hash of the next
+validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
+set), and then starting from the block `H+2`, it will be signed by the next validator set.
+Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
+names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec more
+clear and simple. Furthermore, note that in the case of the third function, the returned header has `ValSetNumber` equal to
+`valSetNumber+1`.

 Locally, the light client manages the following state:

 ```golang
-valSet []Validator // current validator set (last known and verified validator set)
-valSetNumber int64 // sequence number of the current validator set
+valSet       []Validator // current validator set (last known and verified validator set)
+valSetNumber int64       // sequence number of the current validator set
 valSetHash []byte // hash of the current validator set
-valSetTime int64 // time when the current validator set is initialised
+valSetTime   int64       // time when the current validator set is initialised
 ```

 The light client is initialised with the trusted validator set, for example based on the known validator set hash,
 validator set sequence number and the validator set init time.

 The core of the light client logic is captured by the VerifyAndUpdate function that is used to 1) verify if the given header is valid,
-and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).
+and 2) update the validator set (when the given header is valid and it is more recent than the seen headers).
```golang
 VerifyAndUpdate(signedHeader SignedHeader):
-    assertThat signedHeader.valSetNumber >= valSetNumber
+    assertThat signedHeader.valSetNumber >= valSetNumber
     if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
         setValidatorSet(signedHeader)
         return true
@@ -76,7 +75,7 @@ isValid(signedHeader SignedHeader):
     assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValSetHash
     assertThat signedHeader is passing basic validation
     if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
-    else
+    else
         return false

 setValidatorSet(signedHeader SignedHeader):
@@ -85,7 +84,7 @@ setValidatorSet(signedHeader SignedHeader):
     valSet = nextValSet.Validators
     valSetHash = signedHeader.Header.ValidatorsHash
     valSetNumber = signedHeader.ValSetNumber
-    valSetTime = nextValSet.ValSetTime
+    valSetTime = nextValSet.ValSetTime

 votingPower(commit Commit):
     votingPower = 0
@@ -96,9 +95,9 @@ votingPower(commit Commit):

 votingPower(validatorSet []Validator):
     for each validator in validatorSet do:
-        votingPower += validator.VotingPower
+        votingPower += validator.VotingPower
     return votingPower
-
+
 updateValidatorSet(valSetNumberOfTheHeader):
     while valSetNumber != valSetNumberOfTheHeader do
         signedHeader = LastHeader(valSetNumber)
@@ -110,5 +109,5 @@ updateValidatorSet(valSetNumberOfTheHeader):

 Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
 i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older
-headers (go backward) the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent
-checks fail, a light client need to implement some recovery strategy, for example connecting to other FullNode.
+headers (go backward), the same mechanisms and similar logic can be used. In case a call to the FullNode or subsequent
+checks fail, a light client needs to implement some recovery strategy, for example connecting to another FullNode.
diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md
index 9b5e496752f..47366a549ca 100644
--- a/docs/spec/p2p/connection.md
+++ b/docs/spec/p2p/connection.md
@@ -4,10 +4,10 @@

 `MConnection` is a multiplex connection that supports multiple independent streams
 with distinct quality of service guarantees atop a single TCP connection.
-Each stream is known as a `Channel` and each `Channel` has a globally unique *byte id*.
+Each stream is known as a `Channel` and each `Channel` has a globally unique _byte id_.
 Each `Channel` also has a relative priority that determines the quality of service
 of the `Channel` compared to other `Channel`s.
-The *byte id* and the relative priorities of each `Channel` are configured upon
+The _byte id_ and the relative priorities of each `Channel` are configured upon
 initialization of the connection.

 The `MConnection` supports three packet types:

@@ -38,7 +38,7 @@ type msgPacket struct {
 }
 ```

-The `msgPacket` is serialized using [go-wire](https://github.com/tendermint/go-wire) and prefixed with 0x3.
+The `msgPacket` is serialized using [go-amino](https://github.com/tendermint/go-amino) and prefixed with 0x3.
 The received `Bytes` of a sequential set of packets are appended together
 until a packet with `EOF=1` is received, then the complete serialized message
 is returned for processing by the `onReceive` function of the corresponding channel.
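+
+The reassembly rule just described fits in a few lines of Go; the `channel`
+type and its `recvBuf` field are assumed implementation details, not part of
+the spec:
+
+```go
+package p2p
+
+type msgPacket struct {
+	ChannelID byte
+	EOF       byte // 1 means the message ends here
+	Bytes     []byte
+}
+
+type channel struct {
+	recvBuf   []byte
+	onReceive func(msgBytes []byte)
+}
+
+// recvPacket appends packet bytes until EOF=1 arrives, then delivers the
+// complete serialized message to the channel's onReceive function.
+func (ch *channel) recvPacket(p msgPacket) {
+	ch.recvBuf = append(ch.recvBuf, p.Bytes...)
+	if p.EOF == 1 {
+		ch.onReceive(ch.recvBuf)
+		ch.recvBuf = nil
+	}
+}
+```
+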
@@ -53,13 +53,14 @@ Messages are chosen for a batch one at a time from the channel with the lowest r

 ## Sending Messages

 There are two methods for sending messages:
+
 ```go
 func (m MConnection) Send(chID byte, msg interface{}) bool {}
 func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
 ```

 `Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
-for the channel with the given id byte `chID`. The message `msg` is serialized
+for the channel with the given id byte `chID`. The message `msg` is serialized
 using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.

 `TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
@@ -76,8 +77,8 @@ and other higher level thread-safe data used by the reactors.

 ## Switch/Reactor

 The `Switch` handles peer connections and exposes an API to receive incoming messages
-on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
-or more `Channels`. So while sending outgoing messages is typically performed on the peer,
+on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
+or more `Channels`. So while sending outgoing messages is typically performed on the peer,
 incoming messages are received on the reactor.

 ```go
diff --git a/docs/spec/p2p/node.md b/docs/spec/p2p/node.md
index 366b27dd2df..2771356af6f 100644
--- a/docs/spec/p2p/node.md
+++ b/docs/spec/p2p/node.md
@@ -17,8 +17,9 @@ See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/maste
 ## New Full Node

 A new node needs a few things to connect to the network:
+
 - a list of seeds, which can be provided to Tendermint via config file or flags,
-or hardcoded into the software by in-process apps
+  or hardcoded into the software by in-process apps
 - a `ChainID`, also called `Network` at the p2p layer
 - a recent block height, H, and hash, HASH for the blockchain.

diff --git a/docs/spec/p2p/peer.md b/docs/spec/p2p/peer.md
index dadb4a3a599..116fec4f7db 100644
--- a/docs/spec/p2p/peer.md
+++ b/docs/spec/p2p/peer.md
@@ -29,26 +29,26 @@ Both handshakes have configurable timeouts (they should complete quickly).

 Tendermint implements the Station-to-Station protocol
 using X25519 keys for Diffie-Hellman key-exchange and chacha20poly1305 for encryption.

 It goes as follows:
+
 - generate an ephemeral X25519 keypair
 - send the ephemeral public key to the peer
 - wait to receive the peer's ephemeral public key
 - compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key
 - generate two keys to use for encryption (sending and receiving) and a challenge for authentication as follows:
-  - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as
-    `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN`
-  - get 96 bytes of output from hkdf-sha256
-  - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite
-  - use the last 32 bytes of output for the challenge
-- use a seperate nonce for receiving and sending.
Both nonces start at 0, and should support the full 96 bit nonce range
+  - create a hkdf-sha256 instance with the key being the diffie hellman shared secret, and info parameter as
+    `TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN`
+  - get 96 bytes of output from hkdf-sha256 (a sketch of this derivation is given at the end of this section)
+  - if we had the smaller ephemeral pubkey, use the first 32 bytes for the key for receiving, the second 32 bytes for sending; else the opposite
+  - use the last 32 bytes of output for the challenge
+- use a separate nonce for receiving and sending. Both nonces start at 0, and should support the full 96 bit nonce range
 - all communications from now on are encrypted in 1024 byte frames,
-using the respective secret and nonce. Each nonce is incremented by one after each use.
+  using the respective secret and nonce. Each nonce is incremented by one after each use.
 - we now have an encrypted channel, but still need to authenticate
 - sign the common challenge obtained from the hkdf with our persistent private key
 - send the amino encoded persistent pubkey and signature to the peer
 - wait to receive the persistent public key and signature from the peer
 - verify the signature on the challenge using the peer's persistent public key
 - If this is an outgoing connection (we dialed the peer) and we used a peer ID,
   then finally verify that the peer's persistent public key corresponds to the peer ID we dialed,
   ie. `peer.PubKey.Address() == <ID>`.

@@ -69,7 +69,6 @@ an optional whitelist which can be managed through the ABCI app -
 if the whitelist is enabled and the peer does not qualify, the connection is
 terminated.

-
 ### Tendermint Version Handshake

 The Tendermint Version Handshake allows the peers to exchange their NodeInfo:

@@ -89,6 +88,7 @@ type NodeInfo struct {
 }
 ```

 The connection is disconnected if:
+
 - `peer.NodeInfo.ID` is not equal `peerConn.ID`
 - `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision
 - `peer.NodeInfo.Version` Major is not the same as ours
@@ -97,7 +97,6 @@ The connection is disconnected if:
 - `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be resolved

-
 At this point, if we have not disconnected, the peer is valid. It is added to the switch and hence all reactors via the `AddPeer` method.
 Note that each reactor may handle multiple channels.
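+
+The key-derivation step described in the authenticated encryption handshake
+above can be sketched with the x/crypto HKDF package; treat this as an
+illustration of the 32+32+32 byte layout, not as the exact production code:
+
+```go
+package p2p
+
+import (
+	"crypto/sha256"
+	"io"
+
+	"golang.org/x/crypto/hkdf"
+)
+
+// deriveSecrets expands the Diffie-Hellman shared secret into a receive key,
+// a send key, and an authentication challenge. locIsLeast reports whether our
+// ephemeral pubkey was the smaller one, which decides the key ordering.
+func deriveSecrets(dhSecret []byte, locIsLeast bool) (recvKey, sendKey, challenge [32]byte) {
+	h := hkdf.New(sha256.New, dhSecret, nil,
+		[]byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN"))
+	var out [96]byte
+	if _, err := io.ReadFull(h, out[:]); err != nil {
+		panic(err) // HKDF can supply 96 bytes; failure here is unexpected
+	}
+	if locIsLeast { // we had the smaller ephemeral pubkey
+		copy(recvKey[:], out[0:32])
+		copy(sendKey[:], out[32:64])
+	} else {
+		copy(recvKey[:], out[32:64])
+		copy(sendKey[:], out[0:32])
+	}
+	copy(challenge[:], out[64:96])
+	return
+}
+```
+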
diff --git a/docs/spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md index a96f83b3211..195f9b86279 100644 --- a/docs/spec/reactors/block_sync/impl.md +++ b/docs/spec/reactors/block_sync/impl.md @@ -1,46 +1,41 @@ ## Blockchain Reactor -* coordinates the pool for syncing -* coordinates the store for persistence -* coordinates the playing of blocks towards the app using a sm.BlockExecutor -* handles switching between fastsync and consensus -* it is a p2p.BaseReactor -* starts the pool.Start() and its poolRoutine() -* registers all the concrete types and interfaces for serialisation +- coordinates the pool for syncing +- coordinates the store for persistence +- coordinates the playing of blocks towards the app using a sm.BlockExecutor +- handles switching between fastsync and consensus +- it is a p2p.BaseReactor +- starts the pool.Start() and its poolRoutine() +- registers all the concrete types and interfaces for serialisation ### poolRoutine -* listens to these channels: - * pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends +- listens to these channels: + - pool requests blocks from a specific peer by posting to requestsCh, block reactor then sends a &bcBlockRequestMessage for a specific height - * pool signals timeout of a specific peer by posting to timeoutsCh - * switchToConsensusTicker to periodically try and switch to consensus - * trySyncTicker to periodically check if we have fallen behind and then catch-up sync - * if there aren't any new blocks available on the pool it skips syncing -* tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores + - pool signals timeout of a specific peer by posting to timeoutsCh + - switchToConsensusTicker to periodically try and switch to consensus + - trySyncTicker to periodically check if we have fallen behind and then catch-up sync + - if there aren't any new blocks available on the pool it skips syncing +- tries to sync the app by taking downloaded blocks from the pool, gives them to the app and stores them on disk -* implements Receive which is called by the switch/peer - * calls AddBlock on the pool when it receives a new block from a peer +- implements Receive which is called by the switch/peer + - calls AddBlock on the pool when it receives a new block from a peer ## Block Pool -* responsible for downloading blocks from peers -* makeRequestersRoutine() - * removes timeout peers - * starts new requesters by calling makeNextRequester() -* requestRoutine(): - * picks a peer and sends the request, then blocks until: - * pool is stopped by listening to pool.Quit - * requester is stopped by listening to Quit - * request is redone - * we receive a block - * gotBlockCh is strange +- responsible for downloading blocks from peers +- makeRequestersRoutine() + - removes timeout peers + - starts new requesters by calling makeNextRequester() +- requestRoutine(): + - picks a peer and sends the request, then blocks until: + - pool is stopped by listening to pool.Quit + - requester is stopped by listening to Quit + - request is redone + - we receive a block + - gotBlockCh is strange ## Block Store -* persists blocks to disk - -# TODO - -* How does the switch from bcR to conR happen? Does conR persist blocks to disk too? -* What is the interaction between the consensus and blockchain reactors? 
+- persists blocks to disk
diff --git a/docs/spec/reactors/block_sync/reactor.md b/docs/spec/reactors/block_sync/reactor.md
index 97104eeeb89..045bbd40039 100644
--- a/docs/spec/reactors/block_sync/reactor.md
+++ b/docs/spec/reactors/block_sync/reactor.md
@@ -46,11 +46,11 @@ type bcStatusResponseMessage struct {

 ## Architecture and algorithm

-The Blockchain reactor is organised as a set of concurrent tasks:
-  - Receive routine of Blockchain Reactor
-  - Task for creating Requesters
-  - Set of Requesters tasks and
-  - Controller task.
+The Blockchain reactor is organised as a set of concurrent tasks:
+
+- Receive routine of Blockchain Reactor
+- Task for creating Requesters
+- Set of Requesters tasks and
+- Controller task.

 ![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)

@@ -58,41 +58,39 @@ The Blockchain reactor is organised as a set of concurrent tasks:

 These are the core data structures necessary to provide the Blockchain Reactor logic.

-Requester data structure is used to track assignment of request for `block` at position `height` to a
-peer with id equals to `peerID`.
+Requester data structure is used to track the assignment of a request for a `block` at position `height` to a peer with id equal to `peerID`.

 ```go
 type Requester {
-  mtx Mutex
+  mtx Mutex
   block Block
-  height int64
-  peerID p2p.ID
+  height int64
+  peerID p2p.ID
   redoChannel chan struct{}
 }
 ```

-Pool is core data structure that stores last executed block (`height`), assignment of requests to peers (`requesters`),
-current height for each peer and number of pending requests for each peer (`peers`), maximum peer height, etc.
+Pool is the core data structure that stores the last executed block (`height`), the assignment of requests to peers (`requesters`), the current height for each peer and the number of pending requests for each peer (`peers`), the maximum peer height, etc.

 ```go
 type Pool {
-  mtx Mutex
+  mtx Mutex
   requesters map[int64]*Requester
-  height int64
+  height int64
   peers map[p2p.ID]*Peer
-  maxPeerHeight int64
-  numPending int32
+  maxPeerHeight int64
+  numPending int32
   store BlockStore
-  requestsChannel chan<- BlockRequest
-  errorsChannel chan<- peerError
+  requestsChannel chan<- BlockRequest
+  errorsChannel chan<- peerError
 }
 ```

-Peer data structure stores for each peer current `height` and number of pending requests sent to
-the peer (`numPending`), etc.
+Peer data structure stores for each peer the current `height` and the number of pending requests sent to the peer (`numPending`), etc.

 ```go
 type Peer struct {
-  id p2p.ID
+  id p2p.ID
   height int64
   numPending int32
   timeout *time.Timer
@@ -100,202 +98,202 @@ type Peer struct {
 }

-BlockRequest is internal data structure used to denote current mapping of request for a block at some `height` to
-a peer (`PeerID`).
-
+BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to a peer (`PeerID`).
+
 ```go
 type BlockRequest {
   Height int64
-  PeerID p2p.ID
+  PeerID p2p.ID
 }
 ```

 ### Receive routine of Blockchain Reactor

-It is executed upon message reception on the BlockchainChannel inside p2p receive routine. There is a separate p2p
-receive routine (and therefore receive routine of the Blockchain Reactor) executed for each peer. Note that
-try to send will not block (returns immediately) if outgoing buffer is full.
+It is executed upon message reception on the BlockchainChannel inside the p2p receive routine. There is a separate p2p receive routine (and therefore a receive routine of the Blockchain Reactor) executed for each peer. Note that try-to-send will not block (returns immediately) if the outgoing buffer is full.

 ```go
 handleMsg(pool, m):
     upon receiving bcBlockRequestMessage m from peer p:
-      block = load block for height m.Height from pool.store
-      if block != nil then
-        try to send BlockResponseMessage(block) to p
-      else
-        try to send bcNoBlockResponseMessage(m.Height) to p
-
-    upon receiving bcBlockResponseMessage m from peer p:
-      pool.mtx.Lock()
-      requester = pool.requesters[m.Height]
-      if requester == nil then
-        error("peer sent us a block we didn't expect")
-        continue
-
-      if requester.block == nil and requester.peerID == p then
+      block = load block for height m.Height from pool.store
+      if block != nil then
+        try to send BlockResponseMessage(block) to p
+      else
+        try to send bcNoBlockResponseMessage(m.Height) to p
+
+    upon receiving bcBlockResponseMessage m from peer p:
+      pool.mtx.Lock()
+      requester = pool.requesters[m.Height]
+      if requester == nil then
+        error("peer sent us a block we didn't expect")
+        continue
+
+      if requester.block == nil and requester.peerID == p then
        requester.block = m
-      pool.numPending -= 1 // atomic decrement
-      peer = pool.peers[p]
-      if peer != nil then
-        peer.numPending--
-        if peer.numPending == 0 then
-          peer.timeout.Stop()
-          // NOTE: we don't send Quit signal to the corresponding requester task!
-        else
-          trigger peer timeout to expire after peerTimeout
-      pool.mtx.Unlock()
-
-
+      pool.numPending -= 1 // atomic decrement
+      peer = pool.peers[p]
+      if peer != nil then
+        peer.numPending--
+        if peer.numPending == 0 then
+          peer.timeout.Stop()
+          // NOTE: we don't send Quit signal to the corresponding requester task!
+ else + trigger peer timeout to expire after peerTimeout + pool.mtx.Unlock() + + upon receiving bcStatusRequestMessage m from peer p: - try to send bcStatusResponseMessage(pool.store.Height) + try to send bcStatusResponseMessage(pool.store.Height) upon receiving bcStatusResponseMessage m from peer p: - pool.mtx.Lock() - peer = pool.peers[p] - if peer != nil then - peer.height = m.height - else - peer = create new Peer data structure with id = p and height = m.Height - pool.peers[p] = peer - - if m.Height > pool.maxPeerHeight then - pool.maxPeerHeight = m.Height - pool.mtx.Unlock() - + pool.mtx.Lock() + peer = pool.peers[p] + if peer != nil then + peer.height = m.height + else + peer = create new Peer data structure with id = p and height = m.Height + pool.peers[p] = peer + + if m.Height > pool.maxPeerHeight then + pool.maxPeerHeight = m.Height + pool.mtx.Unlock() + onTimeout(p): - send error message to pool error channel - peer = pool.peers[p] - peer.didTimeout = true + send error message to pool error channel + peer = pool.peers[p] + peer.didTimeout = true ``` ### Requester tasks -Requester task is responsible for fetching a single block at position `height`. +Requester task is responsible for fetching a single block at position `height`. ```go fetchBlock(height, pool): - while true do - peerID = nil + while true do + peerID = nil block = nil - peer = pickAvailablePeer(height) - peerId = peer.id + peer = pickAvailablePeer(height) + peerId = peer.id enqueue BlockRequest(height, peerID) to pool.requestsChannel - redo = false - while !redo do - select { + redo = false + while !redo do + select { upon receiving Quit message do - return - upon receiving message on redoChannel do - mtx.Lock() + return + upon receiving message on redoChannel do + mtx.Lock() pool.numPending++ - redo = true - mtx.UnLock() - } + redo = true + mtx.UnLock() + } pickAvailablePeer(height): - selectedPeer = nil - while selectedPeer = nil do - pool.mtx.Lock() - for each peer in pool.peers do - if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then - peer.numPending++ - selectedPeer = peer - break - pool.mtx.Unlock() - - if selectedPeer = nil then - sleep requestIntervalMS - - return selectedPeer + selectedPeer = nil + while selectedPeer = nil do + pool.mtx.Lock() + for each peer in pool.peers do + if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then + peer.numPending++ + selectedPeer = peer + break + pool.mtx.Unlock() + + if selectedPeer = nil then + sleep requestIntervalMS + + return selectedPeer ``` + sleep for requestIntervalMS + ### Task for creating Requesters This task is responsible for continuously creating and starting Requester tasks. 
+
 ```go
 createRequesters(pool):
-  while true do
-    if !pool.isRunning then break
-    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
+  while true do
+    if !pool.isRunning then break
+    if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
       pool.mtx.Lock()
       nextHeight = pool.height + size(pool.requesters)
-      requester = create new requester for height nextHeight
-      pool.requesters[nextHeight] = requester
-      pool.numPending += 1 // atomic increment
-      start requester task
-      pool.mtx.Unlock()
-    else
+      requester = create new requester for height nextHeight
+      pool.requesters[nextHeight] = requester
+      pool.numPending += 1 // atomic increment
+      start requester task
+      pool.mtx.Unlock()
+    else
       sleep requestIntervalMS
-      pool.mtx.Lock()
-      for each peer in pool.peers do
-        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
-          send error on pool error channel
+      pool.mtx.Lock()
+      for each peer in pool.peers do
+        if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
+          send error on pool error channel
           peer.didTimeout = true
-        if peer.didTimeout then
-          for each requester in pool.requesters do
-            if requester.getPeerID() == peer then
+        if peer.didTimeout then
+          for each requester in pool.requesters do
+            if requester.getPeerID() == peer then
               enqueue msg on requestor's redoChannel
-          delete(pool.peers, peerID)
-      pool.mtx.Unlock()
+          delete(pool.peers, peerID)
+      pool.mtx.Unlock()
 ```

-
-### Main blockchain reactor controller task
+### Main blockchain reactor controller task
+
 ```go
 main(pool):
-  create trySyncTicker with interval trySyncIntervalMS
-  create statusUpdateTicker with interval statusUpdateIntervalSeconds
-  create switchToConsensusTicker with interbal switchToConsensusIntervalSeconds
-
-  while true do
-    select {
+  create trySyncTicker with interval trySyncIntervalMS
+  create statusUpdateTicker with interval statusUpdateIntervalSeconds
+  create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
+
+  while true do
+    select {
      upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
-       try to send bcBlockRequestMessage(Height) to Peer
+       try to send bcBlockRequestMessage(Height) to Peer

      upon receiving error(peer) on errorsChannel:
-       stop peer for error
+       stop peer for error

      upon receiving message on statusUpdateTickerChannel:
-       broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
+       broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine

      upon receiving message on switchToConsensusTickerChannel:
-       pool.mtx.Lock()
-       receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
-       ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
-       haveSomePeers = size of pool.peers > 0
+       pool.mtx.Lock()
+       receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
+       ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
+       haveSomePeers = size of pool.peers > 0
        pool.mtx.Unlock()
        if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
-         switch to consensus mode
+         switch to consensus mode

      upon receiving message on trySyncTickerChannel:
-       for i = 0; i < 10; i++ do
-         pool.mtx.Lock()
+       for i = 0; i < 10; i++ do
+         pool.mtx.Lock()
         firstBlock = pool.requesters[pool.height].block
         secondBlock = pool.requesters[pool.height+1].block
         if firstBlock == nil or secondBlock == nil then
         pool.mtx.Unlock()
         continue
     pool.mtx.Unlock()
-    verify firstBlock using LastCommit from secondBlock
-    if verification failed
-    pool.mtx.Lock()
+    verify firstBlock using LastCommit from secondBlock
+    if verification failed
+    pool.mtx.Lock()
     peerID = pool.requesters[pool.height].peerID
     redoRequestsForPeer(pool, peerID)
     delete(pool.peers, peerID)
-    stop peer peerID for error
-    pool.mtx.Unlock()
-    else
+    stop peer peerID for error
+    pool.mtx.Unlock()
+    else
     delete(pool.requesters, pool.height)
     save firstBlock to store
-    pool.height++
-    execute firstBlock
+    pool.height++
+    execute firstBlock
     }
-
+
redoRequestsForPeer(pool, peerID):
-    for each requester in pool.requesters do
-    if requester.getPeerID() == peerID
-    enqueue msg on redoChannel for requester
+    for each requester in pool.requesters do
+    if requester.getPeerID() == peerID
+    enqueue msg on redoChannel for requester
```
-
+
## Channels

Defines `maxMsgSize` for the maximum size of incoming messages,
diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md
index 0f03b44b787..7be35032214 100644
--- a/docs/spec/reactors/consensus/consensus-reactor.md
+++ b/docs/spec/reactors/consensus/consensus-reactor.md
@@ -1,49 +1,48 @@
 # Consensus Reactor

-Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
-manages the state of the Tendermint consensus internal state machine.
-When Consensus Reactor is started, it starts Broadcast Routine which starts ConsensusState service.
-Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
-(that is used extensively in gossip routines) and starts the following three routines for the peer p:
-Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
+Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
+manages the state of the Tendermint consensus internal state machine.
+When Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
+Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
+(that is used extensively in gossip routines) and starts the following three routines for the peer p:
+Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, Consensus Reactor is responsible
 for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
-The processing normally consists of updating the known peer state and for some messages
-(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding message to ConsensusState module
-for further processing. In the following text we specify the core functionality of those separate unit of executions
-that are part of the Consensus Reactor.
+The processing normally consists of updating the known peer state and for some messages
+(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`) also forwarding the message to the ConsensusState module
+for further processing. In the following text we specify the core functionality of those separate units of execution
+that are part of the Consensus Reactor.

 ## ConsensusState service

-Consensus State handles execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
+Consensus State handles execution of the Tendermint BFT consensus algorithm.
It processes votes and proposals, and upon reaching agreement, commits blocks to the chain and executes them against the application. The internal state machine receives input from peers, the internal validator and from a timer. Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine. -Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed -by the Receive Routine. - +Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed +by the Receive Routine. ### Receive Routine of the ConsensusState service Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions. -It is the only routine that updates RoundState that contains internal consensus state. -Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. -It receives messages from peers, internal validators and from Timeout Ticker -and invokes the corresponding handlers, potentially updating the RoundState. -The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are +It is the only routine that updates RoundState that contains internal consensus state. +Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities. +It receives messages from peers, internal validators and from Timeout Ticker +and invokes the corresponding handlers, potentially updating the RoundState. +The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are discussed in separate document. For understanding of this document -it is sufficient to understand that the Receive Routine manages and updates RoundState data structure that is +it is sufficient to understand that the Receive Routine manages and updates RoundState data structure that is then extensively used by the gossip routines to determine what information should be sent to peer processes. ## Round State RoundState defines the internal consensus state. It contains height, round, round step, a current validator set, -a proposal and proposal block for the current round, locked round and block (if some block is being locked), set of -received votes and last commit and last validators set. +a proposal and proposal block for the current round, locked round and block (if some block is being locked), set of +received votes and last commit and last validators set. ```golang type RoundState struct { - Height int64 + Height int64 Round int Step RoundStepType Validators ValidatorSet @@ -54,10 +53,10 @@ type RoundState struct { LockedBlock Block LockedBlockParts PartSet Votes HeightVoteSet - LastCommit VoteSet + LastCommit VoteSet LastValidators ValidatorSet -} -``` +} +``` Internally, consensus will run as a state machine with the following states: @@ -82,8 +81,8 @@ type PeerRoundState struct { Round int // Round peer is at, -1 if unknown. Step RoundStepType // Step peer is at Proposal bool // True if peer has proposal for this round - ProposalBlockPartsHeader PartSetHeader - ProposalBlockParts BitArray + ProposalBlockPartsHeader PartSetHeader + ProposalBlockParts BitArray ProposalPOLRound int // Proposal's POL round. -1 if none. ProposalPOL BitArray // nil until ProposalPOLMessage received. Prevotes BitArray // All votes peer has for this round @@ -93,19 +92,19 @@ type PeerRoundState struct { CatchupCommitRound int // Round that we have commit for. Not necessarily unique. 
-1 if none.
     CatchupCommit BitArray // All commit precommits peer has for this height & CatchupCommitRound
 }
-```
+```

 ## Receive method of Consensus reactor

-The entry point of the Consensus reactor is a receive method. When a message is received from a peer p,
-normally the peer round state is updated correspondingly, and some messages
+The entry point of the Consensus reactor is a receive method. When a message is received from a peer p,
+normally the peer round state is updated correspondingly, and some messages
 are passed for further processing, for example to ConsensusState service. We now specify the processing of messages
 in the receive method of Consensus reactor for each message type. In the following message handler, `rs` and `prs` denote
 `RoundState` and `PeerRoundState`, respectively.

-### NewRoundStepMessage handler
+### NewRoundStepMessage handler

-```
+```
 handleMessage(msg):
     if msg is from smaller height/round/step then return
     // Just remember these values.
@@ -116,10 +115,10 @@ handleMessage(msg):
     Update prs with values from msg

     if prs.Height or prs.Round has been updated then
-        reset Proposal related fields of the peer state
+        reset Proposal related fields of the peer state
     if prs.Round has been updated and msg.Round == prs.CatchupCommitRound then
         prs.Precommits = prs.CatchupCommit
-    if prs.Height has been updated then
+    if prs.Height has been updated then
         if prs.Height+1 == msg.Height && prs.Round == msg.LastCommitRound then
             prs.LastCommitRound = msg.LastCommitRound
             prs.LastCommit = prs.Precommits
@@ -128,111 +127,111 @@ handleMessage(msg):
             prs.LastCommit = nil
     }
     Reset prs.CatchupCommitRound and prs.CatchupCommit
-```
+```

 ### CommitStepMessage handler

-```
+```
 handleMessage(msg):
-    if prs.Height == msg.Height then
+    if prs.Height == msg.Height then
         prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
         prs.ProposalBlockParts = msg.BlockParts
-```
+```

 ### HasVoteMessage handler

-```
+```
 handleMessage(msg):
-    if prs.Height == msg.Height then
+    if prs.Height == msg.Height then
         prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
-```
+```

 ### VoteSetMaj23Message handler

-```
+```
 handleMessage(msg):
     if prs.Height == msg.Height then
         Record in rs that a peer claims to have ⅔ majority for msg.BlockID
-        Send VoteSetBitsMessage showing votes node has for that BlockId
+        Send VoteSetBitsMessage showing votes the node has for that BlockID
-```
+```

 ### ProposalMessage handler

 ```
 handleMessage(msg):
-    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
+    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
     prs.Proposal = true
     prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
-    prs.ProposalBlockParts = empty set
+    prs.ProposalBlockParts = empty set
     prs.ProposalPOLRound = msg.POLRound
-    prs.ProposalPOL = nil
+    prs.ProposalPOL = nil
     Send msg through internal peerMsgQueue to ConsensusState service
-```
+```

 ### ProposalPOLMessage handler

-```
+```
 handleMessage(msg):
     if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
     prs.ProposalPOL = msg.ProposalPOL
-```
+```

 ### BlockPartMessage handler

-```
+```
 handleMessage(msg):
     if prs.Height != msg.Height || prs.Round != msg.Round then return
-    Record in prs that peer has block part msg.Part.Index
+    Record in prs that the peer has block part msg.Part.Index
     Send msg through internal peerMsgQueue to ConsensusState service
-```
+```

 ### VoteMessage handler

-```
+```
 handleMessage(msg):
     Record in prs that a peer knows vote with index msg.vote.ValidatorIndex for
particular height and round
     Send msg through internal peerMsgQueue to ConsensusState service
-```
+```

 ### VoteSetBitsMessage handler

-```
+```
 handleMessage(msg):
     Update prs for the bit-array of votes peer claims to have for the msg.BlockID
-```
+```

 ## Gossip Data Routine

-It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
-`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
+It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
+`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
 and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:

 ```
 1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
-        Part = pick a random proposal block part the peer does not have
-        Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
+        Part = pick a random proposal block part the peer does not have
+        Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
         if send returns true, record that the peer knows the corresponding block Part
-        Continue
-
+        Continue
+
 1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
         help peer catch up using gossipDataForCatchup function
         Continue

-1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
+1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
         Sleep PeerGossipSleepDuration
-        Continue
+        Continue

     // at this point rs.Height == prs.Height and rs.Round == prs.Round
-1d) if (rs.Proposal != nil and !prs.Proposal) then
+1d) if (rs.Proposal != nil and !prs.Proposal) then
        Send ProposalMessage(rs.Proposal) to the peer
        if send returns true, record that the peer knows Proposal
        if 0 <= rs.Proposal.POLRound then
-          polRound = rs.Proposal.POLRound
-          prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
+          polRound = rs.Proposal.POLRound
+          prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
           Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
-       Continue
+       Continue

-2) Sleep PeerGossipSleepDuration
+2) Sleep PeerGossipSleepDuration
 ```

 ### Gossip Data For Catchup

@@ -240,65 +239,65 @@ and the known PeerRoundState (`prs`). The routine repeats forever the logic show

 This function is responsible for helping a peer catch up if it is at a smaller height (prs.Height < rs.Height).
 The function executes the following logic:

-    if peer does not have all block parts for prs.ProposalBlockPart then
+    if peer does not have all block parts for prs.ProposalBlockParts then
         blockMeta = Load Block Metadata for height prs.Height from blockStore
         if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
             Sleep PeerGossipSleepDuration
             return
-        Part = pick a random proposal block part the peer does not have
-        Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
+        Part = pick a random proposal block part the peer does not have
+        Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
         if send returns true, record that the peer knows the corresponding block Part
         return
-    else Sleep PeerGossipSleepDuration
-
+    else Sleep PeerGossipSleepDuration
+
 ## Gossip Votes Routine

 It is used to send the following message: `VoteMessage` on the VoteChannel.
-The gossip votes routine is based on the local RoundState (`rs`) +The gossip votes routine is based on the local RoundState (`rs`) and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below: ``` 1a) if rs.Height == prs.Height then - if prs.Step == RoundStepNewHeight then - vote = random vote from rs.LastCommit the peer does not have - Send VoteMessage(vote) to the peer + if prs.Step == RoundStepNewHeight then + vote = random vote from rs.LastCommit the peer does not have + Send VoteMessage(vote) to the peer if send returns true, continue - - if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then + + if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then Prevotes = rs.Votes.Prevotes(prs.Round) - vote = random vote from Prevotes the peer does not have - Send VoteMessage(vote) to the peer + vote = random vote from Prevotes the peer does not have + Send VoteMessage(vote) to the peer if send returns true, continue - if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then - Precommits = rs.Votes.Precommits(prs.Round) - vote = random vote from Precommits the peer does not have - Send VoteMessage(vote) to the peer + if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then + Precommits = rs.Votes.Precommits(prs.Round) + vote = random vote from Precommits the peer does not have + Send VoteMessage(vote) to the peer if send returns true, continue - - if prs.ProposalPOLRound != -1 then + + if prs.ProposalPOLRound != -1 then PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound) - vote = random vote from PolPrevotes the peer does not have - Send VoteMessage(vote) to the peer - if send returns true, continue + vote = random vote from PolPrevotes the peer does not have + Send VoteMessage(vote) to the peer + if send returns true, continue 1b) if prs.Height != 0 and rs.Height == prs.Height+1 then - vote = random vote from rs.LastCommit peer does not have - Send VoteMessage(vote) to the peer + vote = random vote from rs.LastCommit peer does not have + Send VoteMessage(vote) to the peer if send returns true, continue - + 1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then - Commit = get commit from BlockStore for prs.Height - vote = random vote from Commit the peer does not have - Send VoteMessage(vote) to the peer + Commit = get commit from BlockStore for prs.Height + vote = random vote from Commit the peer does not have + Send VoteMessage(vote) to the peer if send returns true, continue -2) Sleep PeerGossipSleepDuration +2) Sleep PeerGossipSleepDuration ``` ## QueryMaj23Routine -It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given +It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below. @@ -324,8 +323,8 @@ BlockID has seen +2/3 votes. 
This routine is based on the local RoundState (`rs` Send m to peer Sleep PeerQueryMaj23SleepDuration -1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and - prs.Height <= blockStore.Height() then +1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and + prs.Height <= blockStore.Height() then Commit = LoadCommit(prs.Height) m = VoteSetMaj23Message(prs.Height,Commit.Round,Precommit,Commit.blockId) Send m to peer @@ -339,14 +338,14 @@ BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs` The Broadcast routine subscribes to an internal event bus to receive new round steps, votes messages and proposal heartbeat messages, and broadcasts messages to peers upon receiving those events. It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon new round state event. Note that -broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel. -Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel. +broadcasting these messages does not depend on the PeerRoundState; it is sent on the StateChannel. +Upon receiving VoteMessage it broadcasts `HasVoteMessage` message to its peers on the StateChannel. `ProposalHeartbeatMessage` is sent the same way on the StateChannel. ## Channels Defines 4 channels: state, data, vote and vote_set_bits. Each channel -has `SendQueueCapacity` and `RecvBufferCapacity` and +has `SendQueueCapacity` and `RecvBufferCapacity` and `RecvMessageCapacity` set to `maxMsgSize`. Sending incorrectly encoded data will result in stopping the peer. diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md index 4ea619b5150..a1cf17bcbd1 100644 --- a/docs/spec/reactors/consensus/consensus.md +++ b/docs/spec/reactors/consensus/consensus.md @@ -23,7 +23,7 @@ processes using `BlockPartMessage`. Validators in Tendermint communicate by peer-to-peer gossiping protocol. Each validator is connected only to a subset of processes called peers. By the gossiping protocol, a validator send to its peers -all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can +all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can reach agreement on some block, and also obtain the content of the chosen block (block parts). As part of the gossiping protocol, processes also send auxiliary messages that inform peers about the executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md index 649d3dd21d7..b5e0b35afbc 100644 --- a/docs/spec/reactors/consensus/proposer-selection.md +++ b/docs/spec/reactors/consensus/proposer-selection.md @@ -1,6 +1,6 @@ # Proposer selection procedure in Tendermint -This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer. +This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer. As Tendermint is “leader-based protocol”, the proposer selection is critical for its correct functioning. Let denote with `proposer_p(h,r)` a process returned by the Proposer Selection Procedure at the process p, at height h and round r. Then the Proposer Selection procedure should fulfill the following properties: @@ -9,13 +9,13 @@ and round r. 
Then the Proposer Selection procedure should fulfill the following p
 and q, for each height h, and each round r, proposer_p(h,r) = proposer_q(h,r)

-`Liveness`: In every consecutive sequence of rounds of size K (K is system parameter), at least a
-single round has an honest proposer.
+`Liveness`: In every consecutive sequence of rounds of size K (K is a system parameter), at least a
+single round has an honest proposer.

-`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
-voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
-with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
-equal to its voting power.
+`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
+voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
+with the total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
+equal to its voting power.

 We now look at a few particular cases to understand better how fairness should be implemented.
 If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,
@@ -27,20 +27,20 @@ Let consider now the following scenario where a total voting power of faulty pro
 p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1). In this case the sequence of proposer selections looks like this:

-`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
+`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`

 In this case, we see that a number of rounds coordinated by a faulty process is proportional to its voting power.
-We consider also the case where we have voting power uniformly distributed among processes, i.e., we have 10 processes
-each with voting power of 1. And let consider that there are 3 faulty processes with consecutive addresses,
+We also consider the case where voting power is uniformly distributed among processes, i.e., we have 10 processes
+each with voting power of 1. And let us consider that there are 3 faulty processes with consecutive addresses,
 for example the first 3 processes are faulty. Then the sequence looks like this:

 `p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`

-In this case, we have 3 consecutive rounds with a faulty proposer.
+In this case, we have 3 consecutive rounds with a faulty proposer.

 One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
 (p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selection looks like this:

 p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc

-This basically means that almost all rounds have the same proposer. But in this case, the process p0 has anyway enough
+This basically means that almost all rounds have the same proposer. But in this case, the process p0 has enough
diff --git a/docs/spec/reactors/mempool/concurrency.md b/docs/spec/reactors/mempool/concurrency.md index 991113e6da0..a6870db9bb5 100644 --- a/docs/spec/reactors/mempool/concurrency.md +++ b/docs/spec/reactors/mempool/concurrency.md @@ -2,7 +2,7 @@ Look at the concurrency model this uses... -* Receiving CheckTx -* Broadcasting new tx -* Interfaces with consensus engine, reap/update while checking -* Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks) +- Receiving CheckTx +- Broadcasting new tx +- Interfaces with consensus engine, reap/update while checking +- Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks) diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md index 776149ba050..3e3c0d37345 100644 --- a/docs/spec/reactors/mempool/config.md +++ b/docs/spec/reactors/mempool/config.md @@ -11,12 +11,12 @@ Flag: `--mempool.recheck_empty=false` Environment: `TM_MEMPOOL_RECHECK_EMPTY=false` Config: + ``` [mempool] recheck_empty = false ``` - ## Recheck `--mempool.recheck=false` (default: true) diff --git a/docs/spec/reactors/mempool/functionality.md b/docs/spec/reactors/mempool/functionality.md index 85c3dc58d40..4f811801f87 100644 --- a/docs/spec/reactors/mempool/functionality.md +++ b/docs/spec/reactors/mempool/functionality.md @@ -6,26 +6,25 @@ consensus reactor when it is selected as the block proposer. There are two sides to the mempool state: -* External: get, check, and broadcast new transactions -* Internal: return valid transaction, update list after block commit - +- External: get, check, and broadcast new transactions +- Internal: return valid transaction, update list after block commit ## External functionality External functionality is exposed via network interfaces to potentially untrusted actors. -* CheckTx - triggered via RPC or P2P -* Broadcast - gossip messages after a successful check +- CheckTx - triggered via RPC or P2P +- Broadcast - gossip messages after a successful check ## Internal functionality Internal functionality is exposed via method calls to other code compiled into the tendermint binary. -* Reap - get tx to propose in next block -* Update - remove tx that were included in last block -* ABCI.CheckTx - call ABCI app to validate the tx +- Reap - get tx to propose in next block +- Update - remove tx that were included in last block +- ABCI.CheckTx - call ABCI app to validate the tx What does it provide the consensus reactor? What guarantees does it need from the ABCI app? @@ -33,5 +32,11 @@ What guarantees does it need from the ABCI app? ## Optimizations -Talk about the LRU cache to make sure we don't process any -tx that we have seen before +The implementation within this library also implements a tx cache. +This is so that signatures don't have to be reverified if the tx has +already been seen before. +However, we only store valid txs in the cache, not invalid ones. +This is because invalid txs could become good later. +Txs that are included in a block aren't removed from the cache, +as they still may be getting received over the p2p network. +These txs are stored in the cache by their hash, to mitigate memory concerns. 
diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md
index 9a624dff1dd..117fc5f2f61 100644
--- a/docs/spec/reactors/mempool/messages.md
+++ b/docs/spec/reactors/mempool/messages.md
@@ -35,12 +35,12 @@ Request (`POST http://gaia.zone:26657/`):

 ```json
 {
-  "id": "",
-  "jsonrpc": "2.0",
-  "method": "broadcast_sync",
-  "params": {
+  "id": "",
+  "jsonrpc": "2.0",
+  "method": "broadcast_tx_sync",
+  "params": {
     "tx": "F012A4BC68..."
-  }
+  }
 }
 ```

@@ -48,14 +48,14 @@ Response:

 ```json
 {
-  "error": "",
-  "result": {
-    "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52",
-    "log": "",
-    "data": "",
-    "code": 0
-  },
-  "id": "",
-  "jsonrpc": "2.0"
+  "error": "",
+  "result": {
+    "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52",
+    "log": "",
+    "data": "",
+    "code": 0
+  },
+  "id": "",
+  "jsonrpc": "2.0"
 }
 ```
diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md
index 2bdbd895126..fa25eeb3eae 100644
--- a/docs/spec/reactors/mempool/reactor.md
+++ b/docs/spec/reactors/mempool/reactor.md
@@ -2,12 +2,12 @@

 ## Channels

-[#1503](https://github.com/tendermint/tendermint/issues/1503)
+See [this issue](https://github.com/tendermint/tendermint/issues/1503)

 Mempool maintains a cache of the last 10000 transactions to prevent
 replaying old transactions (plus transactions coming from other
 validators, who are continually exchanging transactions). Read [Replay
-Protection](https://tendermint.readthedocs.io/projects/tools/en/master/app-development.html?#replay-protection)
+Protection](../../../../app-development.md#replay-protection)
 for details.

 Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md
index 0f13c0cb549..26f1fa8bb04 100644
--- a/docs/spec/reactors/pex/pex.md
+++ b/docs/spec/reactors/pex/pex.md
@@ -15,6 +15,9 @@ we will not put them in the address book or gossip them to others.

 All peers except private peers and peers coming from them are tracked using the address book.

+The rest of our peers are only distinguished by being either
+inbound (they dialed our public address) or outbound (we dialed them).
+
 ## Discovery

 Peer discovery begins with a list of seeds.
@@ -26,15 +29,15 @@ and will attempt to maintain persistent connections with them. If the connection
 we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
 and after about a day of trying, stop dialing the peer.

-So long as we have less than `MinNumOutboundPeers`, we periodically request additional peers
+So long as we have less than `MaxNumOutboundPeers`, we periodically request additional peers
 from each of our peers. If sufficient time goes by and we still can't find enough peers,
 we try the seeds again.

 ## Listening

 Peers listen on a configurable ListenAddr that they self-report in their
-NodeInfo during handshakes with other peers. Peers accept up to (MaxNumPeers -
-MinNumOutboundPeers) incoming peers.
+NodeInfo during handshakes with other peers. Peers accept up to
+`MaxNumInboundPeers` incoming peers.

 ## Address Book

@@ -73,10 +76,11 @@ a trust metric (see below), but it's best to start with something simple.

 ## Select Peers to Dial

-When we need more peers, we pick them randomly from the addrbook with some
-configurable bias for unvetted peers.
The bias should be lower
-and can increase as we obtain more, ensuring that our first peers are more trustworthy,
-but always giving us the chance to discover new good peers.
+When we need more peers, we pick addresses randomly from the addrbook with some
+configurable bias for unvetted peers. The bias should be lower when we have
+fewer peers and can increase as we obtain more, ensuring that our first peers
+are more trustworthy, but always giving us the chance to discover new good
+peers.

 We track the last time we dialed a peer and the number of unsuccessful attempts
 we've made. If too many attempts are made, we mark the peer as bad.

 Connection attempts are made with exponential backoff (plus jitter). Because
 the selection process happens every `ensurePeersPeriod`, we might not end up
 dialing a peer for much longer than the backoff duration.

-If we fail to connect to the peer after 16 tries (with exponential backoff), we remove from address book completely.
+If we fail to connect to the peer after 16 tries (with exponential backoff), we
+remove it from the address book completely.

 ## Select Peers to Exchange

 When we’re asked for peers, we select them as follows:
+
 - select at most `maxGetSelection` peers
 - try to select at least `minGetSelection` peers - if we have less than that, select them all.
 - select a random, unbiased `getSelectionPercent` of the peers

@@ -121,4 +127,3 @@ to use it in the PEX. See the
 [trustmetric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
 and [trustmetric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
 architecture docs for more details.
-
diff --git a/docs/spec/software/abci.md b/docs/spec/software/abci.md
index 613e181f0d9..6e17089f368 100644
--- a/docs/spec/software/abci.md
+++ b/docs/spec/software/abci.md
@@ -1,192 +1,3 @@
 # Application Blockchain Interface (ABCI)

-ABCI is the interface between Tendermint (a state-machine replication engine)
-and an application (the actual state machine).
-
-The ABCI message types are defined in a [protobuf
-file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.proto).
-
-For full details on the ABCI message types and protocol, see the [ABCI
-specification](https://github.com/tendermint/tendermint/blob/develop/docs/abci-spec.md).
-Be sure to read the specification if you're trying to build an ABCI app!
-
-For additional details on server implementation, see the [ABCI
-readme](https://github.com/tendermint/tendermint/blob/develop/abci/README.md).
-
-Here we provide some more details around the use of ABCI by Tendermint and
-clarify common "gotchas".
-
-## ABCI connections
-
-Tendermint opens 3 ABCI connections to the app: one for Consensus, one for
-Mempool, one for Queries.
-
-## Async vs Sync
-
-The main ABCI server (ie. non-GRPC) provides ordered asynchronous messages.
-This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward
-transactions to the app before it's finished processing previous ones.
-
-Thus, DeliverTx and CheckTx messages are sent asycnhronously, while all other
-messages are sent synchronously.
-
-## CheckTx and Commit
-
-It is typical to hold three distinct states in an ABCI app: CheckTxState, DeliverTxState,
-QueryState. The QueryState contains the latest committed state for a block.
-The CheckTxState and DeliverTxState may be updated concurrently with one another.
-Before Commit is called, Tendermint locks and flushes the mempool so that no new changes will happen -to CheckTxState. When Commit completes, it unlocks the mempool. - -Thus, during Commit, it is safe to reset the QueryState and the CheckTxState to the latest DeliverTxState -(ie. the new state from executing all the txs in the block). - -Note, however, that it is not possible to send transactions to Tendermint during Commit - if your app -tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock. - - -## EndBlock Validator Updates - -Updates to the Tendermint validator set can be made by returning `Validator` -objects in the `ResponseBeginBlock`: - -``` -message Validator { - bytes address = 1; - PubKey pub_key = 2; - int64 power = 3; -} - -message PubKey { - string type = 1; - bytes data = 2; -} - -``` - -The `pub_key` currently supports two types: - - `type = "ed25519" and `data = ` - - `type = "secp256k1" and `data = <33-byte OpenSSL compressed public key>` - -If the address is provided, it must match the address of the pubkey, as -specified [here](/docs/spec/blockchain/encoding.md#Addresses) - -(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public -key](/docs/spec/blockchain/encoding.md#public-key-cryptography). -For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey -`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be -Amino encoded as -`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`) - -(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a -single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`) - -The `power` is the new voting power for the validator, with the -following rules: - -- power must be non-negative -- if power is 0, the validator must already exist, and will be removed from the - validator set -- if power is non-0: - - if the validator does not already exist, it will be added to the validator - set with the given power - - if the validator does already exist, its power will be adjusted to the given power - -## InitChain Validator Updates - -ResponseInitChain has the option to return a list of validators. -If the list is not empty, Tendermint will adopt it for the validator set. -This way the application can determine the initial validator set for the -blockchain. - -Note that if addressses are included in the returned validators, they must match -the address of the public key. - -ResponseInitChain also includes ConsensusParams, but these are presently -ignored. - -## Query - -Query is a generic message type with lots of flexibility to enable diverse sets -of queries from applications. Tendermint has no requirements from the Query -message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish too. -That said, Tendermint makes a number of queries to support some optional -features. These are: - -### Peer Filtering - -When Tendermint connects to a peer, it sends two queries to the ABCI application -using the following paths, with no additional data: - - - `/p2p/filter/addr/`, where `` denote the IP address and - the port of the connection - - `p2p/filter/id/`, where `` is the peer node ID (ie. the - pubkey.Address() for the peer's PubKey) - -If either of these queries return a non-zero ABCI code, Tendermint will refuse -to connect to the peer. 
- -## Info and the Handshake/Replay - -On startup, Tendermint calls Info on the Query connection to get the latest -committed state of the app. The app MUST return information consistent with the -last block it succesfully completed Commit for. - -If the app succesfully committed block H but not H+1, then `last_block_height = -H` and `last_block_app_hash = `. If the app -failed during the Commit of block H, then `last_block_height = H-1` and -`last_block_app_hash = `. - -We now distinguish three heights, and describe how Tendermint syncs itself with -the app. - -``` -storeBlockHeight = height of the last block Tendermint saw a commit for -stateBlockHeight = height of the last block for which Tendermint completed all - block processing and saved all ABCI results to disk -appBlockHeight = height of the last block for which ABCI app succesfully - completely Commit -``` - -Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight` -Note also we never call Commit on an ABCI app twice for the same height. - -The procedure is as follows. - -First, some simeple start conditions: - -If `appBlockHeight == 0`, then call InitChain. - -If `storeBlockHeight == 0`, we're done. - -Now, some sanity checks: - -If `storeBlockHeight < appBlockHeight`, error -If `storeBlockHeight < stateBlockHeight`, panic -If `storeBlockHeight > stateBlockHeight+1`, panic - -Now, the meat: - -If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`, - replay all blocks in full from `appBlockHeight` to `storeBlockHeight`. - This happens if we completed processing the block, but the app forgot its height. - -If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done - This happens if we crashed at an opportune spot. - -If `storeBlockHeight == stateBlockHeight+1` - This happens if we started processing the block but didn't finish. - - If `appBlockHeight < stateBlockHeight` - replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`, - and replay the block at `storeBlockHeight` using the WAL. - This happens if the app forgot the last block it committed. - - If `appBlockHeight == stateBlockHeight`, - replay the last block (storeBlockHeight) in full. - This happens if we crashed before the app finished Commit - - If appBlockHeight == storeBlockHeight { - update the state using the saved ABCI responses but dont run the block against the real app. - This happens if we crashed after the app finished Commit but before Tendermint saved the state. +This page has [moved](../abci/apps.md). diff --git a/docs/spec/software/wal.md b/docs/spec/software/wal.md index a2e03137d39..1f5d712c5ed 100644 --- a/docs/spec/software/wal.md +++ b/docs/spec/software/wal.md @@ -28,6 +28,5 @@ WAL. Then it will go to precommit, and that time it will work because the private validator contains the `LastSignBytes` and then we’ll replay the precommit from the WAL. -Make sure to read about [WAL -corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption) +Make sure to read about [WAL corruption](../../../tendermint-core/running-in-production.md#wal-corruption) and recovery strategies. 
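The corruption document linked above describes locating the first damaged record in a WAL. As an illustration of that idea only, here is a hedged Go sketch that scans a file of checksummed, length-prefixed records; the `[crc][length][payload]` framing is an assumption made for this example, and the authoritative on-disk format lives in the consensus WAL implementation:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
)

// scanWAL walks records laid out as [crc32][length][payload] and reports the
// offset of the first record whose checksum no longer matches, roughly what a
// repair tool does before printing a "CORRUPTED MESSAGE" marker.
func scanWAL(r io.Reader) (int64, error) {
	var off int64
	for {
		var hdr [8]byte
		if _, err := io.ReadFull(r, hdr[:]); err == io.EOF {
			return -1, nil // clean end of file, no corruption found
		} else if err != nil {
			return off, err
		}
		crc := binary.BigEndian.Uint32(hdr[0:4])
		length := binary.BigEndian.Uint32(hdr[4:8])
		payload := make([]byte, length)
		if _, err := io.ReadFull(r, payload); err != nil {
			return off, fmt.Errorf("truncated record: %w", err)
		}
		if crc32.ChecksumIEEE(payload) != crc {
			return off, fmt.Errorf("checksum mismatch")
		}
		off += 8 + int64(length)
	}
}

func main() {
	// build one well-formed record and confirm the scanner accepts it
	rec := []byte("msg")
	var buf bytes.Buffer
	var hdr [8]byte
	binary.BigEndian.PutUint32(hdr[0:4], crc32.ChecksumIEEE(rec))
	binary.BigEndian.PutUint32(hdr[4:8], uint32(len(rec)))
	buf.Write(hdr[:])
	buf.Write(rec)
	off, err := scanWAL(&buf)
	fmt.Println(off, err) // -1 <nil>: no corruption
}
```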
diff --git a/docs/stop-words.txt b/docs/stop-words.txt new file mode 100644 index 00000000000..7f90eca320e --- /dev/null +++ b/docs/stop-words.txt @@ -0,0 +1,6 @@ +investor +invest +investing +token distribution +atom distribution +distribution of atoms diff --git a/docs/tendermint-core/block-structure.md b/docs/tendermint-core/block-structure.md index 803805529fa..587db0ff44a 100644 --- a/docs/tendermint-core/block-structure.md +++ b/docs/tendermint-core/block-structure.md @@ -7,200 +7,6 @@ nodes. This blockchain is accessible via various rpc endpoints, mainly `/blockchain?minHeight=_&maxHeight=_` to get a list of headers. But what exactly is stored in these blocks? -## Block +The [specification](../spec/blockchain/blockchain.md) contains a detailed description of each component - that's the best place to get started. -A -[Block](https://godoc.org/github.com/tendermint/tendermint/types#Block) -contains: - -- a [Header](#header) contains merkle hashes for various chain states -- the - [Data](https://godoc.org/github.com/tendermint/tendermint/types#Data) - is all transactions which are to be processed -- the [LastCommit](#commit) > 2/3 signatures for the last block - -The signatures returned along with block `H` are those validating block -`H-1`. This can be a little confusing, but we must also consider that -the `Header` also contains the `LastCommitHash`. It would be impossible -for a Header to include the commits that sign it, as it would cause an -infinite loop here. But when we get block `H`, we find -`Header.LastCommitHash`, which must match the hash of `LastCommit`. - -## Header - -The -[Header](https://godoc.org/github.com/tendermint/tendermint/types#Header) -contains lots of information (follow link for up-to-date info). Notably, -it maintains the `Height`, the `LastBlockID` (to make it a chain), and -hashes of the data, the app state, and the validator set. This is -important as the only item that is signed by the validators is the -`Header`, and all other data must be validated against one of the merkle -hashes in the `Header`. - -The `DataHash` can provide a nice check on the -[Data](https://godoc.org/github.com/tendermint/tendermint/types#Data) -returned in this same block. If you are subscribed to new blocks, via -tendermint RPC, in order to display or process the new transactions you -should at least validate that the `DataHash` is valid. If it is -important to verify autheniticity, you must wait for the `LastCommit` -from the next block to make sure the block header (including `DataHash`) -was properly signed. - -The `ValidatorHash` contains a hash of the current -[Validators](https://godoc.org/github.com/tendermint/tendermint/types#Validator). -Tracking all changes in the validator set is complex, but a client can -quickly compare this hash with the [hash of the currently known -validators](https://godoc.org/github.com/tendermint/tendermint/types#ValidatorSet.Hash) -to see if there have been changes. - -The `AppHash` serves as the basis for validating any merkle proofs that -come from the ABCI application. It represents the state of the actual -application, rather that the state of the blockchain itself. This means -it's necessary in order to perform any business logic, such as verifying -an account balance. - -**Note** After the transactions are committed to a block, they still -need to be processed in a separate step, which happens between the -blocks. 
If you find a given transaction in the block at height `H`, the -effects of running that transaction will be first visible in the -`AppHash` from the block header at height `H+1`. - -Like the `LastCommit` issue, this is a requirement of the immutability -of the block chain, as the application only applies transactions *after* -they are commited to the chain. - -## Commit - -The -[Commit](https://godoc.org/github.com/tendermint/tendermint/types#Commit) -contains a set of -[Votes](https://godoc.org/github.com/tendermint/tendermint/types#Vote) -that were made by the validator set to reach consensus on this block. -This is the key to the security in any PoS system, and actually no data -that cannot be traced back to a block header with a valid set of Votes -can be trusted. Thus, getting the Commit data and verifying the votes is -extremely important. - -As mentioned above, in order to find the `precommit votes` for block -header `H`, we need to query block `H+1`. Then we need to check the -votes, make sure they really are for that block, and properly formatted. -Much of this code is implemented in Go in the -[light-client](https://github.com/tendermint/light-client) package. If -you look at the code, you will notice that we need to provide the -`chainID` of the blockchain in order to properly calculate the votes. -This is to protect anyone from swapping votes between chains to fake (or -frame) a validator. Also note that this `chainID` is in the -`genesis.json` from *Tendermint*, not the `genesis.json` from the -basecoin app ([that is a different -chainID...](https://github.com/cosmos/cosmos-sdk/issues/32)). - -Once we have those votes, and we calculated the proper [sign -bytes](https://godoc.org/github.com/tendermint/tendermint/types#Vote.WriteSignBytes) -using the chainID and a [nice helper -function](https://godoc.org/github.com/tendermint/tendermint/types#SignBytes), -we can verify them. The light client is responsible for maintaining a -set of validators that we trust. Each vote only stores the validators -`Address`, as well as the `Signature`. Assuming we have a local copy of -the trusted validator set, we can look up the `Public Key` of the -validator given its `Address`, then verify that the `Signature` matches -the `SignBytes` and `Public Key`. Then we sum up the total voting power -of all validators, whose votes fulfilled all these stringent -requirements. If the total number of voting power for a single block is -greater than 2/3 of all voting power, then we can finally trust the -block header, the AppHash, and the proof we got from the ABCI -application. - -### Vote Sign Bytes - -The `sign-bytes` of a vote is produced by taking a -[stable-json](https://github.com/substack/json-stable-stringify)-like -deterministic JSON [wire](./wire-protocol.html) encoding of the vote -(excluding the `Signature` field), and wrapping it with -`{"chain_id":"my_chain","vote":...}`. - -For example, a precommit vote might have the following `sign-bytes`: - -``` -{"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}} -``` - -## Block Hash - -The [block -hash](https://godoc.org/github.com/tendermint/tendermint/types#Block.Hash) -is the [Simple Tree hash](./merkle.html#simple-tree-with-dictionaries) -of the fields of the block `Header` encoded as a list of `KVPair`s. 
- -## Transaction - -A transaction is any sequence of bytes. It is up to your ABCI -application to accept or reject transactions. - -## BlockID - -Many of these data structures refer to the -[BlockID](https://godoc.org/github.com/tendermint/tendermint/types#BlockID), -which is the `BlockHash` (hash of the block header, also referred to by -the next block) along with the `PartSetHeader`. The `PartSetHeader` is -explained below and is used internally to orchestrate the p2p -propogation. For clients, it is basically opaque bytes, but they must -match for all votes. - -## PartSetHeader - -The -[PartSetHeader](https://godoc.org/github.com/tendermint/tendermint/types#PartSetHeader) -contains the total number of pieces in a -[PartSet](https://godoc.org/github.com/tendermint/tendermint/types#PartSet), -and the Merkle root hash of those pieces. - -## PartSet - -PartSet is used to split a byteslice of data into parts (pieces) for -transmission. By splitting data into smaller parts and computing a -Merkle root hash on the list, you can verify that a part is legitimately -part of the complete data, and the part can be forwarded to other peers -before all the parts are known. In short, it's a fast way to securely -propagate a large chunk of data (like a block) over a gossip network. - -PartSet was inspired by the LibSwift project. - -Usage: - -``` -data := RandBytes(2 << 20) // Something large - -partSet := NewPartSetFromData(data) -partSet.Total() // Total number of 4KB parts -partSet.Count() // Equal to the Total, since we already have all the parts -partSet.Hash() // The Merkle root hash -partSet.BitArray() // A BitArray of partSet.Total() 1's - -header := partSet.Header() // Send this to the peer -header.Total // Total number of parts -header.Hash // The merkle root hash - -// Now we'll reconstruct the data from the parts -partSet2 := NewPartSetFromHeader(header) -partSet2.Total() // Same total as partSet.Total() -partSet2.Count() // Zero, since this PartSet doesn't have any parts yet. -partSet2.Hash() // Same hash as in partSet.Hash() -partSet2.BitArray() // A BitArray of partSet.Total() 0's - -// In a gossip network the parts would arrive in arbitrary order, perhaps -// in response to explicit requests for parts, or optimistically in response -// to the receiving peer's partSet.BitArray(). -for !partSet2.IsComplete() { - part := receivePartFromGossipNetwork() - added, err := partSet2.AddPart(part) - if err != nil { - // A wrong part, - // the merkle trail does not hash to partSet2.Hash() - } else if !added { - // A duplicate part already received - } -} - -data2, _ := ioutil.ReadAll(partSet2.GetReader()) -bytes.Equal(data, data2) // true -``` +To dig deeper, check out the [types package documentation](https://godoc.org/github.com/tendermint/tendermint/types). diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index ab2d7cc1729..7e20277f420 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -34,7 +34,7 @@ fast_sync = true db_backend = "leveldb" # Database directory -db_path = "data" +db_dir = "data" # Output level for logging log_level = "state:info,*:error" @@ -77,6 +77,8 @@ grpc_laddr = "" # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. 
+# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 grpc_max_open_connections = 900 # Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool @@ -87,7 +89,9 @@ unsafe = false # If you want to accept more significant number than the default, make sure # you increase your OS limits. # 0 - unlimited. -max_open_connections = 450 +# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} +# 1024 - 40 - 10 - 50 = 924 = ~900 +max_open_connections = 900 ##### peer to peer configuration options ##### [p2p] @@ -108,13 +112,17 @@ upnp = false addr_book_file = "addrbook.json" # Set true for strict address routability rules +# Set false for private or local networks addr_book_strict = true # Time to wait before flushing messages out on the connection, in ms flush_throttle_timeout = 100 -# Maximum number of peers to connect to -max_num_peers = 50 +# Maximum number of inbound peers +max_num_inbound_peers = 40 + +# Maximum number of outbound peers to connect to, excluding persistent peers +max_num_outbound_peers = 10 # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = 1024 @@ -186,16 +194,21 @@ peer_query_maj23_sleep_duration = 2000 # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). indexer = "kv" -# Comma-separated list of tags to index (by default the only tag is tx hash) +# Comma-separated list of tags to index (by default the only tag is "tx.hash") # +# You can also index transactions by height by adding "tx.height" tag here. +# # It's recommended to index only a subset of tags due to possible memory # bloat. This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). +# When set to true, tells indexer to index all tags (predefined tags: +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# +# Note this may be not desirable (see the comment above). IndexTags has a +# precedence over IndexAllTags (i.e. when given both, IndexTags will be +# indexed). index_all_tags = false ##### instrumentation configuration options ##### diff --git a/docs/tendermint-core/how-to-read-logs.md b/docs/tendermint-core/how-to-read-logs.md index 83dab38704a..bf9b49252df 100644 --- a/docs/tendermint-core/how-to-read-logs.md +++ b/docs/tendermint-core/how-to-read-logs.md @@ -63,8 +63,8 @@ Next follows a standard block creation cycle, where we enter a new round, propose a block, receive more than 2/3 of prevotes, then precommits and finally have a chance to commit a block. For details, please refer to [Consensus -Overview](./introduction.md#consensus-overview) or [Byzantine Consensus -Algorithm](./spec/consensus). +Overview](../introduction/introduction.md#consensus-overview) or [Byzantine Consensus +Algorithm](../spec/consensus/consensus.md). ``` I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus @@ -112,7 +112,7 @@ I[10-04|13:54:30.410] Recheck txs module=mempoo Here is the list of modules you may encounter in Tendermint's log and a little overview what they do. 
-- `abci-client` As mentioned in [Application Development Guide](./app-development.md), Tendermint acts as an ABCI +- `abci-client` As mentioned in [Application Development Guide](../app-dev/app-development.md), Tendermint acts as an ABCI client with respect to the application and maintains 3 connections: mempool, consensus and query. The code used by Tendermint Core can be found [here](https://github.com/tendermint/tendermint/tree/develop/abci/client). @@ -133,9 +133,9 @@ little overview what they do. - `p2p` Provides an abstraction around peer-to-peer communication. For more details, please check out the [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). -- `rpc` [Tendermint's RPC](./specification/rpc.md). +- `rpc` [Tendermint's RPC](./rpc.md). - `rpc-server` RPC server. For implementation details, please read the - [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md). + [doc.go](https://github.com/tendermint/tendermint/blob/master/rpc/lib/doc.go). - `state` Represents the latest state and execution submodule, which executes blocks against the application. - `types` A collection of the publicly exposed types and methods to diff --git a/docs/tendermint-core/light-client-protocol.md b/docs/tendermint-core/light-client-protocol.md index 6d905be32fa..7318ad16f4e 100644 --- a/docs/tendermint-core/light-client-protocol.md +++ b/docs/tendermint-core/light-client-protocol.md @@ -18,13 +18,13 @@ proofs](./merkle.md#iavl-tree). ## Properties -- You get the full collateralized security benefits of Tendermint; No - need to wait for confirmations. -- You get the full speed benefits of Tendermint; transactions - commit instantly. -- You can get the most recent version of the application state - non-interactively (without committing anything to the blockchain). - For example, this means that you can get the most recent value of a - name from the name-registry without worrying about fork censorship - attacks, without posting a commit and waiting for confirmations. - It's fast, secure, and free! +- You get the full collateralized security benefits of Tendermint; No + need to wait for confirmations. +- You get the full speed benefits of Tendermint; transactions + commit instantly. +- You can get the most recent version of the application state + non-interactively (without committing anything to the blockchain). + For example, this means that you can get the most recent value of a + name from the name-registry without worrying about fork censorship + attacks, without posting a commit and waiting for confirmations. + It's fast, secure, and free! diff --git a/docs/tendermint-core/rpc.md b/docs/tendermint-core/rpc.md index 2f3a72c747e..51f34fc25f9 100644 --- a/docs/tendermint-core/rpc.md +++ b/docs/tendermint-core/rpc.md @@ -1,3 +1,5 @@ # RPC The RPC documentation is hosted [here](https://tendermint.github.io/slate) and is generated by the CI from our [Slate repo](https://github.com/tendermint/slate). To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core). + +NOTE: We will be moving the RPC documentation into the website in the near future. Stay tuned! 
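In the meantime, the RPC can be exercised directly over JSON-RPC. A minimal sketch, assuming a node listening on the default `localhost:26657`:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

// POST a JSON-RPC "status" request and print the raw response body.
func main() {
	req := bytes.NewBufferString(`{"jsonrpc":"2.0","id":"","method":"status","params":{}}`)
	resp, err := http.Post("http://localhost:26657", "application/json", req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```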
diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index 0947343207a..cb228be4a9f 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -28,7 +28,7 @@ send & receive rate per connection (`SendRate`, `RecvRate`). ### RPC Endpoints returning multiple entries are limited by default to return 30 -elements (100 max). +elements (100 max). See [here](./rpc.md) for more information about the RPC. Rate-limiting and authentication are other key aspects that help protect against DOS attacks. While in the future we may implement these @@ -40,12 +40,12 @@ to achieve the same things. ## Debugging Tendermint If you ever have to debug Tendermint, the first thing you should -probably do is to check out the logs. See ["How to read -logs"](./how-to-read-logs.md), where we explain what certain log +probably do is to check out the logs. See [How to read +logs](./how-to-read-logs.md), where we explain what certain log statements mean. If, after skimming through the logs, things are still not clear, the -second TODO is to query the /status RPC endpoint. It provides the +next thing to try is to query the /status RPC endpoint. It provides the necessary info: whether the node is syncing or not, what height it is on, etc. @@ -80,7 +80,7 @@ Other useful endpoints include the previously mentioned `/status`, `/net_info` and We have a small tool, called `tm-monitor`, which outputs information from the endpoints above plus some statistics. The tool can be found -[here](https://github.com/tendermint/tools/tree/master/tm-monitor). +[here](https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor). Tendermint can also report and serve Prometheus metrics. See [Metrics](./metrics.md). @@ -135,36 +135,37 @@ Tendermint, replay will fail with panic. Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take: -1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers. -2) Try to repair the WAL file manually: +1. Delete the WAL file and restart Tendermint. It will attempt to sync with other peers. +2. Try to repair the WAL file manually: - 1. Create a backup of the corrupted WAL file: +1. Create a backup of the corrupted WAL file: ``` cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup ``` - 2. Use `./scripts/wal2json` to create a human-readable version +2. Use `./scripts/wal2json` to create a human-readable version ``` ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal ``` - 3. Search for a "CORRUPTED MESSAGE" line. - 4. By looking at the previous message and the message after the corrupted one - and looking at the logs, try to rebuild the message. If the consequent - messages are marked as corrupted too (this may happen if length header - got corrupted or some writes did not make it to the WAL ~ truncation), - then remove all the lines starting from the corrupted one and restart - Tendermint. +3. Search for a "CORRUPTED MESSAGE" line. +4. By looking at the previous message and the message after the corrupted one + and looking at the logs, try to rebuild the message. If the subsequent + messages are marked as corrupted too (this may happen if the length header + got corrupted or some writes did not make it to the WAL ~ truncation), + then remove all the lines starting from the corrupted one and restart + Tendermint. ``` $EDITOR /tmp/corrupted_wal ``` - 5.
After editing, convert this file back into binary form by running: + +5. After editing, convert this file back into binary form by running: ``` -./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal" +./scripts/json2wal/json2wal /tmp/corrupted_wal $TMHOME/data/cs.wal/wal ``` ## Hardware @@ -206,14 +207,15 @@ operating systems (like Mac OS). ### Miscellaneous NOTE: if you are going to use Tendermint in a public domain, make sure -you read [hardware recommendations (see "4. -Hardware")](https://cosmos.network/validators) for a validator in the +you read [hardware recommendations](https://cosmos.network/validators) for a validator in the Cosmos network. ## Configuration parameters -- `p2p.flush_throttle_timeout` `p2p.max_packet_msg_payload_size` - `p2p.send_rate` `p2p.recv_rate` +- `p2p.flush_throttle_timeout` +- `p2p.max_packet_msg_payload_size` +- `p2p.send_rate` +- `p2p.recv_rate` If you are going to use Tendermint in a private domain and you have a private high-speed network among your peers, it makes sense to lower @@ -268,9 +270,9 @@ saving it to the address book. The address is considered routable if the IP is [valid and within allowed ranges](https://github.com/tendermint/tendermint/blob/27bd1deabe4ba6a2d9b463b8f3e3f1e31b993e61/p2p/netaddress.go#L209). -This may not be the case for private networks, where your IP range is usually +This may not be the case for private or local networks, where your IP range is usually strictly limited and private. In that case, you need to set `addr_book_strict` -to `false` (turn off). +to `false` (turn it off). - `rpc.max_open_connections` diff --git a/docs/tendermint-core/secure-p2p.md b/docs/tendermint-core/secure-p2p.md index aad5eac4113..01d2f22b533 100644 --- a/docs/tendermint-core/secure-p2p.md +++ b/docs/tendermint-core/secure-p2p.md @@ -3,10 +3,6 @@ The Tendermint p2p protocol uses an authenticated encryption scheme based on the [Station-to-Station Protocol](https://en.wikipedia.org/wiki/Station-to-Station_protocol). -The implementation uses -[golang's](https://godoc.org/golang.org/x/crypto/nacl/box) [nacl -box](http://nacl.cr.yp.to/box.html) for the actual authenticated -encryption algorithm. Each peer generates an ED25519 key-pair to use as a persistent (long-term) id. @@ -63,11 +59,15 @@ are connected to at least one validator. Authenticated encryption is enabled by default. +## Specification + +The full p2p specification can be found [here](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p). + ## Additional Reading -- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47) -- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and - Michael J. - Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf) -- [Further work on secret - handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf) +- [Implementation](https://github.com/tendermint/tendermint/blob/64bae01d007b5bee0d0827ab53259ffd5910b4e6/p2p/conn/secret_connection.go#L47) +- [Original STS paper by Whitfield Diffie, Paul C. van Oorschot and + Michael J.
+ Wiener](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.216.6107&rep=rep1&type=pdf) +- [Further work on secret + handshakes](https://dominictarr.github.io/secret-handshake-paper/shs.pdf) diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md index 11949c79847..28acc046bb1 100644 --- a/docs/tendermint-core/using-tendermint.md +++ b/docs/tendermint-core/using-tendermint.md @@ -39,20 +39,22 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g #### Fields -- `genesis_time`: Official time of blockchain start. -- `chain_id`: ID of the blockchain. This must be unique for - every blockchain. If your testnet blockchains do not have unique - chain IDs, you will have a bad time. -- `validators`: -- `pub_key`: The first element specifies the `pub_key` type. 1 - == Ed25519. The second element are the pubkey bytes. -- `power`: The validator's voting power. -- `name`: Name of the validator (optional). -- `app_hash`: The expected application hash (as returned by the - `ResponseInfo` ABCI message) upon genesis. If the app's hash does - not match, Tendermint will panic. -- `app_state`: The application state (e.g. initial distribution - of tokens). +- `genesis_time`: Official time of blockchain start. +- `chain_id`: ID of the blockchain. This must be unique for + every blockchain. If your testnet blockchains do not have unique + chain IDs, you will have a bad time. The ChainID must be less than 50 symbols. +- `validators`: List of initial validators. Note this may be overridden entirely by the + application, and may be left empty to make explicit that the + application will initialize the validator set with ResponseInitChain. + - `pub_key`: The first element specifies the `pub_key` type. 1 + == Ed25519. The second element contains the pubkey bytes. + - `power`: The validator's voting power. + - `name`: Name of the validator (optional). +- `app_hash`: The expected application hash (as returned by the + `ResponseInfo` ABCI message) upon genesis. If the app's hash does + not match, Tendermint will panic. +- `app_state`: The application state (e.g. initial distribution + of tokens). #### Sample genesis.json @@ -93,8 +95,7 @@ definition](https://github.com/tendermint/tendermint/blob/master/types/genesis.g "power": "1", "name": "node3" } - ], - "app_hash": "" + ] } ``` @@ -151,7 +152,7 @@ and the `latest_app_hash` in particular: curl http://localhost:26657/status | json_pp | grep latest_app_hash ``` -Visit http://localhost:26657> in your browser to see the list of other +Visit http://localhost:26657 in your browser to see the list of other endpoints. Some take no arguments (like `/status`), while others specify the argument name and use `_` as a placeholder. @@ -224,7 +225,7 @@ new blockchain will not make any blocks. ## Configuration Tendermint uses a `config.toml` for configuration. For details, see [the -config specification](./specification/configuration.md). +config specification](./tendermint-core/configuration.md). Notable options include the socket address of the application (`proxy_app`), the listening address of the Tendermint peer @@ -235,8 +236,7 @@ Some fields from the config file can be overwritten with flags. ## No Empty Blocks -This much requested feature was implemented in version 0.10.3.
While the -default behaviour of `tendermint` is still to create blocks +While the default behaviour of `tendermint` is still to create blocks approximately once per second, it is possible to disable empty blocks or set a block creation interval. In the former case, blocks will be created when there are new transactions or when the AppHash changes. @@ -365,10 +365,7 @@ case, the genesis file contains the public key of our root directory will be able to make progress. Voting power uses an int64 but must be positive, thus the range is: 0 through 9223372036854775807. Because of how the current proposer selection algorithm works, we do not -recommend having voting powers greater than 10\^12 (ie. 1 trillion) (see -[Proposals section of Byzantine Consensus -Algorithm](./specification/byzantine-consensus-algorithm.md#proposals) -for details). +recommend having voting powers greater than 10\^12 (i.e. 1 trillion). If we want to add more nodes to the network, we have two choices: we can add a new validator node, who will also participate in the consensus by @@ -520,14 +517,14 @@ failing, you need at least four validator nodes (e.g., 2/3). Updating validators in a live network is supported but must be explicitly programmed by the application developer. See the [application -developers guide](./app-development.md) for more details. +developers guide](../app-dev/app-development.md) for more details. ### Local Network To run a network locally, say on a single machine, you must change the `_laddr` fields in the `config.toml` (or using the flags) so that the listening addresses of the various sockets don't conflict. Additionally, -you must set `addrbook_strict=false` in the `config.toml`, otherwise +you must set `addr_book_strict=false` in the `config.toml`, otherwise Tendermint's p2p library will deny making connections to peers with the same IP address. diff --git a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md index 0c1d7d89a7f..5513886ad72 100644 --- a/docs/tendermint-core/validators.md +++ b/docs/tendermint-core/validators.md @@ -2,7 +2,7 @@ Validators are responsible for committing new blocks in the blockchain. These validators participate in the consensus protocol by broadcasting -*votes* which contain cryptographic signatures signed by each +_votes_ which contain cryptographic signatures signed by each validator's private key. Some Proof-of-Stake consensus algorithms aim to create a "completely" @@ -28,12 +28,12 @@ There are two ways to become a validator. ## Committing a Block -*+2/3 is short for "more than 2/3"* +_+2/3 is short for "more than 2/3"_ A block is committed when +2/3 of the validator set sign [precommit votes](../spec/blockchain/blockchain.md#vote) for that block at the same `round`. The +2/3 set of precommit votes is called a -[*commit*](../spec/blockchain/blockchain.md#commit). While any +2/3 set of +[_commit_](../spec/blockchain/blockchain.md#commit). While any +2/3 set of precommits for the same block at the same height&round can serve as validation, the canonical commit is included in the next block (see [LastCommit](../spec/blockchain/blockchain.md#last-commit)). diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 20c368e2954..691d3b6edf4 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -23,7 +23,7 @@ Blocks/sec 0.818 0.386 1 9 [Install Tendermint](../introduction/install) This is currently set up to work on tendermint's develop branch. Please ensure you are on that branch.
(If not, update `tendermint` and `tmlibs` in gopkg.toml to use - the master branch.) +the master branch.) then run: @@ -32,7 +32,7 @@ tendermint init tendermint node --proxy_app=kvstore ``` -``` +``` tm-bench localhost:26657 ``` diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index 5cc2ad3b132..bd0105c8e78 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -3,7 +3,7 @@ Tendermint blockchain monitoring tool; watches over one or more nodes, collecting and providing various statistics to the user: -- https://github.com/tendermint/tools/tree/master/tm-monitor +- https://github.com/tendermint/tendermint/tree/master/tools/tm-monitor ## Quick Start @@ -26,6 +26,7 @@ use `kvstore`: docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init docker run -it --rm -v "/tmp:/tendermint" -p "26657:26657" --name=tm tendermint/tendermint node --proxy_app=kvstore ``` + ``` docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 ``` @@ -71,7 +72,7 @@ Flags: Run `tm-monitor` and visit http://localhost:26670 You should see the list of the available RPC endpoints: -``` +``` http://localhost:26670/status http://localhost:26670/status/network http://localhost:26670/monitor?endpoint=_ diff --git a/docs/yarn.lock b/docs/yarn.lock index 5591b8fa04b..4f453ed47d2 100644 --- a/docs/yarn.lock +++ b/docs/yarn.lock @@ -16,27 +16,27 @@ version "0.7.0" resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.7.0.tgz#9a06f4f137ee84d7df0460c1fdb1135ffa6c50fd" -"@textlint/ast-node-types@^4.0.2": - version "4.0.2" - resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.2.tgz#5386a15187798efb48eb71fa1cbf6ca2770b206a" +"@textlint/ast-node-types@^4.0.2", "@textlint/ast-node-types@^4.0.3": + version "4.0.3" + resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-4.0.3.tgz#b51c87bb86022323f764fbdc976b173f19261cc5" -"@textlint/ast-traverse@^2.0.8": - version "2.0.8" - resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.8.tgz#c180fe23dc3b8a6aa68539be70efb4ff17c38a3a" +"@textlint/ast-traverse@^2.0.8", "@textlint/ast-traverse@^2.0.9": + version "2.0.9" + resolved "https://registry.yarnpkg.com/@textlint/ast-traverse/-/ast-traverse-2.0.9.tgz#4bf427cf01b7195013e75d27540a77ad68c363d9" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" -"@textlint/feature-flag@^3.0.4": - version "3.0.4" - resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.4.tgz#4290a4bb53da28c1f5f1d5ce0f4ae6630ab939ea" +"@textlint/feature-flag@^3.0.4", "@textlint/feature-flag@^3.0.5": + version "3.0.5" + resolved "https://registry.yarnpkg.com/@textlint/feature-flag/-/feature-flag-3.0.5.tgz#3783e0f2661053d2a74fdad775993395a2d530b4" dependencies: map-like "^2.0.0" "@textlint/fixer-formatter@^3.0.7": - version "3.0.7" - resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.7.tgz#4ef15d5e606e2d32b89257afd382ed9dbb218846" + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/fixer-formatter/-/fixer-formatter-3.0.8.tgz#90ef804c60b9e694c8c048a06febbf1f331abd49" dependencies: - "@textlint/kernel" "^2.0.9" + "@textlint/kernel" "^3.0.0" chalk "^1.1.3" debug "^2.1.0" diff "^2.2.2" @@ -60,13 +60,28 @@ object-assign "^4.1.1" structured-source "^3.0.2" +"@textlint/kernel@^3.0.0": + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/@textlint/kernel/-/kernel-3.0.0.tgz#ba10962acff64f17b9e5fce8089a40f1f8880dcd" + dependencies: + "@textlint/ast-node-types" "^4.0.3" + "@textlint/ast-traverse" "^2.0.9" + "@textlint/feature-flag" "^3.0.5" + "@types/bluebird" "^3.5.18" + bluebird "^3.5.1" + debug "^2.6.6" + deep-equal "^1.0.1" + map-like "^2.0.0" + object-assign "^4.1.1" + structured-source "^3.0.2" + "@textlint/linter-formatter@^3.0.7": - version "3.0.7" - resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.7.tgz#66716cac94c047d94627a7e6af427a0d199eda7c" + version "3.0.8" + resolved "https://registry.yarnpkg.com/@textlint/linter-formatter/-/linter-formatter-3.0.8.tgz#030aa03ff3d85dda94ca9fa9e6bf824f9c1cb7ef" dependencies: "@azu/format-text" "^1.0.1" "@azu/style-format" "^1.0.0" - "@textlint/kernel" "^2.0.9" + "@textlint/kernel" "^3.0.0" chalk "^1.0.0" concat-stream "^1.5.1" js-yaml "^3.2.4" @@ -81,10 +96,10 @@ xml-escape "^1.0.0" "@textlint/markdown-to-ast@^6.0.8": - version "6.0.8" - resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.8.tgz#baa509c42f842b4dba36ad91547a288c063396b8" + version "6.0.9" + resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-6.0.9.tgz#e7c89e5ad15d17dcd8e5a62758358936827658fa" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" debug "^2.1.3" remark-frontmatter "^1.2.0" remark-parse "^5.0.0" @@ -93,10 +108,10 @@ unified "^6.1.6" "@textlint/text-to-ast@^3.0.8": - version "3.0.8" - resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.8.tgz#6211977f369cec484447867f10dc155120f4c082" + version "3.0.9" + resolved "https://registry.yarnpkg.com/@textlint/text-to-ast/-/text-to-ast-3.0.9.tgz#dcb63f09cc79ea2096fc823c3b6cd07c79a060b5" dependencies: - "@textlint/ast-node-types" "^4.0.2" + "@textlint/ast-node-types" "^4.0.3" "@textlint/textlint-plugin-markdown@^4.0.10": version "4.0.10" @@ -111,13 +126,17 @@ "@textlint/text-to-ast" "^3.0.8" "@types/bluebird@^3.5.18": - version "3.5.21" - resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.21.tgz#567615589cc913e84a28ecf9edb031732bdf2634" + version "3.5.23" + resolved "https://registry.yarnpkg.com/@types/bluebird/-/bluebird-3.5.23.tgz#e805da976b76892b2b2e50eec29e84914c730670" abbrev@1: version "1.1.1" resolved "https://registry.yarnpkg.com/abbrev/-/abbrev-1.1.1.tgz#f8f2c887ad10bf67f634f005b6987fed3179aac8" +adverb-where@0.0.9: + version "0.0.9" + resolved "https://registry.yarnpkg.com/adverb-where/-/adverb-where-0.0.9.tgz#09c5cddd8d503b9fe5f76e0b8dc5c70a8f193e34" + aggregate-error@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-1.0.0.tgz#888344dad0220a72e3af50906117f48771925fac" @@ -220,8 +239,10 @@ arrify@^1.0.0: resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d" asn1@~0.2.3: - version "0.2.3" - resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.3.tgz#dac8787713c9966849fc8180777ebe9c1ddf3b86" + version "0.2.4" + resolved "https://registry.yarnpkg.com/asn1/-/asn1-0.2.4.tgz#8d2475dfab553bb33e77b54e59e880bb8ce23136" + dependencies: + safer-buffer "~2.1.0" assert-plus@1.0.0, assert-plus@^1.0.0: version "1.0.0" @@ -240,8 +261,8 @@ aws-sign2@~0.7.0: resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8" aws4@^1.6.0: - version "1.7.0" - resolved 
"https://registry.yarnpkg.com/aws4/-/aws4-1.7.0.tgz#d4d0e9b9dbfca77bf08eeb0a8a471550fe39e289" + version "1.8.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.8.0.tgz#f0e003d9ca9e7f59c7a508945d7b2ef9a04a542f" bail@^1.0.0: version "1.0.3" @@ -285,8 +306,8 @@ braces@^1.8.2: repeat-element "^1.1.2" buffer-from@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.0.tgz#87fcaa3a298358e0ade6e442cfce840740d1ad04" + version "1.1.1" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.1.tgz#32713bc028f75c02fdb710d7c7bcec1f2c6070ef" builtin-modules@^1.0.0: version "1.1.1" @@ -460,7 +481,7 @@ dashdash@^1.12.0: dependencies: assert-plus "^1.0.0" -debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.2.0, debug@^2.6.6: +debug@^2.1.0, debug@^2.1.2, debug@^2.1.3, debug@^2.6.6: version "2.6.9" resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" dependencies: @@ -546,11 +567,16 @@ duplexer3@^0.1.4: version "0.1.4" resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" +e-prime@^0.10.2: + version "0.10.2" + resolved "https://registry.yarnpkg.com/e-prime/-/e-prime-0.10.2.tgz#ea9375eb985636de88013c7a9fb129ad9e15eff8" + ecc-jsbn@~0.1.1: - version "0.1.1" - resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.1.tgz#0fc73a9ed5f0d53c38193398523ef7e543777505" + version "0.1.2" + resolved "https://registry.yarnpkg.com/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz#3a83a904e54353287874c564b7549386849a98c9" dependencies: jsbn "~0.1.0" + safer-buffer "^2.1.0" error-ex@^1.2.0, error-ex@^1.3.1: version "1.3.2" @@ -581,8 +607,8 @@ escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" esprima@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.0.tgz#4499eddcd1110e0b218bacf2fa7f7f59f55ca804" + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" expand-brackets@^0.1.4: version "0.1.5" @@ -597,8 +623,8 @@ expand-range@^1.8.1: fill-range "^2.1.0" extend@^3.0.0, extend@~3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.1.tgz#a755ea7bc1adfcc5a31ce7e762dbaadc5e636444" + version "3.0.2" + resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa" extglob@^0.3.1: version "0.3.2" @@ -863,6 +889,10 @@ has-symbol-support-x@^1.4.1: version "1.4.2" resolved "https://registry.yarnpkg.com/has-symbol-support-x/-/has-symbol-support-x-1.4.2.tgz#1409f98bc00247da45da67cee0a36f282ff26455" +has-symbols@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.0.tgz#ba1a8f1af2a0fc39650f5c850367704122063b44" + has-to-string-tag-x@^1.2.0: version "1.4.1" resolved "https://registry.yarnpkg.com/has-to-string-tag-x/-/has-to-string-tag-x-1.4.1.tgz#a045ab383d7b4b2012a00148ab0aa5f290044d4d" @@ -880,8 +910,8 @@ has@^1.0.1: function-bind "^1.1.1" hosted-git-info@^2.1.4: - version "2.6.1" - resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.6.1.tgz#6e4cee78b01bb849dcf93527708c69fdbee410df" + version "2.7.1" + resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.7.1.tgz#97f236977bd6e125408930ff6de3eec6281ec047" http-cache-semantics@3.8.1: version "3.8.1" @@ -1156,8 +1186,8 @@ 
isarray@1.0.0, isarray@~1.0.0: resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" isemail@^3.1.2: - version "3.1.2" - resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.2.tgz#937cf919002077999a73ea8b1951d590e84e01dd" + version "3.1.3" + resolved "https://registry.yarnpkg.com/isemail/-/isemail-3.1.3.tgz#64f37fc113579ea12523165c3ebe3a71a56ce571" dependencies: punycode "2.x.x" @@ -1178,7 +1208,7 @@ isurl@^1.0.0-alpha5: has-to-string-tag-x "^1.2.0" is-object "^1.0.1" -js-yaml@^3.2.4, js-yaml@^3.6.1: +js-yaml@^3.12.0, js-yaml@^3.2.4, js-yaml@^3.6.1: version "3.12.0" resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.12.0.tgz#eaed656ec8344f10f527c6bfa1b6e2244de167d1" dependencies: @@ -1215,10 +1245,16 @@ json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" -json5@^0.5.0, json5@^0.5.1: +json5@^0.5.1: version "0.5.1" resolved "https://registry.yarnpkg.com/json5/-/json5-0.5.1.tgz#1eade7acc012034ad84e2396767ead9fa5495821" +json5@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json5/-/json5-1.0.1.tgz#779fb0018604fa854eacbf6252180d83543e3dbe" + dependencies: + minimist "^1.2.0" + jsonify@~0.0.0: version "0.0.0" resolved "https://registry.yarnpkg.com/jsonify/-/jsonify-0.0.0.tgz#2c74b6ee41d93ca51b7b5aaee8f503631d252a73" @@ -1297,7 +1333,7 @@ locate-path@^2.0.0: p-locate "^2.0.0" path-exists "^3.0.0" -lodash@^4.0.0: +lodash@^4.0.0, lodash@^4.17.4: version "4.17.10" resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.10.tgz#1b7793cf7259ea38fb3661d4d38b3260af8ae4e7" @@ -1372,19 +1408,19 @@ micromatch@^2.1.5: parse-glob "^3.0.4" regex-cache "^0.4.2" -mime-db@~1.33.0: - version "1.33.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.33.0.tgz#a3492050a5cb9b63450541e39d9788d2272783db" +mime-db@~1.35.0: + version "1.35.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.35.0.tgz#0569d657466491283709663ad379a99b90d9ab47" mime-types@^2.1.12, mime-types@~2.1.17: - version "2.1.18" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.18.tgz#6f323f60a83d11146f831ff11fd66e2fe5503bb8" + version "2.1.19" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.19.tgz#71e464537a7ef81c15f2db9d97e913fc0ff606f0" dependencies: - mime-db "~1.33.0" + mime-db "~1.35.0" mimic-response@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.0.tgz#df3d3652a73fded6b9b0b24146e6fd052353458e" + version "1.0.1" + resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" minimatch@^3.0.2, minimatch@^3.0.4: version "3.0.4" @@ -1431,7 +1467,7 @@ nan@^2.9.2: version "2.10.0" resolved "https://registry.yarnpkg.com/nan/-/nan-2.10.0.tgz#96d0cd610ebd58d4b4de9cc0c6828cda99c7548f" -needle@^2.2.0: +needle@^2.2.1: version "2.2.1" resolved "https://registry.yarnpkg.com/needle/-/needle-2.2.1.tgz#b5e325bd3aae8c2678902fa296f729455d1d3a7d" dependencies: @@ -1439,13 +1475,21 @@ needle@^2.2.0: iconv-lite "^0.4.4" sax "^1.2.4" +nlcst-to-string@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/nlcst-to-string/-/nlcst-to-string-2.0.2.tgz#7125af4d4d369850c697192a658f01f36af9937b" + +no-cliches@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/no-cliches/-/no-cliches-0.1.0.tgz#f4eb81a551fecde813f8c611e35e64a5118dc38c" + 
node-pre-gyp@^0.10.0: - version "0.10.2" - resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.2.tgz#e8945c20ef6795a20aac2b44f036eb13cf5146e3" + version "0.10.3" + resolved "https://registry.yarnpkg.com/node-pre-gyp/-/node-pre-gyp-0.10.3.tgz#3070040716afdc778747b61b6887bf78880b80fc" dependencies: detect-libc "^1.0.2" mkdirp "^0.5.1" - needle "^2.2.0" + needle "^2.2.1" nopt "^4.0.1" npm-packlist "^1.1.6" npmlog "^4.0.2" @@ -1489,8 +1533,8 @@ npm-bundled@^1.0.1: resolved "https://registry.yarnpkg.com/npm-bundled/-/npm-bundled-1.0.3.tgz#7e71703d973af3370a9591bafe3a63aca0be2308" npm-packlist@^1.1.6: - version "1.1.10" - resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.10.tgz#1039db9e985727e464df066f4cf0ab6ef85c398a" + version "1.1.11" + resolved "https://registry.yarnpkg.com/npm-packlist/-/npm-packlist-1.1.11.tgz#84e8c683cbe7867d34b1d357d893ce29e28a02de" dependencies: ignore-walk "^3.0.1" npm-bundled "^1.0.1" @@ -1524,10 +1568,19 @@ object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: version "4.1.1" resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" -object-keys@^1.0.8, object-keys@^1.0.9: +object-keys@^1.0.11, object-keys@^1.0.12, object-keys@^1.0.8: version "1.0.12" resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.0.12.tgz#09c53855377575310cca62f55bb334abff7b3ed2" +object.assign@^4.0.4: + version "4.1.0" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.0.tgz#968bf1100d7956bb3ca086f006f846b3bc4008da" + dependencies: + define-properties "^1.1.2" + function-bind "^1.1.1" + has-symbols "^1.0.0" + object-keys "^1.0.11" + object.omit@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/object.omit/-/object.omit-2.0.1.tgz#1a9c744829f39dbb858c76ca3579ae2a54ebd1fa" @@ -1652,11 +1705,9 @@ parse-json@^4.0.0: error-ex "^1.3.1" json-parse-better-errors "^1.0.1" -path-exists@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-2.1.0.tgz#0feb6c64f0fc518d9a754dd5efb62c7022761f4b" - dependencies: - pinkie-promise "^2.0.0" +passive-voice@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/passive-voice/-/passive-voice-0.1.0.tgz#16ff91ae40ba0e92c43e671763fdc842a70270b1" path-exists@^3.0.0: version "3.0.0" @@ -1731,8 +1782,8 @@ preserve@^0.2.0: resolved "https://registry.yarnpkg.com/preserve/-/preserve-0.2.0.tgz#815ed1f6ebc65926f865b310c0713bcb3315ce4b" prettier@^1.13.7: - version "1.13.7" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.13.7.tgz#850f3b8af784a49a6ea2d2eaa7ed1428a34b7281" + version "1.14.2" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-1.14.2.tgz#0ac1c6e1a90baa22a62925f41963c841983282f9" process-nextick-args@~2.0.0: version "2.0.0" @@ -1768,24 +1819,24 @@ query-string@^5.0.1: strict-uri-encode "^1.0.0" randomatic@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.0.0.tgz#d35490030eb4f7578de292ce6dfb04a91a128923" + version "3.1.0" + resolved "https://registry.yarnpkg.com/randomatic/-/randomatic-3.1.0.tgz#36f2ca708e9e567f5ed2ec01949026d50aa10116" dependencies: is-number "^4.0.0" kind-of "^6.0.0" math-random "^1.0.1" rc-config-loader@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.1.tgz#8c8452f59bdd10d448a67762dccf7c1b247db860" + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/rc-config-loader/-/rc-config-loader-2.0.2.tgz#46eb2f98fb5b2aa7b1119d66c0554de5133f1bc1" dependencies: - debug "^2.2.0" - js-yaml "^3.6.1" - json5 "^0.5.0" + debug "^3.1.0" + js-yaml "^3.12.0" + json5 "^1.0.1" object-assign "^4.1.0" - object-keys "^1.0.9" - path-exists "^2.1.0" - require-from-string "^2.0.1" + object-keys "^1.0.12" + path-exists "^3.0.0" + require-from-string "^2.0.2" rc@^1.1.0, rc@^1.2.7: version "1.2.8" @@ -1871,6 +1922,15 @@ remark-lint-no-dead-urls@^0.3.0: unified-lint-rule "^1.0.1" unist-util-visit "^1.1.3" +remark-lint-write-good@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/remark-lint-write-good/-/remark-lint-write-good-1.0.3.tgz#daa4cf122212cfa06e437702ef7b43a12875bd5d" + dependencies: + nlcst-to-string "^2.0.0" + unified-lint-rule "^1.0.1" + unist-util-visit "^1.1.1" + write-good "^0.11.1" + remark-parse@^5.0.0: version "5.0.0" resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-5.0.0.tgz#4c077f9e499044d1d5c13f80d7a98cf7b9285d95" @@ -1959,7 +2019,7 @@ request@^2.87.0: tunnel-agent "^0.6.0" uuid "^3.1.0" -require-from-string@^2.0.1: +require-from-string@^2.0.2: version "2.0.2" resolved "https://registry.yarnpkg.com/require-from-string/-/require-from-string-2.0.2.tgz#89a7fdd938261267318eafe14f9c32e598c36909" @@ -1983,7 +2043,7 @@ safe-buffer@^5.0.1, safe-buffer@^5.1.1, safe-buffer@^5.1.2, safe-buffer@~5.1.0, version "5.1.2" resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" -"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2: +"safer-buffer@>= 2.1.2 < 3", safer-buffer@^2.0.2, safer-buffer@^2.1.0, safer-buffer@~2.1.0: version "2.1.2" resolved "https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" @@ -2047,6 +2107,10 @@ spdx-license-ids@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.0.tgz#7a7cd28470cc6d3a1cfe6d66886f6bc430d3ac87" +split-lines@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/split-lines/-/split-lines-1.1.0.tgz#3abba8f598614142f9db8d27ab6ab875662a1e09" + sprintf-js@~1.0.2: version "1.0.3" resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" @@ -2172,8 +2236,8 @@ table@^3.7.8: string-width "^2.0.0" tar@^4: - version "4.4.4" - resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.4.tgz#ec8409fae9f665a4355cc3b4087d0820232bb8cd" + version "4.4.6" + resolved "https://registry.yarnpkg.com/tar/-/tar-4.4.6.tgz#63110f09c00b4e60ac8bcfe1bf3c8660235fbc9b" dependencies: chownr "^1.0.1" fs-minipass "^1.2.5" @@ -2187,6 +2251,20 @@ text-table@^0.2.0: version "0.2.0" resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" +textlint-rule-helper@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/textlint-rule-helper/-/textlint-rule-helper-2.0.0.tgz#95cb4696c95c4258d2e3389e9e64b849f9721382" + dependencies: + unist-util-visit "^1.1.0" + +textlint-rule-stop-words@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/textlint-rule-stop-words/-/textlint-rule-stop-words-1.0.3.tgz#fe2f40cbe5837331b2a09fdec57cc71758093bf0" + dependencies: + lodash "^4.17.4" + split-lines "^1.1.0" + textlint-rule-helper "^2.0.0" + textlint@^10.2.1: version "10.2.1" resolved "https://registry.yarnpkg.com/textlint/-/textlint-10.2.1.tgz#ee22b7967d59cef7c74a04a5f4e8883134e5c79d" @@ -2234,6 +2312,10 @@ 
to-vfile@^2.0.0: vfile "^2.0.0" x-is-function "^1.0.4" +too-wordy@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/too-wordy/-/too-wordy-0.1.4.tgz#8e7b20a7b7a4d8fc3759f4e00c4929993d1b12f0" + tough-cookie@~2.3.3: version "2.3.4" resolved "https://registry.yarnpkg.com/tough-cookie/-/tough-cookie-2.3.4.tgz#ec60cee38ac675063ffc97a5c18970578ee83655" @@ -2253,8 +2335,8 @@ trim@0.0.1: resolved "https://registry.yarnpkg.com/trim/-/trim-0.0.1.tgz#5858547f6b290757ee95cccc666fb50084c460dd" trough@^1.0.0: - version "1.0.2" - resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.2.tgz#7f1663ec55c480139e2de5e486c6aef6cc24a535" + version "1.0.3" + resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.3.tgz#e29bd1614c6458d44869fc28b255ab7857ef7c24" try-resolve@^1.0.1: version "1.0.1" @@ -2351,7 +2433,7 @@ unist-util-inspect@^4.1.2: dependencies: is-empty "^1.0.0" -unist-util-is@^2.1.1: +unist-util-is@^2.1.2: version "2.1.2" resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-2.1.2.tgz#1193fa8f2bfbbb82150633f3a8d2eb9a1c1d55db" @@ -2371,11 +2453,17 @@ unist-util-stringify-position@^1.0.0, unist-util-stringify-position@^1.1.1: version "1.1.2" resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-1.1.2.tgz#3f37fcf351279dcbca7480ab5889bb8a832ee1c6" -unist-util-visit@^1.1.0, unist-util-visit@^1.1.3: - version "1.3.1" - resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.3.1.tgz#c019ac9337a62486be58531bc27e7499ae7d55c7" +unist-util-visit-parents@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-2.0.1.tgz#63fffc8929027bee04bfef7d2cce474f71cb6217" dependencies: - unist-util-is "^2.1.1" + unist-util-is "^2.1.2" + +unist-util-visit@^1.1.0, unist-util-visit@^1.1.1, unist-util-visit@^1.1.3: + version "1.4.0" + resolved "https://registry.yarnpkg.com/unist-util-visit/-/unist-util-visit-1.4.0.tgz#1cb763647186dc26f5e1df5db6bd1e48b3cc2fb1" + dependencies: + unist-util-visit-parents "^2.0.0" untildify@^2.1.0: version "2.1.0" @@ -2412,8 +2500,8 @@ uuid@^3.1.0: resolved "https://registry.yarnpkg.com/uuid/-/uuid-3.3.2.tgz#1b4af4955eb3077c501c23872fc6513811587131" validate-npm-package-license@^3.0.1: - version "3.0.3" - resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.3.tgz#81643bcbef1bdfecd4623793dc4648948ba98338" + version "3.0.4" + resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a" dependencies: spdx-correct "^3.0.0" spdx-expression-parse "^3.0.0" @@ -2459,6 +2547,10 @@ vfile@^2.0.0: unist-util-stringify-position "^1.0.0" vfile-message "^1.0.0" +weasel-words@^0.1.1: + version "0.1.1" + resolved "https://registry.yarnpkg.com/weasel-words/-/weasel-words-0.1.1.tgz#7137946585c73fe44882013853bd000c5d687a4e" + wide-align@^1.1.0: version "1.1.3" resolved "https://registry.yarnpkg.com/wide-align/-/wide-align-1.1.3.tgz#ae074e6bdc0c14a431e804e624549c633b000457" @@ -2480,6 +2572,18 @@ wrappy@1: version "1.0.2" resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" +write-good@^0.11.1: + version "0.11.3" + resolved "https://registry.yarnpkg.com/write-good/-/write-good-0.11.3.tgz#8eeb5da9a8e155dafb1325d27eba33cb67d24d8c" + dependencies: + adverb-where "0.0.9" + e-prime "^0.10.2" + no-cliches "^0.1.0" + object.assign "^4.0.4" + passive-voice "^0.1.0" + 
too-wordy "^0.1.4" + weasel-words "^0.1.1" + write@^0.2.1: version "0.2.1" resolved "https://registry.yarnpkg.com/write/-/write-0.2.1.tgz#5fc03828e264cea3fe91455476f7a3c566cb0757" diff --git a/evidence/pool.go b/evidence/pool.go index 247629b6be4..21cab5e0746 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -57,9 +57,10 @@ func (evpool *EvidencePool) PriorityEvidence() []types.Evidence { return evpool.evidenceStore.PriorityEvidence() } -// PendingEvidence returns all uncommitted evidence. -func (evpool *EvidencePool) PendingEvidence() []types.Evidence { - return evpool.evidenceStore.PendingEvidence() +// PendingEvidence returns uncommitted evidence up to maxBytes. +// If maxBytes is -1, all evidence is returned. +func (evpool *EvidencePool) PendingEvidence(maxBytes int) []types.Evidence { + return evpool.evidenceStore.PendingEvidence(maxBytes) } // State returns the current state of the evpool. diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 915cba32753..159ae7cd384 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -3,13 +3,13 @@ package evidence import ( "sync" "testing" - "time" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tendermint/libs/db" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tendermint/libs/db" + tmtime "github.com/tendermint/tendermint/types/time" ) var mockState = sm.State{} @@ -25,8 +25,9 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { } state := sm.State{ LastBlockHeight: 0, - LastBlockTime: time.Now(), + LastBlockTime: tmtime.Now(), Validators: valSet, + NextValidators: valSet.CopyIncrementAccum(1), LastHeightValidatorsChanged: 1, ConsensusParams: types.ConsensusParams{ EvidenceParams: types.EvidenceParams{ diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 1687f25a39c..23fd008af4d 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -79,11 +79,11 @@ func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceR func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) { evpool := reactors[reactorIdx].evpool - for len(evpool.PendingEvidence()) != len(evs) { + for len(evpool.PendingEvidence(-1)) != len(evs) { time.Sleep(time.Millisecond * 100) } - reapedEv := evpool.PendingEvidence() + reapedEv := evpool.PendingEvidence(-1) // put the reaped evidence in a map so we can quickly check we got everything evMap := make(map[string]types.Evidence) for _, e := range reapedEv { diff --git a/evidence/store.go b/evidence/store.go index 20b37bdb27a..60656f052c7 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -3,8 +3,8 @@ package evidence import ( "fmt" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) /* @@ -78,7 +78,7 @@ func NewEvidenceStore(db dbm.DB) *EvidenceStore { // PriorityEvidence returns the evidence from the outqueue, sorted by highest priority. 
func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { // reverse the order so highest priority is first - l := store.ListEvidence(baseKeyOutqueue) + l := store.listEvidence(baseKeyOutqueue, -1) l2 := make([]types.Evidence, len(l)) for i := range l { l2[i] = l[len(l)-1-i] @@ -86,18 +86,26 @@ func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { return l2 } -// PendingEvidence returns all known uncommitted evidence. -func (store *EvidenceStore) PendingEvidence() (evidence []types.Evidence) { - return store.ListEvidence(baseKeyPending) +// PendingEvidence returns known uncommitted evidence up to maxBytes. +// If maxBytes is -1, all evidence is returned. +func (store *EvidenceStore) PendingEvidence(maxBytes int) (evidence []types.Evidence) { + return store.listEvidence(baseKeyPending, maxBytes) } -// ListEvidence lists the evidence for the given prefix key. +// listEvidence lists the evidence for the given prefix key up to maxBytes. // It is wrapped by PriorityEvidence and PendingEvidence for convenience. -func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evidence) { +// If maxBytes is -1, there's no cap on the size of returned evidence. +func (store *EvidenceStore) listEvidence(prefixKey string, maxBytes int) (evidence []types.Evidence) { + var bytes int iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) for ; iter.Valid(); iter.Next() { val := iter.Value() + if maxBytes > 0 && bytes+len(val) > maxBytes { + return evidence + } + bytes += len(val) + var ei EvidenceInfo err := cdc.UnmarshalBinaryBare(val, &ei) if err != nil { diff --git a/evidence/store_test.go b/evidence/store_test.go index 30dc1c4d5c2..35eb28d01f3 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) //------------------------------------------- @@ -35,7 +35,7 @@ func TestStoreMark(t *testing.T) { // before we do anything, priority/pending are empty priorityEv := store.PriorityEvidence() - pendingEv := store.PendingEvidence() + pendingEv := store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(0, len(pendingEv)) @@ -53,21 +53,21 @@ func TestStoreMark(t *testing.T) { // new evidence should be returns in priority/pending priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(1, len(priorityEv)) assert.Equal(1, len(pendingEv)) // priority is now empty store.MarkEvidenceAsBroadcasted(ev) priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(1, len(pendingEv)) // priority and pending are now empty store.MarkEvidenceAsCommitted(ev) priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() + pendingEv = store.PendingEvidence(-1) assert.Equal(0, len(priorityEv)) assert.Equal(0, len(pendingEv)) diff --git a/libs/autofile/group.go b/libs/autofile/group.go index 500bdbc7ce7..286447cdaa9 100644 --- a/libs/autofile/group.go +++ b/libs/autofile/group.go @@ -726,11 +726,11 @@ func (gr *GroupReader) SetIndex(index int) error { func MakeSimpleSearchFunc(prefix string, target int) SearchFunc { return func(line string) (int, error) { if !strings.HasPrefix(line, prefix) { - return -1, errors.New(cmn.Fmt("Marker line did not 
have prefix: %v", prefix)) + return -1, fmt.Errorf("Marker line did not have prefix: %v", prefix) } i, err := strconv.Atoi(line[len(prefix):]) if err != nil { - return -1, errors.New(cmn.Fmt("Failed to parse marker line: %v", err.Error())) + return -1, fmt.Errorf("Failed to parse marker line: %v", err.Error()) } if target < i { return 1, nil diff --git a/libs/clist/bench_test.go b/libs/clist/bench_test.go new file mode 100644 index 00000000000..95973cc7678 --- /dev/null +++ b/libs/clist/bench_test.go @@ -0,0 +1,46 @@ +package clist + +import "testing" + +func BenchmarkDetaching(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.removed = true + start.DetachNext() + start.DetachPrev() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +// This is used to benchmark the time of RMutex. +func BenchmarkRemoved(b *testing.B) { + lst := New() + for i := 0; i < b.N+1; i++ { + lst.PushBack(i) + } + start := lst.Front() + nxt := start.Next() + b.ResetTimer() + for i := 0; i < b.N; i++ { + start.Removed() + tmp := nxt + nxt = nxt.Next() + start = tmp + } +} + +func BenchmarkPushBack(b *testing.B) { + lst := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + lst.PushBack(i) + } +} diff --git a/libs/clist/clist.go b/libs/clist/clist.go index ccb1f5777a1..c69d3d5f3a7 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -12,9 +12,15 @@ to ensure garbage collection of removed elements. */ import ( + "fmt" "sync" ) +// MaxLength is the max allowed number of elements a linked list is +// allowed to contain. +// If more elements are pushed to the list it will panic. +const MaxLength = int(^uint(0) >> 1) + /* CElement is an element of a linked-list @@ -115,43 +121,42 @@ func (e *CElement) Next() *CElement { // Nonblocking, may return nil if at the end. func (e *CElement) Prev() *CElement { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.prev + prev := e.prev + e.mtx.RUnlock() + return prev } func (e *CElement) Removed() bool { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.removed + isRemoved := e.removed + e.mtx.RUnlock() + return isRemoved } func (e *CElement) DetachNext() { - if !e.Removed() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() panic("DetachNext() must be called after Remove(e)") } - e.mtx.Lock() - defer e.mtx.Unlock() - e.next = nil + e.mtx.Unlock() } func (e *CElement) DetachPrev() { - if !e.Removed() { + e.mtx.Lock() + if !e.removed { + e.mtx.Unlock() panic("DetachPrev() must be called after Remove(e)") } - e.mtx.Lock() - defer e.mtx.Unlock() - e.prev = nil + e.mtx.Unlock() } // NOTE: This function needs to be safe for // concurrent goroutines waiting on nextWg. 
func (e *CElement) SetNext(newNext *CElement) { e.mtx.Lock() - defer e.mtx.Unlock() oldNext := e.next e.next = newNext @@ -168,13 +173,13 @@ func (e *CElement) SetNext(newNext *CElement) { e.nextWg.Done() close(e.nextWaitCh) } + e.mtx.Unlock() } // NOTE: This function needs to be safe for // concurrent goroutines waiting on prevWg func (e *CElement) SetPrev(newPrev *CElement) { e.mtx.Lock() - defer e.mtx.Unlock() oldPrev := e.prev e.prev = newPrev @@ -186,11 +191,11 @@ func (e *CElement) SetPrev(newPrev *CElement) { e.prevWg.Done() close(e.prevWaitCh) } + e.mtx.Unlock() } func (e *CElement) SetRemoved() { e.mtx.Lock() - defer e.mtx.Unlock() e.removed = true @@ -203,6 +208,7 @@ func (e *CElement) SetRemoved() { e.nextWg.Done() close(e.nextWaitCh) } + e.mtx.Unlock() } //-------------------------------------------------------------------------------- @@ -210,6 +216,7 @@ func (e *CElement) SetRemoved() { // CList represents a linked list. // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. +// Panics if length grows beyond the max. type CList struct { mtx sync.RWMutex wg *sync.WaitGroup @@ -217,34 +224,44 @@ type CList struct { head *CElement // first element tail *CElement // last element len int // list length + maxLen int // max list length } func (l *CList) Init() *CList { l.mtx.Lock() - defer l.mtx.Unlock() l.wg = waitGroup1() l.waitCh = make(chan struct{}) l.head = nil l.tail = nil l.len = 0 + l.mtx.Unlock() return l } -func New() *CList { return new(CList).Init() } +// Return CList with MaxLength. CList will panic if it goes beyond MaxLength. +func New() *CList { return newWithMax(MaxLength) } + +// Return CList with given maxLength. +// Will panic if list exceeds given maxLength. +func newWithMax(maxLength int) *CList { + l := new(CList) + l.maxLen = maxLength + return l.Init() +} func (l *CList) Len() int { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.len + len := l.len + l.mtx.RUnlock() + return len } func (l *CList) Front() *CElement { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.head + head := l.head + l.mtx.RUnlock() + return head } func (l *CList) FrontWait() *CElement { @@ -265,9 +282,9 @@ func (l *CList) FrontWait() *CElement { func (l *CList) Back() *CElement { l.mtx.RLock() - defer l.mtx.RUnlock() - - return l.tail + back := l.tail + l.mtx.RUnlock() + return back } func (l *CList) BackWait() *CElement { @@ -295,9 +312,9 @@ func (l *CList) WaitChan() <-chan struct{} { return l.waitCh } +// Panics if list grows beyond its max length. func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() - defer l.mtx.Unlock() // Construct a new element e := &CElement{ @@ -316,6 +333,9 @@ func (l *CList) PushBack(v interface{}) *CElement { l.wg.Done() close(l.waitCh) } + if l.len >= l.maxLen { + panic(fmt.Sprintf("clist: maximum length list reached %d", l.maxLen)) + } l.len++ // Modify the tail @@ -327,7 +347,7 @@ func (l *CList) PushBack(v interface{}) *CElement { l.tail.SetNext(e) // This will make e accessible. l.tail = e // Update the list. } - + l.mtx.Unlock() return e } @@ -335,18 +355,20 @@ func (l *CList) PushBack(v interface{}) *CElement { // NOTE: As per the contract of CList, removed elements cannot be added back. 
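// Remove unlinks e and returns its value; it panics on an inconsistent list (an empty list, a false head, or a false tail), releasing the mutex before each panic so it is not left held.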
func (l *CList) Remove(e *CElement) interface{} { l.mtx.Lock() - defer l.mtx.Unlock() prev := e.Prev() next := e.Next() if l.head == nil || l.tail == nil { + l.mtx.Unlock() panic("Remove(e) on empty CList") } if prev == nil && l.head != e { + l.mtx.Unlock() panic("Remove(e) with false head") } if next == nil && l.tail != e { + l.mtx.Unlock() panic("Remove(e) with false tail") } @@ -374,6 +396,7 @@ func (l *CList) Remove(e *CElement) interface{} { // Set .Done() on e, otherwise waiters will wait forever. e.SetRemoved() + l.mtx.Unlock() return e.Value } diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index dbdf2f02860..4ded6177a82 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -7,9 +7,22 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" cmn "github.com/tendermint/tendermint/libs/common" ) +func TestPanicOnMaxLength(t *testing.T) { + maxLength := 1000 + + l := newWithMax(maxLength) + for i := 0; i < maxLength; i++ { + l.PushBack(1) + } + assert.Panics(t, func() { + l.PushBack(1) + }) +} + func TestSmall(t *testing.T) { l := New() el1 := l.PushBack(1) @@ -149,8 +162,8 @@ func _TestGCRandom(t *testing.T) { func TestScanRightDeleteRandom(t *testing.T) { - const numElements = 10000 - const numTimes = 1000 + const numElements = 1000 + const numTimes = 100 const numScanners = 10 l := New() @@ -209,7 +222,7 @@ func TestScanRightDeleteRandom(t *testing.T) { // Stop scanners close(stop) - time.Sleep(time.Second * 1) + // time.Sleep(time.Second * 1) // And remove all the elements. for el := l.Front(); el != nil; el = el.Next() { @@ -244,7 +257,7 @@ func TestWaitChan(t *testing.T) { for i := 1; i < 100; i++ { l.PushBack(i) pushed++ - time.Sleep(time.Duration(cmn.RandIntn(100)) * time.Millisecond) + time.Sleep(time.Duration(cmn.RandIntn(25)) * time.Millisecond) } close(done) }() @@ -283,7 +296,7 @@ FOR_LOOP2: if prev == nil { t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") } - case <-time.After(5 * time.Second): + case <-time.After(3 * time.Second): break FOR_LOOP2 } } diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index c697ba5de86..b1efd3f6216 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -3,6 +3,7 @@ package common import ( "bytes" "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -149,7 +150,7 @@ func TestBytes(t *testing.T) { bA.SetIndex(0, true) check := func(bA *BitArray, bz []byte) { if !bytes.Equal(bA.Bytes(), bz) { - panic(Fmt("Expected %X but got %X", bz, bA.Bytes())) + panic(fmt.Sprintf("Expected %X but got %X", bz, bA.Bytes())) } } check(bA, []byte{0x01}) diff --git a/libs/common/cmap.go b/libs/common/cmap.go index c65c27d4c42..2f7720d2e12 100644 --- a/libs/common/cmap.go +++ b/libs/common/cmap.go @@ -16,58 +16,60 @@ func NewCMap() *CMap { func (cm *CMap) Set(key string, value interface{}) { cm.l.Lock() - defer cm.l.Unlock() cm.m[key] = value + cm.l.Unlock() } func (cm *CMap) Get(key string) interface{} { cm.l.Lock() - defer cm.l.Unlock() - return cm.m[key] + val := cm.m[key] + cm.l.Unlock() + return val } func (cm *CMap) Has(key string) bool { cm.l.Lock() - defer cm.l.Unlock() _, ok := cm.m[key] + cm.l.Unlock() return ok } func (cm *CMap) Delete(key string) { cm.l.Lock() - defer cm.l.Unlock() delete(cm.m, key) + cm.l.Unlock() } func (cm *CMap) Size() int { cm.l.Lock() - defer cm.l.Unlock() - return len(cm.m) + size := len(cm.m) + cm.l.Unlock() + return size } func (cm *CMap) Clear() { cm.l.Lock() - 
defer cm.l.Unlock() cm.m = make(map[string]interface{}) + cm.l.Unlock() } func (cm *CMap) Keys() []string { cm.l.Lock() - defer cm.l.Unlock() keys := []string{} for k := range cm.m { keys = append(keys, k) } + cm.l.Unlock() return keys } func (cm *CMap) Values() []interface{} { cm.l.Lock() - defer cm.l.Unlock() items := []interface{}{} for _, v := range cm.m { items = append(items, v) } + cm.l.Unlock() return items } diff --git a/libs/common/cmap_test.go b/libs/common/cmap_test.go index c665a7f3ed6..33d9f047754 100644 --- a/libs/common/cmap_test.go +++ b/libs/common/cmap_test.go @@ -51,3 +51,14 @@ func TestContains(t *testing.T) { assert.False(t, cmap.Has("key2")) assert.Nil(t, cmap.Get("key2")) } + +func BenchmarkCMapHas(b *testing.B) { + m := NewCMap() + for i := 0; i < 1000; i++ { + m.Set(string(i), i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Has(string(i)) + } +} diff --git a/libs/common/colors.go b/libs/common/colors.go index 049ce7a5056..4837f97b49a 100644 --- a/libs/common/colors.go +++ b/libs/common/colors.go @@ -88,7 +88,7 @@ func ColoredBytes(data []byte, textColor, bytesColor func(...interface{}) string if 0x21 <= b && b < 0x7F { s += textColor(string(b)) } else { - s += bytesColor(Fmt("%02X", b)) + s += bytesColor(fmt.Sprintf("%02X", b)) } } return s diff --git a/libs/common/errors.go b/libs/common/errors.go index 5c31b8968dd..1dc909e89a5 100644 --- a/libs/common/errors.go +++ b/libs/common/errors.go @@ -10,13 +10,13 @@ import ( func ErrorWrap(cause interface{}, format string, args ...interface{}) Error { if causeCmnError, ok := cause.(*cmnError); ok { - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return causeCmnError.Stacktrace().Trace(1, msg) } else if cause == nil { return newCmnError(FmtError{format, args}).Stacktrace() } else { // NOTE: causeCmnError is a typed nil here. - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return newCmnError(cause).Stacktrace().Trace(1, msg) } } @@ -98,7 +98,7 @@ func (err *cmnError) Stacktrace() Error { // Add tracing information with msg. // Set n=0 unless wrapped with some function, then n > 0. func (err *cmnError) Trace(offset int, format string, args ...interface{}) Error { - msg := Fmt(format, args...) + msg := fmt.Sprintf(format, args...) return err.doTrace(msg, offset) } @@ -221,7 +221,7 @@ func (fe FmtError) Format() string { // and some guarantee is not satisfied. // XXX DEPRECATED func PanicSanity(v interface{}) { - panic(Fmt("Panicked on a Sanity Check: %v", v)) + panic(fmt.Sprintf("Panicked on a Sanity Check: %v", v)) } // A panic here means something has gone horribly wrong, in the form of data corruption or @@ -229,18 +229,18 @@ func PanicSanity(v interface{}) { // If they do, it's indicative of a much more serious problem. // XXX DEPRECATED func PanicCrisis(v interface{}) { - panic(Fmt("Panicked on a Crisis: %v", v)) + panic(fmt.Sprintf("Panicked on a Crisis: %v", v)) } // Indicates a failure of consensus. Someone was malicious or something has // gone horribly wrong. 
These should really boot us into an "emergency-recover" mode // XXX DEPRECATED func PanicConsensus(v interface{}) { - panic(Fmt("Panicked on a Consensus Failure: %v", v)) + panic(fmt.Sprintf("Panicked on a Consensus Failure: %v", v)) } // For those times when we're not sure if we should panic // XXX DEPRECATED func PanicQ(v interface{}) { - panic(Fmt("Panicked questionably: %v", v)) + panic(fmt.Sprintf("Panicked questionably: %v", v)) } diff --git a/libs/common/math.go b/libs/common/math.go index b037d1a71ef..ae91f2058df 100644 --- a/libs/common/math.go +++ b/libs/common/math.go @@ -1,47 +1,5 @@ package common -func MaxInt8(a, b int8) int8 { - if a > b { - return a - } - return b -} - -func MaxUint8(a, b uint8) uint8 { - if a > b { - return a - } - return b -} - -func MaxInt16(a, b int16) int16 { - if a > b { - return a - } - return b -} - -func MaxUint16(a, b uint16) uint16 { - if a > b { - return a - } - return b -} - -func MaxInt32(a, b int32) int32 { - if a > b { - return a - } - return b -} - -func MaxUint32(a, b uint32) uint32 { - if a > b { - return a - } - return b -} - func MaxInt64(a, b int64) int64 { if a > b { return a @@ -49,13 +7,6 @@ func MaxInt64(a, b int64) int64 { return b } -func MaxUint64(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - func MaxInt(a, b int) int { if a > b { return a @@ -63,57 +14,8 @@ func MaxInt(a, b int) int { return b } -func MaxUint(a, b uint) uint { - if a > b { - return a - } - return b -} - //----------------------------------------------------------------------------- -func MinInt8(a, b int8) int8 { - if a < b { - return a - } - return b -} - -func MinUint8(a, b uint8) uint8 { - if a < b { - return a - } - return b -} - -func MinInt16(a, b int16) int16 { - if a < b { - return a - } - return b -} - -func MinUint16(a, b uint16) uint16 { - if a < b { - return a - } - return b -} - -func MinInt32(a, b int32) int32 { - if a < b { - return a - } - return b -} - -func MinUint32(a, b uint32) uint32 { - if a < b { - return a - } - return b -} - func MinInt64(a, b int64) int64 { if a < b { return a @@ -121,37 +23,9 @@ func MinInt64(a, b int64) int64 { return b } -func MinUint64(a, b uint64) uint64 { - if a < b { - return a - } - return b -} - func MinInt(a, b int) int { if a < b { return a } return b } - -func MinUint(a, b uint) uint { - if a < b { - return a - } - return b -} - -//----------------------------------------------------------------------------- - -func ExpUint64(a, b uint64) uint64 { - accum := uint64(1) - for b > 0 { - if b&1 == 1 { - accum *= a - } - a *= a - b >>= 1 - } - return accum -} diff --git a/libs/common/os.go b/libs/common/os.go index b8419764e9f..501bb564008 100644 --- a/libs/common/os.go +++ b/libs/common/os.go @@ -106,7 +106,7 @@ func ReadFile(filePath string) ([]byte, error) { func MustReadFile(filePath string) []byte { fileBytes, err := ioutil.ReadFile(filePath) if err != nil { - Exit(Fmt("MustReadFile failed: %v", err)) + Exit(fmt.Sprintf("MustReadFile failed: %v", err)) return nil } return fileBytes @@ -119,7 +119,7 @@ func WriteFile(filePath string, contents []byte, mode os.FileMode) error { func MustWriteFile(filePath string, contents []byte, mode os.FileMode) { err := WriteFile(filePath, contents, mode) if err != nil { - Exit(Fmt("MustWriteFile failed: %v", err)) + Exit(fmt.Sprintf("MustWriteFile failed: %v", err)) } } diff --git a/libs/common/random.go b/libs/common/random.go index 4b0594d0e58..2de65945c58 100644 --- a/libs/common/random.go +++ b/libs/common/random.go @@ -295,7 +295,7 @@ func (r 
*Rand) Perm(n int) []int { // NOTE: This relies on the os's random number generator. // For real security, we should salt that with some seed. -// See github.com/tendermint/go-crypto for a more secure reader. +// See github.com/tendermint/tendermint/crypto for a more secure reader. func cRandBytes(numBytes int) []byte { b := make([]byte, numBytes) _, err := crand.Read(b) diff --git a/libs/common/service.go b/libs/common/service.go index b6f166e77cf..03e392855d7 100644 --- a/libs/common/service.go +++ b/libs/common/service.go @@ -123,10 +123,10 @@ func (bs *BaseService) SetLogger(l log.Logger) { func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { - bs.Logger.Error(Fmt("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + bs.Logger.Error(fmt.Sprintf("Not starting %v -- already stopped", bs.name), "impl", bs.impl) return ErrAlreadyStopped } - bs.Logger.Info(Fmt("Starting %v", bs.name), "impl", bs.impl) + bs.Logger.Info(fmt.Sprintf("Starting %v", bs.name), "impl", bs.impl) err := bs.impl.OnStart() if err != nil { // revert flag @@ -135,7 +135,7 @@ func (bs *BaseService) Start() error { } return nil } - bs.Logger.Debug(Fmt("Not starting %v -- already started", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Not starting %v -- already started", bs.name), "impl", bs.impl) return ErrAlreadyStarted } @@ -148,12 +148,12 @@ func (bs *BaseService) OnStart() error { return nil } // channel. An error will be returned if the service is already stopped. func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { - bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) + bs.Logger.Info(fmt.Sprintf("Stopping %v", bs.name), "impl", bs.impl) bs.impl.OnStop() close(bs.quit) return nil } - bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) return ErrAlreadyStopped } @@ -166,7 +166,7 @@ func (bs *BaseService) OnStop() {} // will be returned if the service is running. func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) return fmt.Errorf("can't reset running %s", bs.name) } diff --git a/libs/common/string.go b/libs/common/string.go index fac1be6c959..e341b49e8f3 100644 --- a/libs/common/string.go +++ b/libs/common/string.go @@ -6,14 +6,6 @@ import ( "strings" ) -// Like fmt.Sprintf, but skips formatting if args are empty. -var Fmt = func(format string, a ...interface{}) string { - if len(a) == 0 { - return format - } - return fmt.Sprintf(format, a...) 
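The `Fmt` helper being deleted from libs/common/string.go here returned the format string untouched when called with no arguments, while `fmt.Sprintf` always interprets verbs. Zero-argument call sites whose strings contain a literal `%` therefore change behavior under this migration. A small illustration, with a hypothetical string:

```go
package main

import "fmt"

func main() {
	// cmn.Fmt("progress: 100%") used to return the string unchanged,
	// because no formatting arguments were passed.
	// fmt.Sprintf always parses verbs, so the stray '%' now leaks:
	fmt.Println(fmt.Sprintf("progress: 100%")) // progress: 100%!(NOVERB)

	// The mechanical fix for such call sites is to escape the percent
	// sign (or drop Sprintf entirely).
	fmt.Println(fmt.Sprintf("progress: 100%%")) // progress: 100%
}
```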
-} - // IsHex returns true for non-empty hex-string prefixed with "0x" func IsHex(s string) bool { if len(s) > 2 && strings.EqualFold(s[:2], "0x") { diff --git a/libs/common/string_test.go b/libs/common/string_test.go index 5d1b68febc3..0fc677a91d3 100644 --- a/libs/common/string_test.go +++ b/libs/common/string_test.go @@ -31,25 +31,6 @@ func TestIsHex(t *testing.T) { } } -func TestSplitAndTrim(t *testing.T) { - testCases := []struct { - s string - sep string - cutset string - expected []string - }{ - {"a,b,c", ",", " ", []string{"a", "b", "c"}}, - {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, - {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, - {" , ", ",", " ", []string{"", ""}}, - {" ", ",", " ", []string{""}}, - } - - for _, tc := range testCases { - assert.Equal(t, tc.expected, SplitAndTrim(tc.s, tc.sep, tc.cutset), "%s", tc.s) - } -} - func TestIsASCIIText(t *testing.T) { notASCIIText := []string{ "", "\xC2", "\xC2\xA2", "\xFF", "\x80", "\xF0", "\n", "\t", diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go index 6442daeb49a..9cd62273b3e 100644 --- a/libs/common/types.pb.go +++ b/libs/common/types.pb.go @@ -1,16 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/common/types.proto -/* - Package common is a generated protocol buffer package. - - It is generated from these files: - libs/common/types.proto - - It has these top-level messages: - KVPair - KI64Pair -*/ //nolint package common @@ -38,14 +28,45 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // Define these here for compatibility but use tmlibs/common.KVPair. type KVPair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *KVPair) Reset() { *m = KVPair{} } -func (m *KVPair) String() string { return proto.CompactTextString(m) } -func (*KVPair) ProtoMessage() {} -func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{0} +} +func (m *KVPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KVPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KVPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KVPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KVPair.Merge(dst, src) +} +func (m *KVPair) XXX_Size() int { + return m.Size() +} +func (m *KVPair) XXX_DiscardUnknown() { + xxx_messageInfo_KVPair.DiscardUnknown(m) +} + +var xxx_messageInfo_KVPair proto.InternalMessageInfo func (m *KVPair) GetKey() []byte { if m != nil { @@ -63,14 +84,45 @@ func (m *KVPair) GetValue() []byte { // Define these here for compatibility but use tmlibs/common.KI64Pair. 
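The regenerated gogoproto code above adds an `XXX_unrecognized` buffer to each message, so fields unknown to this version of the schema now survive an unmarshal/marshal round trip instead of being silently dropped. A test-style sketch against the `KVPair` above; the input bytes are hand-encoded (field 1 is the key, field 3 is unknown to `KVPair`, and the old generated code would simply have skipped it):

```go
package common

import (
	"bytes"
	"testing"
)

func TestUnknownFieldsPreserved(t *testing.T) {
	// field 1 (bytes) = "k", then field 3 (varint) = 1, which KVPair
	// does not define.
	in := []byte{0x0a, 0x01, 'k', 0x18, 0x01}

	var kv KVPair
	if err := kv.Unmarshal(in); err != nil {
		t.Fatal(err)
	}
	if len(kv.XXX_unrecognized) == 0 {
		t.Fatal("expected unknown field bytes to be retained")
	}

	out, err := kv.Marshal()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(in, out) {
		t.Fatalf("round trip lost data: %X != %X", in, out)
	}
}
```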
type KI64Pair struct { - Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { + return fileDescriptor_types_611b4364a8604338, []int{1} +} +func (m *KI64Pair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KI64Pair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_KI64Pair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *KI64Pair) XXX_Merge(src proto.Message) { + xxx_messageInfo_KI64Pair.Merge(dst, src) +} +func (m *KI64Pair) XXX_Size() int { + return m.Size() +} +func (m *KI64Pair) XXX_DiscardUnknown() { + xxx_messageInfo_KI64Pair.DiscardUnknown(m) } -func (m *KI64Pair) Reset() { *m = KI64Pair{} } -func (m *KI64Pair) String() string { return proto.CompactTextString(m) } -func (*KI64Pair) ProtoMessage() {} -func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +var xxx_messageInfo_KI64Pair proto.InternalMessageInfo func (m *KI64Pair) GetKey() []byte { if m != nil { @@ -117,6 +169,9 @@ func (this *KVPair) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *KI64Pair) Equal(that interface{}) bool { @@ -144,6 +199,9 @@ func (this *KI64Pair) Equal(that interface{}) bool { if this.Value != that1.Value { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (m *KVPair) Marshal() (dAtA []byte, err error) { @@ -173,6 +231,9 @@ func (m *KVPair) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -202,6 +263,9 @@ func (m *KI64Pair) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintTypes(dAtA, i, uint64(m.Value)) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -227,6 +291,7 @@ func NewPopulatedKVPair(r randyTypes, easy bool) *KVPair { this.Value[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -243,6 +308,7 @@ func NewPopulatedKI64Pair(r randyTypes, easy bool) *KI64Pair { this.Value *= -1 } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -330,6 +396,9 @@ func (m *KVPair) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -343,6 +412,9 @@ func (m *KI64Pair) Size() (n int) { if m.Value != 0 { n += 1 + sovTypes(uint64(m.Value)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -462,6 +534,7 @@ func (m *KVPair) 
Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -562,6 +635,7 @@ func (m *KI64Pair) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -676,10 +750,12 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } -func init() { golang_proto.RegisterFile("libs/common/types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) } +func init() { + golang_proto.RegisterFile("libs/common/types.proto", fileDescriptor_types_611b4364a8604338) +} -var fileDescriptorTypes = []byte{ +var fileDescriptor_types_611b4364a8604338 = []byte{ // 174 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcf, 0xc9, 0x4c, 0x2a, 0xd6, 0x4f, 0xce, 0xcf, 0xcd, 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, diff --git a/libs/common/typespb_test.go b/libs/common/typespb_test.go index 583c9050247..439cc1273ab 100644 --- a/libs/common/typespb_test.go +++ b/libs/common/typespb_test.go @@ -1,23 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: libs/common/types.proto -/* -Package common is a generated protocol buffer package. - -It is generated from these files: - libs/common/types.proto - -It has these top-level messages: - KVPair - KI64Pair -*/ package common import testing "testing" -import rand "math/rand" +import math_rand "math/rand" import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" import proto "github.com/gogo/protobuf/proto" -import jsonpb "github.com/gogo/protobuf/jsonpb" import golang_proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" @@ -31,14 +22,14 @@ var _ = math.Inf func TestKVPairProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -56,13 +47,13 @@ func TestKVPairProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestKVPairMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -74,7 +65,7 @@ func TestKVPairMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range 
dAtA { @@ -87,14 +78,14 @@ func TestKVPairMarshalTo(t *testing.T) { func TestKI64PairProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -112,13 +103,13 @@ func TestKI64PairProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestKI64PairMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -130,7 +121,7 @@ func TestKI64PairMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -143,15 +134,15 @@ func TestKI64PairMarshalTo(t *testing.T) { func TestKVPairJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KVPair{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -161,15 +152,15 @@ func TestKVPairJSON(t *testing.T) { } func TestKI64PairJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &KI64Pair{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -179,11 +170,11 @@ func TestKI64PairJSON(t *testing.T) { } func TestKVPairProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &KVPair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -193,11 +184,11 @@ func TestKVPairProtoText(t *testing.T) { func TestKVPairProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := 
math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &KVPair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -207,11 +198,11 @@ func TestKVPairProtoCompactText(t *testing.T) { func TestKI64PairProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &KI64Pair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -221,11 +212,11 @@ func TestKI64PairProtoText(t *testing.T) { func TestKI64PairProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &KI64Pair{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -235,10 +226,10 @@ func TestKI64PairProtoCompactText(t *testing.T) { func TestKVPairSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKVPair(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -249,7 +240,7 @@ func TestKVPairSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -257,10 +248,10 @@ func TestKVPairSize(t *testing.T) { func TestKI64PairSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedKI64Pair(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -271,7 +262,7 @@ func TestKI64PairSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } diff --git a/libs/db/backend_test.go b/libs/db/backend_test.go index b31b4d74ad0..496f4c41013 100644 --- a/libs/db/backend_test.go +++ b/libs/db/backend_test.go @@ -54,7 +54,7 @@ func TestBackendsGetSetDelete(t *testing.T) { } func withDB(t *testing.T, creator dbCreator, fn func(DB)) { - name := cmn.Fmt("test_%x", 
cmn.RandStr(12)) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) db, err := creator(name, "") defer cleanupDBDir("", name) assert.Nil(t, err) @@ -143,7 +143,7 @@ func TestBackendsNilKeys(t *testing.T) { } func TestGoLevelDBBackend(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) db := NewDB(name, GoLevelDBBackend, "") defer cleanupDBDir("", name) @@ -160,7 +160,7 @@ func TestDBIterator(t *testing.T) { } func testDBIterator(t *testing.T, backend DBBackendType) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) db := NewDB(name, backend, "") defer cleanupDBDir("", name) diff --git a/libs/db/c_level_db_test.go b/libs/db/c_level_db_test.go index 2d30500dd07..d01a85e9d78 100644 --- a/libs/db/c_level_db_test.go +++ b/libs/db/c_level_db_test.go @@ -19,7 +19,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -87,7 +87,7 @@ func bytes2Int64(buf []byte) int64 { */ func TestCLevelDBBackend(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackend, "") defer cleanupDBDir("", name) diff --git a/libs/db/go_level_db.go b/libs/db/go_level_db.go index 349e447b2bf..8a48879218c 100644 --- a/libs/db/go_level_db.go +++ b/libs/db/go_level_db.go @@ -28,8 +28,12 @@ type GoLevelDB struct { } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { + return NewGoLevelDBWithOpts(name, dir, nil) +} + +func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) { dbPath := filepath.Join(dir, name+".db") - db, err := leveldb.OpenFile(dbPath, nil) + db, err := leveldb.OpenFile(dbPath, o) if err != nil { return nil, err } diff --git a/libs/db/go_level_db_test.go b/libs/db/go_level_db_test.go index 47be216a652..2b234658896 100644 --- a/libs/db/go_level_db_test.go +++ b/libs/db/go_level_db_test.go @@ -6,9 +6,30 @@ import ( "fmt" "testing" + "github.com/syndtr/goleveldb/leveldb/opt" + + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" ) +func TestNewGoLevelDB(t *testing.T) { + name := fmt.Sprintf("test_%x", cmn.RandStr(12)) + // Test write locks + db, err := NewGoLevelDB(name, "") + require.Nil(t, err) + _, err = NewGoLevelDB(name, "") + require.NotNil(t, err) + db.Close() // Close the db to release the lock + + // Open the db twice in a row to test read-only locks + ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) + defer ro1.Close() + require.Nil(t, err) + ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) + defer ro2.Close() + require.Nil(t, err) +} + func BenchmarkRandomReadsWrites(b *testing.B) { b.StopTimer() @@ -17,7 +38,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") + db, err := NewGoLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return diff --git a/libs/log/tmfmt_logger.go b/libs/log/tmfmt_logger.go index d0397971858..de155fefab6 100644 --- a/libs/log/tmfmt_logger.go +++ b/libs/log/tmfmt_logger.go @@ -90,7 +90,7 @@ func (l tmfmtLogger) Log(keyvals 
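`NewGoLevelDBWithOpts`, added just above, threads goleveldb's `opt.Options` through, and the new test uses it to open the same database read-only from several handles at once. (One caveat in the test as written: the `defer ro1.Close()` runs before the error check, so a failed open would Close a nil handle.) A hedged usage sketch; the database name and directory are invented:

```go
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb/opt"
	dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
	// A normal open takes goleveldb's exclusive file lock.
	rw, err := dbm.NewGoLevelDB("example", "/tmp") // hypothetical name/dir
	if err != nil {
		log.Fatal(err)
	}
	rw.Set([]byte("k"), []byte("v"))
	rw.Close() // release the lock before reopening

	// Read-only opens can coexist, as the new test above demonstrates.
	ro, err := dbm.NewGoLevelDBWithOpts("example", "/tmp", &opt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer ro.Close()
	log.Printf("k = %s", ro.Get([]byte("k")))
}
```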
...interface{}) error { // D - first character of the level, uppercase (ASCII only) // [05-02|11:06:44.322] - our time format (see https://golang.org/src/time/format.go) // Stopping ... - message - enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().UTC().Format("01-02|15:04:05.000"), msg)) + enc.buf.WriteString(fmt.Sprintf("%c[%s] %-44s ", lvl[0]-32, time.Now().Format("01-02|15:04:05.000"), msg)) if module != unknown { enc.buf.WriteString("module=" + module + " ") diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 4c0d97e2ffc..c104439f8bc 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -291,7 +291,8 @@ loop: } func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed + + // initialize clientToChannelMap per query if needed if _, ok := state.queries[q]; !ok { state.queries[q] = make(map[string]chan<- interface{}) } diff --git a/lite/base_verifier.go b/lite/base_verifier.go new file mode 100644 index 00000000000..e60d3953a98 --- /dev/null +++ b/lite/base_verifier.go @@ -0,0 +1,72 @@ +package lite + +import ( + "bytes" + + cmn "github.com/tendermint/tendermint/libs/common" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +var _ Verifier = (*BaseVerifier)(nil) + +// BaseVerifier lets us check the validity of SignedHeaders at height or +// later, requiring sufficient votes (> 2/3) from the given valset. +// To certify blocks produced by a blockchain with mutable validator sets, +// use the DynamicVerifier. +// TODO: Handle unbonding time. +type BaseVerifier struct { + chainID string + height int64 + valset *types.ValidatorSet +} + +// NewBaseVerifier returns a new Verifier initialized with a validator set at +// some height. +func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { + if valset.IsNilOrEmpty() { + panic("NewBaseVerifier requires a valid valset") + } + return &BaseVerifier{ + chainID: chainID, + height: height, + valset: valset, + } +} + +// Implements Verifier. +func (bc *BaseVerifier) ChainID() string { + return bc.chainID +} + +// Implements Verifier. +func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { + + // We can't certify commits older than bc.height. + if signedHeader.Height < bc.height { + return cmn.NewError("BaseVerifier height is %v, cannot certify height %v", + bc.height, signedHeader.Height) + } + + // We can't certify with the wrong validator set. + if !bytes.Equal(signedHeader.ValidatorsHash, + bc.valset.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) + } + + // Do basic sanity checks. + err := signedHeader.ValidateBasic(bc.chainID) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + // Check commit signatures. 
+ err = bc.valset.VerifyCommit( + bc.chainID, signedHeader.Commit.BlockID, + signedHeader.Height, signedHeader.Commit) + if err != nil { + return cmn.ErrorWrap(err, "in certify") + } + + return nil +} diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go new file mode 100644 index 00000000000..dab7885f678 --- /dev/null +++ b/lite/base_verifier_test.go @@ -0,0 +1,56 @@ +package lite + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +func TestBaseCert(t *testing.T) { + assert := assert.New(t) + + keys := genPrivKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! + vals := keys.ToValidators(20, 10) + // and a Verifier based on our known set + chainID := "test-static" + cert := NewBaseVerifier(chainID, 2, vals) + + cases := []struct { + keys privKeys + vals *types.ValidatorSet + height int64 + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // height regression + {keys, vals, 1, 0, len(keys), false, false}, + // perfect, signed by everyone + {keys, vals, 2, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 3, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 4, 0, len(keys) - 1, false, false}, + // Changing the power a little bit breaks the static validator. + // The sigs are enough, but the validator hash is unknown. + {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, + } + + for _, tc := range cases { + sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, + []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) + err := cert.Certify(sh) + if tc.proper { + assert.Nil(err, "%+v", err) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(lerr.IsErrUnexpectedValidators(err), "%+v", err) + } + } + } +} diff --git a/lite/client/main_test.go b/lite/client/main_test.go deleted file mode 100644 index 49b19436685..00000000000 --- a/lite/client/main_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/tendermint/abci/example/kvstore" - - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and merkleeyes) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/lite/client/provider.go b/lite/client/provider.go index 5f3d724500a..e0c0a331b29 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,19 +1,19 @@ /* Package client defines a provider that uses a rpcclient to get information, which is used to get new headers -and validators directly from a node. +and validators directly from a Tendermint client. 
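`BaseVerifier` deliberately knows nothing beyond one validator set and one starting height, as the test above exercises. A sketch of standalone use; obtaining the trusted valset and the header to check (for instance from a genesis doc and an RPC node) is assumed to happen elsewhere:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/types"
)

// verifyHeader certifies sh against a validator set trusted to be in
// effect from trustedHeight onward.
func verifyHeader(chainID string, trustedHeight int64, valset *types.ValidatorSet, sh types.SignedHeader) {
	cert := lite.NewBaseVerifier(chainID, trustedHeight, valset)
	if err := cert.Certify(sh); err != nil {
		log.Fatalf("header rejected: %v", err)
	}
	log.Printf("header at height %d certified", sh.Height)
}
```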
*/ package client import ( - "bytes" + "fmt" + log "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/lite" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" ) // SignStatusClient combines a SignClient and StatusClient. @@ -23,119 +23,112 @@ type SignStatusClient interface { } type provider struct { - node SignStatusClient - lastHeight int64 + logger log.Logger + chainID string + client SignStatusClient } -// NewProvider can wrap any rpcclient to expose it as -// a read-only provider. -func NewProvider(node SignStatusClient) lite.Provider { - return &provider{node: node} +// NewProvider implements Provider (but not PersistentProvider). +func NewProvider(chainID string, client SignStatusClient) lite.Provider { + return &provider{ + logger: log.NewNopLogger(), + chainID: chainID, + client: client, + } } // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. -func NewHTTPProvider(remote string) lite.Provider { - return &provider{ - node: rpcclient.NewHTTP(remote, "/websocket"), - } +func NewHTTPProvider(chainID, remote string) lite.Provider { + return NewProvider(chainID, rpcclient.NewHTTP(remote, "/websocket")) } -// StatusClient returns the internal node as a StatusClient -func (p *provider) StatusClient() rpcclient.StatusClient { - return p.node +// Implements Provider. +func (p *provider) SetLogger(logger log.Logger) { + logger = logger.With("module", "lite/client") + p.logger = logger } -// StoreCommit is a noop, as clients can only read from the chain... -func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } +// StatusClient returns the internal client as a StatusClient +func (p *provider) StatusClient() rpcclient.StatusClient { + return p.client +} -// GetHash gets the most recent validator and sees if it matches -// -// TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - var fc lite.FullCommit - vals, err := p.node.Validators(nil) - // if we get no validators, or a different height, return an error - if err != nil { - return fc, err +// LatestFullCommit implements Provider. +func (p *provider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc lite.FullCommit, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return } - p.updateHeight(vals.BlockHeight) - vhash := types.NewValidatorSet(vals.Validators).Hash() - if !bytes.Equal(hash, vhash) { - return fc, liteErr.ErrCommitNotFound() + if maxHeight != 0 && maxHeight < minHeight { + err = fmt.Errorf("need maxHeight == 0 or minHeight <= maxHeight, got min %v and max %v", + minHeight, maxHeight) + return } - return p.seedFromVals(vals) -} - -// GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { - commit, err := p.node.Commit(&h) + commit, err := p.fetchLatestCommit(minHeight, maxHeight) if err != nil { - return fc, err + return } - return p.seedFromCommit(commit) + fc, err = p.fillFullCommit(commit.SignedHeader) + return } -// LatestCommit returns the newest commit stored. 
-func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - commit, err := p.GetLatestCommit() +// fetchLatestCommit fetches the latest commit from the client. +func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes.ResultCommit, error) { + status, err := p.client.Status() if err != nil { - return fc, err + return nil, err } - return p.seedFromCommit(commit) -} - -// GetLatestCommit should return the most recent commit there is, -// which handles queries for future heights as per the semantics -// of GetByHeight. -func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { - status, err := p.node.Status() - if err != nil { + if status.SyncInfo.LatestBlockHeight < minHeight { + err = fmt.Errorf("provider is at %v but require minHeight=%v", + status.SyncInfo.LatestBlockHeight, minHeight) return nil, err } - return p.node.Commit(&status.SyncInfo.LatestBlockHeight) + if maxHeight == 0 { + maxHeight = status.SyncInfo.LatestBlockHeight + } else if status.SyncInfo.LatestBlockHeight < maxHeight { + maxHeight = status.SyncInfo.LatestBlockHeight + } + return p.client.Commit(&maxHeight) } -// CommitFromResult ... -func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { - return (lite.Commit)(result.SignedHeader) +// Implements Provider. +func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return p.getValidatorSet(chainID, height) } -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { - // now get the commits and build a full commit - commit, err := p.node.Commit(&vals.BlockHeight) +func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + if chainID != p.chainID { + err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) + return + } + if height < 1 { + err = fmt.Errorf("expected height >= 1, got height %v", height) + return + } + res, err := p.client.Validators(&height) if err != nil { - return lite.FullCommit{}, err + // TODO pass through other types of errors. + return nil, lerr.ErrUnknownValidators(chainID, height) } - fc := lite.NewFullCommit( - CommitFromResult(commit), - types.NewValidatorSet(vals.Validators), - ) - return fc, nil + valset = types.NewValidatorSet(res.Validators) + return } -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { - fc.Commit = CommitFromResult(commit) +// This does no validation. +func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { - // now get the proper validators - vals, err := p.node.Validators(&commit.Header.Height) + // Get the validators. + valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) if err != nil { - return fc, err + return lite.FullCommit{}, err } - // make sure they match the commit (as we cannot enforce height) - vset := types.NewValidatorSet(vals.Validators) - if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, liteErr.ErrValidatorsChanged() + // Get the next validators. 
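The `(minHeight, maxHeight)` window on `LatestFullCommit` deserves a gloss: `maxHeight == 0` means no upper bound, the client provider clamps `maxHeight` down to the node's latest height, and it errors only when even that falls below `minHeight`. A sketch of the resulting call patterns, assuming `p` is any `lite.Provider`:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/lite"
)

// latestInWindow demonstrates the height-window semantics.
// p can be a client provider or a DBProvider; chainID must match.
func latestInWindow(p lite.Provider, chainID string) error {
	// maxHeight == 0: no upper bound, i.e. the newest available commit.
	fc, err := p.LatestFullCommit(chainID, 1, 0)
	if err != nil {
		return err
	}
	fmt.Println("latest:", fc.Height())

	// minHeight == maxHeight pins an exact height; this fails if the
	// provider has nothing at height 100.
	fc, err = p.LatestFullCommit(chainID, 100, 100)
	if err != nil {
		return err
	}
	fmt.Println("pinned:", fc.Height()) // 100
	return nil
}
```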
+ nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + if err != nil { + return lite.FullCommit{}, err } - p.updateHeight(commit.Header.Height) - fc.Validators = vset - return fc, nil -} - -func (p *provider) updateHeight(h int64) { - if h > p.lastHeight { - p.lastHeight = h - } + return lite.NewFullCommit(signedHeader, valset, nextValset), nil } diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go index 94d47da3f36..d8704a52ec5 100644 --- a/lite/client/provider_test.go +++ b/lite/client/provider_test.go @@ -1,63 +1,61 @@ package client import ( + "os" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/abci/example/kvstore" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +func TestMain(m *testing.M) { + app := kvstore.NewKVStoreApplication() + node := rpctest.StartTendermint(app) + + code := m.Run() + + node.Stop() + node.Wait() + os.Exit(code) +} + func TestProvider(t *testing.T) { assert, require := assert.New(t), require.New(t) cfg := rpctest.GetConfig() rpcAddr := cfg.RPC.ListenAddress - genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + panic(err) + } chainID := genDoc.ChainID - p := NewHTTPProvider(rpcAddr) + t.Log("chainID:", chainID) + p := NewHTTPProvider(chainID, rpcAddr) require.NotNil(t, p) // let it produce some blocks - err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) + err = rpcclient.WaitForHeight(p.(*provider).client, 6, nil) require.Nil(err) // let's get the highest block - seed, err := p.LatestCommit() + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.Nil(err, "%+v", err) - sh := seed.Height() - vhash := seed.Header.ValidatorsHash + sh := fc.Height() assert.True(sh < 5000) // let's check this is valid somehow - assert.Nil(seed.ValidateBasic(chainID)) - cert := lite.NewStaticCertifier(chainID, seed.Validators) + assert.Nil(fc.ValidateFull(chainID)) // historical queries now work :) lower := sh - 5 - seed, err = p.GetByHeight(lower) + fc, err = p.LatestFullCommit(chainID, lower, lower) assert.Nil(err, "%+v", err) - assert.Equal(lower, seed.Height()) + assert.Equal(lower, fc.Height()) - // also get by hash (given the match) - seed, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, seed.Header.ValidatorsHash) - err = cert.Certify(seed.Commit) - assert.Nil(err, "%+v", err) - - // get by hash fails without match - seed, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // storing the seed silently ignored - err = p.StoreCommit(seed) - assert.Nil(err, "%+v", err) } diff --git a/lite/commit.go b/lite/commit.go index 11ae6d7ffac..25efb8dc088 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -2,98 +2,86 @@ package lite import ( "bytes" - - "github.com/pkg/errors" + "errors" + "fmt" "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" ) -// Certifier checks the votes to make sure the block really is signed properly. -// Certifier must know the current set of validitors by some other means. 
-type Certifier interface { - Certify(check Commit) error - ChainID() string -} - -// Commit is basically the rpc /commit response, but extended -// -// This is the basepoint for proving anything on the blockchain. It contains -// a signed header. If the signatures are valid and > 2/3 of the known set, -// we can store this checkpoint and use it to prove any number of aspects of -// the system: such as txs, abci state, validator sets, etc... -type Commit types.SignedHeader - -// FullCommit is a commit and the actual validator set, -// the base info you need to update to a given point, -// assuming knowledge of some previous validator set +// FullCommit is a signed header (the block header and a commit that signs it), +// the validator set which signed the commit, and the next validator set. The +// next validator set (which is proven from the block header) allows us to +// revert to block-by-block updating of lite Verifier's latest validator set, +// even in the face of arbitrarily large power changes. type FullCommit struct { - Commit `json:"commit"` - Validators *types.ValidatorSet `json:"validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Validators *types.ValidatorSet `json:"validator_set"` + NextValidators *types.ValidatorSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { return FullCommit{ - Commit: commit, - Validators: vals, + SignedHeader: signedHeader, + Validators: valset, + NextValidators: nextValset, } } -// Height returns the height of the header. -func (c Commit) Height() int64 { - if c.Header == nil { - return 0 +// Validate the components and check for consistency. +// This also checks to make sure that Validators actually +// signed the SignedHeader.Commit. +// If > 2/3 did not sign the Commit from fc.Validators, it +// is not a valid commit! +func (fc FullCommit) ValidateFull(chainID string) error { + // Ensure that Validators exists and matches the header. + if fc.Validators.Size() == 0 { + return errors.New("need FullCommit.Validators") } - return c.Header.Height -} - -// ValidatorsHash returns the hash of the validator set. -func (c Commit) ValidatorsHash() []byte { - if c.Header == nil { - return nil + if !bytes.Equal( + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash()) { + return fmt.Errorf("header has vhash %X but valset hash is %X", + fc.SignedHeader.ValidatorsHash, + fc.Validators.Hash(), + ) } - return c.Header.ValidatorsHash -} - -// ValidateBasic does basic consistency checks and makes sure the headers -// and commits are all consistent and refer to our chain. -// -// Make sure to use a Verifier to validate the signatures actually provide -// a significantly strong proof for this header's validity. -func (c Commit) ValidateBasic(chainID string) error { - // make sure the header is reasonable - if c.Header == nil { - return errors.New("Commit missing header") + // Ensure that NextValidators exists and matches the header. 
+ if fc.NextValidators.Size() == 0 { + return errors.New("need FullCommit.NextValidators") } - if c.Header.ChainID != chainID { - return errors.Errorf("Header belongs to another chain '%s' not '%s'", - c.Header.ChainID, chainID) + if !bytes.Equal( + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash()) { + return fmt.Errorf("header has next vhash %X but next valset hash is %X", + fc.SignedHeader.NextValidatorsHash, + fc.NextValidators.Hash(), + ) } - - if c.Commit == nil { - return errors.New("Commit missing signatures") + // Validate the header. + err := fc.SignedHeader.ValidateBasic(chainID) + if err != nil { + return err } + // Validate the signatures on the commit. + hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit + return fc.Validators.VerifyCommit( + hdr.ChainID, cmt.BlockID, + hdr.Height, cmt) +} - // make sure the header and commit match (height and hash) - if c.Commit.Height() != c.Header.Height { - return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) - } - hhash := c.Header.Hash() - chash := c.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return errors.Errorf("Commits sign block %X header is block %X", - chash, hhash) +// Height returns the height of the header. +func (fc FullCommit) Height() int64 { + if fc.SignedHeader.Header == nil { + panic("should not happen") } + return fc.SignedHeader.Height +} - // make sure the commit is reasonable - err := c.Commit.ValidateBasic() - if err != nil { - return errors.WithStack(err) +// ChainID returns the chainID of the header. +func (fc FullCommit) ChainID() string { + if fc.SignedHeader.Header == nil { + panic("should not happen") } - - // looks good, we just need to make sure the signatures are really from - // empowered validators - return nil + return fc.SignedHeader.ChainID } diff --git a/lite/dbprovider.go b/lite/dbprovider.go new file mode 100644 index 00000000000..cab695b4a9d --- /dev/null +++ b/lite/dbprovider.go @@ -0,0 +1,268 @@ +package lite + +import ( + "fmt" + "regexp" + "strconv" + + amino "github.com/tendermint/go-amino" + cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +type DBProvider struct { + logger log.Logger + label string + db dbm.DB + cdc *amino.Codec + limit int +} + +func NewDBProvider(label string, db dbm.DB) *DBProvider { + + // NOTE: when debugging, this type of construction might be useful. + //db = dbm.NewDebugDB("db provider "+cmn.RandStr(4), db) + + cdc := amino.NewCodec() + cryptoAmino.RegisterAmino(cdc) + dbp := &DBProvider{ + logger: log.NewNopLogger(), + label: label, + db: db, + cdc: cdc, + } + return dbp +} + +func (dbp *DBProvider) SetLogger(logger log.Logger) { + dbp.logger = logger.With("label", dbp.label) +} + +func (dbp *DBProvider) SetLimit(limit int) *DBProvider { + dbp.limit = limit + return dbp +} + +// Implements PersistentProvider. +func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { + + dbp.logger.Info("DBProvider.SaveFullCommit()...", "fc", fc) + batch := dbp.db.NewBatch() + + // Save the fc.validators. + // We might be overwriting what we already have, but + // it makes the logic easier for now. 
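`ValidateFull`, completed just above, is the one-stop self-consistency check on a FullCommit: both validator-set hashes must match the header, the header must pass `ValidateBasic`, and more than 2/3 of `fc.Validators` must have signed the commit. It proves internal consistency only; trust still comes from comparing against a validator set you already believe in. A usage sketch:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/lite"
)

// mustBeSelfConsistent runs every internal check on a FullCommit:
// valset hash, next-valset hash, header sanity, and >2/3 signatures.
func mustBeSelfConsistent(fc lite.FullCommit, chainID string) {
	if err := fc.ValidateFull(chainID); err != nil {
		log.Fatalf("bad full commit at height %d: %v", fc.Height(), err)
	}
}
```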
+ vsKey := validatorSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinary(fc.Validators) + if err != nil { + return err + } + batch.Set(vsKey, vsBz) + + // Save the fc.NextValidators. + nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinary(fc.NextValidators) + if err != nil { + return err + } + batch.Set(nvsKey, nvsBz) + + // Save the fc.SignedHeader + shKey := signedHeaderKey(fc.ChainID(), fc.Height()) + shBz, err := dbp.cdc.MarshalBinary(fc.SignedHeader) + if err != nil { + return err + } + batch.Set(shKey, shBz) + + // And write sync. + batch.WriteSync() + + // Garbage collect. + // TODO: optimize later. + if dbp.limit > 0 { + dbp.deleteAfterN(fc.ChainID(), dbp.limit) + } + + return nil +} + +// Implements Provider. +func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) ( + FullCommit, error) { + + dbp.logger.Info("DBProvider.LatestFullCommit()...", + "chainID", chainID, "minHeight", minHeight, "maxHeight", maxHeight) + + if minHeight <= 0 { + minHeight = 1 + } + if maxHeight == 0 { + maxHeight = 1<<63 - 1 + } + + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, maxHeight), + signedHeaderKey(chainID, minHeight-1), + ) + defer itr.Close() + + for itr.Valid() { + key := itr.Key() + _, _, ok := parseSignedHeaderKey(key) + if !ok { + // Skip over other keys. + itr.Next() + continue + } else { + // Found the latest full commit signed header. + shBz := itr.Value() + sh := types.SignedHeader{} + err := dbp.cdc.UnmarshalBinary(shBz, &sh) + if err != nil { + return FullCommit{}, err + } else { + lfc, err := dbp.fillFullCommit(sh) + if err == nil { + dbp.logger.Info("DBProvider.LatestFullCommit() found latest.", "height", lfc.Height()) + return lfc, nil + } else { + dbp.logger.Error("DBProvider.LatestFullCommit() got error", "lfc", lfc) + dbp.logger.Error(fmt.Sprintf("%+v", err)) + return lfc, err + } + } + } + } + return FullCommit{}, lerr.ErrCommitNotFound() +} + +func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + return dbp.getValidatorSet(chainID, height) +} + +func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + vsBz := dbp.db.Get(validatorSetKey(chainID, height)) + if vsBz == nil { + err = lerr.ErrUnknownValidators(chainID, height) + return + } + err = dbp.cdc.UnmarshalBinary(vsBz, &valset) + if err != nil { + return + } + + // To test deep equality. This makes it easier to test for e.g. valset + // equivalence using assert.Equal (tests for deep equality) in our tests, + // which also tests for unexported/private field equivalence. + valset.TotalVotingPower() + + return +} + +func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { + var chainID = sh.ChainID + var height = sh.Height + var valset, nextValset *types.ValidatorSet + // Load the validator set. + valset, err := dbp.getValidatorSet(chainID, height) + if err != nil { + return FullCommit{}, err + } + // Load the next validator set. + nextValset, err = dbp.getValidatorSet(chainID, height+1) + if err != nil { + return FullCommit{}, err + } + // Return filled FullCommit. 
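`DBProvider` stores three records per height (the validator set at h, the next set at h+1, and the signed header), which is what lets `fillFullCommit` below reassemble a FullCommit from a header alone. A hedged round-trip sketch on an in-memory DB; the FullCommit is assumed to come from elsewhere, e.g. a client provider:

```go
package main

import (
	"fmt"

	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/lite"
)

// persist saves fc into a fresh in-memory provider and reads it back.
func persist(fc lite.FullCommit, chainID string) error {
	dbp := lite.NewDBProvider("trusted", dbm.NewMemDB())
	if err := dbp.SaveFullCommit(fc); err != nil {
		return err
	}
	// Newest stored commit with height in [1, fc.Height()].
	out, err := dbp.LatestFullCommit(chainID, 1, fc.Height())
	if err != nil {
		return err
	}
	fmt.Println("reloaded height:", out.Height())
	return nil
}
```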
+ return FullCommit{ + SignedHeader: sh, + Validators: valset, + NextValidators: nextValset, + }, nil +} + +func (dbp *DBProvider) deleteAfterN(chainID string, after int) error { + + dbp.logger.Info("DBProvider.deleteAfterN()...", "chainID", chainID, "after", after) + + itr := dbp.db.ReverseIterator( + signedHeaderKey(chainID, 1<<63-1), + signedHeaderKey(chainID, 0), + ) + defer itr.Close() + + var lastHeight int64 = 1<<63 - 1 + var numSeen = 0 + var numDeleted = 0 + + for itr.Valid() { + key := itr.Key() + _, height, ok := parseChainKeyPrefix(key) + if !ok { + return fmt.Errorf("unexpected key %v", key) + } else { + if height < lastHeight { + lastHeight = height + numSeen += 1 + } + if numSeen > after { + dbp.db.Delete(key) + numDeleted += 1 + } + } + itr.Next() + } + + dbp.logger.Info(fmt.Sprintf("DBProvider.deleteAfterN() deleted %v items", numDeleted)) + return nil +} + +//---------------------------------------- +// key encoding + +func signedHeaderKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) +} + +func validatorSetKey(chainID string, height int64) []byte { + return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) +} + +//---------------------------------------- +// key parsing + +var keyPattern = regexp.MustCompile(`^([^/]+)/([0-9]*)/(.*)$`) + +func parseKey(key []byte) (chainID string, height int64, part string, ok bool) { + submatch := keyPattern.FindSubmatch(key) + if submatch == nil { + return "", 0, "", false + } + chainID = string(submatch[1]) + heightStr := string(submatch[2]) + heightInt, err := strconv.Atoi(heightStr) + if err != nil { + return "", 0, "", false + } + height = int64(heightInt) + part = string(submatch[3]) + ok = true // good! + return +} + +func parseSignedHeaderKey(key []byte) (chainID string, height int64, ok bool) { + chainID, height, part, ok := parseKey(key) + if part != "sh" { + return "", 0, false + } + return chainID, height, true +} + +func parseChainKeyPrefix(key []byte) (chainID string, height int64, ok bool) { + chainID, height, _, ok = parseKey(key) + return chainID, height, ok +} diff --git a/lite/doc.go b/lite/doc.go index 89dc702fcf9..59f7705674b 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -1,133 +1,140 @@ /* -Package lite allows you to securely validate headers -without a full node. +Package lite allows you to securely validate headers without a full node. -This library pulls together all the crypto and algorithms, -so given a relatively recent (< unbonding period) known -validator set, one can get indisputable proof that data is in -the chain (current state) or detect if the node is lying to -the client. +This library pulls together all the crypto and algorithms, so given a +relatively recent (< unbonding period) known validator set, one can get +indisputable proof that data is in the chain (current state) or detect if the +node is lying to the client. -Tendermint RPC exposes a lot of info, but a malicious node -could return any data it wants to queries, or even to block -headers, even making up fake signatures from non-existent -validators to justify it. This is a lot of logic to get -right, to be contained in a small, easy to use library, -that does this for you, so you can just build nice UI. +Tendermint RPC exposes a lot of info, but a malicious node could return any +data it wants to queries, or even to block headers, even making up fake +signatures from non-existent validators to justify it.
This is a lot of logic +to get right, to be contained in a small, easy to use library, that does this +for you, so you can just build nice applications. -We design for clients who have no strong trust relationship -with any tendermint node, just the validator set as a whole. -Beyond building nice mobile or desktop applications, the -cosmos hub is another important example of a client, -that needs undeniable proof without syncing the full chain, -in order to efficiently implement IBC. +We design for clients who have no strong trust relationship with any Tendermint +node, just the blockchain and validator set as a whole. -Commits +# Data structures -There are two main data structures that we pass around - Commit -and FullCommit. Both of them mirror what information is -exposed in tendermint rpc. +## SignedHeader -Commit is a block header along with enough validator signatures -to prove its validity (> 2/3 of the voting power). A FullCommit -is a Commit along with the full validator set. When the -validator set doesn't change, the Commit is enough, but since -the block header only has a hash, we need the FullCommit to -follow any changes to the validator set. +SignedHeader is a block header along with a commit -- enough validator +precommit-vote signatures to prove its validity (> 2/3 of the voting power) +given the validator set responsible for signing that header. A FullCommit is a +SignedHeader along with the current and next validator sets. -Certifiers +The hash of the next validator set is included and signed in the SignedHeader. +This lets the lite client keep track of arbitrary changes to the validator set, +as every change to the validator set must be approved by inclusion in the +header and signed in the commit. -A Certifier validates a new Commit given the currently known -state. There are three different types of Certifiers exposed, -each one building on the last one, with additional complexity. +In the worst case, with every block changing the validators around completely, +a lite client can sync up with every block header to verify each validator set +change on the chain. In practice, most applications will not have frequent +drastic updates to the validator set, so the logic defined in this package for +lite client syncing is optimized to use intelligent bisection and +block-skipping for efficient sourcing and verification of these data structures +and updates to the validator set (see the DynamicVerifier for more +information). -Static - given the validator set upon initialization. Verifies -all signatures against that set and if the validator set -changes, it will reject all headers. +The FullCommit is also declared in this package as a convenience structure, +which includes the SignedHeader along with the full current and next +ValidatorSets. -Dynamic - This wraps Static and has the same Certify -method. However, it adds an Update method, which can be called -with a FullCommit when the validator set changes. If it can -prove this is a valid transition, it will update the validator -set. +## Verifier -Inquiring - this wraps Dynamic and implements an auto-update -strategy on top of the Dynamic update. If a call to -Certify fails as the validator set has changed, then it -attempts to find a FullCommit and Update to that header. -To get these FullCommits, it makes use of a Provider. +A Verifier validates a new SignedHeader given the currently known state. There +are two different types of Verifiers provided. 
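The "> 2/3 of the voting power" rule that both Verifiers lean on is a strict inequality, and it is best computed in integer arithmetic to avoid rounding surprises. A minimal sketch of the threshold check (the production check lives in `ValidatorSet.VerifyCommit` and may differ in detail, e.g. in overflow handling):

```go
package main

import "fmt"

// hasQuorum reports whether signed voting power strictly exceeds two
// thirds of the total, in pure integer arithmetic (no float rounding).
func hasQuorum(signed, total int64) bool {
	return signed*3 > total*2
}

func main() {
	fmt.Println(hasQuorum(67, 100)) // true
	fmt.Println(hasQuorum(66, 99))  // false: exactly 2/3 is not enough
}
```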
-Providers
+BaseVerifier - given a validator set and a height, this Verifier verifies
+that > 2/3 of the voting power of the given validator set had signed the
+SignedHeader, that the SignedHeader was to be signed by the exact given
+validator set, and that the height of the commit is at least the given height.

-A Provider allows us to store and retrieve the FullCommits,
-to provide memory to the Inquiring Certifier.
+Even if SignedHeader.Commit is signed by a different validator set, it can
+still be certified with a BaseVerifier, as long as sufficient signatures from
+the previous validator set are present in the commit.

-NewMemStoreProvider - in-memory cache.
+DynamicVerifier - this Verifier implements an auto-update and persistence
+strategy to certify any SignedHeader of the blockchain.

-files.NewProvider - disk backed storage.
+## Provider and PersistentProvider

-client.NewHTTPProvider - query tendermint rpc.
+A Provider allows us to store and retrieve the FullCommits.

-NewCacheProvider - combine multiple providers.
+```go
+type Provider interface {
+	// LatestFullCommit returns the latest commit with
+	// minHeight <= height <= maxHeight.
+	// If maxHeight is zero, returns the latest where
+	// minHeight <= height.
+	LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error)
+}
+```

-The suggested use for local light clients is
-client.NewHTTPProvider for getting new data (Source),
-and NewCacheProvider(NewMemStoreProvider(),
-files.NewProvider()) to store confirmed headers (Trusted)
+* client.NewHTTPProvider - query Tendermint RPC.

-How We Track Validators
+A PersistentProvider is a Provider that also allows for saving state. This is
+used by the DynamicVerifier for persistence.

-Unless you want to blindly trust the node you talk with, you
-need to trace every response back to a hash in a block header
-and validate the commit signatures of that block header match
-the proper validator set. If there is a contant validator
-set, you store it locally upon initialization of the client,
+```go
+type PersistentProvider interface {
+	Provider
+
+	// SaveFullCommit saves a FullCommit (without verification).
+	SaveFullCommit(fc FullCommit) error
+}
+```
+
+* DBProvider - persistence provider for use with any libs/db database.
+* MultiProvider - combine multiple providers.
+
+The suggested use for local light clients is client.NewHTTPProvider(...) for
+getting new data (Source), and NewMultiProvider(NewDBProvider("label",
+dbm.NewMemDB()), NewDBProvider("label", dbm.NewFileDB(...))) to store confirmed
+full commits (Trusted).
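+
+For example, a hedged sketch of that composition (it assumes NewMultiProvider
+is variadic over PersistentProviders and that dbm is tendermint/libs/db; see
+lite/multiprovider.go):
+
+```go
+trusted := NewMultiProvider(
+	NewDBProvider("trusted.mem", dbm.NewMemDB()),
+	NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)),
+)
+```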
+
+# How We Track Validators
+
+Unless you want to blindly trust the node you talk with, you need to trace
+every response back to a hash in a block header and validate the commit
+signatures of that block header match the proper validator set. If there is a
+static validator set, you store it locally upon initialization of the client,
and check against that every time.

-Once there is a dynamic validator set, the issue of
-verifying a block becomes a bit more tricky. There is
-background information in a
-github issue (https://github.com/tendermint/tendermint/issues/377).
-
-In short, if there is a block at height H with a known
-(trusted) validator set V, and another block at height H'
-(H' > H) with validator set V' != V, then we want a way to
-safely update it.
-
-First, get the new (unconfirmed) validator set V' and
-verify H' is internally consistent and properly signed by
-this V'. Assuming it is a valid block, we check that at
-least 2/3 of the validators in V also signed it, meaning
-it would also be valid under our old assumptions.
-That should be enough, but we can also check that the
-V counts for at least 2/3 of the total votes in H'
-for extra safety (we can have a discussion if this is
-strictly required). If we can verify all this,
-then we can accept H' and V' as valid and use that to
-validate all blocks X > H'.
-
-If we cannot update directly from H -> H' because there was
-too much change to the validator set, then we can look for
-some Hm (H < Hm < H') with a validator set Vm. Then we try
-to update H -> Hm and Hm -> H' in two separate steps.
-If one of these steps doesn't work, then we continue
-bisecting, until we eventually have to externally
-validate the valdiator set changes at every block.
-
-Since we never trust any server in this protocol, only the
-signatures themselves, it doesn't matter if the seed comes
-from a (possibly malicious) node or a (possibly malicious) user.
-We can accept it or reject it based only on our trusted
-validator set and cryptographic proofs. This makes it
-extremely important to verify that you have the proper
-validator set when initializing the client, as that is the
-root of all trust.
-
-Or course, this assumes that the known block is within the
-unbonding period to avoid the "nothing at stake" problem.
-If you haven't seen the state in a few months, you will need
-to manually verify the new validator set hash using off-chain
-means (the same as getting the initial hash).
+If the validator set for the blockchain is dynamic, verifying block commits is
+a bit more involved -- if there is a block at height H with a known (trusted)
+validator set V, and another block at height H' (H' > H) with validator set V'
+!= V, then we want a way to safely update it.
+
+First, we get the new (unconfirmed) validator set V' and verify that H' is
+internally consistent and properly signed by this V'. Assuming it is a valid
+block, we check that at least 2/3 of the validators in V also signed it,
+meaning it would also be valid under our old assumptions. Then, we accept H'
+and V' as valid and trusted and use that to validate for heights X > H' until a
+more recent and updated validator set is found.
+
+If we cannot update directly from H -> H' because there was too much change to
+the validator set, then we can look for some Hm (H < Hm < H') with a validator
+set Vm. Then we try to update H -> Hm and then Hm -> H' in two steps. If one
+of these steps doesn't work, then we continue bisecting, until we eventually
+have to externally validate the validator set changes at every block.
+
+Since we never trust any server in this protocol, only the signatures
+themselves, it doesn't matter if the seed comes from a (possibly malicious)
+node or a (possibly malicious) user. We can accept it or reject it based only
+on our trusted validator set and cryptographic proofs. This makes it extremely
+important to verify that you have the proper validator set when initializing
+the client, as that is the root of all trust.
+
+The software currently assumes that the unbonding period is infinite in
+duration. If the DynamicVerifier hasn't been updated in a while, you should
+manually verify the block headers using other sources.
+
+TODO: Update the software to handle cases around the unbonding period.
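+
+A minimal end-to-end sketch, mirroring the setup in this package's tests
+(chainID and signedHeader are assumed to be in scope; a real client would use
+client.NewHTTPProvider as the source):
+
+```go
+trust := NewDBProvider("trust", dbm.NewMemDB())
+source := NewDBProvider("source", dbm.NewMemDB())
+cert := NewDynamicVerifier(chainID, trust, source)
+if err := cert.Certify(signedHeader); err != nil {
+	// The header could not be traced back to the trusted state.
+}
+```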
*/ package lite diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go deleted file mode 100644 index 0ddace8b6d5..00000000000 --- a/lite/dynamic_certifier.go +++ /dev/null @@ -1,96 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*DynamicCertifier)(nil) - -// DynamicCertifier uses a StaticCertifier for Certify, but adds an -// Update method to allow for a change of validators. -// -// You can pass in a FullCommit with another validator set, -// and if this is a provably secure transition (< 1/3 change, -// sufficient signatures), then it will update the -// validator set for the next Certify call. -// For security, it will only follow validator set changes -// going forward. -type DynamicCertifier struct { - cert *StaticCertifier - lastHeight int64 -} - -// NewDynamic returns a new dynamic certifier. -func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { - return &DynamicCertifier{ - cert: NewStaticCertifier(chainID, vals), - lastHeight: height, - } -} - -// ChainID returns the chain id of this certifier. -// Implements Certifier. -func (dc *DynamicCertifier) ChainID() string { - return dc.cert.ChainID() -} - -// Validators returns the validators of this certifier. -func (dc *DynamicCertifier) Validators() *types.ValidatorSet { - return dc.cert.vSet -} - -// Hash returns the hash of this certifier. -func (dc *DynamicCertifier) Hash() []byte { - return dc.cert.Hash() -} - -// LastHeight returns the last height of this certifier. -func (dc *DynamicCertifier) LastHeight() int64 { - return dc.lastHeight -} - -// Certify will verify whether the commit is valid and will update the height if it is or return an -// error if it is not. -// Implements Certifier. -func (dc *DynamicCertifier) Certify(check Commit) error { - err := dc.cert.Certify(check) - if err == nil { - // update last seen height if input is valid - dc.lastHeight = check.Height() - } - return err -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -// -// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) -func (dc *DynamicCertifier) Update(fc FullCommit) error { - // ignore all checkpoints in the past -> only to the future - h := fc.Height() - if h <= dc.lastHeight { - return liteErr.ErrPastTime() - } - - // first, verify if the input is self-consistent.... - err := fc.ValidateBasic(dc.ChainID()) - if err != nil { - return err - } - - // now, make sure not too much change... 
meaning this commit - // would be approved by the currently known validator set - // as well as the new set - commit := fc.Commit.Commit - err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) - if err != nil { - return liteErr.ErrTooMuchChange() - } - - // looks good, we can update - dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) - dc.lastHeight = h - return nil -} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go deleted file mode 100644 index 88c145f958e..00000000000 --- a/lite/dynamic_certifier_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/errors" -) - -// TestDynamicCert just makes sure it still works like StaticCert -func TestDynamicCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a certifier based on our known set - chainID := "test-dyno" - cert := lite.NewDynamicCertifier(chainID, vals, 0) - - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, - } - - for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) - if tc.proper { - assert.Nil(err, "%+v", err) - assert.Equal(cert.LastHeight(), tc.height) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) - } - } - } -} - -// TestDynamicUpdate makes sure we update safely and sanely -func TestDynamicUpdate(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - chainID := "test-dyno-up" - keys := lite.GenValKeys(5) - vals := keys.ToValidators(20, 0) - cert := lite.NewDynamicCertifier(chainID, vals, 40) - - // one valid block to give us a sense of time - h := int64(100) - good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) - err := cert.Certify(good) - require.Nil(err, "%+v", err) - - // some new sets to try later - keys2 := keys.Extend(2) - keys3 := keys2.Extend(4) - - // we try to update with some blocks - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect too much change error - }{ - // same validator set, well signed, of course it is okay - {keys, vals, h + 10, 0, len(keys), true, false}, - // same validator set, poorly signed, fails - {keys, vals, h + 20, 2, len(keys), false, 
false},
-
-		// shift the power a little, works if properly signed
-		{keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false},
-		// but not on a poor signature
-		{keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false},
-		// and not if it was in the past
-		{keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false},
-
-		// let's try to adjust to a whole new validator set (we have 5/7 of the votes)
-		{keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false},
-
-		// properly signed but too much change, not allowed (only 7/11 validators known)
-		{keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true},
-	}
-
-	for _, tc := range cases {
-		fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals,
-			[]byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last)
-		err := cert.Update(fc)
-		if tc.proper {
-			assert.Nil(err, "%d: %+v", tc.height, err)
-			// we update last seen height
-			assert.Equal(cert.LastHeight(), tc.height)
-			// and we update the proper validators
-			assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash())
-		} else {
-			assert.NotNil(err, "%d", tc.height)
-			// we don't update the height
-			assert.NotEqual(cert.LastHeight(), tc.height)
-			if tc.changed {
-				assert.True(errors.IsTooMuchChangeErr(err),
-					"%d: %+v", tc.height, err)
-			}
-		}
-	}
-}
diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go
new file mode 100644
index 00000000000..3d1a70f271b
--- /dev/null
+++ b/lite/dynamic_verifier.go
@@ -0,0 +1,216 @@
+package lite
+
+import (
+	"bytes"
+
+	log "github.com/tendermint/tendermint/libs/log"
+	lerr "github.com/tendermint/tendermint/lite/errors"
+	"github.com/tendermint/tendermint/types"
+)
+
+var _ Verifier = (*DynamicVerifier)(nil)
+
+// DynamicVerifier implements an auto-updating Verifier. It uses a
+// "source" provider to obtain the needed FullCommits to securely sync with
+// validator set changes. It stores properly validated data on the
+// "trusted" local system.
+type DynamicVerifier struct {
+	logger  log.Logger
+	chainID string
+	// These are only properly validated data, from the local system.
+	trusted PersistentProvider
+	// This is a source of new info, like a node RPC, or other import method.
+	source Provider
+}
+
+// NewDynamicVerifier returns a new DynamicVerifier. It uses the
+// trusted provider to store validated data and the source provider to
+// obtain missing data (e.g. FullCommits).
+//
+// The trusted provider should be a DBProvider or MultiProvider; the
+// source provider should be a client.HTTPProvider.
+func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier {
+	return &DynamicVerifier{
+		logger:  log.NewNopLogger(),
+		chainID: chainID,
+		trusted: trusted,
+		source:  source,
+	}
+}
+
+func (ic *DynamicVerifier) SetLogger(logger log.Logger) {
+	logger = logger.With("module", "lite")
+	ic.logger = logger
+	ic.trusted.SetLogger(logger)
+	ic.source.SetLogger(logger)
+}
+
+// Implements Verifier.
+func (ic *DynamicVerifier) ChainID() string {
+	return ic.chainID
+}
+
+// Implements Verifier.
+//
+// If the validators have changed since the last known time, it looks to
+// ic.trusted and ic.source to prove the new validators. On success, it will
+// try to store the SignedHeader in ic.trusted if the next
+// validator set can be sourced.
+func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error {
+
+	// Get the latest known full commit <= h-1 from our trusted providers.
+	// The full commit at h-1 contains the valset to sign for h.
+	h := shdr.Height - 1
+	trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
+	if err != nil {
+		return err
+	}
+
+	if trustedFC.Height() == h {
+		// Return error if valset doesn't match.
+		if !bytes.Equal(
+			trustedFC.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			return lerr.ErrUnexpectedValidators(
+				trustedFC.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash)
+		}
+	} else {
+		// If valset doesn't match...
+		if !bytes.Equal(trustedFC.NextValidators.Hash(),
+			shdr.Header.ValidatorsHash) {
+			// ... update.
+			trustedFC, err = ic.updateToHeight(h)
+			if err != nil {
+				return err
+			}
+			// Return error if valset _still_ doesn't match.
+			if !bytes.Equal(trustedFC.NextValidators.Hash(),
+				shdr.Header.ValidatorsHash) {
+				return lerr.ErrUnexpectedValidators(
+					trustedFC.NextValidators.Hash(),
+					shdr.Header.ValidatorsHash)
+			}
+		}
+	}
+
+	// Certify the signed header using the matching valset.
+	cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
+	err = cert.Certify(shdr)
+	if err != nil {
+		return err
+	}
+
+	// Get the next validator set.
+	nextValset, err := ic.source.ValidatorSet(ic.chainID, shdr.Height+1)
+	if lerr.IsErrUnknownValidators(err) {
+		// Ignore this error.
+		return nil
+	} else if err != nil {
+		return err
+	}
+
+	// Create filled FullCommit.
+	nfc := FullCommit{
+		SignedHeader:   shdr,
+		Validators:     trustedFC.NextValidators,
+		NextValidators: nextValset,
+	}
+	// Validate the full commit. This checks the cryptographic
+	// signatures of Commit against Validators.
+	if err := nfc.ValidateFull(ic.chainID); err != nil {
+		return err
+	}
+	// Trust it.
+	return ic.trusted.SaveFullCommit(nfc)
+}
+
+// verifyAndSave will verify if this is a valid source full commit given the
+// best match trusted full commit, and if good, persist to ic.trusted.
+// Returns ErrTooMuchChange when less than 2/3 of the voting power in
+// trustedFC signed sourceFC.
+// Panics if trustedFC.Height() >= sourceFC.Height().
+func (ic *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error {
+	if trustedFC.Height() >= sourceFC.Height() {
+		panic("should not happen")
+	}
+	err := trustedFC.NextValidators.VerifyFutureCommit(
+		sourceFC.Validators,
+		ic.chainID, sourceFC.SignedHeader.Commit.BlockID,
+		sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit,
+	)
+	if err != nil {
+		return err
+	}
+
+	return ic.trusted.SaveFullCommit(sourceFC)
+}
+
+// updateToHeight will use divide-and-conquer to find a path to h.
+// Returns nil error iff we successfully verify and persist a full commit
+// for height h, using repeated applications of bisection if necessary.
+//
+// Returns ErrCommitNotFound if the source provider doesn't have the commit for h.
+func (ic *DynamicVerifier) updateToHeight(h int64) (FullCommit, error) {
+
+	// Fetch latest full commit from source.
+	sourceFC, err := ic.source.LatestFullCommit(ic.chainID, h, h)
+	if err != nil {
+		return FullCommit{}, err
+	}
+
+	// Validate the full commit. This checks the cryptographic
+	// signatures of Commit against Validators.
+	if err := sourceFC.ValidateFull(ic.chainID); err != nil {
+		return FullCommit{}, err
+	}
+
+	// If sourceFC.Height() != h, we can't do it.
+	if sourceFC.Height() != h {
+		return FullCommit{}, lerr.ErrCommitNotFound()
+	}
+
+FOR_LOOP:
+	for {
+		// Fetch latest full commit from trusted.
+		trustedFC, err := ic.trusted.LatestFullCommit(ic.chainID, 1, h)
+		if err != nil {
+			return FullCommit{}, err
+		}
+		// If trusted already has h, we have nothing to do.
+ if trustedFC.Height() == h { + return trustedFC, nil + } + + // Try to update to full commit with checks. + err = ic.verifyAndSave(trustedFC, sourceFC) + if err == nil { + // All good! + return sourceFC, nil + } + + // Handle special case when err is ErrTooMuchChange. + if lerr.IsErrTooMuchChange(err) { + // Divide and conquer. + start, end := trustedFC.Height(), sourceFC.Height() + if !(start < end) { + panic("should not happen") + } + mid := (start + end) / 2 + _, err = ic.updateToHeight(mid) + if err != nil { + return FullCommit{}, err + } + // If we made it to mid, we retry. + continue FOR_LOOP + } + return FullCommit{}, err + } +} + +func (ic *DynamicVerifier) LastTrustedHeight() int64 { + fc, err := ic.trusted.LatestFullCommit(ic.chainID, 1, 1<<63-1) + if err != nil { + panic("should not happen") + } + return fc.Height() +} diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go new file mode 100644 index 00000000000..74e2d55a9c3 --- /dev/null +++ b/lite/dynamic_verifier_test.go @@ -0,0 +1,153 @@ +package lite + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" +) + +func TestInquirerValidPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := NewDBProvider("trust", dbm.NewMemDB()) + source := NewDBProvider("source", dbm.NewMemDB()) + + // Set up the validators to generate test blocks. + var vote int64 = 10 + keys := genPrivKeys(5) + nkeys := keys.Extend(1) + + // Construct a bunch of commits, each with one more height than the last. + chainID := "inquiry-test" + consHash := []byte("params") + resHash := []byte("results") + count := 50 + fcz := make([]FullCommit, count) + for i := 0; i < count; i++ { + vals := keys.ToValidators(vote, 0) + nextVals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) + appHash := []byte(fmt.Sprintf("h=%d", h)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nextVals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) + } + + // Initialize a Verifier with the initial state. + err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert := NewDynamicVerifier(chainID, trust, source) + cert.SetLogger(log.TestingLogger()) + + // This should fail validation: + sh := fcz[count-1].SignedHeader + err = cert.Certify(sh) + require.NotNil(err) + + // Adding a few commits in the middle should be insufficient. + for i := 10; i < 13; i++ { + err := source.SaveFullCommit(fcz[i]) + require.Nil(err) + } + err = cert.Certify(sh) + assert.NotNil(err) + + // With more info, we succeed. + for i := 0; i < count; i++ { + err := source.SaveFullCommit(fcz[i]) + require.Nil(err) + } + err = cert.Certify(sh) + assert.Nil(err, "%+v", err) +} + +func TestInquirerVerifyHistorical(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := NewDBProvider("trust", dbm.NewMemDB()) + source := NewDBProvider("source", dbm.NewMemDB()) + + // Set up the validators to generate test blocks. + var vote int64 = 10 + keys := genPrivKeys(5) + nkeys := keys.Extend(1) + + // Construct a bunch of commits, each with one more height than the last. 
+	chainID := "inquiry-test"
+	count := 10
+	consHash := []byte("special-params")
+	fcz := make([]FullCommit, count)
+	for i := 0; i < count; i++ {
+		vals := keys.ToValidators(vote, 0)
+		nextVals := nkeys.ToValidators(vote, 0)
+		h := int64(1 + i)
+		appHash := []byte(fmt.Sprintf("h=%d", h))
+		resHash := []byte(fmt.Sprintf("res=%d", h))
+		fcz[i] = keys.GenFullCommit(
+			chainID, h, nil,
+			vals, nextVals,
+			appHash, consHash, resHash, 0, len(keys))
+		// Extend the keys by 1 each time.
+		keys = nkeys
+		nkeys = nkeys.Extend(1)
+	}
+
+	// Initialize a Verifier with the initial state.
+	err := trust.SaveFullCommit(fcz[0])
+	require.Nil(err)
+	cert := NewDynamicVerifier(chainID, trust, source)
+	cert.SetLogger(log.TestingLogger())
+
+	// Store a few full commits as trust.
+	for _, i := range []int{2, 5} {
+		trust.SaveFullCommit(fcz[i])
+	}
+
+	// See if we can jump forward using trusted full commits.
+	// Source doesn't have fcz[9], so cert.LastTrustedHeight won't change.
+	err = source.SaveFullCommit(fcz[7])
+	require.Nil(err, "%+v", err)
+	sh := fcz[8].SignedHeader
+	err = cert.Certify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
+	fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
+	require.NotNil(err, "%+v", err)
+	assert.Equal(fc_, (FullCommit{}))
+
+	// With fcz[9], Certify will update the last trusted height.
+	err = source.SaveFullCommit(fcz[9])
+	require.Nil(err, "%+v", err)
+	sh = fcz[8].SignedHeader
+	err = cert.Certify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
+	fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
+	require.Nil(err, "%+v", err)
+	assert.Equal(fc_.Height(), fcz[8].Height())
+
+	// Add access to all full commits via untrusted source.
+	for i := 0; i < count; i++ {
+		err := source.SaveFullCommit(fcz[i])
+		require.Nil(err)
+	}
+
+	// Try to check an unknown seed in the past.
+	sh = fcz[3].SignedHeader
+	err = cert.Certify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
+
+	// Jump all the way forward again.
+	sh = fcz[count-1].SignedHeader
+	err = cert.Certify(sh)
+	require.Nil(err, "%+v", err)
+	assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
+}
diff --git a/lite/errors/errors.go b/lite/errors/errors.go
index 99e42a0bdd3..61426b234a9 100644
--- a/lite/errors/errors.go
+++ b/lite/errors/errors.go
@@ -3,90 +3,110 @@ package errors
import (
	"fmt"

-	"github.com/pkg/errors"
+	cmn "github.com/tendermint/tendermint/libs/common"
)

-var (
-	errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier")
-	errCommitNotFound    = fmt.Errorf("Commit not found by provider")
-	errTooMuchChange     = fmt.Errorf("Validators change too much to safely update")
-	errPastTime          = fmt.Errorf("Update older than certifier height")
-	errNoPathFound       = fmt.Errorf("Cannot find a path of validators")
-)
+//----------------------------------------
+// Error types
+
+type errCommitNotFound struct{}

-// IsCommitNotFoundErr checks whether an error is due to missing data
-func IsCommitNotFoundErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errCommitNotFound)
+func (e errCommitNotFound) Error() string {
+	return "Commit not found by provider"
}

-// ErrCommitNotFound indicates that a the requested commit was not found.
-func ErrCommitNotFound() error {
-	return errors.WithStack(errCommitNotFound)
+type errUnexpectedValidators struct {
+	got  []byte
+	want []byte
}

-// IsValidatorsChangedErr checks whether an error is due
-// to a differing validator set.
-func IsValidatorsChangedErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errValidatorsChanged)
+func (e errUnexpectedValidators) Error() string {
+	return fmt.Sprintf("Validator set is different. Got %X want %X",
+		e.got, e.want)
}

-// ErrValidatorsChanged indicates that the validator set was changed between two commits.
-func ErrValidatorsChanged() error {
-	return errors.WithStack(errValidatorsChanged)
+type errTooMuchChange struct{}
+
+func (e errTooMuchChange) Error() string {
+	return "Insufficient signatures to validate due to valset changes"
}

-// IsTooMuchChangeErr checks whether an error is due to too much change
-// between these validators sets.
-func IsTooMuchChangeErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errTooMuchChange)
+type errUnknownValidators struct {
+	chainID string
+	height  int64
}

-// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
-func ErrTooMuchChange() error {
-	return errors.WithStack(errTooMuchChange)
+func (e errUnknownValidators) Error() string {
+	return fmt.Sprintf("Validators are unknown or missing for chain %s and height %d",
+		e.chainID, e.height)
}

-// IsPastTimeErr ...
-func IsPastTimeErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errPastTime)
+//----------------------------------------
+// Methods for above error types
+
+//-----------------
+// ErrCommitNotFound
+
+// ErrCommitNotFound indicates that the requested commit was not found.
+func ErrCommitNotFound() error {
+	return cmn.ErrorWrap(errCommitNotFound{}, "")
}

-// ErrPastTime ...
-func ErrPastTime() error {
-	return errors.WithStack(errPastTime)
+func IsErrCommitNotFound(err error) bool {
+	if err_, ok := err.(cmn.Error); ok {
+		_, ok := err_.Data().(errCommitNotFound)
+		return ok
+	}
+	return false
}

-// IsNoPathFoundErr checks whether an error is due to no path of
-// validators in provider from where we are to where we want to be
-func IsNoPathFoundErr(err error) bool {
-	return err != nil && (errors.Cause(err) == errNoPathFound)
+//-----------------
+// ErrUnexpectedValidators
+
+// ErrUnexpectedValidators indicates a validator set mismatch.
+func ErrUnexpectedValidators(got, want []byte) error {
+	return cmn.ErrorWrap(errUnexpectedValidators{
+		got:  got,
+		want: want,
+	}, "")
}

-// ErrNoPathFound ...
-func ErrNoPathFound() error {
-	return errors.WithStack(errNoPathFound)
+func IsErrUnexpectedValidators(err error) bool {
+	if err_, ok := err.(cmn.Error); ok {
+		_, ok := err_.Data().(errUnexpectedValidators)
+		return ok
+	}
+	return false
}

-//--------------------------------------------
+//-----------------
+// ErrTooMuchChange

-type errHeightMismatch struct {
-	h1, h2 int64
+// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3.
+func ErrTooMuchChange() error { + return cmn.ErrorWrap(errTooMuchChange{}, "") } -func (e errHeightMismatch) Error() string { - return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) +func IsErrTooMuchChange(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errTooMuchChange) + return ok + } + return false } -// IsHeightMismatchErr checks whether an error is due to data from different blocks -func IsHeightMismatchErr(err error) bool { - if err == nil { - return false - } - _, ok := errors.Cause(err).(errHeightMismatch) - return ok +//----------------- +// ErrUnknownValidators + +// ErrUnknownValidators indicates that some validator set was missing or unknown. +func ErrUnknownValidators(chainID string, height int64) error { + return cmn.ErrorWrap(errUnknownValidators{chainID, height}, "") } -// ErrHeightMismatch returns an mismatch error with stack-trace -func ErrHeightMismatch(h1, h2 int64) error { - return errors.WithStack(errHeightMismatch{h1, h2}) +func IsErrUnknownValidators(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errUnknownValidators) + return ok + } + return false } diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go deleted file mode 100644 index 479215e476a..00000000000 --- a/lite/errors/errors_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorHeight(t *testing.T) { - e1 := ErrHeightMismatch(2, 3) - e1.Error() - assert.True(t, IsHeightMismatchErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsHeightMismatchErr(e2)) - assert.False(t, IsHeightMismatchErr(nil)) -} diff --git a/lite/files/commit.go b/lite/files/commit.go deleted file mode 100644 index 8a7e4721e12..00000000000 --- a/lite/files/commit.go +++ /dev/null @@ -1,93 +0,0 @@ -package files - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -const ( - // MaxFullCommitSize is the maximum number of bytes we will - // read in for a full commit to avoid excessive allocations - // in the deserializer - MaxFullCommitSize = 1024 * 1024 -) - -// SaveFullCommit exports the seed in binary / go-amino style -func SaveFullCommit(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.MarshalBinaryWriter(f, fc) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - bz, err := cdc.MarshalJSON(fc) - if err != nil { - return errors.WithStack(err) - } - _, err = f.Write(bz) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// LoadFullCommit loads the full commit from the file system. -func LoadFullCommit(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.UnmarshalBinaryReader(f, &fc, 0) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} - -// LoadFullCommitJSON loads the commit from the file system in JSON format. 
-func LoadFullCommitJSON(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - bz, err := ioutil.ReadAll(f) - if err != nil { - return fc, errors.WithStack(err) - } - err = cdc.UnmarshalJSON(bz, &fc) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go deleted file mode 100644 index 2891e58091f..00000000000 --- a/lite/files/commit_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package files - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cmn "github.com/tendermint/tendermint/libs/common" - - "github.com/tendermint/tendermint/lite" -) - -func tmpFile() string { - suffix := cmn.RandStr(16) - return filepath.Join(os.TempDir(), "fc-test-"+suffix) -} - -func TestSerializeFullCommits(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // some constants - appHash := []byte("some crazy thing") - chainID := "ser-ial" - h := int64(25) - - // build a fc - keys := lite.GenValKeys(5) - vals := keys.ToValidators(10, 0) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - - require.Equal(h, fc.Height()) - require.Equal(vals.Hash(), fc.ValidatorsHash()) - - // try read/write with json - jfile := tmpFile() - defer os.Remove(jfile) - jseed, err := LoadFullCommitJSON(jfile) - assert.NotNil(err) - err = SaveFullCommitJSON(fc, jfile) - require.Nil(err) - jseed, err = LoadFullCommitJSON(jfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, jseed.Height()) - assert.Equal(vals.Hash(), jseed.ValidatorsHash()) - - // try read/write with binary - bfile := tmpFile() - defer os.Remove(bfile) - bseed, err := LoadFullCommit(bfile) - assert.NotNil(err) - err = SaveFullCommit(fc, bfile) - require.Nil(err) - bseed, err = LoadFullCommit(bfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, bseed.Height()) - assert.Equal(vals.Hash(), bseed.ValidatorsHash()) - - // make sure they don't read the other format (different) - _, err = LoadFullCommit(jfile) - assert.NotNil(err) - _, err = LoadFullCommitJSON(bfile) - assert.NotNil(err) -} diff --git a/lite/files/provider.go b/lite/files/provider.go deleted file mode 100644 index 327b0331afc..00000000000 --- a/lite/files/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package files defines a Provider that stores all data in the filesystem - -We assume the same validator hash may be reused by many different -headers/Commits, and thus store it separately. This leaves us -with three issues: - - 1. Given a validator hash, retrieve the validator set if previously stored - 2. Given a block height, find the Commit with the highest height <= h - 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 - -Note that we do not worry about caching, as that can be achieved by -pairing this with a MemStoreProvider and CacheProvider from certifiers -*/ -package files - -import ( - "encoding/hex" - "fmt" - "math" - "os" - "path/filepath" - "sort" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// nolint -const ( - Ext = ".tsd" - ValDir = "validators" - CheckDir = "checkpoints" - dirPerm = os.FileMode(0755) - //filePerm = os.FileMode(0644) -) - -type provider struct { - valDir string - checkDir string -} - -// NewProvider creates the parent dir and subdirs -// for validators and checkpoints as needed -func NewProvider(dir string) lite.Provider { - valDir := filepath.Join(dir, ValDir) - checkDir := filepath.Join(dir, CheckDir) - for _, d := range []string{valDir, checkDir} { - err := os.MkdirAll(d, dirPerm) - if err != nil { - panic(err) - } - } - return &provider{valDir: valDir, checkDir: checkDir} -} - -func (p *provider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) + Ext -} - -func (p *provider) encodeHeight(h int64) string { - // pad up to 10^12 for height... - return fmt.Sprintf("%012d%s", h, Ext) -} - -// StoreCommit saves a full commit after it has been verified. -func (p *provider) StoreCommit(fc lite.FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - paths := []string{ - filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), - filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), - } - for _, path := range paths { - err := SaveFullCommit(fc, path) - // unknown error in creating or writing immediately breaks - if err != nil { - return err - } - } - return nil -} - -// GetByHeight returns the closest commit with height <= h. -func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { - // first we look for exact match, then search... - path := filepath.Join(p.checkDir, p.encodeHeight(h)) - fc, err := LoadFullCommit(path) - if liteErr.IsCommitNotFoundErr(err) { - path, err = p.searchForHeight(h) - if err == nil { - fc, err = LoadFullCommit(path) - } - } - return fc, err -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - // Note to future: please update by 2077 to avoid rollover - return p.GetByHeight(math.MaxInt32 - 1) -} - -// search for height, looks for a file with highest height < h -// return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int64) (string, error) { - d, err := os.Open(p.checkDir) - if err != nil { - return "", errors.WithStack(err) - } - files, err := d.Readdirnames(0) - - d.Close() - if err != nil { - return "", errors.WithStack(err) - } - - desired := p.encodeHeight(h) - sort.Strings(files) - i := sort.SearchStrings(files, desired) - if i == 0 { - return "", liteErr.ErrCommitNotFound() - } - found := files[i-1] - path := filepath.Join(p.checkDir, found) - return path, errors.WithStack(err) -} - -// GetByHash returns a commit exactly matching this validator hash. 
-func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - path := filepath.Join(p.valDir, p.encodeHash(hash)) - return LoadFullCommit(path) -} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go deleted file mode 100644 index 5deebb1a289..00000000000 --- a/lite/files/provider_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package files_test - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/lite/files" -) - -func checkEqual(stored, loaded lite.FullCommit, chainID string) error { - err := loaded.ValidateBasic(chainID) - if err != nil { - return err - } - if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { - return errors.New("Different block hashes") - } - return nil -} - -func TestFileProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - dir, err := ioutil.TempDir("", "fileprovider-test") - assert.Nil(err) - defer os.RemoveAll(dir) - p := files.NewProvider(dir) - - chainID := "test-files" - appHash := []byte("some-data") - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of seeds... - seeds := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two seeds for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - seeds[i] = lite.NewFullCommit(check, vals) - } - - // check provider is empty - seed, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - seed, err = p.GetByHash(seeds[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range seeds { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - // by height as well - s2, err = p.GetByHeight(s.Height()) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - } - - // make sure we get the last hash if we overstep - seed, err = p.GetByHeight(5000) - if assert.Nil(err, "%+v", err) { - assert.Equal(seeds[count-1].Height(), seed.Height()) - err = checkEqual(seeds[count-1], seed, chainID) - assert.Nil(err) - } - - // and middle ones as well - seed, err = p.GetByHeight(47) - if assert.Nil(err, "%+v", err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, seed.Height()) - } - - // and proper error for too low - _, err = p.GetByHeight(5) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) -} diff --git a/lite/files/wire.go b/lite/files/wire.go deleted file mode 100644 index e7864831e0a..00000000000 --- a/lite/files/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package files - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - types.RegisterBlockAmino(cdc) -} diff --git a/lite/helpers.go b/lite/helpers.go index 9f404f247a9..16d22e7081b 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -1,29 +1,26 @@ package lite import ( - "time" - - crypto 
"github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) -// ValKeys is a helper for testing. +// privKeys is a helper type for testing. // -// It lets us simulate signing with many keys, either ed25519 or secp256k1. -// The main use case is to create a set, and call GenCommit -// to get properly signed header for testing. +// It lets us simulate signing with many keys. The main use case is to create +// a set, and call GenSignedHeader to get properly signed header for testing. // -// You can set different weights of validators each time you call -// ToValidators, and can optionally extend the validator set later -// with Extend or ExtendSecp -type ValKeys []crypto.PrivKey - -// GenValKeys produces an array of private keys to generate commits. -func GenValKeys(n int) ValKeys { - res := make(ValKeys, n) +// You can set different weights of validators each time you call ToValidators, +// and can optionally extend the validator set later with Extend. +type privKeys []crypto.PrivKey + +// genPrivKeys produces an array of private keys to generate commits. +func genPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = ed25519.GenPrivKey() } @@ -31,22 +28,22 @@ func GenValKeys(n int) ValKeys { } // Change replaces the key at index i. -func (v ValKeys) Change(i int) ValKeys { - res := make(ValKeys, len(v)) - copy(res, v) +func (pkz privKeys) Change(i int) privKeys { + res := make(privKeys, len(pkz)) + copy(res, pkz) res[i] = ed25519.GenPrivKey() return res } // Extend adds n more keys (to remove, just take a slice). -func (v ValKeys) Extend(n int) ValKeys { - extra := GenValKeys(n) - return append(v, extra...) +func (pkz privKeys) Extend(n int) privKeys { + extra := genPrivKeys(n) + return append(pkz, extra...) } -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. -func GenSecpValKeys(n int) ValKeys { - res := make(ValKeys, n) +// GenSecpPrivKeys produces an array of secp256k1 private keys to generate commits. +func GenSecpPrivKeys(n int) privKeys { + res := make(privKeys, n) for i := range res { res[i] = secp256k1.GenPrivKey() } @@ -54,33 +51,33 @@ func GenSecpValKeys(n int) ValKeys { } // ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (v ValKeys) ExtendSecp(n int) ValKeys { - extra := GenSecpValKeys(n) - return append(v, extra...) +func (pkz privKeys) ExtendSecp(n int) privKeys { + extra := GenSecpPrivKeys(n) + return append(pkz, extra...) } -// ToValidators produces a list of validators from the set of keys +// ToValidators produces a valset from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). -func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(v)) - for i, k := range v { +func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) } return types.NewValidatorSet(res) } // signHeader properly signs the header with all keys from first to last exclusive. 
-func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { - votes := make([]*types.Vote, len(v)) +func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(pkz)) - // we need this list to keep the ordering... - vset := v.ToValidators(1, 0) + // We need this list to keep the ordering. + vset := pkz.ToValidators(1, 0) - // fill in the votes we want - for i := first; i < last && i < len(v); i++ { - vote := makeVote(header, vset, v[i]) + // Fill in the votes we want. + for i := first; i < last && i < len(pkz); i++ { + vote := makeVote(header, vset, pkz[i]) votes[vote.ValidatorIndex] = vote } @@ -91,15 +88,15 @@ func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit return res } -func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { +func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivKey) *types.Vote { addr := key.PubKey().Address() - idx, _ := vals.GetByAddress(addr) + idx, _ := valset.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: header.Height, Round: 1, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: types.VoteTypePrecommit, BlockID: types.BlockID{Hash: header.Hash()}, } @@ -115,47 +112,46 @@ func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey return vote } -// Silences warning that vals can also be merkle.Hashable -// nolint: interfacer func genHeader(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, Height: height, - Time: time.Now(), + Time: tmtime.Now(), NumTxs: int64(len(txs)), TotalTxs: int64(len(txs)), // LastBlockID // LastCommitHash - ValidatorsHash: vals.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + ValidatorsHash: valset.Hash(), + NextValidatorsHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } -// GenCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit { +// GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. +func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - check := Commit{ + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) + check := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } return check } -// GenFullCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { +// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. 
+func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, + valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - commit := Commit{ + header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) + commit := types.SignedHeader{ Header: header, - Commit: v.signHeader(header, first, last), + Commit: pkz.signHeader(header, first, last), } - return NewFullCommit(commit, vals) + return NewFullCommit(commit, valset, nextValset) } diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go deleted file mode 100644 index 042bd08e3ec..00000000000 --- a/lite/inquiring_certifier.go +++ /dev/null @@ -1,163 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*InquiringCertifier)(nil) - -// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call -// to Certify fails due to a change it validator set, InquiringCertifier will try and find a -// previous FullCommit which it can use to safely update the validator set. It uses a source -// provider to obtain the needed FullCommits. It stores properly validated data on the local system. -type InquiringCertifier struct { - cert *DynamicCertifier - // These are only properly validated data, from local system - trusted Provider - // This is a source of new info, like a node rpc, or other import method - Source Provider -} - -// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store -// validated data and the source provider to obtain missing FullCommits. -// -// Example: The trusted provider should a CacheProvider, MemProvider or files.Provider. The source -// provider should be a client.HTTPProvider. -func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider, - source Provider) (*InquiringCertifier, error) { - - // store the data in trusted - err := trusted.StoreCommit(fc) - if err != nil { - return nil, err - } - - return &InquiringCertifier{ - cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()), - trusted: trusted, - Source: source, - }, nil -} - -// ChainID returns the chain id. -// Implements Certifier. -func (ic *InquiringCertifier) ChainID() string { - return ic.cert.ChainID() -} - -// Validators returns the validator set. -func (ic *InquiringCertifier) Validators() *types.ValidatorSet { - return ic.cert.cert.vSet -} - -// LastHeight returns the last height. -func (ic *InquiringCertifier) LastHeight() int64 { - return ic.cert.lastHeight -} - -// Certify makes sure this is checkpoint is valid. -// -// If the validators have changed since the last know time, it looks -// for a path to prove the new validators. -// -// On success, it will store the checkpoint in the store for later viewing -// Implements Certifier. 
-func (ic *InquiringCertifier) Certify(commit Commit) error { - err := ic.useClosestTrust(commit.Height()) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if !liteErr.IsValidatorsChangedErr(err) { - return err - } - err = ic.updateToHash(commit.Header.ValidatorsHash) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if err != nil { - return err - } - - // store the new checkpoint - return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators())) -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -func (ic *InquiringCertifier) Update(fc FullCommit) error { - err := ic.useClosestTrust(fc.Height()) - if err != nil { - return err - } - - err = ic.cert.Update(fc) - if err == nil { - err = ic.trusted.StoreCommit(fc) - } - return err -} - -func (ic *InquiringCertifier) useClosestTrust(h int64) error { - closest, err := ic.trusted.GetByHeight(h) - if err != nil { - return err - } - - // if the best seed is not the one we currently use, - // let's just reset the dynamic validator - if closest.Height() != ic.LastHeight() { - ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) - } - return nil -} - -// updateToHash gets the validator hash we want to update to -// if IsTooMuchChangeErr, we try to find a path by binary search over height -func (ic *InquiringCertifier) updateToHash(vhash []byte) error { - // try to get the match, and update - fc, err := ic.Source.GetByHash(vhash) - if err != nil { - return err - } - err = ic.cert.Update(fc) - // handle IsTooMuchChangeErr by using divide and conquer - if liteErr.IsTooMuchChangeErr(err) { - err = ic.updateToHeight(fc.Height()) - } - return err -} - -// updateToHeight will use divide-and-conquer to find a path to h -func (ic *InquiringCertifier) updateToHeight(h int64) error { - // try to update to this height (with checks) - fc, err := ic.Source.GetByHeight(h) - if err != nil { - return err - } - start, end := ic.LastHeight(), fc.Height() - if end <= start { - return liteErr.ErrNoPathFound() - } - err = ic.Update(fc) - - // we can handle IsTooMuchChangeErr specially - if !liteErr.IsTooMuchChangeErr(err) { - return err - } - - // try to update to mid - mid := (start + end) / 2 - err = ic.updateToHeight(mid) - if err != nil { - return err - } - - // if we made it to mid, we recurse - return ic.updateToHeight(h) -} diff --git a/lite/inquiring_certifier_test.go b/lite/inquiring_certifier_test.go deleted file mode 100644 index db8160bdc8d..00000000000 --- a/lite/inquiring_certifier_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// nolint: vetshadow -package lite_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" -) - -func TestInquirerValidPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - consHash := []byte("params") - resHash := []byte("results") - count := 50 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - 
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - require.Nil(err) - - // this should fail validation.... - commit := commits[count-1].Commit - err = cert.Certify(commit) - require.NotNil(err) - - // adding a few commits in the middle should be insufficient - for i := 10; i < 13; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerMinimalPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "minimal-path" - consHash := []byte("other-params") - count := 12 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the validators, so we are just below 2/3 - keys = keys.Extend(len(keys)/2 - 1) - vals := keys.ToValidators(vote, 0) - h := int64(5 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // this should fail validation.... 
- commit := commits[count-1].Commit - err := cert.Certify(commit) - require.NotNil(err) - - // add a few seed in the middle should be insufficient - for i := 5; i < 8; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerVerifyHistorical(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - count := 10 - consHash := []byte("special-params") - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // store a few commits as trust - for _, i := range []int{2, 5} { - trust.StoreCommit(commits[i]) - } - - // let's see if we can jump forward using trusted commits - err := source.StoreCommit(commits[7]) - require.Nil(err, "%+v", err) - check := commits[7].Commit - err = cert.Certify(check) - require.Nil(err, "%+v", err) - assert.Equal(check.Height(), cert.LastHeight()) - - // add access to all commits via untrusted source - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - - // try to check an unknown seed in the past - mid := commits[3].Commit - err = cert.Certify(mid) - require.Nil(err, "%+v", err) - assert.Equal(mid.Height(), cert.LastHeight()) - - // and jump all the way forward again - end := commits[count-1].Commit - err = cert.Certify(end) - require.Nil(err, "%+v", err) - assert.Equal(end.Height(), cert.LastHeight()) -} diff --git a/lite/memprovider.go b/lite/memprovider.go deleted file mode 100644 index ac0d832156d..00000000000 --- a/lite/memprovider.go +++ /dev/null @@ -1,152 +0,0 @@ -package lite - -import ( - "encoding/hex" - "sort" - "sync" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -type memStoreProvider struct { - mtx sync.RWMutex - // byHeight is always sorted by Height... need to support range search (nil, h] - // btree would be more efficient for larger sets - byHeight fullCommits - byHash map[string]FullCommit - - sorted bool -} - -// fullCommits just exists to allow easy sorting -type fullCommits []FullCommit - -func (s fullCommits) Len() int { return len(s) } -func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s fullCommits) Less(i, j int) bool { - return s[i].Height() < s[j].Height() -} - -// NewMemStoreProvider returns a new in-memory provider. -func NewMemStoreProvider() Provider { - return &memStoreProvider{ - byHeight: fullCommits{}, - byHash: map[string]FullCommit{}, - } -} - -func (m *memStoreProvider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) -} - -// StoreCommit stores a FullCommit after verifying it. 
-func (m *memStoreProvider) StoreCommit(fc FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - // store the valid fc - key := m.encodeHash(fc.ValidatorsHash()) - - m.mtx.Lock() - defer m.mtx.Unlock() - m.byHash[key] = fc - m.byHeight = append(m.byHeight, fc) - m.sorted = false - return nil -} - -// GetByHeight returns the FullCommit for height h or an error if the commit is not found. -func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { - // By heuristics, GetByHeight with linearsearch is fast enough - // for about 50 keys but after that, it needs binary search. - // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 - m.mtx.RLock() - n := len(m.byHeight) - m.mtx.RUnlock() - - if n <= 50 { - return m.getByHeightLinearSearch(h) - } - return m.getByHeightBinarySearch(h) -} - -func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { - if !m.sorted { - sort.Sort(m.byHeight) - m.sorted = true - } -} - -func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - // search from highest to lowest - for i := len(m.byHeight) - 1; i >= 0; i-- { - if fc := m.byHeight[i]; fc.Height() <= h { - return fc, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - low, high := 0, len(m.byHeight)-1 - var mid int - var hmid int64 - var midFC FullCommit - // Our goal is to either find: - // * item ByHeight with the query - // * greatest height with a height <= query - for low <= high { - mid = int(uint(low+high) >> 1) // Avoid an overflow - midFC = m.byHeight[mid] - hmid = midFC.Height() - switch { - case hmid == h: - return midFC, nil - case hmid < h: - low = mid + 1 - case hmid > h: - high = mid - 1 - } - } - - if high >= 0 { - if highFC := m.byHeight[high]; highFC.Height() < h { - return highFC, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - fc, ok := m.byHash[m.encodeHash(hash)] - if !ok { - return fc, liteErr.ErrCommitNotFound() - } - return fc, nil -} - -// LatestCommit returns the latest FullCommit or an error if no commits exist. -func (m *memStoreProvider) LatestCommit() (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - l := len(m.byHeight) - if l == 0 { - return FullCommit{}, liteErr.ErrCommitNotFound() - } - m.sortByHeightIfNecessaryLocked() - return m.byHeight[l-1], nil -} diff --git a/lite/multiprovider.go b/lite/multiprovider.go new file mode 100644 index 00000000000..734d042c4b9 --- /dev/null +++ b/lite/multiprovider.go @@ -0,0 +1,83 @@ +package lite + +import ( + log "github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" +) + +// multiProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +type multiProvider struct { + logger log.Logger + providers []PersistentProvider +} + +// NewMultiProvider returns a new provider which wraps multiple other providers. 
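// Usage sketch for the layering described above, mirroring the wiring that
// NewVerifier performs later in this diff (the lite and dbm imports and the
// rootDir parameter are assumptions here):
func newTrustStoreSketch(rootDir string) lite.PersistentProvider {
	return lite.NewMultiProvider(
		lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10), // fast cache, consulted first
		lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)), // durable fallback
	)
}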
+func NewMultiProvider(providers ...PersistentProvider) *multiProvider { + return &multiProvider{ + logger: log.NewNopLogger(), + providers: providers, + } +} + +// SetLogger sets logger on self and all subproviders. +func (mc *multiProvider) SetLogger(logger log.Logger) { + mc.logger = logger + for _, p := range mc.providers { + p.SetLogger(logger) + } +} + +// SaveFullCommit saves on all providers, and aborts on the first error. +func (mc *multiProvider) SaveFullCommit(fc FullCommit) (err error) { + for _, p := range mc.providers { + err = p.SaveFullCommit(fc) + if err != nil { + return + } + } + return +} + +// LatestFullCommit loads the latest from all providers and provides +// the latest FullCommit that satisfies the conditions. +// Returns the first error encountered. +func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (fc FullCommit, err error) { + for _, p := range mc.providers { + var fc_ FullCommit + fc_, err = p.LatestFullCommit(chainID, minHeight, maxHeight) + if lerr.IsErrCommitNotFound(err) { + err = nil + continue + } else if err != nil { + return + } + if fc == (FullCommit{}) { + fc = fc_ + } else if fc_.Height() > fc.Height() { + fc = fc_ + } + if fc.Height() == maxHeight { + return + } + } + if fc == (FullCommit{}) { + err = lerr.ErrCommitNotFound() + return + } + return +} + +// ValidatorSet returns validator set at height as provided by the first +// provider which has it, or an error otherwise. +func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { + for _, p := range mc.providers { + valset, err = p.ValidatorSet(chainID, height) + if err == nil { + // TODO Log unexpected types of errors. + return valset, nil + } + } + return nil, lerr.ErrUnknownValidators(chainID, height) +} diff --git a/lite/performance_test.go b/lite/performance_test.go deleted file mode 100644 index 3b805a41855..00000000000 --- a/lite/performance_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package lite - -import ( - "fmt" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - cmn "github.com/tendermint/tendermint/libs/common" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - - // Store a bunch of commits at specific heights - // and then ensure that: - // * getByHeightLinearSearch - // * getByHeightBinarySearch - // both return the exact same result - - // 1. Non-existent height commits - nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} - ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) - ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) - - // 2. Save some known height commits - knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} - createAndStoreCommits(t, p, knownHeights) - - // 3. Now check if those heights are retrieved - ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) - ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) - - // 4. And now for the height probing to ensure that any height - // requested returns a fullCommit of height <= requestedHeight. 
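// The invariant probed here (return the greatest stored height <= the
// requested height) can also be expressed with the standard library; a sketch
// for comparison with getByHeightBinarySearch above, assuming byHeight is
// sorted ascending and "sort" is imported:
func latestAtOrBelowSketch(byHeight []FullCommit, h int64) (FullCommit, bool) {
	// sort.Search finds the smallest index whose height exceeds h,
	// so the answer, if any, sits immediately before it.
	i := sort.Search(len(byHeight), func(i int) bool { return byHeight[i].Height() > h })
	if i == 0 {
		return FullCommit{}, false // everything stored is above h
	}
	return byHeight[i-1], true
}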
- comparegetByHeightAlgorithms(t, p, 0, 0) - comparegetByHeightAlgorithms(t, p, 1, 1) - comparegetByHeightAlgorithms(t, p, 2, 1) - comparegetByHeightAlgorithms(t, p, 5, 1) - comparegetByHeightAlgorithms(t, p, 7, 7) - comparegetByHeightAlgorithms(t, p, 10, 9) - comparegetByHeightAlgorithms(t, p, 12, 12) - comparegetByHeightAlgorithms(t, p, 14, 13) - comparegetByHeightAlgorithms(t, p, 19, 18) - comparegetByHeightAlgorithms(t, p, 43, 23) - comparegetByHeightAlgorithms(t, p, 45, 44) - comparegetByHeightAlgorithms(t, p, 1025, 1024) - comparegetByHeightAlgorithms(t, p, 101, 100) - comparegetByHeightAlgorithms(t, p, 1e3, 199) - comparegetByHeightAlgorithms(t, p, 1e4, 1024) - comparegetByHeightAlgorithms(t, p, 1e9, 1e9) - comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) -} - -func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { - chainID := "cache-best-height-binary-and-linear" - appHash := []byte("0xdeadbeef") - keys := GenValKeys(len(heights) / 2) - - for _, h := range heights { - vals := keys.ToValidators(10, int64(len(heights)/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} - -func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { - algos := map[string]func(int64) (FullCommit, error){ - "getHeightByLinearSearch": p.getByHeightLinearSearch, - "getHeightByBinarySearch": p.getByHeightBinarySearch, - } - - for algo, fn := range algos { - fc, err := fn(ask) - // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) - require.Nil(t, err, "%s: %+v", algo, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "%s: %+v", algo, err) - } - } -} - -var blankFullCommit FullCommit - -func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) - assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) - } -} - -func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) - assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) - } -} - -func BenchmarkGenCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkGenCommit(b, keys) -} - -func benchmarkGenCommit(b *testing.B, keys ValKeys) { - chainID := fmt.Sprintf("bench-%d", len(keys)) - vals := keys.ToValidators(20, 10) - for i := 0; i < b.N; i++ { - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) - } -} - -// this benchmarks generating one key -func BenchmarkGenValKeys(b *testing.B) { - keys := GenValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} 
- -// this benchmarks generating one key -func BenchmarkGenSecpValKeys(b *testing.B) { - keys := GenSecpValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} - -func BenchmarkToValidators20(b *testing.B) { - benchmarkToValidators(b, 20) -} - -func BenchmarkToValidators100(b *testing.B) { - benchmarkToValidators(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidators(b *testing.B, nodes int) { - keys := GenValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkToValidatorsSec100(b *testing.B) { - benchmarkToValidatorsSec(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := GenSecpValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkCertifyCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { - chainID := "bench-certify" - vals := keys.ToValidators(20, 10) - cert := NewStaticCertifier(chainID, vals) - check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) - for i := 0; i < b.N; i++ { - err := cert.Certify(check) - if err != nil { - panic(err) - } - } - -} - -type algo bool - -const ( - linearSearch = true - binarySearch = false -) - -// Lazy load the commits -var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit -var h5, h50, h100, h500, h1000 []int64 -var commitsOnce sync.Once - -func lazyGenerateFullCommits(b *testing.B) { - b.Logf("Generating FullCommits") - commitsOnce.Do(func() { - fcs5, h5 = genFullCommits(nil, nil, 5) - b.Logf("Generated 5 FullCommits") - fcs50, h50 = genFullCommits(fcs5, h5, 50) - b.Logf("Generated 50 FullCommits") - fcs100, h100 = genFullCommits(fcs50, h50, 100) - b.Logf("Generated 100 FullCommits") - fcs500, h500 = genFullCommits(fcs100, h100, 500) - b.Logf("Generated 500 FullCommits") - fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) - b.Logf("Generated 1000 FullCommits") - }) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) -} 
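// The deleted benchmarks around here share the standard Go pattern of keeping
// fixture setup out of the measurement, as benchmarkMemStoreProvidergetByHeight
// below does; a minimal sketch of that structure (names hypothetical):
func benchmarkWithSetupSketch(b *testing.B, setup func() func()) {
	b.StopTimer()  // pause the clock while fixtures are built
	op := setup()  // e.g. fill a provider with FullCommits
	b.StartTimer()
	b.ResetTimer() // drop anything measured so far
	for i := 0; i < b.N; i++ {
		op()
	}
	b.ReportAllocs()
}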
- -func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) -} - -var rng = cmn.NewRand() - -func init() { - rng.Seed(10) -} - -func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { - lazyGenerateFullCommits(b) - - b.StopTimer() - mp := NewMemStoreProvider() - for i, fc := range fcs { - if err := mp.StoreCommit(fc); err != nil { - b.Fatalf("FullCommit #%d: err: %v", i, err) - } - } - qHeights := make([]int64, len(fHeights)) - copy(qHeights, fHeights) - // Append some non-existent heights to trigger the worst cases. - qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) - - memP := mp.(*memStoreProvider) - searchFn := memP.getByHeightLinearSearch - if algo == binarySearch { // nolint - searchFn = memP.getByHeightBinarySearch - } - - hPerm := rng.Perm(len(qHeights)) - b.StartTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, j := range hPerm { - h := qHeights[j] - if _, err := searchFn(h); err != nil { - } - } - } - b.ReportAllocs() -} - -func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { - fcs := make([]FullCommit, len(prevFC)) - copy(fcs, prevFC) - heights := make([]int64, len(prevH)) - copy(heights, prevH) - - appHash := []byte("benchmarks") - chainID := "benchmarks-gen-full-commits" - n := want - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - vals := keys.ToValidators(10, int64(n/2)) - h := int64(20 + 10*i) - fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) - heights = append(heights, h) - } - return fcs, heights -} - -func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - // 1. With no commits yet stored, it should return ErrCommitNotFound - got, err := p.LatestCommit() - require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") - require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") - - // 2. Generate some full commits now and we'll add them unsorted. - genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) - fc, err := p.LatestCommit() - require.Nil(t, err, "with commits saved no error expected") - require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") - require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. 
the largest expected") -} - -func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { - n := len(heights) - appHash := []byte("tests") - chainID := "tests-gen-full-commits" - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - h := heights[i] - vals := keys.ToValidators(10, int64(n/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} diff --git a/lite/provider.go b/lite/provider.go index 22dc964a1a0..97e06a06d3b 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -1,103 +1,32 @@ package lite -// Provider is used to get more validators by other means. -// -// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... +import ( + log "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +// Provider provides information for the lite client to sync validators. +// Examples: MemProvider, files.Provider, client.Provider, CacheProvider. type Provider interface { - // StoreCommit saves a FullCommit after we have verified it, - // so we can query for it later. Important for updating our - // store of trusted commits. - StoreCommit(fc FullCommit) error - // GetByHeight returns the closest commit with height <= h. - GetByHeight(h int64) (FullCommit, error) - // GetByHash returns a commit exactly matching this validator hash. - GetByHash(hash []byte) (FullCommit, error) - // LatestCommit returns the newest commit stored. - LatestCommit() (FullCommit, error) -} -// cacheProvider allows you to place one or more caches in front of a source -// Provider. It runs through them in order until a match is found. -// So you can keep a local cache, and check with the network if -// no data is there. -type cacheProvider struct { - Providers []Provider -} + // LatestFullCommit returns the latest commit with minHeight <= height <= + // maxHeight. + // If maxHeight is zero, returns the latest where minHeight <= height. + LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) -// NewCacheProvider returns a new provider which wraps multiple other providers. -func NewCacheProvider(providers ...Provider) Provider { - return cacheProvider{ - Providers: providers, - } -} + // Get the valset that corresponds to chainID and height and return. + // Height must be >= 1. + ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) -// StoreCommit tries to add the seed to all providers. -// -// Aborts on first error it encounters (closest provider) -func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { - for _, p := range c.Providers { - err = p.StoreCommit(fc) - if err != nil { - break - } - } - return err + // Set a logger. + SetLogger(logger log.Logger) } -// GetByHeight should return the closest possible match from all providers. -// -// The Cache is usually organized in order from cheapest call (memory) -// to most expensive calls (disk/network). However, since GetByHeight returns -// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would -// give us the exact match, a naive "stop at first non-error" would hide -// the actual desired results. -// -// Thus, we query each provider in order until we find an exact match -// or we finished querying them all. If at least one returned a non-error, -// then this returns the best match (minimum h-h'). 
-func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.GetByHeight(h) - if err == nil { - if tfc.Height() > fc.Height() { - fc = tfc - } - if tfc.Height() == h { - break - } - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { - for _, p := range c.Providers { - fc, err = p.GetByHash(hash) - if err == nil { - break - } - } - return fc, err -} +// A provider that can also persist new information. +// Examples: MemProvider, files.Provider, CacheProvider. +type PersistentProvider interface { + Provider -// LatestCommit returns the latest FullCommit or an error if no commit exists. -func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.LatestCommit() - if err == nil && tfc.Height() > fc.Height() { - fc = tfc - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err + // SaveFullCommit saves a FullCommit (without verification). + SaveFullCommit(fc FullCommit) error } diff --git a/lite/provider_test.go b/lite/provider_test.go index 77b5b1a858f..94b467de812 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -1,98 +1,90 @@ -// nolint: vetshadow -package lite_test +package lite import ( + "errors" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" + lerr "github.com/tendermint/tendermint/lite/errors" + "github.com/tendermint/tendermint/types" ) -// missingProvider doesn't store anything, always a miss -// Designed as a mock for testing +// missingProvider doesn't store anything, always a miss. +// Designed as a mock for testing. type missingProvider struct{} // NewMissingProvider returns a provider which does not store anything and always misses. 
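// The two query shapes the new interface supports, side by side (sketch;
// return values elided):
func latestFullCommitUsageSketch(p Provider, chainID string, h int64) {
	p.LatestFullCommit(chainID, 1, 1<<63-1) // newest commit stored, at any height
	p.LatestFullCommit(chainID, h, h)       // commit at exactly h, else ErrCommitNotFound
}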
-func NewMissingProvider() lite.Provider { +func NewMissingProvider() PersistentProvider { return missingProvider{} } -func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } -func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) SaveFullCommit(FullCommit) error { return nil } +func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { + return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() +func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { + return nil, errors.New("missing validator set") } +func (missingProvider) SetLogger(_ log.Logger) {} func TestMemProvider(t *testing.T) { - p := lite.NewMemStoreProvider() + p := NewDBProvider("mem", dbm.NewMemDB()) checkProvider(t, p, "test-mem", "empty") } -func TestCacheProvider(t *testing.T) { - p := lite.NewCacheProvider( +func TestMultiProvider(t *testing.T) { + p := NewMultiProvider( NewMissingProvider(), - lite.NewMemStoreProvider(), + NewDBProvider("mem", dbm.NewMemDB()), NewMissingProvider(), ) checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") } -func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { +func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { assert, require := assert.New(t), require.New(t) appHash := []byte(app) - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // make a bunch of commits... - commits := make([]lite.FullCommit, count) + // Make a bunch of full commits. + fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - // two commits for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... vals := keys.ToValidators(10, int64(count/2)) h := int64(20 + 10*i) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) + fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } - // check provider is empty - fc, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - fc, err = p.GetByHash(commits[3].ValidatorsHash()) + // Check that provider is initially empty. + fc, err := p.LatestFullCommit(chainID, 1, 1<<63-1) require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) + assert.True(lerr.IsErrCommitNotFound(err)) - // now add them all to the provider - for _, s := range commits { - err = p.StoreCommit(s) + // Save all full commits to the provider. + for _, fc := range fcz { + err = p.SaveFullCommit(fc) require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - assert.Equal(s, s2) - // by height as well - s2, err = p.GetByHeight(s.Height()) + // Make sure we can get it back. + fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) - assert.Equal(s, s2) + assert.Equal(fc.SignedHeader, fc2.SignedHeader) + assert.Equal(fc.Validators, fc2.Validators) + assert.Equal(fc.NextValidators, fc2.NextValidators) } - // make sure we get the last hash if we overstep - fc, err = p.GetByHeight(5000) + // Make sure we get the last hash if we overstep. 
+ fc, err = p.LatestFullCommit(chainID, 1, 5000) if assert.Nil(err) { - assert.Equal(commits[count-1].Height(), fc.Height()) - assert.Equal(commits[count-1], fc) + assert.Equal(fcz[count-1].Height(), fc.Height()) + assert.Equal(fcz[count-1], fc) } - // and middle ones as well - fc, err = p.GetByHeight(47) + // ... and middle ones as well. + fc, err = p.LatestFullCommit(chainID, 1, 47) if assert.Nil(err) { // we only step by 10, so 40 must be the one below this assert.EqualValues(40, fc.Height()) @@ -100,50 +92,49 @@ func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { } -// this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { - fc, err := p.GetByHeight(ask) - require.Nil(t, err, "GetByHeight") +// This will make a get height, and if it is good, set the data as well. +func checkLatestFullCommit(t *testing.T, p PersistentProvider, chainID string, ask, expect int64) { + fc, err := p.LatestFullCommit(chainID, 1, ask) + require.Nil(t, err) if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "StoreCommit") + err = p.SaveFullCommit(fc) + require.Nil(t, err) } } -func TestCacheGetsBestHeight(t *testing.T) { - // assert, require := assert.New(t), require.New(t) +func TestMultiLatestFullCommit(t *testing.T) { require := require.New(t) - // we will write data to the second level of the cache (p2), - // and see what gets cached, stored in - p := lite.NewMemStoreProvider() - p2 := lite.NewMemStoreProvider() - cp := lite.NewCacheProvider(p, p2) + // We will write data to the second level of the cache (p2), and see what + // gets cached/stored in. + p := NewDBProvider("mem1", dbm.NewMemDB()) + p2 := NewDBProvider("mem2", dbm.NewMemDB()) + cp := NewMultiProvider(p, p2) chainID := "cache-best-height" appHash := []byte("01234567") - keys := lite.GenValKeys(5) + keys := genPrivKeys(5) count := 10 - // set a bunch of commits + // Set a bunch of full commits. for i := 0; i < count; i++ { vals := keys.ToValidators(10, int64(count/2)) h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.StoreCommit(fc) + fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) + err := p2.SaveFullCommit(fc) require.NoError(err) } - // let's get a few heights from the cache and set them proper - checkGetHeight(t, cp, 57, 50) - checkGetHeight(t, cp, 33, 30) + // Get a few heights from the cache and set them proper. 
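// What checkLatestFullCommit above exercises, reduced to its essence (sketch):
// a read through the layered provider followed by an explicit write-back, so
// commits first found in a deeper layer end up saved in the shallower ones too.
func readThenSaveSketch(cp PersistentProvider, chainID string, maxH int64) error {
	fc, err := cp.LatestFullCommit(chainID, 1, maxH)
	if err != nil {
		return err
	}
	return cp.SaveFullCommit(fc) // SaveFullCommit on a multiProvider writes every layer
}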
+ checkLatestFullCommit(t, cp, chainID, 57, 50) + checkLatestFullCommit(t, cp, chainID, 33, 30) // make sure they are set in p as well (but nothing else) - checkGetHeight(t, p, 44, 30) - checkGetHeight(t, p, 50, 50) - checkGetHeight(t, p, 99, 50) + checkLatestFullCommit(t, p, chainID, 44, 30) + checkLatestFullCommit(t, p, chainID, 50, 50) + checkLatestFullCommit(t, p, chainID, 99, 50) // now, query the cache for a higher value - checkGetHeight(t, p2, 99, 90) - checkGetHeight(t, cp, 99, 90) + checkLatestFullCommit(t, p2, chainID, 99, 90) + checkLatestFullCommit(t, cp, chainID, 99, 90) } diff --git a/lite/proxy/block.go b/lite/proxy/block.go index 00b8c87fb05..035871a1185 100644 --- a/lite/proxy/block.go +++ b/lite/proxy/block.go @@ -2,27 +2,24 @@ package proxy import ( "bytes" + "errors" - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - certerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) -func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { +func ValidateBlockMeta(meta *types.BlockMeta, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil BlockMeta") } // TODO: check the BlockID?? - return ValidateHeader(&meta.Header, check) + return ValidateHeader(&meta.Header, sh) } -func ValidateBlock(meta *types.Block, check lite.Commit) error { +func ValidateBlock(meta *types.Block, sh types.SignedHeader) error { if meta == nil { return errors.New("expecting a non-nil Block") } - err := ValidateHeader(&meta.Header, check) + err := ValidateHeader(&meta.Header, sh) if err != nil { return err } @@ -32,17 +29,19 @@ func ValidateBlock(meta *types.Block, check lite.Commit) error { return nil } -func ValidateHeader(head *types.Header, check lite.Commit) error { +func ValidateHeader(head *types.Header, sh types.SignedHeader) error { if head == nil { return errors.New("expecting a non-nil Header") } - // make sure they are for the same height (obvious fail) - if head.Height != check.Height() { - return certerr.ErrHeightMismatch(head.Height, check.Height()) + if sh.Header == nil { + return errors.New("unexpected empty SignedHeader") + } + // Make sure they are for the same height (obvious fail). + if head.Height != sh.Height { + return errors.New("Header heights mismatched") } - // check if they are equal by using hashes - chead := check.Header - if !bytes.Equal(head.Hash(), chead.Hash()) { + // Check if they are equal by using hashes. 
+ if !bytes.Equal(head.Hash(), sh.Hash()) { return errors.New("Headers don't match") } return nil diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go deleted file mode 100644 index 6e319dc0d25..00000000000 --- a/lite/proxy/certifier.go +++ /dev/null @@ -1,35 +0,0 @@ -package proxy - -import ( - "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - "github.com/tendermint/tendermint/lite/files" -) - -func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { - trust := lite.NewCacheProvider( - lite.NewMemStoreProvider(), - files.NewProvider(rootDir), - ) - - source := certclient.NewHTTPProvider(nodeAddr) - - // XXX: total insecure hack to avoid `init` - fc, err := source.LatestCommit() - /* XXX - // this gets the most recent verified commit - fc, err := trust.LatestCommit() - if certerr.IsCommitNotFoundErr(err) { - return nil, errors.New("Please run init first to establish a root of trust") - }*/ - if err != nil { - return nil, err - } - - cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go index 5a2713e3c0f..6a7c2354cd5 100644 --- a/lite/proxy/errors.go +++ b/lite/proxy/errors.go @@ -1,22 +1,24 @@ package proxy import ( - "fmt" - - "github.com/pkg/errors" + cmn "github.com/tendermint/tendermint/libs/common" ) -//-------------------------------------------- +type errNoData struct{} -var errNoData = fmt.Errorf("No data returned for query") +func (e errNoData) Error() string { + return "No data returned for query" +} -// IsNoDataErr checks whether an error is due to a query returning empty data -func IsNoDataErr(err error) bool { - return errors.Cause(err) == errNoData +// IsErrNoData checks whether an error is due to a query returning empty data +func IsErrNoData(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errNoData) + return ok + } + return false } func ErrNoData() error { - return errors.WithStack(errNoData) + return cmn.ErrorWrap(errNoData{}, "") } - -//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go deleted file mode 100644 index 7f51be50fff..00000000000 --- a/lite/proxy/errors_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proxy - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorNoData(t *testing.T) { - e1 := ErrNoData() - assert.True(t, IsNoDataErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsNoDataErr(e2)) - assert.False(t, IsNoDataErr(nil)) -} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 0ca5be1749d..6f5a2899220 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -1,15 +1,16 @@ package proxy import ( + "fmt" + "github.com/pkg/errors" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/client" - certerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" ) // KeyProof represents a proof of existence or absence of a single key. @@ -27,13 +28,13 @@ type KeyProof interface { } // GetWithProof will query the key on the given node, and verify it has -// a valid proof, as defined by the certifier. 
+// a valid proof, as defined by the Verifier. // // If there is any error in checking, returns an error. // If val is non-empty, proof should be KeyExistsProof // If val is empty, proof should be KeyMissingProof func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, - cert lite.Certifier) ( + cert lite.Verifier) ( val cmn.HexBytes, height int64, proof KeyProof, err error) { if reqHeight < 0 { @@ -53,7 +54,7 @@ func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, // GetWithProofOptions is useful if you want full access to the ABCIQueryOptions func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions, - node rpcclient.Client, cert lite.Certifier) ( + node rpcclient.Client, cert lite.Verifier) ( *ctypes.ResultABCIQuery, KeyProof, error) { _resp, err := node.ABCIQueryWithOptions(path, key, opts) @@ -75,12 +76,12 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // AppHash for height H is in header H+1 - commit, err := GetCertifiedCommit(resp.Height+1, node, cert) + signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { return nil, nil, err } - _ = commit + _ = signedHeader return &ctypes.ResultABCIQuery{Response: resp}, nil, nil /* // TODO refactor so iavl stuff is not in tendermint core @@ -98,7 +99,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash) + err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -117,7 +118,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption } // Validate the proof against the certified header to ensure data integrity. - err = aproof.Verify(resp.Key, nil, commit.Header.AppHash) + err = aproof.Verify(resp.Key, nil, signedHeader.AppHash) if err != nil { return nil, nil, errors.Wrap(err, "Couldn't verify proof") } @@ -125,28 +126,29 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption */ } -// GetCertifiedCommit gets the signed header for a given height -// and certifies it. Returns error if unable to get a proven header. -func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) { +// GetCertifiedCommit gets the signed header for a given height and certifies +// it. Returns error if unable to get a proven header. +func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (types.SignedHeader, error) { // FIXME: cannot use cert.GetByHeight for now, as it also requires // Validators and will fail on querying tendermint for non-current height. // When this is supported, we should use it instead... - rpcclient.WaitForHeight(node, h, nil) - cresp, err := node.Commit(&h) + rpcclient.WaitForHeight(client, h, nil) + cresp, err := client.Commit(&h) if err != nil { - return lite.Commit{}, err + return types.SignedHeader{}, err } - commit := client.CommitFromResult(cresp) - // validate downloaded checkpoint with our request and trust store. - if commit.Height() != h { - return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) + // Validate downloaded checkpoint with our request and trust store. 
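// Note on the resp.Height+1 lookup in GetWithProofOptions above: executing
// block H produces the app hash that only block H+1's header commits to, so a
// proof about state at height H must be checked against the next header, as
// the commented-out verification path does:
//
//	err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash)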
+ sh := cresp.SignedHeader + if sh.Height != h { + return types.SignedHeader{}, fmt.Errorf("height mismatch: want %v got %v", + h, sh.Height) } - if err = cert.Certify(commit); err != nil { - return lite.Commit{}, err + if err = cert.Certify(sh); err != nil { + return types.SignedHeader{}, err } - return commit, nil + return sh, nil } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 38a43af2b81..7f759cc690c 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -19,12 +19,12 @@ import ( ) var node *nm.Node +var chainID = "tendermint_test" // TODO use from config. // TODO fix tests!! func TestMain(m *testing.M) { app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) code := m.Run() @@ -55,28 +55,28 @@ func _TestAppProofs(t *testing.T) { brh := br.Height // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) client.WaitForHeight(cl, 3, nil) - latest, err := source.LatestCommit() + latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) require.NoError(err, "%+v", err) - rootHash := latest.Header.AppHash + rootHash := latest.SignedHeader.AppHash // verify a query before the tx block has no data (and valid non-exist proof) bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) fmt.Println(bs, height, proof, err) require.NotNil(err) - require.True(IsNoDataErr(err), err.Error()) + require.True(IsErrNoData(err), err.Error()) require.Nil(bs) // but given that block it is good bs, height, proof, err = GetWithProof(k, brh, cl, cert) require.NoError(err, "%+v", err) require.NotNil(proof) - require.True(height >= int64(latest.Header.Height)) + require.True(height >= int64(latest.Height())) // Alexis there is a bug here, somehow the above code gives us rootHash = nil // and proof.Verify doesn't care, while proofNotExists.Verify fails. @@ -92,7 +92,7 @@ func _TestAppProofs(t *testing.T) { // Test non-existing key. missing := []byte("my-missing-key") bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsNoDataErr(err)) + require.True(IsErrNoData(err)) require.Nil(bs) require.NotNil(proof) err = proof.Verify(missing, nil, rootHash) @@ -114,10 +114,10 @@ func _TestTxProofs(t *testing.T) { require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) + cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. 
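// Round trip of the typed-error scheme from lite/proxy/errors.go above,
// mirroring what the deleted errors_test.go checked (sketch):
func errNoDataRoundTripSketch() bool {
	err := ErrNoData()      // cmn.ErrorWrap(errNoData{}, "")
	return IsErrNoData(err) // true: classification type-asserts Data(), not error strings
}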
key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index 47c0ff6deb7..1ce4d667e33 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -6,7 +6,6 @@ import ( "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite/proxy" "github.com/tendermint/tendermint/types" ) @@ -26,40 +25,40 @@ var hdrHeight11 = types.Header{ func TestValidateBlock(t *testing.T) { tests := []struct { - block *types.Block - commit lite.Commit - wantErr string + block *types.Block + signedHeader types.SignedHeader + wantErr string }{ { block: nil, wantErr: "non-nil Block", }, { - block: &types.Block{}, + block: &types.Block{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - block: &types.Block{Header: types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", + block: &types.Block{Header: types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - block: &types.Block{Header: types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + block: &types.Block{Header: types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Header.Hash mismatch test { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &hdrHeight11}, + block: &types.Block{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &hdrHeight11}, }, // End Header.Hash mismatch test @@ -69,7 +68,7 @@ func TestValidateBlock(t *testing.T) { Header: types.Header{Height: 11}, Data: types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("0xDEADBEEF")}}, }, @@ -80,7 +79,7 @@ func TestValidateBlock(t *testing.T) { Header: types.Header{Height: 11, DataHash: deadBeefHash}, Data: types.Data{Txs: deadBeefTxs}, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11}, Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, }, @@ -89,7 +88,7 @@ func TestValidateBlock(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.commit) + err := proxy.ValidateBlock(tt.block, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d", i) @@ -105,40 +104,40 @@ func TestValidateBlock(t *testing.T) { func TestValidateBlockMeta(t *testing.T) { tests := []struct { - meta *types.BlockMeta - commit lite.Commit - wantErr string + meta *types.BlockMeta + signedHeader types.SignedHeader + wantErr string }{ { meta: nil, wantErr: "non-nil BlockMeta", }, { - meta: &types.BlockMeta{}, + meta: &types.BlockMeta{}, wantErr: "unexpected empty SignedHeader", }, // Start Header.Height mismatch test { - meta: &types.BlockMeta{Header: types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", 
+ meta: &types.BlockMeta{Header: types.Header{Height: 10}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Header heights mismatched", }, { - meta: &types.BlockMeta{Header: types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, + meta: &types.BlockMeta{Header: types.Header{Height: 11}}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, }, // End Header.Height mismatch test // Start Headers don't match test { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &types.Header{Height: 11}}, + wantErr: "Headers don't match", }, { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &hdrHeight11}, + meta: &types.BlockMeta{Header: hdrHeight11}, + signedHeader: types.SignedHeader{Header: &hdrHeight11}, }, { @@ -150,7 +149,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{Height: 11, DataHash: deadBeefHash}, }, wantErr: "Headers don't match", @@ -164,7 +163,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime1, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint"), @@ -183,7 +182,7 @@ func TestValidateBlockMeta(t *testing.T) { Time: testTime2, }, }, - commit: lite.Commit{ + signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, ValidatorsHash: []byte("Tendermint-x"), @@ -197,7 +196,7 @@ func TestValidateBlockMeta(t *testing.T) { } for i, tt := range tests { - err := proxy.ValidateBlockMeta(tt.meta, tt.commit) + err := proxy.ValidateBlockMeta(tt.meta, tt.signedHeader) if tt.wantErr != "" { if err == nil { assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go new file mode 100644 index 00000000000..a93d30c7f7a --- /dev/null +++ b/lite/proxy/verifier.go @@ -0,0 +1,41 @@ +package proxy + +import ( + cmn "github.com/tendermint/tendermint/libs/common" + dbm "github.com/tendermint/tendermint/libs/db" + log "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/lite" + lclient "github.com/tendermint/tendermint/lite/client" +) + +func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.DynamicVerifier, error) { + + logger = logger.With("module", "lite/proxy") + logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) + + memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10) + lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) + trust := lite.NewMultiProvider( + memProvider, + lvlProvider, + ) + source := lclient.NewProvider(chainID, client) + cert := lite.NewDynamicVerifier(chainID, trust, source) + cert.SetLogger(logger) // Sets logger recursively. + + // TODO: Make this more secure, e.g. make it interactive in the console? 
+	_, err := trust.LatestFullCommit(chainID, 1, 1<<63-1)
+	if err != nil {
+		logger.Info("lite/proxy/NewVerifier found no trusted full commit, initializing from source from height 1...")
+		fc, err := source.LatestFullCommit(chainID, 1, 1)
+		if err != nil {
+			return nil, cmn.ErrorWrap(err, "fetching source full commit @ height 1")
+		}
+		err = trust.SaveFullCommit(fc)
+		if err != nil {
+			return nil, cmn.ErrorWrap(err, "saving full commit to trusted")
+		}
+	}
+
+	return cert, nil
+}
diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go
index f0eb6b41e20..522511a81c3 100644
--- a/lite/proxy/wrapper.go
+++ b/lite/proxy/wrapper.go
@@ -4,25 +4,24 @@ import (
 	cmn "github.com/tendermint/tendermint/libs/common"
 
 	"github.com/tendermint/tendermint/lite"
-	certclient "github.com/tendermint/tendermint/lite/client"
 	rpcclient "github.com/tendermint/tendermint/rpc/client"
 	ctypes "github.com/tendermint/tendermint/rpc/core/types"
 )
 
 var _ rpcclient.Client = Wrapper{}
 
-// Wrapper wraps a rpcclient with a Certifier and double-checks any input that is
+// Wrapper wraps a rpcclient with a Verifier and double-checks any input that is
 // provable before passing it along. Allows you to make any rpcclient fully secure.
 type Wrapper struct {
 	rpcclient.Client
-	cert *lite.InquiringCertifier
+	cert *lite.DynamicVerifier
 }
 
-// SecureClient uses a given certifier to wrap an connection to an untrusted
+// SecureClient uses a given Verifier to wrap a connection to an untrusted
 // host and return a cryptographically secure rpc client.
 //
 // If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
-func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper {
+func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper {
 	wrap := Wrapper{c, cert}
 	// TODO: no longer possible as no more such interface exposed....
 	// if we wrap http client, then we can swap out the event switch to filter
@@ -53,11 +52,11 @@ func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
 		return res, err
 	}
 	h := int64(res.Height)
-	check, err := GetCertifiedCommit(h, w.Client, w.cert)
+	sh, err := GetCertifiedCommit(h, w.Client, w.cert)
 	if err != nil {
 		return res, err
 	}
-	err = res.Proof.Validate(check.Header.DataHash)
+	err = res.Proof.Validate(sh.DataHash)
 	return res, err
 }
 
@@ -74,12 +73,12 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock
 	// go and verify every blockmeta in the result....
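// Typical wiring for Wrapper/SecureClient above (sketch; the endpoint is
// hypothetical and NewHTTP is assumed to be the rpc client constructor of
// this era):
//
//	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")
//	sc := proxy.SecureClient(c, verifier) // responses are certified before being returned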
for _, meta := range r.BlockMetas { // get a checkpoint to verify from - c, err := w.Commit(&meta.Header.Height) + res, err := w.Commit(&meta.Header.Height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) - err = ValidateBlockMeta(meta, check) + sh := res.SignedHeader + err = ValidateBlockMeta(meta, sh) if err != nil { return nil, err } @@ -90,41 +89,57 @@ func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock // Block returns an entire block and verifies all signatures func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { - r, err := w.Client.Block(height) + resBlock, err := w.Client.Block(height) if err != nil { return nil, err } // get a checkpoint to verify from - c, err := w.Commit(height) + resCommit, err := w.Commit(height) if err != nil { return nil, err } - check := certclient.CommitFromResult(c) + sh := resCommit.SignedHeader // now verify - err = ValidateBlockMeta(r.BlockMeta, check) + err = ValidateBlockMeta(resBlock.BlockMeta, sh) if err != nil { return nil, err } - err = ValidateBlock(r.Block, check) + err = ValidateBlock(resBlock.Block, sh) if err != nil { return nil, err } - return r, nil + return resBlock, nil } // Commit downloads the Commit and certifies it with the lite. // // This is the foundation for all other verification in this module func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { + if height == nil { + resStatus, err := w.Client.Status() + if err != nil { + return nil, err + } + // NOTE: If resStatus.CatchingUp, there is a race + // condition where the validator set for the next height + // isn't available until some time after the blockstore + // has height h on the remote node. This isn't an issue + // once the node has caught up, and a syncing node likely + // won't have this issue esp with the implementation we + // have here, but we may have to address this at some + // point. + height = new(int64) + *height = resStatus.SyncInfo.LatestBlockHeight + } rpcclient.WaitForHeight(w.Client, *height, nil) - r, err := w.Client.Commit(height) + res, err := w.Client.Commit(height) // if we got it, then certify it if err == nil { - check := certclient.CommitFromResult(r) - err = w.cert.Certify(check) + sh := res.SignedHeader + err = w.cert.Certify(sh) } - return r, err + return res, err } // // WrappedSwitch creates a websocket connection that auto-verifies any info diff --git a/lite/static_certifier.go b/lite/static_certifier.go deleted file mode 100644 index 1ec3b809a81..00000000000 --- a/lite/static_certifier.go +++ /dev/null @@ -1,73 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*StaticCertifier)(nil) - -// StaticCertifier assumes a static set of validators, set on -// initilization and checks against them. -// The signatures on every header is checked for > 2/3 votes -// against the known validator set upon Certify -// -// Good for testing or really simple chains. Building block -// to support real-world functionality. -type StaticCertifier struct { - chainID string - vSet *types.ValidatorSet - vhash []byte -} - -// NewStaticCertifier returns a new certifier with a static validator set. -func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier { - return &StaticCertifier{ - chainID: chainID, - vSet: vals, - } -} - -// ChainID returns the chain id. -// Implements Certifier. 
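// The static certifier deleted here survives as BaseVerifier; the replacement
// construction, as used in query_test.go earlier in this diff (sketch):
//
//	cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
//	err := cert.Certify(signedHeader) // satisfies the new Verifier interface in lite/types.go below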
-func (sc *StaticCertifier) ChainID() string {
-	return sc.chainID
-}
-
-// Validators returns the validator set.
-func (sc *StaticCertifier) Validators() *types.ValidatorSet {
-	return sc.vSet
-}
-
-// Hash returns the hash of the validator set.
-func (sc *StaticCertifier) Hash() []byte {
-	if len(sc.vhash) == 0 {
-		sc.vhash = sc.vSet.Hash()
-	}
-	return sc.vhash
-}
-
-// Certify makes sure that the commit is valid.
-// Implements Certifier.
-func (sc *StaticCertifier) Certify(commit Commit) error {
-	// do basic sanity checks
-	err := commit.ValidateBasic(sc.chainID)
-	if err != nil {
-		return err
-	}
-
-	// make sure it has the same validator set we have (static means static)
-	if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) {
-		return liteErr.ErrValidatorsChanged()
-	}
-
-	// then make sure we have the proper signatures for this
-	err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID,
-		commit.Header.Height, commit.Commit)
-	return errors.WithStack(err)
-}
diff --git a/lite/static_certifier_test.go b/lite/static_certifier_test.go
deleted file mode 100644
index 03567daa667..00000000000
--- a/lite/static_certifier_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package lite_test
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/tendermint/tendermint/types"
-
-	"github.com/tendermint/tendermint/lite"
-	liteErr "github.com/tendermint/tendermint/lite/errors"
-)
-
-func TestStaticCert(t *testing.T) {
-	// assert, require := assert.New(t), require.New(t)
-	assert := assert.New(t)
-	// require := require.New(t)
-
-	keys := lite.GenValKeys(4)
-	// 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
-	vals := keys.ToValidators(20, 10)
-	// and a certifier based on our known set
-	chainID := "test-static"
-	cert := lite.NewStaticCertifier(chainID, vals)
-
-	cases := []struct {
-		keys        lite.ValKeys
-		vals        *types.ValidatorSet
-		height      int64
-		first, last int  // who actually signs
-		proper      bool // true -> expect no error
-		changed     bool // true -> expect validator change error
-	}{
-		// perfect, signed by everyone
-		{keys, vals, 1, 0, len(keys), true, false},
-		// skip little guy is okay
-		{keys, vals, 2, 1, len(keys), true, false},
-		// but not the big guy
-		{keys, vals, 3, 0, len(keys) - 1, false, false},
-		// even changing the power a little bit breaks the static validator
-		// the sigs are enough, but the validator hash is unknown
-		{keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true},
-	}
-
-	for _, tc := range cases {
-		check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals,
-			[]byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last)
-		err := cert.Certify(check)
-		if tc.proper {
-			assert.Nil(err, "%+v", err)
-		} else {
-			assert.NotNil(err)
-			if tc.changed {
-				assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err)
-			}
-		}
-	}
-
-}
diff --git a/lite/types.go b/lite/types.go
new file mode 100644
index 00000000000..7228c74a9bb
--- /dev/null
+++ b/lite/types.go
@@ -0,0 +1,13 @@
+package lite
+
+import (
+	"github.com/tendermint/tendermint/types"
+)
+
+// Verifier checks the votes to make sure the block really is signed properly.
+// Verifier must know the current or recent set of validators by some other
+// means.
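The new Verifier interface (declared just below) shrinks the old Certifier surface to two methods. For contrast with DynamicVerifier, a static implementation in the spirit of the StaticCertifier deleted above could look like this sketch, where SignedHeader is a cut-down hypothetical stand-in for types.SignedHeader:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"
)

// SignedHeader is a hypothetical stand-in for types.SignedHeader; only the
// fields this sketch needs are present.
type SignedHeader struct {
	ChainID        string
	ValidatorsHash []byte
}

// staticVerifier pins a single validator-set hash, in the spirit of the
// StaticCertifier deleted above: any header signed by a different set fails.
type staticVerifier struct {
	chainID string
	vhash   []byte
}

func (v staticVerifier) ChainID() string { return v.chainID }

func (v staticVerifier) Certify(sh SignedHeader) error {
	if sh.ChainID != v.chainID {
		return errors.New("wrong chain ID")
	}
	if !bytes.Equal(v.vhash, sh.ValidatorsHash) {
		return errors.New("validator set changed")
	}
	// A real implementation would also check that >2/3 of the pinned set
	// signed, as StaticCertifier did via vSet.VerifyCommit.
	return nil
}

func main() {
	v := staticVerifier{chainID: "test-static", vhash: []byte{0x01}}
	fmt.Println(v.Certify(SignedHeader{"test-static", []byte{0x01}})) // <nil>
	fmt.Println(v.Certify(SignedHeader{"test-static", []byte{0x02}})) // validator set changed
}
```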
+type Verifier interface { + Certify(sheader types.SignedHeader) error + ChainID() string +} diff --git a/mempool/mempool.go b/mempool/mempool.go index e1ed4f260a4..381653e64ea 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -3,6 +3,8 @@ package mempool import ( "bytes" "container/list" + "crypto/sha256" + "encoding/binary" "fmt" "sync" "sync/atomic" @@ -79,6 +81,8 @@ type Mempool struct { recheckEnd *clist.CElement // re-checking stops here notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty + // Filter mempool to only accept txs for which filter(tx) returns true. + filter func(types.Tx) bool // Keep a cache of already-seen txs. // This reduces the pressure on the proxyApp. @@ -138,6 +142,12 @@ func (mem *Mempool) SetLogger(l log.Logger) { mem.logger = l } +// WithFilter sets a filter for mempool to only accept txs for which f(tx) +// returns true. +func WithFilter(f func(types.Tx) bool) MempoolOption { + return func(mem *Mempool) { mem.filter = f } +} + // WithMetrics sets the metrics. func WithMetrics(metrics *Metrics) MempoolOption { return func(mem *Mempool) { mem.metrics = metrics } @@ -239,6 +249,10 @@ func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { return ErrMempoolIsFull } + if mem.filter != nil && !mem.filter(tx) { + return + } + // CACHE if !mem.cache.Push(tx) { return ErrTxInCache @@ -312,7 +326,7 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { case *abci.Response_CheckTx: memTx := mem.recheckCursor.Value.(*mempoolTx) if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) { - cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+ + cmn.PanicSanity(fmt.Sprintf("Unexpected tx response from proxy during recheck\n"+ "Expected %X, got %X", r.CheckTx.Data, memTx.tx)) } if r.CheckTx.Code == abci.CodeTypeOK { @@ -366,9 +380,12 @@ func (mem *Mempool) notifyTxsAvailable() { } } -// Reap returns a list of transactions currently in the mempool. -// If maxTxs is -1, there is no cap on the number of returned transactions. -func (mem *Mempool) Reap(maxTxs int) types.Txs { +// ReapMaxBytes reaps transactions from the mempool up to n bytes total. +// If max is negative, there is no cap on the size of all returned +// transactions (~ all available transactions). +func (mem *Mempool) ReapMaxBytes(max int) types.Txs { + var buf [binary.MaxVarintLen64]byte + mem.proxyMtx.Lock() defer mem.proxyMtx.Unlock() @@ -377,19 +394,42 @@ func (mem *Mempool) Reap(maxTxs int) types.Txs { time.Sleep(time.Millisecond * 10) } - txs := mem.collectTxs(maxTxs) + var cur int + // TODO: we will get a performance boost if we have a good estimate of avg + // size per tx, and set the initial capacity based off of that. + // txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max/mem.avgTxSize)) + txs := make([]types.Tx, 0, mem.txs.Len()) + for e := mem.txs.Front(); e != nil; e = e.Next() { + memTx := e.Value.(*mempoolTx) + // amino.UvarintSize is not used here because it won't be possible to reuse buf + aminoOverhead := binary.PutUvarint(buf[:], uint64(len(memTx.tx))) + if max > 0 && cur+len(memTx.tx)+aminoOverhead > max { + return txs + } + cur += len(memTx.tx) + aminoOverhead + txs = append(txs, memTx.tx) + } return txs } -// maxTxs: -1 means uncapped, 0 means none -func (mem *Mempool) collectTxs(maxTxs int) types.Txs { - if maxTxs == 0 { - return []types.Tx{} - } else if maxTxs < 0 { - maxTxs = mem.txs.Len() +// ReapMaxTxs reaps up to max transactions from the mempool. 
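ReapMaxBytes above charges each transaction its own length plus the uvarint length prefix amino writes in front of it, reusing one stack buffer across PutUvarint calls. A self-contained sketch of the same accounting over a plain slice (no clist, no locking; reapMaxBytes is a hypothetical name, not the mempool method):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// reapMaxBytes sketches the size accounting in ReapMaxBytes: each tx costs
// len(tx) plus the uvarint length prefix amino would write before it.
func reapMaxBytes(txs [][]byte, max int) [][]byte {
	var buf [binary.MaxVarintLen64]byte
	out := make([][]byte, 0, len(txs))
	cur := 0
	for _, tx := range txs {
		// PutUvarint returns the number of prefix bytes for this tx length.
		aminoOverhead := binary.PutUvarint(buf[:], uint64(len(tx)))
		if max > 0 && cur+len(tx)+aminoOverhead > max {
			break
		}
		cur += len(tx) + aminoOverhead
		out = append(out, tx)
	}
	return out
}

func main() {
	txs := [][]byte{make([]byte, 10), make([]byte, 10), make([]byte, 10)}
	// Each tx costs 10 bytes + 1 byte of uvarint prefix = 11.
	fmt.Println(len(reapMaxBytes(txs, 25))) // 2: two txs cost 22, a third would make 33
}
```

Note that with this guard, max == 0 behaves like a negative max (no cap), since the size check only fires for max > 0.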
+// If max is negative, there is no cap on the number of returned
+// transactions (~ all available transactions).
+func (mem *Mempool) ReapMaxTxs(max int) types.Txs {
+	mem.proxyMtx.Lock()
+	defer mem.proxyMtx.Unlock()
+
+	if max < 0 {
+		max = mem.txs.Len()
 	}
-	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), maxTxs))
-	for e := mem.txs.Front(); e != nil && len(txs) < maxTxs; e = e.Next() {
+
+	for atomic.LoadInt32(&mem.rechecking) > 0 {
+		// TODO: Something better?
+		time.Sleep(time.Millisecond * 10)
+	}
+
+	txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), max))
+	for e := mem.txs.Front(); e != nil && len(txs) < max; e = e.Next() {
 		memTx := e.Value.(*mempoolTx)
 		txs = append(txs, memTx.tx)
 	}
@@ -399,9 +439,9 @@
 // Update informs the mempool that the given txs were committed and can be discarded.
 // NOTE: this should be called *after* block is committed by consensus.
 // NOTE: unsafe; Lock/Unlock must be managed by caller
-func (mem *Mempool) Update(height int64, txs types.Txs) error {
+func (mem *Mempool) Update(height int64, txs types.Txs, filter func(types.Tx) bool) error {
 	// First, create a lookup map of txns in new txs.
-	txsMap := make(map[string]struct{})
+	txsMap := make(map[string]struct{}, len(txs))
 	for _, tx := range txs {
 		txsMap[string(tx)] = struct{}{}
 	}
@@ -410,6 +450,10 @@
 	mem.height = height
 	mem.notifiedTxsAvailable = false

+	if filter != nil {
+		mem.filter = filter
+	}
+
 	// Remove transactions that are already in txs.
 	goodTxs := mem.filterTxs(txsMap)
 	// Recheck mempool txs if any txs were committed in the block
@@ -422,7 +466,10 @@
 		// mem.recheckCursor re-scans mem.txs and possibly removes some txs.
 		// Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
 	}
+
+	// Update metrics
 	mem.metrics.Size.Set(float64(mem.Size()))
+
 	return nil
 }

@@ -484,11 +531,12 @@ type txCache interface {
 	Remove(tx types.Tx)
 }

-// mapTxCache maintains a cache of transactions.
+// mapTxCache maintains a cache of transactions. This only stores
+// the hash of the tx, due to memory concerns.
 type mapTxCache struct {
 	mtx  sync.Mutex
 	size int
-	map_ map[string]struct{}
+	map_ map[[sha256.Size]byte]*list.Element
 	list *list.List // to remove oldest tx when cache gets too big
 }

@@ -498,7 +546,7 @@ var _ txCache = (*mapTxCache)(nil)
 func newMapTxCache(cacheSize int) *mapTxCache {
 	return &mapTxCache{
 		size: cacheSize,
-		map_: make(map[string]struct{}, cacheSize),
+		map_: make(map[[sha256.Size]byte]*list.Element, cacheSize),
 		list: list.New(),
 	}
 }

@@ -506,7 +554,7 @@ func newMapTxCache(cacheSize int) *mapTxCache {
 // Reset resets the cache to an empty state.
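The filter can be installed two ways: via the WithFilter functional option at construction time, or via the new filter argument to Update. A toy sketch of that option plumbing, with stand-in Mempool/Tx types; unlike the real CheckTx, which silently returns nil for a filtered tx, this sketch returns an error so the drop is visible:

```go
package main

import "fmt"

type Tx []byte

// Mempool and MempoolOption are toy stand-ins for the real types; only the
// option plumbing and the filter check are modeled.
type Mempool struct {
	filter func(Tx) bool
}

type MempoolOption func(*Mempool)

func WithFilter(f func(Tx) bool) MempoolOption {
	return func(mem *Mempool) { mem.filter = f }
}

func NewMempool(opts ...MempoolOption) *Mempool {
	mem := &Mempool{}
	for _, opt := range opts {
		opt(mem)
	}
	return mem
}

func (mem *Mempool) CheckTx(tx Tx) error {
	// Mirrors the early return in CheckTx above: a filtered tx is dropped
	// before it can reach the cache or the proxy app.
	if mem.filter != nil && !mem.filter(tx) {
		return fmt.Errorf("tx of %d bytes rejected by filter", len(tx))
	}
	return nil
}

func main() {
	maxBytes := 4
	mem := NewMempool(WithFilter(func(tx Tx) bool { return len(tx) <= maxBytes }))
	fmt.Println(mem.CheckTx(Tx("ok")))       // <nil>
	fmt.Println(mem.CheckTx(Tx("too long"))) // tx of 8 bytes rejected by filter
}
```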
func (cache *mapTxCache) Reset() { cache.mtx.Lock() - cache.map_ = make(map[string]struct{}, cache.size) + cache.map_ = make(map[[sha256.Size]byte]*list.Element, cache.size) cache.list.Init() cache.mtx.Unlock() } @@ -517,27 +565,35 @@ func (cache *mapTxCache) Push(tx types.Tx) bool { cache.mtx.Lock() defer cache.mtx.Unlock() - if _, exists := cache.map_[string(tx)]; exists { + // Use the tx hash in the cache + txHash := sha256.Sum256(tx) + if _, exists := cache.map_[txHash]; exists { return false } if cache.list.Len() >= cache.size { popped := cache.list.Front() - poppedTx := popped.Value.(types.Tx) - // NOTE: the tx may have already been removed from the map - // but deleting a non-existent element is fine - delete(cache.map_, string(poppedTx)) - cache.list.Remove(popped) + poppedTxHash := popped.Value.([sha256.Size]byte) + delete(cache.map_, poppedTxHash) + if popped != nil { + cache.list.Remove(popped) + } } - cache.map_[string(tx)] = struct{}{} - cache.list.PushBack(tx) + cache.list.PushBack(txHash) + cache.map_[txHash] = cache.list.Back() return true } // Remove removes the given tx from the cache. func (cache *mapTxCache) Remove(tx types.Tx) { cache.mtx.Lock() - delete(cache.map_, string(tx)) + txHash := sha256.Sum256(tx) + popped := cache.map_[txHash] + delete(cache.map_, txHash) + if popped != nil { + cache.list.Remove(popped) + } + cache.mtx.Unlock() } diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index c0f66051f9f..0dbe2bb6f70 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -14,7 +14,6 @@ import ( "github.com/tendermint/tendermint/abci/example/counter" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" cfg "github.com/tendermint/tendermint/config" @@ -92,7 +91,7 @@ func TestTxsAvailable(t *testing.T) { // it should fire once now for the new height // since there are still txs left committedTxs, txs := txs[:50], txs[50:] - if err := mempool.Update(1, committedTxs); err != nil { + if err := mempool.Update(1, committedTxs, nil); err != nil { t.Error(err) } ensureFire(t, mempool.TxsAvailable(), timeoutMS) @@ -104,7 +103,7 @@ func TestTxsAvailable(t *testing.T) { // now call update with all the txs. it should not fire as there are no txs left committedTxs = append(txs, moreTxs...) 
- if err := mempool.Update(2, committedTxs); err != nil { + if err := mempool.Update(2, committedTxs, nil); err != nil { t.Error(err) } ensureNoFire(t, mempool.TxsAvailable(), timeoutMS) @@ -150,8 +149,8 @@ func TestSerialReap(t *testing.T) { } reapCheck := func(exp int) { - txs := mempool.Reap(-1) - require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs))) + txs := mempool.ReapMaxBytes(-1) + require.Equal(t, len(txs), exp, fmt.Sprintf("Expected to reap %v txs but got %v", exp, len(txs))) } updateRange := func(start, end int) { @@ -161,7 +160,7 @@ func TestSerialReap(t *testing.T) { binary.BigEndian.PutUint64(txBytes, uint64(i)) txs = append(txs, txBytes) } - if err := mempool.Update(0, txs); err != nil { + if err := mempool.Update(0, txs, nil); err != nil { t.Error(err) } } @@ -224,6 +223,28 @@ func TestSerialReap(t *testing.T) { reapCheck(600) } +func TestCacheRemove(t *testing.T) { + cache := newMapTxCache(100) + numTxs := 10 + txs := make([][]byte, numTxs) + for i := 0; i < numTxs; i++ { + // probability of collision is 2**-256 + txBytes := make([]byte, 32) + rand.Read(txBytes) + txs[i] = txBytes + cache.Push(txBytes) + // make sure its added to both the linked list and the map + require.Equal(t, i+1, len(cache.map_)) + require.Equal(t, i+1, cache.list.Len()) + } + for i := 0; i < numTxs; i++ { + cache.Remove(txs[i]) + // make sure its removed from both the map and the linked list + require.Equal(t, numTxs-(i+1), len(cache.map_)) + require.Equal(t, numTxs-(i+1), cache.list.Len()) + } +} + func TestMempoolCloseWAL(t *testing.T) { // 1. Create the temporary directory for mempool and WAL testing. rootDir, err := ioutil.TempDir("", "mempool-test") @@ -273,6 +294,35 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 1, len(m3), "expecting the wal match in") } +func BenchmarkCacheInsertTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Push(txs[i]) + } +} + +// This benchmark is probably skewed, since we actually will be removing +// txs in parallel, which may cause some overhead due to mutex locking. 
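The cache rework above keys the map by sha256 digest, a fixed 32 bytes however large the tx, and stores *list.Element values so both eviction and Remove unlink in O(1) instead of scanning the list. A compact, runnable model of the same structure:

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
)

// txCache is a toy model of mapTxCache: the map holds only sha256 digests
// and points at the list element, so eviction and removal are O(1).
type txCache struct {
	size int
	m    map[[sha256.Size]byte]*list.Element
	l    *list.List
}

func newTxCache(size int) *txCache {
	return &txCache{
		size: size,
		m:    make(map[[sha256.Size]byte]*list.Element, size),
		l:    list.New(),
	}
}

func (c *txCache) Push(tx []byte) bool {
	h := sha256.Sum256(tx)
	if _, ok := c.m[h]; ok {
		return false // already seen
	}
	if c.l.Len() >= c.size {
		// Evict the oldest entry from both the map and the list.
		front := c.l.Front()
		delete(c.m, front.Value.([sha256.Size]byte))
		c.l.Remove(front)
	}
	c.m[h] = c.l.PushBack(h)
	return true
}

func (c *txCache) Remove(tx []byte) {
	h := sha256.Sum256(tx)
	if e, ok := c.m[h]; ok {
		delete(c.m, h)
		c.l.Remove(e)
	}
}

func main() {
	c := newTxCache(2)
	fmt.Println(c.Push([]byte("a")), c.Push([]byte("a"))) // true false
	c.Push([]byte("b"))
	c.Push([]byte("c")) // evicts "a"
	c.Remove([]byte("b"))
	fmt.Println(len(c.m), c.l.Len()) // 1 1
}
```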
+func BenchmarkCacheRemoveTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + cache.Push(txs[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Remove(txs[i]) + } +} + func checksumIt(data []byte) string { h := md5.New() h.Write(data) diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go index b4362032a95..8ac400b0a4e 100644 --- a/mempool/reactor_test.go +++ b/mempool/reactor_test.go @@ -86,7 +86,7 @@ func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int time.Sleep(time.Millisecond * 100) } - reapedTxs := mempool.Reap(len(txs)) + reapedTxs := mempool.ReapMaxTxs(len(txs)) for i, tx := range txs { assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i])) } diff --git a/node/node.go b/node/node.go index 0c3396dc686..76f23dfdd45 100644 --- a/node/node.go +++ b/node/node.go @@ -40,6 +40,7 @@ import ( "github.com/tendermint/tendermint/version" _ "net/http/pprof" + "strings" ) //------------------------------------------------------------------------------ @@ -80,8 +81,14 @@ type NodeProvider func(*cfg.Config, log.Logger) (*Node, error) // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { + // Generate node PrivKey + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + return nil, err + } return NewNode(config, privval.LoadOrGenFilePV(config.PrivValidatorFile()), + nodeKey, proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), DefaultGenesisDocProviderFunc(config), DefaultDBProvider, @@ -119,6 +126,7 @@ type Node struct { // network sw *p2p.Switch // p2p connections addrBook pex.AddrBook // known peers + nodeKey *p2p.NodeKey // our node privkey // services eventBus *types.EventBus // pub/sub for services @@ -139,6 +147,7 @@ type Node struct { // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, + nodeKey *p2p.NodeKey, clientCreator proxy.ClientCreator, genesisDocProvider GenesisDocProvider, dbProvider DBProvider, @@ -232,13 +241,15 @@ func NewNode(config *cfg.Config, csMetrics, p2pMetrics, memplMetrics := metricsProvider() // Make MempoolReactor - mempoolLogger := logger.With("module", "mempool") + maxBytes := state.ConsensusParams.TxSize.MaxBytes mempool := mempl.NewMempool( config.Mempool, proxyApp.Mempool(), state.LastBlockHeight, mempl.WithMetrics(memplMetrics), + mempl.WithFilter(func(tx types.Tx) bool { return len(tx) <= maxBytes }), ) + mempoolLogger := logger.With("module", "mempool") mempool.SetLogger(mempoolLogger) mempool.InitWAL() // no need to have the mempool wal during tests mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool) @@ -293,6 +304,7 @@ func NewNode(config *cfg.Config, sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) + p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile()) // Optionally, start the pex reactor // @@ -312,7 +324,7 @@ func NewNode(config *cfg.Config, // TODO persistent peers ? 
so we can have their DNS addrs saved pexReactor := pex.NewPEXReactor(addrBook, &pex.PEXReactorConfig{ - Seeds: cmn.SplitAndTrim(config.P2P.Seeds, ",", " "), + Seeds: splitAndTrimEmpty(config.P2P.Seeds, ",", " "), SeedMode: config.P2P.SeedMode, }) pexReactor.SetLogger(p2pLogger) @@ -327,7 +339,7 @@ func NewNode(config *cfg.Config, if config.FilterPeers { // NOTE: addr is ip:port sw.SetAddrFilter(func(addr net.Addr) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())}) + resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/addr/%s", addr.String())}) if err != nil { return err } @@ -337,7 +349,7 @@ func NewNode(config *cfg.Config, return nil }) sw.SetIDFilter(func(id p2p.ID) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/id/%s", id)}) + resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/id/%s", id)}) if err != nil { return err } @@ -364,7 +376,7 @@ func NewNode(config *cfg.Config, return nil, err } if config.TxIndex.IndexTags != "" { - txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " "))) + txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " "))) } else if config.TxIndex.IndexAllTags { txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) } else { @@ -392,6 +404,7 @@ func NewNode(config *cfg.Config, sw: sw, addrBook: addrBook, + nodeKey: nodeKey, stateDB: stateDB, blockStore: blockStore, @@ -424,23 +437,15 @@ func (n *Node) OnStart() error { n.Logger.With("module", "p2p")) n.sw.AddListener(l) - // Generate node PrivKey - // TODO: pass in like privValidator - nodeKey, err := p2p.LoadOrGenNodeKey(n.config.NodeKeyFile()) - if err != nil { - return err - } - n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) - - nodeInfo := n.makeNodeInfo(nodeKey.ID()) + nodeInfo := n.makeNodeInfo(n.nodeKey.ID()) n.sw.SetNodeInfo(nodeInfo) - n.sw.SetNodeKey(nodeKey) + n.sw.SetNodeKey(n.nodeKey) // Add ourselves to addrbook to prevent dialing ourselves n.addrBook.AddOurAddress(nodeInfo.NetAddress()) // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(cmn.SplitAndTrim(n.config.P2P.PrivatePeerIDs, ",", " ")) + n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) // Start the RPC server before the P2P server // so we can eg. 
receive txs for the first block
@@ -465,7 +470,7 @@ func (n *Node) OnStart() error {

 	// Always connect to persistent peers
 	if n.config.P2P.PersistentPeers != "" {
-		err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true)
+		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
 		if err != nil {
 			return err
 		}
@@ -547,7 +552,7 @@ func (n *Node) ConfigureRPC() {

 func (n *Node) startRPC() ([]net.Listener, error) {
 	n.ConfigureRPC()
-	listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
+	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
 	coreCodec := amino.NewCodec()
 	ctypes.RegisterAmino(coreCodec)

@@ -683,11 +688,11 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
 		},
 		Moniker: n.config.Moniker,
 		Other: []string{
-			cmn.Fmt("amino_version=%v", amino.Version),
-			cmn.Fmt("p2p_version=%v", p2p.Version),
-			cmn.Fmt("consensus_version=%v", cs.Version),
-			cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version),
-			cmn.Fmt("tx_index=%v", txIndexerStatus),
+			fmt.Sprintf("amino_version=%v", amino.Version),
+			fmt.Sprintf("p2p_version=%v", p2p.Version),
+			fmt.Sprintf("consensus_version=%v", cs.Version),
+			fmt.Sprintf("rpc_version=%v/%v", rpc.Version, rpccore.Version),
+			fmt.Sprintf("tx_index=%v", txIndexerStatus),
 		},
 	}

@@ -696,7 +701,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
 	}

 	rpcListenAddr := n.config.RPC.ListenAddress
-	nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr))
+	nodeInfo.Other = append(nodeInfo.Other, fmt.Sprintf("rpc_addr=%v", rpcListenAddr))

 	if !n.sw.IsListening() {
 		return nodeInfo
@@ -705,7 +710,7 @@ func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
 	p2pListener := n.sw.Listeners()[0]
 	p2pHost := p2pListener.ExternalAddressHost()
 	p2pPort := p2pListener.ExternalAddress().Port
-	nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort)
+	nodeInfo.ListenAddr = fmt.Sprintf("%v:%v", p2pHost, p2pPort)

 	return nodeInfo
 }
@@ -745,3 +750,25 @@ func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
 	}
 	db.SetSync(genesisDocKey, bytes)
 }
+
+
+// splitAndTrimEmpty slices s into all subslices separated by sep and returns a
+// slice of the string s with all leading and trailing Unicode code points
+// contained in cutset removed. If sep is empty, splitAndTrimEmpty splits after
+// each UTF-8 sequence. The first part is equivalent to strings.SplitN with a
+// count of -1. Empty strings are filtered out, so only non-empty strings are
+// returned.
+func splitAndTrimEmpty(s, sep, cutset string) []string { + if s == "" { + return []string{} + } + + spl := strings.Split(s, sep) + nonEmptyStrings := make([]string, 0, len(spl)) + for i := 0; i < len(spl); i++ { + element := strings.Trim(spl[i], cutset) + if element != "" { + nonEmptyStrings = append(nonEmptyStrings, element) + } + } + return nonEmptyStrings +} diff --git a/node/node_test.go b/node/node_test.go index ca074e1bc34..d4e35f7355c 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -56,3 +56,22 @@ func TestNodeStartStop(t *testing.T) { t.Fatal("timed out waiting for shutdown") } } + +func TestSplitAndTrimEmpty(t *testing.T) { + testCases := []struct { + s string + sep string + cutset string + expected []string + }{ + {"a,b,c", ",", " ", []string{"a", "b", "c"}}, + {" a , b , c ", ",", " ", []string{"a", "b", "c"}}, + {" a, b, c ", ",", " ", []string{"a", "b", "c"}}, + {" a, ", ",", " ", []string{"a"}}, + {" ", ",", " ", []string{}}, + } + + for _, tc := range testCases { + assert.Equal(t, tc.expected, splitAndTrimEmpty(tc.s, tc.sep, tc.cutset), "%s", tc.s) + } +} diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go index da1296da0d8..be65d2f14d8 100644 --- a/p2p/base_reactor.go +++ b/p2p/base_reactor.go @@ -1,8 +1,8 @@ package p2p import ( - "github.com/tendermint/tendermint/p2p/conn" cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/p2p/conn" ) type Reactor interface { diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 9672e01174f..bb67eab3075 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -257,7 +257,7 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool { // Send message to channel. channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -286,7 +286,7 @@ func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { // Send message to channel. 
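As a quick reference for the helper's semantics, here is the same logic inlined into a standalone sketch, exercised with inputs like those in the test table above:

```go
package main

import (
	"fmt"
	"strings"
)

// Same behavior as splitAndTrimEmpty above: split on sep, trim the cutset
// from each piece, and drop anything that ends up empty.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}
	spl := strings.Split(s, sep)
	out := make([]string, 0, len(spl))
	for _, e := range spl {
		if t := strings.Trim(e, cutset); t != "" {
			out = append(out, t)
		}
	}
	return out
}

func main() {
	fmt.Println(splitAndTrimEmpty(" a , b ,, c ", ",", " ")) // [a b c]
	fmt.Println(splitAndTrimEmpty("  ", ",", " "))           // []
}
```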
channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID)) return false } @@ -311,7 +311,7 @@ func (c *MConnection) CanSend(chID byte) bool { channel, ok := c.channelsIdx[chID] if !ok { - c.Logger.Error(cmn.Fmt("Unknown channel %X", chID)) + c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID)) return false } return channel.canSend() diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 19e05fbc794..95b5488a4a2 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -433,7 +433,6 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { _, err = client.Write(buf.Bytes()) assert.Nil(t, err) assert.True(t, expectSend(chOnRcv), "msg just right") - assert.False(t, expectSend(chOnErr), "msg just right") // send msg thats too long buf = new(bytes.Buffer) @@ -446,7 +445,6 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) { assert.Nil(t, err) _, err = client.Write(buf.Bytes()) assert.NotNil(t, err) - assert.False(t, expectSend(chOnRcv), "msg too long") assert.True(t, expectSend(chOnErr), "msg too long") } diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go index 75199ee6ba3..3628eb4a39c 100644 --- a/p2p/conn/secret_connection.go +++ b/p2p/conn/secret_connection.go @@ -123,7 +123,7 @@ func (sc *SecretConnection) Write(data []byte) (n int, err error) { data = nil } chunkLength := len(chunk) - binary.BigEndian.PutUint32(frame, uint32(chunkLength)) + binary.LittleEndian.PutUint32(frame, uint32(chunkLength)) copy(frame[dataLenSize:], chunk) aead, err := chacha20poly1305.New(sc.sendSecret[:]) @@ -172,7 +172,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { incrNonce(sc.recvNonce) // end decryption - var chunkLength = binary.BigEndian.Uint32(frame) // read the first two bytes + var chunkLength = binary.LittleEndian.Uint32(frame) // read the first four bytes if chunkLength > dataMaxSize { return 0, errors.New("chunkLength is greater than dataMaxSize") } @@ -332,13 +332,12 @@ func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature [] //-------------------------------------------------------------------------------- -// increment nonce big-endian by 1 with wraparound. +// Increment nonce little-endian by 1 with wraparound. +// Due to chacha20poly1305 expecting a 12 byte nonce we do not use the first four +// bytes. We only increment a 64 bit unsigned int in the remaining 8 bytes +// (little-endian in nonce[4:]). 
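The rewritten incrNonce (next) drops the old byte-wise big-endian carry loop: chacha20poly1305 expects a 12-byte nonce, so the first four bytes stay zero and the last eight act as a single little-endian uint64 counter. A runnable sketch of exactly that arithmetic, including the wraparound case:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const aeadNonceSize = 12 // matches chacha20poly1305's nonce size

// incrNonce matches the new implementation: bytes [0:4) are unused and the
// remaining 8 bytes hold a little-endian uint64 counter with wraparound.
func incrNonce(nonce *[aeadNonceSize]byte) {
	counter := binary.LittleEndian.Uint64(nonce[4:])
	counter++
	binary.LittleEndian.PutUint64(nonce[4:], counter)
}

func main() {
	var nonce [aeadNonceSize]byte
	incrNonce(&nonce)
	fmt.Println(nonce) // [0 0 0 0 1 0 0 0 0 0 0 0]

	// Wraparound: a max counter rolls over to zero in one step, replacing
	// the old per-byte carry loop.
	binary.LittleEndian.PutUint64(nonce[4:], ^uint64(0))
	incrNonce(&nonce)
	fmt.Println(binary.LittleEndian.Uint64(nonce[4:])) // 0
}
```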
func incrNonce(nonce *[aeadNonceSize]byte) { - for i := aeadNonceSize - 1; 0 <= i; i-- { - nonce[i]++ - // if this byte wrapped around to zero, we need to increment the next byte - if nonce[i] != 0 { - return - } - } + counter := binary.LittleEndian.Uint64(nonce[4:]) + counter++ + binary.LittleEndian.PutUint64(nonce[4:], counter) } diff --git a/p2p/listener.go b/p2p/listener.go index d73b3cabfef..d0dd3f42a93 100644 --- a/p2p/listener.go +++ b/p2p/listener.go @@ -259,7 +259,7 @@ func isIpv6(ip net.IP) bool { func getNaiveExternalAddress(defaultToIPv4 bool, port int, settleForLocal bool, logger log.Logger) *NetAddress { addrs, err := net.InterfaceAddrs() if err != nil { - panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) + panic(fmt.Sprintf("Could not fetch interface addresses: %v", err)) } for _, a := range addrs { diff --git a/p2p/netaddress.go b/p2p/netaddress.go index ebac8cc82d5..a42f0fddecf 100644 --- a/p2p/netaddress.go +++ b/p2p/netaddress.go @@ -44,7 +44,7 @@ func NewNetAddress(id ID, addr net.Addr) *NetAddress { tcpAddr, ok := addr.(*net.TCPAddr) if !ok { if flag.Lookup("test.v") == nil { // normal run - cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. Got: %v", addr)) + cmn.PanicSanity(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr)) } else { // in testing netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0) netAddr.ID = id diff --git a/p2p/peer.go b/p2p/peer.go index 4f59fef7646..a5f0bbbd816 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -425,7 +425,7 @@ func createMConnection( if reactor == nil { // Note that its ok to panic here as it's caught in the conn._recover, // which does onPeerError. - panic(cmn.Fmt("Unknown channel %X", chID)) + panic(fmt.Sprintf("Unknown channel %X", chID)) } reactor.Receive(chID, p, msgBytes) } diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go index 4582ab6442d..a352cce000a 100644 --- a/p2p/peer_set_test.go +++ b/p2p/peer_set_test.go @@ -1,6 +1,7 @@ package p2p import ( + "fmt" "net" "sync" "testing" @@ -21,7 +22,7 @@ func randPeer(ip net.IP) *peer { p := &peer{ nodeInfo: NodeInfo{ ID: nodeKey.ID(), - ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), + ListenAddr: fmt.Sprintf("%v.%v.%v.%v:26656", cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256), }, } diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index ad6e0c00be2..e0c0e0b9ccc 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -7,6 +7,7 @@ package pex import ( "crypto/sha256" "encoding/binary" + "fmt" "math" "net" "sync" @@ -559,11 +560,11 @@ func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) { func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { // Sanity check if ka.isNew() { - a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka)) return false } if len(ka.Buckets) != 0 { - a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka)) return false } @@ -594,7 +595,7 @@ func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { if ka.BucketType != bucketType { - a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka)) + a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) return } bucket := 
a.getBucket(bucketType, bucketIdx) @@ -690,7 +691,7 @@ func (a *addrBook) expireNew(bucketIdx int) { for addrStr, ka := range a.bucketsNew[bucketIdx] { // If an entry is bad, throw it away if ka.isBad() { - a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr)) + a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) a.removeFromBucket(ka, bucketTypeNew, bucketIdx) return } @@ -707,11 +708,11 @@ func (a *addrBook) expireNew(bucketIdx int) { func (a *addrBook) moveToOld(ka *knownAddress) { // Sanity check if ka.isOld() { - a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) return } if len(ka.Buckets) == 0 { - a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka)) + a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) return } @@ -733,7 +734,7 @@ func (a *addrBook) moveToOld(ka *knownAddress) { // Finally, add our ka to old bucket again. added = a.addToOldBucket(ka, oldBucketIdx) if !added { - a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) + a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) } } } diff --git a/p2p/pex/file.go b/p2p/pex/file.go index 3237e12537e..33fec033695 100644 --- a/p2p/pex/file.go +++ b/p2p/pex/file.go @@ -2,6 +2,7 @@ package pex import ( "encoding/json" + "fmt" "os" cmn "github.com/tendermint/tendermint/libs/common" @@ -53,14 +54,14 @@ func (a *addrBook) loadFromFile(filePath string) bool { // Load addrBookJSON{} r, err := os.Open(filePath) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err)) + cmn.PanicCrisis(fmt.Sprintf("Error opening file %s: %v", filePath, err)) } defer r.Close() // nolint: errcheck aJSON := &addrBookJSON{} dec := json.NewDecoder(r) err = dec.Decode(aJSON) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err)) + cmn.PanicCrisis(fmt.Sprintf("Error reading file %s: %v", filePath, err)) } // Restore all the fields... diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 288cb0d150e..c919794ab0e 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -31,8 +31,7 @@ const ( maxMsgSize = maxAddressSize * maxGetSelection // ensure we have enough peers - defaultEnsurePeersPeriod = 30 * time.Second - defaultMinNumOutboundPeers = p2p.DefaultMinNumOutboundPeers + defaultEnsurePeersPeriod = 30 * time.Second // Seed/Crawler constants @@ -362,7 +361,7 @@ func (r *PEXReactor) ensurePeersRoutine() { func (r *PEXReactor) ensurePeers() { var ( out, in, dial = r.Switch.NumPeers() - numToDial = defaultMinNumOutboundPeers - (out + dial) + numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) ) r.Logger.Info( "Ensure peers", @@ -393,10 +392,7 @@ func (r *PEXReactor) ensurePeers() { if _, selected := toDial[try.ID]; selected { continue } - if dialling := r.Switch.IsDialing(try.ID); dialling { - continue - } - if connected := r.Switch.Peers().Has(try.ID); connected { + if r.Switch.IsDialingOrExistingAddress(try) { continue } // TODO: consider moving some checks from toDial into here diff --git a/p2p/switch.go b/p2p/switch.go index da94fa4b04d..b5413dabf1b 100644 --- a/p2p/switch.go +++ b/p2p/switch.go @@ -26,10 +26,6 @@ const ( // ie. 
3**10 = 16hrs reconnectBackOffAttempts = 10 reconnectBackOffBaseSeconds = 3 - - // keep at least this many outbound peers - // TODO: move to config - DefaultMinNumOutboundPeers = 10 ) //----------------------------------------------------------------------------- @@ -268,6 +264,11 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { return } +// MaxNumOutboundPeers returns a maximum number of outbound peers. +func (sw *Switch) MaxNumOutboundPeers() int { + return sw.config.MaxNumOutboundPeers +} + // Peers returns the set of peers that are connected to the switch. func (sw *Switch) Peers() IPeerSet { return sw.peers @@ -375,11 +376,6 @@ func (sw *Switch) MarkPeerAsGood(peer Peer) { //--------------------------------------------------------------------- // Dialing -// IsDialing returns true if the switch is currently dialing the given ID. -func (sw *Switch) IsDialing(id ID) bool { - return sw.dialing.Has(string(id)) -} - // DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent). // Used to dial peers from config on startup or from unsafe-RPC (trusted sources). // TODO: remove addrBook arg since it's now set on the switch @@ -416,10 +412,13 @@ func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent b for i := 0; i < len(perm); i++ { go func(i int) { j := perm[i] - addr := netAddrs[j] - // do not dial ourselves + if addr.Same(ourAddr) { + sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) + return + } else if sw.IsDialingOrExistingAddress(addr) { + sw.Logger.Debug("Ignore attempt to connect to an existing peer", "addr", addr) return } @@ -452,6 +451,14 @@ func (sw *Switch) randomSleep(interval time.Duration) { time.Sleep(r + interval) } +// IsDialingOrExistingAddress returns true if switch has a peer with the given +// address or dialing it at the moment. +func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { + return sw.dialing.Has(string(addr.ID)) || + sw.peers.Has(addr.ID) || + (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) +} + //------------------------------------------------------------------------------------ // Connection filtering @@ -491,11 +498,15 @@ func (sw *Switch) listenerRoutine(l Listener) { break } - // ignore connection if we already have enough - // leave room for MinNumOutboundPeers - maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers - if maxPeers <= sw.peers.Size() { - sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers) + // Ignore connection if we already have enough peers. 
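IsDialingOrExistingAddress, introduced above, folds the old IsDialing and Peers().Has checks plus the new duplicate-IP rule into one predicate shared by DialPeersAsync and the PEX reactor. A toy model, with plain maps standing in for cmn.CMap and the peer set:

```go
package main

import "fmt"

// addr and swtch are toy stand-ins for NetAddress and Switch; only the
// three membership checks are modeled.
type addr struct {
	ID string
	IP string
}

type swtch struct {
	dialing          map[string]bool
	peerIDs          map[string]bool
	peerIPs          map[string]bool
	allowDuplicateIP bool
}

// isDialingOrExistingAddress mirrors the new predicate: skip an address if
// we are already dialing it, already connected to its ID, or (when duplicate
// IPs are disallowed) already connected to its IP.
func (sw *swtch) isDialingOrExistingAddress(a addr) bool {
	return sw.dialing[a.ID] ||
		sw.peerIDs[a.ID] ||
		(!sw.allowDuplicateIP && sw.peerIPs[a.IP])
}

func main() {
	sw := &swtch{
		dialing: map[string]bool{"id1": true},
		peerIDs: map[string]bool{"id2": true},
		peerIPs: map[string]bool{"10.0.0.2": true},
	}
	fmt.Println(sw.isDialingOrExistingAddress(addr{"id1", "10.0.0.1"})) // true: dialing
	fmt.Println(sw.isDialingOrExistingAddress(addr{"id3", "10.0.0.2"})) // true: duplicate IP
	fmt.Println(sw.isDialingOrExistingAddress(addr{"id3", "10.0.0.3"})) // false
}
```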
+ _, in, _ := sw.NumPeers() + if in >= sw.config.MaxNumInboundPeers { + sw.Logger.Info( + "Ignoring inbound connection: already have enough inbound peers", + "address", inConn.RemoteAddr().String(), + "have", in, + "max", sw.config.MaxNumInboundPeers, + ) inConn.Close() continue } @@ -602,8 +613,10 @@ func (sw *Switch) addPeer(pc peerConn) error { addr := peerNodeInfo.NetAddress() // remove the given address from the address book // and add to our addresses to avoid dialing again - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) + if sw.addrBook != nil { + sw.addrBook.RemoveAddress(addr) + sw.addrBook.AddOurAddress(addr) + } return ErrSwitchConnectToSelf{addr} } diff --git a/p2p/test_util.go b/p2p/test_util.go index fdf9ae76474..90bcba4f179 100644 --- a/p2p/test_util.go +++ b/p2p/test_util.go @@ -35,7 +35,7 @@ func CreateRandomPeer(outbound bool) *peer { func CreateRoutableAddr() (addr string, netAddr *NetAddress) { for { var err error - addr = cmn.Fmt("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) + addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) netAddr, err = NewNetAddressString(addr) if err != nil { panic(err) @@ -142,7 +142,7 @@ func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitc sw = initSwitch(i, sw) ni := NodeInfo{ ID: nodeKey.ID(), - Moniker: cmn.Fmt("switch%d", i), + Moniker: fmt.Sprintf("switch%d", i), Network: network, Version: version, ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023), diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 31f659a43f5..d6b4c049d51 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -5,6 +5,7 @@ package trust import ( "encoding/json" + "fmt" "sync" "time" @@ -155,7 +156,7 @@ func (tms *TrustMetricStore) loadFromDB() bool { peers := make(map[string]MetricHistoryJSON) err := json.Unmarshal(bytes, &peers) if err != nil { - cmn.PanicCrisis(cmn.Fmt("Could not unmarshal Trust Metric Store DB data: %v", err)) + cmn.PanicCrisis(fmt.Sprintf("Could not unmarshal Trust Metric Store DB data: %v", err)) } // If history data exists in the file, diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go index 2de5e790505..16a53724144 100644 --- a/p2p/upnp/probe.go +++ b/p2p/upnp/probe.go @@ -5,7 +5,6 @@ import ( "net" "time" - cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" ) @@ -19,19 +18,19 @@ func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Lis if err != nil { return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err) } - logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP)) + logger.Info(fmt.Sprintf("ourIP: %v", nat.(*upnpNAT).ourIP)) ext, err := nat.GetExternalAddress() if err != nil { return nat, nil, nil, fmt.Errorf("External address error: %v", err) } - logger.Info(cmn.Fmt("External address: %v", ext)) + logger.Info(fmt.Sprintf("External address: %v", ext)) port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0) if err != nil { return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err) } - logger.Info(cmn.Fmt("Port mapping mapped: %v", port)) + logger.Info(fmt.Sprintf("Port mapping mapped: %v", port)) // also run the listener, open for all remote addresses. 
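The listener check above now compares the live inbound count directly against config.MaxNumInboundPeers, rather than deriving headroom as MaxNumPeers minus a hard-coded outbound minimum. Reduced to its decision rule:

```go
package main

import "fmt"

// acceptInbound sketches the new admission check: an inbound connection is
// accepted only while the inbound count is below the configured cap.
func acceptInbound(in, maxInbound int) bool {
	return in < maxInbound
}

func main() {
	maxInbound := 40
	for _, in := range []int{0, 39, 40} {
		fmt.Printf("inbound=%d accept=%v\n", in, acceptInbound(in, maxInbound))
	}
}
```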
listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort)) @@ -46,17 +45,17 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp go func() { inConn, err := listener.Accept() if err != nil { - logger.Info(cmn.Fmt("Listener.Accept() error: %v", err)) + logger.Info(fmt.Sprintf("Listener.Accept() error: %v", err)) return } - logger.Info(cmn.Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) + logger.Info(fmt.Sprintf("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr())) buf := make([]byte, 1024) n, err := inConn.Read(buf) if err != nil { - logger.Info(cmn.Fmt("Incoming connection read error: %v", err)) + logger.Info(fmt.Sprintf("Incoming connection read error: %v", err)) return } - logger.Info(cmn.Fmt("Incoming connection read %v bytes: %X", n, buf)) + logger.Info(fmt.Sprintf("Incoming connection read %v bytes: %X", n, buf)) if string(buf) == "test data" { supportsHairpin = true return @@ -66,16 +65,16 @@ func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supp // Establish outgoing outConn, err := net.Dial("tcp", extAddr) if err != nil { - logger.Info(cmn.Fmt("Outgoing connection dial error: %v", err)) + logger.Info(fmt.Sprintf("Outgoing connection dial error: %v", err)) return } n, err := outConn.Write([]byte("test data")) if err != nil { - logger.Info(cmn.Fmt("Outgoing connection write error: %v", err)) + logger.Info(fmt.Sprintf("Outgoing connection write error: %v", err)) return } - logger.Info(cmn.Fmt("Outgoing connection wrote %v bytes", n)) + logger.Info(fmt.Sprintf("Outgoing connection wrote %v bytes", n)) // Wait for data receipt time.Sleep(1 * time.Second) @@ -96,10 +95,10 @@ func Probe(logger log.Logger) (caps UPNPCapabilities, err error) { // Deferred cleanup defer func() { if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil { - logger.Error(cmn.Fmt("Port mapping delete error: %v", err)) + logger.Error(fmt.Sprintf("Port mapping delete error: %v", err)) } if err := listener.Close(); err != nil { - logger.Error(cmn.Fmt("Listener closing error: %v", err)) + logger.Error(fmt.Sprintf("Listener closing error: %v", err)) } }() diff --git a/privval/priv_validator.go b/privval/priv_validator.go index a81751a9147..3ba0519cbf7 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) // TODO: type ? 
@@ -89,7 +90,7 @@ func LoadFilePV(filePath string) *FilePV { pv := &FilePV{} err = cdc.UnmarshalJSON(pvJSONBytes, &pv) if err != nil { - cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err)) + cmn.Exit(fmt.Sprintf("Error reading PrivValidator from %v: %v\n", filePath, err)) } // overwrite pubkey and address for convenience @@ -153,7 +154,7 @@ func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { pv.mtx.Lock() defer pv.mtx.Unlock() if err := pv.signVote(chainID, vote); err != nil { - return errors.New(cmn.Fmt("Error signing vote: %v", err)) + return fmt.Errorf("Error signing vote: %v", err) } return nil } @@ -324,7 +325,7 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T } // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) + now := types.CanonicalTime(tmtime.Now()) lastVote.Timestamp = now newVote.Timestamp = now lastVoteBytes, _ := cdc.MarshalJSON(lastVote) @@ -350,7 +351,7 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (ti } // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) + now := types.CanonicalTime(tmtime.Now()) lastProposal.Timestamp = now newProposal.Timestamp = now lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go index b4f9ddbc49c..404ff770b57 100644 --- a/privval/priv_validator_test.go +++ b/privval/priv_validator_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestGenLoadValidator(t *testing.T) { @@ -235,7 +236,7 @@ func newVote(addr types.Address, idx int, height int64, round int, typ byte, blo Height: height, Round: round, Type: typ, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), BlockID: blockID, } } @@ -245,6 +246,6 @@ func newProposal(height int64, round int, partsHeader types.PartSetHeader) *type Height: height, Round: round, BlockPartsHeader: partsHeader, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), } } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 44936056aea..5eadb032fb2 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -1,6 +1,7 @@ package proxy import ( + "fmt" "strings" "testing" @@ -45,7 +46,7 @@ func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er var SOCKET = "socket" func TestEcho(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server @@ -70,7 +71,7 @@ func TestEcho(t *testing.T) { t.Log("Connected") for i := 0; i < 1000; i++ { - proxy.EchoAsync(cmn.Fmt("echo-%v", i)) + proxy.EchoAsync(fmt.Sprintf("echo-%v", i)) } if err := proxy.FlushSync(); err != nil { t.Error(err) @@ -79,7 +80,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) + sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server @@ -118,7 +119,7 @@ func BenchmarkEcho(b *testing.B) { } func TestInfo(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) 
+ sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) // Start server diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 79c452fc937..da4625d51d5 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -8,9 +8,9 @@ import ( "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) var waitForEventTimeout = 5 * time.Second diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index 4b85bf01d77..a9c64f5da0f 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -7,11 +7,11 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cmn "github.com/tendermint/tendermint/libs/common" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) /* diff --git a/rpc/client/interface.go b/rpc/client/interface.go index f939c855bf8..f34410c5bb8 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -21,9 +21,9 @@ implementation. */ import ( + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIClient groups together the functionality that principally diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 4502c087aef..022e4f3632d 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -2,11 +2,11 @@ package mock import ( abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tendermint/libs/common" ) // ABCIApp will send all abci related request to the named app, diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index bcf443cf04b..327ec9e7bfe 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -11,11 +11,11 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) func TestABCIMock(t *testing.T) { diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 955df62774d..c578784999b 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -16,11 +16,11 @@ package mock import ( "reflect" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes 
"github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) // Client wraps arbitrary implementations of the various interfaces. diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 8e3c1506125..fa64c6a837f 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -6,9 +6,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" - cmn "github.com/tendermint/tendermint/libs/common" ) func TestStatus(t *testing.T) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index e7e9042a78a..767ae684729 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -242,7 +242,7 @@ func TestBroadcastTxSync(t *testing.T) { require.Equal(initMempoolSize+1, mempool.Size()) - txs := mempool.Reap(1) + txs := mempool.ReapMaxTxs(len(tx)) require.EqualValues(tx, txs[0]) mempool.Flush() } @@ -351,6 +351,11 @@ func TestTxSearch(t *testing.T) { assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) } + // query by height + result, err = c.TxSearch(fmt.Sprintf("tx.height >= %d", txHeight), true, 1, 30) + require.Nil(t, err, "%+v", err) + require.Len(t, result.Txs, 1) + // we query for non existing tx result, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, 1, 30) require.Nil(t, err, "%+v", err) diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 671250db847..bb69db63f9a 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -370,16 +370,16 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { return res, nil } -func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { return 0, fmt.Errorf("Height must be greater than 0") } - if height > storeHeight { + if height > currentHeight { return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") } return height, nil } - return storeHeight, nil + return currentHeight, nil } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index c026cd91f49..4e4c54dead1 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -44,8 +44,10 @@ import ( // } // ``` func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + // The latest validator that we know is the + // NextValidator of the last block. 
+ height := consensusState.GetState().LastBlockHeight + 1 + height, err := getHeight(height, heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index ecc41ce12b2..728d77f6382 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -8,9 +8,9 @@ import ( "github.com/pkg/errors" abci "github.com/tendermint/tendermint/abci/types" + cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tendermint/libs/common" ) //----------------------------------------------------------------------------- @@ -243,7 +243,7 @@ func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) { // reuse per_page validator limit = validatePerPage(limit) - txs := mempool.Reap(limit) + txs := mempool.ReapMaxTxs(limit) return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil } diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 128b3e9a739..1d1f61466b8 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -5,13 +5,14 @@ import ( "github.com/tendermint/tendermint/consensus" crypto "github.com/tendermint/tendermint/crypto" + dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/libs/log" + mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tendermint/libs/db" - "github.com/tendermint/tendermint/libs/log" ) const ( @@ -28,6 +29,7 @@ var subscribeTimeout = 5 * time.Second type Consensus interface { GetState() sm.State GetValidators() (int64, []*types.Validator) + GetLastHeight() int64 GetRoundStateJSON() ([]byte, error) GetRoundStateSimpleJSON() ([]byte, error) } @@ -52,7 +54,6 @@ var ( // interfaces defined in types and above stateDB dbm.DB blockStore sm.BlockStore - mempool sm.Mempool evidencePool sm.EvidencePool consensusState Consensus p2pSwitch P2P @@ -64,6 +65,7 @@ var ( txIndexer txindex.TxIndexer consensusReactor *consensus.ConsensusReactor eventBus *types.EventBus // thread safe + mempool *mempl.Mempool logger log.Logger ) @@ -76,7 +78,7 @@ func SetBlockStore(bs sm.BlockStore) { blockStore = bs } -func SetMempool(mem sm.Mempool) { +func SetMempool(mem *mempl.Mempool) { mempool = mem } diff --git a/rpc/core/status.go b/rpc/core/status.go index 4cb7667b756..e34f5244cd6 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -64,7 +64,12 @@ import ( //} // ``` func Status() (*ctypes.ResultStatus, error) { - latestHeight := blockStore.Height() + var latestHeight int64 = -1 + if consensusReactor.FastSync() { + latestHeight = blockStore.Height() + } else { + latestHeight = consensusState.GetLastHeight() + } var ( latestBlockMeta *types.BlockMeta latestBlockHash cmn.HexBytes @@ -107,9 +112,8 @@ func Status() (*ctypes.ResultStatus, error) { func validatorAtHeight(h int64) *types.Validator { privValAddress := pubKey.Address() + // If we're still at height h, search in the current validator set. 
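Validators now treats LastBlockHeight+1 as the newest queryable height, since the last committed block already fixes its NextValidators; the renamed getHeight then clamps any caller-supplied height against that. A standalone sketch of the clamping:

```go
package main

import (
	"errors"
	"fmt"
)

// getHeight mirrors the renamed helper above: clamp an optional caller
// height against whatever the caller considers "current" (the blockstore
// height, or LastBlockHeight+1 for validator queries).
func getHeight(currentHeight int64, heightPtr *int64) (int64, error) {
	if heightPtr != nil {
		h := *heightPtr
		if h <= 0 {
			return 0, errors.New("height must be greater than 0")
		}
		if h > currentHeight {
			return 0, errors.New("height must be less than or equal to the current blockchain height")
		}
		return h, nil
	}
	return currentHeight, nil
}

func main() {
	lastBlockHeight := int64(10)
	current := lastBlockHeight + 1 // validators for the next height are already known
	h := int64(11)
	fmt.Println(getHeight(current, &h)) // 11 <nil>
	fmt.Println(getHeight(current, nil)) // 11 <nil>
}
```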
lastBlockHeight, vals := consensusState.GetValidators() - - // if we're still at height h, search in the current validator set if lastBlockHeight == h { for _, val := range vals { if bytes.Equal(val.Address, privValAddress) { @@ -118,12 +122,11 @@ func validatorAtHeight(h int64) *types.Validator { } } - // if we've moved to the next height, retrieve the validator set from DB + // If we've moved to the next height, retrieve the validator set from DB. if lastBlockHeight > h { vals, err := sm.LoadValidators(stateDB, h) if err != nil { - // should not happen - return nil + return nil // should not happen } _, val := vals.GetByAddress(privValAddress) return val diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 4fec416eda1..dbb50ff6b68 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -33,10 +33,8 @@ type ResultBlock struct { // Commit and Header type ResultCommit struct { - // SignedHeader is header and commit, embedded so we only have - // one level in the json output - types.SignedHeader - CanonicalCommit bool `json:"canonical"` + types.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` } // ABCI results from a block diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go index fe979c549de..eda3896fbde 100644 --- a/rpc/grpc/grpc_test.go +++ b/rpc/grpc/grpc_test.go @@ -26,7 +26,7 @@ func TestMain(m *testing.M) { func TestBroadcastTx(t *testing.T) { require := require.New(t) - res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{[]byte("this is a tx")}) + res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}) require.Nil(err, "%+v", err) require.EqualValues(0, res.CheckTx.Code) require.EqualValues(0, res.DeliverTx.Code) diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index 8bc9761a40d..b33397dab45 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -1,18 +1,6 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: rpc/grpc/types.proto -/* - Package core_grpc is a generated protocol buffer package. 
- - It is generated from these files: - rpc/grpc/types.proto - - It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx -*/ //nolint package core_grpc @@ -25,8 +13,10 @@ import types "github.com/tendermint/tendermint/abci/types" import bytes "bytes" -import context "golang.org/x/net/context" -import grpc "google.golang.org/grpc" +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) import io "io" @@ -43,21 +33,83 @@ var _ = math.Inf const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type RequestPing struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } +func (m *RequestPing) Reset() { *m = RequestPing{} } +func (m *RequestPing) String() string { return proto.CompactTextString(m) } +func (*RequestPing) ProtoMessage() {} +func (*RequestPing) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{0} +} +func (m *RequestPing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestPing) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestPing.Merge(dst, src) +} +func (m *RequestPing) XXX_Size() int { + return m.Size() +} +func (m *RequestPing) XXX_DiscardUnknown() { + xxx_messageInfo_RequestPing.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestPing proto.InternalMessageInfo type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } +func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } +func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*RequestBroadcastTx) ProtoMessage() {} +func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{1} +} +func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *RequestBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestBroadcastTx.Merge(dst, src) +} +func (m *RequestBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *RequestBroadcastTx) XXX_DiscardUnknown() { + 
xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo func (m *RequestBroadcastTx) GetTx() []byte { if m != nil { @@ -67,22 +119,84 @@ func (m *RequestBroadcastTx) GetTx() []byte { } type ResponsePing struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponsePing) Reset() { *m = ResponsePing{} } +func (m *ResponsePing) String() string { return proto.CompactTextString(m) } +func (*ResponsePing) ProtoMessage() {} +func (*ResponsePing) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{2} +} +func (m *ResponsePing) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponsePing) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponsePing.Merge(dst, src) +} +func (m *ResponsePing) XXX_Size() int { + return m.Size() +} +func (m *ResponsePing) XXX_DiscardUnknown() { + xxx_messageInfo_ResponsePing.DiscardUnknown(m) } -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } +var xxx_messageInfo_ResponsePing proto.InternalMessageInfo type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` + CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` + DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } +func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } +func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } +func (*ResponseBroadcastTx) ProtoMessage() {} +func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { + return fileDescriptor_types_48bb8d9591d37e66, []int{3} +} +func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ResponseBroadcastTx) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseBroadcastTx.Merge(dst, src) +} +func (m *ResponseBroadcastTx) XXX_Size() int { + return m.Size() +} +func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { if m != nil { @@ -127,6 +241,9 @@ func (this *RequestPing) Equal(that interface{}) bool { } else if this == nil { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *RequestBroadcastTx) Equal(that interface{}) bool { @@ -151,6 +268,9 @@ func (this *RequestBroadcastTx) Equal(that interface{}) bool { if !bytes.Equal(this.Tx, that1.Tx) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *ResponsePing) Equal(that interface{}) bool { @@ -172,6 +292,9 @@ func (this *ResponsePing) Equal(that interface{}) bool { } else if this == nil { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } func (this *ResponseBroadcastTx) Equal(that interface{}) bool { @@ -199,6 +322,9 @@ func (this *ResponseBroadcastTx) Equal(that interface{}) bool { if !this.DeliverTx.Equal(that1.DeliverTx) { return false } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } return true } @@ -210,8 +336,9 @@ var _ grpc.ClientConn // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 -// Client API for BroadcastAPI service - +// BroadcastAPIClient is the client API for BroadcastAPI service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type BroadcastAPIClient interface { Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) @@ -227,7 +354,7 @@ func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { out := new(ResponsePing) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, opts...) if err != nil { return nil, err } @@ -236,7 +363,7 @@ func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ... func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { out := new(ResponseBroadcastTx) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) + err := c.cc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, opts...) 
if err != nil { return nil, err } @@ -322,6 +449,9 @@ func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -346,6 +476,9 @@ func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) i += copy(dAtA[i:], m.Tx) } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -364,6 +497,9 @@ func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -402,6 +538,9 @@ func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { } i += n2 } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } return i, nil } @@ -417,6 +556,7 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { func NewPopulatedRequestPing(r randyTypes, easy bool) *RequestPing { this := &RequestPing{} if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 1) } return this } @@ -429,6 +569,7 @@ func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx this.Tx[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) } return this } @@ -436,6 +577,7 @@ func NewPopulatedRequestBroadcastTx(r randyTypes, easy bool) *RequestBroadcastTx func NewPopulatedResponsePing(r randyTypes, easy bool) *ResponsePing { this := &ResponsePing{} if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 1) } return this } @@ -449,6 +591,7 @@ func NewPopulatedResponseBroadcastTx(r randyTypes, easy bool) *ResponseBroadcast this.DeliverTx = types.NewPopulatedResponseDeliverTx(r, easy) } if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } return this } @@ -528,6 +671,9 @@ func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { func (m *RequestPing) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -538,12 +684,18 @@ func (m *RequestBroadcastTx) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } func (m *ResponsePing) Size() (n int) { var l int _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -558,6 +710,9 @@ func (m *ResponseBroadcastTx) Size() (n int) { l = m.DeliverTx.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } return n } @@ -615,6 +770,7 @@ func (m *RequestPing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -696,6 +852,7 @@ func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -746,6 +903,7 @@ func (m *ResponsePing) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -862,6 +1020,7 @@ func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -976,10 +1135,10 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } -func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptorTypes) } +func init() { proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_48bb8d9591d37e66) } +func init() { golang_proto.RegisterFile("rpc/grpc/types.proto", fileDescriptor_types_48bb8d9591d37e66) } -var fileDescriptorTypes = []byte{ +var fileDescriptor_types_48bb8d9591d37e66 = []byte{ // 321 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x29, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x9c, diff --git a/rpc/grpc/typespb_test.go b/rpc/grpc/typespb_test.go index 3d28002b1b1..da076bf64a5 100644 --- a/rpc/grpc/typespb_test.go +++ b/rpc/grpc/typespb_test.go @@ -1,25 +1,14 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. // source: rpc/grpc/types.proto -/* -Package core_grpc is a generated protocol buffer package. - -It is generated from these files: - rpc/grpc/types.proto - -It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx -*/ package core_grpc import testing "testing" -import rand "math/rand" +import math_rand "math/rand" import time "time" +import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb" import proto "github.com/gogo/protobuf/proto" -import jsonpb "github.com/gogo/protobuf/jsonpb" import golang_proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" @@ -34,14 +23,14 @@ var _ = math.Inf func TestRequestPingProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -59,13 +48,13 @@ func TestRequestPingProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestRequestPingMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -77,7 +66,7 @@ func TestRequestPingMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -90,14 +79,14 @@ func TestRequestPingMarshalTo(t *testing.T) { func 
TestRequestBroadcastTxProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -115,13 +104,13 @@ func TestRequestBroadcastTxProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestRequestBroadcastTxMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -133,7 +122,7 @@ func TestRequestBroadcastTxMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -146,14 +135,14 @@ func TestRequestBroadcastTxMarshalTo(t *testing.T) { func TestResponsePingProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -171,13 +160,13 @@ func TestResponsePingProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestResponsePingMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -189,7 +178,7 @@ func TestResponsePingMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -202,14 +191,14 @@ func TestResponsePingMarshalTo(t *testing.T) { func TestResponseBroadcastTxProto(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, false) - dAtA, err := proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponseBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := 
github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } littlefuzz := make([]byte, len(dAtA)) @@ -227,13 +216,13 @@ func TestResponseBroadcastTxProto(t *testing.T) { littlefuzz = append(littlefuzz, byte(popr.Intn(256))) } // shouldn't panic - _ = proto.Unmarshal(littlefuzz, msg) + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) } } func TestResponseBroadcastTxMarshalTo(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, false) size := p.Size() dAtA := make([]byte, size) @@ -245,7 +234,7 @@ func TestResponseBroadcastTxMarshalTo(t *testing.T) { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponseBroadcastTx{} - if err := proto.Unmarshal(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } for i := range dAtA { @@ -258,15 +247,15 @@ func TestResponseBroadcastTxMarshalTo(t *testing.T) { func TestRequestPingJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestPing{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -276,15 +265,15 @@ func TestRequestPingJSON(t *testing.T) { } func TestRequestBroadcastTxJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &RequestBroadcastTx{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -294,15 +283,15 @@ func TestRequestBroadcastTxJSON(t *testing.T) { } func TestResponsePingJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } msg := &ResponsePing{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -312,15 +301,15 @@ func TestResponsePingJSON(t *testing.T) { } func TestResponseBroadcastTxJSON(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - marshaler := jsonpb.Marshaler{} + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} jsondata, err := marshaler.MarshalToString(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } 
msg := &ResponseBroadcastTx{} - err = jsonpb.UnmarshalString(jsondata, msg) + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -330,11 +319,11 @@ func TestResponseBroadcastTxJSON(t *testing.T) { } func TestRequestPingProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &RequestPing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -344,11 +333,11 @@ func TestRequestPingProtoText(t *testing.T) { func TestRequestPingProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &RequestPing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -358,11 +347,11 @@ func TestRequestPingProtoCompactText(t *testing.T) { func TestRequestBroadcastTxProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &RequestBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -372,11 +361,11 @@ func TestRequestBroadcastTxProtoText(t *testing.T) { func TestRequestBroadcastTxProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &RequestBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -386,11 +375,11 @@ func TestRequestBroadcastTxProtoCompactText(t *testing.T) { func TestResponsePingProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &ResponsePing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -400,11 +389,11 @@ func TestResponsePingProtoText(t *testing.T) { func TestResponsePingProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := 
NewPopulatedResponsePing(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &ResponsePing{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -414,11 +403,11 @@ func TestResponsePingProtoCompactText(t *testing.T) { func TestResponseBroadcastTxProtoText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := proto.MarshalTextString(p) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) msg := &ResponseBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -428,11 +417,11 @@ func TestResponseBroadcastTxProtoText(t *testing.T) { func TestResponseBroadcastTxProtoCompactText(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - dAtA := proto.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) msg := &ResponseBroadcastTx{} - if err := proto.UnmarshalText(dAtA, msg); err != nil { + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } if !p.Equal(msg) { @@ -442,10 +431,10 @@ func TestResponseBroadcastTxProtoCompactText(t *testing.T) { func TestRequestPingSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestPing(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -456,7 +445,7 @@ func TestRequestPingSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -464,10 +453,10 @@ func TestRequestPingSize(t *testing.T) { func TestRequestBroadcastTxSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedRequestBroadcastTx(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -478,7 +467,7 @@ func TestRequestBroadcastTxSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -486,10 +475,10 @@ func TestRequestBroadcastTxSize(t *testing.T) { func TestResponsePingSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := 
math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponsePing(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -500,7 +489,7 @@ func TestResponsePingSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } @@ -508,10 +497,10 @@ func TestResponsePingSize(t *testing.T) { func TestResponseBroadcastTxSize(t *testing.T) { seed := time.Now().UnixNano() - popr := rand.New(rand.NewSource(seed)) + popr := math_rand.New(math_rand.NewSource(seed)) p := NewPopulatedResponseBroadcastTx(popr, true) - size2 := proto.Size(p) - dAtA, err := proto.Marshal(p) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { t.Fatalf("seed = %d, err = %v", seed, err) } @@ -522,7 +511,7 @@ func TestResponseBroadcastTxSize(t *testing.T) { if size2 != size { t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) } - size3 := proto.Size(p) + size3 := github_com_gogo_protobuf_proto.Size(p) if size3 != size { t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) } diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 9a07c86761d..cff285222ca 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -14,8 +14,8 @@ import ( metrics "github.com/rcrowley/go-metrics" "github.com/tendermint/go-amino" - types "github.com/tendermint/tendermint/rpc/lib/types" cmn "github.com/tendermint/tendermint/libs/common" + types "github.com/tendermint/tendermint/rpc/lib/types" ) const ( diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go index 3471eb791df..6004959ae93 100644 --- a/rpc/lib/server/handlers_test.go +++ b/rpc/lib/server/handlers_test.go @@ -14,9 +14,9 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" rs "github.com/tendermint/tendermint/rpc/lib/server" types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tendermint/libs/log" ) ////////////////////////////////////////////////////////////////////////////// diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go index 5d816ef22af..ff7173a101e 100644 --- a/rpc/lib/server/http_server.go +++ b/rpc/lib/server/http_server.go @@ -14,8 +14,8 @@ import ( "github.com/pkg/errors" "golang.org/x/net/netutil" - types "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/libs/log" + types "github.com/tendermint/tendermint/rpc/lib/types" ) // Config is an RPC server configuration. 
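(Added note: the import hunks in ws_client.go, handlers_test.go, and http_server.go above only reorder lines; goimports-style grouping keeps the standard library first, third-party packages next, and tendermint packages last, each sorted by path. A minimal illustration, with file contents picked only for the example:)

package server

import (
	"fmt" // stdlib first

	"github.com/pkg/errors" // third-party next

	"github.com/tendermint/tendermint/libs/log" // tendermint packages last, sorted by path
	types "github.com/tendermint/tendermint/rpc/lib/types"
)

// reference the imports so this illustrative file compiles on its own
var _, _, _, _ = fmt.Sprint, errors.New, log.NewNopLogger, types.RPCRequest{}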
@@ -61,7 +61,7 @@ func StartHTTPServer( listener, RecoverAndLogHandler(maxBytesHandler{h: handler, n: maxBodyBytes}, logger), ) - logger.Error("RPC HTTP server stopped", "err", err) + logger.Info("RPC HTTP server stopped", "err", err) }() return listener, nil } diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go index cb9560e1286..544284b9c1f 100644 --- a/rpc/lib/test/main.go +++ b/rpc/lib/test/main.go @@ -6,9 +6,9 @@ import ( "os" amino "github.com/tendermint/go-amino" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" cmn "github.com/tendermint/tendermint/libs/common" "github.com/tendermint/tendermint/libs/log" + rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) var routes = map[string]*rpcserver.RPCFunc{ diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 7e0cba0edd5..0a9cd9847e7 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -15,6 +15,7 @@ import ( cfg "github.com/tendermint/tendermint/config" nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -84,7 +85,7 @@ func GetConfig() *cfg.Config { globalConfig.P2P.ListenAddress = tm globalConfig.RPC.ListenAddress = rpc globalConfig.RPC.GRPCListenAddress = grpc - globalConfig.TxIndex.IndexTags = "app.creator" // see kvstore application + globalConfig.TxIndex.IndexTags = "app.creator,tx.height" // see kvstore application } return globalConfig } @@ -120,7 +121,11 @@ func NewTendermint(app abci.Application) *nm.Node { pvFile := config.PrivValidatorFile() pv := privval.LoadOrGenFilePV(pvFile) papp := proxy.NewLocalClientCreator(app) - node, err := nm.NewNode(config, pv, papp, + nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile()) + if err != nil { + panic(err) + } + node, err := nm.NewNode(config, pv, nodeKey, papp, nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go new file mode 100644 index 00000000000..be3487e5f7a --- /dev/null +++ b/scripts/json2wal/main.go @@ -0,0 +1,74 @@ +/* + json2wal converts a JSON file to a binary WAL file.
+ + Usage: + json2wal +*/ + +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/types" +) + +var cdc = amino.NewCodec() + +func init() { + cs.RegisterConsensusMessages(cdc) + cs.RegisterWALMessages(cdc) + types.RegisterBlockAmino(cdc) +} + +func main() { + if len(os.Args) < 3 { + fmt.Fprintln(os.Stderr, "missing arguments: Usage: json2wal ") + os.Exit(1) + } + + f, err := os.Open(os.Args[1]) + if err != nil { + panic(fmt.Errorf("failed to open JSON file: %v", err)) + } + defer f.Close() + + walFile, err := os.OpenFile(os.Args[2], os.O_EXCL|os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + panic(fmt.Errorf("failed to open WAL file: %v", err)) + } + defer walFile.Close() + + br := bufio.NewReader(f) + enc := cs.NewWALEncoder(walFile) + + for { + msgJson, _, err := br.ReadLine() + if err == io.EOF { + break + } else if err != nil { + panic(fmt.Errorf("failed to read file: %v", err)) + } + // ignore ENDHEIGHT lines in the JSON file + if strings.HasPrefix(string(msgJson), "ENDHEIGHT") { + continue + } + + var msg cs.TimedWALMessage + err = cdc.UnmarshalJSON(msgJson, &msg) + if err != nil { + panic(fmt.Errorf("failed to unmarshal json: %v", err)) + } + + err = enc.Encode(&msg) + if err != nil { + panic(fmt.Errorf("failed to encode msg: %v", err)) + } + } +} diff --git a/scripts/linkify_changelog.py b/scripts/linkify_changelog.py new file mode 100644 index 00000000000..16647c05f4c --- /dev/null +++ b/scripts/linkify_changelog.py @@ -0,0 +1,13 @@ +import fileinput +import re + +# This script goes through the provided file and replaces any " \#" +# with a valid markdown-formatted link to the issue. e.g. +# " [\#number](https://github.com/tendermint/tendermint/issues/) +# Note that if the number is for a PR, github will auto-redirect you when you click the link. +# It is safe to run the script multiple times in succession. +# +# Example usage: $ python3 linkify_changelog.py ../CHANGELOG_PENDING.md +for line in fileinput.input(inplace=1): + line = re.sub(r"\s\\#([0-9]*)", r" [\\#\1](https://github.com/tendermint/tendermint/issues/\1)", line.rstrip()) + print(line) \ No newline at end of file diff --git a/scripts/localnet-blocks-test.sh b/scripts/localnet-blocks-test.sh new file mode 100755 index 00000000000..a33ab00f3a7 --- /dev/null +++ b/scripts/localnet-blocks-test.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +ITERATIONS=$1 +SLEEP=$2 +NUMBLOCKS=$3 +NODEADDR=$4 + +if [ -z "$1" ]; then + echo "Need to input number of iterations to run..." + exit 1 +fi + +if [ -z "$2" ]; then + echo "Need to input number of seconds to sleep between iterations..." + exit 1 +fi + +if [ -z "$3" ]; then + echo "Need to input block height to declare completion..." + exit 1 +fi + +if [ -z "$4" ]; then + echo "Need to input node address to poll..." + exit 1 +fi + +I=0 +while [ ${I} -lt "$ITERATIONS" ]; do + var=$(curl -s "$NODEADDR:26657/status" | jq -r ".result.sync_info.latest_block_height") + echo "Number of Blocks: ${var}" + if [ ! -z "${var}" ] && [ "${var}" -gt "${NUMBLOCKS}" ]; then + echo "Number of blocks reached, exiting success..." + exit 0 + fi + I=$((I+1)) + sleep "$SLEEP" +done + +echo "Timeout reached, exiting failure..."
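For reference, the height check this script performs with curl and jq can also be written as a small Go program. This is an illustrative sketch, not part of the patch; it assumes only the /status response shape the script already relies on (result.sync_info.latest_block_height), and it tolerates the height arriving either as a quoted or a bare integer:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"strconv"
	"time"
)

// statusResp mirrors just the fragment of /status that we need.
type statusResp struct {
	Result struct {
		SyncInfo struct {
			LatestBlockHeight json.RawMessage `json:"latest_block_height"`
		} `json:"sync_info"`
	} `json:"result"`
}

func main() {
	const iterations, target = 100, 10 // counterparts of ITERATIONS and NUMBLOCKS above
	for i := 0; i < iterations; i++ {
		if h, err := latestHeight("http://127.0.0.1:26657/status"); err == nil {
			fmt.Println("Number of Blocks:", h)
			if h > target {
				os.Exit(0) // height reached
			}
		}
		time.Sleep(1 * time.Second)
	}
	fmt.Println("Timeout reached, exiting failure...")
	os.Exit(1)
}

func latestHeight(url string) (int64, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	var s statusResp
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		return 0, err
	}
	// The height may arrive as "123" (string) in amino JSON; strip any quotes.
	raw := bytes.Trim(s.Result.SyncInfo.LatestBlockHeight, `"`)
	return strconv.ParseInt(string(raw), 10, 64)
}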
+exit 1 diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index f6ffea4319c..cf8ae86c1b8 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -8,14 +8,23 @@ package main import ( - "encoding/json" "fmt" "io" "os" + "github.com/tendermint/go-amino" cs "github.com/tendermint/tendermint/consensus" + "github.com/tendermint/tendermint/types" ) +var cdc = amino.NewCodec() + +func init() { + cs.RegisterConsensusMessages(cdc) + cs.RegisterWALMessages(cdc) + types.RegisterBlockAmino(cdc) +} + func main() { if len(os.Args) < 2 { fmt.Println("missing one argument: ") @@ -37,7 +46,7 @@ func main() { panic(fmt.Errorf("failed to decode msg: %v", err)) } - json, err := json.Marshal(msg) + json, err := cdc.MarshalJSON(msg) if err != nil { panic(fmt.Errorf("failed to marshal msg: %v", err)) } diff --git a/state/errors.go b/state/errors.go index d40c7e1413d..6010ed9bbd6 100644 --- a/state/errors.go +++ b/state/errors.go @@ -1,8 +1,6 @@ package state -import ( - cmn "github.com/tendermint/tendermint/libs/common" -) +import "fmt" type ( ErrInvalidBlock error @@ -48,32 +46,32 @@ type ( ) func (e ErrUnknownBlock) Error() string { - return cmn.Fmt("Could not find block #%d", e.Height) + return fmt.Sprintf("Could not find block #%d", e.Height) } func (e ErrBlockHashMismatch) Error() string { - return cmn.Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) + return fmt.Sprintf("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) } func (e ErrAppBlockHeightTooHigh) Error() string { - return cmn.Fmt("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) + return fmt.Sprintf("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) } func (e ErrLastStateMismatch) Error() string { - return cmn.Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) + return fmt.Sprintf("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) } func (e ErrStateMismatch) Error() string { - return cmn.Fmt("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected) + return fmt.Sprintf("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected) } func (e ErrNoValSetForHeight) Error() string { - return cmn.Fmt("Could not find validator set for height #%d", e.Height) + return fmt.Sprintf("Could not find validator set for height #%d", e.Height) } func (e ErrNoConsensusParamsForHeight) Error() string { - return cmn.Fmt("Could not find consensus params for height #%d", e.Height) + return fmt.Sprintf("Could not find consensus params for height #%d", e.Height) } func (e ErrNoABCIResponsesForHeight) Error() string { - return cmn.Fmt("Could not find results for height #%d", e.Height) + return fmt.Sprintf("Could not find results for height #%d", e.Height) } diff --git a/state/execution.go b/state/execution.go index 54e1ec73ae0..b1859c22050 100644 --- a/state/execution.go +++ b/state/execution.go @@ -80,19 +80,19 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX - // save the results before we commit + // Save the results before we commit. 
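// (Added note, not part of the patch: ApplyBlock persists the ABCI responses
// before Commit and persists the updated state only after Commit, with
// events fired last; the fail.Fail() // XXX markers between the steps are
// crash-injection points, so recovery can be exercised at each boundary.
// As the NOTE below says, events won't be re-fired during replay if the
// node crashes between Commit and SaveState.)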
saveABCIResponses(blockExec.db, block.Height, abciResponses) fail.Fail() // XXX - // update the state with the block and responses + // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } - // lock mempool, commit app state, update mempoool - appHash, err := blockExec.Commit(block) + // Lock mempool, commit app state, update mempool. + appHash, err := blockExec.Commit(state, block) if err != nil { return state, fmt.Errorf("Commit failed for application: %v", err) } @@ -102,13 +102,13 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b fail.Fail() // XXX - // update the app hash and save the state + // Update the app hash and save the state. state.AppHash = appHash SaveState(blockExec.db, state) fail.Fail() // XXX - // events are fired after everything else + // Events are fired after everything else. // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) @@ -119,7 +119,7 @@ // It returns the result of calling abci.Commit (the AppHash), and an error. // The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed // against committed state before new txs are run in the mempool, lest they be invalid. -func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { +func (blockExec *BlockExecutor) Commit(state State, block *types.Block) ([]byte, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -145,7 +145,9 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { "appHash", fmt.Sprintf("%X", res.Data)) // Update mempool. - if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { + maxBytes := state.ConsensusParams.TxSize.MaxBytes + filter := func(tx types.Tx) bool { return len(tx) <= maxBytes } + if err := blockExec.mempool.Update(block.Height, block.Txs, filter); err != nil { return nil, err } @@ -164,7 +166,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, txIndex := 0 abciResponses := NewABCIResponses(block) - // Execute transactions and get hash + // Execute transactions and get hash. proxyCb := func(req *abci.Request, res *abci.Response) { switch r := res.Value.(type) { case *abci.Response_DeliverTx: @@ -184,16 +186,13 @@ } proxyAppConn.SetResponseCallback(proxyCb) - signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) - // Begin block + // Begin block. _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: types.TM2PB.Header(&block.Header), - LastCommitInfo: abci.LastCommitInfo{ - CommitRound: int32(block.LastCommit.Round()), - Validators: signVals, - }, + Hash: block.Hash(), + Header: types.TM2PB.Header(&block.Header), + LastCommitInfo: commitInfo, ByzantineValidators: byzVals, }) if err != nil { @@ -201,7 +200,7 @@ return nil, err } - // Run txs of block + // Run txs of block.
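// (Added note: the loop below fires DeliverTxAsync for every tx in the
// block; the responses come back through the proxyCb callback registered
// above, which records each ResponseDeliverTx into abciResponses by
// txIndex.)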
for _, tx := range block.Txs { proxyAppConn.DeliverTxAsync(tx) if err := proxyAppConn.Error(); err != nil { @@ -209,7 +208,7 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, } } - // End block + // End block. abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{Height: block.Height}) if err != nil { logger.Error("Error in proxyAppConn.EndBlock", "err", err) @@ -220,13 +219,14 @@ func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, valUpdates := abciResponses.EndBlock.ValidatorUpdates if len(valUpdates) > 0 { - logger.Info("Updates to validators", "updates", abci.ValidatorsString(valUpdates)) + // TODO: cleanup the formatting + logger.Info("Updates to validators", "updates", valUpdates) } return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) { +func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { // Sanity check that commit length matches validator set size - // only applies after first block @@ -240,18 +240,23 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS } } - // determine which validators did not sign last block. - signVals := make([]abci.SigningValidator, len(lastValSet.Validators)) + // Collect the vote info (list of validators and whether or not they signed). + voteInfos := make([]abci.VoteInfo, len(lastValSet.Validators)) for i, val := range lastValSet.Validators { var vote *types.Vote if i < len(block.LastCommit.Precommits) { vote = block.LastCommit.Precommits[i] } - val := abci.SigningValidator{ - Validator: types.TM2PB.ValidatorWithoutPubKey(val), + voteInfo := abci.VoteInfo{ + Validator: types.TM2PB.Validator(val), SignedLastBlock: vote != nil, } - signVals[i] = val + voteInfos[i] = voteInfo + } + + commitInfo := abci.LastCommitInfo{ + Round: int32(block.LastCommit.Round()), + Votes: voteInfos, } byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) @@ -266,15 +271,15 @@ func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorS byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) } - return signVals, byzVals + return commitInfo, byzVals } // If more or equal than 1/3 of total voting power changed in one block, then // a light client could never prove the transition externally. See // ./lite/doc.go for details on how a light client tracks validators. -func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validator) error { - updates, err := types.PB2TM.Validators(abciUpdates) +func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.ValidatorUpdate) error { + updates, err := types.PB2TM.ValidatorUpdates(abciUpdates) if err != nil { return err } @@ -314,26 +319,25 @@ func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validat func updateState(state State, blockID types.BlockID, header *types.Header, abciResponses *ABCIResponses) (State, error) { - // copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators - prevValSet := state.Validators.Copy() - nextValSet := prevValSet.Copy() + // Copy the valset so we can apply changes from EndBlock + // and update s.LastValidators and s.Validators. 
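// (Added note: NextValidators is the set that signs the *next* block, so
// EndBlock updates are applied to this copy; correspondingly,
// lastHeightValsChanged below becomes header.Height + 1 + 1, meaning an
// update returned while executing height H first takes effect when
// validating height H+2.)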
+ nValSet := state.NextValidators.Copy() - // update the validator set with the latest abciResponses + // Update the validator set with the latest abciResponses. lastHeightValsChanged := state.LastHeightValidatorsChanged if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) + err := updateValidators(nValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { return state, fmt.Errorf("Error changing validator set: %v", err) } - // change results from this height but only applies to the next height - lastHeightValsChanged = header.Height + 1 + // Change results from this height, but it only applies to the next next height. + lastHeightValsChanged = header.Height + 1 + 1 } - // Update validator accums and set state variables - nextValSet.IncrementAccum(1) + // Update validator accums and set state variables. + nValSet.IncrementAccum(1) - // update the params with the latest abciResponses + // Update the params with the latest abciResponses. nextParams := state.ConsensusParams lastHeightParamsChanged := state.LastHeightConsensusParamsChanged if abciResponses.EndBlock.ConsensusParamUpdates != nil { @@ -343,7 +347,7 @@ if err != nil { return state, fmt.Errorf("Error updating consensus params: %v", err) } - // change results from this height but only applies to the next height + // Change results from this height, but it only applies to the next height. lastHeightParamsChanged = header.Height + 1 } @@ -355,7 +359,8 @@ LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs, LastBlockID: blockID, LastBlockTime: header.Time, - Validators: nextValSet, + NextValidators: nValSet, + Validators: state.NextValidators.Copy(), LastValidators: state.Validators.Copy(), LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, @@ -380,6 +385,14 @@ func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *ty Result: *(abciResponses.DeliverTx[i]), }}) } + + abciValUpdates := abciResponses.EndBlock.ValidatorUpdates + if len(abciValUpdates) > 0 { + // if there was an error, we would've stopped in updateValidators + updates, _ := types.PB2TM.ValidatorUpdates(abciValUpdates) + eventBus.PublishEventValidatorSetUpdates( + types.EventDataValidatorSetUpdates{ValidatorUpdates: updates}) + } } //---------------------------------------------------------------------------------------------------- diff --git a/state/execution_test.go b/state/execution_test.go index 53c5c882be6..6a20084938b 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -1,6 +1,7 @@ package state import ( + "context" "fmt" "testing" "time" @@ -14,6 +15,7 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" + tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" @@ -61,7 +63,7 @@ func TestBeginBlockValidators(t *testing.T) { prevParts := types.PartSetHeader{} prevBlockID := types.BlockID{prevHash, prevParts} - now := time.Now().UTC() + now := tmtime.Now() vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} @@ -79,13 +81,14 @@
lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits} // block for height 2 - block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil) + block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed ctr := 0 - for i, v := range app.Validators { + for i, v := range app.CommitVotes { if ctr < len(tc.expectedAbsentValidators) && tc.expectedAbsentValidators[ctr] == i { @@ -118,7 +121,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { ev1 := types.NewMockGoodEvidence(height1, idx1, val1) ev2 := types.NewMockGoodEvidence(height2, idx2, val2) - now := time.Now() + now := tmtime.Now() valSet := state.Validators testCases := []struct { desc string @@ -138,7 +141,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes} for _, tc := range testCases { - block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil) + block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) block.Time = now block.Evidence.Evidence = tc.evidence _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) @@ -159,7 +162,7 @@ func TestUpdateValidators(t *testing.T) { name string currentSet *types.ValidatorSet - abciUpdates []abci.Validator + abciUpdates []abci.ValidatorUpdate resultingSet *types.ValidatorSet shouldErr bool @@ -168,7 +171,7 @@ func TestUpdateValidators(t *testing.T) { "adding a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 20}}, types.NewValidatorSet([]*types.Validator{val1, val2}), false, @@ -177,7 +180,7 @@ func TestUpdateValidators(t *testing.T) { "updating a validator is OK", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey1), Power: 20}}, types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), false, @@ -186,7 +189,7 @@ func TestUpdateValidators(t *testing.T) { "removing a validator is OK", types.NewValidatorSet([]*types.Validator{val1, val2}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), false, @@ -196,7 +199,7 @@ func TestUpdateValidators(t *testing.T) { "removing a non-existing validator results in error", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: 0}}, types.NewValidatorSet([]*types.Validator{val1}), true, @@ -206,7 +209,7 @@ func TestUpdateValidators(t *testing.T) { "adding a validator with negative power results in error", types.NewValidatorSet([]*types.Validator{val1}), - []abci.Validator{{Address: []byte{}, PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, + []abci.ValidatorUpdate{{PubKey: types.TM2PB.PubKey(pubkey2), Power: -100}}, types.NewValidatorSet([]*types.Validator{val1}), 
true, @@ -232,6 +235,62 @@ func TestUpdateValidators(t *testing.T) { } } +// TestEndBlockValidatorUpdates ensures we update validator set and send an event. +func TestEndBlockValidatorUpdates(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, nil) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state, stateDB := state(1, 1) + + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), + MockMempool{}, MockEvidencePool{}) + eventBus := types.NewEventBus() + err = eventBus.Start() + require.NoError(t, err) + defer eventBus.Stop() + blockExec.SetEventBus(eventBus) + + updatesCh := make(chan interface{}, 1) + err = eventBus.Subscribe(context.Background(), "TestEndBlockValidatorUpdates", types.EventQueryValidatorSetUpdates, updatesCh) + require.NoError(t, err) + + block := makeBlock(state, 1) + blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + + pubkey := ed25519.GenPrivKey().PubKey() + app.ValidatorUpdates = []abci.ValidatorUpdate{ + {PubKey: types.TM2PB.PubKey(pubkey), Power: 10}, + } + + state, err = blockExec.ApplyBlock(state, blockID, block) + require.Nil(t, err) + + // test new validator was added to NextValidators + if assert.Equal(t, state.Validators.Size()+1, state.NextValidators.Size()) { + idx, _ := state.NextValidators.GetByAddress(pubkey.Address()) + if idx < 0 { + t.Fatalf("can't find address %v in the set %v", pubkey.Address(), state.NextValidators) + } + } + + // test we threw an event + select { + case e := <-updatesCh: + event, ok := e.(types.EventDataValidatorSetUpdates) + require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", e) + if assert.NotEmpty(t, event.ValidatorUpdates) { + assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) + } + case <-time.After(1 * time.Second): + t.Fatal("Did not receive EventValidatorSetUpdates within 1 sec.") + } +} + //---------------------------------------------------------------------------- // make some bogus txs @@ -263,41 +322,43 @@ func state(nVals, height int) (State, dbm.DB) { for i := 1; i < height; i++ { s.LastBlockHeight++ + s.LastValidators = s.Validators.Copy() SaveState(stateDB, s) } return s, stateDB } func makeBlock(state State, height int64) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil) + block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address) return block } //---------------------------------------------------------------------------- -var _ abci.Application = (*testApp)(nil) - type testApp struct { abci.BaseApplication - Validators []abci.SigningValidator + CommitVotes []abci.VoteInfo ByzantineValidators []abci.Evidence + ValidatorUpdates []abci.ValidatorUpdate } -func NewKVStoreApplication() *testApp { - return &testApp{} -} +var _ abci.Application = (*testApp)(nil) func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { return abci.ResponseInfo{} } func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { - app.Validators = req.LastCommitInfo.Validators + app.CommitVotes = req.LastCommitInfo.Votes app.ByzantineValidators = req.ByzantineValidators return abci.ResponseBeginBlock{} } +func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + return 
abci.ResponseEndBlock{ValidatorUpdates: app.ValidatorUpdates} +} + func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} } diff --git a/state/services.go b/state/services.go index c51fa97521a..13ab7383ff2 100644 --- a/state/services.go +++ b/state/services.go @@ -22,8 +22,8 @@ type Mempool interface { Size() int CheckTx(types.Tx, func(*abci.Response)) error - Reap(int) types.Txs - Update(height int64, txs types.Txs) error + ReapMaxBytes(max int) types.Txs + Update(height int64, txs types.Txs, filter func(types.Tx) bool) error Flush() FlushAppConn() error @@ -32,19 +32,18 @@ type Mempool interface { } // MockMempool is an empty implementation of a Mempool, useful for testing. -type MockMempool struct { -} - -func (m MockMempool) Lock() {} -func (m MockMempool) Unlock() {} -func (m MockMempool) Size() int { return 0 } -func (m MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil } -func (m MockMempool) Reap(n int) types.Txs { return types.Txs{} } -func (m MockMempool) Update(height int64, txs types.Txs) error { return nil } -func (m MockMempool) Flush() {} -func (m MockMempool) FlushAppConn() error { return nil } -func (m MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (m MockMempool) EnableTxsAvailable() {} +type MockMempool struct{} + +func (MockMempool) Lock() {} +func (MockMempool) Unlock() {} +func (MockMempool) Size() int { return 0 } +func (MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil } +func (MockMempool) ReapMaxBytes(max int) types.Txs { return types.Txs{} } +func (MockMempool) Update(height int64, txs types.Txs, filter func(types.Tx) bool) error { return nil } +func (MockMempool) Flush() {} +func (MockMempool) FlushAppConn() error { return nil } +func (MockMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (MockMempool) EnableTxsAvailable() {} //------------------------------------------------------ // blockstore @@ -72,15 +71,14 @@ type BlockStore interface { // EvidencePool defines the EvidencePool interface used by the ConsensusState. type EvidencePool interface { - PendingEvidence() []types.Evidence + PendingEvidence(int) []types.Evidence AddEvidence(types.Evidence) error Update(*types.Block, State) } // MockMempool is an empty implementation of a Mempool, useful for testing. -type MockEvidencePool struct { -} +type MockEvidencePool struct{} -func (m MockEvidencePool) PendingEvidence() []types.Evidence { return nil } -func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (m MockEvidencePool) Update(*types.Block, State) {} +func (m MockEvidencePool) PendingEvidence(int) []types.Evidence { return nil } +func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil } +func (m MockEvidencePool) Update(*types.Block, State) {} diff --git a/state/state.go b/state/state.go index fb589a2404b..10da67e929e 100644 --- a/state/state.go +++ b/state/state.go @@ -7,6 +7,7 @@ import ( "time" "github.com/tendermint/tendermint/types" + tmtime "github.com/tendermint/tendermint/types/time" ) // database keys @@ -24,7 +25,7 @@ var ( // Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { - // Immutable + // immutable ChainID string // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) @@ -38,6 +39,7 @@ type State struct { // so we can query for historical validator sets. 
// Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + NextValidators *types.ValidatorSet Validators *types.ValidatorSet LastValidators *types.ValidatorSet LastHeightValidatorsChanged int64 @@ -50,7 +52,7 @@ type State struct { // Merkle root of the results from executing prev block LastResultsHash []byte - // The latest AppHash we've received from calling abci.Commit() + // the latest AppHash we've received from calling abci.Commit() AppHash []byte } @@ -64,6 +66,7 @@ func (state State) Copy() State { LastBlockID: state.LastBlockID, LastBlockTime: state.LastBlockTime, + NextValidators: state.NextValidators.Copy(), Validators: state.Validators.Copy(), LastValidators: state.LastValidators.Copy(), LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, @@ -93,20 +96,18 @@ func (state State) IsEmpty() bool { return state.Validators == nil // XXX can't compare to Empty } -// GetValidators returns the last and current validator sets. -func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { - return state.LastValidators, state.Validators -} - //------------------------------------------------------------------------ // Create a block from the latest state -// MakeBlock builds a block from the current state with the given txs, commit, and evidence. +// MakeBlock builds a block from the current state with the given txs, commit, +// and evidence. Note it also takes a proposerAddress because the state does not +// track rounds, and hence doesn't know the correct proposer. TODO: alleviate this! func (state State) MakeBlock( height int64, txs []types.Tx, commit *types.Commit, evidence []types.Evidence, + proposerAddress []byte, ) (*types.Block, *types.PartSet) { // Build base block with block data. @@ -115,17 +116,52 @@ func (state State) MakeBlock( // Fill rest of header with state data. block.ChainID = state.ChainID + // Set time + if height == 1 { + block.Time = tmtime.Now() + if block.Time.Before(state.LastBlockTime) { + block.Time = state.LastBlockTime // state.LastBlockTime for height == 1 is genesis time + } + } else { + block.Time = MedianTime(commit, state.LastValidators) + } + block.LastBlockID = state.LastBlockID block.TotalTxs = state.LastBlockTotalTx + block.NumTxs block.ValidatorsHash = state.Validators.Hash() + block.NextValidatorsHash = state.NextValidators.Hash() block.ConsensusHash = state.ConsensusParams.Hash() block.AppHash = state.AppHash block.LastResultsHash = state.LastResultsHash + // NOTE: we can't use state.Validators because we don't + // IncrementAccum for rounds there. + block.ProposerAddress = proposerAddress + return block, block.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) } +// MedianTime computes a median time for a given Commit (based on the Timestamp field of its votes) and the +// corresponding validator set. The computed time is always between the timestamps of +// the votes sent by honest processes, i.e., faulty processes cannot arbitrarily increase or decrease the +// computed value.
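As a concrete illustration of the guarantee stated above, before the MedianTime implementation that follows: the median is weighted by voting power, so a minority of faulty validators cannot pull the result outside the honest timestamps. This standalone Go sketch mirrors the algorithm; the weightedVote type and the sample powers are illustrative stand-ins for tmtime.WeightedTime and a real validator set.

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

// weightedVote pairs a vote timestamp with the voting power behind it.
// Illustrative only: the patch uses tmtime.WeightedTime / tmtime.WeightedMedian.
type weightedVote struct {
	t     time.Time
	power int64
}

// weightedMedian returns the timestamp at which cumulative voting power
// (in time order) first exceeds half of totalPower. With more than half of
// the power honest, the result is bounded by honest timestamps.
func weightedMedian(votes []weightedVote, totalPower int64) time.Time {
	sort.Slice(votes, func(i, j int) bool { return votes[i].t.Before(votes[j].t) })
	median := totalPower / 2
	for _, v := range votes {
		if median < v.power {
			return v.t
		}
		median -= v.power
	}
	return time.Time{} // unreachable if totalPower matches the votes
}

func main() {
	base := time.Date(2018, 8, 1, 0, 0, 0, 0, time.UTC)
	votes := []weightedVote{
		{base.Add(1 * time.Second), 10}, // honest
		{base.Add(2 * time.Second), 10}, // honest
		{base.Add(1 * time.Hour), 5},    // faulty, far-future timestamp
	}
	// The faulty 5/25 of the power cannot drag the median into the future.
	fmt.Println(weightedMedian(votes, 25)) // 2018-08-01 00:00:02 +0000 UTC
}
```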
+func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { + + weightedTimes := make([]*tmtime.WeightedTime, len(commit.Precommits)) + totalVotingPower := int64(0) + + for i, vote := range commit.Precommits { + if vote != nil { + _, validator := validators.GetByIndex(vote.ValidatorIndex) + totalVotingPower += validator.VotingPower + weightedTimes[i] = tmtime.NewWeightedTime(vote.Timestamp, validator.VotingPower) + } + } + + return tmtime.WeightedMedian(weightedTimes, totalVotingPower) +} + //------------------------------------------------------------------------ // Genesis @@ -183,6 +219,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { LastBlockID: types.BlockID{}, LastBlockTime: genDoc.GenesisTime, + NextValidators: types.NewValidatorSet(validators).CopyIncrementAccum(1), Validators: types.NewValidatorSet(validators), LastValidators: types.NewValidatorSet(nil), LastHeightValidatorsChanged: 1, diff --git a/state/state_test.go b/state/state_test.go index 05c1859efb0..9a793c8e7f7 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/types" ) -// setupTestCase does setup common to all test cases +// setupTestCase does setup common to all test cases. func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { config := cfg.ResetTestRoot("state_") dbType := dbm.DBBackendType(config.DBBackend) @@ -40,11 +40,11 @@ func TestStateCopy(t *testing.T) { stateCopy := state.Copy() assert.True(state.Equals(stateCopy), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", stateCopy, state)) stateCopy.LastBlockHeight++ - assert.False(state.Equals(stateCopy), cmn.Fmt(`expected states to be different. got same + assert.False(state.Equals(stateCopy), fmt.Sprintf(`expected states to be different. got same %v`, state)) } @@ -60,7 +60,7 @@ func TestStateSaveLoad(t *testing.T) { loadedState := LoadState(stateDB) assert.True(state.Equals(loadedState), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", + fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", loadedState, state)) } @@ -73,24 +73,24 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ - // build mock responses + // Build mock responses. block := makeBlock(state, 2) abciResponses := NewABCIResponses(block) abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} - abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(ed25519.GenPrivKey().PubKey(), 10), + abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} saveABCIResponses(stateDB, block.Height, abciResponses) loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedABCIResponses, - cmn.Fmt("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", + fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", loadedABCIResponses, abciResponses)) } -// TestResultsSaveLoad tests saving and loading abci results. +// TestResultsSaveLoad tests saving and loading ABCI results. 
func TestABCIResponsesSaveLoad2(t *testing.T) { tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) @@ -98,8 +98,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { assert := assert.New(t) cases := [...]struct { - // height is implied index+2 - // as block 1 is created from genesis + // Height is implied to equal index+2, + // as block 1 is created from genesis. added []*abci.ResponseDeliverTx expected types.ABCIResults }{ @@ -119,8 +119,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { {Code: 383}, {Data: []byte("Gotcha!"), Tags: []cmn.KVPair{ - cmn.KVPair{[]byte("a"), []byte("1")}, - cmn.KVPair{[]byte("build"), []byte("stuff")}, + cmn.KVPair{Key: []byte("a"), Value: []byte("1")}, + cmn.KVPair{Key: []byte("build"), Value: []byte("stuff")}, }}, }, types.ABCIResults{ @@ -133,14 +133,14 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { }, } - // query all before, should return error + // Query all before, this should return error. for i := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) assert.Error(err, "%d: %#v", i, res) } - // add all cases + // Add all cases. for i, tc := range cases { h := int64(i + 1) // last block height, one below what we save responses := &ABCIResponses{ @@ -150,7 +150,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { saveABCIResponses(stateDB, h, responses) } - // query all before, should return expected value + // Query all before, should return expected value. for i, tc := range cases { h := int64(i + 1) res, err := LoadABCIResponses(stateDB, h) @@ -166,34 +166,30 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { // nolint: vetshadow assert := assert.New(t) - // can't load anything for height 0 + // Can't load anything for height 0. v, err := LoadValidators(stateDB, 0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") - // should be able to load for height 1 + // Should be able to load for height 1. v, err = LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - // increment height, save; should be able to load for next height + // Should be able to load for height 2. + v, err = LoadValidators(stateDB, 2) + assert.Nil(err, "expected no err at height 2") + assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") + + // Increment height, save; should be able to load for next & next next height. 
state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + vp0, err := LoadValidators(stateDB, nextHeight+0) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // increment height, save; should be able to load for next height - state.LastBlockHeight += 10 - nextHeight = state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) + vp1, err := LoadValidators(stateDB, nextHeight+1) assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // should be able to load for next next height - _, err = LoadValidators(stateDB, state.LastBlockHeight+2) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") + assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") + assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. @@ -201,20 +197,19 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // build the validator history by running updateState - // with the right validator set for each height + // Build the validator history by running updateState + // with the right validator set for each height. highestHeight := changeHeights[N-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) power := val.VotingPower var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next pubkey + // When we get to a change height, use the next pubkey. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ power++ @@ -223,16 +218,16 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { state, err = updateState(state, blockID, &header, responses) assert.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) } - // on each change height, increment the power by one. + // On each change height, increment the power by one. testCases := make([]int64, highestHeight) changeIndex = 0 power = val.VotingPower for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // When we get to the height after a change height, use the next power (note + // our counter starts at 0 this time). if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ power++ @@ -241,7 +236,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := LoadValidators(stateDB, int64(i+1)) + v, err := LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block.
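The `+1` offsets in this test follow from updateState earlier in the patch: a validator update returned by EndBlock at height H is recorded in NextValidators and first signs at height H+2, while the set for H+1 was already fixed when block H was proposed. A minimal sketch of that bookkeeping, with a string-keyed map standing in for saveValidatorsInfo/LoadValidators (all names are illustrative):

```go
package main

import "fmt"

func main() {
	// valsetAt[h] = the validator set that signs block h
	// (cf. saveValidatorsInfo; strings stand in for *types.ValidatorSet).
	valsetAt := map[int64]string{
		1: "genesis", // saved explicitly for the first block
		2: "genesis", // genesis NextValidators, saved at nextHeight+1
	}
	for h := int64(1); h <= 3; h++ {
		// An update returned by EndBlock at height h goes into
		// NextValidators and is persisted for height h+2.
		valsetAt[h+2] = fmt.Sprintf("updated-at-%d", h)
	}
	for h := int64(1); h <= 5; h++ {
		fmt.Printf("block %d is validated by %q\n", h, valsetAt[h])
	}
}
```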
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -256,25 +251,42 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) + require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) + state.NextValidators = state.Validators.CopyIncrementAccum(1) SaveState(stateDB, state) defer tearDown(t) - const height = 1 + _, valOld := state.Validators.GetByIndex(0) + var pubkeyOld = valOld.PubKey pubkey := ed25519.GenPrivKey().PubKey() - // swap the first validator with a new one ^^^ (validator set size stays the same) + const height = 1 + + // Swap the first validator with a new one (validator set size stays the same). header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) + + // Save state etc. var err error state, err = updateState(state, blockID, &header, responses) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - v, err := LoadValidators(stateDB, height+1) + // Load nextHeight; it should have the old pubkey. + v0, err := LoadValidators(stateDB, nextHeight) assert.Nil(t, err) - assert.Equal(t, valSetSize, v.Size()) + assert.Equal(t, valSetSize, v0.Size()) + index, val := v0.GetByAddress(pubkeyOld.Address()) + assert.NotNil(t, val) + if index < 0 { + t.Fatal("expected to find old validator") + } - index, val := v.GetByAddress(pubkey.Address()) + // Load nextHeight+1; it should have the new pubkey. + v1, err := LoadValidators(stateDB, nextHeight+1) + assert.Nil(t, err) + assert.Equal(t, valSetSize, v1.Size()) + index, val = v1.GetByAddress(pubkey.Address()) assert.NotNil(t, val) if index < 0 { t.Fatal("expected to find newly added validator") @@ -289,18 +301,29 @@ func genValSet(size int) *types.ValidatorSet { return types.NewValidatorSet(vals) } +func TestStateMakeBlock(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + + proposerAddress := state.Validators.GetProposer().Address + block := makeBlock(state, 2) + + // Test that we set the proposer address. + assert.Equal(t, proposerAddress, block.ProposerAddress) +} + // TestConsensusParamsChangesSaveLoad tests saving and loading consensus params // with changes. func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) - // change vals at these heights + // Change params at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // each valset is just one validator - // create list of them + // Each valset is just one validator. + // Create a list of them. params := make([]types.ConsensusParams, N+1) params[0] = state.ConsensusParams for i := 1; i < N+1; i++ { @@ -308,15 +331,14 @@ params[i].BlockSize.MaxBytes += i } - // build the params history by running updateState - // with the right params set for each height + // Build the params history by running updateState + // with the right params set for each height.
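Note the asymmetry these two save/load tests rely on: consensus params updated at height H take effect at H+1 (updateState sets lastHeightParamsChanged = header.Height + 1), while validator updates take effect at H+2. A tiny sketch of the two offsets; the height 10 is just an example:

```go
package main

import "fmt"

func main() {
	const h = int64(10) // the height whose EndBlock carries both updates

	// Offsets used by updateState in this patch:
	lastHeightParamsChanged := h + 1   // new consensus params govern block h+1
	lastHeightValsChanged := h + 1 + 1 // new validators first sign block h+2

	fmt.Printf("EndBlock at height %d: params change at %d, valset at %d\n",
		h, lastHeightParamsChanged, lastHeightValsChanged)
}
```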
highestHeight := changeHeights[N-1] + 5 changeIndex := 0 cp := params[changeIndex] var err error for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next params + // When we get to a change height, use the next params. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { changeIndex++ cp = params[changeIndex] @@ -329,13 +351,13 @@ saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } - // make all the test cases by using the same params until after the change + // Make all the test cases by using the same params until after the change. testCases := make([]paramsChangeTestCase, highestHeight) changeIndex = 0 cp = params[changeIndex] for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) + // When we get to the height after a change height, use the next params (note + // our counter starts at 0 this time). if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { changeIndex++ cp = params[changeIndex] @@ -351,13 +373,10 @@ } } -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) types.ConsensusParams { - +func makeParams(txsBytes, blockGas, txBytes, txGas, partSize int) types.ConsensusParams { return types.ConsensusParams{ BlockSize: types.BlockSize{ - MaxBytes: blockBytes, - MaxTxs: blockTx, + MaxBytes: txsBytes, MaxGas: int64(blockGas), }, TxSize: types.TxSize{ @@ -375,7 +394,7 @@ func pk() []byte { } func TestApplyUpdates(t *testing.T) { - initParams := makeParams(1, 2, 3, 4, 5, 6) + initParams := makeParams(1, 2, 3, 4, 5) cases := [...]struct { init types.ConsensusParams @@ -390,19 +409,19 @@ MaxBytes: 123, }, }, - makeParams(1, 2, 3, 123, 5, 6)}, + makeParams(1, 2, 123, 4, 5)}, 3: {initParams, abci.ConsensusParams{ BlockSize: &abci.BlockSize{ - MaxTxs: 44, - MaxGas: 55, + MaxBytes: 1, + MaxGas: 55, }, }, - makeParams(1, 44, 55, 4, 5, 6)}, + makeParams(1, 55, 3, 4, 5)}, 4: {initParams, abci.ConsensusParams{ BlockSize: &abci.BlockSize{ - MaxTxs: 789, + MaxBytes: 1, }, TxSize: &abci.TxSize{ MaxGas: 888, @@ -411,7 +430,7 @@ BlockPartSizeBytes: 2002, }, }, - makeParams(1, 789, 3, 4, 888, 2002)}, + makeParams(1, 2, 3, 888, 2002)}, } for i, tc := range cases { @@ -423,18 +442,18 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, pubkey crypto.PubKey) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new.
+ _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0), - types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10), + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, 0), + types.TM2PB.NewValidatorUpdate(pubkey, 10), }, } } @@ -445,17 +464,17 @@ func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, func makeHeaderPartsResponsesValPowerChange(state State, height int64, power int64) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) + // If the pubkey is new, remove the old and add the new. + _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power), + ValidatorUpdates: []abci.ValidatorUpdate{ + types.TM2PB.NewValidatorUpdate(val.PubKey, power), }, } } @@ -466,7 +485,7 @@ func makeHeaderPartsResponsesValPowerChange(state State, height int64, func makeHeaderPartsResponsesParams(state State, height int64, params types.ConsensusParams) (types.Header, types.BlockID, *ABCIResponses) { - block := makeBlock(state, height) + block := makeBlock(state, state.LastBlockHeight+1) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, } diff --git a/state/store.go b/state/store.go index 3a1a6231b73..2f90c747ea3 100644 --- a/state/store.go +++ b/state/store.go @@ -12,15 +12,15 @@ import ( //------------------------------------------------------------------------ func calcValidatorsKey(height int64) []byte { - return []byte(cmn.Fmt("validatorsKey:%v", height)) + return []byte(fmt.Sprintf("validatorsKey:%v", height)) } func calcConsensusParamsKey(height int64) []byte { - return []byte(cmn.Fmt("consensusParamsKey:%v", height)) + return []byte(fmt.Sprintf("consensusParamsKey:%v", height)) } func calcABCIResponsesKey(height int64) []byte { - return []byte(cmn.Fmt("abciResponsesKey:%v", height)) + return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) } // LoadStateFromDBOrGenesisFile loads the most recent state from the database, @@ -71,7 +71,7 @@ func loadState(db dbm.DB, key []byte) (state State) { err := cdc.UnmarshalBinaryBare(buf, &state) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadState: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. @@ -87,7 +87,14 @@ func SaveState(db dbm.DB, state State) { func saveState(db dbm.DB, state State, key []byte) { nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + // If first block, save validators for block 1. + if nextHeight == 1 { + lastHeightVoteChanged := int64(1) // Due to Tendermint validator set changes being delayed 1 block. + saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + } + // Save next validators. 
+ saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + // Save next consensus params. saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) db.SetSync(stateKey, state.Bytes()) } @@ -137,7 +144,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { err := cdc.UnmarshalBinaryBare(buf, abciResponses) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has + cmn.Exit(fmt.Sprintf(`LoadABCIResponses: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. @@ -200,7 +207,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { err := cdc.UnmarshalBinaryBare(buf, v) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadValidators: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. @@ -271,7 +278,7 @@ func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { err := cdc.UnmarshalBinaryBare(buf, paramsInfo) if err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed: + cmn.Exit(fmt.Sprintf(`LoadConsensusParams: Data has been corrupted or its spec has changed: %v\n`, err)) } // TODO: ensure that buf is completely read. diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 707325929aa..363ab1193ec 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -89,6 +89,11 @@ func (txi *TxIndex) AddBatch(b *txindex.Batch) error { } } + // index tx by height + if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { + storeBatch.Set(keyForHeight(result), hash) + } + // index tx by hash rawBytes, err := cdc.MarshalBinaryBare(result) if err != nil { @@ -114,6 +119,11 @@ func (txi *TxIndex) Index(result *types.TxResult) error { } } + // index tx by height + if txi.indexAllTags || cmn.StringInSlice(types.TxHeightKey, txi.tagsToIndex) { + b.Set(keyForHeight(result), hash) + } + // index tx by hash rawBytes, err := cdc.MarshalBinaryBare(result) if err != nil { @@ -153,12 +163,6 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { // conditions to skip because they're handled before "everything else" skipIndexes := make([]int, 0) - // if there is a height condition ("tx.height=3"), extract it for faster lookups - height, heightIndex := lookForHeight(conditions) - if heightIndex >= 0 { - skipIndexes = append(skipIndexes, heightIndex) - } - // extract ranges // if both upper and lower bounds exist, it's better to get them in order not // no iterate over kvs that are not within range. 
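The new height index reuses the keyForTag layout, with the height serving as both the tag value and the height component, which is why keyForHeight (shown further down) writes the height twice. A self-contained sketch of how such keys support an exact-match scan for a query like "tx.height=3" (the key shape follows the patch; the sorted map standing in for the LevelDB iterator is illustrative):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// keyForHeight mirrors the shape used in this patch:
// <tag>/<value>/<height>/<index>, where for the height index the
// value *is* the height, hence the height appearing twice.
func keyForHeight(height, index int64) string {
	return fmt.Sprintf("tx.height/%d/%d/%d", height, height, index)
}

func main() {
	store := map[string]string{ // stand-in for the KV store batch
		keyForHeight(3, 0): "hash-a",
		keyForHeight(3, 1): "hash-b",
		keyForHeight(4, 0): "hash-c",
	}

	// A query like "tx.height=3" becomes a prefix scan over "tx.height/3/".
	// The trailing slash keeps height 30 from matching height 3.
	prefix := "tx.height/3/"
	var keys []string
	for k := range store {
		if strings.HasPrefix(k, prefix) {
			keys = append(keys, k)
		}
	}
	sort.Strings(keys)
	for _, k := range keys {
		fmt.Println(k, "->", store[k])
	}
}
```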
@@ -176,6 +180,9 @@ func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) { } } + // if there is a height condition ("tx.height=3"), extract it + height := lookForHeight(conditions) + // for all other conditions for i, c := range conditions { if cmn.IntInSlice(i, skipIndexes) { @@ -218,13 +225,13 @@ func lookForHash(conditions []query.Condition) (hash []byte, err error, ok bool) return } -func lookForHeight(conditions []query.Condition) (height int64, index int) { - for i, c := range conditions { +func lookForHeight(conditions []query.Condition) (height int64) { + for _, c := range conditions { if c.Tag == types.TxHeightKey { - return c.Operand.(int64), i + return c.Operand.(int64) } } - return 0, -1 + return 0 } // special map to hold range conditions @@ -421,6 +428,10 @@ func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { return []byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index)) } +func keyForHeight(result *types.TxResult) []byte { + return []byte(fmt.Sprintf("%s/%d/%d/%d", types.TxHeightKey, result.Height, result.Height, result.Index)) +} + /////////////////////////////////////////////////////////////////////////////// // Utils diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go index c32c827d40a..67fdf9e2492 100644 --- a/state/txindex/kv/kv_test.go +++ b/state/txindex/kv/kv_test.go @@ -156,8 +156,8 @@ func TestIndexAllTags(t *testing.T) { indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) txResult := txResultWithTags([]cmn.KVPair{ - cmn.KVPair{[]byte("account.owner"), []byte("Ivan")}, - cmn.KVPair{[]byte("account.number"), []byte("1")}, + cmn.KVPair{Key: []byte("account.owner"), Value: []byte("Ivan")}, + cmn.KVPair{Key: []byte("account.number"), Value: []byte("1")}, }) err := indexer.Index(txResult) diff --git a/state/validation.go b/state/validation.go index c3633920322..ccfe1ef127a 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,55 +5,84 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/crypto/tmhash" dbm "github.com/tendermint/tendermint/libs/db" + "github.com/tendermint/tendermint/types" ) //----------------------------------------------------- // Validate block func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { - // validate internal consistency + // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err } - // validate basic info + // Validate basic info. if block.ChainID != state.ChainID { - return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID) + return fmt.Errorf( + "Wrong Block.Header.ChainID. Expected %v, got %v", + state.ChainID, + block.ChainID, + ) } if block.Height != state.LastBlockHeight+1 { - return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height) + return fmt.Errorf( + "Wrong Block.Header.Height. Expected %v, got %v", + state.LastBlockHeight+1, + block.Height, + ) } - /* TODO: Determine bounds for Time - See blockchain/reactor "stopSyncingDurationMinutes" - - if !block.Time.After(lastBlockTime) { - return errors.New("Invalid Block.Header.Time") - } - */ - // validate prev block info + // Validate prev block info. if !block.LastBlockID.Equals(state.LastBlockID) { - return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID) + return fmt.Errorf( + "Wrong Block.Header.LastBlockID. 
Expected %v, got %v", + state.LastBlockID, + block.LastBlockID, + ) } newTxs := int64(len(block.Data.Txs)) if block.TotalTxs != state.LastBlockTotalTx+newTxs { - return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs) + return fmt.Errorf( + "Wrong Block.Header.TotalTxs. Expected %v, got %v", + state.LastBlockTotalTx+newTxs, + block.TotalTxs, + ) } - // validate app info + // Validate app info if !bytes.Equal(block.AppHash, state.AppHash) { - return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", state.AppHash, block.AppHash) + return fmt.Errorf( + "Wrong Block.Header.AppHash. Expected %X, got %v", + state.AppHash, + block.AppHash, + ) } if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) { - return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", state.ConsensusParams.Hash(), block.ConsensusHash) + return fmt.Errorf( + "Wrong Block.Header.ConsensusHash. Expected %X, got %v", + state.ConsensusParams.Hash(), + block.ConsensusHash, + ) } if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) { - return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", state.LastResultsHash, block.LastResultsHash) + return fmt.Errorf( + "Wrong Block.Header.LastResultsHash. Expected %X, got %v", + state.LastResultsHash, + block.LastResultsHash, + ) } if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { - return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", state.Validators.Hash(), block.ValidatorsHash) + return fmt.Errorf( + "Wrong Block.Header.ValidatorsHash. Expected %X, got %v", + state.Validators.Hash(), + block.ValidatorsHash, + ) + } + if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { + return fmt.Errorf("Wrong Block.Header.NextValidatorsHash. Expected %X, got %v", state.NextValidators.Hash(), block.NextValidatorsHash) } // Validate block LastCommit. @@ -63,8 +92,11 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } else { if len(block.LastCommit.Precommits) != state.LastValidators.Size() { - return fmt.Errorf("Invalid block commit size. Expected %v, got %v", - state.LastValidators.Size(), len(block.LastCommit.Precommits)) + return fmt.Errorf( + "Invalid block commit size. Expected %v, got %v", + state.LastValidators.Size(), + len(block.LastCommit.Precommits), + ) } err := state.LastValidators.VerifyCommit( state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) @@ -73,6 +105,27 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } + // Validate block Time + if block.Height > 1 { + if !block.Time.After(state.LastBlockTime) { + return fmt.Errorf( + "Block time %v not greater than last block time %v", + block.Time, + state.LastBlockTime, + ) + } + + medianTime := MedianTime(block.LastCommit, state.LastValidators) + if !block.Time.Equal(medianTime) { + return fmt.Errorf( + "Invalid block time. Expected %v, got %v", + medianTime, + block.Time, + ) + } + } + + // Validate all evidence. // TODO: Each check requires loading an old validator set. // We should cap the amount of evidence per block // to prevent potential proposer DoS. @@ -82,6 +135,17 @@ func validateBlock(stateDB dbm.DB, state State, block *types.Block) error { } } + // NOTE: We can't actually verify it's the right proposer because we dont + // know what round the block was first proposed. So just check that it's + // a legit address and a known validator. 
+ if len(block.ProposerAddress) != tmhash.Size || + !state.Validators.HasAddress(block.ProposerAddress) { + return fmt.Errorf( + "Block.Header.ProposerAddress, %X, is not a validator", + block.ProposerAddress, + ) + } + return nil } diff --git a/state/validation_test.go b/state/validation_test.go index 362a407374f..ba76a72bcbf 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" dbm "github.com/tendermint/tendermint/libs/db" "github.com/tendermint/tendermint/libs/log" ) @@ -65,4 +66,13 @@ func TestValidateBlock(t *testing.T) { block.ValidatorsHash = []byte("wrong validators hash") err = blockExec.ValidateBlock(state, block) require.Error(t, err) + + // wrong proposer address + block = makeBlock(state, 1) + block.ProposerAddress = ed25519.GenPrivKey().PubKey().Address() + err = blockExec.ValidateBlock(state, block) + require.Error(t, err) + block.ProposerAddress = []byte("wrong size") + err = blockExec.ValidateBlock(state, block) + require.Error(t, err) } diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index c55713c7f66..51c0d9b7ebc 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -2,13 +2,13 @@ package main import ( "encoding/hex" - "encoding/json" "fmt" "os" "context" - "github.com/tendermint/tendermint/rpc/grpc" + amino "github.com/tendermint/go-amino" + core_grpc "github.com/tendermint/tendermint/rpc/grpc" ) var grpcAddr = "tcp://localhost:36656" @@ -27,13 +27,13 @@ func main() { } clientGRPC := core_grpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes}) + res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: txBytes}) if err != nil { fmt.Println(err) os.Exit(1) } - bz, err := json.Marshal(res) + bz, err := amino.NewCodec().MarshalJSON(res) if err != nil { fmt.Println(err) os.Exit(1) diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index 34968c39149..6bb320be8f7 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -21,8 +21,15 @@ COPY . $REPO RUN make get_tools RUN make get_vendor_deps -RUN go install ./cmd/tendermint -RUN go install ./abci/cmd/abci-cli +# Now copy in the code +# NOTE: this will overwrite whatever is in vendor/ +COPY . $REPO + +# install ABCI CLI +RUN make install_abci + +# install Tendermint +RUN make install # expose the volume for debugging VOLUME $REPO diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh index caf665122ff..423b5b0179c 100755 --- a/test/p2p/basic/test.sh +++ b/test/p2p/basic/test.sh @@ -56,6 +56,7 @@ for i in `seq 1 $N`; do # - assert block height is greater than 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height | jq fromjson` COUNT=0 + echo "$$BLOCK_HEIGHT IS $BLOCK_HEIGHT" while [ "$BLOCK_HEIGHT" -le 1 ]; do echo "Waiting for node $i to commit a block ..." 
sleep 1 diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh index 87a7681109e..95da7484ebd 100644 --- a/test/p2p/kill_all/check_peers.sh +++ b/test/p2p/kill_all/check_peers.sh @@ -23,7 +23,7 @@ set -e # get the first peer's height addr=$(test/p2p/ip.sh 1):26657 -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) +h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "1st peer is on height $h1" echo "Waiting until other peers reporting a height higher than the 1st one" @@ -33,7 +33,7 @@ for i in $(seq 2 "$NUM_OF_PEERS"); do while [[ $hi -le $h1 ]] ; do addr=$(test/p2p/ip.sh "$i"):26657 - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) + hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height | sed -e "s/^\"\(.*\)\"$/\1/g") echo "... peer $i is on height $hi" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh index 15d44ff3323..ad04d000ffc 100644 --- a/test/p2p/peer.sh +++ b/test/p2p/peer.sh @@ -14,14 +14,31 @@ echo "starting tendermint peer ID=$ID" # start tendermint container on the network # NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be # treated as one flag. -docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "$ID") \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" + +if [[ "$ID" == "x" ]]; then # Set "x" to "1" to print to console. + docker run \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" & +else + docker run -d \ + --net="$NETWORK_NAME" \ + --ip=$(test/p2p/ip.sh "$ID") \ + --name "local_testnet_$ID" \ + --entrypoint tendermint \ + -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ + -e GOMAXPROCS=1 \ + --log-driver=syslog \ + --log-opt syslog-address=udp://127.0.0.1:5514 \ + --log-opt syslog-facility=daemon \ + --log-opt tag="{{.Name}}" \ + "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" +fi diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh index d54bcf428a4..9c58db30c7d 100644 --- a/test/p2p/pex/test_addrbook.sh +++ b/test/p2p/pex/test_addrbook.sh @@ -16,6 +16,7 @@ CLIENT_NAME="pex_addrbook_$ID" echo "1. 
restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" # preserve addrbook.json docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" set +e #CIRCLE @@ -24,6 +25,13 @@ set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +echo "started local_testnet_$ID" + +# if the client runs forever, it means addrbook wasn't saved or was empty +bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" + +# Now we know that the node is up. + docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" echo "with the following addrbook:" cat /tmp/addrbook.json @@ -31,9 +39,6 @@ cat /tmp/addrbook.json # docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" echo "" -# if the client runs forever, it means addrbook wasn't saved or was empty -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" - echo "----------------------------------------------------------------------" echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" echo "(assuming peers are started with pex enabled)" @@ -42,16 +47,20 @@ CLIENT_NAME="pex_no_addrbook_$ID" echo "1. restart peer $ID" docker stop "local_testnet_$ID" +echo "stopped local_testnet_$ID" set +e #CIRCLE docker rm -vf "local_testnet_$ID" set -e # NOTE that we do not provide persistent_peers bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" +echo "started local_testnet_$ID" # if the client runs forever, it means other peers have removed us from their books (which should not happen) bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" +# Now we know that the node is up. + echo "" echo "PASS" echo "" diff --git a/tools/tm-monitor/monitor/node.go b/tools/tm-monitor/monitor/node.go index b99870207ce..8bc15a15290 100644 --- a/tools/tm-monitor/monitor/node.go +++ b/tools/tm-monitor/monitor/node.go @@ -217,8 +217,7 @@ func (n *Node) checkIsValidator() { if err == nil { for _, v := range validators { key, err1 := n.getPubKey() - // TODO: use bytes.Equal - if err1 == nil && v.PubKey == key { + if err1 == nil && v.PubKey.Equals(key) { n.IsValidator = true } } diff --git a/types/block.go b/types/block.go index 304e8bdee4e..d0a1a826f55 100644 --- a/types/block.go +++ b/types/block.go @@ -13,6 +13,20 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) +const ( + // MaxHeaderBytes is a maximum header size (including amino overhead). + MaxHeaderBytes = 511 + + // MaxAminoOverheadForBlock - maximum amino overhead to encode a block (up to + // MaxBlockSizeBytes in size) not including it's parts except Data. + // + // Uvarint length of MaxBlockSizeBytes: 4 bytes + // 2 fields (2 embedded): 2 bytes + // Uvarint length of Data.Txs: 4 bytes + // Data.Txs field: 1 byte + MaxAminoOverheadForBlock = 11 +) + // Block defines the atomic unit of a Tendermint blockchain. 
// TODO: add Version byte type Block struct { @@ -23,20 +37,20 @@ type Block struct { LastCommit *Commit `json:"last_commit"` } -// MakeBlock returns a new block with an empty header, except what can be computed from itself. -// It populates the same set of fields validated by ValidateBasic -func MakeBlock(height int64, txs []Tx, commit *Commit, evidence []Evidence) *Block { +// MakeBlock returns a new block with an empty header, except what can be +// computed from itself. +// It populates the same set of fields validated by ValidateBasic. +func MakeBlock(height int64, txs []Tx, lastCommit *Commit, evidence []Evidence) *Block { block := &Block{ Header: Header{ Height: height, - Time: time.Now(), NumTxs: int64(len(txs)), }, Data: Data{ Txs: txs, }, Evidence: EvidenceData{Evidence: evidence}, - LastCommit: commit, + LastCommit: lastCommit, } block.fillHeader() return block @@ -53,10 +67,18 @@ func (b *Block) ValidateBasic() error { newTxs := int64(len(b.Data.Txs)) if b.NumTxs != newTxs { - return fmt.Errorf("Wrong Block.Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs) + return fmt.Errorf( + "Wrong Block.Header.NumTxs. Expected %v, got %v", + newTxs, + b.NumTxs, + ) } if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("Wrong Block.Header.LastCommitHash. Expected %v, got %v", b.LastCommitHash, b.LastCommit.Hash()) + return fmt.Errorf( + "Wrong Block.Header.LastCommitHash. Expected %v, got %v", + b.LastCommitHash, + b.LastCommit.Hash(), + ) } if b.Header.Height != 1 { if err := b.LastCommit.ValidateBasic(); err != nil { @@ -64,10 +86,18 @@ func (b *Block) ValidateBasic() error { } } if !bytes.Equal(b.DataHash, b.Data.Hash()) { - return fmt.Errorf("Wrong Block.Header.DataHash. Expected %v, got %v", b.DataHash, b.Data.Hash()) + return fmt.Errorf( + "Wrong Block.Header.DataHash. Expected %v, got %v", + b.DataHash, + b.Data.Hash(), + ) } if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return errors.New(cmn.Fmt("Wrong Block.Header.EvidenceHash. Expected %v, got %v", b.EvidenceHash, b.Evidence.Hash())) + return fmt.Errorf( + "Wrong Block.Header.EvidenceHash. 
Expected %v, got %v", + b.EvidenceHash, + b.Evidence.Hash(), + ) } return nil } @@ -177,29 +207,32 @@ func (b *Block) StringShort() string { // Header defines the structure of a Tendermint block header // TODO: limit header size // NOTE: changes to the Header should be duplicated in the abci Header +// and in /docs/spec/blockchain/blockchain.md type Header struct { // basic block info - ChainID string `json:"chain_id"` - Height int64 `json:"height"` - Time time.Time `json:"time"` - NumTxs int64 `json:"num_txs"` + ChainID string `json:"chain_id"` + Height int64 `json:"height"` + Time time.Time `json:"time"` + NumTxs int64 `json:"num_txs"` + TotalTxs int64 `json:"total_txs"` // prev block info LastBlockID BlockID `json:"last_block_id"` - TotalTxs int64 `json:"total_txs"` // hashes of block data LastCommitHash cmn.HexBytes `json:"last_commit_hash"` // commit from validators from the last block DataHash cmn.HexBytes `json:"data_hash"` // transactions // hashes from the app output from the prev block - ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block - ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block - AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block - LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block + ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block + NextValidatorsHash cmn.HexBytes `json:"next_validators_hash"` // validators for the next block + ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block + LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block // consensus info - EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block + EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block + ProposerAddress Address `json:"proposer_address"` // original proposer of the block } // Hash returns the hash of the header. 
@@ -211,19 +244,21 @@ func (h *Header) Hash() cmn.HexBytes { return nil } return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "ChainID": aminoHasher(h.ChainID), - "Height": aminoHasher(h.Height), - "Time": aminoHasher(h.Time), - "NumTxs": aminoHasher(h.NumTxs), - "TotalTxs": aminoHasher(h.TotalTxs), - "LastBlockID": aminoHasher(h.LastBlockID), - "LastCommit": aminoHasher(h.LastCommitHash), - "Data": aminoHasher(h.DataHash), - "Validators": aminoHasher(h.ValidatorsHash), - "App": aminoHasher(h.AppHash), - "Consensus": aminoHasher(h.ConsensusHash), - "Results": aminoHasher(h.LastResultsHash), - "Evidence": aminoHasher(h.EvidenceHash), + "ChainID": aminoHasher(h.ChainID), + "Height": aminoHasher(h.Height), + "Time": aminoHasher(h.Time), + "NumTxs": aminoHasher(h.NumTxs), + "TotalTxs": aminoHasher(h.TotalTxs), + "LastBlockID": aminoHasher(h.LastBlockID), + "LastCommit": aminoHasher(h.LastCommitHash), + "Data": aminoHasher(h.DataHash), + "Validators": aminoHasher(h.ValidatorsHash), + "NextValidators": aminoHasher(h.NextValidatorsHash), + "App": aminoHasher(h.AppHash), + "Consensus": aminoHasher(h.ConsensusHash), + "Results": aminoHasher(h.LastResultsHash), + "Evidence": aminoHasher(h.EvidenceHash), + "Proposer": aminoHasher(h.ProposerAddress), }) } @@ -242,10 +277,12 @@ func (h *Header) StringIndented(indent string) string { %s LastCommit: %v %s Data: %v %s Validators: %v +%s NextValidators: %v %s App: %v %s Consensus: %v %s Results: %v %s Evidence: %v +%s Proposer: %v %s}#%v`, indent, h.ChainID, indent, h.Height, @@ -256,10 +293,12 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastCommitHash, indent, h.DataHash, indent, h.ValidatorsHash, + indent, h.NextValidatorsHash, indent, h.AppHash, indent, h.ConsensusHash, indent, h.LastResultsHash, indent, h.EvidenceHash, + indent, h.ProposerAddress, indent, h.Hash()) } @@ -353,6 +392,7 @@ func (commit *Commit) IsCommit() bool { } // ValidateBasic performs basic validation that doesn't involve state data. +// Does not actually check the cryptographic signatures. func (commit *Commit) ValidateBasic() error { if commit.BlockID.IsZero() { return errors.New("Commit cannot be for nil block") @@ -362,23 +402,23 @@ func (commit *Commit) ValidateBasic() error { } height, round := commit.Height(), commit.Round() - // validate the precommits + // Validate the precommits. for _, precommit := range commit.Precommits { // It's OK for precommits to be missing. if precommit == nil { continue } - // Ensure that all votes are precommits + // Ensure that all votes are precommits. if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", precommit.Type) } - // Ensure that all heights are the same + // Ensure that all heights are the same. if precommit.Height != height { return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v", height, precommit.Height) } - // Ensure that all rounds are the same + // Ensure that all rounds are the same. if precommit.Round != round { return fmt.Errorf("Invalid commit precommit round. 
Expected %v, got %v", round, precommit.Round) @@ -413,19 +453,77 @@ func (commit *Commit) StringIndented(indent string) string { } return fmt.Sprintf(`Commit{ %s BlockID: %v -%s Precommits: %v +%s Precommits: +%s %v %s}#%v`, indent, commit.BlockID, - indent, strings.Join(precommitStrings, "\n"+indent+" "), + indent, + indent, strings.Join(precommitStrings, "\n"+indent+" "), indent, commit.hash) } //----------------------------------------------------------------------------- -// SignedHeader is a header along with the commits that prove it +// SignedHeader is a header along with the commits that prove it. type SignedHeader struct { - Header *Header `json:"header"` - Commit *Commit `json:"commit"` + *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +// ValidateBasic does basic consistency checks and makes sure the header +// and commit are consistent. +// +// NOTE: This does not actually check the cryptographic signatures. Make +// sure to use a Verifier to validate the signatures actually provide a +// significantly strong proof for this header's validity. +func (sh SignedHeader) ValidateBasic(chainID string) error { + + // Make sure the header is consistent with the commit. + if sh.Header == nil { + return errors.New("SignedHeader missing header.") + } + if sh.Commit == nil { + return errors.New("SignedHeader missing commit (precommit votes).") + } + // Check ChainID. + if sh.ChainID != chainID { + return fmt.Errorf("Header belongs to another chain '%s' not '%s'", + sh.ChainID, chainID) + } + // Check Height. + if sh.Commit.Height() != sh.Height { + return fmt.Errorf("SignedHeader header and commit height mismatch: %v vs %v", + sh.Height, sh.Commit.Height()) + } + // Check Hash. + hhash := sh.Hash() + chash := sh.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return fmt.Errorf("SignedHeader commit signs block %X, header is block %X", + chash, hhash) + } + // ValidateBasic on the Commit. + err := sh.Commit.ValidateBasic() + if err != nil { + return cmn.ErrorWrap(err, "commit.ValidateBasic failed during SignedHeader.ValidateBasic") + } + return nil +} + +func (sh SignedHeader) String() string { + return sh.StringIndented("") +} + +// StringIndented returns a string representation of the SignedHeader. 
+
+// StringIndented returns a string representation of the SignedHeader.
+func (sh SignedHeader) StringIndented(indent string) string {
+	return fmt.Sprintf(`SignedHeader{
+%s  %v
+%s  %v
+%s}`,
+		indent, sh.Header.StringIndented(indent+"  "),
+		indent, sh.Commit.StringIndented(indent+"  "),
+		indent)
 }
 
 //-----------------------------------------------------------------------------
diff --git a/types/block_test.go b/types/block_test.go
index 50695c84b84..c2a73bf880b 100644
--- a/types/block_test.go
+++ b/types/block_test.go
@@ -1,12 +1,15 @@
 package types
 
 import (
+	"math"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
 	"github.com/tendermint/tendermint/crypto"
+	"github.com/tendermint/tendermint/crypto/tmhash"
 	cmn "github.com/tendermint/tendermint/libs/common"
 )
 
@@ -44,6 +47,7 @@ func TestBlockValidateBasic(t *testing.T) {
 
 	block := MakeBlock(h, txs, commit, evList)
 	require.NotNil(t, block)
+	block.ProposerAddress = valSet.GetProposer().Address
 
 	// proper block must pass
 	err = block.ValidateBasic()
@@ -104,7 +108,6 @@ func TestBlockMakePartSet(t *testing.T) {
 
 func TestBlockMakePartSetWithEvidence(t *testing.T) {
 	assert.Nil(t, (*Block)(nil).MakePartSet(2))
-	txs := []Tx{Tx("foo"), Tx("bar")}
 	lastID := makeBlockIDRandom()
 	h := int64(3)
 
@@ -115,9 +118,9 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) {
 	ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address)
 	evList := []Evidence{ev}
 
-	partSet := MakeBlock(h, txs, commit, evList).MakePartSet(1024)
+	partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(1024)
 	assert.NotNil(t, partSet)
-	assert.Equal(t, 3, partSet.Total())
+	assert.Equal(t, 2, partSet.Total())
 }
 
 func TestBlockHashesTo(t *testing.T) {
@@ -158,16 +161,16 @@ func TestBlockString(t *testing.T) {
 }
 
 func makeBlockIDRandom() BlockID {
-	blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
+	blockHash, blockPartsHeader := crypto.CRandBytes(tmhash.Size), PartSetHeader{123, crypto.CRandBytes(tmhash.Size)}
 	return BlockID{blockHash, blockPartsHeader}
 }
 
-func makeBlockID(hash string, partSetSize int, partSetHash string) BlockID {
+func makeBlockID(hash []byte, partSetSize int, partSetHash []byte) BlockID {
 	return BlockID{
-		Hash: []byte(hash),
+		Hash: hash,
 		PartsHeader: PartSetHeader{
 			Total: partSetSize,
-			Hash:  []byte(partSetHash),
+			Hash:  partSetHash,
 		},
 	}
 
@@ -232,6 +235,40 @@ func TestCommitValidateBasic(t *testing.T) {
 	assert.Error(t, commit.ValidateBasic())
 }
 
+func TestMaxHeaderBytes(t *testing.T) {
+	// Construct a UTF-8 string of MaxChainIDLen length using the supplementary
+	// characters.
+	// Each supplementary character takes 4 bytes.
+ // http://www.i18nguy.com/unicode/supplementary-test.html + maxChainID := "" + for i := 0; i < MaxChainIDLen; i++ { + maxChainID += "𠜎" + } + + h := Header{ + ChainID: maxChainID, + Height: math.MaxInt64, + Time: time.Now().UTC(), + NumTxs: math.MaxInt64, + TotalTxs: math.MaxInt64, + LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + ValidatorsHash: tmhash.Sum([]byte("validators_hash")), + NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: tmhash.Sum([]byte("proposer_address")), + } + + bz, err := cdc.MarshalBinary(h) + require.NoError(t, err) + + assert.Equal(t, MaxHeaderBytes, len(bz)) +} + func randCommit() *Commit { lastID := makeBlockIDRandom() h := int64(3) diff --git a/types/canonical_json.go b/types/canonical_json.go index aca9e9b799c..d8399ff196a 100644 --- a/types/canonical_json.go +++ b/types/canonical_json.go @@ -4,6 +4,7 @@ import ( "time" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) // Canonical json is amino's json for structs with fields in alphabetical order @@ -110,5 +111,5 @@ func CanonicalTime(t time.Time) string { // Note that sending time over amino resets it to // local time, we need to force UTC here, so the // signatures match - return t.UTC().Format(TimeFormat) + return tmtime.Canonical(t).Format(TimeFormat) } diff --git a/types/event_bus.go b/types/event_bus.go index b4965feee67..d11c6520532 100644 --- a/types/event_bus.go +++ b/types/event_bus.go @@ -71,34 +71,32 @@ func (b *EventBus) Publish(eventType string, eventData TMEventData) error { return nil } -//--- block, tx, and vote events - -func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error { - return b.Publish(EventNewBlock, event) +func (b *EventBus) PublishEventNewBlock(data EventDataNewBlock) error { + return b.Publish(EventNewBlock, data) } -func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error { - return b.Publish(EventNewBlockHeader, event) +func (b *EventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { + return b.Publish(EventNewBlockHeader, data) } -func (b *EventBus) PublishEventVote(event EventDataVote) error { - return b.Publish(EventVote, event) +func (b *EventBus) PublishEventVote(data EventDataVote) error { + return b.Publish(EventVote, data) } // PublishEventTx publishes tx event with tags from Result. Note it will add // predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names // will be overwritten. 
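Because PublishEventTx always stamps the EventTypeKey and TxHashKey tags before publishing, a subscriber can filter on one specific transaction without any cooperation from the tag producer. A sketch of the subscriber side, following the Subscribe call shape from event_bus_test.go and EventQueryTxFor from events.go (the subscriber ID and channel handling are illustrative):

```go
package example

import (
	"context"

	"github.com/tendermint/tendermint/types"
)

// watchTx subscribes to events for a single transaction, relying on the
// tx hash tag that PublishEventTx attaches to every EventDataTx.
func watchTx(bus *types.EventBus, tx types.Tx) (chan interface{}, error) {
	ch := make(chan interface{})
	// EventQueryTxFor builds a pubsub query matching the Tx event type
	// and this transaction's hash tag.
	if err := bus.Subscribe(context.Background(), "tx-watcher", types.EventQueryTxFor(tx), ch); err != nil {
		return nil, err
	}
	return ch, nil
}
```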
-func (b *EventBus) PublishEventTx(event EventDataTx) error { +func (b *EventBus) PublishEventTx(data EventDataTx) error { // no explicit deadline for publishing events ctx := context.Background() tags := make(map[string]string) // validate and fill tags from tx result - for _, tag := range event.Result.Tags { + for _, tag := range data.Result.Tags { // basic validation if len(tag.Key) == 0 { - b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx) + b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", data.Tx) continue } tags[string(tag.Key)] = string(tag.Value) @@ -109,55 +107,57 @@ func (b *EventBus) PublishEventTx(event EventDataTx) error { tags[EventTypeKey] = EventTx logIfTagExists(TxHashKey, tags, b.Logger) - tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash()) + tags[TxHashKey] = fmt.Sprintf("%X", data.Tx.Hash()) logIfTagExists(TxHeightKey, tags, b.Logger) - tags[TxHeightKey] = fmt.Sprintf("%d", event.Height) + tags[TxHeightKey] = fmt.Sprintf("%d", data.Height) - b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags)) + b.pubsub.PublishWithTags(ctx, data, tmpubsub.NewTagMap(tags)) return nil } -func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error { - return b.Publish(EventProposalHeartbeat, event) +func (b *EventBus) PublishEventProposalHeartbeat(data EventDataProposalHeartbeat) error { + return b.Publish(EventProposalHeartbeat, data) } -//--- EventDataRoundState events +func (b *EventBus) PublishEventNewRoundStep(data EventDataRoundState) error { + return b.Publish(EventNewRoundStep, data) +} -func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error { - return b.Publish(EventNewRoundStep, event) +func (b *EventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { + return b.Publish(EventTimeoutPropose, data) } -func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error { - return b.Publish(EventTimeoutPropose, event) +func (b *EventBus) PublishEventTimeoutWait(data EventDataRoundState) error { + return b.Publish(EventTimeoutWait, data) } -func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error { - return b.Publish(EventTimeoutWait, event) +func (b *EventBus) PublishEventNewRound(data EventDataRoundState) error { + return b.Publish(EventNewRound, data) } -func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error { - return b.Publish(EventNewRound, event) +func (b *EventBus) PublishEventCompleteProposal(data EventDataRoundState) error { + return b.Publish(EventCompleteProposal, data) } -func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error { - return b.Publish(EventCompleteProposal, event) +func (b *EventBus) PublishEventPolka(data EventDataRoundState) error { + return b.Publish(EventPolka, data) } -func (b *EventBus) PublishEventPolka(event EventDataRoundState) error { - return b.Publish(EventPolka, event) +func (b *EventBus) PublishEventUnlock(data EventDataRoundState) error { + return b.Publish(EventUnlock, data) } -func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error { - return b.Publish(EventUnlock, event) +func (b *EventBus) PublishEventRelock(data EventDataRoundState) error { + return b.Publish(EventRelock, data) } -func (b *EventBus) PublishEventRelock(event EventDataRoundState) error { - return b.Publish(EventRelock, event) +func (b *EventBus) PublishEventLock(data EventDataRoundState) error { + return b.Publish(EventLock, data) } -func (b *EventBus) 
PublishEventLock(event EventDataRoundState) error { - return b.Publish(EventLock, event) +func (b *EventBus) PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { + return b.Publish(EventValidatorSetUpdates, data) } func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { diff --git a/types/event_bus_test.go b/types/event_bus_test.go index 907c69d3170..f0e825d5dcc 100644 --- a/types/event_bus_test.go +++ b/types/event_bus_test.go @@ -22,7 +22,7 @@ func TestEventBusPublishEventTx(t *testing.T) { defer eventBus.Stop() tx := Tx("foo") - result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}} + result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{Key: []byte("baz"), Value: []byte("1")}}} txEventsCh := make(chan interface{}) @@ -68,7 +68,7 @@ func TestEventBusPublish(t *testing.T) { err = eventBus.Subscribe(context.Background(), "test", tmquery.Empty{}, eventsCh) require.NoError(t, err) - const numEventsExpected = 14 + const numEventsExpected = 15 done := make(chan struct{}) go func() { numEvents := 0 @@ -108,6 +108,8 @@ func TestEventBusPublish(t *testing.T) { require.NoError(t, err) err = eventBus.PublishEventLock(EventDataRoundState{}) require.NoError(t, err) + err = eventBus.PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates{}) + require.NoError(t, err) select { case <-done: diff --git a/types/events.go b/types/events.go index c26fecb71dd..09f7216e9ca 100644 --- a/types/events.go +++ b/types/events.go @@ -8,42 +8,34 @@ import ( tmquery "github.com/tendermint/tendermint/libs/pubsub/query" ) -// Reserved event types +// Reserved event types (alphabetically sorted). const ( - EventCompleteProposal = "CompleteProposal" - EventLock = "Lock" - EventNewBlock = "NewBlock" - EventNewBlockHeader = "NewBlockHeader" - EventNewRound = "NewRound" - EventNewRoundStep = "NewRoundStep" - EventPolka = "Polka" - EventRelock = "Relock" - EventTimeoutPropose = "TimeoutPropose" - EventTimeoutWait = "TimeoutWait" - EventTx = "Tx" - EventUnlock = "Unlock" - EventVote = "Vote" - EventProposalHeartbeat = "ProposalHeartbeat" + EventCompleteProposal = "CompleteProposal" + EventLock = "Lock" + EventNewBlock = "NewBlock" + EventNewBlockHeader = "NewBlockHeader" + EventNewRound = "NewRound" + EventNewRoundStep = "NewRoundStep" + EventPolka = "Polka" + EventProposalHeartbeat = "ProposalHeartbeat" + EventRelock = "Relock" + EventTimeoutPropose = "TimeoutPropose" + EventTimeoutWait = "TimeoutWait" + EventTx = "Tx" + EventUnlock = "Unlock" + EventValidatorSetUpdates = "ValidatorSetUpdates" + EventVote = "Vote" ) /////////////////////////////////////////////////////////////////////////////// // ENCODING / DECODING /////////////////////////////////////////////////////////////////////////////// -// implements events.EventData +// TMEventData implements events.EventData. 
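With the AssertIsTMEventData marker method removed in the next hunk, TMEventData becomes an empty interface, and only the amino registrations in RegisterEventDatas tie concrete event payloads to it. Registering an additional payload would mirror those calls; in this sketch, MyEventData and its route string are hypothetical, and the go-amino import path is assumed to follow this repo's convention:

```go
package example

import (
	amino "github.com/tendermint/go-amino"
)

// MyEventData is a hypothetical payload: any struct now satisfies the
// empty TMEventData interface, so the amino registration is what matters.
type MyEventData struct {
	Info string `json:"info"`
}

// registerCustomEvent mirrors the RegisterConcrete calls in
// RegisterEventDatas; the route string is illustrative.
func registerCustomEvent(cdc *amino.Codec) {
	cdc.RegisterConcrete(MyEventData{}, "tendermint/event/My", nil)
}
```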
type TMEventData interface { - AssertIsTMEventData() // empty interface } -func (_ EventDataNewBlock) AssertIsTMEventData() {} -func (_ EventDataNewBlockHeader) AssertIsTMEventData() {} -func (_ EventDataTx) AssertIsTMEventData() {} -func (_ EventDataRoundState) AssertIsTMEventData() {} -func (_ EventDataVote) AssertIsTMEventData() {} -func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {} -func (_ EventDataString) AssertIsTMEventData() {} - func RegisterEventDatas(cdc *amino.Codec) { cdc.RegisterInterface((*TMEventData)(nil), nil) cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil) @@ -52,6 +44,7 @@ func RegisterEventDatas(cdc *amino.Codec) { cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil) cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil) + cdc.RegisterConcrete(EventDataValidatorSetUpdates{}, "tendermint/event/ValidatorSetUpdates", nil) cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) } @@ -92,6 +85,10 @@ type EventDataVote struct { type EventDataString string +type EventDataValidatorSetUpdates struct { + ValidatorUpdates []*Validator `json:"validator_updates"` +} + /////////////////////////////////////////////////////////////////////////////// // PUBSUB /////////////////////////////////////////////////////////////////////////////// @@ -108,20 +105,21 @@ const ( ) var ( - EventQueryNewBlock = QueryForEvent(EventNewBlock) - EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) - EventQueryNewRound = QueryForEvent(EventNewRound) - EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) - EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) - EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) - EventQueryPolka = QueryForEvent(EventPolka) - EventQueryUnlock = QueryForEvent(EventUnlock) - EventQueryLock = QueryForEvent(EventLock) - EventQueryRelock = QueryForEvent(EventRelock) - EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) - EventQueryVote = QueryForEvent(EventVote) - EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) - EventQueryTx = QueryForEvent(EventTx) + EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) + EventQueryLock = QueryForEvent(EventLock) + EventQueryNewBlock = QueryForEvent(EventNewBlock) + EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) + EventQueryNewRound = QueryForEvent(EventNewRound) + EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) + EventQueryPolka = QueryForEvent(EventPolka) + EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) + EventQueryRelock = QueryForEvent(EventRelock) + EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) + EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) + EventQueryTx = QueryForEvent(EventTx) + EventQueryUnlock = QueryForEvent(EventUnlock) + EventQueryValidatorSetUpdates = QueryForEvent(EventValidatorSetUpdates) + EventQueryVote = QueryForEvent(EventVote) ) func EventQueryTxFor(tx Tx) tmpubsub.Query { @@ -137,6 +135,7 @@ type BlockEventPublisher interface { PublishEventNewBlock(block EventDataNewBlock) error PublishEventNewBlockHeader(header EventDataNewBlockHeader) error PublishEventTx(EventDataTx) error + PublishEventValidatorSetUpdates(EventDataValidatorSetUpdates) error } type TxEventPublisher interface { diff --git a/types/evidence.go b/types/evidence.go index 92675868fd6..8377fcd7fdb 100644 --- 
a/types/evidence.go
+++ b/types/evidence.go
@@ -10,6 +10,11 @@ import (
 	"github.com/tendermint/tendermint/crypto/merkle"
 )
 
+const (
+	// MaxEvidenceBytes is the maximum size of any evidence (including amino overhead).
+	MaxEvidenceBytes = 440
+)
+
 // ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid.
 type ErrEvidenceInvalid struct {
 	Evidence Evidence
diff --git a/types/evidence_test.go b/types/evidence_test.go
index 54eba01cde0..68c683518c9 100644
--- a/types/evidence_test.go
+++ b/types/evidence_test.go
@@ -1,9 +1,13 @@
 package types
 
 import (
+	"math"
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"github.com/tendermint/tendermint/crypto/secp256k1"
+	"github.com/tendermint/tendermint/crypto/tmhash"
 )
 
 type voteData struct {
@@ -31,10 +35,11 @@ func makeVote(val PrivValidator, chainID string, valIndex int, height int64, rou
 func TestEvidence(t *testing.T) {
 	val := NewMockPV()
 	val2 := NewMockPV()
-	blockID := makeBlockID("blockhash", 1000, "partshash")
-	blockID2 := makeBlockID("blockhash2", 1000, "partshash")
-	blockID3 := makeBlockID("blockhash", 10000, "partshash")
-	blockID4 := makeBlockID("blockhash", 10000, "partshash2")
+
+	blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash"))
+	blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash"))
+	blockID3 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash"))
+	blockID4 := makeBlockID([]byte("blockhash"), 10000, []byte("partshash2"))
 
 	const chainID = "mychain"
 
@@ -89,10 +94,27 @@ func TestEvidenceList(t *testing.T) {
 	assert.False(t, evl.Has(&DuplicateVoteEvidence{}))
 }
 
+func TestMaxEvidenceBytes(t *testing.T) {
+	val := NewMockPV()
+	blockID := makeBlockID(tmhash.Sum([]byte("blockhash")), math.MaxInt64, tmhash.Sum([]byte("partshash")))
+	blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash")))
+	const chainID = "mychain"
+	ev := &DuplicateVoteEvidence{
+		PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because its pubkey is longer
+		VoteA:  makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID),
+		VoteB:  makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2),
+	}
+
+	bz, err := cdc.MarshalBinary(ev)
+	require.NoError(t, err)
+
+	assert.Equal(t, MaxEvidenceBytes, len(bz))
+}
+
 func randomDuplicatedVoteEvidence() *DuplicateVoteEvidence {
 	val := NewMockPV()
-	blockID := makeBlockID("blockhash", 1000, "partshash")
-	blockID2 := makeBlockID("blockhash2", 1000, "partshash")
+	blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash"))
+	blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash"))
 	const chainID = "mychain"
 	return &DuplicateVoteEvidence{
 		VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID),
diff --git a/types/genesis.go b/types/genesis.go
index 220ee0e0efc..4cf3b730986 100644
--- a/types/genesis.go
+++ b/types/genesis.go
@@ -2,11 +2,18 @@ package types
 
 import (
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
 	"time"
 
 	"github.com/tendermint/tendermint/crypto"
 	cmn "github.com/tendermint/tendermint/libs/common"
+	tmtime "github.com/tendermint/tendermint/types/time"
+)
+
+const (
+	// MaxChainIDLen is the maximum length of the chain ID.
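TestMaxEvidenceBytes above pins MaxEvidenceBytes by amino-encoding a worst-case DuplicateVoteEvidence, so consumers can bound incoming evidence before doing any decoding work. A minimal guard sketch (the function name and error message are illustrative, not part of this change):

```go
package example

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

// checkEvidenceSize rejects an amino-encoded evidence blob that exceeds
// the documented worst case before it is decoded.
func checkEvidenceSize(bz []byte) error {
	if len(bz) > types.MaxEvidenceBytes {
		return fmt.Errorf("evidence is %d bytes, max is %d", len(bz), types.MaxEvidenceBytes)
	}
	return nil
}
```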
+ MaxChainIDLen = 50 ) //------------------------------------------------------------ @@ -24,7 +31,7 @@ type GenesisDoc struct { GenesisTime time.Time `json:"genesis_time"` ChainID string `json:"chain_id"` ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` - Validators []GenesisValidator `json:"validators"` + Validators []GenesisValidator `json:"validators,omitempty"` AppHash cmn.HexBytes `json:"app_hash"` AppState json.RawMessage `json:"app_state,omitempty"` } @@ -51,10 +58,12 @@ func (genDoc *GenesisDoc) ValidatorHash() []byte { // ValidateAndComplete checks that all necessary fields are present // and fills in defaults for optional fields left empty func (genDoc *GenesisDoc) ValidateAndComplete() error { - if genDoc.ChainID == "" { return cmn.NewError("Genesis doc must include non-empty chain_id") } + if len(genDoc.ChainID) > MaxChainIDLen { + return cmn.NewError(fmt.Sprintf("chain_id in genesis doc is too long (max: %d)", MaxChainIDLen)) + } if genDoc.ConsensusParams == nil { genDoc.ConsensusParams = DefaultConsensusParams() @@ -64,10 +73,6 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { } } - if len(genDoc.Validators) == 0 { - return cmn.NewError("The genesis file must have at least one validator") - } - for _, v := range genDoc.Validators { if v.Power == 0 { return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v) @@ -75,7 +80,7 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { } if genDoc.GenesisTime.IsZero() { - genDoc.GenesisTime = time.Now() + genDoc.GenesisTime = tmtime.Now() } return nil @@ -107,7 +112,7 @@ func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { } genDoc, err := GenesisDocFromJSON(jsonBlob) if err != nil { - return nil, cmn.ErrorWrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile)) + return nil, cmn.ErrorWrap(err, fmt.Sprintf("Error reading GenesisDoc at %v", genDocFile)) } return genDoc, nil } diff --git a/types/genesis_test.go b/types/genesis_test.go index 925bba79078..c0cfcdeaa84 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -4,25 +4,24 @@ import ( "io/ioutil" "os" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestGenesisBad(t *testing.T) { // test some bad ones from raw json testCases := [][]byte{ - []byte{}, // empty - []byte{1, 1, 1, 1, 1}, // junk - []byte(`{}`), // empty - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[{}]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":null}`), // missing validators - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), // missing chain_id + []byte{}, // empty + []byte{1, 1, 1, 1, 1}, // junk + []byte(`{}`), // empty + []byte(`{"chain_id":"mychain","validators":[{}]}`), // invalid validator + // missing pub_key type + []byte(`{"validators":[{"pub_key":{"value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), + // missing chain_id + []byte(`{"validators":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":"10","name":""}]}`), } for _, testCase 
:= range testCases { @@ -62,6 +61,19 @@ func TestGenesisGood(t *testing.T) { assert.NoError(t, err, "error marshalling genDoc") genDoc, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") + + // Genesis doc from raw json + missingValidatorsTestCases := [][]byte{ + []byte(`{"chain_id":"mychain"}`), // missing validators + []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators + []byte(`{"chain_id":"mychain","validators":null}`), // nil validator + []byte(`{"chain_id":"mychain"}`), // missing validators + } + + for _, tc := range missingValidatorsTestCases { + _, err := GenesisDocFromJSON(tc) + assert.NoError(t, err) + } } func TestGenesisSaveAs(t *testing.T) { @@ -98,7 +110,7 @@ func TestGenesisValidatorHash(t *testing.T) { func randomGenesisDoc() *GenesisDoc { return &GenesisDoc{ - GenesisTime: time.Now().UTC(), + GenesisTime: tmtime.Now(), ChainID: "abc", Validators: []GenesisValidator{{ed25519.GenPrivKey().PubKey(), 10, "myval"}}, ConsensusParams: DefaultConsensusParams(), diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go index cd1eab8cd36..93694da47b0 100644 --- a/types/nop_event_bus.go +++ b/types/nop_event_bus.go @@ -20,58 +20,58 @@ func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error return nil } -//--- block, tx, and vote events - -func (NopEventBus) PublishEventNewBlock(block EventDataNewBlock) error { +func (NopEventBus) PublishEventNewBlock(data EventDataNewBlock) error { return nil } -func (NopEventBus) PublishEventNewBlockHeader(header EventDataNewBlockHeader) error { +func (NopEventBus) PublishEventNewBlockHeader(data EventDataNewBlockHeader) error { return nil } -func (NopEventBus) PublishEventVote(vote EventDataVote) error { +func (NopEventBus) PublishEventVote(data EventDataVote) error { return nil } -func (NopEventBus) PublishEventTx(tx EventDataTx) error { +func (NopEventBus) PublishEventTx(data EventDataTx) error { return nil } -//--- EventDataRoundState events +func (NopEventBus) PublishEventNewRoundStep(data EventDataRoundState) error { + return nil +} -func (NopEventBus) PublishEventNewRoundStep(rs EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutPropose(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutPropose(rs EventDataRoundState) error { +func (NopEventBus) PublishEventTimeoutWait(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventTimeoutWait(rs EventDataRoundState) error { +func (NopEventBus) PublishEventNewRound(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventNewRound(rs EventDataRoundState) error { +func (NopEventBus) PublishEventCompleteProposal(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventCompleteProposal(rs EventDataRoundState) error { +func (NopEventBus) PublishEventPolka(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventPolka(rs EventDataRoundState) error { +func (NopEventBus) PublishEventUnlock(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventUnlock(rs EventDataRoundState) error { +func (NopEventBus) PublishEventRelock(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventRelock(rs EventDataRoundState) error { +func (NopEventBus) PublishEventLock(data EventDataRoundState) error { return nil } -func (NopEventBus) PublishEventLock(rs EventDataRoundState) error { +func (NopEventBus) 
PublishEventValidatorSetUpdates(data EventDataValidatorSetUpdates) error { return nil } diff --git a/types/params.go b/types/params.go index 3056c82a038..77f68eb7b3e 100644 --- a/types/params.go +++ b/types/params.go @@ -22,9 +22,8 @@ type ConsensusParams struct { // BlockSize contain limits on the block size. type BlockSize struct { - MaxBytes int `json:"max_bytes"` // NOTE: must not be 0 nor greater than 100MB - MaxTxs int `json:"max_txs"` - MaxGas int64 `json:"max_gas"` + MaxBytes int `json:"max_txs_bytes"` // NOTE: must not be 0 nor greater than 100MB + MaxGas int64 `json:"max_gas"` } // TxSize contain limits on the tx size. @@ -56,9 +55,8 @@ func DefaultConsensusParams() *ConsensusParams { // DefaultBlockSize returns a default BlockSize. func DefaultBlockSize() BlockSize { return BlockSize{ - MaxBytes: 22020096, // 21MB - MaxTxs: 10000, - MaxGas: -1, + MaxBytes: 22020096, // 21MB + MaxGas: -1, } } @@ -110,7 +108,6 @@ func (params *ConsensusParams) Hash() []byte { "block_gossip_part_size_bytes": aminoHasher(params.BlockGossip.BlockPartSizeBytes), "block_size_max_bytes": aminoHasher(params.BlockSize.MaxBytes), "block_size_max_gas": aminoHasher(params.BlockSize.MaxGas), - "block_size_max_txs": aminoHasher(params.BlockSize.MaxTxs), "tx_size_max_bytes": aminoHasher(params.TxSize.MaxBytes), "tx_size_max_gas": aminoHasher(params.TxSize.MaxGas), }) @@ -132,9 +129,6 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar if params2.BlockSize.MaxBytes > 0 { res.BlockSize.MaxBytes = int(params2.BlockSize.MaxBytes) } - if params2.BlockSize.MaxTxs > 0 { - res.BlockSize.MaxTxs = int(params2.BlockSize.MaxTxs) - } if params2.BlockSize.MaxGas > 0 { res.BlockSize.MaxGas = params2.BlockSize.MaxGas } diff --git a/types/params_test.go b/types/params_test.go index e8e13dba0ff..119109ce0bd 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -9,9 +9,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" ) -func newConsensusParams(blockSize, partSize int) ConsensusParams { +func newConsensusParams(txsBytes, partSize int) ConsensusParams { return ConsensusParams{ - BlockSize: BlockSize{MaxBytes: blockSize}, + BlockSize: BlockSize{MaxBytes: txsBytes}, BlockGossip: BlockGossip{BlockPartSizeBytes: partSize}, } } @@ -33,22 +33,19 @@ func TestConsensusParamsValidation(t *testing.T) { {newConsensusParams(101*1024*1024, 400), false}, {newConsensusParams(1024*1024*1024, 400), false}, } - for _, testCase := range testCases { - if testCase.valid { - assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") + for _, tc := range testCases { + if tc.valid { + assert.NoError(t, tc.params.Validate(), "expected no error for valid params") } else { - assert.Error(t, testCase.params.Validate(), "expected error for non valid params") + assert.Error(t, tc.params.Validate(), "expected error for non valid params") } } } -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) ConsensusParams { - +func makeParams(txsBytes, blockGas, txBytes, txGas, partSize int) ConsensusParams { return ConsensusParams{ BlockSize: BlockSize{ - MaxBytes: blockBytes, - MaxTxs: blockTx, + MaxBytes: txsBytes, MaxGas: int64(blockGas), }, TxSize: TxSize{ @@ -63,14 +60,11 @@ func makeParams(blockBytes, blockTx, blockGas, txBytes, func TestConsensusParamsHash(t *testing.T) { params := []ConsensusParams{ - makeParams(1, 2, 3, 4, 5, 6), - makeParams(7, 2, 3, 4, 5, 6), - makeParams(1, 7, 3, 4, 5, 6), - makeParams(1, 2, 7, 4, 5, 6), - makeParams(1, 2, 3, 7, 5, 
6), - makeParams(1, 2, 3, 4, 7, 6), - makeParams(1, 2, 3, 4, 5, 7), - makeParams(6, 5, 4, 3, 2, 1), + makeParams(6, 2, 3, 4, 5), + makeParams(1, 6, 3, 4, 5), + makeParams(1, 2, 6, 4, 5), + makeParams(1, 2, 3, 6, 5), + makeParams(1, 2, 3, 4, 6), } hashes := make([][]byte, len(params)) @@ -96,18 +90,17 @@ func TestConsensusParamsUpdate(t *testing.T) { }{ // empty updates { - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, 4, 5), &abci.ConsensusParams{}, - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, 4, 5), }, // negative BlockPartSizeBytes { - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, 4, 5), &abci.ConsensusParams{ BlockSize: &abci.BlockSize{ MaxBytes: -100, - MaxTxs: -200, - MaxGas: -300, + MaxGas: -200, }, TxSize: &abci.TxSize{ MaxBytes: -400, @@ -117,26 +110,25 @@ func TestConsensusParamsUpdate(t *testing.T) { BlockPartSizeBytes: -600, }, }, - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, 4, 5), }, // fine updates { - makeParams(1, 2, 3, 4, 5, 6), + makeParams(1, 2, 3, 4, 5), &abci.ConsensusParams{ BlockSize: &abci.BlockSize{ MaxBytes: 100, - MaxTxs: 200, - MaxGas: 300, + MaxGas: 200, }, TxSize: &abci.TxSize{ - MaxBytes: 400, - MaxGas: 500, + MaxBytes: 300, + MaxGas: 400, }, BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: 600, + BlockPartSizeBytes: 500, }, }, - makeParams(100, 200, 300, 400, 500, 600), + makeParams(100, 200, 300, 400, 500), }, } for _, tc := range testCases { diff --git a/types/proposal.go b/types/proposal.go index 81a5e2c33f5..97e0dca370b 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -6,6 +6,7 @@ import ( "time" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) var ( @@ -34,7 +35,7 @@ func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRou return &Proposal{ Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), BlockPartsHeader: blockPartsHeader, POLRound: polRound, POLBlockID: polBlockID, diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index 805828f8227..ab1c66cfb9d 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -1,18 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. -// source: block.proto +// source: types/proto3/block.proto -/* -Package proto3 is a generated protocol buffer package. 
- -It is generated from these files: - block.proto - -It has these top-level messages: - PartSetHeader - BlockID - Header - Timestamp -*/ +//nolint package proto3 import proto "github.com/golang/protobuf/proto" @@ -31,14 +20,36 @@ var _ = math.Inf const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package type PartSetHeader struct { - Total int32 `protobuf:"zigzag32,1,opt,name=Total" json:"Total,omitempty"` - Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + Total int32 `protobuf:"zigzag32,1,opt,name=Total,proto3" json:"Total,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=Hash,proto3" json:"Hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } +func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } +func (*PartSetHeader) ProtoMessage() {} +func (*PartSetHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_block_c8c1dcbe91697ccd, []int{0} +} +func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PartSetHeader.Unmarshal(m, b) +} +func (m *PartSetHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PartSetHeader.Marshal(b, m, deterministic) +} +func (dst *PartSetHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartSetHeader.Merge(dst, src) +} +func (m *PartSetHeader) XXX_Size() int { + return xxx_messageInfo_PartSetHeader.Size(m) +} +func (m *PartSetHeader) XXX_DiscardUnknown() { + xxx_messageInfo_PartSetHeader.DiscardUnknown(m) } -func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } -func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } -func (*PartSetHeader) ProtoMessage() {} -func (*PartSetHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +var xxx_messageInfo_PartSetHeader proto.InternalMessageInfo func (m *PartSetHeader) GetTotal() int32 { if m != nil { @@ -55,14 +66,36 @@ func (m *PartSetHeader) GetHash() []byte { } type BlockID struct { - Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` - PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader" json:"PartsHeader,omitempty"` + Hash []byte `protobuf:"bytes,1,opt,name=Hash,proto3" json:"Hash,omitempty"` + PartsHeader *PartSetHeader `protobuf:"bytes,2,opt,name=PartsHeader,proto3" json:"PartsHeader,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockID) Reset() { *m = BlockID{} } +func (m *BlockID) String() string { return proto.CompactTextString(m) } +func (*BlockID) ProtoMessage() {} +func (*BlockID) Descriptor() ([]byte, []int) { + return fileDescriptor_block_c8c1dcbe91697ccd, []int{1} +} +func (m *BlockID) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockID.Unmarshal(m, b) +} +func (m *BlockID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockID.Marshal(b, m, deterministic) +} +func (dst *BlockID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockID.Merge(dst, src) +} +func (m *BlockID) XXX_Size() int { + return xxx_messageInfo_BlockID.Size(m) +} +func (m *BlockID) XXX_DiscardUnknown() { + xxx_messageInfo_BlockID.DiscardUnknown(m) } -func (m *BlockID) Reset() { *m = BlockID{} } -func (m *BlockID) String() string { return proto.CompactTextString(m) } -func (*BlockID) ProtoMessage() {} -func (*BlockID) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +var xxx_messageInfo_BlockID proto.InternalMessageInfo func (m *BlockID) GetHash() []byte { if m != nil { @@ -80,13 +113,13 @@ func (m *BlockID) GetPartsHeader() *PartSetHeader { type Header struct { // basic block info - ChainID string `protobuf:"bytes,1,opt,name=ChainID" json:"ChainID,omitempty"` - Height int64 `protobuf:"zigzag64,2,opt,name=Height" json:"Height,omitempty"` - Time *Timestamp `protobuf:"bytes,3,opt,name=Time" json:"Time,omitempty"` - NumTxs int64 `protobuf:"zigzag64,4,opt,name=NumTxs" json:"NumTxs,omitempty"` + ChainID string `protobuf:"bytes,1,opt,name=ChainID,proto3" json:"ChainID,omitempty"` + Height int64 `protobuf:"zigzag64,2,opt,name=Height,proto3" json:"Height,omitempty"` + Time *Timestamp `protobuf:"bytes,3,opt,name=Time,proto3" json:"Time,omitempty"` + NumTxs int64 `protobuf:"zigzag64,4,opt,name=NumTxs,proto3" json:"NumTxs,omitempty"` + TotalTxs int64 `protobuf:"zigzag64,5,opt,name=TotalTxs,proto3" json:"TotalTxs,omitempty"` // prev block info - LastBlockID *BlockID `protobuf:"bytes,5,opt,name=LastBlockID" json:"LastBlockID,omitempty"` - TotalTxs int64 `protobuf:"zigzag64,6,opt,name=TotalTxs" json:"TotalTxs,omitempty"` + LastBlockID *BlockID `protobuf:"bytes,6,opt,name=LastBlockID,proto3" json:"LastBlockID,omitempty"` // hashes of block data LastCommitHash []byte `protobuf:"bytes,7,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` DataHash []byte `protobuf:"bytes,8,opt,name=DataHash,proto3" json:"DataHash,omitempty"` @@ -96,13 +129,36 @@ type Header struct { AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info - EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` + ProposerAddress []byte `protobuf:"bytes,14,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_block_c8c1dcbe91697ccd, []int{2} +} +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (dst *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(dst, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) } -func (m *Header) Reset() { *m = Header{} } -func (m *Header) String() string { return proto.CompactTextString(m) } -func (*Header) ProtoMessage() {} -func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +var xxx_messageInfo_Header proto.InternalMessageInfo func (m *Header) GetChainID() string { if m != nil { @@ -132,18 +188,18 @@ func (m *Header) GetNumTxs() int64 { return 0 } -func (m *Header) GetLastBlockID() *BlockID { +func (m *Header) GetTotalTxs() int64 { if m != nil { - return m.LastBlockID + return m.TotalTxs } 
- return nil + return 0 } -func (m *Header) GetTotalTxs() int64 { +func (m *Header) GetLastBlockID() *BlockID { if m != nil { - return m.TotalTxs + return m.LastBlockID } - return 0 + return nil } func (m *Header) GetLastCommitHash() []byte { @@ -195,19 +251,48 @@ func (m *Header) GetEvidenceHash() []byte { return nil } +func (m *Header) GetProposerAddress() []byte { + if m != nil { + return m.ProposerAddress + } + return nil +} + // Timestamp wraps how amino encodes time. Note that this is different from the protobuf well-known type // protobuf/timestamp.proto in the sense that there seconds and nanos are varint encoded. See: // https://github.com/google/protobuf/blob/d2980062c859649523d5fd51d6b55ab310e47482/src/google/protobuf/timestamp.proto#L123-L135 // Also nanos do not get skipped if they are zero in amino. type Timestamp struct { - Seconds int64 `protobuf:"fixed64,1,opt,name=seconds" json:"seconds,omitempty"` - Nanos int32 `protobuf:"fixed32,2,opt,name=nanos" json:"nanos,omitempty"` + Seconds int64 `protobuf:"fixed64,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"fixed32,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_block_c8c1dcbe91697ccd, []int{3} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -230,32 +315,33 @@ func init() { proto.RegisterType((*Timestamp)(nil), "proto3.Timestamp") } -func init() { proto.RegisterFile("block.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 372 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x92, 0x4f, 0x6b, 0xe3, 0x30, - 0x10, 0xc5, 0xf1, 0xe6, 0xff, 0x38, 0xd9, 0x6c, 0x86, 0xdd, 0xc5, 0xf4, 0x14, 0x4c, 0x5b, 0x72, - 0x0a, 0xb4, 0x39, 0x94, 0xd2, 0x53, 0x9b, 0x14, 0x12, 0x28, 0xa5, 0xa8, 0x21, 0x77, 0x25, 0x16, - 0x8d, 0xa9, 0x2d, 0x19, 0x4b, 0x29, 0xfd, 0x7c, 0xfd, 0x64, 0x45, 0x23, 0xdb, 0x8d, 0x73, 0x4a, - 0xde, 0x9b, 0x37, 0xbf, 0x91, 0x47, 0x02, 0x7f, 0x9b, 0xa8, 0xdd, 0xfb, 0x34, 0xcb, 0x95, 0x51, - 0xd8, 0xa6, 0x9f, 0x59, 0x78, 0x0b, 0x83, 0x17, 0x9e, 0x9b, 0x57, 0x61, 0x96, 0x82, 0x47, 0x22, - 0xc7, 0xbf, 0xd0, 0x5a, 0x2b, 0xc3, 0x93, 0xc0, 0x1b, 0x7b, 0x93, 0x11, 0x73, 0x02, 0x11, 0x9a, - 0x4b, 0xae, 0xf7, 0xc1, 0xaf, 0xb1, 0x37, 0xe9, 0x33, 0xfa, 0x1f, 0x6e, 0xa0, 0xf3, 0x60, 0x89, - 0xab, 0x45, 0x55, 0xf6, 0x7e, 0xca, 0x78, 0x03, 0xbe, 0x25, 
0x6b, 0xc7, 0xa5, 0x4e, 0xff, 0xfa, - 0x9f, 0x1b, 0x3f, 0x9b, 0xd6, 0x86, 0xb2, 0xe3, 0x64, 0xf8, 0xd5, 0x80, 0x76, 0x71, 0x98, 0x00, - 0x3a, 0xf3, 0x3d, 0x8f, 0xe5, 0x6a, 0x41, 0xe8, 0x1e, 0x2b, 0x25, 0xfe, 0xb7, 0x99, 0xf8, 0x6d, - 0x6f, 0x08, 0x8c, 0xac, 0x50, 0x78, 0x01, 0xcd, 0x75, 0x9c, 0x8a, 0xa0, 0x41, 0xe3, 0x46, 0xe5, - 0x38, 0xeb, 0x69, 0xc3, 0xd3, 0x8c, 0x51, 0xd9, 0xb6, 0x3f, 0x1f, 0xd2, 0xf5, 0xa7, 0x0e, 0x9a, - 0xae, 0xdd, 0x29, 0xbc, 0x02, 0xff, 0x89, 0x6b, 0x53, 0x7c, 0x57, 0xd0, 0x22, 0xca, 0xb0, 0xa4, - 0x14, 0x36, 0x3b, 0xce, 0xe0, 0x19, 0x74, 0x69, 0x47, 0x16, 0xd6, 0x26, 0x58, 0xa5, 0xf1, 0x12, - 0x7e, 0xdb, 0xe8, 0x5c, 0xa5, 0x69, 0x6c, 0x68, 0x43, 0x1d, 0xda, 0xd0, 0x89, 0x6b, 0x19, 0x0b, - 0x6e, 0x38, 0x25, 0xba, 0x94, 0xa8, 0xb4, 0x65, 0x6c, 0x78, 0x12, 0x47, 0xdc, 0xa8, 0x5c, 0x53, - 0xa2, 0xe7, 0x18, 0x75, 0x17, 0xcf, 0x61, 0x30, 0x57, 0x52, 0x0b, 0xa9, 0x0f, 0x2e, 0x06, 0x14, - 0xab, 0x9b, 0x76, 0xa3, 0xf7, 0x59, 0x46, 0x75, 0x9f, 0xea, 0xa5, 0xc4, 0x09, 0x0c, 0xed, 0xa9, - 0x98, 0xd0, 0x87, 0xc4, 0x38, 0x42, 0x9f, 0x12, 0xa7, 0x36, 0x86, 0xd0, 0x7f, 0xfc, 0x88, 0x23, - 0x21, 0x77, 0x82, 0x62, 0x03, 0x8a, 0xd5, 0xbc, 0xf0, 0x0e, 0x7a, 0xd5, 0xce, 0xed, 0x50, 0x2d, - 0x76, 0x4a, 0x46, 0x9a, 0xae, 0xf1, 0x0f, 0x2b, 0xa5, 0x7d, 0x6d, 0x92, 0x4b, 0xa5, 0xe9, 0x16, - 0x87, 0xcc, 0x89, 0x6d, 0xf1, 0x38, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x74, 0x2f, 0xbd, - 0xb2, 0x02, 0x00, 0x00, +func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_block_c8c1dcbe91697ccd) } + +var fileDescriptor_block_c8c1dcbe91697ccd = []byte{ + // 395 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x52, 0x4b, 0x8b, 0xdb, 0x30, + 0x10, 0xc6, 0xcd, 0x7b, 0x9c, 0x47, 0x23, 0xda, 0x22, 0x7a, 0x0a, 0xa6, 0x2d, 0x39, 0x25, 0xb4, + 0x39, 0x94, 0xd2, 0x53, 0x9a, 0x14, 0x12, 0x28, 0x25, 0x68, 0x43, 0xee, 0x4a, 0x2c, 0x36, 0x66, + 0x6d, 0xcb, 0x78, 0x94, 0x65, 0xf7, 0x3f, 0xef, 0x8f, 0x58, 0x34, 0xb2, 0xbd, 0x71, 0x6e, 0xfe, + 0x1e, 0xfa, 0x3e, 0x79, 0x46, 0xc0, 0xcd, 0x73, 0xa6, 0x70, 0x9e, 0xe5, 0xda, 0xe8, 0xc5, 0xfc, + 0x18, 0xeb, 0xd3, 0xc3, 0x8c, 0x00, 0x6b, 0x3b, 0x2e, 0xf8, 0x05, 0x83, 0x9d, 0xcc, 0xcd, 0x9d, + 0x32, 0x1b, 0x25, 0x43, 0x95, 0xb3, 0x0f, 0xd0, 0xda, 0x6b, 0x23, 0x63, 0xee, 0x4d, 0xbc, 0xe9, + 0x58, 0x38, 0xc0, 0x18, 0x34, 0x37, 0x12, 0xcf, 0xfc, 0xdd, 0xc4, 0x9b, 0xf6, 0x05, 0x7d, 0x07, + 0x07, 0xe8, 0xfc, 0xb1, 0x89, 0xdb, 0x75, 0x25, 0x7b, 0x6f, 0x32, 0xfb, 0x09, 0xbe, 0x4d, 0x46, + 0x97, 0x4b, 0x27, 0xfd, 0x1f, 0x1f, 0x5d, 0xfd, 0x62, 0x56, 0x2b, 0x15, 0xd7, 0xce, 0xe0, 0xa5, + 0x01, 0xed, 0xe2, 0x32, 0x1c, 0x3a, 0xab, 0xb3, 0x8c, 0xd2, 0xed, 0x9a, 0xa2, 0x7b, 0xa2, 0x84, + 0xec, 0x93, 0xf5, 0x44, 0xf7, 0x67, 0x43, 0xc1, 0x4c, 0x14, 0x88, 0x7d, 0x85, 0xe6, 0x3e, 0x4a, + 0x14, 0x6f, 0x50, 0xdd, 0xb8, 0xac, 0xb3, 0x1c, 0x1a, 0x99, 0x64, 0x82, 0x64, 0x7b, 0xfc, 0xff, + 0x25, 0xd9, 0x3f, 0x21, 0x6f, 0xba, 0xe3, 0x0e, 0xb1, 0xcf, 0xd0, 0xa5, 0x1f, 0xb6, 0x4a, 0x8b, + 0x94, 0x0a, 0xb3, 0xef, 0xe0, 0xff, 0x93, 0x68, 0x8a, 0x7f, 0xe6, 0x6d, 0x6a, 0x18, 0x95, 0x0d, + 0x05, 0x2d, 0xae, 0x3d, 0xec, 0x1b, 0x0c, 0x2d, 0x5c, 0xe9, 0x24, 0x89, 0x0c, 0x4d, 0xa8, 0x43, + 0x13, 0xba, 0x61, 0x6d, 0xed, 0x5a, 0x1a, 0x49, 0x8e, 0x2e, 0x39, 0x2a, 0x6c, 0x33, 0x0e, 0x32, + 0x8e, 0x42, 0x69, 0x74, 0x8e, 0xe4, 0xe8, 0xb9, 0x8c, 0x3a, 0xcb, 0xbe, 0xc0, 0x60, 0xa5, 0x53, + 0x54, 0x29, 0x5e, 0x9c, 0x0d, 0xc8, 0x56, 0x27, 0xed, 0x44, 0x97, 0x59, 0x46, 0xba, 0x4f, 0x7a, + 0x09, 0xd9, 0x14, 0x46, 0xf6, 0x56, 0x42, 0xe1, 
0x25, 0x36, 0x2e, 0xa1, 0x4f, 0x8e, 0x5b, 0x9a, + 0x05, 0xd0, 0xff, 0xfb, 0x18, 0x85, 0x2a, 0x3d, 0x29, 0xb2, 0x0d, 0xc8, 0x56, 0xe3, 0x6c, 0xda, + 0x2e, 0xd7, 0x99, 0x46, 0x95, 0x2f, 0xc3, 0x30, 0x57, 0x88, 0x7c, 0xe8, 0xd2, 0x6e, 0xe8, 0xe0, + 0x37, 0xf4, 0xaa, 0xed, 0xd8, 0xeb, 0xa1, 0x3a, 0xe9, 0x34, 0x44, 0x5a, 0xf8, 0x7b, 0x51, 0x42, + 0xfb, 0x2e, 0x53, 0x99, 0x6a, 0xa4, 0x7d, 0x8f, 0x84, 0x03, 0xc7, 0xe2, 0x19, 0xbf, 0x06, 0x00, + 0x00, 0xff, 0xff, 0xde, 0x29, 0x34, 0x75, 0xe9, 0x02, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index bc3cf8749ec..835d6b74b72 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -19,10 +19,10 @@ message Header { sint64 Height = 2; Timestamp Time = 3; sint64 NumTxs = 4; + sint64 TotalTxs = 5; // prev block info - BlockID LastBlockID = 5; - sint64 TotalTxs = 6; + BlockID LastBlockID = 6; // hashes of block data bytes LastCommitHash = 7; // commit from validators from the last block @@ -30,12 +30,14 @@ message Header { // hashes from the app output from the prev block bytes ValidatorsHash = 9; // validators for the current block - bytes ConsensusHash = 10; // consensus params for current block - bytes AppHash = 11; // state after txs from the previous block - bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block + bytes NextValidatorsHash = 10; // validators for the next block + bytes ConsensusHash = 11; // consensus params for current block + bytes AppHash = 12; // state after txs from the previous block + bytes LastResultsHash = 13; // root hash of all results from the txs from the previous block // consensus info - bytes EvidenceHash = 13; // evidence included in the block + bytes EvidenceHash = 14; // evidence included in the block + bytes ProposerAddress = 15; // original proposer of the block } // Timestamp wraps how amino encodes time. 
Note that this is different from the protobuf well-known type diff --git a/types/proto3_test.go b/types/proto3_test.go index 19a624a65dd..c9dfa35a9b1 100644 --- a/types/proto3_test.go +++ b/types/proto3_test.go @@ -63,15 +63,10 @@ func TestProto3Compatibility(t *testing.T) { assert.Equal(t, ab, pb, "encoding doesn't match") emptyLastBlockPb := proto3.Header{ - ChainID: "cosmos", - Height: 150, - Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, - NumTxs: 7, - // This is not fully skipped in amino (yet) although it is empty: - LastBlockID: &proto3.BlockID{ - PartsHeader: &proto3.PartSetHeader{ - }, - }, + ChainID: "cosmos", + Height: 150, + Time: &proto3.Timestamp{Seconds: seconds, Nanos: nanos}, + NumTxs: 7, TotalTxs: 100, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), diff --git a/types/protobuf.go b/types/protobuf.go index 01d4ebf0cbb..9c448cd848d 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -1,7 +1,6 @@ package types import ( - "bytes" "fmt" "reflect" "time" @@ -35,34 +34,53 @@ type tm2pb struct{} func (tm2pb) Header(header *Header) abci.Header { return abci.Header{ - ChainID: header.ChainID, - Height: header.Height, - + ChainID: header.ChainID, + Height: header.Height, Time: header.Time, - NumTxs: int32(header.NumTxs), // XXX: overflow + NumTxs: header.NumTxs, TotalTxs: header.TotalTxs, - LastBlockHash: header.LastBlockID.Hash, - ValidatorsHash: header.ValidatorsHash, - AppHash: header.AppHash, + LastBlockId: TM2PB.BlockID(header.LastBlockID), + + LastCommitHash: header.LastCommitHash, + DataHash: header.DataHash, + + ValidatorsHash: header.ValidatorsHash, + ConsensusHash: header.ConsensusHash, + AppHash: header.AppHash, + LastResultsHash: header.LastResultsHash, - // Proposer: TODO + EvidenceHash: header.EvidenceHash, + ProposerAddress: header.ProposerAddress, } } -func (tm2pb) ValidatorWithoutPubKey(val *Validator) abci.Validator { +func (tm2pb) Validator(val *Validator) abci.Validator { return abci.Validator{ Address: val.PubKey.Address(), Power: val.VotingPower, } } +func (tm2pb) BlockID(blockID BlockID) abci.BlockID { + return abci.BlockID{ + Hash: blockID.Hash, + PartsHeader: TM2PB.PartSetHeader(blockID.PartsHeader), + } +} + +func (tm2pb) PartSetHeader(header PartSetHeader) abci.PartSetHeader { + return abci.PartSetHeader{ + Total: int32(header.Total), + Hash: header.Hash, + } +} + // XXX: panics on unknown pubkey type -func (tm2pb) Validator(val *Validator) abci.Validator { - return abci.Validator{ - Address: val.PubKey.Address(), - PubKey: TM2PB.PubKey(val.PubKey), - Power: val.VotingPower, +func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { + return abci.ValidatorUpdate{ + PubKey: TM2PB.PubKey(val.PubKey), + Power: val.VotingPower, } } @@ -86,10 +104,10 @@ func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { } // XXX: panics on nil or unknown pubkey type -func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator { - validators := make([]abci.Validator, vals.Size()) +func (tm2pb) ValidatorUpdates(vals *ValidatorSet) []abci.ValidatorUpdate { + validators := make([]abci.ValidatorUpdate, vals.Size()) for i, val := range vals.Validators { - validators[i] = TM2PB.Validator(val) + validators[i] = TM2PB.ValidatorUpdate(val) } return validators } @@ -97,10 +115,8 @@ func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator { func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { return &abci.ConsensusParams{ BlockSize: &abci.BlockSize{ - - MaxBytes: int32(params.BlockSize.MaxBytes), - MaxTxs: 
int32(params.BlockSize.MaxTxs), - MaxGas: params.BlockSize.MaxGas, + MaxBytes: int32(params.BlockSize.MaxBytes), + MaxGas: params.BlockSize.MaxGas, }, TxSize: &abci.TxSize{ MaxBytes: int32(params.TxSize.MaxBytes), @@ -136,7 +152,7 @@ func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci. return abci.Evidence{ Type: evType, - Validator: TM2PB.ValidatorWithoutPubKey(val), + Validator: TM2PB.Validator(val), Height: ev.Height(), Time: evTime, TotalVotingPower: valSet.TotalVotingPower(), @@ -144,12 +160,11 @@ func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci. } // XXX: panics on nil or unknown pubkey type -func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator { +func (tm2pb) NewValidatorUpdate(pubkey crypto.PubKey, power int64) abci.ValidatorUpdate { pubkeyABCI := TM2PB.PubKey(pubkey) - return abci.Validator{ - Address: pubkey.Address(), - PubKey: pubkeyABCI, - Power: power, + return abci.ValidatorUpdate{ + PubKey: pubkeyABCI, + Power: power, } } @@ -185,26 +200,14 @@ func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) { } } -func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) { +func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { pub, err := PB2TM.PubKey(v.PubKey) if err != nil { return nil, err } - // If the app provided an address too, it must match. - // This is just a sanity check. - if len(v.Address) > 0 { - if !bytes.Equal(pub.Address(), v.Address) { - return nil, fmt.Errorf("Validator.Address (%X) does not match PubKey.Address (%X)", - v.Address, pub.Address()) - } - } - tmVals[i] = &Validator{ - Address: pub.Address(), - PubKey: pub, - VotingPower: v.Power, - } + tmVals[i] = NewValidator(pub, v.Power) } return tmVals, nil } @@ -212,9 +215,8 @@ func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) { func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams { return ConsensusParams{ BlockSize: BlockSize{ - MaxBytes: int(csp.BlockSize.MaxBytes), // XXX - MaxTxs: int(csp.BlockSize.MaxTxs), // XXX - MaxGas: csp.BlockSize.MaxGas, + MaxBytes: int(csp.BlockSize.MaxBytes), // XXX + MaxGas: csp.BlockSize.MaxGas, }, TxSize: TxSize{ MaxBytes: int(csp.TxSize.MaxBytes), // XXX diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 6c9ba3668e7..f8682abf895 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -9,6 +9,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestABCIPubKey(t *testing.T) { @@ -41,26 +42,26 @@ func TestABCIValidators(t *testing.T) { VotingPower: 10, } - abciVal := TM2PB.Validator(tmVal) - tmVals, err := PB2TM.Validators([]abci.Validator{abciVal}) + abciVal := TM2PB.ValidatorUpdate(tmVal) + tmVals, err := PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) - abciVals := TM2PB.Validators(NewValidatorSet(tmVals)) - assert.Equal(t, []abci.Validator{abciVal}, abciVals) + abciVals := TM2PB.ValidatorUpdates(NewValidatorSet(tmVals)) + assert.Equal(t, []abci.ValidatorUpdate{abciVal}, abciVals) // val with address tmVal.Address = pkEd.Address() - abciVal = TM2PB.Validator(tmVal) - tmVals, err = PB2TM.Validators([]abci.Validator{abciVal}) + abciVal = 
TM2PB.ValidatorUpdate(tmVal) + tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.Nil(t, err) assert.Equal(t, tmValExpected, tmVals[0]) - // val with incorrect address - abciVal = TM2PB.Validator(tmVal) - abciVal.Address = []byte("incorrect!") - tmVals, err = PB2TM.Validators([]abci.Validator{abciVal}) + // val with incorrect pubkey data + abciVal = TM2PB.ValidatorUpdate(tmVal) + abciVal.PubKey.Data = []byte("incorrect!") + tmVals, err = PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{abciVal}) assert.NotNil(t, err) assert.Nil(t, tmVals) } @@ -76,19 +77,21 @@ func TestABCIConsensusParams(t *testing.T) { func TestABCIHeader(t *testing.T) { header := &Header{ - Height: int64(3), - Time: time.Now(), - NumTxs: int64(10), + Height: int64(3), + Time: tmtime.Now(), + NumTxs: int64(10), + ProposerAddress: []byte("cloak"), } abciHeader := TM2PB.Header(header) assert.Equal(t, int64(3), abciHeader.Height) + assert.Equal(t, []byte("cloak"), abciHeader.ProposerAddress) } func TestABCIEvidence(t *testing.T) { val := NewMockPV() - blockID := makeBlockID("blockhash", 1000, "partshash") - blockID2 := makeBlockID("blockhash2", 1000, "partshash") + blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) + blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" ev := &DuplicateVoteEvidence{ PubKey: val.GetPubKey(), @@ -102,32 +105,29 @@ func TestABCIEvidence(t *testing.T) { ) assert.Equal(t, "duplicate/vote", abciEv.Type) - - // test we do not send pubkeys - assert.Empty(t, abciEv.Validator.PubKey) } type pubKeyEddie struct{} -func (pubKeyEddie) Address() Address { return []byte{} } -func (pubKeyEddie) Bytes() []byte { return []byte{} } +func (pubKeyEddie) Address() Address { return []byte{} } +func (pubKeyEddie) Bytes() []byte { return []byte{} } func (pubKeyEddie) VerifyBytes(msg []byte, sig []byte) bool { return false } -func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } +func (pubKeyEddie) Equals(crypto.PubKey) bool { return false } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := ed25519.GenPrivKey().PubKey() - abciVal := TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10) + abciVal := TM2PB.NewValidatorUpdate(pubkey, 10) assert.Equal(t, int64(10), abciVal.Power) - assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(nil, 10) }) - assert.Panics(t, func() { TM2PB.ValidatorFromPubKeyAndPower(pubKeyEddie{}, 10) }) + assert.Panics(t, func() { TM2PB.NewValidatorUpdate(nil, 10) }) + assert.Panics(t, func() { TM2PB.NewValidatorUpdate(pubKeyEddie{}, 10) }) } func TestABCIValidatorWithoutPubKey(t *testing.T) { pkEd := ed25519.GenPrivKey().PubKey() - abciVal := TM2PB.ValidatorWithoutPubKey(&Validator{ + abciVal := TM2PB.Validator(&Validator{ Address: pkEd.Address(), PubKey: pkEd, VotingPower: 10, diff --git a/types/test_util.go b/types/test_util.go index f21c2831fe2..e20ea212e07 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -1,6 +1,8 @@ package types -import "time" +import ( + tmtime "github.com/tendermint/tendermint/types/time" +) func MakeCommit(blockID BlockID, height int64, round int, voteSet *VoteSet, @@ -16,7 +18,7 @@ func MakeCommit(blockID BlockID, height int64, round int, Round: round, Type: VoteTypePrecommit, BlockID: blockID, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), } _, err := signAddVote(validators[i], vote, voteSet) diff --git a/types/time/time.go b/types/time/time.go new file mode 100644 index 00000000000..022bdf574f8 --- /dev/null +++ 
b/types/time/time.go @@ -0,0 +1,58 @@ +package time + +import ( + "sort" + "time" +) + +// Now returns the current time in UTC with no monotonic component. +func Now() time.Time { + return Canonical(time.Now()) +} + +// Canonical returns UTC time with no monotonic component. +// Stripping the monotonic component is for time equality. +// See https://github.com/tendermint/tendermint/pull/2203#discussion_r215064334 +func Canonical(t time.Time) time.Time { + return t.Round(0).UTC() +} + +// WeightedTime for computing a median. +type WeightedTime struct { + Time time.Time + Weight int64 +} + +// NewWeightedTime with time and weight. +func NewWeightedTime(time time.Time, weight int64) *WeightedTime { + return &WeightedTime{ + Time: time, + Weight: weight, + } +} + +// WeightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. +func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res time.Time) { + median := totalVotingPower / 2 + + sort.Slice(weightedTimes, func(i, j int) bool { + if weightedTimes[i] == nil { + return false + } + if weightedTimes[j] == nil { + return true + } + return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() + }) + + for _, weightedTime := range weightedTimes { + if weightedTime != nil { + if median <= weightedTime.Weight { + res = weightedTime.Time + break + } + median -= weightedTime.Weight + } + } + return +} diff --git a/types/time/time_test.go b/types/time/time_test.go new file mode 100644 index 00000000000..1b1a30e5058 --- /dev/null +++ b/types/time/time_test.go @@ -0,0 +1,56 @@ +package time + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestWeightedMedian(t *testing.T) { + m := make([]*WeightedTime, 3) + + t1 := Now() + t2 := t1.Add(5 * time.Second) + t3 := t1.Add(10 * time.Second) + + m[2] = NewWeightedTime(t1, 33) // faulty processes + m[0] = NewWeightedTime(t2, 40) // correct processes + m[1] = NewWeightedTime(t3, 27) // correct processes + totalVotingPower := int64(100) + + median := WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t3) || median.Equal(t3))) + + m[1] = NewWeightedTime(t1, 40) // correct processes + m[2] = NewWeightedTime(t2, 27) // correct processes + m[0] = NewWeightedTime(t3, 33) // faulty processes + totalVotingPower = int64(100) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t2, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t2) || median.Equal(t2))) + + m = make([]*WeightedTime, 8) + t4 := t1.Add(15 * time.Second) + t5 := t1.Add(60 * time.Second) + + m[3] = NewWeightedTime(t1, 10) // correct processes + m[1] = NewWeightedTime(t2, 10) // correct processes + m[5] = NewWeightedTime(t2, 10) // correct processes + m[4] = NewWeightedTime(t3, 23) // faulty processes + m[0] = NewWeightedTime(t4, 20) // correct processes + m[7] = NewWeightedTime(t5, 10) // faulty processes + totalVotingPower = int64(83) + + median = WeightedMedian(m, totalVotingPower) + assert.Equal(t, t3, median) + // median always returns value between values of correct processes + assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && + (median.Before(t4) || median.Equal(t4))) +} diff --git a/types/validator_set.go 
b/types/validator_set.go index 60fc2d83b59..4dab4d84071 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -29,42 +29,57 @@ type ValidatorSet struct { totalVotingPower int64 } -func NewValidatorSet(vals []*Validator) *ValidatorSet { - validators := make([]*Validator, len(vals)) - for i, val := range vals { +func NewValidatorSet(valz []*Validator) *ValidatorSet { + if valz != nil && len(valz) == 0 { + panic("validator set initialization slice cannot be an empty slice (but it can be nil)") + } + validators := make([]*Validator, len(valz)) + for i, val := range valz { validators[i] = val.Copy() } sort.Sort(ValidatorsByAddress(validators)) - vs := &ValidatorSet{ + vals := &ValidatorSet{ Validators: validators, } - - if len(vals) > 0 { - vs.IncrementAccum(1) + if len(valz) > 0 { + vals.IncrementAccum(1) } - return vs + return vals +} + +// Nil or empty validator sets are invalid. +func (vals *ValidatorSet) IsNilOrEmpty() bool { + return vals == nil || len(vals.Validators) == 0 +} + +// Increment Accum and update the proposer on a copy, and return it. +func (vals *ValidatorSet) CopyIncrementAccum(times int) *ValidatorSet { + copy := vals.Copy() + copy.IncrementAccum(times) + return copy } // IncrementAccum increments accum of each validator and updates the // proposer. Panics if validator set is empty. -func (valSet *ValidatorSet) IncrementAccum(times int) { +func (vals *ValidatorSet) IncrementAccum(times int) { + // Add VotingPower * times to each validator and order into heap. validatorsHeap := cmn.NewHeap() - for _, val := range valSet.Validators { - // check for overflow both multiplication and sum + for _, val := range vals.Validators { + // Check for overflow both multiplication and sum. val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) validatorsHeap.PushComparable(val, accumComparable{val}) } - // Decrement the validator with most accum times times + // Decrement the validator with most accum times times. for i := 0; i < times; i++ { mostest := validatorsHeap.Peek().(*Validator) // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower()) + mostest.Accum = safeSubClip(mostest.Accum, vals.TotalVotingPower()) if i == times-1 { - valSet.Proposer = mostest + vals.Proposer = mostest } else { validatorsHeap.Update(mostest, accumComparable{mostest}) } @@ -72,36 +87,36 @@ func (valSet *ValidatorSet) IncrementAccum(times int) { } // Copy each validator into a new ValidatorSet -func (valSet *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(valSet.Validators)) - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Copy() *ValidatorSet { + validators := make([]*Validator, len(vals.Validators)) + for i, val := range vals.Validators { // NOTE: must copy, since IncrementAccum updates in place. validators[i] = val.Copy() } return &ValidatorSet{ Validators: validators, - Proposer: valSet.Proposer, - totalVotingPower: valSet.totalVotingPower, + Proposer: vals.Proposer, + totalVotingPower: vals.totalVotingPower, } } // HasAddress returns true if address given is in the validator set, false - // otherwise. 
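// Aside on NewValidatorSet above: the new guard deliberately distinguishes a
// nil slice (fine: an empty set that can be filled with Add later) from a
// non-nil empty slice (treated as a caller bug, and panics). A self-contained
// sketch of the same guard, with hypothetical names:

package main

import "fmt"

type validator struct{ addr string }

func newSet(vals []*validator) []*validator {
	// nil means "no validators yet"; a non-nil empty slice is rejected.
	if vals != nil && len(vals) == 0 {
		panic("validator slice cannot be empty (but it can be nil)")
	}
	out := make([]*validator, len(vals))
	copy(out, vals)
	return out
}

func main() {
	fmt.Println(len(newSet(nil)))                       // 0: nil is allowed
	fmt.Println(len(newSet([]*validator{{addr: "a"}}))) // 1
	// newSet([]*validator{}) would panic, as TestValidatorSetBasic asserts.
}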
-func (valSet *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) HasAddress(address []byte) bool { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) + return idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) } // GetByAddress returns an index of the validator with address and validator // itself if found. Otherwise, -1 and nil are returned. -func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) { - return idx, valSet.Validators[idx].Copy() + if idx < len(vals.Validators) && bytes.Equal(vals.Validators[idx].Address, address) { + return idx, vals.Validators[idx].Copy() } return -1, nil } @@ -109,45 +124,45 @@ func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Valida // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to // len(ValidatorSet.Validators). -func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { - if index < 0 || index >= len(valSet.Validators) { +func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(vals.Validators) { return nil, nil } - val = valSet.Validators[index] + val = vals.Validators[index] return val.Address, val.Copy() } // Size returns the length of the validator set. -func (valSet *ValidatorSet) Size() int { - return len(valSet.Validators) +func (vals *ValidatorSet) Size() int { + return len(vals.Validators) } // TotalVotingPower returns the sum of the voting powers of all validators. -func (valSet *ValidatorSet) TotalVotingPower() int64 { - if valSet.totalVotingPower == 0 { - for _, val := range valSet.Validators { +func (vals *ValidatorSet) TotalVotingPower() int64 { + if vals.totalVotingPower == 0 { + for _, val := range vals.Validators { // mind overflow - valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower) + vals.totalVotingPower = safeAddClip(vals.totalVotingPower, val.VotingPower) } } - return valSet.totalVotingPower + return vals.totalVotingPower } // GetProposer returns the current proposer. If the validator set is empty, nil // is returned. 
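// Aside on HasAddress/GetByAddress above: both depend on Validators staying
// sorted by address, so lookup is a binary search -- sort.Search returns the
// first index whose address is >= the target, and bytes.Equal confirms an
// exact hit. A standalone sketch with bare addresses (illustrative data only):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func hasAddress(sorted [][]byte, address []byte) bool {
	// First index i with sorted[i] >= address.
	idx := sort.Search(len(sorted), func(i int) bool {
		return bytes.Compare(address, sorted[i]) <= 0
	})
	return idx < len(sorted) && bytes.Equal(sorted[idx], address)
}

func main() {
	sorted := [][]byte{[]byte("aa"), []byte("bb"), []byte("dd")}
	fmt.Println(hasAddress(sorted, []byte("bb"))) // true: exact match at index 1
	fmt.Println(hasAddress(sorted, []byte("cc"))) // false: search lands on "dd"
}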
-func (valSet *ValidatorSet) GetProposer() (proposer *Validator) { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) GetProposer() (proposer *Validator) { + if len(vals.Validators) == 0 { return nil } - if valSet.Proposer == nil { - valSet.Proposer = valSet.findProposer() + if vals.Proposer == nil { + vals.Proposer = vals.findProposer() } - return valSet.Proposer.Copy() + return vals.Proposer.Copy() } -func (valSet *ValidatorSet) findProposer() *Validator { +func (vals *ValidatorSet) findProposer() *Validator { var proposer *Validator - for _, val := range valSet.Validators { + for _, val := range vals.Validators { if proposer == nil || !bytes.Equal(val.Address, proposer.Address) { proposer = proposer.CompareAccum(val) } @@ -157,12 +172,12 @@ func (valSet *ValidatorSet) findProposer() *Validator { // Hash returns the Merkle root hash build using validators (as leaves) in the // set. -func (valSet *ValidatorSet) Hash() []byte { - if len(valSet.Validators) == 0 { +func (vals *ValidatorSet) Hash() []byte { + if len(vals.Validators) == 0 { return nil } - hashers := make([]merkle.Hasher, len(valSet.Validators)) - for i, val := range valSet.Validators { + hashers := make([]merkle.Hasher, len(vals.Validators)) + for i, val := range vals.Validators { hashers[i] = val } return merkle.SimpleHashFromHashers(hashers) @@ -170,70 +185,70 @@ func (valSet *ValidatorSet) Hash() []byte { // Add adds val to the validator set and returns true. It returns false if val // is already in the set. -func (valSet *ValidatorSet) Add(val *Validator) (added bool) { +func (vals *ValidatorSet) Add(val *Validator) (added bool) { val = val.Copy() - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0 + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(val.Address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) { - valSet.Validators = append(valSet.Validators, val) + if idx >= len(vals.Validators) { + vals.Validators = append(vals.Validators, val) // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true - } else if bytes.Equal(valSet.Validators[idx].Address, val.Address) { + } else if bytes.Equal(vals.Validators[idx].Address, val.Address) { return false } else { - newValidators := make([]*Validator, len(valSet.Validators)+1) - copy(newValidators[:idx], valSet.Validators[:idx]) + newValidators := make([]*Validator, len(vals.Validators)+1) + copy(newValidators[:idx], vals.Validators[:idx]) newValidators[idx] = val - copy(newValidators[idx+1:], valSet.Validators[idx:]) - valSet.Validators = newValidators + copy(newValidators[idx+1:], vals.Validators[idx:]) + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } } // Update updates val and returns true. It returns false if val is not present // in the set. 
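// Aside on Add above: insertion keeps the slice sorted by splicing the new
// validator in at the index sort.Search returned, then clears the cached
// Proposer and totalVotingPower so they are lazily recomputed. A standalone
// sketch of the splice itself (hypothetical data, caching not shown):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

func insertSorted(sorted [][]byte, val []byte) [][]byte {
	idx := sort.Search(len(sorted), func(i int) bool {
		return bytes.Compare(val, sorted[i]) <= 0
	})
	out := make([][]byte, len(sorted)+1)
	copy(out[:idx], sorted[:idx]) // everything before the slot
	out[idx] = val
	copy(out[idx+1:], sorted[idx:]) // everything after, shifted right
	return out
}

func main() {
	sorted := [][]byte{[]byte("aa"), []byte("cc")}
	fmt.Printf("%s\n", insertSorted(sorted, []byte("bb"))) // [aa bb cc]
}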
-func (valSet *ValidatorSet) Update(val *Validator) (updated bool) { - index, sameVal := valSet.GetByAddress(val.Address) +func (vals *ValidatorSet) Update(val *Validator) (updated bool) { + index, sameVal := vals.GetByAddress(val.Address) if sameVal == nil { return false } - valSet.Validators[index] = val.Copy() + vals.Validators[index] = val.Copy() // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return true } // Remove deletes the validator with address. It returns the validator removed // and true. It returns nil and false if validator is not present in the set. -func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 +func (vals *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) { + idx := sort.Search(len(vals.Validators), func(i int) bool { + return bytes.Compare(address, vals.Validators[i].Address) <= 0 }) - if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) { + if idx >= len(vals.Validators) || !bytes.Equal(vals.Validators[idx].Address, address) { return nil, false } - removedVal := valSet.Validators[idx] - newValidators := valSet.Validators[:idx] - if idx+1 < len(valSet.Validators) { - newValidators = append(newValidators, valSet.Validators[idx+1:]...) + removedVal := vals.Validators[idx] + newValidators := vals.Validators[:idx] + if idx+1 < len(vals.Validators) { + newValidators = append(newValidators, vals.Validators[idx+1:]...) } - valSet.Validators = newValidators + vals.Validators = newValidators // Invalidate cache - valSet.Proposer = nil - valSet.totalVotingPower = 0 + vals.Proposer = nil + vals.totalVotingPower = 0 return removedVal, true } // Iterate will run the given function over the set. -func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { - for i, val := range valSet.Validators { +func (vals *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range vals.Validators { stop := fn(i, val.Copy()) if stop { break @@ -241,87 +256,106 @@ func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) { } } -// Verify that +2/3 of the set had signed the given signBytes -func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { - if valSet.Size() != len(commit.Precommits) { - return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits)) +// Verify that +2/3 of the set had signed the given signBytes. +func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error { + if vals.Size() != len(commit.Precommits) { + return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", vals.Size(), len(commit.Precommits)) } if height != commit.Height() { return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("Invalid commit -- wrong block id: want %v got %v", + blockID, commit.BlockID) + } talliedVotingPower := int64(0) round := commit.Round() for idx, precommit := range commit.Precommits { - // may be nil if validator skipped. if precommit == nil { - continue + continue // OK, some precommits can be missing.
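// Aside on VerifyCommit: the loop tallies power only for precommits whose
// BlockID matches the commit, and acceptance requires strictly more than 2/3
// of the total power. total*2/3 floors under integer division, which is why
// the error reports total*2/3 + 1 as the needed amount. A standalone sketch
// of that arithmetic (illustrative numbers only):

package main

import "fmt"

func main() {
	total := int64(10)
	needed := total*2/3 + 1 // 20/3 floors to 6, so 7 of 10 is required
	for _, tallied := range []int64{6, 7} {
		fmt.Printf("tallied=%d passes=%v (needed %d)\n",
			tallied, tallied > total*2/3, needed)
	}
}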
} if precommit.Height != height { - return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height) + return fmt.Errorf("Invalid commit -- wrong height: want %v got %v", height, precommit.Height) } if precommit.Round != round { - return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + return fmt.Errorf("Invalid commit -- wrong round: want %v got %v", round, precommit.Round) } if precommit.Type != VoteTypePrecommit { return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx) } - _, val := valSet.GetByIndex(idx) - // Validate signature + _, val := vals.GetByIndex(idx) + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } // Good precommit! - talliedVotingPower += val.VotingPower + if blockID.Equals(precommit.BlockID) { + talliedVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. We include stray + // precommits to measure validator availability. + } } - if talliedVotingPower > valSet.TotalVotingPower()*2/3 { + if talliedVotingPower > vals.TotalVotingPower()*2/3 { return nil } return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v", - talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + talliedVotingPower, (vals.TotalVotingPower()*2/3 + 1)) } -// VerifyCommitAny will check to see if the set would -// be valid with a different validator set. +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. +// +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block. +// +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions. // -// valSet is the validator set that we know -// * over 2/3 of the power in old signed this block +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. // -// newSet is the validator set that signed this block -// * only votes from old are sufficient for 2/3 majority -// in the new set as well +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. // -// That means that: -// * 10% of the valset can't just declare themselves kings -// * If the validator set is 3x old size, we need more proof to trust -func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. +// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the ValidatorSet. 
Caller must check that the +// commit height is greater than the height for this validator set. +func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, blockID BlockID, height int64, commit *Commit) error { + oldVals := vals - if newSet.Size() != len(commit.Precommits) { - return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) - } - if height != commit.Height() { - return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err } + // Check old voting power. oldVotingPower := int64(0) - newVotingPower := int64(0) seen := map[int]bool{} round := commit.Round() for idx, precommit := range commit.Precommits { - // first check as in VerifyCommit if precommit == nil { continue } if precommit.Height != height { - // return certerr.ErrHeightMismatch(height, precommit.Height) return cmn.NewError("Blocks don't match - %d vs %d", height, precommit.Height) } if precommit.Round != round { @@ -330,54 +364,45 @@ if precommit.Type != VoteTypePrecommit { return cmn.NewError("Invalid commit -- not precommit @ index %v", idx) } - if !blockID.Equals(precommit.BlockID) { - continue // Not an error, but doesn't count - } - - // we only grab by address, ignoring unknown validators - vi, ov := valSet.GetByAddress(precommit.ValidatorAddress) - if ov == nil || seen[vi] { + // See if this validator is in oldVals. + idx, val := oldVals.GetByAddress(precommit.ValidatorAddress) + if val == nil || seen[idx] { continue // missing or double vote... } - seen[vi] = true + seen[idx] = true - // Validate signature old school + // Validate signature. precommitSignBytes := precommit.SignBytes(chainID) - if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { + if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { return cmn.NewError("Invalid commit -- invalid signature: %v", precommit) } // Good precommit! - oldVotingPower += ov.VotingPower - - // check new school - _, cv := newSet.GetByIndex(idx) - if cv.PubKey.Equals(ov.PubKey) { - // make sure this is properly set in the current block as well - newVotingPower += cv.VotingPower + if blockID.Equals(precommit.BlockID) { + oldVotingPower += val.VotingPower + } else { + // It's OK that the BlockID doesn't match. We include stray + precommits to measure validator availability.
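// Aside on VerifyFutureCommit: after delegating to newSet.VerifyCommit, it
// independently re-tallies the same precommits against the *old* validator
// set, skipping signers it does not know and deduplicating by index via the
// seen map. A standalone sketch of that dedup-and-tally shape (hypothetical
// types standing in for real votes; not this package's API):

package main

import "fmt"

type vote struct {
	valIdx int   // index in the old set; -1 if the signer is unknown there
	power  int64 // that old validator's voting power
}

func talliedOldPower(votes []vote) int64 {
	seen := map[int]bool{}
	power := int64(0)
	for _, v := range votes {
		if v.valIdx < 0 || seen[v.valIdx] {
			continue // missing or double vote
		}
		seen[v.valIdx] = true
		power += v.power
	}
	return power
}

func main() {
	votes := []vote{{0, 40}, {1, 35}, {1, 35}, {-1, 99}} // dup and stranger dropped
	total := int64(100)
	fmt.Println(talliedOldPower(votes) > total*2/3) // 75 > 66: true
}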
} } - if oldVotingPower <= valSet.TotalVotingPower()*2/3 { + if oldVotingPower <= oldVals.TotalVotingPower()*2/3 { return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v", - oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) - } else if newVotingPower <= newSet.TotalVotingPower()*2/3 { - return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v", - newVotingPower, (newSet.TotalVotingPower()*2/3 + 1)) + oldVotingPower, (oldVals.TotalVotingPower()*2/3 + 1)) } return nil } -func (valSet *ValidatorSet) String() string { - return valSet.StringIndented("") +func (vals *ValidatorSet) String() string { + return vals.StringIndented("") } // String -func (valSet *ValidatorSet) StringIndented(indent string) string { - if valSet == nil { +func (vals *ValidatorSet) StringIndented(indent string) string { + if vals == nil { return "nil-ValidatorSet" } valStrings := []string{} - valSet.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) @@ -386,9 +411,9 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { %s Validators: %s %v %s}`, - indent, valSet.GetProposer().String(), + indent, vals.GetProposer().String(), indent, - indent, strings.Join(valStrings, "\n"+indent+" "), + indent, strings.Join(valStrings, "\n"+indent+" "), indent) } @@ -399,18 +424,18 @@ func (valSet *ValidatorSet) StringIndented(indent string) string { // Sort validators by address type ValidatorsByAddress []*Validator -func (vs ValidatorsByAddress) Len() int { - return len(vs) +func (valz ValidatorsByAddress) Len() int { + return len(valz) } -func (vs ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(vs[i].Address, vs[j].Address) == -1 +func (valz ValidatorsByAddress) Less(i, j int) bool { + return bytes.Compare(valz[i].Address, valz[j].Address) == -1 } -func (vs ValidatorsByAddress) Swap(i, j int) { - it := vs[i] - vs[i] = vs[j] - vs[j] = it +func (valz ValidatorsByAddress) Swap(i, j int) { + it := valz[i] + valz[i] = valz[j] + valz[j] = it } //------------------------------------- @@ -434,16 +459,16 @@ func (ac accumComparable) Less(o interface{}) bool { // NOTE: PrivValidators are in order.
// UNSTABLE func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - vals := make([]*Validator, numValidators) + valz := make([]*Validator, numValidators) privValidators := make([]PrivValidator, numValidators) for i := 0; i < numValidators; i++ { val, privValidator := RandValidator(false, votingPower) - vals[i] = val + valz[i] = val privValidators[i] = privValidator } - valSet := NewValidatorSet(vals) + vals := NewValidatorSet(valz) sort.Sort(PrivValidatorsByAddress(privValidators)) - return valSet, privValidators + return vals, privValidators } /////////////////////////////////////////////////////////////////////////////// diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 1756f7890c4..e411170746b 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -2,71 +2,73 @@ package types import ( "bytes" + "fmt" "math" "strings" "testing" "testing/quick" - "time" "github.com/stretchr/testify/assert" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" ) func TestValidatorSetBasic(t *testing.T) { - for _, vset := range []*ValidatorSet{NewValidatorSet([]*Validator{}), NewValidatorSet(nil)} { - assert.Panics(t, func() { vset.IncrementAccum(1) }) - - assert.EqualValues(t, vset, vset.Copy()) - assert.False(t, vset.HasAddress([]byte("some val"))) - idx, val := vset.GetByAddress([]byte("some val")) - assert.Equal(t, -1, idx) - assert.Nil(t, val) - addr, val := vset.GetByIndex(-100) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(0) - assert.Nil(t, addr) - assert.Nil(t, val) - addr, val = vset.GetByIndex(100) - assert.Nil(t, addr) - assert.Nil(t, val) - assert.Zero(t, vset.Size()) - assert.Equal(t, int64(0), vset.TotalVotingPower()) - assert.Nil(t, vset.GetProposer()) - assert.Nil(t, vset.Hash()) - - // add - val = randValidator_() - assert.True(t, vset.Add(val)) - assert.True(t, vset.HasAddress(val.Address)) - idx, val2 := vset.GetByAddress(val.Address) - assert.Equal(t, 0, idx) - assert.Equal(t, val, val2) - addr, val2 = vset.GetByIndex(0) - assert.Equal(t, []byte(val.Address), addr) - assert.Equal(t, val, val2) - assert.Equal(t, 1, vset.Size()) - assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) - assert.Equal(t, val, vset.GetProposer()) - assert.NotNil(t, vset.Hash()) - assert.NotPanics(t, func() { vset.IncrementAccum(1) }) - - // update - assert.False(t, vset.Update(randValidator_())) - val.VotingPower = 100 - assert.True(t, vset.Update(val)) - - // remove - val2, removed := vset.Remove(randValidator_().Address) - assert.Nil(t, val2) - assert.False(t, removed) - val2, removed = vset.Remove(val.Address) - assert.Equal(t, val.Address, val2.Address) - assert.True(t, removed) - } + assert.Panics(t, func() { NewValidatorSet([]*Validator{}) }) + + vset := NewValidatorSet(nil) + assert.Panics(t, func() { vset.IncrementAccum(1) }) + + assert.EqualValues(t, vset, vset.Copy()) + assert.False(t, vset.HasAddress([]byte("some val"))) + idx, val := vset.GetByAddress([]byte("some val")) + assert.Equal(t, -1, idx) + assert.Nil(t, val) + addr, val := vset.GetByIndex(-100) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(0) + assert.Nil(t, addr) + assert.Nil(t, val) + addr, val = vset.GetByIndex(100) + assert.Nil(t, addr) + assert.Nil(t, val) + 
assert.Zero(t, vset.Size()) + assert.Equal(t, int64(0), vset.TotalVotingPower()) + assert.Nil(t, vset.GetProposer()) + assert.Nil(t, vset.Hash()) + + // add + val = randValidator_() + assert.True(t, vset.Add(val)) + assert.True(t, vset.HasAddress(val.Address)) + idx, val2 := vset.GetByAddress(val.Address) + assert.Equal(t, 0, idx) + assert.Equal(t, val, val2) + addr, val2 = vset.GetByIndex(0) + assert.Equal(t, []byte(val.Address), addr) + assert.Equal(t, val, val2) + assert.Equal(t, 1, vset.Size()) + assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) + assert.Equal(t, val, vset.GetProposer()) + assert.NotNil(t, vset.Hash()) + assert.NotPanics(t, func() { vset.IncrementAccum(1) }) + + // update + assert.False(t, vset.Update(randValidator_())) + val.VotingPower = 100 + assert.True(t, vset.Update(val)) + + // remove + val2, removed := vset.Remove(randValidator_().Address) + assert.Nil(t, val2) + assert.False(t, removed) + val2, removed = vset.Remove(val.Address) + assert.Equal(t, val.Address, val2.Address) + assert.True(t, removed) } func TestCopy(t *testing.T) { @@ -218,7 +220,7 @@ func TestProposerSelection3(t *testing.T) { got := vset.GetProposer().Address expected := proposerOrder[j%4].Address if !bytes.Equal(got, expected) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) + t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) } // serialize, deserialize, check proposer @@ -228,7 +230,7 @@ func TestProposerSelection3(t *testing.T) { computed := vset.GetProposer() // findGetProposer() if i != 0 { if !bytes.Equal(got, computed.Address) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) + t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) } } @@ -382,7 +384,7 @@ func TestValidatorSetVerifyCommit(t *testing.T) { ValidatorIndex: 0, Height: height, Round: 0, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: VoteTypePrecommit, BlockID: blockID, } diff --git a/types/vote.go b/types/vote.go index 9a6180d7556..6481f56b9c5 100644 --- a/types/vote.go +++ b/types/vote.go @@ -10,6 +10,11 @@ import ( cmn "github.com/tendermint/tendermint/libs/common" ) +const ( + // MaxVoteBytes is a maximum vote size (including amino overhead). + MaxVoteBytes = 200 +) + var ( ErrVoteUnexpectedStep = errors.New("Unexpected step") ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index") diff --git a/types/vote_set.go b/types/vote_set.go index 36bf5ef7e32..dbcacbbdb88 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -170,7 +170,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) } - // Ensure that the signer has the right address + // Ensure that the signer has the right address. 
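// Aside on addVote's checks here: a vote carries both ValidatorIndex and
// ValidatorAddress, and the set is authoritative -- the address stored at
// that index must match the address the vote claims, otherwise the vote is
// rejected before any signature verification. A standalone sketch of that
// cross-check (hypothetical data):

package main

import (
	"bytes"
	"fmt"
)

func checkVote(valsByIndex [][]byte, valIndex int, claimed []byte) error {
	if valIndex < 0 || valIndex >= len(valsByIndex) {
		return fmt.Errorf("cannot find validator %d in set of size %d",
			valIndex, len(valsByIndex))
	}
	if !bytes.Equal(valsByIndex[valIndex], claimed) {
		return fmt.Errorf("address %X does not match index %d (%X)",
			claimed, valIndex, valsByIndex[valIndex])
	}
	return nil
}

func main() {
	vals := [][]byte{[]byte("aa"), []byte("bb")}
	fmt.Println(checkVote(vals, 1, []byte("bb"))) // <nil>
	fmt.Println(checkVote(vals, 1, []byte("aa"))) // address mismatch error
}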
if !bytes.Equal(valAddr, lookupAddr) { return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.", @@ -190,7 +190,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) } - // Add vote and get conflicting vote if any + // Add vote and get conflicting vote if any. added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) if conflicting != nil { return added, NewConflictingVoteError(val, conflicting, vote) @@ -201,7 +201,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { return added, nil } -// Returns (vote, true) if vote exists for valIndex and blockKey +// Returns (vote, true) if vote exists for valIndex and blockKey. func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { return existing, true diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 32ceb7b1614..995fb94bdc9 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -3,11 +3,11 @@ package types import ( "bytes" "testing" - "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" tst "github.com/tendermint/tendermint/libs/test" + tmtime "github.com/tendermint/tendermint/types/time" ) // NOTE: privValidators are in order @@ -83,7 +83,7 @@ func TestAddVote(t *testing.T) { Height: height, Round: round, Type: VoteTypePrevote, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } _, err := signAddVote(val0, vote, voteSet) @@ -113,7 +113,7 @@ func Test2_3Majority(t *testing.T) { Height: height, Round: round, Type: VoteTypePrevote, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } // 6 out of 10 voted for nil. 
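// Aside on the tmtime.Now() swaps in these tests: tmtime.Now() is
// Canonical(time.Now()), i.e. t.Round(0).UTC(). Round(0) strips Go's
// monotonic clock reading, which serialization never preserves, so canonical
// timestamps compare cleanly against decoded copies. A standalone sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Now()       // carries a monotonic clock reading
	c := t.Round(0).UTC() // canonical: wall clock only, in UTC
	fmt.Println(t.Equal(c))            // true: same instant
	fmt.Println(t.String())            // has an "m=+0.00..." monotonic suffix
	fmt.Println(c.String())            // no monotonic suffix
	fmt.Println(c == c.Round(0).UTC()) // true: canonicalizing is idempotent
}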
@@ -169,7 +169,7 @@ func Test2_3MajorityRedux(t *testing.T) { ValidatorIndex: -1, // NOTE: must fill in Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: VoteTypePrevote, BlockID: BlockID{blockHash, blockPartsHeader}, } @@ -264,7 +264,7 @@ func TestBadVotes(t *testing.T) { ValidatorIndex: -1, Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: VoteTypePrevote, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -326,7 +326,7 @@ func TestConflicts(t *testing.T) { ValidatorIndex: -1, Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: VoteTypePrevote, BlockID: BlockID{nil, PartSetHeader{}}, } @@ -455,7 +455,7 @@ func TestMakeCommit(t *testing.T) { ValidatorIndex: -1, Height: height, Round: round, - Timestamp: time.Now().UTC(), + Timestamp: tmtime.Now(), Type: VoteTypePrecommit, BlockID: BlockID{blockHash, blockPartsHeader}, } diff --git a/types/vote_test.go b/types/vote_test.go index 836baa615cf..4f544935902 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -1,12 +1,15 @@ package types import ( + "math" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/tmhash" + tmtime "github.com/tendermint/tendermint/types/time" ) func examplePrevote() *Vote { @@ -24,17 +27,17 @@ func exampleVote(t byte) *Vote { } return &Vote{ - ValidatorAddress: []byte("addr"), + ValidatorAddress: tmhash.Sum([]byte("validator_address")), ValidatorIndex: 56789, Height: 12345, Round: 2, Timestamp: stamp, Type: t, BlockID: BlockID{ - Hash: []byte("hash"), + Hash: tmhash.Sum([]byte("blockID_hash")), PartsHeader: PartSetHeader{ Total: 1000000, - Hash: []byte("parts_hash"), + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, }, } @@ -45,7 +48,7 @@ func TestVoteSignable(t *testing.T) { signBytes := vote.SignBytes("test_chain_id") signStr := string(signBytes) - expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}` + expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}` if signStr != expected { // NOTE: when this fails, you probably want to fix up consensus/replay_test too t.Errorf("Got unexpected sign string for Vote. 
Expected:\n%v\nGot:\n%v", expected, signStr) @@ -119,3 +122,30 @@ func TestVoteVerify(t *testing.T) { assert.Equal(t, ErrVoteInvalidSignature, err) } } + +func TestMaxVoteBytes(t *testing.T) { + vote := &Vote{ + ValidatorAddress: tmhash.Sum([]byte("validator_address")), + ValidatorIndex: math.MaxInt64, + Height: math.MaxInt64, + Round: math.MaxInt64, + Timestamp: tmtime.Now(), + Type: VoteTypePrevote, + BlockID: BlockID{ + Hash: tmhash.Sum([]byte("blockID_hash")), + PartsHeader: PartSetHeader{ + Total: math.MaxInt64, + Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), + }, + }, + } + + privVal := NewMockPV() + err := privVal.SignVote("test_chain_id", vote) + require.NoError(t, err) + + bz, err := cdc.MarshalBinary(vote) + require.NoError(t, err) + + assert.Equal(t, MaxVoteBytes, len(bz)) +} diff --git a/version/version.go b/version/version.go index 68a9954f0cc..337ce4eadf2 100644 --- a/version/version.go +++ b/version/version.go @@ -3,19 +3,22 @@ package version // Version components const ( Maj = "0" - Min = "23" - Fix = "1" + Min = "24" + Fix = "0" ) var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.23.1" + Version = "0.24.0" // GitCommit is the current HEAD set using ldflags. GitCommit string ) +// ABCIVersion is the version of the ABCI library +const ABCIVersion = "0.14.0" + func init() { if GitCommit != "" { Version += "-" + GitCommit
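// Aside on TestMaxVoteBytes above: the constant is pinned by amino-encoding a
// worst-case vote (max-valued integers, full tmhash-sized addresses and
// hashes) and asserting the encoded length, so consumers can reject oversized
// votes before decoding. A standalone sketch of that guard shape, using JSON
// as a stand-in wire format (assumption -- the real codec is amino):

package main

import (
	"encoding/json"
	"fmt"
)

const maxVoteBytes = 200 // mirrors MaxVoteBytes from types/vote.go

type wireVote struct {
	Height int64 `json:"height"`
	Round  int64 `json:"round"`
}

func decodeVote(bz []byte) (*wireVote, error) {
	if len(bz) > maxVoteBytes {
		return nil, fmt.Errorf("vote exceeds max size: %d > %d", len(bz), maxVoteBytes)
	}
	v := new(wireVote)
	return v, json.Unmarshal(bz, v)
}

func main() {
	bz, _ := json.Marshal(&wireVote{Height: 12345, Round: 2})
	v, err := decodeVote(bz)
	fmt.Println(v.Height, err) // 12345 <nil>
}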